[aarch64-port-dev ] RFR: Fix biased locking and enable as default

Edward Nevill ed at camswl.com
Thu Apr 24 09:58:19 UTC 2014


Hi,

The following patch fixes a problem where I was seeing assertion failures with biased locking.

The main problem was that the swap_reg (ie the compare value) and tmp_reg (ie the new value) were the wrong way around in two calls to cmpxchgptr. In most cases this worked OK, since it simply meant that there was garbage in the compare value, therefore the compare always failed, therefore it went off to the slow case instead. However, in certain cases this failed because it left the biased locking bits in the object header. The ensuing code assumed that some thread must have cleared them, on the grounds that at least one thread must have succeeded with the compare and exchange, whereas in our case all threads were failing because the compare value was wrong to start with.

I have also re-enabled the call to biased_locking_enter in generate_native_wrapper which was commented out for some reason and also fixed the register usage in that call (calls to biased_locking_enter must not pass rscratch1 as the tmp because that is used internally within biased_locking_enter).

Finally I have re-enabled biased locking as the default so that the overnight tests will kick in and test this fully. This also removes changes to shared code, which is always good.

OK to push?

Ed.


--- CUT HERE ---
# HG changeset patch
# User Edward Nevill edward.nevill at linaro.org
# Date 1398332582 -3600
#      Thu Apr 24 10:43:02 2014 +0100
# Node ID ef2aa7fd06f354b221432fdbc9f8b4ba6bd0a7c4
# Parent  563e44ab11a3570b1ec0f89e736832e14f7414ae
Fix biased locking and enable as default

diff -r 563e44ab11a3 -r ef2aa7fd06f3 src/cpu/aarch64/vm/globals_aarch64.hpp
--- a/src/cpu/aarch64/vm/globals_aarch64.hpp	Wed Apr 23 09:26:04 2014 -0400
+++ b/src/cpu/aarch64/vm/globals_aarch64.hpp	Thu Apr 24 10:43:02 2014 +0100
@@ -73,9 +73,6 @@
 
 define_pd_global(uintx, TypeProfileLevel, 0);
 
-// avoid biased locking while we are bootstrapping the aarch64 build
-define_pd_global(bool, UseBiasedLocking, false);
-
 #if defined(COMPILER1) || defined(COMPILER2)
 define_pd_global(intx, InlineSmallCode,          1000);
 #endif
diff -r 563e44ab11a3 -r ef2aa7fd06f3 src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Wed Apr 23 09:26:04 2014 -0400
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Thu Apr 24 10:43:02 2014 +0100
@@ -414,7 +414,7 @@
     Label here;
     load_prototype_header(tmp_reg, obj_reg);
     orr(tmp_reg, rthread, tmp_reg);
-    cmpxchgptr(tmp_reg, swap_reg, obj_reg, rscratch1, here, slow_case);
+    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
     // If the biasing toward our thread failed, then another thread
     // succeeded in biasing it toward itself and we need to revoke that
     // bias. The revocation will occur in the runtime in the slow case.
@@ -441,7 +441,7 @@
   {
     Label here, nope;
     load_prototype_header(tmp_reg, obj_reg);
-    cmpxchgptr(tmp_reg, swap_reg, obj_reg, rscratch1, here, &nope);
+    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, &nope);
     bind(here);
 
     // Fall through to the normal CAS-based lock, because no matter what
diff -r 563e44ab11a3 -r ef2aa7fd06f3 src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
--- a/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Wed Apr 23 09:26:04 2014 -0400
+++ b/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Thu Apr 24 10:43:02 2014 +0100
@@ -1815,7 +1815,7 @@
     __ ldr(obj_reg, Address(oop_handle_reg, 0));
 
     if (UseBiasedLocking) {
-      // __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
+      __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch2, false, lock_done, &slow_path_lock);
     }
 
     // Load (object->mark() | 1) into swap_reg %r0
diff -r 563e44ab11a3 -r ef2aa7fd06f3 src/cpu/zero/vm/globals_zero.hpp
--- a/src/cpu/zero/vm/globals_zero.hpp	Wed Apr 23 09:26:04 2014 -0400
+++ b/src/cpu/zero/vm/globals_zero.hpp	Thu Apr 24 10:43:02 2014 +0100
@@ -59,12 +59,6 @@
 
 define_pd_global(uintx, TypeProfileLevel, 0);
 
-#ifdef AARCH64
-// This is declared as _pd for AARCH64 only in globals.hpp
-// so must match with definition here.
-define_pd_global(bool, UseBiasedLocking, false);
-#endif
-
 #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)
 
 #endif // CPU_ZERO_VM_GLOBALS_ZERO_HPP
diff -r 563e44ab11a3 -r ef2aa7fd06f3 src/share/vm/runtime/globals.hpp
--- a/src/share/vm/runtime/globals.hpp	Wed Apr 23 09:26:04 2014 -0400
+++ b/src/share/vm/runtime/globals.hpp	Thu Apr 24 10:43:02 2014 +0100
@@ -1229,10 +1229,8 @@
   product(bool, CompactFields, true,                                        \
           "Allocate nonstatic fields in gaps between previous fields")      \
                                                                             \
-  AARCH64_ONLY(product_pd(bool, UseBiasedLocking,                   \
-			  "Enable biased locking in JVM"))		    \
-  NOT_AARCH64(product(bool, UseBiasedLocking, true,                         \
-		      "Enable biased locking in JVM"))			    \
+  product(bool, UseBiasedLocking, true,                                     \
+		      "Enable biased locking in JVM")			    \
   notproduct(bool, PrintFieldLayout, false,                                 \
           "Print field layout for each class")                              \
                                                                             \
--- CUT HERE ---




More information about the aarch64-port-dev mailing list