[aarch64-port-dev ] /hg/icedtea7-forest-aarch64/hotspot: Various concurrency fixes.

Andrew Dinn adinn at redhat.com
Thu Nov 20 15:17:32 UTC 2014


[forwarding bounced check-in message from adinn at icedtea.classpath.org]
------ This is a copy of the message, including all the headers. ------

Return-path: <adinn at icedtea.classpath.org>
Received: from localhost ([127.0.0.1] helo=icedtea.classpath.org)
	by icedtea.classpath.org with esmtp (Exim 4.69)
	(envelope-from <adinn at icedtea.classpath.org>)
	id 1XrSe3-0003az-78
	for aarch64-port-dev at openjdk.java.net; Thu, 20 Nov 2014 14:23:39 +0000
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Date: Thu, 20 Nov 2014 14:23:39 +0000
Subject: /hg/icedtea7-forest-aarch64/hotspot: Various concurrency fixes.
From: adinn at icedtea.classpath.org
X-Hg-Notification: changeset cc4000b1484c
Message-Id:
<hg.cc4000b1484c.1416493419.-5017525213744097322 at icedtea.classpath.org>
To: aarch64-port-dev at openjdk.java.net

changeset cc4000b1484c in /hg/icedtea7-forest-aarch64/hotspot
details:
http://icedtea.classpath.org/hg/icedtea7-forest-aarch64/hotspot?cmd=changeset;node=cc4000b1484c
author: adinn
date: Thu Nov 20 14:23:35 2014 +0000

	Various concurrency fixes.
	Invalidate the whole of a compiledIC stub.
	Add membars to interpreter in branches and ret instructions.
	Atomic::xchg must be a full barrier.


diffstat:

 src/cpu/aarch64/vm/aarch64.ad                                    |  19 ++++-----
 src/cpu/aarch64/vm/nativeInst_aarch64.hpp                        |   1 +
 src/cpu/aarch64/vm/templateTable_aarch64.cpp                     |  12 ++++++
 src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.inline.hpp      |  12 +++++-
 src/os_cpu/linux_aarch64/vm/orderAccess_linux_aarch64.inline.hpp |   4 --
 5 files changed, 31 insertions(+), 17 deletions(-)

diffs (136 lines):

diff -r f83ab0b76d43 -r cc4000b1484c src/cpu/aarch64/vm/aarch64.ad
--- a/src/cpu/aarch64/vm/aarch64.ad	Thu Nov 20 11:08:51 2014 +0000
+++ b/src/cpu/aarch64/vm/aarch64.ad	Thu Nov 20 14:23:35 2014 +0000
@@ -1358,12 +1358,10 @@
   return 4;
 }

-// !!! FIXME AARCH64 -- this needs to be reworked for jdk7
-
 uint size_java_to_interp()
 {
-  // count a mov mem --> to 3 movz/k and a branch
-  return 4 * NativeInstruction::instruction_size;
+  // on jdk7 we only need a mov oop and a branch
+  return 2 * NativeInstruction::instruction_size;
 }

 // Offset from start of compiled java to interpreter stub to the load
@@ -1390,11 +1388,11 @@
   // static stub relocation stores the instruction address of the call
   const RelocationHolder &rspec = static_stub_Relocation::spec(mark);
   __ relocate(rspec);
-  // !!! FIXME AARCH64
   // static stub relocation also tags the methodOop in the code-stream.
-  // for jdk7 we have to use movoop and locate the oop in the cpool
-  // if we use an immediate then patching fails to update the pool
-  // oop and GC overwrites the patch with movk/z 0x0000 again
+  //
+  // n.b. for jdk7 we have to use movoop and locate the oop in the
+  // cpool; if we use an immediate then patching fails to update the
+  // pool oop and GC overwrites the patch with movk/z 0x0000 again
   __ movoop(rmethod, (jobject) NULL);
   // This is recognized as unresolved by relocs/nativeinst/ic code
   __ b(__ pc());
@@ -1407,9 +1405,8 @@
 // relocation entries for call stub, compiled java to interpretor
 uint reloc_java_to_interp()
 {
-  // TODO fixme
-  // return a large number
-  return 5;
+  // n.b. on jdk7 we use a movoop and a branch
+  return 2;
 }

 //=============================================================================
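As a rough standalone analogue of the movoop-versus-immediate point made in the comment above (this is not HotSpot code; the struct and function names are invented for the sketch): because the stub loads the method oop from a constant-pool slot, the call-site resolver and the GC both update the same word, so neither can clobber the other's work, whereas a value baked into movz/movk immediates would be rewritten by GC back to its stale contents.

#include <cstdint>

// Hypothetical stand-in for the jdk7 java->interp stub layout:
// one patchable constant-pool slot plus a branch instruction.
struct StaticCallStub {
  void**   cpool_slot;   // shared slot read by the oop load in the stub
  uint32_t branch_insn;  // branch patched when the call site is resolved
};

// Resolution and GC relocation both write the single shared slot, so a
// later GC pass cannot silently undo the resolver's update.
inline void resolve_call(StaticCallStub& s, void* method_oop) { *s.cpool_slot = method_oop; }
inline void gc_relocate (StaticCallStub& s, void* moved_oop)  { *s.cpool_slot = moved_oop; }
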
diff -r f83ab0b76d43 -r cc4000b1484c src/cpu/aarch64/vm/nativeInst_aarch64.hpp
--- a/src/cpu/aarch64/vm/nativeInst_aarch64.hpp	Thu Nov 20 11:08:51 2014 +0000
+++ b/src/cpu/aarch64/vm/nativeInst_aarch64.hpp	Thu Nov 20 14:23:35 2014 +0000
@@ -142,6 +142,7 @@
     offset &= (1 << 26) - 1; // mask off insn part
     insn |= offset;
     set_int_at(displacement_offset, insn);
+    ICache::invalidate_range(instruction_address(), instruction_size);
   }

   // Similar to replace_mt_safe, but just changes the destination.  The
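The added invalidate_range call matters because the data-side store that rewrites the branch does not, by itself, make the new encoding visible to instruction fetch on AArch64. A minimal standalone sketch of the same idea, using the GCC builtin rather than HotSpot's ICache wrapper (patch_insn is an illustrative name):

#include <cstdint>
#include <cstring>

// Rewrite one instruction word in a code buffer, then flush the modified
// range so other cores stop fetching the stale encoding from their I-caches.
static void patch_insn(uint32_t* insn_addr, uint32_t new_insn) {
  std::memcpy(insn_addr, &new_insn, sizeof new_insn);   // data-side store
  char* p = reinterpret_cast<char*>(insn_addr);
  __builtin___clear_cache(p, p + sizeof new_insn);      // D-cache clean + I-cache invalidate
}
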
diff -r f83ab0b76d43 -r cc4000b1484c src/cpu/aarch64/vm/templateTable_aarch64.cpp
--- a/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Thu Nov 20 11:08:51 2014 +0000
+++ b/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Thu Nov 20 14:23:35 2014 +0000
@@ -1604,6 +1604,12 @@

 void TemplateTable::branch(bool is_jsr, bool is_wide)
 {
+  // We might be moving to a safepoint.  The thread which calls
+  // Interpreter::notice_safepoints() will effectively flush its cache
+  // when it makes a system call, but we need to do something to
+  // ensure that we see the changed dispatch table.
+  __ membar(MacroAssembler::LoadLoad);
+
   __ profile_taken_branch(r0, r1);
   const ByteSize be_offset = methodOopDesc::backedge_counter_offset() +
                              InvocationCounter::counter_offset();
@@ -1867,6 +1873,12 @@

 void TemplateTable::ret() {
   transition(vtos, vtos);
+  // We might be moving to a safepoint.  The thread which calls
+  // Interpreter::notice_safepoints() will effectively flush its cache
+  // when it makes a system call, but we need to do something to
+  // ensure that we see the changed dispatch table.
+  __ membar(MacroAssembler::LoadLoad);
+
   locals_index(r1);
   __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
   __ profile_ret(r1, r2);
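As a rough C++11 analogue of the ordering requirement described in the comments above (not HotSpot code; the type and function names are invented for the sketch): one thread publishes the safepoint dispatch table, and the interpreting thread must not let its read of the table be satisfied by a value loaded before it noticed the safepoint request. The acquire load below plays the role of the LoadLoad membar added to branch() and ret().

#include <atomic>

typedef void (*Handler)();

std::atomic<Handler*> g_dispatch_table{nullptr};  // stand-in for the interpreter's active table

void notice_safepoints(Handler* safepoint_table) {
  // Writer: publish the safepoint dispatch table.
  g_dispatch_table.store(safepoint_table, std::memory_order_release);
}

void dispatch(int bytecode) {
  // Reader: without at least LoadLoad ordering here, the table pointer
  // could come from a load hoisted above the check that a safepoint is
  // pending, and the thread would keep dispatching through the old table.
  Handler* table = g_dispatch_table.load(std::memory_order_acquire);
  table[bytecode]();
}
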
diff -r f83ab0b76d43 -r cc4000b1484c src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.inline.hpp
--- a/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.inline.hpp	Thu Nov 20 11:08:51 2014 +0000
+++ b/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.inline.hpp	Thu Nov 20 14:23:35 2014 +0000
@@ -31,6 +31,10 @@

 // Implementation of class atomic

+#define FULL_MEM_BARRIER  __sync_synchronize()
+#define READ_MEM_BARRIER  __atomic_thread_fence(__ATOMIC_ACQUIRE);
+#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
+
 inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
 inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
 inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
@@ -71,7 +75,9 @@

 inline jint Atomic::xchg (jint exchange_value, volatile jint* dest)
 {
- return __sync_lock_test_and_set (dest, exchange_value);
+  jint res = __sync_lock_test_and_set (dest, exchange_value);
+  FULL_MEM_BARRIER;
+  return res;
 }

 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest)
@@ -111,7 +117,9 @@

 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
 {
- return __sync_lock_test_and_set (dest, exchange_value);
+  intptr_t res = __sync_lock_test_and_set (dest, exchange_value);
+  FULL_MEM_BARRIER;
+  return res;
 }

 inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value)
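On AArch64 the __sync_lock_test_and_set builtin is documented by GCC as an acquire barrier only, so the trailing full fence is what upgrades Atomic::xchg to the two-way barrier HotSpot expects. A minimal standalone version of the same pattern (exchange_full_barrier is an illustrative name, not a HotSpot function):

#include <cstdint>

static int32_t exchange_full_barrier(volatile int32_t* dest, int32_t value) {
  int32_t old = __sync_lock_test_and_set(dest, value);  // acquire-only atomic swap
  __sync_synchronize();                                 // full two-way barrier
  return old;
}
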
diff -r f83ab0b76d43 -r cc4000b1484c src/os_cpu/linux_aarch64/vm/orderAccess_linux_aarch64.inline.hpp
--- a/src/os_cpu/linux_aarch64/vm/orderAccess_linux_aarch64.inline.hpp	Thu Nov 20 11:08:51 2014 +0000
+++ b/src/os_cpu/linux_aarch64/vm/orderAccess_linux_aarch64.inline.hpp	Thu Nov 20 14:23:35 2014 +0000
@@ -30,10 +30,6 @@
 #include "runtime/orderAccess.hpp"
 #include "vm_version_aarch64.hpp"

-#define FULL_MEM_BARRIER  __sync_synchronize()
-#define READ_MEM_BARRIER  __atomic_thread_fence(__ATOMIC_ACQUIRE);
-#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
-
 // Implementation of class OrderAccess.

 inline void OrderAccess::loadload()   { acquire(); }



