[aarch64-port-dev ] hg/icedtea7-forest-aarch64/hotspot: Add missing instruction syn...

Andrew Dinn adinn at redhat.com
Fri Nov 21 11:42:13 UTC 2014


[forwarding bounced check-in message from icedtea7-forest-aarch64 repo]
------ This is a copy of the message, including all the headers. ------

Return-path: <adinn at icedtea.classpath.org>
Received: from localhost ([127.0.0.1] helo=icedtea.classpath.org)
	by icedtea.classpath.org with esmtp (Exim 4.69)
	(envelope-from <adinn at icedtea.classpath.org>)
	id 1XrmJ8-0001ZJ-Js
	for aarch64-port-dev at openjdk.java.net; Fri, 21 Nov 2014 11:23:22 +0000
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Date: Fri, 21 Nov 2014 11:23:22 +0000
Subject: /hg/icedtea7-forest-aarch64/hotspot: Add missing instruction syn...
From: adinn at icedtea.classpath.org
X-Hg-Notification: changeset 96d45cd850cf
Message-Id:
<hg.96d45cd850cf.1416569002.-5017525213744097322 at icedtea.classpath.org>
To: aarch64-port-dev at openjdk.java.net

changeset 96d45cd850cf in /hg/icedtea7-forest-aarch64/hotspot
details: http://icedtea.classpath.org/hg/icedtea7-forest-aarch64/hotspot?cmd=changeset;node=96d45cd850cf
author: adinn
date: Fri Nov 21 11:23:14 2014 +0000

	Add missing instruction synchronization barriers and cache flushes.
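
The change addresses two related AArch64 requirements: an instruction
synchronization barrier (ISB) when a thread returns from a VM call during
which it may have been stopped at a safepoint and had code patched under it,
and an explicit instruction cache flush after patching instruction words. As
a rough standalone sketch of the two primitives involved, using GCC/Clang
builtins and inline asm rather than HotSpot's MacroAssembler and ICache
classes:

#include <cstddef>

// ISB: discard any instructions already fetched and refetch, so the core
// cannot keep executing a stale copy of code that has been patched.
static inline void isb() {
  asm volatile("isb" ::: "memory");
}

// After writing new instructions in [start, start+len), make them visible
// to instruction fetch. On AArch64 the builtin emits (roughly) DC CVAU over
// the range, DSB, IC IVAU over the range, DSB, ISB.
static inline void flush_icache(void* start, size_t len) {
  char* p = static_cast<char*>(start);
  __builtin___clear_cache(p, p + len);
}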


diffstat:

 src/cpu/aarch64/vm/assembler_aarch64.cpp           |   1 +
 src/cpu/aarch64/vm/assembler_aarch64.hpp           |   2 ++
 src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp     |   2 ++
 src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp         |   2 ++
 src/cpu/aarch64/vm/jniFastGetField_aarch64.cpp     |  11 ++++++++---
 src/cpu/aarch64/vm/nativeInst_aarch64.cpp          |  18 ++++++++++--------
 src/cpu/aarch64/vm/nativeInst_aarch64.hpp          |   8 ++++----
 src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp       |  10 +++++++++-
 src/cpu/aarch64/vm/stubGenerator_aarch64.cpp       |   1 +
 src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp |   2 ++
 10 files changed, 41 insertions(+), 16 deletions(-)

diffs (265 lines):

diff -r cc4000b1484c -r 96d45cd850cf src/cpu/aarch64/vm/assembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/assembler_aarch64.cpp	Thu Nov 20 14:23:35 2014 +0000
+++ b/src/cpu/aarch64/vm/assembler_aarch64.cpp	Fri Nov 21 11:23:14 2014 +0000
@@ -2722,6 +2722,7 @@
     bind(*retaddr);

   ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize)));
+  maybe_isb();
 }

 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
diff -r cc4000b1484c -r 96d45cd850cf src/cpu/aarch64/vm/assembler_aarch64.hpp
--- a/src/cpu/aarch64/vm/assembler_aarch64.hpp	Thu Nov 20 14:23:35 2014 +0000
+++ b/src/cpu/aarch64/vm/assembler_aarch64.hpp	Fri Nov 21 11:23:14 2014 +0000
@@ -3475,6 +3475,8 @@
   void string_equals(Register str1, Register str2,
 		     Register cnt, Register result,
 		     Register tmp1);
+  // ISB may be needed because of a safepoint
+  void maybe_isb() { isb(); }
 };

 #ifdef ASSERT
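
maybe_isb() as added here unconditionally emits an isb(); the name leaves
room for making it conditional later. The pattern applied throughout the rest
of this changeset is: wherever generated code calls out to the VM (blrt to a
runtime entry) and the thread may have been stopped at a safepoint, issue an
ISB on return, since other threads may have patched code while this one was
stopped. A rough standalone sketch of that call-site shape (call_into_vm is a
hypothetical stand-in for a runtime entry point, not a HotSpot function):

extern "C" void call_into_vm();           // hypothetical runtime entry point

static inline void isb_after_vm_call() {  // plays the role of maybe_isb()
  asm volatile("isb" ::: "memory");
}

void native_call_site_sketch() {
  call_into_vm();        // may block at a safepoint; code can be patched meanwhile
  isb_after_vm_call();   // the "__ maybe_isb()" calls in the hunks below
}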
diff -r cc4000b1484c -r 96d45cd850cf src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Thu Nov 20 14:23:35 2014 +0000
+++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Fri Nov 21 11:23:14 2014 +0000
@@ -503,6 +503,7 @@
   __ str(r0, Address(rthread, JavaThread::saved_exception_pc_offset()));
   __ mov(rscratch1, CAST_FROM_FN_PTR(address, SharedRuntime::get_poll_stub));
   __ blrt(rscratch1, 1, 0, 1);
+  __ maybe_isb();
   __ pop(0x3ffffffc, sp);          // integer registers except lr & sp & r0 & r1
   __ mov(rscratch1, r0);
   __ pop(0x3, sp);                 // r0 & r1
@@ -2681,6 +2682,7 @@
   if (info != NULL) {
     add_call_info_here(info);
   }
+  __ maybe_isb();
 }

 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
diff -r cc4000b1484c -r 96d45cd850cf src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp	Thu Nov 20 14:23:35 2014 +0000
+++ b/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp	Fri Nov 21 11:23:14 2014 +0000
@@ -80,6 +80,7 @@
   pop(r0, sp);
 #endif
   reset_last_Java_frame(true, true);
+  __ maybe_isb();

   // check for pending exceptions
   { Label L;
@@ -569,6 +570,7 @@
   }
 #endif
   __ reset_last_Java_frame(true, false);
+  __ maybe_isb();

   // check for pending exceptions
   { Label L;
diff -r cc4000b1484c -r 96d45cd850cf src/cpu/aarch64/vm/jniFastGetField_aarch64.cpp
--- a/src/cpu/aarch64/vm/jniFastGetField_aarch64.cpp	Thu Nov 20 14:23:35 2014 +0000
+++ b/src/cpu/aarch64/vm/jniFastGetField_aarch64.cpp	Fri Nov 21 11:23:14 2014 +0000
@@ -128,10 +128,15 @@
     case T_DOUBLE:  slow_case_addr = jni_GetDoubleField_addr();  break;
     default:        ShouldNotReachHere();
   }
-  // tail call
-  __ lea(rscratch1, ExternalAddress(slow_case_addr));
-  __ br(rscratch1);

+  {
+    __ enter();
+    __ lea(rscratch1, ExternalAddress(slow_case_addr));
+    __ blr(rscratch1);
+    __ maybe_isb();
+    __ leave();
+    __ ret(lr);
+  }
   __ flush ();

   return fast_entry;
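
The jniFastGetField change is needed because the slow path used to be reached
by a tail call (br), so control never came back to the stub and there was
nowhere to place the barrier. Building a frame and using a real call (blr)
gives the stub a point after the slow call returns at which to issue the ISB
before returning the value itself. A rough standalone sketch of the
before/after shape (slow_case_int is a hypothetical stand-in for the
jni_GetIntField slow path):

#include <cstdint>

extern "C" int32_t slow_case_int(void* env, void* obj, int32_t field_id);  // hypothetical

// Before: a tail call; slow_case_int returns straight to our caller, so
// nothing can run here afterwards.
//   return slow_case_int(env, obj, field_id);        // __ br(rscratch1)

// After: a real call in our own frame, so the barrier runs on the way out.
int32_t fast_get_int_sketch(void* env, void* obj, int32_t field_id) {
  int32_t v = slow_case_int(env, obj, field_id);      // __ enter(); __ blr(rscratch1)
  asm volatile("isb" ::: "memory");                   // __ maybe_isb()
  return v;                                           // __ leave(); __ ret(lr)
}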
diff -r cc4000b1484c -r 96d45cd850cf src/cpu/aarch64/vm/nativeInst_aarch64.cpp
--- a/src/cpu/aarch64/vm/nativeInst_aarch64.cpp	Thu Nov 20 14:23:35 2014 +0000
+++ b/src/cpu/aarch64/vm/nativeInst_aarch64.cpp	Fri Nov 21 11:23:14 2014 +0000
@@ -37,11 +37,6 @@
 #include "c1/c1_Runtime1.hpp"
 #endif

-void NativeInstruction::wrote(int offset) {
-  // FIXME: Native needs ISB here
-; }
-
-
 void NativeCall::verify() { ; }

 address NativeCall::destination() const {
@@ -51,6 +46,8 @@
 // Inserts a native call instruction at a given pc
 void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }

+//-------------------------------------------------------------------
+
 void NativeMovConstReg::verify() {
   // make sure code pattern is actually mov reg64, imm64 instructions
 }
@@ -71,6 +68,7 @@
     *(intptr_t*)addr = x;
   } else {
     MacroAssembler::pd_patch_instruction(instruction_address(), (address)x);
+    ICache::invalidate_range(instruction_address(), instruction_size);
   }
 };

@@ -102,6 +100,7 @@
     *(long*)addr = x;
   } else {
     MacroAssembler::pd_patch_instruction(pc, (address)intptr_t(x));
+    ICache::invalidate_range(instruction_address(), instruction_size);
   }
 }

@@ -138,8 +137,11 @@
     dest = instruction_address();

   MacroAssembler::pd_patch_instruction(instruction_address(), dest);
+  ICache::invalidate_range(instruction_address(), instruction_size);
 };

+//-------------------------------------------------------------------
+
 bool NativeInstruction::is_safepoint_poll() {
   // a safepoint_poll is implemented in two steps as either
   //
@@ -189,7 +191,7 @@
   return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b11100101;
 }

-// MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
+// MT safe inserting of a jump over a jump or a nop (used by nmethod::makeZombie)

 void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
   ptrdiff_t disp = dest - verified_entry;
@@ -207,7 +209,6 @@

 void NativeGeneralJump::verify() {  }

-
 void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
   ptrdiff_t disp = entry - code_pos;
   guarantee(disp < 1 << 27 && disp > - (1 << 27), "branch overflow");
@@ -220,7 +221,8 @@

 // MT-safe patching of a long jump instruction.
 void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
-  assert(nativeInstruction_at(instr_addr)->is_jump_or_nop(),
+  NativeGeneralJump* n_jump = (NativeGeneralJump*)instr_addr;
+  assert(n_jump->is_jump_or_nop(),
 	 "Aarch64 cannot replace non-jump with jump");
   uint32_t instr = *(uint32_t*)code_buffer;
   *(uint32_t*)instr_addr = instr;
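
The wrote(offset) hook removed above was only a FIXME placeholder; instead
the places that rewrite instruction words through pd_patch_instruction now
call ICache::invalidate_range() explicitly, so the patched words are cleaned
to the point of unification and the corresponding icache lines invalidated.
A rough standalone sketch of that patch-then-flush step (not HotSpot's
ICache implementation):

#include <cstdint>

// Patch one 32-bit instruction word, then make it visible to instruction
// fetch. An aligned 32-bit store is single-copy atomic on AArch64, so a
// concurrent executor sees either the old or the new instruction.
static void patch_and_flush(uint32_t* insn_addr, uint32_t new_insn) {
  *insn_addr = new_insn;
  char* p = reinterpret_cast<char*>(insn_addr);
  __builtin___clear_cache(p, p + sizeof(uint32_t));  // like ICache::invalidate_range
}

The executing core still has to issue an ISB before running the patched
word; that is what the maybe_isb() calls on the safepoint return paths
provide.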
diff -r cc4000b1484c -r 96d45cd850cf src/cpu/aarch64/vm/nativeInst_aarch64.hpp
--- a/src/cpu/aarch64/vm/nativeInst_aarch64.hpp	Thu Nov 20 14:23:35 2014 +0000
+++ b/src/cpu/aarch64/vm/nativeInst_aarch64.hpp	Fri Nov 21 11:23:14 2014 +0000
@@ -80,10 +80,10 @@
   oop  oop_at (int offset) const       { return *(oop*) addr_at(offset); }


-  void set_char_at(int offset, char c)        { *addr_at(offset) = (u_char)c; wrote(offset); }
-  void set_int_at(int offset, jint  i)        { *(jint*)addr_at(offset) = i;  wrote(offset); }
-  void set_ptr_at (int offset, intptr_t  ptr) { *(intptr_t*) addr_at(offset) = ptr;  wrote(offset); }
-  void set_oop_at (int offset, oop  o)        { *(oop*) addr_at(offset) = o;  wrote(offset); }
+  void set_char_at(int offset, char c)        { *addr_at(offset) = (u_char)c; }
+  void set_int_at(int offset, jint  i)        { *(jint*)addr_at(offset) = i; }
+  void set_ptr_at (int offset, intptr_t  ptr) { *(intptr_t*) addr_at(offset) = ptr; }
+  void set_oop_at (int offset, oop  o)        { *(oop*) addr_at(offset) = o; }

   // This doesn't really do anything on AArch64, but it is the place where
   // cache invalidation belongs, generically:
diff -r cc4000b1484c -r 96d45cd850cf src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
--- a/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Thu Nov 20 14:23:35 2014 +0000
+++ b/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Fri Nov 21 11:23:14 2014 +0000
@@ -319,6 +319,7 @@
   __ mov(c_rarg1, lr);
   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
   __ blrt(rscratch1, 2, 0, 0);
+ __ maybe_isb();

   __ pop_CPU_state();
   // restore sp
@@ -1171,6 +1172,7 @@
     __ mov(rscratch2, (gpargs << 6) | (fpargs << 2) | type);
     __ blrt(rscratch1, rscratch2);
     // __ blrt(rscratch1, gpargs, fpargs, type);
+    __ maybe_isb();
   }
 }

@@ -1977,6 +1979,7 @@
       __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
     }
     __ blrt(rscratch1, 1, 0, 1);
+    __ maybe_isb();
     // Restore any method result value
     restore_native_result(masm, ret_type, stack_slots);

@@ -2837,6 +2840,8 @@

   __ reset_last_Java_frame(false, true);

+  __ maybe_isb();
+
   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
   __ cbz(rscratch1, noException);

@@ -2906,6 +2911,8 @@

   oop_maps->add_gc_map( __ offset() - start, map);

+  __ maybe_isb();
+
   // r0 contains the address we are going to jump to assuming no exception got installed

   // clear last_Java_sp
@@ -3028,7 +3035,8 @@
   __ mov(c_rarg0, rthread);
   __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
   __ blrt(rscratch1, 1, 0, MacroAssembler::ret_type_integral);
-
+  __ maybe_isb();
+
   // Set an oopmap for the call site.  This oopmap will only be used if we
   // are unwinding the stack.  Hence, all locations will be dead.
   // Callee-saved registers will be the same as the frame above (i.e.,
diff -r cc4000b1484c -r 96d45cd850cf src/cpu/aarch64/vm/stubGenerator_aarch64.cpp
--- a/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Thu Nov 20 14:23:35 2014 +0000
+++ b/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Fri Nov 21 11:23:14 2014 +0000
@@ -1848,6 +1848,7 @@
     oop_maps->add_gc_map(the_pc - start, map);

     __ reset_last_Java_frame(true, true);
+    __ maybe_isb();

     __ leave();

diff -r cc4000b1484c -r 96d45cd850cf src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp
--- a/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp	Thu Nov 20 14:23:35 2014 +0000
+++ b/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp	Fri Nov 21 11:23:14 2014 +0000
@@ -1049,6 +1049,7 @@

   // Call the native method.
   __ blrt(r10, rscratch1);
+  __ maybe_isb();
   __ get_method(rmethod);
   // result potentially in r0 or v0

@@ -1106,6 +1107,7 @@
     __ mov(c_rarg0, rthread);
     __ mov(rscratch2, CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
     __ blrt(rscratch2, 1, 0, 0);
+    __ maybe_isb();
     __ get_method(rmethod);
     __ reinit_heapbase();
     __ bind(Continue);


