[aarch64-port-dev] Remove obsolete C1 patching code

Andrew Haley aph at redhat.com
Thu Jun 19 15:47:18 UTC 2014


Finally, these patches change address loads in JIT-compiled code to use the
three-instruction (movz, movk, movk) immediate form.
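
Roughly, the sequence looks like this.  This is only a sketch -- the
helper name is invented for illustration, and the real code goes
through MacroAssembler::movptr() or mov(dst, Address(...)) -- but it
shows why a dynamic call site now has a fixed 16-byte layout
(movz, movk, movk, bl):

  // Materialize a 48-bit address with three move-wide-immediate
  // instructions.  Illustrative sketch only.
  static void emit_address_imm48(MacroAssembler *masm, Register dst,
                                 uint64_t addr) {
    assert((addr >> 48) == 0, "addresses are limited to 48 bits");
    masm->movz(dst,  addr        & 0xffff);      // bits  0..15
    masm->movk(dst, (addr >> 16) & 0xffff, 16);  // bits 16..31
    masm->movk(dst, (addr >> 32) & 0xffff, 32);  // bits 32..47
  }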

I've also removed all of the obsolete C1 patching code, which has been
unused for some time.  Instead, I insert calls to a routine which
deoptimizes.
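
The shape of that routine is in the c1_Runtime1_aarch64.cpp hunk below;
a minimal sketch (the standalone function name here is invented, the
calls are the ones the patch uses) is:

  // When a C1 patching stub is entered, don't try to rewrite the
  // nmethod's instructions in place -- the ARMv8 ARM gives no useful
  // guarantees about concurrent modification and execution except for
  // branches and NOPs.  Instead make the nmethod not_entrant and
  // deoptimize the caller; the interpreter then resolves the
  // field/klass/mirror in the usual way.
  static void patch_by_deoptimizing(JavaThread* thread) {
    RegisterMap reg_map(thread, false);
    frame runtime_frame = thread->last_frame();
    frame caller_frame  = runtime_frame.sender(&reg_map);

    // The nmethod may already have been invalidated at the last
    // safepoint; only mark it if it is still alive.
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    if (nm != NULL) {
      nm->make_not_entrant();
    }

    Deoptimization::deoptimize_frame(thread, caller_frame.id());
    // Return to the now-deoptimized frame.
  }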

Andrew.


# HG changeset patch
# User aph
# Date 1403168816 14400
#      Thu Jun 19 05:06:56 2014 -0400
# Node ID 3d100af53e1893edd20edf219765179eacf4dd8d
# Parent  745e0357529b3a046af1bcb56f1493a94657b924
Save intermediate state before removing C1 patching code.

diff -r 745e0357529b -r 3d100af53e18 src/cpu/aarch64/vm/aarch64.ad
--- a/src/cpu/aarch64/vm/aarch64.ad	Tue Jun 17 10:14:43 2014 -0400
+++ b/src/cpu/aarch64/vm/aarch64.ad	Thu Jun 19 05:06:56 2014 -0400
@@ -839,19 +839,7 @@

 int MachCallDynamicJavaNode::ret_addr_offset()
 {
-  // call should be
-  //   ldr_constant
-  //   bl
-  // where ldr_constant is either
-  //   ldr // if NearCpool
-  // or
-  //   adrp // if !NearCPool
-  //   ldr
-  int off = 8;
-  if (!NearCpool) {
-    off += 4;
-  }
-  return off;
+  return 16; // movz, movk, movk, bl
 }

 int MachCallRuntimeNode::ret_addr_offset() {
@@ -2570,9 +2558,9 @@
     } else {
       relocInfo::relocType rtype = $src->constant_reloc();
       if (rtype == relocInfo::oop_type) {
-        __ movoop(dst_reg, (jobject)con, /*mt_safe*/false);
+        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
       } else if (rtype == relocInfo::metadata_type) {
-        __ mov_metadata(dst_reg, (Metadata*)con, /*mt_safe*/false);
+        __ mov_metadata(dst_reg, (Metadata*)con);
       } else {
         assert(rtype == relocInfo::none, "unexpected reloc type");
 	if (con < (address)(uintptr_t)os::vm_page_size()) {
@@ -2625,7 +2613,7 @@
     } else {
       relocInfo::relocType rtype = $src->constant_reloc();
       assert(rtype == relocInfo::oop_type, "unexpected reloc type");
-      __ set_narrow_oop(dst_reg, (jobject)con, /*mt_safe*/false);
+      __ set_narrow_oop(dst_reg, (jobject)con);
     }
   %}

@@ -2644,7 +2632,7 @@
     } else {
       relocInfo::relocType rtype = $src->constant_reloc();
       assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
-      __ set_narrow_klass(dst_reg, (Klass *)con, /*mt_safe*/false);
+      __ set_narrow_klass(dst_reg, (Klass *)con);
     }
   %}

@@ -2836,8 +2824,6 @@
     address mark = __ pc();
     address addr = (address)$meth$$method;
     if (!_method) {
-      // TODO check this
-      // think we are calling generated Java here not x86
       // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
       __ bl(Address(addr, relocInfo::runtime_call_type));
     } else if (_optimized_virtual) {
diff -r 745e0357529b -r 3d100af53e18 src/cpu/aarch64/vm/assembler_aarch64.hpp
--- a/src/cpu/aarch64/vm/assembler_aarch64.hpp	Tue Jun 17 10:14:43 2014 -0400
+++ b/src/cpu/aarch64/vm/assembler_aarch64.hpp	Thu Jun 19 05:06:56 2014 -0400
@@ -1243,7 +1243,7 @@
       f(size & 0b01, 31, 30), f(0b011, 29, 27), f(0b00, 25, 24);
       long offset = (adr.target() - pc()) >> 2;
       sf(offset, 23, 5);
-#ifdef ASSERT
+#if 0
       Relocation* reloc = adr.rspec().reloc();
       relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();
       assert(rtype == relocInfo::internal_word_type,
diff -r 745e0357529b -r 3d100af53e18 src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Tue Jun 17 10:14:43 2014 -0400
+++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Thu Jun 19 05:06:56 2014 -0400
@@ -200,7 +200,9 @@
     if (Address::offset_ok_for_immed(addr_offset, addr->scale()))
       return Address(base, addr_offset, Address::lsl(addr->scale()));
     else {
-      address const_addr = int_constant(addr_offset);
+      // This is a rather long-winded instruction sequence, but the
+      // offset is atomically patchable.  See PatchingStub::install().
+      Address const_addr = InternalAddress(int_constant(addr_offset));
       __ ldr_constant(tmp, const_addr);
       return Address(base, tmp, Address::lsl(addr->scale()));
     }
@@ -314,19 +316,7 @@
   if (o == NULL) {
     __ mov(reg, zr);
   } else {
-    int oop_index = __ oop_recorder()->find_index(o);
-    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(o)), "should be real oop");
-    RelocationHolder rspec = oop_Relocation::spec(oop_index);
-    address const_ptr = int_constant(jlong(o));
-    __ code()->consts()->relocate(const_ptr, rspec);
-    __ ldr_constant(reg, const_ptr);
-
-    if (PrintRelocations && Verbose) {
-	puts("jobject2reg:\n");
-	printf("oop %p  at %p\n", o, const_ptr);
-	fflush(stdout);
-	das((uint64_t)__ pc(), -2);
-    }
+    __ movoop(reg, o, /*immediate*/true);
   }
 }

@@ -334,13 +324,16 @@
 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
   // Allocate a new index in table to hold the object once it's been patched
   int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
-//  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id, oop_index);
   PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);

-  RelocationHolder rspec = oop_Relocation::spec(oop_index);
-  address const_ptr = int_constant(-1);
-  __ code()->consts()->relocate(const_ptr, rspec);
-  __ ldr_constant(reg, const_ptr);
+  if (DeoptimizeWhenPatching) {
+    __ nop();
+  } else {
+    RelocationHolder rspec = oop_Relocation::spec(oop_index);
+    address const_ptr = int_constant(-1);
+    __ code()->consts()->relocate(const_ptr, rspec);
+    __ ldr_constant(reg, InternalAddress(const_ptr));
+  }
   patching_epilog(patch, lir_patch_normal, reg, info);
 }

@@ -924,7 +917,10 @@
 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
   Metadata* o = NULL;
   PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
-  __ mov_metadata(reg, o);
+  if (DeoptimizeWhenPatching)
+    __ nop();
+  else
+    __ mov_metadata(reg, o);
   patching_epilog(patch, lir_patch_normal, reg, info);
 }

diff -r 745e0357529b -r 3d100af53e18 src/cpu/aarch64/vm/compiledIC_aarch64.cpp
--- a/src/cpu/aarch64/vm/compiledIC_aarch64.cpp	Tue Jun 17 10:14:43 2014 -0400
+++ b/src/cpu/aarch64/vm/compiledIC_aarch64.cpp	Thu Jun 19 05:06:56 2014 -0400
@@ -139,6 +139,7 @@

   // Update stub.
   method_holder->set_data((intptr_t)callee());
+  method_holder->flush();
   jump->set_jump_destination(entry);

   // Update jump to call.
diff -r 745e0357529b -r 3d100af53e18 src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Tue Jun 17 10:14:43 2014 -0400
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Thu Jun 19 05:06:56 2014 -0400
@@ -622,9 +622,10 @@

 void MacroAssembler::ic_call(address entry) {
   RelocationHolder rh = virtual_call_Relocation::spec(pc());
-  address const_ptr = long_constant((jlong)Universe::non_oop_word());
-  unsigned long offset;
-  ldr_constant(rscratch2, const_ptr);
+  // address const_ptr = long_constant((jlong)Universe::non_oop_word());
+  // unsigned long offset;
+  // ldr_constant(rscratch2, const_ptr);
+  movptr(rscratch2, (uintptr_t)Universe::non_oop_word());
   call(Address(entry, rh));
 }

@@ -2534,7 +2535,7 @@
   decode_klass_not_null(r, r);
 }

-void  MacroAssembler::set_narrow_oop(Register dst, jobject obj, bool mt_safe) {
+void  MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
   assert (UseCompressedOops, "should only be used for compressed oops");
   assert (Universe::heap() != NULL, "java heap should be initialized");
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
@@ -2549,7 +2550,7 @@
   movk(dst, 0xBEEF);
 }

-void  MacroAssembler::set_narrow_klass(Register dst, Klass* k, bool mt_safe) {
+void  MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
   assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int index = oop_recorder()->find_index(k);
@@ -2782,11 +2783,11 @@
   return Address((address)obj, rspec);
 }

-// Move an oop into a register.  mt_safe is true iff we are not going
-// to patch this instruction while the code is being executed by
-// another thread.  In that case we can use move immediates rather
-// than the constant pool.
-void MacroAssembler::movoop(Register dst, jobject obj, bool mt_safe) {
+// Move an oop into a register.  immediate is true if we want
+// immediate instrcutions, i.e. we are not going to patch this
+// instruction while the code is being executed by another thread.  In
+// that case we can use move immediates rather than the constant pool.
+void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) {
   int oop_index;
   if (obj == NULL) {
     oop_index = oop_recorder()->allocate_oop_index(obj);
@@ -2795,17 +2796,15 @@
     assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "should be real oop");
   }
   RelocationHolder rspec = oop_Relocation::spec(oop_index);
-  address const_ptr = mt_safe ? long_constant((jlong)obj) : NULL;
-  if (! const_ptr) {
+  if (! immediate) {
+    address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address
+    ldr_constant(dst, Address(dummy, rspec));
+  } else
     mov(dst, Address((address)obj, rspec));
-  } else {
-    code()->consts()->relocate(const_ptr, rspec);
-    ldr_constant(dst, const_ptr);
-  }
 }

 // Move a metadata address into a register.
-void MacroAssembler::mov_metadata(Register dst, Metadata* obj, bool mt_safe) {
+void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
   int oop_index;
   if (obj == NULL) {
     oop_index = oop_recorder()->allocate_metadata_index(obj);
@@ -2813,13 +2812,7 @@
     oop_index = oop_recorder()->find_index(obj);
   }
   RelocationHolder rspec = metadata_Relocation::spec(oop_index);
-  address const_ptr = mt_safe ? long_constant((jlong)obj) : NULL;
-  if (! const_ptr) {
-    mov(dst, Address((address)obj, rspec));
-  } else {
-    code()->consts()->relocate(const_ptr, rspec);
-    ldr_constant(dst, const_ptr);
-  }
+  mov(dst, Address((address)obj, rspec));
 }

 Address MacroAssembler::constant_oop_address(jobject obj) {
@@ -3107,12 +3100,12 @@

 void MacroAssembler::adrp(Register reg1, const Address &dest, unsigned long &byte_offset) {
   relocInfo::relocType rtype = dest.rspec().reloc()->type();
-  guarantee(rtype == relocInfo::none
-	    || rtype == relocInfo::external_word_type
-	    || rtype == relocInfo::poll_type
-	    || rtype == relocInfo::poll_return_type,
-	    "can only use a fixed address with an ADRP");
   if (labs(pc() - dest.target()) >= (1LL << 32)) {
+    guarantee(rtype == relocInfo::none
+	      || rtype == relocInfo::external_word_type
+	      || rtype == relocInfo::poll_type
+	      || rtype == relocInfo::poll_return_type,
+	      "can only use a fixed address with an ADRP");
     // Out of range.  This doesn't happen very often, but we have to
     // handle it
     mov(reg1, dest);
diff -r 745e0357529b -r 3d100af53e18 src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Tue Jun 17 10:14:43 2014 -0400
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Thu Jun 19 05:06:56 2014 -0400
@@ -737,7 +737,7 @@
   void encode_heap_oop_not_null(Register dst, Register src);
   void decode_heap_oop_not_null(Register dst, Register src);

-  void set_narrow_oop(Register dst, jobject obj, bool mt_safe = true);
+  void set_narrow_oop(Register dst, jobject obj);
   // currently unimplemented
 #if 0
   void set_narrow_oop(Address dst, jobject obj);
@@ -750,7 +750,7 @@
   void encode_klass_not_null(Register dst, Register src);
   void decode_klass_not_null(Register dst, Register src);

-  void set_narrow_klass(Register dst, Klass* k, bool mt_safe = true);
+  void set_narrow_klass(Register dst, Klass* k);
   // currently unimplemented
 #if 0
   void set_narrow_klass(Address dst, Klass* k);
@@ -1106,7 +1106,7 @@

   // Data

-  void mov_metadata(Register dst, Metadata* obj, bool mt_safe = true);
+  void mov_metadata(Register dst, Metadata* obj);
   Address allocate_metadata_address(Metadata* obj);
   Address constant_oop_address(jobject obj);
   // unimplemented
@@ -1114,7 +1114,7 @@
   void pushoop(jobject obj);
 #endif

-  void movoop(Register dst, jobject obj, bool mt_safe = true);
+  void movoop(Register dst, jobject obj, bool immediate = false);

   // sign extend as need a l to ptr sized element
   void movl2ptr(Register dst, Address src) { Unimplemented(); }
@@ -1256,13 +1256,12 @@
     Label*   retaddr = NULL
   );

-  void ldr_constant(Register dest, address const_addr) {
-    guarantee(const_addr, "constant pool overflow");
+  void ldr_constant(Register dest, const Address &const_addr) {
     if (NearCpool) {
-      ldr(dest, const_addr, relocInfo::internal_word_type);
+      ldr(dest, const_addr);
     } else {
       unsigned long offset;
-      adrp(dest, InternalAddress(const_addr), offset);
+      adrp(dest, InternalAddress(const_addr.target()), offset);
       ldr(dest, Address(dest, offset));
     }
   }
diff -r 745e0357529b -r 3d100af53e18 src/cpu/aarch64/vm/nativeInst_aarch64.cpp
--- a/src/cpu/aarch64/vm/nativeInst_aarch64.cpp	Tue Jun 17 10:14:43 2014 -0400
+++ b/src/cpu/aarch64/vm/nativeInst_aarch64.cpp	Thu Jun 19 05:06:56 2014 -0400
@@ -53,13 +53,6 @@
 // Inserts a native call instruction at a given pc
 void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }

-// MT-safe patching of a call instruction.
-// First patches first word of instruction to two jmp's that jmps to them
-// selfs (spinlock). Then patches the last byte, and then atomicly replaces
-// the jmp's with the first 4 byte of the new instruction.
-void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) { Unimplemented(); }
-
-
 void NativeMovConstReg::verify() {
   // make sure code pattern is actually mov reg64, imm64 instructions
 }
@@ -83,7 +76,6 @@
   }
 };

-
 void NativeMovConstReg::print() {
   tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                 instruction_address(), data());
diff -r 745e0357529b -r 3d100af53e18 src/cpu/aarch64/vm/nativeInst_aarch64.hpp
--- a/src/cpu/aarch64/vm/nativeInst_aarch64.hpp	Tue Jun 17 10:14:43 2014 -0400
+++ b/src/cpu/aarch64/vm/nativeInst_aarch64.hpp	Thu Jun 19 05:06:56 2014 -0400
@@ -202,8 +202,8 @@
   return call;
 }

-// An interface for accessing/manipulating native mov reg, imm32 instructions.
-// (used to manipulate inlined 32bit data dll calls, etc.)
+// An interface for accessing/manipulating native mov reg, imm instructions.
+// (used to manipulate inlined 64-bit data calls, etc.)
 class NativeMovConstReg: public NativeInstruction {
  public:
   enum Aarch64_specific_constants {
@@ -227,6 +227,12 @@
   intptr_t data() const;
   void  set_data(intptr_t x);

+  void flush() {
+    if (! maybe_cpool_ref(instruction_address())) {
+      ICache::invalidate_range(instruction_address(), instruction_size);
+    }
+  }
+
   void  verify();
   void  print();

diff -r 745e0357529b -r 3d100af53e18 src/cpu/aarch64/vm/relocInfo_aarch64.cpp
--- a/src/cpu/aarch64/vm/relocInfo_aarch64.cpp	Tue Jun 17 10:14:43 2014 -0400
+++ b/src/cpu/aarch64/vm/relocInfo_aarch64.cpp	Thu Jun 19 05:06:56 2014 -0400
@@ -35,10 +35,20 @@
 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
   switch(type()) {
   case relocInfo::oop_type:
-    MacroAssembler::patch_oop(addr(), x);
+    {
+      oop_Relocation *reloc = (oop_Relocation *)this;
+      if (NativeInstruction::is_ldr_literal_at(addr())) {
+	address constptr = (address)code()->oop_addr_at(reloc->oop_index());
+	MacroAssembler::pd_patch_instruction(addr(), constptr);
+	assert(*(address*)constptr == x, "error in oop relocation");
+      } else{
+	MacroAssembler::patch_oop(addr(), x);
+      }
+    }
     break;
   default:
     MacroAssembler::pd_patch_instruction(addr(), x);
+    break;
   }
 }

diff -r 745e0357529b -r 3d100af53e18 src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
--- a/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Tue Jun 17 10:14:43 2014 -0400
+++ b/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Thu Jun 19 05:06:56 2014 -0400
@@ -1735,7 +1735,9 @@
   if (method->is_static() && !is_critical_native) {

     //  load oop into a register
-    __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
+    __ movoop(oop_handle_reg,
+	      JNIHandles::make_local(method->method_holder()->java_mirror()),
+	      /*immediate*/true);

     // Now handlize the static class mirror it's known not-null.
     __ str(oop_handle_reg, Address(sp, klass_offset));
diff -r 745e0357529b -r 3d100af53e18 src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp
--- a/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp	Tue Jun 17 10:14:43 2014 -0400
+++ b/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp	Thu Jun 19 05:06:56 2014 -0400
@@ -100,7 +100,7 @@
   // even in its subfields (as defined by the CPU immediate fields,
   // if the CPU splits constants across multiple instructions).

-  return (char*) -1;
+  return (char*) 0xffffffffffff;
 }

 void os::initialize_thread(Thread *thr) {
# HG changeset patch
# User aph
# Date 1403186745 14400
#      Thu Jun 19 10:05:45 2014 -0400
# Node ID bc7cb12aff996905ebe3205c7e84537fd803b682
# Parent  3d100af53e1893edd20edf219765179eacf4dd8d
Remove obsolete C1 patching code.

diff -r 3d100af53e18 -r bc7cb12aff99 src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp	Thu Jun 19 05:06:56 2014 -0400
+++ b/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp	Thu Jun 19 10:05:45 2014 -0400
@@ -320,133 +320,7 @@
 }

 void PatchingStub::emit_code(LIR_Assembler* ce) {
-  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");
-
-  Label call_patch;
-
-  // static field accesses have special semantics while the class
-  // initializer is being run so we emit a test which can be used to
-  // check that this code is being executed by the initializing
-  // thread.
-  address being_initialized_entry = __ pc();
-  if (CommentedAssembly) {
-    __ block_comment(" patch template");
-  }
-
-  // make a copy the code which is going to be patched.
-  for (int i = 0; i < _bytes_to_copy; i++) {
-    address ptr = (address)(_pc_start + i);
-    int a_byte = (*ptr) & 0xFF;
-    __ emit_int8(a_byte);
-  }
-
-  address end_of_patch = __ pc();
-  int bytes_to_skip = 0;
-  if (_id == load_mirror_id) {
-    int offset = __ offset();
-    if (CommentedAssembly) {
-      __ block_comment(" being_initialized check");
-    }
-    assert(_obj != noreg, "must be a valid register");
-    Register tmp = r0;
-    Register tmp2 = r19;
-    __ stp(tmp, tmp2, Address(__ pre(sp, -2 * wordSize)));
-    // Load without verification to keep code size small. We need it because
-    // begin_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
-    __ ldr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
-    __ ldr(tmp, Address(tmp2, InstanceKlass::init_thread_offset()));
-    __ cmp(rthread, tmp);
-    __ ldp(tmp, tmp2, Address(__ post(sp, 2 * wordSize)));
-    __ br(Assembler::NE, call_patch);
-
-    // access_field patches may execute the patched code before it's
-    // copied back into place so we need to jump back into the main
-    // code of the nmethod to continue execution.
-    __ b(_patch_site_continuation);
-
-    // make sure this extra code gets skipped
-    bytes_to_skip += __ offset() - offset;
-  }
-  if (CommentedAssembly) {
-    __ block_comment("patch data");
-  }
-  // Now emit the patch record telling the runtime how to find the
-  // pieces of the patch.
-  int sizeof_patch_record = 8;
-  bytes_to_skip += sizeof_patch_record;
-
-  // emit the offsets needed to find the code to patch
-  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
-
-  // If this is a field access, the offset is held in the constant
-  // pool rather than embedded in the instruction, so we don't copy
-  // any instructions: we set the value in the constant pool and
-  // overwrite the NativeGeneralJump.
-  {
-    Label L;
-    __ br(Assembler::AL, L);
-    __ emit_int8(0);
-    __ emit_int8(being_initialized_entry_offset);
-    if (_id == access_field_id) {
-      __ emit_int8(bytes_to_skip + _bytes_to_copy);
-      __ emit_int8(0);
-    } else {
-      __ emit_int8(bytes_to_skip);
-      __ emit_int8(_bytes_to_copy);
-    }
-    __ bind(L);
-  }
-
-  address patch_info_pc = __ pc();
-  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
-
-  address entry = __ pc();
-  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
-  address target = NULL;
-  relocInfo::relocType reloc_type = relocInfo::none;
-
-  switch (_id) {
-  case access_field_id:
-    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
-    reloc_type = relocInfo::section_word_type;
-    break;
-  case load_klass_id:
-    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
-    reloc_type = relocInfo::metadata_type;
-    break;
-  case load_mirror_id:
-    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
-    reloc_type = relocInfo::oop_type;
-    break;
-  case load_appendix_id:
-    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
-    reloc_type = relocInfo::oop_type;
-    break;
-  default: ShouldNotReachHere();
-  }
-
-  __ bind(call_patch);
-
-  if (CommentedAssembly) {
-    __ block_comment("patch entry point");
-  }
-  __ bl(RuntimeAddress(target));
-  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
-  ce->add_call_info_here(_info);
-  int jmp_off = __ offset();
-  __ b(_patch_site_entry);
-  // Add enough nops so deoptimization can overwrite the jmp above with a call
-  // and not destroy the world.
-  // FIXME: AArch64 doesn't really need this
-  // __ nop(); __ nop();
-  // if (_id == load_klass_id
-  //     || _id == load_mirror_id
-  //     || _id == access_field_id
-  //     ) {
-  //   CodeSection* cs = __ code_section();
-  //   RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
-  //   relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
-  // }
+  assert(false, "AArch64 should not use C1 runtime patching");
 }


diff -r 3d100af53e18 -r bc7cb12aff99 src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Thu Jun 19 05:06:56 2014 -0400
+++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Thu Jun 19 10:05:45 2014 -0400
@@ -26,6 +26,7 @@

 #include "precompiled.hpp"
 #include "asm/assembler.hpp"
+#include "c1/c1_CodeStubs.hpp"
 #include "c1/c1_Compilation.hpp"
 #include "c1/c1_LIRAssembler.hpp"
 #include "c1/c1_MacroAssembler.hpp"
@@ -200,10 +201,7 @@
     if (Address::offset_ok_for_immed(addr_offset, addr->scale()))
       return Address(base, addr_offset, Address::lsl(addr->scale()));
     else {
-      // This is a rather long-winded instruction sequence, but the
-      // offset is atomically patchable.  See PatchingStub::install().
-      Address const_addr = InternalAddress(int_constant(addr_offset));
-      __ ldr_constant(tmp, const_addr);
+      __ mov(tmp, addr_offset);
       return Address(base, tmp, Address::lsl(addr->scale()));
     }
   }
@@ -320,21 +318,36 @@
   }
 }

+void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
+  address target = NULL;
+  relocInfo::relocType reloc_type = relocInfo::none;
+
+  switch (patching_id(info)) {
+  case PatchingStub::access_field_id:
+    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
+    reloc_type = relocInfo::section_word_type;
+    break;
+  case PatchingStub::load_klass_id:
+    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
+    reloc_type = relocInfo::metadata_type;
+    break;
+  case PatchingStub::load_mirror_id:
+    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
+    reloc_type = relocInfo::oop_type;
+    break;
+  case PatchingStub::load_appendix_id:
+    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
+    reloc_type = relocInfo::oop_type;
+    break;
+  default: ShouldNotReachHere();
+  }
+
+  __ bl(RuntimeAddress(target));
+  add_call_info_here(info);
+}

 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
-  // Allocate a new index in table to hold the object once it's been patched
-  int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
-  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
-
-  if (DeoptimizeWhenPatching) {
-    __ nop();
-  } else {
-    RelocationHolder rspec = oop_Relocation::spec(oop_index);
-    address const_ptr = int_constant(-1);
-    __ code()->consts()->relocate(const_ptr, rspec);
-    __ ldr_constant(reg, InternalAddress(const_ptr));
-  }
-  patching_epilog(patch, lir_patch_normal, reg, info);
+  deoptimize_trap(info);
 }


@@ -801,23 +814,21 @@
   PatchingStub* patch = NULL;
   Register compressed_src = rscratch1;

+  if (patch_code != lir_patch_none) {
+    deoptimize_trap(info);
+    return;
+  }
+
   if (type == T_ARRAY || type == T_OBJECT) {
     __ verify_oop(src->as_register());

     if (UseCompressedOops && !wide) {
       __ encode_heap_oop(compressed_src, src->as_register());
-      if (patch_code != lir_patch_none) {
-        info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
-      }
     } else {
       compressed_src = src->as_register();
     }
   }

-  if (patch_code != lir_patch_none) {
-    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
-  }
-
   int null_check_here = code_offset();
   switch (type) {
     case T_FLOAT: {
@@ -875,10 +886,6 @@
   if (info != NULL) {
     add_debug_info_for_null_check(null_check_here, info);
   }
-
-  if (patch_code != lir_patch_none) {
-    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
-  }
 }


@@ -915,13 +922,31 @@


 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
-  Metadata* o = NULL;
-  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
-  if (DeoptimizeWhenPatching)
-    __ nop();
-  else
-    __ mov_metadata(reg, o);
-  patching_epilog(patch, lir_patch_normal, reg, info);
+  address target = NULL;
+  relocInfo::relocType reloc_type = relocInfo::none;
+
+  switch (patching_id(info)) {
+  case PatchingStub::access_field_id:
+    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
+    reloc_type = relocInfo::section_word_type;
+    break;
+  case PatchingStub::load_klass_id:
+    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
+    reloc_type = relocInfo::metadata_type;
+    break;
+  case PatchingStub::load_mirror_id:
+    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
+    reloc_type = relocInfo::oop_type;
+    break;
+  case PatchingStub::load_appendix_id:
+    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
+    reloc_type = relocInfo::oop_type;
+    break;
+  default: ShouldNotReachHere();
+  }
+
+  __ bl(RuntimeAddress(target));
+  add_call_info_here(info);
 }

 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
@@ -944,10 +969,9 @@
     __ verify_oop(addr->base()->as_pointer_register());
   }

-  PatchingStub* patch = NULL;
-
   if (patch_code != lir_patch_none) {
-    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
+    deoptimize_trap(info);
+    return;
   }

   if (info != NULL) {
@@ -1019,10 +1043,6 @@
       ShouldNotReachHere();
   }

-  if (patch != NULL) {
-    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
-  }
-
   if (type == T_ARRAY || type == T_OBJECT) {
 #ifdef _LP64
     if (UseCompressedOops && !wide) {
diff -r 3d100af53e18 -r bc7cb12aff99 src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.hpp
--- a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.hpp	Thu Jun 19 05:06:56 2014 -0400
+++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.hpp	Thu Jun 19 10:05:45 2014 -0400
@@ -64,6 +64,8 @@

   void init() { tableswitch_count = 0; }

+  void deoptimize_trap(CodeEmitInfo *info);
+
 public:

   void store_parameter(Register r, int offset_from_esp_in_words);
diff -r 3d100af53e18 -r bc7cb12aff99 src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp	Thu Jun 19 05:06:56 2014 -0400
+++ b/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp	Thu Jun 19 10:05:45 2014 -0400
@@ -1321,19 +1321,6 @@

 #undef __

-static Klass* resolve_field_return_klass(methodHandle caller, int bci, TRAPS) {
-  Bytecode_field field_access(caller, bci);
-  // This can be static or non-static field access
-  Bytecodes::Code code       = field_access.code();
-
-  // We must load class, initialize class and resolvethe field
-  fieldDescriptor result; // initialize class if needed
-  constantPoolHandle constants(THREAD, caller->constants());
-  LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK_NULL);
-  return result.field_holder();
-}
-
-
 // Simple helper to see if the caller of a runtime stub which
 // entered the VM has been deoptimized

@@ -1347,261 +1334,40 @@
 }

 JRT_ENTRY(void, Runtime1::patch_code_aarch64(JavaThread* thread, Runtime1::StubID stub_id ))
+{
+  RegisterMap reg_map(thread, false);
+
   NOT_PRODUCT(_patch_code_slowcase_cnt++;)
+  // According to the ARMv8 ARM, "Concurrent modification and
+  // execution of instructions can lead to the resulting instruction
+  // performing any behavior that can be achieved by executing any
+  // sequence of instructions that can be executed from the same
+  // Exception level, except where the instruction before
+  // modification and the instruction after modification is a B, BL,
+  // NOP, BKPT, SVC, HVC, or SMC instruction."
+  //
+  // This effectively makes the games we play when patching
+  // impossible, so when we come across an access that needs
+  // patching we must deoptimize.

-  ResourceMark rm(thread);
-  RegisterMap reg_map(thread, false);
+  if (TracePatching) {
+    tty->print_cr("Deoptimizing because patch is needed");
+  }
+
   frame runtime_frame = thread->last_frame();
   frame caller_frame = runtime_frame.sender(&reg_map);

-  if (DeoptimizeWhenPatching) {
-    // According to the ARMv8 ARM, "Concurrent modification and
-    // execution of instructions can lead to the resulting instruction
-    // performing any behavior that can be achieved by executing any
-    // sequence of instructions that can be executed from the same
-    // Exception level, except where the instruction before
-    // modification and the instruction after modification is a B, BL,
-    // NOP, BKPT, SVC, HVC, or SMC instruction."
-    //
-    // This effectively makes the games we play when patching
-    // impossible, so when we come across an access that needs
-    // patching we must deoptimize.
-
-    if (TracePatching) {
-      tty->print_cr("Deoptimizing because patch is needed");
-    }
-    // It's possible the nmethod was invalidated in the last
-    // safepoint, but if it's still alive then make it not_entrant.
-    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
-    if (nm != NULL) {
-      nm->make_not_entrant();
-    }
-
-    Deoptimization::deoptimize_frame(thread, caller_frame.id());
-
-    // Return to the now deoptimized frame.
-    return;
+  // It's possible the nmethod was invalidated in the last
+  // safepoint, but if it's still alive then make it not_entrant.
+  nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
+  if (nm != NULL) {
+    nm->make_not_entrant();
   }

-  // last java frame on stack
-  vframeStream vfst(thread, true);
-  assert(!vfst.at_end(), "Java frame must exist");
+  Deoptimization::deoptimize_frame(thread, caller_frame.id());

-  methodHandle caller_method(THREAD, vfst.method());
-  // Note that caller_method->code() may not be same as caller_code because of OSR's
-  // Note also that in the presence of inlining it is not guaranteed
-  // that caller_method() == caller_code->method()
-
-  int bci = vfst.bci();
-  Bytecodes::Code code = caller_method()->java_code_at(bci);
-
-  bool deoptimize_for_volatile = false;
-  int patch_field_offset = -1;
-  KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
-  KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
-  Handle mirror(THREAD, NULL);                    // oop needed by load_mirror_patching code
-  fieldDescriptor result; // initialize class if needed
-
-  bool load_klass_or_mirror_patch_id =
-    (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
-
-  if (stub_id == Runtime1::access_field_patching_id) {
-
-    Bytecode_field field_access(caller_method, bci);
-    fieldDescriptor result; // initialize class if needed
-    Bytecodes::Code code = field_access.code();
-    constantPoolHandle constants(THREAD, caller_method->constants());
-    LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK);
-    patch_field_offset = result.offset();
-
-    // If we're patching a field which is volatile then at compile it
-    // must not have been known to be volatile, so the generated code
-    // isn't correct for a volatile reference.  The nmethod has to be
-    // deoptimized so that the code can be regenerated correctly.
-    // This check is only needed for access_field_patching since this
-    // is the path for patching field offsets.  load_klass is only
-    // used for patching references to oops which don't need special
-    // handling in the volatile case.
-    deoptimize_for_volatile = result.access_flags().is_volatile();
-  } else if (load_klass_or_mirror_patch_id) {
-    Klass* k = NULL;
-    switch (code) {
-      case Bytecodes::_putstatic:
-      case Bytecodes::_getstatic:
-        { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
-          init_klass = KlassHandle(THREAD, klass);
-          mirror = Handle(THREAD, klass->java_mirror());
-        }
-        break;
-      case Bytecodes::_new:
-        { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
-          k = caller_method->constants()->klass_at(bnew.index(), CHECK);
-        }
-        break;
-      case Bytecodes::_multianewarray:
-        { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
-          k = caller_method->constants()->klass_at(mna.index(), CHECK);
-        }
-        break;
-      case Bytecodes::_instanceof:
-        { Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci));
-          k = caller_method->constants()->klass_at(io.index(), CHECK);
-        }
-        break;
-      case Bytecodes::_checkcast:
-        { Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci));
-          k = caller_method->constants()->klass_at(cc.index(), CHECK);
-        }
-        break;
-      case Bytecodes::_anewarray:
-        { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));
-          Klass* ek = caller_method->constants()->klass_at(anew.index(), CHECK);
-          k = ek->array_klass(CHECK);
-        }
-        break;
-      case Bytecodes::_ldc:
-      case Bytecodes::_ldc_w:
-        {
-          Bytecode_loadconstant cc(caller_method, bci);
-          oop m = cc.resolve_constant(CHECK);
-          mirror = Handle(THREAD, m);
-        }
-        break;
-      default: Unimplemented();
-    }
-    // convert to handle
-    load_klass = KlassHandle(THREAD, k);
-  } else {
-    ShouldNotReachHere();
-  }
-
-  if (deoptimize_for_volatile) {
-    // At compile time we assumed the field wasn't volatile but after
-    // loading it turns out it was volatile so we have to throw the
-    // compiled code out and let it be regenerated.
-    if (TracePatching) {
-      tty->print_cr("Deoptimizing for patching volatile field reference");
-    }
-    // It's possible the nmethod was invalidated in the last
-    // safepoint, but if it's still alive then make it not_entrant.
-    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
-    if (nm != NULL) {
-      nm->make_not_entrant();
-    }
-
-    Deoptimization::deoptimize_frame(thread, caller_frame.id());
-
-    // Return to the now deoptimized frame.
-  }
-
-  // If we are patching in a non-perm oop, make sure the nmethod
-  // is on the right list.
-  if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
-    MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
-    guarantee(nm != NULL, "only nmethods can contain non-perm oops");
-    if (!nm->on_scavenge_root_list())
-      CodeCache::add_scavenge_root_nmethod(nm);
-  }
-
-  // Now copy code back
-  {
-    MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
-    //
-    // Deoptimization may have happened while we waited for the lock.
-    // In that case we don't bother to do any patching we just return
-    // and let the deopt happen
-    if (!caller_is_deopted()) {
-      NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
-      address instr_pc = jump->jump_destination();
-      NativeInstruction* ni = nativeInstruction_at(instr_pc);
-      if (ni->is_jump() ) {
-	// the jump has not been patched yet
-	address stub_location = caller_frame.pc() + PatchingStub::patch_info_offset();
-	unsigned char* byte_count = (unsigned char*) (stub_location - 1);
-	unsigned char* byte_skip = (unsigned char*) (stub_location - 2);
-	unsigned char* being_initialized_entry_offset = (unsigned char*) (stub_location - 3);
-	address copy_buff = stub_location - *byte_skip - *byte_count;
-	address being_initialized_entry = stub_location - *being_initialized_entry_offset;
-	if (TracePatching) {
-	  tty->print_cr(" Patching %s at bci %d at address 0x%x  (%s)", Bytecodes::name(code), bci,
-			instr_pc, (stub_id == Runtime1::access_field_patching_id) ? "field" : "klass");
-	  nmethod* caller_code = CodeCache::find_nmethod(caller_frame.pc());
-	  assert(caller_code != NULL, "nmethod not found");
-
-	  // NOTE we use pc() not original_pc() because we already know they are
-	  // identical otherwise we'd have never entered this block of code
-	  OopMap* map = caller_code->oop_map_for_return_address(caller_frame.pc());
-	  assert(map != NULL, "null check");
-	  map->print();
-	  tty->cr();
-
-	  Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
-	}
-
-	// The word in the constant pool needs fixing.
-	unsigned insn = *(unsigned*)copy_buff;
-	unsigned long *cpool_addr
-	  = (unsigned long *)MacroAssembler::target_addr_for_insn(instr_pc, insn);
-
-	nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
-	CodeBlob *cb = CodeCache::find_blob(caller_frame.pc());
-	assert(nm != NULL, "invalid nmethod_pc");
-	assert(address(cpool_addr) >= nm->consts_begin()
-	       && address(cpool_addr) < nm->consts_end(),
-	       "constant address should be inside constant pool");
-
-	switch(stub_id) {
-	case access_field_patching_id:
-	  *cpool_addr = patch_field_offset; break;
-	case load_mirror_patching_id:
-	  *cpool_addr = cast_from_oop<uint64_t>(mirror()); break;
-	case load_klass_patching_id:
-	  *cpool_addr = (uint64_t)load_klass(); break;
-	default:
-	  ShouldNotReachHere();
-	}
-
-	// Update the location in the nmethod with the proper
-	// metadata.  When the code was generated, a NULL was stuffed
-	// in the metadata table and that table needs to be update to
-	// have the right value.  On intel the value is kept
-	// directly in the instruction instead of in the metadata
-	// table, so set_data above effectively updated the value.
-	//
-	// FIXME: It's tempting to think that rather them putting OOPs
-	// in the cpool we could refer directly to the locations in the
-	// nmethod.  However, we can't guarantee that an ADRP would be
-	// able to reach them: an ADRP can only reach within +- 4GiB of
-	// the PC using two instructions.  While it's pretty unlikely
-	// that we will exceed this limit, it's not impossible.
-	RelocIterator mds(nm, (address)cpool_addr, (address)cpool_addr + 1);
-	bool found = false;
-	while (mds.next() && !found) {
-	  if (mds.type() == relocInfo::oop_type) {
-	    assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
-	    oop_Relocation* r = mds.oop_reloc();
-	    oop* oop_adr = r->oop_addr();
-	    *oop_adr = mirror();
-	    r->fix_oop_relocation();
-	    found = true;
-	  } else if (mds.type() == relocInfo::metadata_type) {
-	    assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
-	    metadata_Relocation* r = mds.metadata_reloc();
-	    Metadata** metadata_adr = r->metadata_addr();
-	    *metadata_adr = load_klass();
-	    r->fix_metadata_relocation();
-	    found = true;
-	  }
-	}
-
-	// And we overwrite the jump
-	NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
-
-      }
-    }
-  }
-
+  // Return to the now deoptimized frame.
+}
 JRT_END

 int Runtime1::access_field_patching(JavaThread* thread) {
diff -r 3d100af53e18 -r bc7cb12aff99 src/cpu/aarch64/vm/globals_aarch64.hpp
--- a/src/cpu/aarch64/vm/globals_aarch64.hpp	Thu Jun 19 05:06:56 2014 -0400
+++ b/src/cpu/aarch64/vm/globals_aarch64.hpp	Thu Jun 19 10:05:45 2014 -0400
@@ -94,9 +94,6 @@
   product(bool, NearCpool, true,                                        \
          "constant pool is close to instructions")                      \
                                                                         \
-  product(bool, DeoptimizeWhenPatching, true,                           \
-          "doptimize instead of patching instructions")                 \
-									\
   notproduct(bool, UseAcqRelForVolatileFields, false,			\
 	     "Use acquire and release insns for volatile fields")

@@ -111,9 +108,6 @@
   product(bool, NearCpool, true,					\
          "constant pool is close to instructions")			\
                                                                         \
-  product(bool, DeoptimizeWhenPatching, true,                           \
-          "doptimize instead of patching instructions")			\
-									\
   notproduct(bool, UseAcqRelForVolatileFields, false,			\
 	     "Use acquire and release insns for volatile fields")

diff -r 3d100af53e18 -r bc7cb12aff99 src/cpu/aarch64/vm/nativeInst_aarch64.cpp
--- a/src/cpu/aarch64/vm/nativeInst_aarch64.cpp	Thu Jun 19 05:06:56 2014 -0400
+++ b/src/cpu/aarch64/vm/nativeInst_aarch64.cpp	Thu Jun 19 10:05:45 2014 -0400
@@ -242,8 +242,7 @@

 // MT-safe patching of a long jump instruction.
 void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
-  assert((! DeoptimizeWhenPatching)
-	 || nativeInstruction_at(instr_addr)->is_jump_or_nop(),
+  assert(nativeInstruction_at(instr_addr)->is_jump_or_nop(),
 	 "Aarch64 cannot replace non-jump with jump");
   uint32_t instr = *(uint32_t*)code_buffer;
   *(uint32_t*)instr_addr = instr;
diff -r 3d100af53e18 -r bc7cb12aff99 src/cpu/aarch64/vm/relocInfo_aarch64.cpp
--- a/src/cpu/aarch64/vm/relocInfo_aarch64.cpp	Thu Jun 19 05:06:56 2014 -0400
+++ b/src/cpu/aarch64/vm/relocInfo_aarch64.cpp	Thu Jun 19 10:05:45 2014 -0400
@@ -90,78 +90,3 @@

 void metadata_Relocation::pd_fix_value(address x) {
 }
-
-// We have a relocation that points to a pair of instructions that
-// load a constant from the constant pool.  These are
-// ARDP; LDR reg [reg, #ofs].  However, until the constant is resolved
-// the first instruction may be a branch to a resolver stub, and the
-// resolver stub contains a copy of the ADRP that will replace the
-// branch instruction.
-//
-// So, when we relocate this code we have to adjust the offset in the
-// LDR instruction and the page offset in the copy of the ADRP
-// instruction that will overwrite the branch instruction.  This is
-// done by Runtime1::patch_code_aarch64.
-
-void section_word_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
-  unsigned insn1 = *(unsigned*)addr();
-  if (! (Instruction_aarch64::extract(insn1, 30, 26) == 0b00101)) {
-    // Unconditional branch (immediate)
-    internal_word_Relocation::fix_relocation_after_move(src, dest);
-    return;
-  }
-
-  address new_address = target();
-#ifdef ASSERT
-  // Make sure this really is a cpool address
-  address old_cpool_start = const_cast<CodeBuffer*>(src)->consts()->start();
-  address old_cpool_end = const_cast<CodeBuffer*>(src)->consts()->end();
-  address new_cpool_start =  const_cast<CodeBuffer*>(dest)->consts()->start();
-  address new_cpool_end =  const_cast<CodeBuffer*>(dest)->consts()->end();
-  address old_address = old_addr_for(target(), src, dest);
-  assert(new_address >= new_cpool_start
-	 && new_address < new_cpool_end,
-	 "should be");
-  assert(old_address >= old_cpool_start
-	 && old_address < old_cpool_end,
-	 "should be");
-#endif
-
-  address stub_location = pd_call_destination(addr());
-  unsigned char* byte_count = (unsigned char*) (stub_location - 1);
-  unsigned char* byte_skip = (unsigned char*) (stub_location - 2);
-  address copy_buff = stub_location - *byte_skip - *byte_count;
-  unsigned insn3 = *(unsigned*)copy_buff;
-
-  if (NearCpool) {
-    int offset = new_address - addr();
-    Instruction_aarch64::spatch(copy_buff, 23, 5, offset >> 2);
-  } else {
-    // Unconditional branch (immediate)
-    unsigned insn2 = ((unsigned*)addr())[1];
-    if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001) {
-      // Load/store register (unsigned immediate)
-      unsigned size = Instruction_aarch64::extract(insn2, 31, 30);
-
-      // Offset of address in a 4k page
-      uint64_t new_offset = (uint64_t)target() & ((1<<12) - 1);
-      // Fix the LDR instruction's offset
-      Instruction_aarch64::patch(addr() + sizeof (unsigned),
-				 21, 10, new_offset >> size);
-
-      assert(Instruction_aarch64::extract(insn3, 28, 24) == 0b10000
-	     && Instruction_aarch64::extract(insn3, 31, 31),
-	     "instruction should be an ADRP");
-
-      uint64_t insn_page = (uint64_t)addr() >> 12;
-      uint64_t target_page = (uint64_t)target() >> 12;
-      int page_offset = target_page - insn_page;
-      int page_offset_lo = page_offset & 3;
-      page_offset >>= 2;
-      Instruction_aarch64::spatch(copy_buff, 23, 5, page_offset);
-      Instruction_aarch64::patch(copy_buff, 30, 29, page_offset_lo);
-
-      // Phew.
-    }
-  }
-}
diff -r 3d100af53e18 -r bc7cb12aff99 src/share/vm/code/relocInfo.cpp
--- a/src/share/vm/code/relocInfo.cpp	Thu Jun 19 05:06:56 2014 -0400
+++ b/src/share/vm/code/relocInfo.cpp	Thu Jun 19 10:05:45 2014 -0400
@@ -147,11 +147,6 @@
   _section_end  [CodeBuffer::SECT_STUBS ] = nm->stub_end()    ;

   assert(!has_current(), "just checking");
-#ifndef TARGET_ARCH_aarch64
-  // aarch64 has relocs in the cpool
-  assert(begin == NULL || begin >= nm->code_begin(), "in bounds");
-  assert(limit == NULL || limit <= nm->code_end(),   "in bounds");
-#endif
   set_limits(begin, limit);
 }

diff -r 3d100af53e18 -r bc7cb12aff99 src/share/vm/code/relocInfo.hpp
--- a/src/share/vm/code/relocInfo.hpp	Thu Jun 19 05:06:56 2014 -0400
+++ b/src/share/vm/code/relocInfo.hpp	Thu Jun 19 10:05:45 2014 -0400
@@ -1307,10 +1307,6 @@
   //void pack_data_to -- inherited
   void unpack_data();

-#ifdef TARGET_ARCH_aarch64
-  void fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest);
-#endif
-
  private:
   friend class RelocIterator;
   section_word_Relocation() { }

