[aarch64-port-dev ] Enable breakpoints and watchpoints

Andrew Haley aph at redhat.com
Fri Aug 23 07:27:47 PDT 2013


I've done some simple smoke tests with Eclipse.  It'll need a bit more
bashing before it's solid.

Andrew.


# HG changeset patch
# User aph
# Date 1377267977 -3600
# Node ID 356ebc1be1d9454649e921fe83abf19e1c5da59b
# Parent  b0a65a4a60949130760398a2db5da7138738369e
Enable breakpoints and watchpoints

diff -r b0a65a4a6094 -r 356ebc1be1d9 src/cpu/aarch64/vm/interp_masm_aarch64.cpp
--- a/src/cpu/aarch64/vm/interp_masm_aarch64.cpp	Thu Aug 22 19:02:06 2013 +0100
+++ b/src/cpu/aarch64/vm/interp_masm_aarch64.cpp	Fri Aug 23 15:26:17 2013 +0100
@@ -51,7 +51,23 @@

 void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
   if (JvmtiExport::can_pop_frame()) {
-    Unimplemented();
+    Label L;
+    // Initiate popframe handling only if it is not already being
+    // processed.  If the flag has the popframe_processing bit set, it
+    // means that this code is called *during* popframe handling - we
+    // don't want to reenter.
+    // This method is only called just after the call into the vm in
+    // call_VM_base, so the arg registers are available.
+    ldr(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));
+    tst(rscratch1, JavaThread::popframe_pending_bit);
+    br(Assembler::EQ, L);
+    tst(rscratch1, JavaThread::popframe_processing_bit);
+    br(Assembler::NE, L);
+    // Call Interpreter::remove_activation_preserving_args_entry() to get the
+    // address of the same-named entrypoint in the generated interpreter code.
+    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
+    br(r0);
+    bind(L);
   }
 }

@@ -62,9 +78,26 @@
 }

 void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {
-    if (JvmtiExport::can_force_early_return()) {
-      Unimplemented();
-    }
+  if (JvmtiExport::can_force_early_return()) {
+    Label L;
+    ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
+    cbz(rscratch1, L); // if (thread->jvmti_thread_state() == NULL) exit;
+
+    // Initiate earlyret handling only if it is not already being processed.
+    // If the flag has the earlyret_processing bit set, it means that this code
+    // is called *during* earlyret handling - we don't want to reenter.
+    ldrw(rscratch1, Address(rscratch1, JvmtiThreadState::earlyret_state_offset()));
+    cmpw(rscratch1, JvmtiThreadState::earlyret_pending);
+    br(Assembler::NE, L);
+
+    // Call Interpreter::remove_activation_early_entry() to get the address of the
+    // same-named entrypoint in the generated interpreter code.
+    ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
+    ldrw(rscratch1, Address(rscratch1, JvmtiThreadState::earlyret_tos_offset()));
+    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), rscratch1);
+    br(r0);
+    bind(L);
+  }
 }

 void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(
@@ -102,6 +135,13 @@
   }
 }

+// Return
+// Rindex: index into constant pool
+// Rcache: address of cache entry - ConstantPoolCache::base_offset()
+//
+// A caller must add ConstantPoolCache::base_offset() to Rcache to get
+// the true address of the cache entry.
+//
 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
                                                            Register index,
                                                            int bcp_offset,
@@ -1298,11 +1338,38 @@

 void InterpreterMacroAssembler::notify_method_exit(
     TosState state, NotifyMethodExitMode mode) {
+  // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
+  // track stack depth.  If it is possible to enter interp_only_mode we add
+  // the code to check if the event should be sent.
   if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
-    call_Unimplemented();
+    Label L;
+    // Note: frame::interpreter_frame_result has a dependency on how the
+    // method result is saved across the call to post_method_exit. If this
+    // is changed then the interpreter_frame_result implementation will
+    // need to be updated too.
+
+    // For c++ interpreter the result is always stored at a known location in the frame
+    // template interpreter will leave it on the top of the stack.
+    NOT_CC_INTERP(push(state);)
+    ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
+    cbz(r3, L);
+    call_VM(noreg,
+            CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
+    bind(L);
+    NOT_CC_INTERP(pop(state));
+  }
+
+  {
+    SkipIfEqual skip(this, &DTraceMethodProbes, false);
+    NOT_CC_INTERP(push(state));
+    get_method(c_rarg1);
+    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
+                 rthread, c_rarg1);
+    NOT_CC_INTERP(pop(state));
   }
 }

+
 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                         int increment, int mask,
diff -r b0a65a4a6094 -r 356ebc1be1d9 src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Thu Aug 22 19:02:06 2013 +0100
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Fri Aug 23 15:26:17 2013 +0100
@@ -360,11 +360,9 @@
   // Only interpreter should have to clear fp
   reset_last_Java_frame(true, false);

-#ifndef CC_INTERP
    // C++ interp handles this in the interpreter
   check_and_handle_popframe(java_thread);
   check_and_handle_earlyret(java_thread);
-#endif /* CC_INTERP */

   if (check_exceptions) {
     // check for pending exceptions (java_thread is set upon return)
diff -r b0a65a4a6094 -r 356ebc1be1d9 src/cpu/aarch64/vm/templateTable_aarch64.cpp
--- a/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Thu Aug 22 19:02:06 2013 +0100
+++ b/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Fri Aug 23 15:26:17 2013 +0100
@@ -2197,13 +2197,36 @@
 // The registers cache and index expected to be set before call.
 // Correct values of the cache and index registers are preserved.
 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
-                                            bool is_static, bool has_tos)
-{
+                                            bool is_static, bool has_tos) {
   // do the JVMTI work here to avoid disturbing the register state below
   // We use c_rarg registers here because we want to use the register used in
   // the call to the VM
   if (JvmtiExport::can_post_field_access()) {
-    __ call_Unimplemented();
+    // Check to see if a field access watch has been set before we
+    // take the time to call into the VM.
+    Label L1;
+    assert_different_registers(cache, index, r0);
+    __ lea(rscratch1, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
+    __ ldrw(r0, Address(rscratch1));
+    __ cbzw(r0, L1);
+
+    __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
+    __ lea(c_rarg2, Address(c_rarg2, in_bytes(ConstantPoolCache::base_offset())));
+
+    if (is_static) {
+      __ mov(c_rarg1, zr); // NULL object reference
+    } else {
+      __ ldr(c_rarg1, at_tos()); // get object pointer without popping it
+      __ verify_oop(c_rarg1);
+    }
+    // c_rarg1: object pointer or NULL
+    // c_rarg2: cache entry pointer
+    // c_rarg3: jvalue object on the stack
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
+                                       InterpreterRuntime::post_field_access),
+               c_rarg1, c_rarg2, c_rarg3);
+    __ get_cache_and_index_at_bcp(cache, index, 1);
+    __ bind(L1);
   }
 }

@@ -2363,10 +2386,59 @@

 // The registers cache and index expected to be set before call.
 // The function may destroy various registers, just not the cache and index registers.
-void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static)
-{
+void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
+  transition(vtos, vtos);
+
+  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
+
   if (JvmtiExport::can_post_field_modification()) {
-    __ call_Unimplemented();
+    // Check to see if a field modification watch has been set before
+    // we take the time to call into the VM.
+    Label L1;
+    assert_different_registers(cache, index, r0);
+    __ mov(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
+    __ ldrw(r0, Address(rscratch1));
+    __ cbz(r0, L1);
+
+    __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1);
+
+    if (is_static) {
+      // Life is simple.  Null out the object pointer.
+      __ mov(c_rarg1, zr);
+    } else {
+      // Life is harder. The stack holds the value on top, followed by
+      // the object.  We don't know the size of the value, though; it
+      // could be one or two words depending on its type. As a result,
+      // we must find the type to determine where the object is.
+      __ ldrw(c_rarg3, Address(c_rarg2,
+			       in_bytes(cp_base_offset +
+					ConstantPoolCacheEntry::flags_offset())));
+      __ lsr(c_rarg3, c_rarg3,
+	     ConstantPoolCacheEntry::tos_state_shift);
+      ConstantPoolCacheEntry::verify_tos_state_shift();
+      Label nope2, done, ok;
+      __ ldr(c_rarg1, at_tos_p1());  // initially assume a one word jvalue
+      __ cmpw(c_rarg3, ltos);
+      __ br(Assembler::EQ, ok);
+      __ cmpw(c_rarg3, dtos);
+      __ br(Assembler::NE, nope2);
+      __ bind(ok);
+      __ ldr(c_rarg1, at_tos_p2()); // ltos (two word jvalue)
+      __ bind(nope2);
+    }
+    // cache entry pointer
+    __ add(c_rarg2, c_rarg2, in_bytes(cp_base_offset));
+    // object (tos)
+    __ mov(c_rarg3, esp);
+    // c_rarg1: object pointer set up above (NULL if static)
+    // c_rarg2: cache entry pointer
+    // c_rarg3: jvalue object on the stack
+    __ call_VM(noreg,
+               CAST_FROM_FN_PTR(address,
+                                InterpreterRuntime::post_field_modification),
+               c_rarg1, c_rarg2, c_rarg3);
+    __ get_cache_and_index_at_bcp(cache, index, 1);
+    __ bind(L1);
   }
 }

@@ -2381,7 +2453,7 @@
   const Register bc    = r4;

   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
-  jvmti_post_field_mod(rcpool, index, is_static);
+  jvmti_post_field_mod(cache, index, is_static);
   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);

   Label Done;
@@ -3333,9 +3405,29 @@

 //-----------------------------------------------------------------------------
 // Breakpoints
-void TemplateTable::_breakpoint()
-{
-  __ call_Unimplemented();
+void TemplateTable::_breakpoint() {
+  // Note: We get here even if we are single stepping.
+  // jbug insists on setting breakpoints at every bytecode
+  // even if we are in single step mode.
+
+  transition(vtos, vtos);
+
+  // get the unpatched byte code
+  __ get_method(c_rarg1);
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address,
+                              InterpreterRuntime::get_original_bytecode_at),
+             c_rarg1, rbcp);
+  __ mov(r19, r0);
+
+  // post the breakpoint event
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
+             rmethod, rbcp);
+
+  // complete the execution of original bytecode
+  __ mov(rscratch1, r19);
+  __ dispatch_only_normal(vtos);
 }

 //-----------------------------------------------------------------------------



More information about the aarch64-port-dev mailing list