[aarch64-port-dev] Merge JDK9 changes to aarch64 tree
Andrew Haley
aph at redhat.com
Mon Jul 21 10:50:00 UTC 2014
This is a bulk import of all JDK9 updates up to last week. I've
attached the fairly minor set of AArch64-specific changes.
Andrew.
# HG changeset patch
# User aph
# Date 1405691238 14400
# Fri Jul 18 09:47:18 2014 -0400
# Node ID c75bd2134c4a9eb22442f6339ac3a6b0fae0852e
# Parent 523ddf5c9ceb32d7879cf65e15450db2540aadaa
Merge JDK9 changes to aarch64 tree
diff -r 523ddf5c9ceb -r c75bd2134c4a src/cpu/aarch64/vm/aarch64.ad
--- a/src/cpu/aarch64/vm/aarch64.ad Thu Jul 17 14:48:28 2014 +0100
+++ b/src/cpu/aarch64/vm/aarch64.ad Fri Jul 18 09:47:18 2014 -0400
@@ -987,15 +987,16 @@
MacroAssembler _masm(&cbuf);
// n.b. frame size includes space for return pc and rfp
- long framesize = ((long)C->frame_slots()) << LogBytesPerInt;
+ const long framesize = C->frame_size_in_bytes();
assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");
// insert a nop at the start of the prolog so we can patch in a
// branch if we need to invalidate the method later
__ nop();
- if (C->need_stack_bang(framesize))
- __ generate_stack_overflow_check(framesize);
+ int bangsize = C->bang_size_in_bytes();
+ if (C->need_stack_bang(bangsize) && UseStackBanging)
+ __ generate_stack_overflow_check(bangsize);
__ build_frame(framesize);
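A note on the two sizes: frame_size_in_bytes() is what build_frame() actually allocates, while bang_size_in_bytes() is how far the overflow check probes, and it may be larger. A minimal sketch (plain C++, not HotSpot code) of the invariant; the same check appears as a new assert in c1_MacroAssembler_aarch64.cpp further down:

#include <cassert>

// Hypothetical helper illustrating the invariant only: the probe must
// cover at least the whole frame being built.
void build_frame_checked(long framesize, int bang_size_in_bytes) {
  assert(bang_size_in_bytes >= framesize && "stack bang size incorrect");
  // ... probe bang_size_in_bytes of stack, then carve out framesize ...
}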
@@ -6553,6 +6554,26 @@
ins_pipe(pipe_class_memory);
%}
+// Manifest a CmpL result in an integer register. Very painful.
+// This is the test to avoid.
+// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
+instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
+%{
+ match(Set dst (CmpL3 src1 src2));
+ effect(KILL flags);
+
+ ins_cost(INSN_COST * 6);
+ format %{ "CmpL3 $dst, $src1, $src2" %}
+ ins_encode %{
+ __ cmp($src1$$Register, $src2$$Register);
+ __ movw($dst$$Register, -1);
+ __ cselw($dst$$Register, zr, $dst$$Register, Assembler::EQ);
+ __ csincw($dst$$Register, $dst$$Register, zr, Assembler::GT);
+ %}
+
+ ins_pipe(pipe_class_default);
+%}
+
// ============================================================================
// Conditional Move Instructions
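For reference, the result convention the new CmpL3 pattern must produce, restated from its own comment as plain C++ (this is the specification, not the emitted instruction sequence):

#include <cstdint>

// The value a CmpL3 node must leave in dst.
int cmpL3(int64_t src1, int64_t src2) {
  if (src1 < src2) return -1;
  if (src1 > src2) return 1;
  return 0;  // equal
}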
diff -r 523ddf5c9ceb -r c75bd2134c4a src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp Thu Jul 17 14:48:28 2014 +0100
+++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp Fri Jul 18 09:47:18 2014 -0400
@@ -237,7 +237,7 @@
// build frame
ciMethod* m = compilation()->method();
- __ build_frame(initial_frame_size_in_bytes());
+ __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
// OSR buffer is
//
@@ -557,9 +557,10 @@
assert(os::is_poll_address(polling_page), "should be");
unsigned long off;
__ adrp(rscratch1, Address(polling_page, relocInfo::poll_type), off);
+ assert(off == 0, "must be");
add_debug_info_for_branch(info); // This isn't just debug info:
// it's the oop map
- __ ldrw(zr, Address(rscratch1, off));
+ __ read_polling_page(rscratch1, relocInfo::poll_type);
} else {
poll_for_safepoint(relocInfo::poll_type, info);
}
@@ -659,6 +660,11 @@
}
}
break;
+ case T_ADDRESS:
+ {
+ const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
+ reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
+ }
+ break;
case T_INT:
case T_FLOAT:
{
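The new assert(off == 0) holds because the polling page is page-aligned, so adrp resolves it with no low-order offset. For readers unfamiliar with the mechanism, a hedged POSIX sketch of how safepoint polling works in general (HotSpot's real implementation differs in detail):

#include <cstdint>
#include <sys/mman.h>

static void* polling_page;
static const size_t kPage = 4096;  // assumed page size

void init_poll()   { polling_page = mmap(nullptr, kPage, PROT_READ,
                                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); }
void arm_poll()    { mprotect(polling_page, kPage, PROT_NONE); }  // safepoint pending
void disarm_poll() { mprotect(polling_page, kPage, PROT_READ); }

// The poll compiled code emits: a load that is nearly free normally
// and faults into the signal handler when the page is protected.
inline void poll() { *(volatile int32_t*)polling_page; }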
diff -r 523ddf5c9ceb -r c75bd2134c4a src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.cpp Thu Jul 17 14:48:28 2014 +0100
+++ b/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.cpp Fri Jul 18 09:47:18 2014 -0400
@@ -415,6 +415,7 @@
// must ensure that this first instruction is a B, BL, NOP, BKPT,
// SVC, HVC, or SMC. Make it a NOP.
nop();
+ assert(bang_size_in_bytes >= framesize, "stack bang size incorrect");
// Make sure there is enough stack space for this method's activation.
// Note that we do this before doing an enter().
generate_stack_overflow_check(bang_size_in_bytes);
diff -r 523ddf5c9ceb -r c75bd2134c4a src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp Thu Jul 17 14:48:28 2014 +0100
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp Fri Jul 18 09:47:18 2014 -0400
@@ -3264,7 +3264,11 @@
br(Assembler::GT, loop);
// Bang down shadow pages too.
- // The -1 because we already subtracted 1 page.
+ // At this point, (tmp-0) is the last address touched, so don't
+ // touch it again. (It was touched as (tmp-pagesize) but then tmp
+ // was post-decremented.) Skip this address by starting at i=1, and
+ // touch a few more pages below. N.B. It is important to touch all
+ // the way down to and including i=StackShadowPages.
for (int i = 0; i < StackShadowPages-1; i++) {
// this could be any sized move but this can be a debugging crumb
// so the bigger the better.
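To restate the rewritten comment concretely, a schematic version of the banging loop in plain C++ (constants are assumptions; the real code emits stores through the assembler and its loop indexing differs slightly):

#include <cstddef>

const ptrdiff_t kPageSize = 4096;   // assumed
const int kStackShadowPages = 20;   // stand-in for StackShadowPages

void bang_stack_with_offset(volatile char* sp, size_t offset) {
  volatile char* tmp = sp;
  // Walk down one page at a time, touching a byte on each page.
  while (offset >= (size_t)kPageSize) {
    tmp -= kPageSize;
    *tmp = 0;
    offset -= kPageSize;
  }
  // tmp itself was just touched, so start the shadow loop one page
  // below it and touch every page down to i = kStackShadowPages.
  for (int i = 1; i <= kStackShadowPages; i++)
    tmp[-i * kPageSize] = 0;
}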
diff -r 523ddf5c9ceb -r c75bd2134c4a src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
--- a/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp Thu Jul 17 14:48:28 2014 +0100
+++ b/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp Fri Jul 18 09:47:18 2014 -0400
@@ -2447,12 +2447,15 @@
__ ldp(rfp, lr, __ post(sp, 2 * wordSize));
// LR should now be the return address to the caller (3)
- // Stack bang to make sure there's enough room for these interpreter frames.
+#ifdef ASSERT
+ // Compilers generate code that bangs the stack by as much as the
+ // interpreter would need, so this stack banging should never
+ // trigger a fault. Verify that it does not on non-product builds.
if (UseStackBanging) {
__ ldrw(r19, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
__ bang_stack_size(r19, r2);
}
-
+#endif
// Load address of array of frame pcs into r2
__ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
@@ -2655,13 +2658,17 @@
__ ldp(rfp, lr, __ post(sp, 2 * wordSize));
// LR should now be the return address to the caller (3) frame
- // Stack bang to make sure there's enough room for these interpreter frames.
+#ifdef ASSERT
+ // Compilers generate code that bangs the stack by as much as the
+ // interpreter would need, so this stack banging should never
+ // trigger a fault. Verify that it does not on non-product builds.
if (UseStackBanging) {
__ ldrw(r1, Address(r4,
Deoptimization::UnrollBlock::
total_frame_sizes_offset_in_bytes()));
__ bang_stack_size(r1, r2);
}
+#endif
// Load address of array of frame pcs into r2 (address*)
__ ldr(r2, Address(r4,
diff -r 523ddf5c9ceb -r c75bd2134c4a src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp
--- a/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp Thu Jul 17 14:48:28 2014 +0100
+++ b/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp Fri Jul 18 09:47:18 2014 -0400
@@ -1538,29 +1538,18 @@
return (overhead_size + method_stack + stub_code);
}
-void AbstractInterpreter::layout_activation(Method* method,
- int tempcount,
- int popframe_extra_args,
- int moncount,
- int caller_actual_parameters,
- int callee_param_count,
- int callee_locals,
- frame* caller,
- frame* interpreter_frame,
- bool is_top_frame,
- bool is_bottom_frame) {
+// asm based interpreter deoptimization helpers
+int AbstractInterpreter::size_activation(int max_stack,
+ int temps,
+ int extra_args,
+ int monitors,
+ int callee_params,
+ int callee_locals,
+ bool is_top_frame) {
// Note: This calculation must exactly parallel the frame setup
// in AbstractInterpreterGenerator::generate_method_entry.
- // If interpreter_frame!=NULL, set up the method, locals, and monitors.
- // The frame interpreter_frame, if not NULL, is guaranteed to be the
- // right size, as determined by a previous call to this method.
- // It is also guaranteed to be walkable even though it is in a skeletal state
// fixed size of an interpreter frame:
- int max_locals = method->max_locals() * Interpreter::stackElementWords;
- int extra_locals = (method->max_locals() - method->size_of_parameters()) *
- Interpreter::stackElementWords;
-
int overhead = frame::sender_sp_offset -
frame::interpreter_frame_initial_sp_offset;
// Our locals were accounted for by the caller (or last_frame_adjust
@@ -1568,59 +1557,78 @@
// for the callee's params we only need to account for the extra
// locals.
int size = overhead +
- (callee_locals - callee_param_count)*Interpreter::stackElementWords +
- moncount * frame::interpreter_frame_monitor_size() +
- tempcount* Interpreter::stackElementWords + popframe_extra_args;
+ (callee_locals - callee_params)*Interpreter::stackElementWords +
+ monitors * frame::interpreter_frame_monitor_size() +
+ temps* Interpreter::stackElementWords + extra_args;
// On AArch64 we always keep the stack pointer 16-aligned, so we
// must round up here.
size = round_to(size, 2);
- if (interpreter_frame != NULL) {
+ return size;
+}
+
+void AbstractInterpreter::layout_activation(Method* method,
+ int tempcount,
+ int popframe_extra_args,
+ int moncount,
+ int caller_actual_parameters,
+ int callee_param_count,
+ int callee_locals,
+ frame* caller,
+ frame* interpreter_frame,
+ bool is_top_frame,
+ bool is_bottom_frame) {
+ // The frame interpreter_frame is guaranteed to be the right size,
+ // as determined by a previous call to the size_activation() method.
+ // It is also guaranteed to be walkable even though it is in a
+ // skeletal state
+
+ int max_locals = method->max_locals() * Interpreter::stackElementWords;
+ int extra_locals = (method->max_locals() - method->size_of_parameters()) *
+ Interpreter::stackElementWords;
+
#ifdef ASSERT
- assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
+ assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable");
#endif
- interpreter_frame->interpreter_frame_set_method(method);
- // NOTE the difference in using sender_sp and
- // interpreter_frame_sender_sp interpreter_frame_sender_sp is
- // the original sp of the caller (the unextended_sp) and
- // sender_sp is fp+16 XXX
- intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
+ interpreter_frame->interpreter_frame_set_method(method);
+ // NOTE the difference in using sender_sp and
+ // interpreter_frame_sender_sp interpreter_frame_sender_sp is
+ // the original sp of the caller (the unextended_sp) and
+ // sender_sp is fp+8/16 (32bit/64bit) XXX
+ intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
#ifdef ASSERT
- if (caller->is_interpreted_frame()) {
- assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
- }
+ if (caller->is_interpreted_frame()) {
+ assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
+ }
#endif
- interpreter_frame->interpreter_frame_set_locals(locals);
- BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
- BasicObjectLock* monbot = montop - moncount;
- interpreter_frame->interpreter_frame_set_monitor_end(monbot);
+ interpreter_frame->interpreter_frame_set_locals(locals);
+ BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
+ BasicObjectLock* monbot = montop - moncount;
+ interpreter_frame->interpreter_frame_set_monitor_end(monbot);
- // Set last_sp
- intptr_t* esp = (intptr_t*) monbot -
- tempcount*Interpreter::stackElementWords -
- popframe_extra_args;
- interpreter_frame->interpreter_frame_set_last_sp(esp);
+ // Set last_sp
+ intptr_t* esp = (intptr_t*) monbot -
+ tempcount*Interpreter::stackElementWords -
+ popframe_extra_args;
+ interpreter_frame->interpreter_frame_set_last_sp(esp);
- // All frames but the initial (oldest) interpreter frame we fill in have
- // a value for sender_sp that allows walking the stack but isn't
- // truly correct. Correct the value here.
- if (extra_locals != 0 &&
- interpreter_frame->sender_sp() ==
- interpreter_frame->interpreter_frame_sender_sp()) {
- interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
- extra_locals);
- }
- *interpreter_frame->interpreter_frame_cache_addr() =
- method->constants()->cache();
+ // All frames but the initial (oldest) interpreter frame we fill in have
+ // a value for sender_sp that allows walking the stack but isn't
+ // truly correct. Correct the value here.
+ if (extra_locals != 0 &&
+ interpreter_frame->sender_sp() ==
+ interpreter_frame->interpreter_frame_sender_sp()) {
+ interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
+ extra_locals);
+ }
+ *interpreter_frame->interpreter_frame_cache_addr() =
+ method->constants()->cache();
+}
- // interpreter_frame->obj_at_put(frame::sender_sp_offset,
- // (oop)interpreter_frame->addr_at(frame::sender_sp_offset));
- }
-}
//-----------------------------------------------------------------------------
// Exceptions
@@ -1678,7 +1686,7 @@
// r0: exception handler entry point
// r3: preserved exception oop
- // rbcp: bcp for exception handlerg
+ // rbcp: bcp for exception handler
__ push_ptr(r3); // push exception which is now the only value on the stack
__ br(r0); // jump to exception handler (may be _remove_activation_entry!)
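To make the new size_activation() arithmetic above concrete, here it is as a standalone function with a worked call. The constants are assumptions (typical 64-bit HotSpot values), and max_stack is omitted because the formula shown does not use it:

// Plain restatement of the size_activation() formula, in words.
const int kStackElementWords = 1;  // assumed: one slot per stack element
const int kMonitorSizeWords  = 2;  // assumed: BasicObjectLock is two words
const int kOverheadWords     = 2;  // placeholder for the sender_sp/initial_sp delta

int round_to(int x, int m) { return (x + m - 1) / m * m; }

int size_activation(int temps, int extra_args, int monitors,
                    int callee_params, int callee_locals) {
  int size = kOverheadWords +
             (callee_locals - callee_params) * kStackElementWords +
             monitors * kMonitorSizeWords +
             temps * kStackElementWords + extra_args;
  // AArch64 keeps sp 16-byte aligned, hence rounding up to 2 words.
  return round_to(size, 2);
}

// e.g. size_activation(3, 0, 1, 2, 4) == round_to(2+2+2+3, 2) == 10 words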
# HG changeset patch
# User aph
# Date 1405604908 -3600
# Thu Jul 17 14:48:28 2014 +0100
# Node ID 523ddf5c9ceb32d7879cf65e15450db2540aadaa
# Parent a54c05908b9b10694a1878f59ea1285063d68749
Merge aarch64-specific changes for JDK9
diff -r a54c05908b9b -r 523ddf5c9ceb src/cpu/aarch64/vm/aarch64.ad
--- a/src/cpu/aarch64/vm/aarch64.ad Thu Jul 17 13:16:17 2014 +0100
+++ b/src/cpu/aarch64/vm/aarch64.ad Thu Jul 17 14:48:28 2014 +0100
@@ -1050,7 +1050,7 @@
if (do_polling() && C->is_method_compilation()) {
st->print("# touch polling page\n\t");
- st->print("mov rscratch1, #0x%x\n\t", os::get_polling_page());
+ st->print("mov rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
st->print("ldr zr, [rscratch1]");
}
}
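Several hunks below are the same mechanical fix: pointer arguments to printf-style formats now go through p2i(). The helper is essentially a cast to an integer of pointer width (this stand-in is modeled on HotSpot's utility; treat the exact definition as an assumption):

#include <cstdint>
#include <cstdio>

inline intptr_t p2i(const void* p) { return (intptr_t)p; }

int main() {
  int x = 0;
  // Matches an INTPTR_FORMAT-style specifier without a format mismatch.
  printf("addr = 0x%lx\n", (unsigned long)p2i(&x));
  return 0;
}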
diff -r a54c05908b9b -r 523ddf5c9ceb src/cpu/aarch64/vm/bytecodes_aarch64.cpp
--- a/src/cpu/aarch64/vm/bytecodes_aarch64.cpp Thu Jul 17 13:16:17 2014 +0100
+++ b/src/cpu/aarch64/vm/bytecodes_aarch64.cpp Thu Jul 17 14:48:28 2014 +0100
@@ -28,12 +28,3 @@
#include "interpreter/bytecodes.hpp"
-void Bytecodes::pd_initialize() {
- // No aarch64 specific initialization
-}
-
-
-Bytecodes::Code Bytecodes::pd_base_code_for(Code code) {
- // No aarch64 specific bytecodes
- return code;
-}
diff -r a54c05908b9b -r 523ddf5c9ceb src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp Thu Jul 17 13:16:17 2014 +0100
+++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp Thu Jul 17 14:48:28 2014 +0100
@@ -354,7 +354,7 @@
// This specifies the rsp decrement needed to build the frame
-int LIR_Assembler::initial_frame_size_in_bytes() {
+int LIR_Assembler::initial_frame_size_in_bytes() const {
// if rounding, must let FrameMap know!
// The frame_map records size in slots (32bit word)
diff -r a54c05908b9b -r 523ddf5c9ceb src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp Thu Jul 17 13:16:17 2014 +0100
+++ b/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp Thu Jul 17 14:48:28 2014 +0100
@@ -1089,7 +1089,7 @@
#endif
CodeEmitInfo* info = state_for(x, x->state());
LIR_Opr reg = result_register_for(x->type());
- new_instance(reg, x->klass(),
+ new_instance(reg, x->klass(), x->is_unresolved(),
FrameMap::r2_oop_opr,
FrameMap::r5_oop_opr,
FrameMap::r4_oop_opr,
diff -r a54c05908b9b -r 523ddf5c9ceb src/cpu/aarch64/vm/c1_LinearScan_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_LinearScan_aarch64.cpp Thu Jul 17 13:16:17 2014 +0100
+++ b/src/cpu/aarch64/vm/c1_LinearScan_aarch64.cpp Thu Jul 17 14:48:28 2014 +0100
@@ -97,7 +97,7 @@
#ifndef PRODUCT
if (TraceFPURegisterUsage) {
- tty->print("FPU regs for block %d, LIR instr %d): ", b->block_id(), id); regs.print_on(tty); tty->print_cr("");
+ tty->print("FPU regs for block %d, LIR instr %d): ", b->block_id(), id); regs.print_on(tty); tty->print("\n");
}
#endif
}
diff -r a54c05908b9b -r 523ddf5c9ceb src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.cpp Thu Jul 17 13:16:17 2014 +0100
+++ b/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.cpp Thu Jul 17 14:48:28 2014 +0100
@@ -409,7 +409,7 @@
}
-void C1_MacroAssembler::build_frame(int framesize) {
+void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {
// If we have to make this method not-entrant we'll overwrite its
// first instruction with a jump. For this action to be legal we
// must ensure that this first instruction is a B, BL, NOP, BKPT,
@@ -417,7 +417,7 @@
nop();
// Make sure there is enough stack space for this method's activation.
// Note that we do this before doing an enter().
- generate_stack_overflow_check(framesize);
+ generate_stack_overflow_check(bang_size_in_bytes);
MacroAssembler::build_frame(framesize + 2 * wordSize);
if (NotifySimulator) {
notify(Assembler::method_entry);
diff -r a54c05908b9b -r 523ddf5c9ceb src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.hpp
--- a/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.hpp Thu Jul 17 13:16:17 2014 +0100
+++ b/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.hpp Thu Jul 17 14:48:28 2014 +0100
@@ -27,6 +27,8 @@
#ifndef CPU_AARCH64_VM_C1_MACROASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_VM_C1_MACROASSEMBLER_AARCH64_HPP
+using MacroAssembler::build_frame;
+
// C1_MacroAssembler contains high-level macros for C1
private:
diff -r a54c05908b9b -r 523ddf5c9ceb src/cpu/aarch64/vm/compiledIC_aarch64.cpp
--- a/src/cpu/aarch64/vm/compiledIC_aarch64.cpp Thu Jul 17 13:16:17 2014 +0100
+++ b/src/cpu/aarch64/vm/compiledIC_aarch64.cpp Thu Jul 17 14:48:28 2014 +0100
@@ -47,34 +47,6 @@
return is_icholder_entry(call->destination());
}
-//-----------------------------------------------------------------------------
-// High-level access to an inline cache. Guaranteed to be MT-safe.
-
-CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
- : _ic_call(call)
-{
- address ic_call = call->instruction_address();
-
- assert(ic_call != NULL, "ic_call address must be set");
- assert(nm != NULL, "must pass nmethod");
- assert(nm->contains(ic_call), "must be in nmethod");
-
- // Search for the ic_call at the given address.
- RelocIterator iter(nm, ic_call, ic_call+1);
- bool ret = iter.next();
- assert(ret == true, "relocInfo must exist at this address");
- assert(iter.addr() == ic_call, "must find ic_call");
- if (iter.type() == relocInfo::virtual_call_type) {
- virtual_call_Relocation* r = iter.virtual_call_reloc();
- _is_optimized = false;
- _value = nativeMovConstReg_at(r->cached_value());
- } else {
- assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
- _is_optimized = true;
- _value = NULL;
- }
-}
-
// ----------------------------------------------------------------------------
#define __ _masm.
@@ -124,7 +96,7 @@
if (TraceICs) {
ResourceMark rm;
tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
- instruction_address(),
+ p2i(instruction_address()),
callee->name_and_sig_as_C_string());
}
diff -r a54c05908b9b -r 523ddf5c9ceb src/cpu/aarch64/vm/frame_aarch64.cpp
--- a/src/cpu/aarch64/vm/frame_aarch64.cpp Thu Jul 17 13:16:17 2014 +0100
+++ b/src/cpu/aarch64/vm/frame_aarch64.cpp Thu Jul 17 14:48:28 2014 +0100
@@ -270,7 +270,7 @@
address* pc_addr = &(((address*) sp())[-1]);
if (TracePcPatching) {
tty->print_cr("patch_pc at address " INTPTR_FORMAT " [" INTPTR_FORMAT " -> " INTPTR_FORMAT "]",
- pc_addr, *pc_addr, pc);
+ p2i(pc_addr), p2i(*pc_addr), p2i(pc));
}
// Either the return address is the original one or we are going to
// patch in the same address that's already there.
diff -r a54c05908b9b -r 523ddf5c9ceb src/cpu/aarch64/vm/interp_masm_aarch64.cpp
--- a/src/cpu/aarch64/vm/interp_masm_aarch64.cpp Thu Jul 17 13:16:17 2014 +0100
+++ b/src/cpu/aarch64/vm/interp_masm_aarch64.cpp Thu Jul 17 14:48:28 2014 +0100
@@ -142,7 +142,7 @@
if (index_size == sizeof(u2)) {
load_unsigned_short(index, Address(rbcp, bcp_offset));
} else if (index_size == sizeof(u4)) {
- assert(EnableInvokeDynamic, "giant index used only for JSR 292");
+ // assert(EnableInvokeDynamic, "giant index used only for JSR 292");
ldrw(index, Address(rbcp, bcp_offset));
// Check if the secondary index definition is still ~x, otherwise
// we have to change the following assembler code to calculate the
diff -r a54c05908b9b -r 523ddf5c9ceb src/cpu/aarch64/vm/nativeInst_aarch64.cpp
--- a/src/cpu/aarch64/vm/nativeInst_aarch64.cpp Thu Jul 17 13:16:17 2014 +0100
+++ b/src/cpu/aarch64/vm/nativeInst_aarch64.cpp Thu Jul 17 14:48:28 2014 +0100
@@ -78,7 +78,7 @@
void NativeMovConstReg::print() {
tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
- instruction_address(), data());
+ p2i(instruction_address()), data());
}
//-------------------------------------------------------------------
diff -r a54c05908b9b -r 523ddf5c9ceb src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp
--- a/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp Thu Jul 17 13:16:17 2014 +0100
+++ b/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp Thu Jul 17 14:48:28 2014 +0100
@@ -199,8 +199,7 @@
// Restore machine SP
__ ldr(rscratch1, Address(rmethod, Method::const_offset()));
__ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
- __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size()
- + (EnableInvokeDynamic ? 2 : 0));
+ __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
__ ldr(rscratch2,
Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
__ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
@@ -243,8 +242,7 @@
// Calculate stack limit
__ ldr(rscratch1, Address(rmethod, Method::const_offset()));
__ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
- __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size()
- + (EnableInvokeDynamic ? 2 : 0));
+ __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
__ ldr(rscratch2,
Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
__ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
@@ -648,8 +646,7 @@
if (! native_call) {
__ ldr(rscratch1, Address(rmethod, Method::const_offset()));
__ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
- __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size()
- + (EnableInvokeDynamic ? 2 : 0));
+ __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
__ sub(rscratch1, sp, rscratch1, ext::uxtw, 3);
__ andr(sp, rscratch1, -16);
}
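With the EnableInvokeDynamic flag gone (invokedynamic is always on in JDK9), the conditional two words become unconditional. The SP computation these hunks implement, restated in plain C++ (the monitor size value is an assumption; the 8-byte scaling is the ext::uxtw, 3 in the sub, and the mask is the andr with -16):

#include <cstdint>

const unsigned kMonitorSizeWords = 2;  // assumed interpreter_frame_monitor_size()

intptr_t* compute_machine_sp(intptr_t* initial_sp, unsigned max_stack) {
  unsigned words = max_stack + kMonitorSizeWords + 2;
  intptr_t* limit = initial_sp - words;                   // 8 bytes per word
  return (intptr_t*)((uintptr_t)limit & ~(uintptr_t)15);  // 16-byte align down
}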
@@ -1541,7 +1538,7 @@
return (overhead_size + method_stack + stub_code);
}
-int AbstractInterpreter::layout_activation(Method* method,
+void AbstractInterpreter::layout_activation(Method* method,
int tempcount,
int popframe_extra_args,
int moncount,
@@ -1581,10 +1578,6 @@
if (interpreter_frame != NULL) {
#ifdef ASSERT
- if (!EnableInvokeDynamic)
- // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
- // Probably, since deoptimization doesn't work yet.
- assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
#endif
@@ -1627,7 +1620,6 @@
// interpreter_frame->obj_at_put(frame::sender_sp_offset,
// (oop)interpreter_frame->addr_at(frame::sender_sp_offset));
}
- return size;
}
//-----------------------------------------------------------------------------
@@ -1678,8 +1670,7 @@
// Calculate stack limit
__ ldr(rscratch1, Address(rmethod, Method::const_offset()));
__ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
- __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size()
- + (EnableInvokeDynamic ? 2 : 0) + 2);
+ __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
__ ldr(rscratch2,
Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
__ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
@@ -1687,7 +1678,7 @@
// r0: exception handler entry point
// r3: preserved exception oop
- // rbcp: bcp for exception handler
+ // rbcp: bcp for exception handlerg
__ push_ptr(r3); // push exception which is now the only value on the stack
__ br(r0); // jump to exception handler (may be _remove_activation_entry!)
@@ -1788,7 +1779,7 @@
assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");
#if INCLUDE_JVMTI
- if (EnableInvokeDynamic) {
+ {
Label L_done;
__ ldrb(rscratch1, Address(rbcp, 0));
@@ -1811,8 +1802,7 @@
// Restore machine SP
__ ldr(rscratch1, Address(rmethod, Method::const_offset()));
__ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
- __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size()
- + (EnableInvokeDynamic ? 2 : 0));
+ __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
__ ldr(rscratch2,
Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
__ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
diff -r a54c05908b9b -r 523ddf5c9ceb src/cpu/aarch64/vm/templateTable_aarch64.cpp
--- a/src/cpu/aarch64/vm/templateTable_aarch64.cpp Thu Jul 17 13:16:17 2014 +0100
+++ b/src/cpu/aarch64/vm/templateTable_aarch64.cpp Thu Jul 17 14:48:28 2014 +0100
@@ -3152,12 +3152,6 @@
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
- if (!EnableInvokeDynamic) {
- // rewriter does not generate this bytecode
- __ should_not_reach_here();
- return;
- }
-
prepare_invoke(byte_no, rmethod, r0, r2);
__ verify_method_ptr(r2);
__ verify_oop(r2);
@@ -3177,17 +3171,6 @@
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
- if (!EnableInvokeDynamic) {
- // We should not encounter this bytecode if !EnableInvokeDynamic.
- // The verifier will stop it. However, if we get past the verifier,
- // this will stop the thread in a reasonable way, without crashing the JVM.
- __ call_VM(noreg, CAST_FROM_FN_PTR(address,
- InterpreterRuntime::throw_IncompatibleClassChangeError));
- // the call_VM checks for exception, so we should never return here.
- __ should_not_reach_here();
- return;
- }
-
prepare_invoke(byte_no, rmethod, r0);
// r0: CallSite object (from cpool->resolved_references[])
diff -r a54c05908b9b -r 523ddf5c9ceb src/cpu/aarch64/vm/vtableStubs_aarch64.cpp
--- a/src/cpu/aarch64/vm/vtableStubs_aarch64.cpp Thu Jul 17 13:16:17 2014 +0100
+++ b/src/cpu/aarch64/vm/vtableStubs_aarch64.cpp Thu Jul 17 14:48:28 2014 +0100
@@ -104,7 +104,7 @@
if (PrintMiscellaneous && (WizardMode || Verbose)) {
tty->print_cr("vtable #%d at "PTR_FORMAT"[%d] left over: %d",
- vtable_index, s->entry_point(),
+ vtable_index, p2i(s->entry_point()),
(int)(s->code_end() - s->entry_point()),
(int)(s->code_end() - __ pc()));
}
@@ -185,7 +185,7 @@
if (PrintMiscellaneous && (WizardMode || Verbose)) {
tty->print_cr("itable #%d at "PTR_FORMAT"[%d] left over: %d",
- itable_index, s->entry_point(),
+ itable_index, p2i(s->entry_point()),
(int)(s->code_end() - s->entry_point()),
(int)(s->code_end() - __ pc()));
}
diff -r a54c05908b9b -r 523ddf5c9ceb src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp
--- a/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp Thu Jul 17 13:16:17 2014 +0100
+++ b/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp Thu Jul 17 14:48:28 2014 +0100
@@ -621,12 +621,12 @@
st->cr();
#else
for (int r = 0; r < 31; r++)
- st->print_cr( "R%d=" INTPTR_FORMAT, r, uc->uc_mcontext.regs[r]);
+ st->print_cr( "R%d=" INTPTR_FORMAT, r, (size_t)uc->uc_mcontext.regs[r]);
#endif
st->cr();
intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
- st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
+ st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", p2i(sp));
print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
st->cr();
@@ -634,7 +634,7 @@
// point to garbage if entry point in an nmethod is corrupted. Leave
// this at the end, and hope for the best.
address pc = os::Linux::ucontext_get_pc(uc);
- st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
+ st->print_cr("Instructions: (pc=" PTR_FORMAT ")", p2i(pc));
print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
}
@@ -671,7 +671,7 @@
st->print("R15="); print_location(st, uc->uc_mcontext.gregs[REG_R15]);
#else
for (int r = 0; r < 31; r++)
- st->print_cr( "R%d=" INTPTR_FORMAT, r, uc->uc_mcontext.regs[r]);
+ st->print_cr( "R%d=" INTPTR_FORMAT, r, (uintptr_t)uc->uc_mcontext.regs[r]);
#endif
st->cr();
}