[aarch64-port-dev ] Fix a tonne of bogus comments
Andrew Haley
aph at redhat.com
Thu May 29 17:25:42 UTC 2014
These are mostly left-over x86isms.
Andrew.
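A minimal illustrative sketch, assuming only the x86 -> AArch64 pairs visible in the hunks below (rax -> r0, rdx -> r3, rbx -> r19, esi -> rbcp, ebx -> rscratch1, rdi -> rthread) and not a general mapping -- the right AArch64 name always depends on the surrounding code. The hypothetical helper just flags x86 register names that survive in a comment line:

#include <iostream>
#include <string>
#include <vector>

// Hypothetical audit helper: flag x86 register names left over in a comment.
struct X86ism {
  std::string x86_name;      // name copied over from the x86 port
  std::string aarch64_hint;  // replacement used in this changeset's hunks
};

static const std::vector<X86ism> kHints = {
  {"rax", "r0"}, {"rdx", "r3"}, {"rbx", "r19"},
  {"esi", "rbcp"}, {"ebx", "rscratch1"}, {"rdi", "rthread"},
};

// Returns the hints whose x86 name appears in the given comment text.
std::vector<X86ism> find_x86isms(const std::string& comment) {
  std::vector<X86ism> hits;
  for (const X86ism& h : kHints) {
    if (comment.find(h.x86_name) != std::string::npos)
      hits.push_back(h);
  }
  return hits;
}

int main() {
  const std::string line = "// this overwrites the saved rax";
  for (const X86ism& h : find_x86isms(line)) {
    std::cout << line << "\n  mentions x86 '" << h.x86_name
              << "'; the patch uses '" << h.aarch64_hint << "' in that spot\n";
  }
  return 0;
}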
# HG changeset patch
# User aph
# Date 1401381560 -3600
# Thu May 29 17:39:20 2014 +0100
# Node ID 02139cd80d48b9c6c30302e2a5f543ef9bc4e53e
# Parent 79225ea063f38fab53c57211175d2588218c7871
Fix a tonne of bogus comments.
diff -r 79225ea063f3 -r 02139cd80d48 src/cpu/aarch64/vm/aarch64_call.cpp
--- a/src/cpu/aarch64/vm/aarch64_call.cpp Thu May 29 17:38:43 2014 +0100
+++ b/src/cpu/aarch64/vm/aarch64_call.cpp Thu May 29 17:39:20 2014 +0100
@@ -180,7 +180,7 @@
default:
break;
case MacroAssembler::ret_type_integral:
- // this overwrites the saved rax
+ // this overwrites the saved r0
*return_slot = sim->getCPUState().xreg(R0, 0);
break;
case MacroAssembler::ret_type_float:
diff -r 79225ea063f3 -r 02139cd80d48 src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp Thu May 29 17:38:43 2014 +0100
+++ b/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp Thu May 29 17:39:20 2014 +0100
@@ -209,7 +209,7 @@
__ bl(RuntimeAddress(Runtime1::entry_for(_stub_id)));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
- assert(_result->as_register() == r0, "result must in rax,");
+ assert(_result->as_register() == r0, "result must be in r0");
__ b(_continuation);
}
diff -r 79225ea063f3 -r 02139cd80d48 src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp Thu May 29 17:38:43 2014 +0100
+++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp Thu May 29 17:39:20 2014 +0100
@@ -378,7 +378,7 @@
int offset = code_offset();
- // the exception oop and pc are in rax, and rdx
+ // the exception oop and pc are in r0, and r3
// no other registers need to be preserved, so invalidate them
__ invalidate_registers(false, true, true, false, true, true);
@@ -2073,7 +2073,7 @@
add_call_info(pc_for_athrow_offset, info); // for exception handler
__ verify_not_null_oop(r0);
- // search an exception handler (rax: exception oop, rdx: throwing pc)
+ // search an exception handler (r0: exception oop, r3: throwing pc)
if (compilation()->has_fpu_code()) {
unwind_id = Runtime1::handle_exception_id;
} else {
diff -r 79225ea063f3 -r 02139cd80d48 src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.hpp
--- a/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.hpp Thu May 29 17:38:43 2014 +0100
+++ b/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.hpp Thu May 29 17:39:20 2014 +0100
@@ -54,7 +54,7 @@
Register result);
// locking
- // hdr : must be rax, contents destroyed
+ // hdr : must be r0, contents destroyed
// obj : must point to the object to lock, contents preserved
// disp_hdr: must point to the displaced header location, contents preserved
// scratch : scratch register, contents destroyed
@@ -64,7 +64,7 @@
// unlocking
// hdr : contents destroyed
// obj : must point to the object to lock, contents preserved
- // disp_hdr: must be eax & must point to the displaced header location, contents destroyed
+ // disp_hdr: must be r0 & must point to the displaced header location, contents destroyed
void unlock_object(Register swap, Register obj, Register lock, Label& slow_case);
void initialize_object(
@@ -79,7 +79,7 @@
// allocation of fixed-size objects
// (can also be used to allocate fixed-size arrays, by setting
// hdr_size correctly and storing the array length afterwards)
- // obj : must be rax, will contain pointer to allocated object
+ // obj : will contain pointer to allocated object
// t1, t2 : scratch registers - contents destroyed
// header_size: size of object header in words
// object_size: total size of object in words
@@ -91,7 +91,7 @@
};
// allocation of arrays
- // obj : must be rax, will contain pointer to allocated object
+ // obj : will contain pointer to allocated object
// len : array length in number of elements
// t : scratch register - contents destroyed
// header_size: size of object header in words
diff -r 79225ea063f3 -r 02139cd80d48 src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp Thu May 29 17:38:43 2014 +0100
+++ b/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp Thu May 29 17:39:20 2014 +0100
@@ -850,7 +850,7 @@
// refilling the TLAB or allocating directly from eden.
Label retry_tlab, try_eden;
const Register thread =
- __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi
+ __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves r19 & r3, returns rthread
__ bind(retry_tlab);
@@ -945,7 +945,7 @@
oop_maps->add_gc_map(call_offset, map);
restore_live_registers_except_r0(sasm);
- // rax,: new multi array
+ // r0: new multi array
__ verify_oop(r0);
}
break;
diff -r 79225ea063f3 -r 02139cd80d48 src/cpu/aarch64/vm/compiledIC_aarch64.cpp
--- a/src/cpu/aarch64/vm/compiledIC_aarch64.cpp Thu May 29 17:38:43 2014 +0100
+++ b/src/cpu/aarch64/vm/compiledIC_aarch64.cpp Thu May 29 17:39:20 2014 +0100
@@ -81,8 +81,8 @@
void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
// Stub is fixed up when the corresponding call is converted from
// calling compiled code to calling interpreted code.
- // movq rbx, 0
- // jmp -5 # to self
+ // movq rmethod, 0
+ // jmp -4 # to self
// address mark = cbuf.insts_mark(); // Get mark within main instrs section.
diff -r 79225ea063f3 -r 02139cd80d48 src/cpu/aarch64/vm/interp_masm_aarch64.cpp
--- a/src/cpu/aarch64/vm/interp_masm_aarch64.cpp Thu May 29 17:38:43 2014 +0100
+++ b/src/cpu/aarch64/vm/interp_masm_aarch64.cpp Thu May 29 17:39:20 2014 +0100
@@ -263,7 +263,7 @@
profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5
// Do the check.
- check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows rcx
+ check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
// Profile the failure of the check.
profile_typecheck_failed(r2); // blows r2
@@ -721,7 +721,7 @@
save_bcp(); // Save in case of exception
// Convert from BasicObjectLock structure to object and BasicLock
- // structure Store the BasicLock address into %rax
+ // structure Store the BasicLock address into %r0
lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
// Load oop into obj_reg(%c_rarg3)
diff -r 79225ea063f3 -r 02139cd80d48 src/cpu/aarch64/vm/interp_masm_aarch64.hpp
--- a/src/cpu/aarch64/vm/interp_masm_aarch64.hpp Thu May 29 17:38:43 2014 +0100
+++ b/src/cpu/aarch64/vm/interp_masm_aarch64.hpp Thu May 29 17:39:20 2014 +0100
@@ -166,14 +166,14 @@
// Dispatching
void dispatch_prolog(TosState state, int step = 0);
void dispatch_epilog(TosState state, int step = 0);
- // dispatch via ebx (assume ebx is loaded already)
+ // dispatch via rscratch1
void dispatch_only(TosState state);
- // dispatch normal table via ebx (assume ebx is loaded already)
+ // dispatch normal table via rscratch1 (assume rscratch1 is loaded already)
void dispatch_only_normal(TosState state);
void dispatch_only_noverify(TosState state);
- // load ebx from [esi + step] and dispatch via ebx
+ // load rscratch1 from [rbcp + step] and dispatch via rscratch1
void dispatch_next(TosState state, int step = 0);
- // load ebx from [esi] and dispatch via ebx and table
+ // load rscratch1 from [rbcp] and dispatch via rscratch1 and table
void dispatch_via (TosState state, address* table);
// jump to an invoked target
diff -r 79225ea063f3 -r 02139cd80d48 src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp Thu May 29 17:38:43 2014 +0100
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp Thu May 29 17:39:20 2014 +0100
@@ -1933,9 +1933,6 @@
BytecodeCounter::print();
}
#endif
- // To see where a verify_oop failed, get $ebx+40/X for this frame.
- // XXX correct this offset for amd64
- // This is the value of eip which points to where verify_oop will return.
if (os::message_box(msg, "Execution stopped, print registers?")) {
ttyLocker ttyl;
tty->print_cr(" pc = 0x%016lx", pc);
diff -r 79225ea063f3 -r 02139cd80d48 src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp Thu May 29 17:38:43 2014 +0100
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp Thu May 29 17:39:20 2014 +0100
@@ -103,7 +103,7 @@
// Biased locking support
// lock_reg and obj_reg must be loaded up with the appropriate values.
- // swap_reg must be rax, and is killed.
+ // swap_reg is killed.
// tmp_reg is optional. If it is supplied (i.e., != noreg) it will
// be killed; if not supplied, push/pop will be used internally to
// allocate a temporary (inefficient, avoid if possible).
@@ -765,88 +765,6 @@
void int3();
#endif
- // currently unimplemented
-#if 0
- // Long operation macros for a 32bit cpu
- // Long negation for Java
- void lneg(Register hi, Register lo);
-
- // Long multiplication for Java
- // (destroys contents of eax, ebx, ecx and edx)
- void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y
-
- // Long shifts for Java
- // (semantics as described in JVM spec.)
- void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f)
- void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f)
-
- // Long compare for Java
- // (semantics as described in JVM spec.)
- void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)
-
-
- // misc
-
- // Sign extension
- void sign_extend_short(Register reg);
- void sign_extend_byte(Register reg);
-
- // Division by power of 2, rounding towards 0
- void division_with_shift(Register reg, int shift_value);
-#endif
-
- // unimpelements
-#if 0
- // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
- //
- // CF (corresponds to C0) if x < y
- // PF (corresponds to C2) if unordered
- // ZF (corresponds to C3) if x = y
- //
- // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
- // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
- void fcmp(Register tmp);
- // Variant of the above which allows y to be further down the stack
- // and which only pops x and y if specified. If pop_right is
- // specified then pop_left must also be specified.
- void fcmp(Register tmp, int index, bool pop_left, bool pop_right);
-
- // Floating-point comparison for Java
- // Compares the top-most stack entries on the FPU stack and stores the result in dst.
- // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
- // (semantics as described in JVM spec.)
- void fcmp2int(Register dst, bool unordered_is_less);
- // Variant of the above which allows y to be further down the stack
- // and which only pops x and y if specified. If pop_right is
- // specified then pop_left must also be specified.
- void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);
-
- // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
- // tmp is a temporary register, if none is available use noreg
- void fremr(Register tmp);
-
-
- // Inlined sin/cos generator for Java; must not use CPU instruction
- // directly on Intel as it does not have high enough precision
- // outside of the range [-pi/4, pi/4]. Extra argument indicate the
- // number of FPU stack slots in use; all but the topmost will
- // require saving if a slow case is necessary. Assumes argument is
- // on FP TOS; result is on FP TOS. No cpu registers are changed by
- // this code.
- void trigfunc(char trig, int num_fpu_regs_in_use = 1);
-
- // branch to L if FPU flag C2 is set/not set
- // tmp is a temporary register, if none is available use noreg
- void jC2 (Register tmp, Label& L);
- void jnC2(Register tmp, Label& L);
-
- void push_IU_state();
- void pop_IU_state();
-
- void push_FPU_state();
- void pop_FPU_state();
-#endif
-
void push_CPU_state();
void pop_CPU_state() ;
@@ -1011,33 +929,6 @@
// Support for serializing memory accesses between threads
void serialize_memory(Register thread, Register tmp);
- // unimplemented
-#if 0
- void verify_tlab();
-
- // Biased locking support
- // lock_reg and obj_reg must be loaded up with the appropriate values.
- // swap_reg must be rax, and is killed.
- // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
- // be killed; if not supplied, push/pop will be used internally to
- // allocate a temporary (inefficient, avoid if possible).
- // Optional slow case is for implementations (interpreter and C1) which branch to
- // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
- // Returns offset of first potentially-faulting instruction for null
- // check info (currently consumed only by C1). If
- // swap_reg_contains_mark is true then returns -1 as it is assumed
- // the calling code has already passed any potential faults.
- int biased_locking_enter(Register lock_reg, Register obj_reg,
- Register swap_reg, Register tmp_reg,
- bool swap_reg_contains_mark,
- Label& done, Label* slow_case = NULL,
- BiasedLockingCounters* counters = NULL);
- void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
-
-
- Condition negate_condition(Condition cond);
-#endif
-
// Arithmetics
void addptr(Address dst, int32_t src) {
diff -r 79225ea063f3 -r 02139cd80d48 src/cpu/aarch64/vm/methodHandles_aarch64.cpp
--- a/src/cpu/aarch64/vm/methodHandles_aarch64.cpp Thu May 29 17:38:43 2014 +0100
+++ b/src/cpu/aarch64/vm/methodHandles_aarch64.cpp Thu May 29 17:39:20 2014 +0100
@@ -262,7 +262,7 @@
// temps used in this code are not used in *either* compiled or interpreted calling sequences
Register temp1 = r10;
Register temp2 = r11;
- Register temp3 = r14; // r13 is live ty this point: it contains the sender SP
+ Register temp3 = r14; // r13 is live by this point: it contains the sender SP
if (for_compiler_entry) {
assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment");
assert_different_registers(temp1, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7);
@@ -331,7 +331,7 @@
// Live registers at this point:
// member_reg - MemberName that was the trailing argument
// temp1_recv_klass - klass of stacked receiver, if needed
- // rsi/r13 - interpreter linkage (if interpreted) ??? FIXME
+ // r13 - interpreter linkage (if interpreted) ??? FIXME
// r1 ... r0 - compiler arguments (if compiled)
Label L_incompatible_class_change_error;
@@ -416,7 +416,7 @@
break;
}
- // live at this point: rmethod, rsi/r13 (if interpreted)
+ // live at this point: rmethod, r13 (if interpreted)
// After figuring out which concrete method to call, jump into it.
// Note that this works in the interpreter with no data motion.
diff -r 79225ea063f3 -r 02139cd80d48 src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
--- a/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp Thu May 29 17:38:43 2014 +0100
+++ b/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp Thu May 29 17:39:20 2014 +0100
@@ -479,7 +479,7 @@
// stack pointer. It also recalculates and aligns sp.
// A c2i adapter is frameless because the *callee* frame, which is
- // interpreted, routinely repairs its caller's es (from sender_sp,
+ // interpreted, routinely repairs its caller's sp (from sender_sp,
// which is set up via the senderSP register).
// In other words, if *either* the caller or callee is interpreted, we can
@@ -702,7 +702,7 @@
AArch64Simulator *sim = NULL;
size_t len = 65536;
if (NotifySimulator) {
- name = new char[len];
+ name = NEW_C_HEAP_ARRAY(char, len, mtInternal);
}
if (name) {
@@ -757,7 +757,7 @@
name[0] = 'c';
name[2] = 'i';
sim->notifyCompile(name, c2i_entry);
- delete[] name;
+ FREE_C_HEAP_ARRAY(char, name, mtInternal);
}
#endif
@@ -1608,9 +1608,6 @@
// Mark location of rfp (someday)
// map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rfp));
- // Use eax, ebx as temporaries during any memory-memory moves we have to do
- // All inbound args are referenced based on rfp and all outbound args via sp.
-
int float_args = 0;
int int_args = 0;
@@ -1959,9 +1956,6 @@
// Don't use call_VM as it will see a possible pending exception and forward it
// and never return here preventing us from clearing _last_native_pc down below.
- // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
- // preserved and correspond to the bcp/locals pointers. So we do a runtime call
- // by hand.
//
save_native_result(masm, ret_type, stack_slots);
__ mov(c_rarg0, rthread);
@@ -2887,7 +2881,7 @@
oop_maps->add_gc_map( __ offset() - start, map);
- // rax contains the address we are going to jump to assuming no exception got installed
+ // r0 contains the address we are going to jump to assuming no exception got installed
// clear last_Java_sp
__ reset_last_Java_frame(false, true);
@@ -2990,7 +2984,6 @@
// Store exception in Thread object. We cannot pass any arguments to the
// handle_exception call, since we do not want to make any assumption
// about the size of the frame where the exception happened in.
- // c_rarg0 is either rdi (Linux) or rcx (Windows).
__ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
__ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
diff -r 79225ea063f3 -r 02139cd80d48 src/cpu/aarch64/vm/stubGenerator_aarch64.cpp
--- a/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp Thu May 29 17:38:43 2014 +0100
+++ b/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp Thu May 29 17:39:20 2014 +0100
@@ -456,7 +456,7 @@
// not the case if the callee is compiled code => need to setup the
// rsp.
//
- // rax: exception oop
+ // r0: exception oop
// NOTE: this is used as a target from the signal handler so it
// needs an x86 prolog which returns into the current simulator
@@ -850,21 +850,6 @@
void array_overlap_test(Label& L_no_overlap, Address::sxtw sf) { __ b(L_no_overlap); }
void array_overlap_test(address no_overlap_target, Label* NOLp, int sf) { Unimplemented(); }
- // Shuffle first three arg regs on Windows into Linux/Solaris locations.
- //
- // Outputs:
- // rdi - rcx
- // rsi - rdx
- // rdx - r8
- // rcx - r9
- //
- // Registers r9 and r10 are used to save rdi and rsi on Windows, which latter
- // are non-volatile. r9 and r10 should not be used by the caller.
- //
- void setup_arg_regs(int nargs = 3) { Unimplemented(); }
-
- void restore_arg_regs() { Unimplemented(); }
-
// Generate code for an array write pre barrier
//
// addr - starting address
@@ -1796,8 +1781,8 @@
// rsp+40 - element count (32-bits)
//
// Output:
- // rax == 0 - success
- // rax == -1^K - failure, where K is partial transfer count
+ // r0 == 0 - success
+ // r0 == -1^K - failure, where K is partial transfer count
//
address generate_generic_copy(const char *name,
address byte_copy_entry, address short_copy_entry,
@@ -1952,8 +1937,12 @@
* c_rarg1 - byte* buf
* c_rarg2 - int length
*
- * Ouput:
- * rax - int crc result
+ * Output:
+ * r0 - int crc result
+ *
+ * Preserves:
+ * r13
+ *
*/
address generate_updateBytesCRC32() {
assert(UseCRC32Intrinsics, "what are we doing here?");
diff -r 79225ea063f3 -r 02139cd80d48 src/cpu/aarch64/vm/templateTable_aarch64.cpp
--- a/src/cpu/aarch64/vm/templateTable_aarch64.cpp Thu May 29 17:38:43 2014 +0100
+++ b/src/cpu/aarch64/vm/templateTable_aarch64.cpp Thu May 29 17:39:20 2014 +0100
@@ -2724,7 +2724,7 @@
// access constant pool cache
__ get_cache_and_index_at_bcp(r2, r1, 1);
- // test for volatile with rdx
+ // test for volatile with r3
__ ldrw(r3, Address(r2, in_bytes(base +
ConstantPoolCacheEntry::flags_offset())));
@@ -3188,7 +3188,7 @@
// r0: CallSite object (from cpool->resolved_references[])
// rmethod: MH.linkToCallSite method (from f2)
- // Note: rax_callsite is already pushed by prepare_invoke
+ // Note: r0_callsite is already pushed by prepare_invoke
// %%% should make a type profile for any invokedynamic that takes a ref argument
// profile this call
@@ -3657,7 +3657,6 @@
__ should_not_reach_here();
// call run-time routine
- // rsi: points to monitor entry
__ bind(found);
__ push_ptr(r0); // make sure object is on stack (contract with oopMaps)
__ unlock_object(c_rarg1);
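Besides the comment renames, the sharedRuntime_aarch64.cpp hunks above also replace a raw new char[len] / delete[] pair with HotSpot's NEW_C_HEAP_ARRAY / FREE_C_HEAP_ARRAY macros, whose extra argument (mtInternal here) tags the allocation for native memory tracking. A minimal sketch of that shape, using hypothetical stand-in macros rather than the real HotSpot ones:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Stand-ins for illustration only: the real macros also record the memory
// type (e.g. mtInternal) so the allocation is attributed correctly; this
// sketch just forwards to malloc/free and ignores the tag.
enum MemoryType { mtInternal };

#define NEW_C_HEAP_ARRAY(type, size, memtype) \
  static_cast<type*>(std::malloc((size) * sizeof(type)))
#define FREE_C_HEAP_ARRAY(type, old, memtype) std::free(old)

int main() {
  const std::size_t len = 65536;
  char* name = NEW_C_HEAP_ARRAY(char, len, mtInternal);  // was: new char[len]
  std::snprintf(name, len, "c2i adapter stub");
  std::printf("%s\n", name);
  FREE_C_HEAP_ARRAY(char, name, mtInternal);             // was: delete[] name
  return 0;
}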