[aarch64-port-dev ] All address constants are 48 bits in size
Andrew Haley
aph at redhat.com
Thu Jun 19 14:38:36 UTC 2014
This is the first of a set of patches that reorganize addresses and
relocations.
AArch64-mode virtual address space is only 48 bits, not 64 bits, so we
can always use three movz/movk instructions to generate any address.
# HG changeset patch
# User aph
# Date 1402917903 14400
# Mon Jun 16 07:25:03 2014 -0400
# Node ID e1af2a22237811f065ecadbcbc2113e102fc7a29
# Parent 8cb098504801769e6c53eec016a1767b0aa59c79
All address constants are 48 bits in size.
diff -r 8cb098504801 -r e1af2a222378 src/cpu/aarch64/vm/aarch64.ad
--- a/src/cpu/aarch64/vm/aarch64.ad Thu Jun 05 13:48:13 2014 +0100
+++ b/src/cpu/aarch64/vm/aarch64.ad Mon Jun 16 07:25:03 2014 -0400
@@ -864,7 +864,8 @@
if (cb) {
return 4;
} else {
- return 20;
+ // A 48-bit address. See movptr().
+ return 16;
}
}
@@ -2099,7 +2100,6 @@
// movz xscratch1 0xnnnn <-- current pc is here
// movk xscratch1 0xnnnn
// movk xscratch1 0xnnnn
- // movk xscratch1 0xnnnn
// str xscratch1, [xthread,#anchor_pc_off]
// mov xscratch2, sp
// str xscratch2, [xthread,#anchor_sp_off
@@ -2111,7 +2111,6 @@
// movz xscratch1 0xnnnn
// movk xscratch1 0xnnnn
// movk xscratch1 0xnnnn
- // movk xscratch1 0xnnnn
// blrt xscratch1
// . . .
//
@@ -2121,18 +2120,18 @@
// stub. we assert that nargs is < 7.
//
// so the offset we need to add to the pc (in 32-bit words) is
- // 4 + <-- load 64 bit constant return pc
+ // 3 + <-- load 48-bit constant return pc
// 1 + <-- write anchor pc
// 1 + <-- copy sp
// 1 + <-- write anchor sp
// nargs + <-- java stub arg count
// 1 + <-- extra thread arg
// [ 1 + ] <-- optional ret address of stub caller
- // 4 + <-- load 64 bit call target address
+ // 3 + <-- load 48-bit call target address
// 1 <-- blrt instruction
//
- // i.e we need to add (nargs + 13) * 4 bytes or (nargs + 14) * 4 bytes
- //
+ // i.e. we need to add (nargs + 11) * 4 bytes or (nargs + 12) * 4 bytes
+ //
enc_class aarch64_enc_save_pc() %{
Compile* C = ra_->C;
@@ -2141,7 +2140,7 @@
assert(nargs <= 8, "opto runtime stub has more than 8 args!");
MacroAssembler _masm(&cbuf);
address pc = __ pc();
- int call_offset = (nargs + 13) * 4;
+ int call_offset = (nargs + 11) * 4;
int field_offset = in_bytes(JavaThread::frame_anchor_offset()) +
in_bytes(JavaFrameAnchor::last_Java_pc_offset());
__ mov(rscratch1, InternalAddress(pc + call_offset));
diff -r 8cb098504801 -r e1af2a222378 src/cpu/aarch64/vm/assembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/assembler_aarch64.cpp Thu Jun 05 13:48:13 2014 +0100
+++ b/src/cpu/aarch64/vm/assembler_aarch64.cpp Mon Jun 16 07:25:03 2014 -0400
@@ -1273,7 +1273,7 @@
if (rtype == relocInfo::none)
__ mov(r, target());
else
- __ mov64(r, (uint64_t)target());
+ __ movptr(r, (uint64_t)target());
break;
}
default:
diff -r 8cb098504801 -r e1af2a222378 src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp Thu Jun 05 13:48:13 2014 +0100
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp Mon Jun 16 07:25:03 2014 -0400
@@ -65,6 +65,7 @@
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
void MacroAssembler::pd_patch_instruction(address branch, address target) {
+ assert((uint64_t)target < (1ul << 48), "48-bit overflow in address constant");
long offset = (target - branch) >> 2;
unsigned insn = *(unsigned*)branch;
if ((Instruction_aarch64::extract(insn, 29, 24) & 0b111011) == 0b011000) {
@@ -139,10 +140,11 @@
} else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) {
// Move wide constant
u_int64_t dest = (u_int64_t)target;
+ assert(nativeInstruction_at(branch+4)->is_movk(), "wrong insns in patch");
+ assert(nativeInstruction_at(branch+8)->is_movk(), "wrong insns in patch");
Instruction_aarch64::patch(branch, 20, 5, dest & 0xffff);
Instruction_aarch64::patch(branch += 4, 20, 5, (dest >>= 16) & 0xffff);
Instruction_aarch64::patch(branch += 4, 20, 5, (dest >>= 16) & 0xffff);
- Instruction_aarch64::patch(branch += 4, 20, 5, (dest >>= 16));
} else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
// nothing to do
@@ -216,14 +218,13 @@
ShouldNotReachHere();
}
} else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) {
- // Move wide constant
- // FIXME: We assume these instructions are movz, movk, movk, movk.
- // We don't assert this; we should.
+ // Move address constant: movz, movk, movk. See movptr().
u_int32_t *insns = (u_int32_t *)insn_addr;
+ assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
+ assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
return address(u_int64_t(Instruction_aarch64::extract(insns[0], 20, 5))
+ (u_int64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
- + (u_int64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32)
- + (u_int64_t(Instruction_aarch64::extract(insns[3], 20, 5)) << 48));
+ + (u_int64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
} else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
return 0;
@@ -1246,10 +1247,14 @@
InstructionMark im(this);
code_section()->relocate(inst_mark(), dest.rspec());
u_int64_t imm64 = (u_int64_t)dest.target();
- mov64(r, imm64);
+ movptr(r, imm64);
}
-void MacroAssembler::mov64(Register r, uintptr_t imm64) {
+// Move a constant pointer into r. In AArch64 mode the virtual
+// address space is 48 bits in size, so we only need three
+// instructions to create a patchable instruction sequence that can
+// reach anywhere.
+void MacroAssembler::movptr(Register r, uintptr_t imm64) {
#ifndef PRODUCT
{
char buffer[64];
@@ -1257,13 +1262,12 @@
block_comment(buffer);
}
#endif
+ assert(imm64 < (1ul << 48), "48-bit overflow in address constant");
movz(r, imm64 & 0xffff);
imm64 >>= 16;
movk(r, imm64 & 0xffff, 16);
imm64 >>= 16;
movk(r, imm64 & 0xffff, 32);
- imm64 >>= 16;
- movk(r, imm64 & 0xffff, 48);
}
void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
diff -r 8cb098504801 -r e1af2a222378 src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp Thu Jun 05 13:48:13 2014 +0100
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp Mon Jun 16 07:25:03 2014 -0400
@@ -439,7 +439,7 @@
}
void mov(Register dst, Address a);
- void mov64(Register r, uintptr_t imm64);
+ void movptr(Register r, uintptr_t imm64);
// macro instructions for accessing and updating floating point
// status register
diff -r 8cb098504801 -r e1af2a222378 src/cpu/aarch64/vm/nativeInst_aarch64.cpp
--- a/src/cpu/aarch64/vm/nativeInst_aarch64.cpp Thu Jun 05 13:48:13 2014 +0100
+++ b/src/cpu/aarch64/vm/nativeInst_aarch64.cpp Mon Jun 16 07:25:03 2014 -0400
@@ -207,6 +207,14 @@
Instruction_aarch64::extract(insn, 4, 0) == 0b11111);
}
+bool NativeInstruction::is_movz() {
+ return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b10100101;
+}
+
+bool NativeInstruction::is_movk() {
+ return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b11100101;
+}
+
// MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
diff -r 8cb098504801 -r e1af2a222378 src/cpu/aarch64/vm/nativeInst_aarch64.hpp
--- a/src/cpu/aarch64/vm/nativeInst_aarch64.hpp Thu Jun 05 13:48:13 2014 +0100
+++ b/src/cpu/aarch64/vm/nativeInst_aarch64.hpp Mon Jun 16 07:25:03 2014 -0400
@@ -65,6 +65,8 @@
inline bool is_cond_jump();
bool is_safepoint_poll();
inline bool is_mov_literal64();
+ bool is_movz();
+ bool is_movk();
protected:
address addr_at(int offset) const { return address(this) + offset; }
@@ -105,11 +107,12 @@
};
inline NativeInstruction* nativeInstruction_at(address address) {
- NativeInstruction* inst = (NativeInstruction*)address;
-#ifdef ASSERT
- //inst->verify();
-#endif
- return inst;
+ return (NativeInstruction*)address;
+}
+
+// The natural type of an AArch64 instruction is uint32_t
+inline NativeInstruction* nativeInstruction_at(uint32_t *address) {
+ return (NativeInstruction*)address;
}
inline NativeCall* nativeCall_at(address address);
@@ -204,19 +207,21 @@
class NativeMovConstReg: public NativeInstruction {
public:
enum Aarch64_specific_constants {
- instruction_size = 4 * 4,
+ instruction_size = 3 * 4, // movz, movk, movk. See movptr().
instruction_offset = 0,
displacement_offset = 0,
};
address instruction_address() const { return addr_at(instruction_offset); }
address next_instruction_address() const {
- if (is_adrp_at(instruction_address()))
+ if (nativeInstruction_at(instruction_address())->is_movz())
+ // Assume movz, movk, movk
+ return addr_at(instruction_size);
+ else if (is_adrp_at(instruction_address()))
return addr_at(2*4);
else if (is_ldr_literal_at(instruction_address()))
return(addr_at(4));
- else
- return addr_at(instruction_size);
+ assert(false, "Unknown instruction in NativeMovConstReg");
}
intptr_t data() const;
More information about the aarch64-port-dev
mailing list