author    aph <none@none>  2014-06-16 07:25:03 -0400
committer aph <none@none>  2014-06-16 07:25:03 -0400
commit    6d9e8714134de9bf411d6a474e84bfd3712e43d0 (patch)
tree      00c3ee2ccc1efca59c6d125f5328a68cab80414e
parent    850f60e31e459bdf03b27c6be3d3c28c8d7bc3d9 (diff)
All address constants are 48 bits in size.
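On AArch64 a user-space virtual address fits in 48 bits, so a patchable address constant needs only a movz and two movks instead of the four-instruction 64-bit sequence. A minimal standalone sketch of the decomposition movptr() relies on (plain C++, not HotSpot code; the example address is made up):

    #include <cassert>
    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t addr = 0x00007f123456789aULL;  // a typical 48-bit user-space address
      assert(addr < (1ULL << 48) && "48-bit overflow in address constant");

      uint16_t lo  = addr & 0xffff;           // movz r, #lo
      uint16_t mid = (addr >> 16) & 0xffff;   // movk r, #mid, lsl #16
      uint16_t hi  = (addr >> 32) & 0xffff;   // movk r, #hi,  lsl #32

      // target_addr_for_insn() below performs exactly this reassembly.
      uint64_t rebuilt = (uint64_t)lo | ((uint64_t)mid << 16) | ((uint64_t)hi << 32);
      assert(rebuilt == addr);
      printf("%#" PRIx64 " -> %#06x %#06x %#06x\n", addr, lo, mid, hi);
      return 0;
    }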
-rw-r--r--  src/cpu/aarch64/vm/aarch64.ad                  | 15
-rw-r--r--  src/cpu/aarch64/vm/assembler_aarch64.cpp       |  2
-rw-r--r--  src/cpu/aarch64/vm/macroAssembler_aarch64.cpp  | 24
-rw-r--r--  src/cpu/aarch64/vm/macroAssembler_aarch64.hpp  |  2
-rw-r--r--  src/cpu/aarch64/vm/nativeInst_aarch64.cpp      |  8
-rw-r--r--  src/cpu/aarch64/vm/nativeInst_aarch64.hpp      | 24
6 files changed, 46 insertions, 29 deletions
diff --git a/src/cpu/aarch64/vm/aarch64.ad b/src/cpu/aarch64/vm/aarch64.ad
index 13c619a3e..2abd9cd20 100644
--- a/src/cpu/aarch64/vm/aarch64.ad
+++ b/src/cpu/aarch64/vm/aarch64.ad
@@ -864,7 +864,8 @@ int MachCallRuntimeNode::ret_addr_offset() {
if (cb) {
return 4;
} else {
- return 20;
+ // A 48-bit address. See movptr().
+ return 16;
}
}
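The new value 16 is four 4-byte instructions: the three-instruction movptr() sequence plus the call itself. A quick sketch of the arithmetic (assuming the call site is movz/movk/movk followed by a single call instruction):

    constexpr int kInsnBytes = 4;  // every AArch64 instruction is 4 bytes
    constexpr int kMovSeq    = 3;  // movz, movk, movk: 48-bit constant (was 4)
    constexpr int kCall      = 1;  // the call instruction itself
    static_assert((kMovSeq + kCall) * kInsnBytes == 16, "new ret_addr_offset");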
@@ -2099,7 +2100,6 @@ encode %{
// movz xscratch1 0xnnnn <-- current pc is here
// movk xscratch1 0xnnnn
// movk xscratch1 0xnnnn
- // movk xscratch1 0xnnnn
// str xscratch1, [xthread,#anchor_pc_off]
// mov xscratch2, sp
// str xscratch2, [xthread,#anchor_sp_off]
@@ -2111,7 +2111,6 @@ encode %{
// movz xscratch1 0xnnnn
// movk xscratch1 0xnnnn
// movk xscratch1 0xnnnn
- // movk xscratch1 0xnnnn
// blrt xscratch1
// . . .
//
@@ -2121,18 +2120,18 @@ encode %{
// stub. we assert that nargs is < 7.
//
// so the offset we need to add to the pc (in 32-bit words) is
- // 4 + <-- load 64 bit constant return pc
+ // 3 + <-- load 48-bit constant return pc
// 1 + <-- write anchor pc
// 1 + <-- copy sp
// 1 + <-- write anchor sp
// nargs + <-- java stub arg count
// 1 + <-- extra thread arg
// [ 1 + ] <-- optional ret address of stub caller
- // 4 + <-- load 64 bit call target address
+ // 3 + <-- load 48-bit constant call target address
// 1 <-- blrt instruction
//
- // i.e we need to add (nargs + 13) * 4 bytes or (nargs + 14) * 4 bytes
- //
+ // i.e. we need to add (nargs + 11) * 4 bytes or (nargs + 12) * 4 bytes
+ //
enc_class aarch64_enc_save_pc() %{
Compile* C = ra_->C;
@@ -2141,7 +2140,7 @@ encode %{
assert(nargs <= 8, "opto runtime stub has more than 8 args!");
MacroAssembler _masm(&cbuf);
address pc = __ pc();
- int call_offset = (nargs + 13) * 4;
+ int call_offset = (nargs + 11) * 4;
int field_offset = in_bytes(JavaThread::frame_anchor_offset()) +
in_bytes(JavaFrameAnchor::last_Java_pc_offset());
__ mov(rscratch1, InternalAddress(pc + call_offset));
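The word counts in the comment above can be checked mechanically. A sketch (hypothetical helper that mirrors the comment, not HotSpot code):

    constexpr int call_offset_words(int nargs, bool stub_caller_ret) {
      return 3                          // movz/movk/movk: 48-bit return pc
           + 1                          // str: write anchor pc
           + 1                          // mov: copy sp
           + 1                          // str: write anchor sp
           + nargs                      // java stub args
           + 1                          // extra thread arg
           + (stub_caller_ret ? 1 : 0)  // optional ret address of stub caller
           + 3                          // movz/movk/movk: 48-bit call target
           + 1;                         // blrt
    }
    static_assert(call_offset_words(0, false) == 11, "(nargs + 11) * 4 bytes");
    static_assert(call_offset_words(0, true)  == 12, "(nargs + 12) * 4 bytes");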
diff --git a/src/cpu/aarch64/vm/assembler_aarch64.cpp b/src/cpu/aarch64/vm/assembler_aarch64.cpp
index b8c7e5c03..d8c32756e 100644
--- a/src/cpu/aarch64/vm/assembler_aarch64.cpp
+++ b/src/cpu/aarch64/vm/assembler_aarch64.cpp
@@ -1273,7 +1273,7 @@ void Address::lea(MacroAssembler *as, Register r) const {
if (rtype == relocInfo::none)
__ mov(r, target());
else
- __ mov64(r, (uint64_t)target());
+ __ movptr(r, (uint64_t)target());
break;
}
default:
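The switch from mov64() to movptr() matters here because the target is relocated: the sequence may later be re-patched to a different address, so it must keep a fixed three-instruction shape regardless of the value. A rough standalone illustration of the design point (simplified model, not the HotSpot API):

    #include <cstdint>

    // Roughly how many instructions a value-optimized 64-bit mov would emit:
    // one movz for the low halfword plus one movk per non-zero higher chunk.
    static int optimal_mov_len(uint64_t imm) {
      int n = 1;
      for (int shift = 16; shift < 64; shift += 16)
        if ((imm >> shift) & 0xffff)
          n++;
      return n;
    }

    // movptr() always emits exactly three instructions, so relocation code
    // can re-patch the sequence no matter which address it must hold next.
    // e.g. optimal_mov_len(0x2a) == 1, but that single slot could never be
    // patched to hold a full 48-bit address.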
diff --git a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
index 720784a0a..bbfd04197 100644
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
@@ -65,6 +65,7 @@
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
void MacroAssembler::pd_patch_instruction(address branch, address target) {
+ assert((uint64_t)target < (1ul << 48), "48-bit overflow in address constant");
long offset = (target - branch) >> 2;
unsigned insn = *(unsigned*)branch;
if ((Instruction_aarch64::extract(insn, 29, 24) & 0b111011) == 0b011000) {
@@ -139,10 +140,11 @@ void MacroAssembler::pd_patch_instruction(address branch, address target) {
} else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) {
// Move wide constant
u_int64_t dest = (u_int64_t)target;
+ assert(nativeInstruction_at(branch+4)->is_movk(), "wrong insns in patch");
+ assert(nativeInstruction_at(branch+8)->is_movk(), "wrong insns in patch");
Instruction_aarch64::patch(branch, 20, 5, dest & 0xffff);
Instruction_aarch64::patch(branch += 4, 20, 5, (dest >>= 16) & 0xffff);
Instruction_aarch64::patch(branch += 4, 20, 5, (dest >>= 16) & 0xffff);
- Instruction_aarch64::patch(branch += 4, 20, 5, (dest >>= 16));
} else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
// nothing to do
@@ -216,14 +218,13 @@ address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
ShouldNotReachHere();
}
} else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) {
- // Move wide constant
- // FIXME: We assume these instructions are movz, movk, movk, movk.
- // We don't assert this; we should.
+ // Move address constant: movz, movk, movk. See movptr().
u_int32_t *insns = (u_int32_t *)insn_addr;
+ assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
+ assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
return address(u_int64_t(Instruction_aarch64::extract(insns[0], 20, 5))
+ (u_int64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
- + (u_int64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32)
- + (u_int64_t(Instruction_aarch64::extract(insns[3], 20, 5)) << 48));
+ + (u_int64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
} else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
return 0;
@@ -1246,10 +1247,14 @@ void MacroAssembler::mov(Register r, Address dest) {
InstructionMark im(this);
code_section()->relocate(inst_mark(), dest.rspec());
u_int64_t imm64 = (u_int64_t)dest.target();
- mov64(r, imm64);
+ movptr(r, imm64);
}
-void MacroAssembler::mov64(Register r, uintptr_t imm64) {
+// Move a constant pointer into r. In AArch64 mode the virtual
+// address space is 48 bits in size, so we only need three
+// instructions to create a patchable instruction sequence that can
+// reach anywhere.
+void MacroAssembler::movptr(Register r, uintptr_t imm64) {
#ifndef PRODUCT
{
char buffer[64];
@@ -1257,13 +1262,12 @@ void MacroAssembler::mov64(Register r, uintptr_t imm64) {
block_comment(buffer);
}
#endif
+ assert(imm64 < (1ul << 48), "48-bit overflow in address constant");
movz(r, imm64 & 0xffff);
imm64 >>= 16;
movk(r, imm64 & 0xffff, 16);
imm64 >>= 16;
movk(r, imm64 & 0xffff, 32);
- imm64 >>= 16;
- movk(r, imm64 & 0xffff, 48);
}
void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
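pd_patch_instruction() above rewrites the three imm16 fields of this sequence in place. A standalone sketch of that patching step (simplified re-implementation; Instruction_aarch64's real patch() helper is not shown here):

    #include <cassert>
    #include <cstdint>

    // Overwrite the imm16 field (bits 20:5) of one movz/movk instruction.
    static void patch_imm16(uint32_t* insn, uint64_t value) {
      const uint32_t mask = 0xffffu << 5;
      *insn = (*insn & ~mask) | (uint32_t)((value & 0xffff) << 5);
    }

    // Re-target an existing movz/movk/movk triple, as pd_patch_instruction does.
    static void patch_mov_triple(uint32_t* branch, uint64_t target) {
      assert(target < (1ULL << 48) && "48-bit overflow in address constant");
      patch_imm16(branch + 0, target);        // movz r, #target[15:0]
      patch_imm16(branch + 1, target >> 16);  // movk r, #target[31:16], lsl #16
      patch_imm16(branch + 2, target >> 32);  // movk r, #target[47:32], lsl #32
    }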
diff --git a/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp b/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
index c9a0ad21d..985f763b8 100644
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
@@ -439,7 +439,7 @@ public:
}
void mov(Register dst, Address a);
- void mov64(Register r, uintptr_t imm64);
+ void movptr(Register r, uintptr_t imm64);
// macro instructions for accessing and updating floating point
// status register
diff --git a/src/cpu/aarch64/vm/nativeInst_aarch64.cpp b/src/cpu/aarch64/vm/nativeInst_aarch64.cpp
index 0b5060591..592e6e6a3 100644
--- a/src/cpu/aarch64/vm/nativeInst_aarch64.cpp
+++ b/src/cpu/aarch64/vm/nativeInst_aarch64.cpp
@@ -207,6 +207,14 @@ bool NativeInstruction::is_ldrw_to_zr(address instr) {
Instruction_aarch64::extract(insn, 4, 0) == 0b11111);
}
+bool NativeInstruction::is_movz() {
+ return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b10100101;
+}
+
+bool NativeInstruction::is_movk() {
+ return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b11100101;
+}
+
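The magic numbers in is_movz() and is_movk() are bits 30:23 of the move-wide-immediate encoding: the two-bit opc field followed by the fixed pattern 100101. Starting at bit 30 leaves out sf, so the 32-bit forms match too. A quick standalone check against known encodings (movz x0, #0 is 0xd2800000; movk x0, #0 is 0xf2800000):

    #include <cassert>
    #include <cstdint>

    // Same inclusive bit-range extraction as Instruction_aarch64::extract().
    static uint32_t extract(uint32_t insn, int msb, int lsb) {
      return (insn >> lsb) & ((1u << (msb - lsb + 1)) - 1);
    }

    int main() {
      assert(extract(0xd2800000, 30, 23) == 0b10100101);  // movz x0, #0
      assert(extract(0xf2800000, 30, 23) == 0b11100101);  // movk x0, #0
      return 0;
    }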
// MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
diff --git a/src/cpu/aarch64/vm/nativeInst_aarch64.hpp b/src/cpu/aarch64/vm/nativeInst_aarch64.hpp
index f9b485937..4b9e3ec56 100644
--- a/src/cpu/aarch64/vm/nativeInst_aarch64.hpp
+++ b/src/cpu/aarch64/vm/nativeInst_aarch64.hpp
@@ -65,6 +65,8 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
inline bool is_cond_jump();
bool is_safepoint_poll();
inline bool is_mov_literal64();
+ bool is_movz();
+ bool is_movk();
protected:
address addr_at(int offset) const { return address(this) + offset; }
@@ -105,11 +107,12 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
};
inline NativeInstruction* nativeInstruction_at(address address) {
- NativeInstruction* inst = (NativeInstruction*)address;
-#ifdef ASSERT
- //inst->verify();
-#endif
- return inst;
+ return (NativeInstruction*)address;
+}
+
+// The natural type of an AArch64 instruction is uint32_t
+inline NativeInstruction* nativeInstruction_at(uint32_t *address) {
+ return (NativeInstruction*)address;
}
inline NativeCall* nativeCall_at(address address);
@@ -204,19 +207,22 @@
class NativeMovConstReg: public NativeInstruction {
public:
enum Aarch64_specific_constants {
- instruction_size = 4 * 4,
+ instruction_size = 3 * 4, // movz, movk, movk. See movptr().
instruction_offset = 0,
displacement_offset = 0,
};
address instruction_address() const { return addr_at(instruction_offset); }
address next_instruction_address() const {
- if (is_adrp_at(instruction_address()))
+ if (nativeInstruction_at(instruction_address())->is_movz())
+ // Assume movz, movk, movk
+ return addr_at(instruction_size);
+ else if (is_adrp_at(instruction_address()))
return addr_at(2*4);
else if (is_ldr_literal_at(instruction_address()))
return(addr_at(4));
- else
- return addr_at(instruction_size);
+ assert(false, "Unknown instruction in NativeMovConstReg");
+ return NULL; // unreachable when asserts are enabled; keeps product builds well-defined
}
intptr_t data() const;