aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authoraph <none@none>2014-06-19 05:06:56 -0400
committeraph <none@none>2014-06-19 05:06:56 -0400
commit1e27d4b4dcab24daf26339456633c3d64062930e (patch)
tree595392565bece7308844f4348567dbc9599acb2f
parent6474b106ac7be5e2a7269d7526e73c702bf24351 (diff)
Save intermediate state before removing C1 patching code.
-rw-r--r--src/cpu/aarch64/vm/aarch64.ad24
-rw-r--r--src/cpu/aarch64/vm/assembler_aarch64.hpp2
-rw-r--r--src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp36
-rw-r--r--src/cpu/aarch64/vm/compiledIC_aarch64.cpp1
-rw-r--r--src/cpu/aarch64/vm/macroAssembler_aarch64.cpp51
-rw-r--r--src/cpu/aarch64/vm/macroAssembler_aarch64.hpp15
-rw-r--r--src/cpu/aarch64/vm/nativeInst_aarch64.cpp8
-rw-r--r--src/cpu/aarch64/vm/nativeInst_aarch64.hpp10
-rw-r--r--src/cpu/aarch64/vm/relocInfo_aarch64.cpp12
-rw-r--r--src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp4
-rw-r--r--src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp2
11 files changed, 75 insertions, 90 deletions
diff --git a/src/cpu/aarch64/vm/aarch64.ad b/src/cpu/aarch64/vm/aarch64.ad
index deef7d271..4ba1466a8 100644
--- a/src/cpu/aarch64/vm/aarch64.ad
+++ b/src/cpu/aarch64/vm/aarch64.ad
@@ -839,19 +839,7 @@ int MachCallStaticJavaNode::ret_addr_offset()
int MachCallDynamicJavaNode::ret_addr_offset()
{
- // call should be
- // ldr_constant
- // bl
- // where ldr_constant is either
- // ldr // if NearCpool
- // or
- // adrp // if !NearCPool
- // ldr
- int off = 8;
- if (!NearCpool) {
- off += 4;
- }
- return off;
+ return 16; // movz, movk, movk, bl
}
int MachCallRuntimeNode::ret_addr_offset() {
@@ -2570,9 +2558,9 @@ encode %{
} else {
relocInfo::relocType rtype = $src->constant_reloc();
if (rtype == relocInfo::oop_type) {
- __ movoop(dst_reg, (jobject)con, /*mt_safe*/false);
+ __ movoop(dst_reg, (jobject)con, /*immediate*/true);
} else if (rtype == relocInfo::metadata_type) {
- __ mov_metadata(dst_reg, (Metadata*)con, /*mt_safe*/false);
+ __ mov_metadata(dst_reg, (Metadata*)con);
} else {
assert(rtype == relocInfo::none, "unexpected reloc type");
if (con < (address)(uintptr_t)os::vm_page_size()) {
@@ -2625,7 +2613,7 @@ encode %{
} else {
relocInfo::relocType rtype = $src->constant_reloc();
assert(rtype == relocInfo::oop_type, "unexpected reloc type");
- __ set_narrow_oop(dst_reg, (jobject)con, /*mt_safe*/false);
+ __ set_narrow_oop(dst_reg, (jobject)con);
}
%}
@@ -2644,7 +2632,7 @@ encode %{
} else {
relocInfo::relocType rtype = $src->constant_reloc();
assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
- __ set_narrow_klass(dst_reg, (Klass *)con, /*mt_safe*/false);
+ __ set_narrow_klass(dst_reg, (Klass *)con);
}
%}
@@ -2836,8 +2824,6 @@ encode %{
address mark = __ pc();
address addr = (address)$meth$$method;
if (!_method) {
- // TODO check this
- // think we are calling generated Java here not x86
// A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
__ bl(Address(addr, relocInfo::runtime_call_type));
} else if (_optimized_virtual) {
diff --git a/src/cpu/aarch64/vm/assembler_aarch64.hpp b/src/cpu/aarch64/vm/assembler_aarch64.hpp
index 0411c6af1..bb53c2e0e 100644
--- a/src/cpu/aarch64/vm/assembler_aarch64.hpp
+++ b/src/cpu/aarch64/vm/assembler_aarch64.hpp
@@ -1243,7 +1243,7 @@ public:
f(size & 0b01, 31, 30), f(0b011, 29, 27), f(0b00, 25, 24);
long offset = (adr.target() - pc()) >> 2;
sf(offset, 23, 5);
-#ifdef ASSERT
+#if 0
Relocation* reloc = adr.rspec().reloc();
relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();
assert(rtype == relocInfo::internal_word_type,
diff --git a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
index 671ac4cf6..fb2f18958 100644
--- a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
+++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
@@ -200,7 +200,9 @@ Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
if (Address::offset_ok_for_immed(addr_offset, addr->scale()))
return Address(base, addr_offset, Address::lsl(addr->scale()));
else {
- address const_addr = int_constant(addr_offset);
+ // This is a rather long-winded instruction sequence, but the
+ // offset is atomically patchable. See PatchingStub::install().
+ Address const_addr = InternalAddress(int_constant(addr_offset));
__ ldr_constant(tmp, const_addr);
return Address(base, tmp, Address::lsl(addr->scale()));
}
@@ -314,19 +316,7 @@ void LIR_Assembler::jobject2reg(jobject o, Register reg) {
if (o == NULL) {
__ mov(reg, zr);
} else {
- int oop_index = __ oop_recorder()->find_index(o);
- assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(o)), "should be real oop");
- RelocationHolder rspec = oop_Relocation::spec(oop_index);
- address const_ptr = int_constant(jlong(o));
- __ code()->consts()->relocate(const_ptr, rspec);
- __ ldr_constant(reg, const_ptr);
-
- if (PrintRelocations && Verbose) {
- puts("jobject2reg:\n");
- printf("oop %p at %p\n", o, const_ptr);
- fflush(stdout);
- das((uint64_t)__ pc(), -2);
- }
+ __ movoop(reg, o, /*immediate*/true);
}
}
@@ -334,13 +324,16 @@ void LIR_Assembler::jobject2reg(jobject o, Register reg) {
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
// Allocate a new index in table to hold the object once it's been patched
int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
-// PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id, oop_index);
PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
- RelocationHolder rspec = oop_Relocation::spec(oop_index);
- address const_ptr = int_constant(-1);
- __ code()->consts()->relocate(const_ptr, rspec);
- __ ldr_constant(reg, const_ptr);
+ if (DeoptimizeWhenPatching) {
+ __ nop();
+ } else {
+ RelocationHolder rspec = oop_Relocation::spec(oop_index);
+ address const_ptr = int_constant(-1);
+ __ code()->consts()->relocate(const_ptr, rspec);
+ __ ldr_constant(reg, InternalAddress(const_ptr));
+ }
patching_epilog(patch, lir_patch_normal, reg, info);
}
@@ -924,7 +917,10 @@ void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
Metadata* o = NULL;
PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
- __ mov_metadata(reg, o);
+ if (DeoptimizeWhenPatching)
+ __ nop();
+ else
+ __ mov_metadata(reg, o);
patching_epilog(patch, lir_patch_normal, reg, info);
}
diff --git a/src/cpu/aarch64/vm/compiledIC_aarch64.cpp b/src/cpu/aarch64/vm/compiledIC_aarch64.cpp
index b4d6e220d..c7c404e1f 100644
--- a/src/cpu/aarch64/vm/compiledIC_aarch64.cpp
+++ b/src/cpu/aarch64/vm/compiledIC_aarch64.cpp
@@ -139,6 +139,7 @@ void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry)
// Update stub.
method_holder->set_data((intptr_t)callee());
+ method_holder->flush();
jump->set_jump_destination(entry);
// Update jump to call.
diff --git a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
index 48b8f76ce..f00dca1e7 100644
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
@@ -622,9 +622,10 @@ void MacroAssembler::call(Address entry) {
void MacroAssembler::ic_call(address entry) {
RelocationHolder rh = virtual_call_Relocation::spec(pc());
- address const_ptr = long_constant((jlong)Universe::non_oop_word());
- unsigned long offset;
- ldr_constant(rscratch2, const_ptr);
+ // address const_ptr = long_constant((jlong)Universe::non_oop_word());
+ // unsigned long offset;
+ // ldr_constant(rscratch2, const_ptr);
+ movptr(rscratch2, (uintptr_t)Universe::non_oop_word());
call(Address(entry, rh));
}
@@ -2534,7 +2535,7 @@ void MacroAssembler::decode_klass_not_null(Register r) {
decode_klass_not_null(r, r);
}
-void MacroAssembler::set_narrow_oop(Register dst, jobject obj, bool mt_safe) {
+void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
assert (UseCompressedOops, "should only be used for compressed oops");
assert (Universe::heap() != NULL, "java heap should be initialized");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
@@ -2549,7 +2550,7 @@ void MacroAssembler::set_narrow_oop(Register dst, jobject obj, bool mt_safe) {
movk(dst, 0xBEEF);
}
-void MacroAssembler::set_narrow_klass(Register dst, Klass* k, bool mt_safe) {
+void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
assert (UseCompressedClassPointers, "should only be used for compressed headers");
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
int index = oop_recorder()->find_index(k);
@@ -2782,11 +2783,11 @@ Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
return Address((address)obj, rspec);
}
-// Move an oop into a register. mt_safe is true iff we are not going
-// to patch this instruction while the code is being executed by
-// another thread. In that case we can use move immediates rather
-// than the constant pool.
-void MacroAssembler::movoop(Register dst, jobject obj, bool mt_safe) {
+// Move an oop into a register. immediate is true if we want
+// immediate instructions, i.e. we are not going to patch this
+// instruction while the code is being executed by another thread. In
+// that case we can use move immediates rather than the constant pool.
+void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) {
int oop_index;
if (obj == NULL) {
oop_index = oop_recorder()->allocate_oop_index(obj);
@@ -2795,17 +2796,15 @@ void MacroAssembler::movoop(Register dst, jobject obj, bool mt_safe) {
assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "should be real oop");
}
RelocationHolder rspec = oop_Relocation::spec(oop_index);
- address const_ptr = mt_safe ? long_constant((jlong)obj) : NULL;
- if (! const_ptr) {
+ if (! immediate) {
+ address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address
+ ldr_constant(dst, Address(dummy, rspec));
+ } else
mov(dst, Address((address)obj, rspec));
- } else {
- code()->consts()->relocate(const_ptr, rspec);
- ldr_constant(dst, const_ptr);
- }
}
// Move a metadata address into a register.
-void MacroAssembler::mov_metadata(Register dst, Metadata* obj, bool mt_safe) {
+void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
int oop_index;
if (obj == NULL) {
oop_index = oop_recorder()->allocate_metadata_index(obj);
@@ -2813,13 +2812,7 @@ void MacroAssembler::mov_metadata(Register dst, Metadata* obj, bool mt_safe) {
oop_index = oop_recorder()->find_index(obj);
}
RelocationHolder rspec = metadata_Relocation::spec(oop_index);
- address const_ptr = mt_safe ? long_constant((jlong)obj) : NULL;
- if (! const_ptr) {
- mov(dst, Address((address)obj, rspec));
- } else {
- code()->consts()->relocate(const_ptr, rspec);
- ldr_constant(dst, const_ptr);
- }
+ mov(dst, Address((address)obj, rspec));
}
Address MacroAssembler::constant_oop_address(jobject obj) {
@@ -3107,12 +3100,12 @@ address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype
void MacroAssembler::adrp(Register reg1, const Address &dest, unsigned long &byte_offset) {
relocInfo::relocType rtype = dest.rspec().reloc()->type();
- guarantee(rtype == relocInfo::none
- || rtype == relocInfo::external_word_type
- || rtype == relocInfo::poll_type
- || rtype == relocInfo::poll_return_type,
- "can only use a fixed address with an ADRP");
if (labs(pc() - dest.target()) >= (1LL << 32)) {
+ guarantee(rtype == relocInfo::none
+ || rtype == relocInfo::external_word_type
+ || rtype == relocInfo::poll_type
+ || rtype == relocInfo::poll_return_type,
+ "can only use a fixed address with an ADRP");
// Out of range. This doesn't happen very often, but we have to
// handle it
mov(reg1, dest);
diff --git a/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp b/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
index f7f59a48e..c3ebea34b 100644
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
@@ -737,7 +737,7 @@ public:
void encode_heap_oop_not_null(Register dst, Register src);
void decode_heap_oop_not_null(Register dst, Register src);
- void set_narrow_oop(Register dst, jobject obj, bool mt_safe = true);
+ void set_narrow_oop(Register dst, jobject obj);
// currently unimplemented
#if 0
void set_narrow_oop(Address dst, jobject obj);
@@ -750,7 +750,7 @@ public:
void encode_klass_not_null(Register dst, Register src);
void decode_klass_not_null(Register dst, Register src);
- void set_narrow_klass(Register dst, Klass* k, bool mt_safe = true);
+ void set_narrow_klass(Register dst, Klass* k);
// currently unimplemented
#if 0
void set_narrow_klass(Address dst, Klass* k);
@@ -1106,7 +1106,7 @@ public:
// Data
- void mov_metadata(Register dst, Metadata* obj, bool mt_safe = true);
+ void mov_metadata(Register dst, Metadata* obj);
Address allocate_metadata_address(Metadata* obj);
Address constant_oop_address(jobject obj);
// unimplemented
@@ -1114,7 +1114,7 @@ public:
void pushoop(jobject obj);
#endif
- void movoop(Register dst, jobject obj, bool mt_safe = true);
+ void movoop(Register dst, jobject obj, bool immediate = false);
// sign extend as need a l to ptr sized element
void movl2ptr(Register dst, Address src) { Unimplemented(); }
@@ -1256,13 +1256,12 @@ public:
Label* retaddr = NULL
);
- void ldr_constant(Register dest, address const_addr) {
- guarantee(const_addr, "constant pool overflow");
+ void ldr_constant(Register dest, const Address &const_addr) {
if (NearCpool) {
- ldr(dest, const_addr, relocInfo::internal_word_type);
+ ldr(dest, const_addr);
} else {
unsigned long offset;
- adrp(dest, InternalAddress(const_addr), offset);
+ adrp(dest, InternalAddress(const_addr.target()), offset);
ldr(dest, Address(dest, offset));
}
}
diff --git a/src/cpu/aarch64/vm/nativeInst_aarch64.cpp b/src/cpu/aarch64/vm/nativeInst_aarch64.cpp
index 592e6e6a3..81a006245 100644
--- a/src/cpu/aarch64/vm/nativeInst_aarch64.cpp
+++ b/src/cpu/aarch64/vm/nativeInst_aarch64.cpp
@@ -53,13 +53,6 @@ void NativeCall::print() { Unimplemented(); }
// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }
-// MT-safe patching of a call instruction.
-// First patches first word of instruction to two jmp's that jmps to them
-// selfs (spinlock). Then patches the last byte, and then atomicly replaces
-// the jmp's with the first 4 byte of the new instruction.
-void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) { Unimplemented(); }
-
-
void NativeMovConstReg::verify() {
// make sure code pattern is actually mov reg64, imm64 instructions
}
@@ -83,7 +76,6 @@ void NativeMovConstReg::set_data(intptr_t x) {
}
};
-
void NativeMovConstReg::print() {
tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
instruction_address(), data());
diff --git a/src/cpu/aarch64/vm/nativeInst_aarch64.hpp b/src/cpu/aarch64/vm/nativeInst_aarch64.hpp
index 4b9e3ec56..a84a768ed 100644
--- a/src/cpu/aarch64/vm/nativeInst_aarch64.hpp
+++ b/src/cpu/aarch64/vm/nativeInst_aarch64.hpp
@@ -202,8 +202,8 @@ inline NativeCall* nativeCall_before(address return_address) {
return call;
}
-// An interface for accessing/manipulating native mov reg, imm32 instructions.
-// (used to manipulate inlined 32bit data dll calls, etc.)
+// An interface for accessing/manipulating native mov reg, imm instructions.
+// (used to manipulate inlined 64-bit data calls, etc.)
class NativeMovConstReg: public NativeInstruction {
public:
enum Aarch64_specific_constants {
@@ -227,6 +227,12 @@ class NativeMovConstReg: public NativeInstruction {
intptr_t data() const;
void set_data(intptr_t x);
+ void flush() {
+ if (! maybe_cpool_ref(instruction_address())) {
+ ICache::invalidate_range(instruction_address(), instruction_size);
+ }
+ }
+
void verify();
void print();
diff --git a/src/cpu/aarch64/vm/relocInfo_aarch64.cpp b/src/cpu/aarch64/vm/relocInfo_aarch64.cpp
index 7c9f6ef50..301f88a6b 100644
--- a/src/cpu/aarch64/vm/relocInfo_aarch64.cpp
+++ b/src/cpu/aarch64/vm/relocInfo_aarch64.cpp
@@ -35,10 +35,20 @@
void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
switch(type()) {
case relocInfo::oop_type:
- MacroAssembler::patch_oop(addr(), x);
+ {
+ oop_Relocation *reloc = (oop_Relocation *)this;
+ if (NativeInstruction::is_ldr_literal_at(addr())) {
+ address constptr = (address)code()->oop_addr_at(reloc->oop_index());
+ MacroAssembler::pd_patch_instruction(addr(), constptr);
+ assert(*(address*)constptr == x, "error in oop relocation");
} else {
+ MacroAssembler::patch_oop(addr(), x);
+ }
+ }
break;
default:
MacroAssembler::pd_patch_instruction(addr(), x);
+ break;
}
}
diff --git a/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp b/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
index b4e96d2f9..61d11f09f 100644
--- a/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
+++ b/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
@@ -1735,7 +1735,9 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
if (method->is_static() && !is_critical_native) {
// load oop into a register
- __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
+ __ movoop(oop_handle_reg,
+ JNIHandles::make_local(method->method_holder()->java_mirror()),
+ /*immediate*/true);
// Now handlize the static class mirror it's known not-null.
__ str(oop_handle_reg, Address(sp, klass_offset));
diff --git a/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp b/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp
index 1815e0565..6a02e9fa5 100644
--- a/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp
+++ b/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp
@@ -100,7 +100,7 @@ char* os::non_memory_address_word() {
// even in its subfields (as defined by the CPU immediate fields,
// if the CPU splits constants across multiple instructions).
- return (char*) -1;
+ return (char*) 0xffffffffffff;
}
void os::initialize_thread(Thread *thr) {