[aarch64-port-dev ] Improve code generation for volatile operations and other barriers
Andrew Haley
aph at redhat.com
Thu May 22 13:44:00 UTC 2014
This is the C1 part of the patch. All attempts to generate load-acquire (ldar) and
store-release (stlr) instructions are gone.
Andrew.
# HG changeset patch
# User aph
# Date 1400765080 14400
# Thu May 22 09:24:40 2014 -0400
# Node ID 78eff3c05f51ce9232950278d4da868d42500779
# Parent 0be4629243a868f0d4375b5cb8aff77b25b134b3
Use explicit barrier instructions in C1.
diff -r 0be4629243a8 -r 78eff3c05f51 src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp Thu May 22 07:21:33 2014 -0400
+++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp Thu May 22 09:24:40 2014 -0400
@@ -177,10 +177,6 @@
return result;
}
-static bool is_reg(LIR_Opr op) {
- return op->is_double_cpu() | op->is_single_cpu();
-}
-
Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
Register base = addr->base()->as_pointer_register();
LIR_Opr opr = addr->index();
@@ -2730,148 +2726,12 @@
}
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
- if (dest->is_address()) {
- LIR_Address* to_addr = dest->as_address_ptr();
- Register compressed_src = noreg;
- if (is_reg(src)) {
- compressed_src = as_reg(src);
- if (type == T_ARRAY || type == T_OBJECT) {
- __ verify_oop(src->as_register());
- if (UseCompressedOops) {
- compressed_src = rscratch2;
- __ mov(compressed_src, src->as_register());
- __ encode_heap_oop(compressed_src);
- }
- }
- } else if (src->is_single_fpu()) {
- __ fmovs(rscratch2, src->as_float_reg());
- src = FrameMap::rscratch2_opr, type = T_INT;
- } else if (src->is_double_fpu()) {
- __ fmovd(rscratch2, src->as_double_reg());
- src = FrameMap::rscratch2_long_opr, type = T_LONG;
- }
-
- if (dest->is_double_cpu())
- __ lea(rscratch1, as_Address(to_addr));
- else
- __ lea(rscratch1, as_Address_lo(to_addr));
-
- int null_check_here = code_offset();
- switch (type) {
- case T_ARRAY: // fall through
- case T_OBJECT: // fall through
- if (UseCompressedOops) {
- __ stlrw(compressed_src, rscratch1);
- } else {
- __ stlr(compressed_src, rscratch1);
- }
- break;
- case T_METADATA:
- // We get here to store a method pointer to the stack to pass to
- // a dtrace runtime call. This can't work on 64 bit with
- // compressed klass ptrs: T_METADATA can be a compressed klass
- // ptr or a 64 bit method pointer.
- LP64_ONLY(ShouldNotReachHere());
- __ stlr(src->as_register(), rscratch1);
- break;
- case T_ADDRESS:
- __ stlr(src->as_register(), rscratch1);
- break;
- case T_INT:
- __ stlrw(src->as_register(), rscratch1);
- break;
-
- case T_LONG: {
- __ stlr(src->as_register_lo(), rscratch1);
- break;
- }
-
- case T_BYTE: // fall through
- case T_BOOLEAN: {
- __ stlrb(src->as_register(), rscratch1);
- break;
- }
-
- case T_CHAR: // fall through
- case T_SHORT:
- __ stlrh(src->as_register(), rscratch1);
- break;
-
- default:
- ShouldNotReachHere();
- }
- if (info != NULL) {
- add_debug_info_for_null_check(null_check_here, info);
- }
- } else if (src->is_address()) {
- LIR_Address* from_addr = src->as_address_ptr();
-
- if (src->is_double_cpu())
- __ lea(rscratch1, as_Address(from_addr));
- else
- __ lea(rscratch1, as_Address_lo(from_addr));
-
- int null_check_here = code_offset();
- switch (type) {
- case T_ARRAY: // fall through
- case T_OBJECT: // fall through
- if (UseCompressedOops) {
- __ ldarw(dest->as_register(), rscratch1);
- } else {
- __ ldar(dest->as_register(), rscratch1);
- }
- break;
- case T_ADDRESS:
- __ ldar(dest->as_register(), rscratch1);
- break;
- case T_INT:
- __ ldarw(dest->as_register(), rscratch1);
- break;
- case T_LONG: {
- __ ldar(dest->as_register_lo(), rscratch1);
- break;
- }
-
- case T_BYTE: // fall through
- case T_BOOLEAN: {
- __ ldarb(dest->as_register(), rscratch1);
- break;
- }
-
- case T_CHAR: // fall through
- case T_SHORT:
- __ ldarh(dest->as_register(), rscratch1);
- break;
-
- case T_FLOAT:
- __ ldarw(rscratch2, rscratch1);
- __ fmovs(dest->as_float_reg(), rscratch2);
- break;
-
- case T_DOUBLE:
- __ ldar(rscratch2, rscratch1);
- __ fmovd(dest->as_double_reg(), rscratch2);
- break;
-
- default:
- ShouldNotReachHere();
- }
- if (info != NULL) {
- add_debug_info_for_null_check(null_check_here, info);
- }
-
- if (type == T_ARRAY || type == T_OBJECT) {
- if (UseCompressedOops) {
- __ decode_heap_oop(dest->as_register());
- }
- __ verify_oop(dest->as_register());
- } else if (type == T_ADDRESS && from_addr->disp() == oopDesc::klass_offset_in_bytes()) {
- if (UseCompressedClassPointers) {
- __ decode_klass_not_null(dest->as_register());
- }
- }
- } else
+ if (dest->is_address() || src->is_address()) {
+ move_op(src, dest, type, lir_patch_none, info,
+ /*pop_fpu_stack*/false, /*unaligned*/false, /*wide*/false);
+ } else {
ShouldNotReachHere();
+ }
}
#ifdef ASSERT
@@ -2925,17 +2785,18 @@
}
void LIR_Assembler::membar_acquire() {
- __ block_comment("membar_acquire");
+ __ membar(Assembler::LoadLoad|Assembler::LoadStore);
}
void LIR_Assembler::membar_release() {
- __ block_comment("membar_release");
+ __ membar(Assembler::LoadStore|Assembler::StoreStore);
}
-void LIR_Assembler::membar_loadload() { Unimplemented(); }
+void LIR_Assembler::membar_loadload() {
+ __ membar(Assembler::LoadLoad);
+}
void LIR_Assembler::membar_storestore() {
- COMMENT("membar_storestore");
__ membar(MacroAssembler::StoreStore);
}
More information about the aarch64-port-dev
mailing list