[aarch64-port-dev ] Rewrite CAS operations to be more conservative
Andrew Haley
aph at redhat.com
Thu Apr 10 12:23:15 UTC 2014
After some discussion on concurrency-interest at cs.oswego.edu, I have
decided to change all of our CAS operations to use the form
   <Access [A]>

   // atomic_op (B)
1: ldaxr x0, [B]       // Exclusive load with acquire
   <op(B)>
   stlxr w1, x0, [B]   // Exclusive store with release
   cbnz  w1, 1b
   dmb   ish           // Full barrier
   <Access [C]>
or a similar variant. I'm not convinced that this is necessary,
but it is safe.
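For illustration only, the same shape can be written as a freestanding
C++ helper using GCC-style extended inline asm; this is a sketch, and
cas64() is a hypothetical name, not anything in the patch. It returns
the value observed at *addr and, like the generated code, runs the
trailing full barrier on both the success and the failure path:

  #include <stdint.h>

  // Hypothetical helper, not part of the patch: a 64-bit CAS in the
  // conservative form above.  Returns the value observed at *addr.
  static inline uint64_t cas64(volatile uint64_t *addr,
                               uint64_t expected, uint64_t desired) {
    uint64_t observed;
    uint32_t status;
    asm volatile("1: ldaxr %[obs], %[mem]         \n\t" // exclusive load-acquire
                 "   cmp   %[obs], %[exp]         \n\t"
                 "   b.ne  2f                     \n\t" // value changed: fail
                 "   stlxr %w[st], %[des], %[mem] \n\t" // exclusive store-release
                 "   cbnz  %w[st], 1b             \n\t" // lost reservation: retry
                 "2: dmb   ish                    \n\t" // trailing full barrier
                 : [obs] "=&r" (observed), [st] "=&r" (status),
                   [mem] "+Q" (*addr)
                 : [exp] "r" (expected), [des] "r" (desired)
                 : "cc", "memory");
    return observed;
  }

The dmb ish after the loop is what makes this "conservative": ldaxr/stlxr
alone give acquire/release ordering, but the trailing barrier also orders
the CAS against later unrelated accesses such as <Access [C]>.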
I'll push this patch once the jcstress test run has finished.
Andrew.
-------------- next part --------------
# HG changeset patch
# User aph
# Date 1397127043 14400
# Thu Apr 10 06:50:43 2014 -0400
# Node ID d9468835bc5160b7fac6709b0afbc751b2159fbb
# Parent a16c651450e4b0822cfabb248e19f3b371582fce
Rewrite CAS operations to be more conservative
diff -r a16c651450e4 -r d9468835bc51 src/cpu/aarch64/vm/aarch64.ad
--- a/src/cpu/aarch64/vm/aarch64.ad Tue Apr 08 14:58:30 2014 +0100
+++ b/src/cpu/aarch64/vm/aarch64.ad Thu Apr 10 06:50:43 2014 -0400
@@ -2328,12 +2328,11 @@
}
}
Label retry_load, done;
- __ membar(__ AnyAny);
__ bind(retry_load);
- __ ldxr(rscratch1, addr_reg);
+ __ ldaxr(rscratch1, addr_reg);
__ cmp(rscratch1, old_reg);
__ br(Assembler::NE, done);
- __ stxr(rscratch1, new_reg, addr_reg);
+ __ stlxr(rscratch1, new_reg, addr_reg);
__ cbnzw(rscratch1, retry_load);
__ bind(done);
__ membar(__ AnyAny);
@@ -2370,11 +2369,10 @@
}
Label retry_load, done;
__ bind(retry_load);
- __ membar(__ AnyAny);
- __ ldxrw(rscratch1, addr_reg);
+ __ ldaxrw(rscratch1, addr_reg);
__ cmpw(rscratch1, old_reg);
__ br(Assembler::NE, done);
- __ stxrw(rscratch1, new_reg, addr_reg);
+ __ stlxrw(rscratch1, new_reg, addr_reg);
__ cbnzw(rscratch1, retry_load);
__ bind(done);
__ membar(__ AnyAny);
@@ -5927,7 +5925,7 @@
ins_encode %{
__ block_comment("membar-acquire-lock");
- // __ membar(Assembler::Membar_mask_bits(Assembler::LoadLoad|Assembler::LoadStore));
+ __ membar(Assembler::Membar_mask_bits(Assembler::LoadLoad|Assembler::LoadStore));
%}
ins_pipe(pipe_class_memory);
@@ -5940,7 +5938,7 @@
ins_encode %{
__ block_comment("MEMBAR-release-lock");
- // __ membar(Assembler::AnyAny);
+ __ membar(Assembler::AnyAny);
%}
ins_pipe(pipe_class_memory);
diff -r a16c651450e4 -r d9468835bc51 src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp Tue Apr 08 14:58:30 2014 +0100
+++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp Thu Apr 10 06:50:43 2014 -0400
@@ -1590,7 +1590,7 @@
__ cset(rscratch1, Assembler::NE);
__ br(Assembler::NE, nope);
// if we store+flush with no intervening write rscratch1 will be zero
- __ stxrw(rscratch1, newval, addr);
+ __ stlxrw(rscratch1, newval, addr);
// retry so we only ever return after a load fails to compare
// ensures we don't return a stale value after a failed write.
__ cbnzw(rscratch1, retry_load);
@@ -1608,7 +1608,7 @@
__ cset(rscratch1, Assembler::NE);
__ br(Assembler::NE, nope);
// if we store+flush with no intervening write rscratch1 will be zero
- __ stxr(rscratch1, newval, addr);
+ __ stlxr(rscratch1, newval, addr);
// retry so we only ever return after a load fails to compare
// ensures we don't return a stale value after a failed write.
__ cbnz(rscratch1, retry_load);
@@ -3087,23 +3087,23 @@
case T_INT:
lda = &MacroAssembler::ldaxrw;
add = &MacroAssembler::addw;
- stl = &MacroAssembler::stxrw;
+ stl = &MacroAssembler::stlxrw;
break;
case T_LONG:
lda = &MacroAssembler::ldaxr;
add = &MacroAssembler::add;
- stl = &MacroAssembler::stxr;
+ stl = &MacroAssembler::stlxr;
break;
case T_OBJECT:
case T_ARRAY:
if (UseCompressedOops) {
lda = &MacroAssembler::ldaxrw;
add = &MacroAssembler::addw;
- stl = &MacroAssembler::stxrw;
+ stl = &MacroAssembler::stlxrw;
} else {
lda = &MacroAssembler::ldaxr;
add = &MacroAssembler::add;
- stl = &MacroAssembler::stxr;
+ stl = &MacroAssembler::stlxr;
}
break;
default:
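The hunk above only swaps the store function pointer from the relaxed
stxr forms to the releasing stlxr forms; the load side was already
ldaxr, and the loop that uses these pointers is outside this excerpt.
For illustration, the resulting get-and-add pattern looks roughly like
the following freestanding sketch (atomic_add32() is a hypothetical
name; GCC-style inline asm assumed; any additional trailing barrier
would come from the surrounding stub code, which is not shown here):

  #include <stdint.h>

  // Hypothetical helper, not part of the patch: 32-bit get-and-add
  // using load-acquire/store-release exclusives.
  static inline int32_t atomic_add32(volatile int32_t *addr, int32_t incr) {
    int32_t oldval, newval;
    uint32_t status;
    asm volatile("1: ldaxr %w[old], %[mem]          \n\t" // exclusive load-acquire
                 "   add   %w[new], %w[old], %w[inc]\n\t" // compute updated value
                 "   stlxr %w[st], %w[new], %[mem]  \n\t" // exclusive store-release
                 "   cbnz  %w[st], 1b               \n\t" // lost reservation: retry
                 : [old] "=&r" (oldval), [new] "=&r" (newval),
                   [st] "=&r" (status), [mem] "+Q" (*addr)
                 : [inc] "r" (incr)
                 : "memory");
    return oldval;  // value before the add, as get-and-add requires
  }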