[aarch64-port-dev ] C1: Implement UnsafeGetAndSetObject

Andrew Haley aph at redhat.com
Wed Aug 21 08:31:29 PDT 2013


We were missing the C1 back-end code for UnsafeGetAndSetObject:
LIRGenerator::do_UnsafeGetAndSetObject and LIR_Assembler::atomic_op
were both stubbed out as Unimplemented().  Fixed thusly.
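
The generated code in both cases is the standard AArch64
load-exclusive/store-exclusive retry loop (ldaxr/stlxr, or the
w-forms for T_INT and compressed oops).  Roughly speaking -- this is
only a sketch, and the sketch_* names and int64_t are mine rather
than HotSpot's -- each loop computes what these GCC builtins compute:

  #include <cstdint>

  // lir_xchg:  again: ldaxr old, [addr]; stlxr status, new, [addr];
  //            cbnz status, again
  static inline int64_t sketch_xchg(volatile int64_t *addr,
                                    int64_t newval) {
    return __atomic_exchange_n(addr, newval, __ATOMIC_SEQ_CST);
  }

  // lir_xadd:  again: ldaxr old, [addr]; add tmp, old, inc;
  //            stlxr status, tmp, [addr]; cbnz status, again
  static inline int64_t sketch_xadd(volatile int64_t *addr,
                                    int64_t inc) {
    return __atomic_fetch_add(addr, inc, __ATOMIC_SEQ_CST);
  }

Both return the old value, which is what Unsafe.getAndSetObject and
friends need.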

Andrew.



# HG changeset patch
# User aph
# Date 1377083252 -3600
# Node ID a8ce62237e186cc9af276a7d7625c5b87c8e8002
# Parent  66282d868b9c92b6e5ec31a57783d3062dfaced4
C1: Implement UnsafeGetAndSetObject.

diff -r 66282d868b9c -r a8ce62237e18 src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Mon Aug 19 16:37:09 2013 +0100
+++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Wed Aug 21 12:07:32 2013 +0100
@@ -161,6 +161,21 @@
   return op->is_double_cpu() ? op->as_register_lo() : op->as_register();
 }

+static jlong as_long(LIR_Opr data) {
+  jlong result;
+  switch (data->type()) {
+  case T_INT:
+    result = (data->as_jint());
+    break;
+  case T_LONG:
+    result = (data->as_jlong());
+    break;
+  default:
+    ShouldNotReachHere();
+  }
+  return result;
+}
+
 static bool is_reg(LIR_Opr op) {
   return op->is_double_cpu() | op->is_single_cpu();
 }
@@ -1574,6 +1589,7 @@


 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
+  assert(VM_Version::supports_cx8(), "wrong machine");
   Register addr = as_reg(op->addr());
   Register newval = as_reg(op->new_value());
   Register cmpval = as_reg(op->cmp_value());
@@ -2965,6 +2981,95 @@
   }
 }

-void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) { Unimplemented(); }
+void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
+  Address addr = as_Address(src->as_address_ptr(), noreg);
+  BasicType type = src->type();
+  bool is_oop = type == T_OBJECT || type == T_ARRAY;
+
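+  // Select the 32- or 64-bit forms of the exclusive load, add and exclusive
+  // store; compressed oops are exchanged with the 32-bit (w) forms.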
+  void (MacroAssembler::* lda)(Register Rd, Register Ra);
+  void (MacroAssembler::* add)(Register Rd, Register Rn, RegisterOrConstant increment);
+  void (MacroAssembler::* stl)(Register Rs, Register Rt, Register Rn);
+
+  switch(type) {
+  case T_INT:
+    lda = &MacroAssembler::ldaxrw;
+    add = &MacroAssembler::addw;
+    stl = &MacroAssembler::stlxrw;
+    break;
+  case T_LONG:
+    lda = &MacroAssembler::ldaxr;
+    add = &MacroAssembler::add;
+    stl = &MacroAssembler::stlxr;
+    break;
+  case T_OBJECT:
+  case T_ARRAY:
+    if (UseCompressedOops) {
+      lda = &MacroAssembler::ldaxrw;
+      add = &MacroAssembler::addw;
+      stl = &MacroAssembler::stlxrw;
+    } else {
+      lda = &MacroAssembler::ldaxr;
+      add = &MacroAssembler::add;
+      stl = &MacroAssembler::stlxr;
+    }
+    break;
+  default:
+    ShouldNotReachHere();
+  }
+
+  switch (code) {
+  case lir_xadd:
+    {
+      RegisterOrConstant inc;
+      Register tmp = as_reg(tmp_op);
+      Register dst = as_reg(dest);
+      if (data->is_constant()) {
+	inc = RegisterOrConstant(as_long(data));
+	assert_different_registers(dst, addr.base(), tmp,
+				   rscratch1, rscratch2);
+      } else {
+	inc = RegisterOrConstant(as_reg(data));
+	assert_different_registers(inc.as_register(), dst, addr.base(), tmp,
+				   rscratch1, rscratch2);
+      }
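+      // Load-exclusive the old value into dst, add the increment, then
+      // store-exclusive; rscratch2 reads back zero once the store succeeds.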
+      Label again;
+      __ lea(tmp, addr);
+      __ bind(again);
+      (_masm->*lda)(dst, tmp);
+      (_masm->*add)(rscratch1, dst, inc);
+      (_masm->*stl)(rscratch2, rscratch1, tmp);
+      __ cbnzw(rscratch2, again);
+      break;
+    }
+  case lir_xchg:
+    {
+      Register tmp = tmp_op->as_register();
+      Register obj = as_reg(data);
+      Register dst = as_reg(dest);
+      if (is_oop && UseCompressedOops) {
+	__ encode_heap_oop(obj);
+      }
+      assert_different_registers(obj, addr.base(), tmp, rscratch2, dst);
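+      // Load-exclusive the old value into dst, then store-exclusive the new
+      // one, retrying for as long as the exclusive monitor is lost.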
+      Label again;
+      __ lea(tmp, addr);
+      __ bind(again);
+      (_masm->*lda)(dst, tmp);
+      (_masm->*stl)(rscratch2, obj, tmp);
+      __ cbnzw(rscratch2, again);
+      if (is_oop && UseCompressedOops) {
+	__ decode_heap_oop(dst);
+      }
+    }
+    break;
+  default:
+    ShouldNotReachHere();
+  }
+}

 #undef __
diff -r 66282d868b9c -r a8ce62237e18 src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp	Mon Aug 19 16:37:09 2013 +0100
+++ b/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp	Wed Aug 21 12:07:32 2013 +0100
@@ -1294,4 +1294,58 @@
   }
 }

-void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) { Unimplemented(); }
+void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
+  BasicType type = x->basic_type();
+  LIRItem src(x->object(), this);
+  LIRItem off(x->offset(), this);
+  LIRItem value(x->value(), this);
+
+  src.load_item();
+  off.load_nonconstant();
+
+  if (! (value.is_constant() && can_inline_as_constant(x->value()))) {
+    value.load_item();
+  }
+
+  LIR_Opr dst = rlock_result(x, type);
+  LIR_Opr data = value.result();
+  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
+  LIR_Opr offset = off.result();
+
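+  // The swap loads the old value into dst before it stores data, so if the
+  // two share a register the new value must first be copied out of the way.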
+  if (data == dst) {
+    LIR_Opr tmp = new_register(data->type());
+    __ move(data, tmp);
+    data = tmp;
+  }
+
+  LIR_Address* addr;
+  if (offset->is_constant()) {
+    jlong l = offset->as_jlong();
+    assert((jlong)((jint)l) == l, "offset too large for constant");
+    jint c = (jint)l;
+    addr = new LIR_Address(src.result(), c, type);
+  } else {
+    addr = new LIR_Address(src.result(), offset, type);
+  }
+
+  LIR_Opr tmp = new_register(T_INT);
+  LIR_Opr ptr = LIR_OprFact::illegalOpr;
+
+  if (x->is_add()) {
+    __ xadd(LIR_OprFact::address(addr), data, dst, tmp);
+  } else {
+    if (is_obj) {
+      // Do the pre-write barrier, if any.
+      ptr = new_pointer_register();
+      __ add(src.result(), off.result(), ptr);
+      pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
+		  true /* do_load */, false /* patch */, NULL);
+    }
+    __ xchg(LIR_OprFact::address(addr), data, dst, tmp);
+    if (is_obj) {
+      post_barrier(ptr, data);
+    }
+  }
+}
# HG changeset patch
# User aph
# Date 1377083300 -3600
# Node ID 6ca74c561b2e3af452a8ceb78245581d8214269f
# Parent  a8ce62237e186cc9af276a7d7625c5b87c8e8002
C1: Implement UnsafeGetAndSetObject.

diff -r a8ce62237e18 -r 6ca74c561b2e src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Wed Aug 21 12:07:32 2013 +0100
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Wed Aug 21 12:08:20 2013 +0100
@@ -1587,6 +1587,22 @@
 }


+void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) {
+  if (increment.is_register()) {
+    add(Rd, Rn, increment.as_register());
+  } else {
+    add(Rd, Rn, increment.as_constant());
+  }
+}
+
+void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) {
+  if (increment.is_register()) {
+    addw(Rd, Rn, increment.as_register());
+  } else {
+    addw(Rd, Rn, increment.as_constant());
+  }
+}
+
 #ifdef ASSERT
 static Register spill_registers[] = {
   rheapbase,
diff -r a8ce62237e18 -r 6ca74c561b2e src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Wed Aug 21 12:07:32 2013 +0100
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Wed Aug 21 12:08:20 2013 +0100
@@ -1278,6 +1278,9 @@

   WRAP(adds) WRAP(addsw) WRAP(subs) WRAP(subsw)

+  void add(Register Rd, Register Rn, RegisterOrConstant increment);
+  void addw(Register Rd, Register Rn, RegisterOrConstant increment);
+
   void tableswitch(Register index, jint lowbound, jint highbound,
 		   Label &jumptable, Label &jumptable_end) {
     adr(rscratch1, jumptable);
diff -r a8ce62237e18 -r 6ca74c561b2e src/cpu/aarch64/vm/vm_version_aarch64.cpp
--- a/src/cpu/aarch64/vm/vm_version_aarch64.cpp	Wed Aug 21 12:07:32 2013 +0100
+++ b/src/cpu/aarch64/vm/vm_version_aarch64.cpp	Wed Aug 21 12:08:20 2013 +0100
@@ -71,7 +71,7 @@
     // TODO : redefine fields in CpuidInfo and generate
     // code to fill them in

-    __ ret(r30);
+    __ ret(lr);

 #   undef __

@@ -81,7 +81,12 @@


 void VM_Version::get_processor_features() {
-  // TODO : define relevant processor features and initialise them
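+  // Exclusive loads/stores are in the base AArch64 ISA, so these always hold.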
+  _supports_cx8 = true;
+  _supports_atomic_getset4 = true;
+  _supports_atomic_getadd4 = true;
+  _supports_atomic_getset8 = true;
+  _supports_atomic_getadd8 = true;
 }

 void VM_Version::initialize() {


