[aarch64-port-dev ] RFR: Fix for large frame size adjusts (Take 2)

Edward Nevill edward.nevill at linaro.org
Wed Aug 7 05:40:56 PDT 2013


Hi,

OK. Here is my second attempt at this. I have split wrap_add_sub_imm_insn into two, adding a separate wrap_adds_subs_imm_insn.

The information about whether the instruction sets the flags is not available at this stage; it is encoded in the names of the functions passed to wrap_add_sub_imm_insn (i.e. add, adds, sub, subs, etc).

The code for wrap_add_sub_imm_insn now generates, in order of preference (case 2 is sketched after the list):

1) <op> Rd, Rn, #imm
2) <op> Rd, Rn, #(imm & -(1 << 12)); <op> Rd, Rd, #(imm & ((1<< 12)-1))
3) MOV Rd, #imm; <op> Rd, Rn, Rd
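
For illustration, here is a minimal standalone C++ sketch of case 2 (not part
of the patch; the example immediate 0x12345 and the printed mnemonics are just
for demonstration). It uses the same masks as wrap_add_sub_imm_insn to split a
sub-24-bit immediate into a shifted 12-bit chunk plus a low 12-bit chunk:

#include <cstdio>
#include <cstdlib>

int main() {
  long imm = 0x12345;                 // example frame adjust, |imm| < 2^24
  long hi = imm & -(1 << 12);         // upper chunk, still a valid imm12 << 12
  long lo = imm & ((1 << 12) - 1);    // low 12 bits
  printf("add Rd, Rn, #0x%lx\n", (unsigned long)hi);  // first insn of the pair
  printf("add Rd, Rd, #0x%lx\n", (unsigned long)lo);  // second insn completes the adjust
  if (hi + lo != imm) abort();        // sanity check: the two chunks recompose imm
  return 0;
}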

The code for wrap_adds_subs_imm_insn generates (see the note after the list on why the split form is dropped):

1) <op> Rd, Rn, #imm
2) MOV Rd, #imm; <op> Rd, Rn, Rd
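
The two-instruction split is not offered here because the flags left by the
pair would describe only the second, partial addition, not the full operation.
A minimal standalone sketch of that (not HotSpot code; the helper carry_out and
the chosen operands are hypothetical, and only the carry flag is modelled):

#include <cstdint>
#include <cstdio>

// Carry out of a 32-bit addition (stand-in for the C flag of addsw).
static bool carry_out(uint32_t a, uint32_t b) {
  return (uint64_t)a + b > UINT32_MAX;
}

int main() {
  uint32_t rn  = 0xFFFFF000u;
  uint32_t imm = 0x1001u;                 // not encodable as a single imm12
  uint32_t hi  = imm & -(1u << 12);       // 0x1000
  uint32_t lo  = imm & ((1u << 12) - 1);  // 0x001

  bool full_c  = carry_out(rn, imm);      // carry from a single 'adds Rd, Rn, #imm'
  bool split_c = carry_out(rn + hi, lo);  // carry left by the *second* add of a split

  printf("full carry=%d, split carry=%d\n", full_c, split_c);  // prints 1 vs 0
  return 0;
}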

OK?

Ed.

--- CUT HERE ---
exporting patch:
# HG changeset patch
# User Edward Nevill ed at camswl.com
# Date 1375877611 -3600
# Node ID 5230539fd8f96ce9493a9a2a4c01fc7b65c9bad6
# Parent  560f86cdf664e23f462120f2998dd4e15af7d32b
Fix large immediates

diff -r 560f86cdf664 -r 5230539fd8f9 src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp     Mon Aug 05 16:03:11 2013 +0100
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp     Wed Aug 07 13:13:31 2013 +0100
@@ -1521,17 +1521,30 @@
 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned imm,
                                           add_sub_imm_insn insn1,
                                           add_sub_reg_insn insn2) {
+  assert(Rd != zr, "Rd = zr and not setting flags?");
   if (operand_valid_for_add_sub_immediate((int)imm)) {
     (this->*insn1)(Rd, Rn, imm);
   } else {
-    if (Rd == Rn) {
-       assert(Rd == sp, "only allowed for sp");
-       // We know S flag not set so can do this as sequence of add/sub
-       assert(labs(imm) < (1 << 24), "sp adjust too big");
+    if (labs(imm) < (1 << 24)) {
        (this->*insn1)(Rd, Rn, imm & -(1 << 12));
        (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1));
-       return;
+    } else {
+       assert_different_registers(Rd, Rn);
+       mov(Rd, (uint64_t)imm);
+       (this->*insn2)(Rd, Rn, Rd, LSL, 0);
     }
+  }
+}
+
+// Separate version which sets the flags. Optimisations are more restricted
+// because we must set the flags correctly.
+void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, unsigned imm,
+                                          add_sub_imm_insn insn1,
+                                          add_sub_reg_insn insn2) {
+  if (operand_valid_for_add_sub_immediate((int)imm)) {
+    (this->*insn1)(Rd, Rn, imm);
+  } else {
+    assert_different_registers(Rd, Rn);
     assert(Rd != zr, "overflow in immediate operand");
     mov(Rd, (uint64_t)imm);
     (this->*insn2)(Rd, Rn, Rd, LSL, 0);
diff -r 560f86cdf664 -r 5230539fd8f9 src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp     Mon Aug 05 16:03:11 2013 +0100
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp     Wed Aug 07 13:13:31 2013 +0100
@@ -1226,6 +1226,10 @@
   void wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned imm,
                             add_sub_imm_insn insn1,
                             add_sub_reg_insn insn2);
+  // Separate version which sets the flags
+  void wrap_adds_subs_imm_insn(Register Rd, Register Rn, unsigned imm,
+                            add_sub_imm_insn insn1,
+                            add_sub_reg_insn insn2);

 #define WRAP(INSN)                                                     \
   void INSN(Register Rd, Register Rn, unsigned imm) {                  \
@@ -1247,6 +1251,27 @@
   }

   WRAP(add) WRAP(addw) WRAP(sub) WRAP(subw)
+
+#undef WRAP
+#define WRAP(INSN)                                                     \
+  void INSN(Register Rd, Register Rn, unsigned imm) {                  \
+    wrap_adds_subs_imm_insn(Rd, Rn, imm, &Assembler::INSN, &Assembler::INSN); \
+  }                                                                    \
+                                                                       \
+  void INSN(Register Rd, Register Rn, Register Rm,                     \
+            enum shift_kind kind, unsigned shift = 0) {                \
+    Assembler::INSN(Rd, Rn, Rm, kind, shift);                          \
+  }                                                                    \
+                                                                       \
+  void INSN(Register Rd, Register Rn, Register Rm) {                   \
+    Assembler::INSN(Rd, Rn, Rm);                                       \
+  }                                                                    \
+                                                                       \
+  void INSN(Register Rd, Register Rn, Register Rm,                     \
+           ext::operation option, int amount = 0) {                    \
+    Assembler::INSN(Rd, Rn, Rm, option, amount);                       \
+  }
+
   WRAP(adds) WRAP(addsw) WRAP(subs) WRAP(subsw)

   void tableswitch(Register index, jint lowbound, jint highbound,
