[aarch64-port-dev ] RFR: remove -DAMD64 from sysdefs

Edward Nevill ed at camswl.com
Thu Jul 18 03:14:39 PDT 2013


Hi folks,

sysdefs in platform_aarch64 currently says

-DAARCH64 -DAMD64

This is broken logic: it should define only one architecture. The 
-DAMD64 is there so that the BUILTIN_SIM builds the necessary bits for 
X86/AMD64.

However, there are many cases where this X86 code is not desirable or 
correct.

In these cases the code has been conditionalised with

#ifdef X86
#ifndef TARGET_ARCH_aarch64
..
#endif
#endif

I have changed the logic so that we positively identify the pieces of 
code we actually want, i.e.

#if defined(X86) || defined(AARCH64)

if the code is required for both the HW/model and the BUILTIN_SIM, or

#if defined(X86) || defined(BUILTIN_SIM)

if the code is required only for the BUILTIN_SIM.
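
For reference, the per-architecture selection macros used throughout 
the patch below (AARCH64_ONLY, NOT_AARCH64 and friends) expand roughly 
like this - a simplified sketch of what src/share/vm/utilities/macros.hpp 
does, not a verbatim quote:

#ifdef TARGET_ARCH_aarch64
# define AARCH64_ONLY(code) code
# define NOT_AARCH64(code)
#else
# define AARCH64_ONLY(code)
# define NOT_AARCH64(code) code
#endif

So e.g. AARCH64_ONLY(114) contributes the literal 114 only when 
building the aarch64 target and disappears everywhere else.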

Note: there is an implicit assumption that BUILTIN_SIM is 64-bit X86 
only, but that assumption was always there in any case.
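
If we ever want to make that assumption explicit rather than implicit, 
a compile-time guard along these lines would do it (just a sketch, not 
part of this patch; __x86_64__ is GCC's predefined macro for 64-bit x86 
targets):

#ifdef BUILTIN_SIM
# ifndef __x86_64__
#  error "BUILTIN_SIM assumes a 64-bit x86 host"
# endif
#endif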

I have tested release and slowdebug versions of the build on both the 
model and the BUILTIN_SIM, and as far as I can tell the behaviour is 
the same as before.

Andrew Dinn: there is one section of code in c1_LIRAssembler.cpp, which 
is C2-only, that I am not sure about.

-#if defined(X86) && defined(TIERED)
+#if (defined(X86) || defined(AARCH64)) && defined(TIERED)
   // C2 leave fpu stack dirty clean it

I have defaulted to leaving this section of code in, as that is what 
happened by default before, but could you review it to see whether it 
is actually necessary?
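
For context, the guarded block looks roughly like this (quoted from 
memory, so treat the body as a sketch rather than the exact source):

#if (defined(X86) || defined(AARCH64)) && defined(TIERED)
  // C2 leave fpu stack dirty clean it
  if (UseSSE < 2) {
    for (int i = 1; i <= 7; i++) {
      ffree(i);
    }
  }
#endif

Keeping it enabled for AARCH64 preserves what the old -DAMD64 build 
compiled in by default.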

Ta very much,
Ed.

--- CUT HERE ---
# HG changeset patch
# User Edward Nevill ed at camswl.com
# Date 1374140921 -3600
# Node ID 7811cd57ac8ebbc7f89608495b0fca3d18c08386
# Parent  a305c2e430220494cc653b2f17c602cb3cf9784a
Remove -DAMD64 from sysdefs in platform_aarch64

diff -r a305c2e43022 -r 7811cd57ac8e make/linux/platform_aarch64
--- a/make/linux/platform_aarch64       Tue Jul 16 16:16:46 2013 +0100
+++ b/make/linux/platform_aarch64       Thu Jul 18 10:48:41 2013 +0100
@@ -12,4 +12,4 @@

 compiler = gcc

-sysdefs = -DLINUX -D_GNU_SOURCE -DAARCH64 -DAMD64
+sysdefs = -DLINUX -D_GNU_SOURCE -DAARCH64
diff -r a305c2e43022 -r 7811cd57ac8e src/os/linux/vm/os_linux.cpp
--- a/src/os/linux/vm/os_linux.cpp      Tue Jul 16 16:16:46 2013 +0100
+++ b/src/os/linux/vm/os_linux.cpp      Thu Jul 18 10:48:41 2013 +0100
@@ -266,7 +266,7 @@
 #  else
 static char cpu_arch[] = "sparc";
 #  endif
-#elif defined(TARGET_ARCH_aarch64)
+#elif defined(AARCH64)
 static char cpu_arch[] = "aarch64";
 #else
 #error Add appropriate cpu_arch setting
@@ -359,7 +359,7 @@
  *        ...
  *        7: The default directories, normally /lib and /usr/lib.
  */
-#if defined(AMD64) || defined(_LP64) && (defined(SPARC) || defined(PPC) || defined(S390))
+#if defined(AMD64) || defined(_LP64) && (defined(SPARC) || defined(PPC) || defined(S390)) || defined(BUILTIN_SIM)
 #define DEFAULT_LIBPATH "/usr/lib64:/lib64:/lib:/usr/lib"
 #else
 #define DEFAULT_LIBPATH "/lib:/usr/lib"
@@ -1409,8 +1409,12 @@

 #ifndef SYS_clock_getres

-#if defined(IA32) || defined(AMD64)
-#define SYS_clock_getres IA32_ONLY(266)  AMD64_ONLY(229)
+#if defined(IA32) || defined(AMD64) || defined(AARCH64)
+#ifdef BUILTIN_SIM
+#define SYS_clock_getres 229
+#else
+#define SYS_clock_getres IA32_ONLY(266)  AMD64_ONLY(229) AARCH64_ONLY(114)
+#endif
 #define sys_clock_getres(x,y)  ::syscall(SYS_clock_getres, x, y)
 #else
 #warning "SYS_clock_getres not defined for this platform, disabling 
fast_thread_cpu_time"
@@ -1904,7 +1908,7 @@
     static  Elf32_Half running_arch_code=EM_MIPS;
   #elif  (defined M68K)
     static  Elf32_Half running_arch_code=EM_68K;
-  #elif  (defined TARGET_ARCH_aarch64)
+  #elif  (defined AARCH64)
     static  Elf32_Half running_arch_code=EM_AARCH64;
   #else
    #error Method os::dll_load requires that one of following is defined:\
@@ -2564,12 +2568,7 @@
   unsigned int cpu;
   int retval = -1;

-#if defined(IA32)
-# ifndef SYS_getcpu
-# define SYS_getcpu 318
-# endif
-  retval = syscall(SYS_getcpu, &cpu, NULL, NULL);
-#elif defined(AMD64)
+#if defined(AMD64) || defined(BUILTIN_SIM)
 // Unfortunately we have to bring all these macros here from vsyscall.h
 // to be able to compile on old linuxes.
 # define __NR_vgetcpu 2
@@ -2579,6 +2578,11 @@
   typedef long (*vgetcpu_t)(unsigned int *cpu, unsigned int *node, unsigned long *tcache);
   vgetcpu_t vgetcpu = (vgetcpu_t)VSYSCALL_ADDR(__NR_vgetcpu);
   retval = vgetcpu(&cpu, NULL, NULL);
+#elif defined(IA32) || defined(AARCH64)
+# ifndef SYS_getcpu
+#  define SYS_getcpu AARCH64_ONLY(168) NOT_AARCH64(318)
+# endif
+  retval = syscall(SYS_getcpu, &cpu, NULL, NULL);
 #endif

   return (retval == -1) ? retval : cpu;
@@ -2993,9 +2997,7 @@
     // format has been changed), we'll use the largest page size supported by
     // the processor.

-#if defined(TARGET_ARCH_aarch64)
-    _large_page_size = 2 * M;
-#elif !defined(ZERO)
+#if !defined(ZERO)
     _large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
                        ARM_ONLY(2 * M) PPC_ONLY(4 * M) AARCH64_ONLY(2 * M);
 #endif // ZERO
@@ -5299,15 +5301,19 @@
 extern char** environ;

 #ifndef __NR_fork
-#ifdef TARGET_AARCH_aarch64
-#define __NR_fork SYS_clone,SIGCHLD,0,0,0,0
+#ifdef BUILTIN_SIM
+#define __NR_fork 57
 #else
-#define __NR_fork IA32_ONLY(2) IA64_ONLY(not defined) AMD64_ONLY(57)
+#define __NR_fork IA32_ONLY(2) IA64_ONLY(not defined) AMD64_ONLY(57) AARCH64_ONLY(1079)
 #endif
 #endif

 #ifndef __NR_execve
-#define __NR_execve IA32_ONLY(11) IA64_ONLY(1033) AMD64_ONLY(59)
+#ifdef BUILTIN_SIM
+#define __NR_execve 59
+#else
+#define __NR_execve IA32_ONLY(11) IA64_ONLY(1033) AMD64_ONLY(59) AARCH64_ONLY(221)
+#endif
 #endif

 // Run the specified command in a separate process. Return its exit value,
diff -r a305c2e43022 -r 7811cd57ac8e src/share/vm/adlc/output_c.cpp
--- a/src/share/vm/adlc/output_c.cpp    Tue Jul 16 16:16:46 2013 +0100
+++ b/src/share/vm/adlc/output_c.cpp    Thu Jul 18 10:48:41 2013 +0100
@@ -2278,14 +2278,9 @@
   const char* reg_conversion(const char* rep_var) {
     if (strcmp(rep_var,"$Register") == 0)      return "as_Register";
     if (strcmp(rep_var,"$FloatRegister") == 0) return "as_FloatRegister";
-// the AArch64 (x86-hybrid) simulator build defines AMD64
-// but it does not want to see XMMRegister
-#ifdef BUILTIN_SIM
-#else
 #if defined(IA32) || defined(AMD64)
     if (strcmp(rep_var,"$XMMRegister") == 0)   return "as_XMMRegister";
 #endif
-#endif
     return NULL;
   }

diff -r a305c2e43022 -r 7811cd57ac8e src/share/vm/c1/c1_LIR.cpp
--- a/src/share/vm/c1/c1_LIR.cpp        Tue Jul 16 16:16:46 2013 +0100
+++ b/src/share/vm/c1/c1_LIR.cpp        Thu Jul 18 10:48:41 2013 +0100
@@ -43,7 +43,6 @@
 }

 #if defined(X86)
-#ifndef TARGET_ARCH_aarch64

 XMMRegister LIR_OprDesc::as_xmm_float_reg() const {
   return FrameMap::nr2xmmreg(xmm_regnr());
@@ -54,7 +53,6 @@
   return FrameMap::nr2xmmreg(xmm_regnrLo());
 }

-#endif
 #endif // X86

 #if defined(SPARC) || defined(PPC)
@@ -1528,13 +1526,16 @@
   } else if (is_double_cpu()) {
     out->print(as_register_hi()->name());
     out->print(as_register_lo()->name());
-#if defined(X86)
-#ifndef TARGET_ARCH_aarch64
+#if defined(AARCH64)
+  } else if (is_single_fpu()) {
+    out->print("fpu%d", fpu_regnr());
+  } else if (is_double_fpu()) {
+    out->print("fpu%d", fpu_regnrLo());
+#elif defined(X86)
   } else if (is_single_xmm()) {
     out->print(as_xmm_float_reg()->name());
   } else if (is_double_xmm()) {
     out->print(as_xmm_double_reg()->name());
-#endif
   } else if (is_single_fpu()) {
     out->print("fpu%d", fpu_regnr());
   } else if (is_double_fpu()) {
diff -r a305c2e43022 -r 7811cd57ac8e src/share/vm/c1/c1_LIR.hpp
--- a/src/share/vm/c1/c1_LIR.hpp        Tue Jul 16 16:16:46 2013 +0100
+++ b/src/share/vm/c1/c1_LIR.hpp        Thu Jul 18 10:48:41 2013 +0100
@@ -447,13 +447,13 @@
     return as_register();
   }

-#if defined(X86) && !defined(TARGET_ARCH_aarch64)
+#if defined(X86)
   XMMRegister as_xmm_float_reg() const;
   XMMRegister as_xmm_double_reg() const;
   // for compatibility with RInfo
   int fpu () const                                 { return lo_reg_half(); }
 #endif
-#if defined(SPARC) || defined(ARM) || defined(PPC) || defined(TARGET_ARCH_aarch64)
+#if defined(SPARC) || defined(ARM) || defined(PPC) || defined(AARCH64)
   FloatRegister as_float_reg   () const;
   FloatRegister as_double_reg  () const;
 #endif
@@ -543,7 +543,7 @@
      , _type(type)
      , _disp(0) { verify(); }

-#if defined(X86) || defined(ARM)
+#if defined(X86) || defined(ARM) || defined(AARCH64)
   LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type):
        _base(base)
      , _index(index)
@@ -624,7 +624,7 @@
                                                                              LIR_OprDesc::fpu_register         |
                                                                              LIR_OprDesc::double_size); }
 #endif
-#ifdef X86
+#if defined(X86) || defined(AARCH64)
   static LIR_Opr double_fpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
                                                                              (reg  << LIR_OprDesc::reg2_shift) |
                                                                              LIR_OprDesc::double_type          |
diff -r a305c2e43022 -r 7811cd57ac8e src/share/vm/c1/c1_LIRAssembler.cpp
--- a/src/share/vm/c1/c1_LIRAssembler.cpp       Tue Jul 16 16:16:46 2013 +0100
+++ b/src/share/vm/c1/c1_LIRAssembler.cpp       Thu Jul 18 10:48:41 2013 +0100
@@ -481,7 +481,7 @@
     compilation()->set_has_method_handle_invokes(true);
   }

-#if defined(X86) && defined(TIERED)
+#if (defined(X86) || defined(AARCH64)) && defined(TIERED)
   // C2 leave fpu stack dirty clean it
   if (UseSSE < 2) {
     int i;
diff -r a305c2e43022 -r 7811cd57ac8e src/share/vm/c1/c1_LIRGenerator.cpp
--- a/src/share/vm/c1/c1_LIRGenerator.cpp       Tue Jul 16 16:16:46 2013 +0100
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp       Thu Jul 18 10:48:41 2013 +0100
@@ -2079,7 +2079,7 @@
     assert(log2_scale == 0, "must not have a scale");
     addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
   } else {
-#ifdef X86
+#if defined(X86) || defined(AARCH64)
 #ifdef _LP64
     if (!index_op->is_illegal() && index_op->type() == T_INT) {
       LIR_Opr tmp = new_pointer_register();
diff -r a305c2e43022 -r 7811cd57ac8e src/share/vm/c1/c1_LinearScan.cpp
--- a/src/share/vm/c1/c1_LinearScan.cpp Tue Jul 16 16:16:46 2013 +0100
+++ b/src/share/vm/c1/c1_LinearScan.cpp Thu Jul 18 10:48:41 2013 +0100
@@ -140,13 +140,11 @@
   } else if (opr->is_double_cpu()) {
     return opr->cpu_regnrLo();
 #ifdef X86
-#ifndef TARGET_ARCH_aarch64
   } else if (opr->is_single_xmm()) {
     return opr->fpu_regnr() + pd_first_xmm_reg;
   } else if (opr->is_double_xmm()) {
     return opr->fpu_regnrLo() + pd_first_xmm_reg;
 #endif
-#endif
   } else if (opr->is_single_fpu()) {
     return opr->fpu_regnr() + pd_first_fpu_reg;
   } else if (opr->is_double_fpu()) {
@@ -1098,7 +1096,7 @@
   }


-#if defined(X86) && !defined(TARGET_ARCH_aarch64)
+#if defined(X86)
   if (op->code() == lir_cmove) {
     // conditional moves can handle stack operands
     assert(op->result_opr()->is_register(), "result must always be in a register");
@@ -1297,10 +1295,8 @@
   // perfomed and so the temp ranges would be useless
   if (has_fpu_registers()) {
 #ifdef X86
-#ifndef TARGET_ARCH_aarch64
     if (UseSSE < 2) {
 #endif
-#endif
       for (i = 0; i < FrameMap::nof_caller_save_fpu_regs; i++) {
         LIR_Opr opr = FrameMap::caller_save_fpu_reg_at(i);
         assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
@@ -1308,7 +1304,6 @@
         caller_save_registers[num_caller_save_registers++] = reg_num(opr);
       }
 #ifdef X86
-#ifndef TARGET_ARCH_aarch64
     }
     if (UseSSE > 0) {
       for (i = 0; i < FrameMap::nof_caller_save_xmm_regs; i++) {
@@ -1319,7 +1314,6 @@
       }
     }
 #endif
-#endif
   }
   assert(num_caller_save_registers <= LinearScan::nof_regs, "out of bounds");

@@ -2122,14 +2116,12 @@
 #ifndef __SOFTFP__
       case T_FLOAT: {
 #ifdef X86
-#ifndef TARGET_ARCH_aarch64
         if (UseSSE >= 1) {
          assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
          assert(interval->assigned_regHi() == any_reg, "must not have hi register");
          return LIR_OprFact::single_xmm(assigned_reg - pd_first_xmm_reg);
         }
 #endif
-#endif

         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
         assert(interval->assigned_regHi() == any_reg, "must not have hi register");
@@ -2138,14 +2130,12 @@

       case T_DOUBLE: {
 #ifdef X86
-#ifndef TARGET_ARCH_aarch64
         if (UseSSE >= 2) {
          assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
          assert(interval->assigned_regHi() == any_reg, "must not have hi register (double xmm values are stored in one register)");
          return LIR_OprFact::double_xmm(assigned_reg - pd_first_xmm_reg);
         }
 #endif
-#endif

 #ifdef SPARC
         assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
@@ -2209,7 +2199,7 @@

   LIR_Opr res = operand_for_interval(interval);

-#ifdef X86
+#if defined(X86) || defined(AARCH64)
   // new semantic for is_last_use: not only set on definite end of interval,
   // but also before hole
   // This may still miss some cases (e.g. for dead values), but it is not necessary that the
@@ -2621,7 +2611,6 @@
     return 1;

 #ifdef X86
-#ifndef TARGET_ARCH_aarch64
   } else if (opr->is_single_xmm()) {
     VMReg rname = opr->as_xmm_float_reg()->as_VMReg();
     LocationValue* sv = new LocationValue(Location::new_reg_loc(Location::normal, rname));
@@ -2629,11 +2618,9 @@
     scope_values->append(sv);
     return 1;
 #endif
-#endif

   } else if (opr->is_single_fpu()) {
 #ifdef X86
-#ifndef TARGET_ARCH_aarch64
     // the exact location of fpu stack values is only known
     // during fpu stack allocation, so the stack allocator object
     // must be present
@@ -2641,7 +2628,6 @@
     assert(_fpu_stack_allocator != NULL, "must be present");
     opr = _fpu_stack_allocator->to_fpu_stack(opr);
 #endif
-#endif

     Location::Type loc_type = float_saved_as_double ? Location::float_in_dbl : Location::normal;
     VMReg rname = frame_map()->fpu_regname(opr->fpu_regnr());
@@ -2715,7 +2701,6 @@


 #ifdef X86
-#ifndef TARGET_ARCH_aarch64
     } else if (opr->is_double_xmm()) {
       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation");
       VMReg rname_first  = opr->as_xmm_double_reg()->as_VMReg();
@@ -2731,7 +2716,6 @@
       }
 #  endif
 #endif
-#endif

     } else if (opr->is_double_fpu()) {
       // On SPARC, fpu_regnrLo/fpu_regnrHi represents the two halves of
@@ -2742,7 +2726,6 @@
       // least and most significant words, respectively.

 #ifdef X86
-#ifndef TARGET_ARCH_aarch64
       // the exact location of fpu stack values is only known
       // during fpu stack allocation, so the stack allocator object
       // must be present
@@ -2752,7 +2735,6 @@

       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrLo is used)");
 #endif
-#endif
 #ifdef SPARC
       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi() + 1, "assumed in calculation (only fpu_regnrHi is used)");
 #endif
@@ -3644,12 +3626,10 @@
       }

 #ifdef X86
-#ifndef TARGET_ARCH_aarch64
       for (j = 0; j < FrameMap::nof_caller_save_xmm_regs; j++) {
         state_put(input_state, reg_num(FrameMap::caller_save_xmm_reg_at(j)), NULL);
       }
 #endif
-#endif
     }

     // process xhandler before output and temp operands
@@ -4565,11 +4545,9 @@
     } else if (assigned_reg() >= pd_first_fpu_reg && assigned_reg() <= pd_last_fpu_reg) {
       opr = LIR_OprFact::single_fpu(assigned_reg() - pd_first_fpu_reg);
 #ifdef X86
-#ifndef TARGET_ARCH_aarch64
     } else if (assigned_reg() >= pd_first_xmm_reg && assigned_reg() <= pd_last_xmm_reg) {
       opr = LIR_OprFact::single_xmm(assigned_reg() - pd_first_xmm_reg);
 #endif
-#endif
     } else {
       // ShouldNotReachHere();
     }
@@ -5647,7 +5625,7 @@
 }

 bool LinearScanWalker::no_allocation_possible(Interval* cur) {
-#if defined(X86) && !defined(TARGET_ARCH_aarch64)
+#if defined(X86)
   // fast calculation of intervals that can never get a register because the
   // the next instruction is a call that blocks all registers
   // Note: this does not work if callee-saved registers are available (e.g. on Sparc)
diff -r a305c2e43022 -r 7811cd57ac8e src/share/vm/interpreter/interpreterRuntime.cpp
--- a/src/share/vm/interpreter/interpreterRuntime.cpp   Tue Jul 16 16:16:46 2013 +0100
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp   Thu Jul 18 10:48:41 2013 +0100
@@ -1189,7 +1189,7 @@
   // preparing the same method will be sure to see non-null entry & mirror.
 IRT_END

-#if defined(IA32) || defined(AMD64) || defined(ARM) || defined(TARGET_AARCH_aarch64)
+#if defined(IA32) || defined(AMD64) || defined(ARM) || defined(AARCH64)
 IRT_LEAF(void, InterpreterRuntime::popframe_move_outgoing_args(JavaThread* thread, void* src_address, void* dest_address))
   if (src_address == dest_address) {
     return;
diff -r a305c2e43022 -r 7811cd57ac8e src/share/vm/interpreter/interpreterRuntime.hpp
--- a/src/share/vm/interpreter/interpreterRuntime.hpp   Tue Jul 16 16:16:46 2013 +0100
+++ b/src/share/vm/interpreter/interpreterRuntime.hpp   Thu Jul 18 10:48:41 2013 +0100
@@ -136,7 +136,7 @@
                                         Method* method,
                                         intptr_t* from, intptr_t* to);

-#if defined(IA32) || defined(AMD64) || defined(ARM) || defined(TARGET_AARCH_aarch64)
+#if defined(IA32) || defined(AMD64) || defined(ARM) || defined(AARCH64)
   // Popframe support (only needed on x86, AMD64 and ARM)
   static void popframe_move_outgoing_args(JavaThread* thread, void* src_address, void* dest_address);
 #endif
diff -r a305c2e43022 -r 7811cd57ac8e src/share/vm/memory/allocation.inline.hpp
--- a/src/share/vm/memory/allocation.inline.hpp Tue Jul 16 16:16:46 2013 +0100
+++ b/src/share/vm/memory/allocation.inline.hpp Thu Jul 18 10:48:41 2013 +0100
@@ -36,7 +36,7 @@
 #ifndef PRODUCT
 // Increments unsigned long value for statistics (not atomic on MP).
 inline void inc_stat_counter(volatile julong* dest, julong add_value) {
-#if defined(SPARC) || defined(X86)
+#if defined(SPARC) || defined(X86) || defined(AARCH64)
   // Sparc and X86 have atomic jlong (8 bytes) instructions
   julong value = Atomic::load((volatile jlong*)dest);
   value += add_value;
diff -r a305c2e43022 -r 7811cd57ac8e src/share/vm/opto/machnode.hpp
--- a/src/share/vm/opto/machnode.hpp    Tue Jul 16 16:16:46 2013 +0100
+++ b/src/share/vm/opto/machnode.hpp    Thu Jul 18 10:48:41 2013 +0100
@@ -94,10 +94,6 @@
     return ::as_FloatRegister(reg(ra_, node, idx));
   }

-// the AArch64 (x86-hybrid) simulator build defines AMD64
-// but it does not want to see XMMRegister
-#ifdef BUILTIN_SIM
-#else
 #if defined(IA32) || defined(AMD64)
   XMMRegister  as_XMMRegister(PhaseRegAlloc *ra_, const Node *node) const {
     return ::as_XMMRegister(reg(ra_, node));
@@ -106,7 +102,6 @@
     return ::as_XMMRegister(reg(ra_, node, idx));
   }
 #endif
-#endif

   virtual intptr_t  constant() const;
   virtual relocInfo::relocType constant_reloc() const;
diff -r a305c2e43022 -r 7811cd57ac8e src/share/vm/prims/unsafe.cpp
--- a/src/share/vm/prims/unsafe.cpp     Tue Jul 16 16:16:46 2013 +0100
+++ b/src/share/vm/prims/unsafe.cpp     Thu Jul 18 10:48:41 2013 +0100
@@ -315,7 +315,7 @@
   OrderAccess::fence();
 UNSAFE_END

-#if defined(SPARC) || defined(X86)
+#if defined(SPARC) || defined(X86) || defined(AARCH64)
 // Sparc and X86 have atomic jlong (8 bytes) instructions

 #else
@@ -420,7 +420,7 @@
 DEFINE_GETSETOOP_VOLATILE(jfloat, Float);
 DEFINE_GETSETOOP_VOLATILE(jdouble, Double);

-#if defined(SPARC) || defined(X86)
+#if defined(SPARC) || defined(X86) || defined(AARCH64)
 // Sparc and X86 have atomic jlong (8 bytes) instructions
 DEFINE_GETSETOOP_VOLATILE(jlong, Long);
 #endif
@@ -450,7 +450,7 @@

 UNSAFE_ENTRY(void, Unsafe_SetOrderedLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong x))
   UnsafeWrapper("Unsafe_SetOrderedLong");
-#if defined(SPARC) || defined(X86)
+#if defined(SPARC) || defined(X86) || defined(AARCH64)
   // Sparc and X86 have atomic jlong (8 bytes) instructions
   SET_FIELD_VOLATILE(obj, offset, jlong, x);
 #else
diff -r a305c2e43022 -r 7811cd57ac8e src/share/vm/runtime/advancedThresholdPolicy.cpp
--- a/src/share/vm/runtime/advancedThresholdPolicy.cpp  Tue Jul 16 16:16:46 2013 +0100
+++ b/src/share/vm/runtime/advancedThresholdPolicy.cpp  Thu Jul 18 10:48:41 2013 +0100
@@ -56,7 +56,7 @@
   set_c2_count(MAX2(count - count / 3, 1));

   // Some inlining tuning
-#ifdef X86
+#if defined(X86) || defined(AARCH64)
   if (FLAG_IS_DEFAULT(InlineSmallCode)) {
     FLAG_SET_DEFAULT(InlineSmallCode, 2000);
   }
diff -r a305c2e43022 -r 7811cd57ac8e src/share/vm/runtime/vframeArray.cpp
--- a/src/share/vm/runtime/vframeArray.cpp      Tue Jul 16 16:16:46 2013 +0100
+++ b/src/share/vm/runtime/vframeArray.cpp      Thu Jul 18 10:48:41 2013 +0100
@@ -467,7 +467,7 @@
   // Copy registers for callee-saved registers
   if (reg_map != NULL) {
     for(int i = 0; i < RegisterMap::reg_count; i++) {
-#ifdef AMD64
+#if defined(AMD64) || defined(AARCH64)
       // The register map has one entry for every int (32-bit value), so
       // 64-bit physical registers have two entries in the map, one for
       // each half.  Ignore the high halves of 64-bit registers, just like
--- CUT HERE ---
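
P.S. For anyone wanting to try this locally: save the text between the 
CUT HERE markers to a file and apply it with hg import, e.g.

  hg import --no-commit sysdefs-aarch64.patch

(the file name is just an example; hg qimport works equally well if 
you use mq).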




