[aarch64-port-dev] Try to align metaspace on a 4G boundary

Andrew Haley aph at redhat.com
Mon Aug 4 15:56:04 UTC 2014


This improves code generation for narrow klass encoding and decoding by
trying to allocate narrow_klass_base on a 4G boundary.  With a 4G-aligned
base and a zero narrow_klass_shift, the low 32 bits of the base are zero,
so a compressed klass can be decoded with a single MOVK and encoded with a
32-bit move.
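
For illustration, a minimal standalone sketch of the arithmetic this relies
on (not HotSpot code; decode_klass, encode_klass and the sample addresses
below are made up for the example):

#include <cassert>
#include <cstdint>
#include <cstdio>

// With a 4G-aligned base and a zero shift, the low 32 bits of the base are
// zero, so decoding just puts the base's upper bits back above the narrow
// value: base | narrow == base + narrow.
static uint64_t decode_klass(uint64_t base, uint32_t narrow) {
  assert((base & 0xffffffffULL) == 0);   // base is a multiple of 4G
  return base | narrow;
}

// Encoding needs only the low 32 bits of the full pointer, and two klasses
// can be compared on their low 32 bits alone.
static uint32_t encode_klass(uint64_t base, uint64_t klass) {
  assert((base & 0xffffffffULL) == 0);
  return (uint32_t)klass;                // == klass - base
}

int main() {
  const uint64_t base  = 0x800000000ULL;        // 32G: 4G-aligned
  const uint64_t klass = base + 0x12345678ULL;  // some Klass* in the class space
  uint32_t narrow = encode_klass(base, klass);
  printf("narrow = 0x%x, decoded = 0x%llx\n",
         narrow, (unsigned long long)decode_klass(base, narrow));
  assert(decode_klass(base, narrow) == klass);
  return 0;
}

The assembler fast paths in the patch below are exactly these operations: a
32-bit move (movw) to encode, a register move plus one movk to decode, and
a 32-bit cmpw to compare.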

Andrew.



# HG changeset patch
# User aph
# Date 1407166161 14400
#      Mon Aug 04 11:29:21 2014 -0400
# Node ID 0bddcfcf9488c0ee68b916462b7c3be9ddfad273
# Parent  08a7c21eaa48859085a7d2c5704c561980a1421a
AArch64: try to align metaspace on a 4G boundary.

diff -r 08a7c21eaa48 -r 0bddcfcf9488 src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Mon Aug 04 11:20:03 2014 -0400
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Mon Aug 04 11:29:21 2014 -0400
@@ -2496,6 +2496,11 @@
     if (Universe::narrow_klass_base() == NULL) {
       cmp(trial_klass, tmp, LSL, Universe::narrow_klass_shift());
       return;
+    } else if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0
+	       && Universe::narrow_klass_shift() == 0) {
+      // Only the bottom 32 bits matter
+      cmpw(trial_klass, tmp);
+      return;
     }
     decode_klass_not_null(tmp);
   } else {
@@ -2680,6 +2685,12 @@
     return;
   }

+  if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0
+      && Universe::narrow_klass_shift() == 0) {
+    movw(dst, src);
+    return;
+  }
+
 #ifdef ASSERT
   verify_heapbase("MacroAssembler::encode_klass_not_null2: heap base corrupted?");
 #endif
@@ -2723,6 +2734,14 @@
     return;
   }

+  if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0
+      && Universe::narrow_klass_shift() == 0) {
+    if (dst != src)
+      movw(dst, src);
+    movk(dst, (uint64_t)Universe::narrow_klass_base() >> 32, 32);
+    return;
+  }
+
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
diff -r 08a7c21eaa48 -r 0bddcfcf9488 src/share/vm/memory/metaspace.cpp
--- a/src/share/vm/memory/metaspace.cpp	Mon Aug 04 11:20:03 2014 -0400
+++ b/src/share/vm/memory/metaspace.cpp	Mon Aug 04 11:29:21 2014 -0400
@@ -2975,10 +2975,50 @@
   // Don't use large pages for the class space.
   bool large_pages = false;

+#ifndef AARCH64
   ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                              _reserve_alignment,
                                              large_pages,
                                              requested_addr, 0);
+#else // AARCH64
+  ReservedSpace metaspace_rs;
+
+  // Our compressed klass pointers may fit nicely into the lower 32
+  // bits.
+  if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G)
+    metaspace_rs = ReservedSpace(compressed_class_space_size(),
+                                             _reserve_alignment,
+                                             large_pages,
+                                             requested_addr, 0);
+
+  if (! metaspace_rs.is_reserved()) {
+    // Try to align metaspace so that we can decode a compressed klass
+    // with a single MOVK instruction.  We can do this iff the
+    // compressed class base is a multiple of 4G.
+    for (char *a = (char*)align_ptr_up(requested_addr, 4*G);
+	 a < (char*)(1024*G);
+	 a += 4*G) {
+      if (UseSharedSpaces
+	  && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
+	// We failed to find an aligned base that will reach.  Fall
+	// back to using our requested addr.
+	metaspace_rs = ReservedSpace(compressed_class_space_size(),
+                                             _reserve_alignment,
+                                             large_pages,
+                                             requested_addr, 0);
+	break;
+      }
+      metaspace_rs = ReservedSpace(compressed_class_space_size(),
+				   _reserve_alignment,
+				   large_pages,
+				   a, 0);
+      if (metaspace_rs.is_reserved())
+	break;
+    }
+  }
+
+#endif // AARCH64
+
   if (!metaspace_rs.is_reserved()) {
     if (UseSharedSpaces) {
       size_t increment = align_size_up(1*G, _reserve_alignment);


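For reference, a minimal standalone sketch of the candidate-address scan in
the metaspace.cpp hunk (align_up_4g and try_reserve_at are illustrative
stand-ins for align_ptr_up() and ReservedSpace, and the fake reservation
policy is made up for the demo):

#include <cstdint>
#include <cstdio>

static const uint64_t G4 = 4ULL * 1024 * 1024 * 1024;     // 4G
static const uint64_t TB = 1024ULL * 1024 * 1024 * 1024;  // 1024G

// Round addr up to the next multiple of 4G, as align_ptr_up(requested_addr, 4*G)
// does in the patch.
static uint64_t align_up_4g(uint64_t addr) {
  return (addr + G4 - 1) & ~(G4 - 1);
}

// Hypothetical stand-in for a ReservedSpace attempt at a fixed address:
// pretend the OS refuses the first few candidates so the loop has work to do.
static bool try_reserve_at(uint64_t addr) {
  return addr >= 3 * G4;
}

// Mirror of the loop in the patch: try successive 4G-aligned bases below
// 1024G until one can be reserved.
static uint64_t reserve_4g_aligned(uint64_t requested) {
  for (uint64_t a = align_up_4g(requested); a < TB; a += G4) {
    if (try_reserve_at(a))
      return a;
  }
  return 0;  // caller falls back to the unaligned requested address
}

int main() {
  uint64_t base = reserve_4g_aligned(0x7f0000000ULL);
  printf("chose base 0x%llx, low 32 bits 0x%llx\n",
         (unsigned long long)base,
         (unsigned long long)(base & 0xffffffffULL));
  return 0;
}

Any base chosen this way has zero low 32 bits, which is exactly the
condition the new macroAssembler fast paths test; if no aligned base can be
reserved (or CDS cannot reach it), the patch falls back to the original
requested address.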