[aarch64-port-dev] Freeze aarch64/jdk8

Andrew Dinn adinn at redhat.com
Thu Feb 25 12:26:18 UTC 2016



On 25/02/16 11:57, Andrew Haley wrote:
> On 02/25/2016 11:53 AM, Andrew Dinn wrote:
>> Shall I push these changes now? Or do you want to vet some of the patches?
> 
> Are any of them outside AArch64-specific directories?

Yes, in quite a few cases -- but they all appear to be backports of
changes also made upstream. See below for details. N.B. the revision
ids given are for the aarch64/jdk8/hotspot tree.

regards,


Andrew Dinn
-----------
Senior Principal Software Engineer
Red Hat UK Ltd
Registered in UK and Wales under Company Registration No. 3798903
Directors: Michael Cunningham (US), Michael O'Neill (Ireland), Paul
Argiry (US)


8136596.patch
revid: 8561
8136596: Remove MemBarRelease when final field's allocation is NoEscape
or ArgEscape

This changes 3 files. The first is src/share/vm/opto/callnode.hpp:

@@ -894,6 +894,18 @@

   // Convenience for initialization->maybe_set_complete(phase)
   bool maybe_set_complete(PhaseGVN* phase);
+
+  // Return true if the allocation does not escape the thread; its escape
+  // state must be NoEscape or ArgEscape. InitializeNode._does_not_escape
+  // is true when the allocation's escape state is NoEscape or
+  // ArgEscape. When the allocation's InitializeNode is NULL, check the
+  // AllocateNode._is_non_escaping flag instead.
+  // AllocateNode._is_non_escaping is true when the escape state is
+  // NoEscape.
+  bool does_not_escape_thread() {
+    InitializeNode* init = NULL;
+    return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());
+  }
 };

 //------------------------------AllocateArray---------------------------------


The second is src/share/vm/opto/macro.cpp:

@@ -1385,7 +1385,8 @@
     // MemBarStoreStore so that stores that initialize this object
     // can't be reordered with a subsequent store that makes this
     // object accessible by other threads.
-    if (init == NULL || (!init->is_complete_with_arraycopy() && !init->does_not_escape())) {
+    if (!alloc->does_not_escape_thread() &&
+        (init == NULL || !init->is_complete_with_arraycopy())) {
       if (init == NULL || init->req() < InitializeNode::RawStores) {
         // No InitializeNode or no stores captured by zeroing
         // elimination. Simply add the MemBarStoreStore after object

The third is src/share/vm/opto/memnode.cpp:

@@ -3065,7 +3065,7 @@
       // Final field stores.
      Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase);
       if ((alloc != NULL) && alloc->is_Allocate() &&
-          alloc->as_Allocate()->_is_non_escaping) {
+          alloc->as_Allocate()->does_not_escape_thread()) {
         // The allocated object does not escape.
         eliminate = true;
       }
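
The MemBarRelease in question is the barrier that orders a final
field's initializing stores before the object reference is published
to other threads; if the allocation never escapes the thread, there is
nothing to publish and the barrier is dead weight. A minimal sketch of
that reasoning in std::atomic terms (an analogy, not HotSpot code):

#include <atomic>

struct Obj { int final_field; };

std::atomic<Obj*> shared{nullptr};

void publishing_use() {
  Obj* o = new Obj{42};                        // initializing store
  shared.store(o, std::memory_order_release);  // publication needs release
}

void non_escaping_use() {
  Obj* o = new Obj{42};    // NoEscape: never stored anywhere another
  int x = o->final_field;  // thread can see it, so no release barrier
  (void)x;                 // is needed before its use
  delete o;
}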


8131645.patch
revid:
8131645: crash on Cavium when using G1

This changes src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp:

@@ -200,6 +200,9 @@

 void G1CodeRootSet::allocate_small_table() {
   _table = new CodeRootSetTable(SmallSize);
+  CodeRootSetTable* temp = new CodeRootSetTable(SmallSize);
+
+  OrderAccess::release_store_ptr(&_table, temp);
 }

 void CodeRootSetTable::purge_list_append(CodeRootSetTable* table) {
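
The bug here is a publication race: on a weakly ordered AArch64 core
such as Cavium's, a plain store of _table can become visible to a
concurrent reader before the stores that constructed the table itself,
and release_store_ptr closes that window. A rough std::atomic analogue
(illustrative only -- the G1 reader side is not shown in this mail):

#include <atomic>

struct CodeRootTable { void* buckets[16]; };

std::atomic<CodeRootTable*> g_table{nullptr};

void allocate_table() {
  CodeRootTable* temp = new CodeRootTable();       // construct fully first
  g_table.store(temp, std::memory_order_release);  // then publish with release
}

CodeRootTable* current_table() {
  // the matching acquire ensures a non-null result is fully constructed
  return g_table.load(std::memory_order_acquire);
}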


volcas.patch
revid: 8568
Backport optimization of volatile puts/gets and CAS to use ldar/stlr

This changes src/share/vm/opto/graphKit.cpp:

@@ -3803,7 +3803,7 @@

   // Smash zero into card
   if( !UseConcMarkSweepGC ) {
-    __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::release);
+    __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::unordered);
   } else {
     // Specialized path for CM store barrier
     __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
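
For reference, the heart of the backported optimization is that
AArch64 acquire loads and release stores have direct ldar/stlr
encodings, so volatile accesses no longer need separate dmb barriers;
the hunk above additionally relaxes the trailing card-table store from
release to unordered, matching the upstream change. A sketch in
std::atomic terms (illustrative, not the HotSpot code):

#include <atomic>

std::atomic<long> v{0};

long volatile_get() {
  return v.load(std::memory_order_acquire);   // compiles to ldar on AArch64
}

void volatile_put(long x) {
  v.store(x, std::memory_order_release);      // compiles to stlr on AArch64
}

bool volatile_cas(long expected, long update) {
  // compiles to a ldaxr/stlxr loop (or casal with the LSE extensions)
  return v.compare_exchange_strong(expected, update);
}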


8131645-correction.patch
revid: 8569
Fix a thinko when backporting 8131645: the table ended up being
allocated twice, leaking the first allocation. This changes
src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp again:

@@ -199,7 +199,6 @@
 }

 void G1CodeRootSet::allocate_small_table() {
-  _table = new CodeRootSetTable(SmallSize);
   CodeRootSetTable* temp = new CodeRootSetTable(SmallSize);

   OrderAccess::release_store_ptr(&_table, temp);
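
For clarity, allocate_small_table() after both patches are applied:

void G1CodeRootSet::allocate_small_table() {
  CodeRootSetTable* temp = new CodeRootSetTable(SmallSize);

  OrderAccess::release_store_ptr(&_table, temp);
}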



8138966.patch
revid: 8573
8138966: Intermittent SEGV running ParallelGC

This changes src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp:


@@ -348,7 +348,7 @@
     HeapWord*            _partial_obj_addr;
     region_sz_t          _partial_obj_size;
     region_sz_t volatile _dc_and_los;
-    bool                 _blocks_filled;
+    bool        volatile _blocks_filled;

 #ifdef ASSERT
     size_t               _blocks_filled_count;   // Number of block table fills.
@@ -499,7 +499,9 @@
 inline bool
 ParallelCompactData::RegionData::blocks_filled() const
 {
-  return _blocks_filled;
+  bool result = _blocks_filled;
+  OrderAccess::acquire();
+  return result;
 }

 #ifdef ASSERT
@@ -513,6 +515,7 @@
 inline void
 ParallelCompactData::RegionData::set_blocks_filled()
 {
+  OrderAccess::release();
   _blocks_filled = true;
   // Debug builds count the number of times the table was filled.
   DEBUG_ONLY(Atomic::inc_ptr(&_blocks_filled_count));
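
This is the classic release/acquire flag pattern: the writer must make
its block-table fills visible before it sets the flag, and the reader
must not let reads of the table float above the flag check -- on
AArch64 neither is guaranteed without barriers. A std::atomic analogue
(illustrative, not the HotSpot code):

#include <atomic>

int block_table[64];
std::atomic<bool> blocks_filled{false};

void fill_blocks() {
  for (int i = 0; i < 64; i++) block_table[i] = i;       // fill the table
  blocks_filled.store(true, std::memory_order_release);  // then set the flag
}

bool try_read(int i, int* out) {
  if (!blocks_filled.load(std::memory_order_acquire))
    return false;
  *out = block_table[i];  // safe: acquire pairs with the release above
  return true;
}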

largecodecache.patch
revid: 8576
Add support for large code cache

This makes changes to two files.

The first is src/share/vm/runtime/arguments.cpp:

@@ -1137,9 +1137,8 @@
   }
   // Increase the code cache size - tiered compiles a lot more.
   if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
-    FLAG_SET_DEFAULT(ReservedCodeCacheSize, ReservedCodeCacheSize * 5);
-    // The maximum B/BL offset range on AArch64 is 128MB
-    AARCH64_ONLY(FLAG_SET_DEFAULT(ReservedCodeCacheSize, MIN2(ReservedCodeCacheSize, 128*M)));
+    FLAG_SET_DEFAULT(ReservedCodeCacheSize,
+                     MIN2(CODE_CACHE_DEFAULT_LIMIT, ReservedCodeCacheSize * 5));
   }
   if (!UseInterpreter) { // -Xcomp
     Tier3InvokeNotifyFreqLog = 0;
@@ -2476,11 +2475,11 @@
                 "Invalid ReservedCodeCacheSize=%dK. Must be at least
%uK.\n", ReservedCodeCacheSize/K,
                 min_code_cache_size/K);
     status = false;
-  } else if (ReservedCodeCacheSize > 2*G) {
-    // Code cache size larger than MAXINT is not supported.
+  } else if (ReservedCodeCacheSize > CODE_CACHE_SIZE_LIMIT) {
+    // Code cache size larger than CODE_CACHE_SIZE_LIMIT is not supported.
     jio_fprintf(defaultStream::error_stream(),
                 "Invalid ReservedCodeCacheSize=%dM. Must be at most
%uM.\n", ReservedCodeCacheSize/M,
-                (2*G)/M);
+                CODE_CACHE_SIZE_LIMIT/M);
     status = false;
   }


The second is src/share/vm/utilities/globalDefinitions.hpp:

@@ -414,6 +414,11 @@
   ProfileRTM = 0x0  // Use RTM with abort ratio calculation
 };

+// The maximum size of the code cache.  Can be overridden by targets.
+#define CODE_CACHE_SIZE_LIMIT (2*G)
+// Allow targets to reduce the default size of the code cache.
+#define CODE_CACHE_DEFAULT_LIMIT CODE_CACHE_SIZE_LIMIT
+
 #ifdef TARGET_ARCH_x86
 # include "globalDefinitions_x86.hpp"
 #endif
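
Not shown in this mail, but presumably the point of the new
CODE_CACHE_DEFAULT_LIMIT macro is that the AArch64 target's
globalDefinitions_aarch64.hpp can now override it, along these lines
(an assumed sketch, not quoted from the patch):

// The maximum B/BL branch offset range on AArch64 is 128MB, so cap
// the default code cache size accordingly (assumed per-target override).
#undef  CODE_CACHE_DEFAULT_LIMIT
#define CODE_CACHE_DEFAULT_LIMIT (128*M)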


8145438.patch
revid: 8583
8145438: Guarantee failures since 8144028: Use AArch64 bit-test
instructions in C2

This makes a small change to src/share/vm/adlc/formssel.cpp:

@@ -1239,7 +1239,8 @@
       !is_short_branch() &&     // Don't match another short branch variant
       reduce_result() != NULL &&
       strcmp(reduce_result(), short_branch->reduce_result()) == 0 &&
-      _matrule->equivalent(AD.globalNames(), short_branch->_matrule)) {
+      _matrule->equivalent(AD.globalNames(), short_branch->_matrule) &&
+      equivalent_predicates(this, short_branch)) {
     // The instructions are equivalent.

     // Now verify that both instructions have the same parameters and
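
The failure mode: two instruct forms can have equivalent match rules
yet be guarded by different predicates (the AArch64 bit-test branches
added by 8144028 are guarded by predicates on their operands), so adlc
could substitute a short-branch variant whose predicate did not hold.
A rough sketch of what the extra check has to establish (assumed
names, not the actual adlc code):

#include <cstring>

// Illustrative stand-in for adlc's instruction form.
struct InstrForm {
  const char* predicate;  // predicate text, or NULL when unpredicated
};

// Two forms are interchangeable only if their predicates agree.
bool predicates_agree(const InstrForm* a, const InstrForm* b) {
  if (a->predicate == b->predicate) return true;   // both NULL (or same text)
  if (a->predicate == nullptr || b->predicate == nullptr) return false;
  return std::strcmp(a->predicate, b->predicate) == 0;  // identical text
}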

