/hg/icedtea8-forest/hotspot: 7 new changesets

andrew at icedtea.classpath.org
Tue Jul 11 02:36:00 UTC 2017


changeset 4dc64f3c3e23 in /hg/icedtea8-forest/hotspot
details: http://icedtea.classpath.org/hg/icedtea8-forest/hotspot?cmd=changeset;node=4dc64f3c3e23
author: gromero
date: Thu Jun 01 20:42:49 2017 -0400

	8175813, PR3394, RH1448880: PPC64: "mbind: Invalid argument" when -XX:+UseNUMA is used
	Reviewed-by: dholmes, zgu
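
	For context, this fix stops sizing the NUMA group set from the highest
	node number and instead asks libnuma how many nodes actually have
	memory. A minimal standalone sketch of that distinction (plain libnuma
	v2, not HotSpot code; build with g++ ... -lnuma):

	    #include <numa.h>     // libnuma v2
	    #include <stdio.h>

	    int main() {
	      if (numa_available() == -1) {
	        printf("no NUMA support\n");
	        return 1;
	      }
	      // The old code sized the group array as numa_max_node() + 1.  On
	      // machines with memory-less or non-consecutive nodes that is not
	      // the number of nodes memory can be bound to, which is what
	      // numa_num_configured_nodes() reports and what the fix uses.
	      int highest    = numa_max_node();
	      int configured = numa_num_configured_nodes();
	      printf("highest node number = %d, nodes with memory = %d\n",
	             highest, configured);
	      for (int n = 0; n <= highest; n++) {
	        // numa_all_nodes_ptr has a bit set for every node the task can
	        // allocate on; the patch wraps this test as
	        // isnode_in_configured_nodes().  (It additionally binds
	        // numa_nodes_ptr to spot nodes that exist without memory.)
	        printf("node %d allocatable: %d\n", n,
	               numa_bitmask_isbitset(numa_all_nodes_ptr, n));
	      }
	      return 0;
	    }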


changeset 13a04e8df5a3 in /hg/icedtea8-forest/hotspot
details: http://icedtea.classpath.org/hg/icedtea8-forest/hotspot?cmd=changeset;node=13a04e8df5a3
author: zgu
date: Tue Jul 04 22:34:34 2017 +0100

	8181055, PR3394, RH1448880: PPC64: "mbind: Invalid argument" still seen after 8175813
	Summary: Use numa_interleave_memory v2 api when available
	Reviewed-by: dholmes, shade, gromero
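
	This follow-up binds the v2 symbol version of numa_interleave_memory,
	whose signature takes a struct bitmask* rather than a raw nodemask. A
	standalone sketch of that versioned lookup (dlvsym is a GNU extension;
	the file name and error handling here are illustrative, not the
	patch's code):

	    #ifndef _GNU_SOURCE
	    #define _GNU_SOURCE
	    #endif
	    #include <dlfcn.h>      // dlvsym(); link with -ldl
	    #include <stddef.h>

	    struct bitmask;          // opaque libnuma v2 type

	    typedef void (*interleave_v1_t)(void* start, size_t size, unsigned long* nodemask);
	    typedef void (*interleave_v2_t)(void* start, size_t size, struct bitmask* mask);

	    int main() {
	      void* handle = dlopen("libnuma.so.1", RTLD_LAZY);
	      if (handle == NULL) {
	        return 1;                               // no libnuma installed
	      }
	      // "libnuma_1.2" is the symbol version carrying the v2 API and
	      // "libnuma_1.1" the old nodemask API; the patch adds
	      // libnuma_v2_dlsym() for the former and keeps libnuma_dlsym()
	      // (with a base-symbol fallback) for the latter.
	      interleave_v2_t v2 = (interleave_v2_t)
	          dlvsym(handle, "numa_interleave_memory", "libnuma_1.2");
	      interleave_v1_t v1 = (interleave_v1_t)
	          dlvsym(handle, "numa_interleave_memory", "libnuma_1.1");
	      if (v1 == NULL) {
	        // Very old libnuma without symbol versions.
	        v1 = (interleave_v1_t) dlsym(handle, "numa_interleave_memory");
	      }
	      // os::Linux::numa_interleave_memory() now prefers v2 when it
	      // resolved, passing numa_all_nodes_ptr instead of the legacy
	      // numa_all_nodes mask.
	      dlclose(handle);
	      return (v2 != NULL || v1 != NULL) ? 0 : 1;
	    }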


changeset 2fee74c55478 in /hg/icedtea8-forest/hotspot
details: http://icedtea.classpath.org/hg/icedtea8-forest/hotspot?cmd=changeset;node=2fee74c55478
author: dholmes
date: Fri Apr 28 21:14:37 2017 -0400

	8179084, PR3409, RH1455694: HotSpot VM fails to start when AggressiveHeap is set
	Reviewed-by: kbarrett, stefank


changeset 3d07e14d65bc in /hg/icedtea8-forest/hotspot
details: http://icedtea.classpath.org/hg/icedtea8-forest/hotspot?cmd=changeset;node=3d07e14d65bc
author: tschatzl
date: Mon May 15 12:20:15 2017 +0200

	8180048, PR3411, RH1449870: Interned string and symbol table leak memory during parallel unlinking
	Summary: Make appending found dead BasicHashtableEntrys to the free list atomic.
	Reviewed-by: ehelin, shade
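
	The hashtable change has each unlinking worker collect its dead
	entries privately and splice the whole chain onto the shared free list
	with a single compare-and-swap. A minimal sketch of that idea using
	std::atomic rather than HotSpot's Atomic wrappers (names are
	illustrative, not the patched BasicHashtable code):

	    #include <atomic>

	    struct Entry { Entry* next; };

	    std::atomic<Entry*> free_list{nullptr};   // shared across workers

	    struct UnlinkContext {       // per-worker; no locking while filling
	      Entry* head = nullptr;
	      Entry* tail = nullptr;
	      int removed = 0;

	      void collect(Entry* e) {   // called for each dead entry unlinked
	        e->next = head;
	        if (tail == nullptr) tail = e;
	        head = e;
	        removed++;
	      }
	    };

	    // Called once per worker after its buckets are processed.
	    void bulk_free(UnlinkContext& ctx) {
	      if (ctx.head == nullptr) return;        // nothing collected
	      Entry* old_head = free_list.load(std::memory_order_relaxed);
	      do {
	        ctx.tail->next = old_head;            // chain private list in front
	      } while (!free_list.compare_exchange_weak(old_head, ctx.head,
	                                                std::memory_order_release,
	                                                std::memory_order_relaxed));
	    }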


changeset 1faf7c170899 in /hg/icedtea8-forest/hotspot
details: http://icedtea.classpath.org/hg/icedtea8-forest/hotspot?cmd=changeset;node=1faf7c170899
author: jcm
date: Wed Jan 11 04:26:49 2017 -0800

	8164293, PR3412, RH1459641: HotSpot leaking memory in long-running requests
	Summary: Applied RMs in sweep_code_cache and related codes.
	Reviewed-by: kvn, thartmann
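
	"RMs" in the summary are ResourceMarks: memory allocated from a
	HotSpot thread's resource area is only released when an enclosing
	ResourceMark is destroyed, so a long-running sweeper loop without one
	keeps growing. An illustrative HotSpot-style sketch, not the actual
	patch hunk (helper names are hypothetical, and it assumes the VM's
	internal resourceArea.hpp, so it only builds inside the HotSpot tree):

	    #include "memory/resourceArea.hpp"  // provides ResourceMark

	    // more_work() / do_one_unit() stand in for the sweeper's real loop
	    // body; the point is only the placement of the mark.
	    void sweep_like_loop(Thread* thread) {
	      while (more_work()) {
	        ResourceMark rm(thread);  // resource-area memory allocated below
	                                  // is released when rm goes out of
	                                  // scope, once per iteration, instead
	                                  // of accumulating for the life of the
	                                  // sweeper thread
	        do_one_unit();            // may allocate from the resource area
	      }
	    }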


changeset 654b7fcb4932 in /hg/icedtea8-forest/hotspot
details: http://icedtea.classpath.org/hg/icedtea8-forest/hotspot?cmd=changeset;node=654b7fcb4932
author: shade
date: Wed Mar 08 14:22:52 2017 +0100

	8175887, PR3415: C1 value numbering handling of Unsafe.get*Volatile is incorrect
	Reviewed-by: vlivanov
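
	The C1 fix makes a volatile Unsafe get kill the value map, so values
	loaded before it cannot be reused after it. A rough C++11 analogue of
	why that matters (not the Java regression test and not compiler code;
	in Java the early plain read is merely racy, whereas in C++ it is
	formally a data race):

	    #include <atomic>

	    // Another thread stores to 'data' and then does a release store
	    // to 'ready'.
	    int data = 0;
	    std::atomic<int> ready{0};

	    int reader() {
	      int before = data;             // plain load, may be stale
	      while (ready.load(std::memory_order_acquire) == 0) {
	        // spin until the writer publishes
	      }
	      // This read must be issued again: the acquire above guarantees it
	      // sees the value stored before the matching release.  Folding it
	      // to 'before' -- which is what C1's value numbering could do for
	      // an Unsafe volatile get before this fix -- breaks that guarantee.
	      int after = data;
	      return after - before;
	    }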


changeset 735f782200d6 in /hg/icedtea8-forest/hotspot
details: http://icedtea.classpath.org/hg/icedtea8-forest/hotspot?cmd=changeset;node=735f782200d6
author: andrew
date: Tue Jul 11 03:45:21 2017 +0100

	Added tag icedtea-3.5.0pre01 for changeset 654b7fcb4932


diffstat:

 .hgtags                                       |    1 +
 src/os/linux/vm/os_linux.cpp                  |  122 ++++++++++++++++++++++---
 src/os/linux/vm/os_linux.hpp                  |   48 ++++++++++-
 src/share/vm/c1/c1_ValueMap.hpp               |    8 +-
 src/share/vm/classfile/symbolTable.cpp        |   51 +++++++---
 src/share/vm/classfile/symbolTable.hpp        |   17 ++-
 src/share/vm/code/nmethod.cpp                 |    1 +
 src/share/vm/runtime/arguments.cpp            |    2 -
 src/share/vm/runtime/sweeper.cpp              |    3 +
 src/share/vm/runtime/vmStructs.cpp            |    2 +-
 src/share/vm/utilities/hashtable.cpp          |   31 ++++++-
 src/share/vm/utilities/hashtable.hpp          |   22 ++++-
 test/TEST.groups                              |    3 +-
 test/compiler/c1/UnsafeVolatileGuardTest.java |   72 +++++++++++++++
 test/compiler/c1/VolatileGuardTest.java       |   52 +++++++++++
 test/gc/arguments/TestAggressiveHeap.java     |   91 +++++++++++++++++++
 16 files changed, 476 insertions(+), 50 deletions(-)

diffs (truncated from 933 to 500 lines):

diff -r d77ea5a8ca52 -r 735f782200d6 .hgtags
--- a/.hgtags	Tue May 16 18:03:46 2017 +0100
+++ b/.hgtags	Tue Jul 11 03:45:21 2017 +0100
@@ -998,3 +998,4 @@
 56e71d16083904ceddfdd1d66312582a42781646 jdk8u131-b09
 1da23ae49386608550596502d90a381ee6c1dfaa jdk8u131-b10
 00b7bbd261c972b38d4ecc2925f445c28de6fcb3 icedtea-3.4.0
+654b7fcb4932d48063f5f1fba0c8994db5e02976 icedtea-3.5.0pre01
diff -r d77ea5a8ca52 -r 735f782200d6 src/os/linux/vm/os_linux.cpp
--- a/src/os/linux/vm/os_linux.cpp	Tue May 16 18:03:46 2017 +0100
+++ b/src/os/linux/vm/os_linux.cpp	Tue Jul 11 03:45:21 2017 +0100
@@ -2747,8 +2747,9 @@
 bool os::numa_topology_changed()   { return false; }
 
 size_t os::numa_get_groups_num() {
-  int max_node = Linux::numa_max_node();
-  return max_node > 0 ? max_node + 1 : 1;
+  // Return just the number of nodes in which it's possible to allocate memory
+  // (in numa terminology, configured nodes).
+  return Linux::numa_num_configured_nodes();
 }
 
 int os::numa_get_group_id() {
@@ -2762,11 +2763,33 @@
   return 0;
 }
 
+int os::Linux::get_existing_num_nodes() {
+  size_t node;
+  size_t highest_node_number = Linux::numa_max_node();
+  int num_nodes = 0;
+
+  // Get the total number of nodes in the system including nodes without memory.
+  for (node = 0; node <= highest_node_number; node++) {
+    if (isnode_in_existing_nodes(node)) {
+      num_nodes++;
+    }
+  }
+  return num_nodes;
+}
+
 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
-  for (size_t i = 0; i < size; i++) {
-    ids[i] = i;
-  }
-  return size;
+  size_t highest_node_number = Linux::numa_max_node();
+  size_t i = 0;
+
+  // Map all node ids in which is possible to allocate memory. Also nodes are
+  // not always consecutively available, i.e. available from 0 to the highest
+  // node number.
+  for (size_t node = 0; node <= highest_node_number; node++) {
+    if (Linux::isnode_in_configured_nodes(node)) {
+      ids[i++] = node;
+    }
+  }
+  return i;
 }
 
 bool os::get_page_info(char *start, page_info* info) {
@@ -2807,11 +2830,8 @@
 extern "C" JNIEXPORT void numa_error(char *where) { }
 extern "C" JNIEXPORT int fork1() { return fork(); }
 
-
-// If we are running with libnuma version > 2, then we should
-// be trying to use symbols with versions 1.1
-// If we are running with earlier version, which did not have symbol versions,
-// we should use the base version.
+// Handle request to load libnuma symbol version 1.1 (API v1). If it fails
+// load symbol from base version instead.
 void* os::Linux::libnuma_dlsym(void* handle, const char *name) {
   void *f = dlvsym(handle, name, "libnuma_1.1");
   if (f == NULL) {
@@ -2820,6 +2840,12 @@
   return f;
 }
 
+// Handle request to load libnuma symbol version 1.2 (API v2) only.
+// Return NULL if the symbol is not defined in this particular version.
+void* os::Linux::libnuma_v2_dlsym(void* handle, const char* name) {
+  return dlvsym(handle, name, "libnuma_1.2");
+}
+
 bool os::Linux::libnuma_init() {
   // sched_getcpu() should be in libc.
   set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
@@ -2836,18 +2862,30 @@
                                            libnuma_dlsym(handle, "numa_node_to_cpus")));
       set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t,
                                        libnuma_dlsym(handle, "numa_max_node")));
+      set_numa_num_configured_nodes(CAST_TO_FN_PTR(numa_num_configured_nodes_func_t,
+                                                   libnuma_dlsym(handle, "numa_num_configured_nodes")));
       set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,
                                         libnuma_dlsym(handle, "numa_available")));
       set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
                                             libnuma_dlsym(handle, "numa_tonode_memory")));
       set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
-                                            libnuma_dlsym(handle, "numa_interleave_memory")));
+                                                libnuma_dlsym(handle, "numa_interleave_memory")));
+      set_numa_interleave_memory_v2(CAST_TO_FN_PTR(numa_interleave_memory_v2_func_t,
+                                                libnuma_v2_dlsym(handle, "numa_interleave_memory")));
       set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
-                                            libnuma_dlsym(handle, "numa_set_bind_policy")));
-
+                                              libnuma_dlsym(handle, "numa_set_bind_policy")));
+      set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t,
+                                               libnuma_dlsym(handle, "numa_bitmask_isbitset")));
+      set_numa_distance(CAST_TO_FN_PTR(numa_distance_func_t,
+                                       libnuma_dlsym(handle, "numa_distance")));
 
       if (numa_available() != -1) {
         set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
+        set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr"));
+        set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr"));
+        // Create an index -> node mapping, since nodes are not always consecutive
+        _nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
+        rebuild_nindex_to_node_map();
         // Create a cpu -> node mapping
         _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
         rebuild_cpu_to_node_map();
@@ -2858,6 +2896,17 @@
   return false;
 }
 
+void os::Linux::rebuild_nindex_to_node_map() {
+  int highest_node_number = Linux::numa_max_node();
+
+  nindex_to_node()->clear();
+  for (int node = 0; node <= highest_node_number; node++) {
+    if (Linux::isnode_in_existing_nodes(node)) {
+      nindex_to_node()->append(node);
+    }
+  }
+}
+
 // rebuild_cpu_to_node_map() constructs a table mapping cpud id to node id.
 // The table is later used in get_node_by_cpu().
 void os::Linux::rebuild_cpu_to_node_map() {
@@ -2877,16 +2926,46 @@
 
   cpu_to_node()->clear();
   cpu_to_node()->at_grow(cpu_num - 1);
-  size_t node_num = numa_get_groups_num();
-
+
+  size_t node_num = get_existing_num_nodes();
+
+  int distance = 0;
+  int closest_distance = INT_MAX;
+  int closest_node = 0;
   unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size, mtInternal);
   for (size_t i = 0; i < node_num; i++) {
-    if (numa_node_to_cpus(i, cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) {
+    // Check if node is configured (not a memory-less node). If it is not, find
+    // the closest configured node.
+    if (!isnode_in_configured_nodes(nindex_to_node()->at(i))) {
+      closest_distance = INT_MAX;
+      // Check distance from all remaining nodes in the system. Ignore distance
+      // from itself and from another non-configured node.
+      for (size_t m = 0; m < node_num; m++) {
+        if (m != i && isnode_in_configured_nodes(nindex_to_node()->at(m))) {
+          distance = numa_distance(nindex_to_node()->at(i), nindex_to_node()->at(m));
+          // If a closest node is found, update. There is always at least one
+          // configured node in the system so there is always at least one node
+          // close.
+          if (distance != 0 && distance < closest_distance) {
+            closest_distance = distance;
+            closest_node = nindex_to_node()->at(m);
+          }
+        }
+      }
+     } else {
+       // Current node is already a configured node.
+       closest_node = nindex_to_node()->at(i);
+     }
+
+    // Get cpus from the original node and map them to the closest node. If node
+    // is a configured node (not a memory-less node), then original node and
+    // closest node are the same.
+    if (numa_node_to_cpus(nindex_to_node()->at(i), cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) {
       for (size_t j = 0; j < cpu_map_valid_size; j++) {
         if (cpu_map[j] != 0) {
           for (size_t k = 0; k < BitsPerCLong; k++) {
             if (cpu_map[j] & (1UL << k)) {
-              cpu_to_node()->at_put(j * BitsPerCLong + k, i);
+              cpu_to_node()->at_put(j * BitsPerCLong + k, closest_node);
             }
           }
         }
@@ -2904,14 +2983,21 @@
 }
 
 GrowableArray<int>* os::Linux::_cpu_to_node;
+GrowableArray<int>* os::Linux::_nindex_to_node;
 os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu;
 os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus;
 os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
+os::Linux::numa_num_configured_nodes_func_t os::Linux::_numa_num_configured_nodes;
 os::Linux::numa_available_func_t os::Linux::_numa_available;
 os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
 os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
+os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2;
 os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
+os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
+os::Linux::numa_distance_func_t os::Linux::_numa_distance;
 unsigned long* os::Linux::_numa_all_nodes;
+struct bitmask* os::Linux::_numa_all_nodes_ptr;
+struct bitmask* os::Linux::_numa_nodes_ptr;
 
 bool os::pd_uncommit_memory(char* addr, size_t size) {
   uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
diff -r d77ea5a8ca52 -r 735f782200d6 src/os/linux/vm/os_linux.hpp
--- a/src/os/linux/vm/os_linux.hpp	Tue May 16 18:03:46 2017 +0100
+++ b/src/os/linux/vm/os_linux.hpp	Tue Jul 11 03:45:21 2017 +0100
@@ -67,6 +67,7 @@
   static bool _supports_fast_thread_cpu_time;
 
   static GrowableArray<int>* _cpu_to_node;
+  static GrowableArray<int>* _nindex_to_node;
 
  protected:
 
@@ -94,7 +95,9 @@
   static void set_is_floating_stack()         { _is_floating_stack = true; }
 
   static void rebuild_cpu_to_node_map();
+  static void rebuild_nindex_to_node_map();
   static GrowableArray<int>* cpu_to_node()    { return _cpu_to_node; }
+  static GrowableArray<int>* nindex_to_node()  { return _nindex_to_node; }
 
   static size_t find_large_page_size();
   static size_t setup_large_page_size();
@@ -187,6 +190,8 @@
   static void libpthread_init();
   static bool libnuma_init();
   static void* libnuma_dlsym(void* handle, const char* name);
+  // libnuma v2 (libnuma_1.2) symbols
+  static void* libnuma_v2_dlsym(void* handle, const char* name);
   // Minimum stack size a thread can be created with (allowing
   // the VM to completely create the thread and enter user code)
   static size_t min_stack_allowed;
@@ -243,28 +248,45 @@
   typedef int (*sched_getcpu_func_t)(void);
   typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
   typedef int (*numa_max_node_func_t)(void);
+  typedef int (*numa_num_configured_nodes_func_t)(void);
   typedef int (*numa_available_func_t)(void);
   typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node);
   typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask);
+  typedef void (*numa_interleave_memory_v2_func_t)(void *start, size_t size, struct bitmask* mask);
+
   typedef void (*numa_set_bind_policy_func_t)(int policy);
+  typedef int (*numa_bitmask_isbitset_func_t)(struct bitmask *bmp, unsigned int n);
+  typedef int (*numa_distance_func_t)(int node1, int node2);
 
   static sched_getcpu_func_t _sched_getcpu;
   static numa_node_to_cpus_func_t _numa_node_to_cpus;
   static numa_max_node_func_t _numa_max_node;
+  static numa_num_configured_nodes_func_t _numa_num_configured_nodes;
   static numa_available_func_t _numa_available;
   static numa_tonode_memory_func_t _numa_tonode_memory;
   static numa_interleave_memory_func_t _numa_interleave_memory;
+  static numa_interleave_memory_v2_func_t _numa_interleave_memory_v2;
   static numa_set_bind_policy_func_t _numa_set_bind_policy;
+  static numa_bitmask_isbitset_func_t _numa_bitmask_isbitset;
+  static numa_distance_func_t _numa_distance;
   static unsigned long* _numa_all_nodes;
+  static struct bitmask* _numa_all_nodes_ptr;
+  static struct bitmask* _numa_nodes_ptr;
 
   static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; }
   static void set_numa_node_to_cpus(numa_node_to_cpus_func_t func) { _numa_node_to_cpus = func; }
   static void set_numa_max_node(numa_max_node_func_t func) { _numa_max_node = func; }
+  static void set_numa_num_configured_nodes(numa_num_configured_nodes_func_t func) { _numa_num_configured_nodes = func; }
   static void set_numa_available(numa_available_func_t func) { _numa_available = func; }
   static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
   static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
+  static void set_numa_interleave_memory_v2(numa_interleave_memory_v2_func_t func) { _numa_interleave_memory_v2 = func; }
   static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; }
+  static void set_numa_bitmask_isbitset(numa_bitmask_isbitset_func_t func) { _numa_bitmask_isbitset = func; }
+  static void set_numa_distance(numa_distance_func_t func) { _numa_distance = func; }
   static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
+  static void set_numa_all_nodes_ptr(struct bitmask **ptr) { _numa_all_nodes_ptr = *ptr; }
+  static void set_numa_nodes_ptr(struct bitmask **ptr) { _numa_nodes_ptr = *ptr; }
   static int sched_getcpu_syscall(void);
 public:
   static int sched_getcpu()  { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
@@ -272,12 +294,18 @@
     return _numa_node_to_cpus != NULL ? _numa_node_to_cpus(node, buffer, bufferlen) : -1;
   }
   static int numa_max_node() { return _numa_max_node != NULL ? _numa_max_node() : -1; }
+  static int numa_num_configured_nodes() {
+    return _numa_num_configured_nodes != NULL ? _numa_num_configured_nodes() : -1;
+  }
   static int numa_available() { return _numa_available != NULL ? _numa_available() : -1; }
   static int numa_tonode_memory(void *start, size_t size, int node) {
     return _numa_tonode_memory != NULL ? _numa_tonode_memory(start, size, node) : -1;
   }
   static void numa_interleave_memory(void *start, size_t size) {
-    if (_numa_interleave_memory != NULL && _numa_all_nodes != NULL) {
+    // Use v2 api if available
+    if (_numa_interleave_memory_v2 != NULL && _numa_all_nodes_ptr != NULL) {
+      _numa_interleave_memory_v2(start, size, _numa_all_nodes_ptr);
+    } else if (_numa_interleave_memory != NULL && _numa_all_nodes != NULL) {
       _numa_interleave_memory(start, size, _numa_all_nodes);
     }
   }
@@ -286,7 +314,25 @@
       _numa_set_bind_policy(policy);
     }
   }
+  static int numa_distance(int node1, int node2) {
+    return _numa_distance != NULL ? _numa_distance(node1, node2) : -1;
+  }
   static int get_node_by_cpu(int cpu_id);
+  static int get_existing_num_nodes();
+  // Check if numa node is configured (non-zero memory node).
+  static bool isnode_in_configured_nodes(unsigned int n) {
+    if (_numa_bitmask_isbitset != NULL && _numa_all_nodes_ptr != NULL) {
+      return _numa_bitmask_isbitset(_numa_all_nodes_ptr, n);
+    } else
+      return 0;
+  }
+  // Check if numa node exists in the system (including zero memory nodes).
+  static bool isnode_in_existing_nodes(unsigned int n) {
+    if (_numa_bitmask_isbitset != NULL && _numa_nodes_ptr != NULL) {
+      return _numa_bitmask_isbitset(_numa_nodes_ptr, n);
+    } else
+      return 0;
+  }
 };
 
 
diff -r d77ea5a8ca52 -r 735f782200d6 src/share/vm/c1/c1_ValueMap.hpp
--- a/src/share/vm/c1/c1_ValueMap.hpp	Tue May 16 18:03:46 2017 +0100
+++ b/src/share/vm/c1/c1_ValueMap.hpp	Tue Jul 11 03:45:21 2017 +0100
@@ -158,6 +158,12 @@
   void do_UnsafePutRaw   (UnsafePutRaw*    x) { kill_memory(); }
   void do_UnsafePutObject(UnsafePutObject* x) { kill_memory(); }
   void do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) { kill_memory(); }
+  void do_UnsafeGetRaw   (UnsafeGetRaw*    x) { /* nothing to do */ }
+  void do_UnsafeGetObject(UnsafeGetObject* x) {
+    if (x->is_volatile()) { // the JMM requires this
+      kill_memory();
+    }
+  }
   void do_Intrinsic      (Intrinsic*       x) { if (!x->preserves_state()) kill_memory(); }
 
   void do_Phi            (Phi*             x) { /* nothing to do */ }
@@ -198,8 +204,6 @@
   void do_OsrEntry       (OsrEntry*        x) { /* nothing to do */ }
   void do_ExceptionObject(ExceptionObject* x) { /* nothing to do */ }
   void do_RoundFP        (RoundFP*         x) { /* nothing to do */ }
-  void do_UnsafeGetRaw   (UnsafeGetRaw*    x) { /* nothing to do */ }
-  void do_UnsafeGetObject(UnsafeGetObject* x) { /* nothing to do */ }
   void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) { /* nothing to do */ }
   void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ }
   void do_ProfileCall    (ProfileCall*     x) { /* nothing to do */ }
diff -r d77ea5a8ca52 -r 735f782200d6 src/share/vm/classfile/symbolTable.cpp
--- a/src/share/vm/classfile/symbolTable.cpp	Tue May 16 18:03:46 2017 +0100
+++ b/src/share/vm/classfile/symbolTable.cpp	Tue Jul 11 03:45:21 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -96,7 +96,7 @@
 int SymbolTable::_symbols_counted = 0;
 volatile int SymbolTable::_parallel_claimed_idx = 0;
 
-void SymbolTable::buckets_unlink(int start_idx, int end_idx, int* processed, int* removed, size_t* memory_total) {
+void SymbolTable::buckets_unlink(int start_idx, int end_idx, BucketUnlinkContext* context, size_t* memory_total) {
   for (int i = start_idx; i < end_idx; ++i) {
     HashtableEntry<Symbol*, mtSymbol>** p = the_table()->bucket_addr(i);
     HashtableEntry<Symbol*, mtSymbol>* entry = the_table()->bucket(i);
@@ -110,15 +110,14 @@
       }
       Symbol* s = entry->literal();
       (*memory_total) += s->size();
-      (*processed)++;
+      context->_num_processed++;
       assert(s != NULL, "just checking");
       // If reference count is zero, remove.
       if (s->refcount() == 0) {
         assert(!entry->is_shared(), "shared entries should be kept live");
         delete s;
-        (*removed)++;
         *p = entry->next();
-        the_table()->free_entry(entry);
+        context->free_entry(entry);
       } else {
         p = entry->next_addr();
       }
@@ -132,9 +131,14 @@
 // This is done late during GC.
 void SymbolTable::unlink(int* processed, int* removed) {
   size_t memory_total = 0;
-  buckets_unlink(0, the_table()->table_size(), processed, removed, &memory_total);
-  _symbols_removed += *removed;
-  _symbols_counted += *processed;
+  BucketUnlinkContext context;
+  buckets_unlink(0, the_table()->table_size(), &context, &memory_total);
+  _the_table->bulk_free_entries(&context);
+  *processed = context._num_processed;
+  *removed = context._num_removed;
+
+  _symbols_removed = context._num_removed;
+  _symbols_counted = context._num_processed;
   // Exclude printing for normal PrintGCDetails because people parse
   // this output.
   if (PrintGCDetails && Verbose && WizardMode) {
@@ -148,6 +152,7 @@
 
   size_t memory_total = 0;
 
+  BucketUnlinkContext context;
   for (;;) {
     // Grab next set of buckets to scan
     int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize;
@@ -157,10 +162,15 @@
     }
 
     int end_idx = MIN2(limit, start_idx + ClaimChunkSize);
-    buckets_unlink(start_idx, end_idx, processed, removed, &memory_total);
+    buckets_unlink(start_idx, end_idx, &context, &memory_total);
   }
-  Atomic::add(*processed, &_symbols_counted);
-  Atomic::add(*removed, &_symbols_removed);
+
+  _the_table->bulk_free_entries(&context);
+  *processed = context._num_processed;
+  *removed = context._num_removed;
+
+  Atomic::add(context._num_processed, &_symbols_counted);
+  Atomic::add(context._num_removed, &_symbols_removed);
   // Exclude printing for normal PrintGCDetails because people parse
   // this output.
   if (PrintGCDetails && Verbose && WizardMode) {
@@ -811,7 +821,11 @@
 }
 
 void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int* processed, int* removed) {
-  buckets_unlink_or_oops_do(is_alive, f, 0, the_table()->table_size(), processed, removed);
+  BucketUnlinkContext context;
+  buckets_unlink_or_oops_do(is_alive, f, 0, the_table()->table_size(), &context);
+  _the_table->bulk_free_entries(&context);
+  *processed = context._num_processed;
+  *removed = context._num_removed;
 }
 
 void StringTable::possibly_parallel_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int* processed, int* removed) {
@@ -820,6 +834,7 @@
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   const int limit = the_table()->table_size();
 
+  BucketUnlinkContext context;
   for (;;) {
     // Grab next set of buckets to scan
     int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize;
@@ -829,8 +844,11 @@
     }
 
     int end_idx = MIN2(limit, start_idx + ClaimChunkSize);
-    buckets_unlink_or_oops_do(is_alive, f, start_idx, end_idx, processed, removed);
+    buckets_unlink_or_oops_do(is_alive, f, start_idx, end_idx, &context);
   }
+  _the_table->bulk_free_entries(&context);
+  *processed = context._num_processed;
+  *removed = context._num_removed;
 }
 
 void StringTable::buckets_oops_do(OopClosure* f, int start_idx, int end_idx) {
@@ -856,7 +874,7 @@
   }
 }
 
-void StringTable::buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, int* processed, int* removed) {
+void StringTable::buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, BucketUnlinkContext* context) {
   const int limit = the_table()->table_size();
 
   assert(0 <= start_idx && start_idx <= limit,
@@ -880,10 +898,9 @@
         p = entry->next_addr();
       } else {
         *p = entry->next();
-        the_table()->free_entry(entry);
-        (*removed)++;
+        context->free_entry(entry);
       }
-      (*processed)++;
+      context->_num_processed++;
       entry = *p;
     }
   }
diff -r d77ea5a8ca52 -r 735f782200d6 src/share/vm/classfile/symbolTable.hpp
--- a/src/share/vm/classfile/symbolTable.hpp	Tue May 16 18:03:46 2017 +0100

