RFR(T): 8230327: Make G1DirtyCardQueueSet free-id init unconditional
Leo Korinth
leo.korinth at oracle.com
Thu Aug 29 09:49:39 UTC 2019
On 29/08/2019 02:27, Kim Barrett wrote:
> Please review this trivial cleanup of G1DirtyCardQueueSet
> initialization. With the separation of G1RedirtyCardsQueueSet from
> G1DirtyCardQueueSet, the latter is now a singleton class that should
> always have an associated G1FreeIdSet. We can now unconditionally
> construct that object when constructing the DCQS.
>
> CR:
> https://bugs.openjdk.java.net/browse/JDK-8230327
>
> Webrev:
> http://cr.openjdk.java.net/~kbarrett/8230327/open.00/
>
> Testing:
> Local (linux-x64) hotspot:tier1.
>
Looks good! How about making the G1FreeIdSet a by-value (inline) member
instead of a heap-allocated pointer? I am fine with either.
Thanks,
Leo
Something like:
diff --git a/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp
b/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp
index 91982fd4be..e62e9a80b9 100644
--- a/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp
+++ b/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp
@@ -27,7 +27,6 @@
#include "gc/g1/g1CardTableEntryClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
-#include "gc/g1/g1FreeIdSet.hpp"
#include "gc/g1/g1RedirtyCardsQueue.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
@@ -91,7 +90,7 @@ G1DirtyCardQueueSet::G1DirtyCardQueueSet(bool
notify_when_complete) :
_notify_when_complete(notify_when_complete),
_max_completed_buffers(MaxCompletedBuffersUnlimited),
_completed_buffers_padding(0),
- _free_ids(new G1FreeIdSet(0, num_par_ids())),
+ _free_ids(0, num_par_ids()),
_processed_buffers_mut(0),
_processed_buffers_rs_thread(0)
{
@@ -100,7 +99,6 @@ G1DirtyCardQueueSet::G1DirtyCardQueueSet(bool
notify_when_complete) :
G1DirtyCardQueueSet::~G1DirtyCardQueueSet() {
abandon_completed_buffers();
- delete _free_ids;
}
// Determines how many mutator threads can process the buffers in
parallel.
@@ -287,10 +285,10 @@ bool
G1DirtyCardQueueSet::process_or_enqueue_completed_buffer(BufferNode* node)
}
bool G1DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
- uint worker_i = _free_ids->claim_par_id(); // temporarily claim an id
+ uint worker_i = _free_ids.claim_par_id(); // temporarily claim an id
G1RefineCardConcurrentlyClosure cl;
bool result = apply_closure_to_buffer(&cl, node, worker_i);
- _free_ids->release_par_id(worker_i); // release the id
+ _free_ids.release_par_id(worker_i); // release the id
if (result) {
assert_fully_consumed(node, buffer_size());
diff --git a/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp
b/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp
index a9eb5c2d96..c94867c950 100644
--- a/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp
+++ b/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp
@@ -25,12 +25,12 @@
#ifndef SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
#define SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
+#include "gc/g1/g1FreeIdSet.hpp"
#include "gc/shared/ptrQueue.hpp"
#include "memory/allocation.hpp"
class G1CardTableEntryClosure;
class G1DirtyCardQueueSet;
-class G1FreeIdSet;
class G1RedirtyCardsQueueSet;
class Thread;
class Monitor;
@@ -118,7 +118,7 @@ class G1DirtyCardQueueSet: public PtrQueueSet {
size_t _completed_buffers_padding;
static const size_t MaxCompletedBuffersUnlimited = SIZE_MAX;
- G1FreeIdSet* _free_ids;
+ G1FreeIdSet _free_ids;
// The number of completed buffers processed by mutator and rs thread,
// respectively.
/Leo
-------------- next part --------------
A non-text attachment was scrubbed...
Name: inline.patch
Type: text/x-patch
Size: 2707 bytes
Desc: not available
URL: <https://mail.openjdk.org/pipermail/hotspot-gc-dev/attachments/20190829/be45d43b/inline.patch>
More information about the hotspot-gc-dev
mailing list