Print this page
Split |
Split |
Close |
Expand all |
Collapse all |
--- old/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp
+++ new/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp
1 1 /*
2 2 * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 class CompactibleFreeListSpace;
26 26
27 27 // A class for maintaining a free list of FreeChunk's. The FreeList
28 28 // maintains the structure of the list (head, tail, etc.) plus
29 29 // statistics for allocations from the list. The links between items
30 30 // are not part of FreeList. The statistics are
↓ open down ↓ |
30 lines elided |
↑ open up ↑ |
31 31 // used to make decisions about coalescing FreeChunk's when they
32 32 // are swept during collection.
33 33 //
34 34 // See the corresponding .cpp file for a description of the specifics
35 35 // for that implementation.
36 36
37 37 class Mutex;
38 38
39 39 class FreeList VALUE_OBJ_CLASS_SPEC {
40 40 friend class CompactibleFreeListSpace;
41 + friend class printTreeCensusClosure;
41 42 FreeChunk* _head; // List of free chunks
42 43 FreeChunk* _tail; // Tail of list of free chunks
43 44 size_t _size; // Size in Heap words of each chunks
44 45 ssize_t _count; // Number of entries in list
45 46 size_t _hint; // next larger size list with a positive surplus
46 47
47 48 AllocationStats _allocation_stats; // statistics for smart allocation
48 49
49 50 #ifdef ASSERT
50 51 Mutex* _protecting_lock;
51 52 #endif
52 53
53 54 // Asserts false if the protecting lock (if any) is not held.
54 55 void assert_proper_lock_protection_work() const PRODUCT_RETURN;
55 56 void assert_proper_lock_protection() const {
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
56 57 #ifdef ASSERT
57 58 if (_protecting_lock != NULL)
58 59 assert_proper_lock_protection_work();
59 60 #endif
60 61 }
61 62
62 63 // Initialize the allocation statistics.
63 64 protected:
64 65 void init_statistics();
65 66 void set_count(ssize_t v) { _count = v;}
66 - void increment_count() { _count++; }
67 + void increment_count() { _count++; }
67 68 void decrement_count() {
68 69 _count--;
69 - assert(_count >= 0, "Count should not be negative"); }
70 + assert(_count >= 0, "Count should not be negative");
71 + }
70 72
71 73 public:
72 74 // Constructor
73 75 // Construct a list without any entries.
74 76 FreeList();
75 77 // Construct a list with "fc" as the first (and lone) entry in the list.
76 78 FreeList(FreeChunk* fc);
77 79 // Construct a list which will have a FreeChunk at address "addr" and
78 80 // of size "size" as the first (and lone) entry in the list.
79 81 FreeList(HeapWord* addr, size_t size);
80 82
81 83 // Reset the head, tail, hint, and count of a free list.
82 84 void reset(size_t hint);
83 85
84 86 // Declare the current free list to be protected by the given lock.
85 87 #ifdef ASSERT
86 88 void set_protecting_lock(Mutex* protecting_lock) {
87 89 _protecting_lock = protecting_lock;
88 90 }
89 91 #endif
90 92
91 93 // Accessors.
92 94 FreeChunk* head() const {
93 95 assert_proper_lock_protection();
94 96 return _head;
95 97 }
96 98 void set_head(FreeChunk* v) {
97 99 assert_proper_lock_protection();
98 100 _head = v;
99 101 assert(!_head || _head->size() == _size, "bad chunk size");
100 102 }
101 103 // Set the head of the list and set the prev field of non-null
102 104 // values to NULL.
103 105 void link_head(FreeChunk* v) {
104 106 assert_proper_lock_protection();
105 107 set_head(v);
106 108 // If this method is not used (just set the head instead),
107 109 // this check can be avoided.
108 110 if (v != NULL) {
109 111 v->linkPrev(NULL);
110 112 }
111 113 }
112 114
113 115 FreeChunk* tail() const {
114 116 assert_proper_lock_protection();
115 117 return _tail;
116 118 }
117 119 void set_tail(FreeChunk* v) {
118 120 assert_proper_lock_protection();
119 121 _tail = v;
120 122 assert(!_tail || _tail->size() == _size, "bad chunk size");
121 123 }
122 124 // Set the tail of the list and set the next field of non-null
123 125 // values to NULL.
124 126 void link_tail(FreeChunk* v) {
125 127 assert_proper_lock_protection();
126 128 set_tail(v);
127 129 if (v != NULL) {
128 130 v->clearNext();
129 131 }
130 132 }
131 133
132 134 // No locking checks in read-accessors: lock-free reads (only) are benign.
133 135 // Readers are expected to have the lock if they are doing work that
134 136 // requires atomicity guarantees in sections of code.
135 137 size_t size() const {
136 138 return _size;
137 139 }
138 140 void set_size(size_t v) {
139 141 assert_proper_lock_protection();
140 142 _size = v;
141 143 }
142 144 ssize_t count() const {
143 145 return _count;
144 146 }
145 147 size_t hint() const {
146 148 return _hint;
147 149 }
148 150 void set_hint(size_t v) {
149 151 assert_proper_lock_protection();
150 152 assert(v == 0 || _size < v, "Bad hint"); _hint = v;
151 153 }
↓ open down ↓ |
72 lines elided |
↑ open up ↑ |
152 154
153 155 // Accessors for statistics
154 156 AllocationStats* allocation_stats() {
155 157 assert_proper_lock_protection();
156 158 return &_allocation_stats;
157 159 }
158 160
159 161 ssize_t desired() const {
160 162 return _allocation_stats.desired();
161 163 }
164 + void set_desired(ssize_t v) {
165 + assert_proper_lock_protection();
166 + _allocation_stats.set_desired(v);
167 + }
162 168 void compute_desired(float inter_sweep_current,
163 169 float inter_sweep_estimate) {
164 170 assert_proper_lock_protection();
165 171 _allocation_stats.compute_desired(_count,
166 172 inter_sweep_current,
167 173 inter_sweep_estimate);
168 174 }
169 175 ssize_t coalDesired() const {
170 176 return _allocation_stats.coalDesired();
171 177 }
172 178 void set_coalDesired(ssize_t v) {
173 179 assert_proper_lock_protection();
174 180 _allocation_stats.set_coalDesired(v);
175 181 }
176 182
177 183 ssize_t surplus() const {
178 184 return _allocation_stats.surplus();
179 185 }
180 186 void set_surplus(ssize_t v) {
181 187 assert_proper_lock_protection();
182 188 _allocation_stats.set_surplus(v);
183 189 }
184 190 void increment_surplus() {
185 191 assert_proper_lock_protection();
186 192 _allocation_stats.increment_surplus();
187 193 }
188 194 void decrement_surplus() {
189 195 assert_proper_lock_protection();
190 196 _allocation_stats.decrement_surplus();
191 197 }
192 198
193 199 ssize_t bfrSurp() const {
194 200 return _allocation_stats.bfrSurp();
195 201 }
196 202 void set_bfrSurp(ssize_t v) {
197 203 assert_proper_lock_protection();
198 204 _allocation_stats.set_bfrSurp(v);
199 205 }
200 206 ssize_t prevSweep() const {
201 207 return _allocation_stats.prevSweep();
202 208 }
203 209 void set_prevSweep(ssize_t v) {
204 210 assert_proper_lock_protection();
205 211 _allocation_stats.set_prevSweep(v);
206 212 }
207 213 ssize_t beforeSweep() const {
208 214 return _allocation_stats.beforeSweep();
209 215 }
210 216 void set_beforeSweep(ssize_t v) {
211 217 assert_proper_lock_protection();
212 218 _allocation_stats.set_beforeSweep(v);
213 219 }
214 220
215 221 ssize_t coalBirths() const {
216 222 return _allocation_stats.coalBirths();
217 223 }
218 224 void set_coalBirths(ssize_t v) {
219 225 assert_proper_lock_protection();
220 226 _allocation_stats.set_coalBirths(v);
221 227 }
222 228 void increment_coalBirths() {
223 229 assert_proper_lock_protection();
224 230 _allocation_stats.increment_coalBirths();
225 231 }
226 232
227 233 ssize_t coalDeaths() const {
228 234 return _allocation_stats.coalDeaths();
229 235 }
230 236 void set_coalDeaths(ssize_t v) {
231 237 assert_proper_lock_protection();
232 238 _allocation_stats.set_coalDeaths(v);
233 239 }
234 240 void increment_coalDeaths() {
235 241 assert_proper_lock_protection();
236 242 _allocation_stats.increment_coalDeaths();
237 243 }
238 244
239 245 ssize_t splitBirths() const {
240 246 return _allocation_stats.splitBirths();
241 247 }
242 248 void set_splitBirths(ssize_t v) {
243 249 assert_proper_lock_protection();
244 250 _allocation_stats.set_splitBirths(v);
245 251 }
246 252 void increment_splitBirths() {
247 253 assert_proper_lock_protection();
248 254 _allocation_stats.increment_splitBirths();
249 255 }
250 256
251 257 ssize_t splitDeaths() const {
252 258 return _allocation_stats.splitDeaths();
253 259 }
254 260 void set_splitDeaths(ssize_t v) {
255 261 assert_proper_lock_protection();
256 262 _allocation_stats.set_splitDeaths(v);
257 263 }
258 264 void increment_splitDeaths() {
259 265 assert_proper_lock_protection();
260 266 _allocation_stats.increment_splitDeaths();
261 267 }
262 268
263 269 NOT_PRODUCT(
264 270 // For debugging. The "_returnedBytes" in all the lists are summed
265 271 // and compared with the total number of bytes swept during a
266 272 // collection.
267 273 size_t returnedBytes() const { return _allocation_stats.returnedBytes(); }
268 274 void set_returnedBytes(size_t v) { _allocation_stats.set_returnedBytes(v); }
269 275 void increment_returnedBytes_by(size_t v) {
270 276 _allocation_stats.set_returnedBytes(_allocation_stats.returnedBytes() + v);
271 277 }
272 278 )
273 279
274 280 // Unlink head of list and return it. Returns NULL if
275 281 // the list is empty.
276 282 FreeChunk* getChunkAtHead();
277 283
278 284 // Remove the first "n" or "count", whichever is smaller, chunks from the
279 285 // list, setting "fl", which is required to be empty, to point to them.
280 286 void getFirstNChunksFromList(size_t n, FreeList* fl);
281 287
282 288 // Unlink this chunk from it's free list
283 289 void removeChunk(FreeChunk* fc);
284 290
285 291 // Add this chunk to this free list.
286 292 void returnChunkAtHead(FreeChunk* fc);
287 293 void returnChunkAtTail(FreeChunk* fc);
288 294
289 295 // Similar to returnChunk* but also records some diagnostic
290 296 // information.
↓ open down ↓ |
119 lines elided |
↑ open up ↑ |
291 297 void returnChunkAtHead(FreeChunk* fc, bool record_return);
292 298 void returnChunkAtTail(FreeChunk* fc, bool record_return);
293 299
294 300 // Prepend "fl" (whose size is required to be the same as that of "this")
295 301 // to the front of "this" list.
296 302 void prepend(FreeList* fl);
297 303
298 304 // Verify that the chunk is in the list.
299 305 // found. Return NULL if "fc" is not found.
300 306 bool verifyChunkInFreeLists(FreeChunk* fc) const;
307 +
308 + // Printing support
309 + static void print_labels_on(outputStream* st, const char* c);
310 + void print_on(outputStream* st, const char* c = NULL) const;
301 311 };
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX