--- old/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
+++ new/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
1 1 /*
2 2 * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 # include "incls/_precompiled.incl"
26 26 # include "incls/_compactibleFreeListSpace.cpp.incl"
27 27
28 28 /////////////////////////////////////////////////////////////////////////
29 29 //// CompactibleFreeListSpace
30 30 /////////////////////////////////////////////////////////////////////////
31 31
32 32 // highest ranked free list lock rank
33 33 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
34 34
35 35 // Constructor
36 36 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
37 37 MemRegion mr, bool use_adaptive_freelists,
38 38 FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
39 39 _dictionaryChoice(dictionaryChoice),
40 40 _adaptive_freelists(use_adaptive_freelists),
41 41 _bt(bs, mr),
42 42 // free list locks are in the range of values taken by _lockRank
43 43 // This range currently is [_leaf+2, _leaf+3]
44 44 // Note: this requires that CFLspace c'tors
45 45 // are called serially in the order in which the locks are
46 46   // acquired in the program text. This is true today.
47 47 _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
48 48 _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
49 49 "CompactibleFreeListSpace._dict_par_lock", true),
50 50 _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
51 51 CMSRescanMultiple),
52 52 _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
53 53 CMSConcMarkMultiple),
54 54 _collector(NULL)
55 55 {
56 56 _bt.set_space(this);
57 57 initialize(mr, true);
58 58 // We have all of "mr", all of which we place in the dictionary
59 59 // as one big chunk. We'll need to decide here which of several
60 60 // possible alternative dictionary implementations to use. For
61 61 // now the choice is easy, since we have only one working
62 62 // implementation, namely, the simple binary tree (splaying
63 63 // temporarily disabled).
64 64 switch (dictionaryChoice) {
65 65 case FreeBlockDictionary::dictionaryBinaryTree:
66 66 _dictionary = new BinaryTreeDictionary(mr);
67 67 break;
68 68 case FreeBlockDictionary::dictionarySplayTree:
69 69 case FreeBlockDictionary::dictionarySkipList:
70 70 default:
71 71 warning("dictionaryChoice: selected option not understood; using"
72 72 " default BinaryTreeDictionary implementation instead.");
73 73 _dictionary = new BinaryTreeDictionary(mr);
74 74 break;
75 75 }
76 76 splitBirth(mr.word_size());
77 77 assert(_dictionary != NULL, "CMS dictionary initialization");
78 78 // The indexed free lists are initially all empty and are lazily
79 79 // filled in on demand. Initialize the array elements to NULL.
80 80 initializeIndexedFreeListArray();
81 81
82 82 // Not using adaptive free lists assumes that allocation is first
83 83 // from the linAB's. Also a cms perm gen which can be compacted
84 84 // has to have the klass's klassKlass allocated at a lower
85 85 // address in the heap than the klass so that the klassKlass is
86 86 // moved to its new location before the klass is moved.
87 87 // Set the _refillSize for the linear allocation blocks
88 88 if (!use_adaptive_freelists) {
89 89 FreeChunk* fc = _dictionary->getChunk(mr.word_size());
90 90 // The small linAB initially has all the space and will allocate
91 91 // a chunk of any size.
92 92 HeapWord* addr = (HeapWord*) fc;
93 93 _smallLinearAllocBlock.set(addr, fc->size() ,
94 94 1024*SmallForLinearAlloc, fc->size());
95 95 // Note that _unallocated_block is not updated here.
96 96 // Allocations from the linear allocation block should
97 97 // update it.
98 98 } else {
99 99 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
100 100 SmallForLinearAlloc);
101 101 }
102 102 // CMSIndexedFreeListReplenish should be at least 1
103 103 CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
104 104 _promoInfo.setSpace(this);
105 105 if (UseCMSBestFit) {
106 106 _fitStrategy = FreeBlockBestFitFirst;
107 107 } else {
108 108 _fitStrategy = FreeBlockStrategyNone;
109 109 }
110 110 checkFreeListConsistency();
111 111
112 112 // Initialize locks for parallel case.
113 113 if (ParallelGCThreads > 0) {
114 114 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
115 115 _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
116 116 "a freelist par lock",
117 117 true);
118 118 if (_indexedFreeListParLocks[i] == NULL)
119 119 vm_exit_during_initialization("Could not allocate a par lock");
120 120 DEBUG_ONLY(
121 121 _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
122 122 )
123 123 }
124 124 _dictionary->set_par_lock(&_parDictionaryAllocLock);
125 125 }
126 126 }
127 127
128 128 // Like CompactibleSpace forward() but always calls cross_threshold() to
129 129 // update the block offset table. Removed initialize_threshold call because
130 130 // CFLS does not use a block offset array for contiguous spaces.
131 131 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
132 132 CompactPoint* cp, HeapWord* compact_top) {
133 133 // q is alive
134 134 // First check if we should switch compaction space
135 135 assert(this == cp->space, "'this' should be current compaction space.");
136 136 size_t compaction_max_size = pointer_delta(end(), compact_top);
137 137 assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
138 138 "virtual adjustObjectSize_v() method is not correct");
139 139 size_t adjusted_size = adjustObjectSize(size);
140 140 assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
141 141 "no small fragments allowed");
142 142 assert(minimum_free_block_size() == MinChunkSize,
143 143 "for de-virtualized reference below");
144 144   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
145 145 if (adjusted_size + MinChunkSize > compaction_max_size &&
146 146 adjusted_size != compaction_max_size) {
147 147 do {
148 148 // switch to next compaction space
149 149 cp->space->set_compaction_top(compact_top);
150 150 cp->space = cp->space->next_compaction_space();
151 151 if (cp->space == NULL) {
152 152 cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
153 153 assert(cp->gen != NULL, "compaction must succeed");
154 154 cp->space = cp->gen->first_compaction_space();
155 155 assert(cp->space != NULL, "generation must have a first compaction space");
156 156 }
157 157 compact_top = cp->space->bottom();
158 158 cp->space->set_compaction_top(compact_top);
159 159 // The correct adjusted_size may not be the same as that for this method
160 160       // (i.e., cp->space may no longer be "this"), so adjust the size again.
161 161       // Use the virtual method here; it was not used above in order to save
162 162       // the virtual dispatch.
163 163 adjusted_size = cp->space->adjust_object_size_v(size);
164 164 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
165 165 assert(cp->space->minimum_free_block_size() == 0, "just checking");
166 166 } while (adjusted_size > compaction_max_size);
167 167 }
168 168
169 169 // store the forwarding pointer into the mark word
170 170 if ((HeapWord*)q != compact_top) {
171 171 q->forward_to(oop(compact_top));
172 172 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
173 173 } else {
174 174 // if the object isn't moving we can just set the mark to the default
175 175 // mark and handle it specially later on.
176 176 q->init_mark();
177 177 assert(q->forwardee() == NULL, "should be forwarded to NULL");
178 178 }
179 179
180 180 debug_only(MarkSweep::register_live_oop(q, adjusted_size));
181 181 compact_top += adjusted_size;
182 182
183 183 // we need to update the offset table so that the beginnings of objects can be
184 184 // found during scavenge. Note that we are updating the offset table based on
185 185 // where the object will be once the compaction phase finishes.
186 186
187 187 // Always call cross_threshold(). A contiguous space can only call it when
188 188   // the compaction_top exceeds the current threshold, but not for a
189 189 // non-contiguous space.
190 190 cp->threshold =
191 191 cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
192 192 return compact_top;
193 193 }
194 194
195 195 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
196 196 // and use of single_block instead of alloc_block. The name here is not really
197 197 // appropriate - maybe a more general name could be invented for both the
198 198 // contiguous and noncontiguous spaces.
199 199
200 200 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
201 201 _bt.single_block(start, the_end);
202 202 return end();
203 203 }
204 204
205 205 // Initialize the indexed free list array elements to NULL.
206 206 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
207 207 for (size_t i = 0; i < IndexSetSize; i++) {
208 208 // Note that on platforms where objects are double word aligned,
209 209 // the odd array elements are not used. It is convenient, however,
210 210 // to map directly from the object size to the array element.
211 211 _indexedFreeList[i].reset(IndexSetSize);
212 212 _indexedFreeList[i].set_size(i);
213 213 assert(_indexedFreeList[i].count() == 0, "reset check failed");
214 214 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
215 215 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
216 216 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
217 217 }
218 218 }
219 219
220 220 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
221 221 for (int i = 1; i < IndexSetSize; i++) {
222 222 assert(_indexedFreeList[i].size() == (size_t) i,
223 223 "Indexed free list sizes are incorrect");
224 224 _indexedFreeList[i].reset(IndexSetSize);
225 225 assert(_indexedFreeList[i].count() == 0, "reset check failed");
226 226 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
227 227 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
228 228 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
229 229 }
230 230 }
231 231
232 232 void CompactibleFreeListSpace::reset(MemRegion mr) {
233 233 resetIndexedFreeListArray();
234 234 dictionary()->reset();
235 235 if (BlockOffsetArrayUseUnallocatedBlock) {
236 236 assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
237 237 // Everything's allocated until proven otherwise.
238 238 _bt.set_unallocated_block(end());
239 239 }
240 240 if (!mr.is_empty()) {
241 241 assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
242 242 _bt.single_block(mr.start(), mr.word_size());
243 243 FreeChunk* fc = (FreeChunk*) mr.start();
244 244 fc->setSize(mr.word_size());
245 245 if (mr.word_size() >= IndexSetSize ) {
246 246 returnChunkToDictionary(fc);
247 247 } else {
248 248 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
249 249 _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
250 250 }
251 251 }
252 252 _promoInfo.reset();
253 253 _smallLinearAllocBlock._ptr = NULL;
254 254 _smallLinearAllocBlock._word_size = 0;
255 255 }
256 256
257 257 void CompactibleFreeListSpace::reset_after_compaction() {
258 258 // Reset the space to the new reality - one free chunk.
259 259 MemRegion mr(compaction_top(), end());
260 260 reset(mr);
261 261 // Now refill the linear allocation block(s) if possible.
262 262 if (_adaptive_freelists) {
263 263 refillLinearAllocBlocksIfNeeded();
264 264 } else {
265 265 // Place as much of mr in the linAB as we can get,
266 266 // provided it was big enough to go into the dictionary.
267 267 FreeChunk* fc = dictionary()->findLargestDict();
268 268 if (fc != NULL) {
269 269 assert(fc->size() == mr.word_size(),
270 270 "Why was the chunk broken up?");
271 271 removeChunkFromDictionary(fc);
272 272 HeapWord* addr = (HeapWord*) fc;
273 273 _smallLinearAllocBlock.set(addr, fc->size() ,
274 274 1024*SmallForLinearAlloc, fc->size());
275 275 // Note that _unallocated_block is not updated here.
276 276 }
277 277 }
278 278 }
279 279
280 280 // Walks the entire dictionary, returning a coterminal
281 281 // chunk, if it exists. Use with caution since it involves
282 282 // a potentially complete walk of a potentially large tree.
283 283 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
284 284
285 285 assert_lock_strong(&_freelistLock);
286 286
287 287 return dictionary()->find_chunk_ends_at(end());
288 288 }
289 289
290 290
291 291 #ifndef PRODUCT
292 292 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
293 293 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
294 294 _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
295 295 }
296 296 }
297 297
298 298 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
299 299 size_t sum = 0;
300 300 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
301 301 sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
302 302 }
303 303 return sum;
304 304 }
305 305
306 306 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
307 307 size_t count = 0;
308 308 for (int i = MinChunkSize; i < IndexSetSize; i++) {
309 309 debug_only(
310 310 ssize_t total_list_count = 0;
311 311 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
312 312 fc = fc->next()) {
313 313 total_list_count++;
314 314 }
315 315 assert(total_list_count == _indexedFreeList[i].count(),
316 316 "Count in list is incorrect");
317 317 )
318 318 count += _indexedFreeList[i].count();
319 319 }
320 320 return count;
321 321 }
322 322
323 323 size_t CompactibleFreeListSpace::totalCount() {
324 324 size_t num = totalCountInIndexedFreeLists();
325 325 num += dictionary()->totalCount();
326 326 if (_smallLinearAllocBlock._word_size != 0) {
327 327 num++;
328 328 }
329 329 return num;
330 330 }
331 331 #endif
332 332
333 333 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
334 334 FreeChunk* fc = (FreeChunk*) p;
335 335 return fc->isFree();
336 336 }
337 337
338 338 size_t CompactibleFreeListSpace::used() const {
339 339 return capacity() - free();
340 340 }
341 341
342 342 size_t CompactibleFreeListSpace::free() const {
343 343 // "MT-safe, but not MT-precise"(TM), if you will: i.e.
344 344 // if you do this while the structures are in flux you
345 345 // may get an approximate answer only; for instance
346 346 // because there is concurrent allocation either
347 347 // directly by mutators or for promotion during a GC.
348 348 // It's "MT-safe", however, in the sense that you are guaranteed
349 349 // not to crash and burn, for instance, because of walking
350 350 // pointers that could disappear as you were walking them.
351 351 // The approximation is because the various components
352 352 // that are read below are not read atomically (and
353 353 // further the computation of totalSizeInIndexedFreeLists()
354 354   // is itself a non-atomic computation). The normal use of
355 355 // this is during a resize operation at the end of GC
356 356 // and at that time you are guaranteed to get the
357 357 // correct actual value. However, for instance, this is
358 358 // also read completely asynchronously by the "perf-sampler"
359 359 // that supports jvmstat, and you are apt to see the values
360 360 // flicker in such cases.
361 361 assert(_dictionary != NULL, "No _dictionary?");
362 362 return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
363 363 totalSizeInIndexedFreeLists() +
364 364 _smallLinearAllocBlock._word_size) * HeapWordSize;
365 365 }
366 366
367 367 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
368 368 assert(_dictionary != NULL, "No _dictionary?");
369 369 assert_locked();
370 370 size_t res = _dictionary->maxChunkSize();
371 371 res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
372 372 (size_t) SmallForLinearAlloc - 1));
373 373 // XXX the following could potentially be pretty slow;
374 374   // should one, pessimally for the rare cases when res
375 375   // calculated above is less than IndexSetSize,
376 376 // just return res calculated above? My reasoning was that
377 377 // those cases will be so rare that the extra time spent doesn't
378 378 // really matter....
379 379 // Note: do not change the loop test i >= res + IndexSetStride
380 380 // to i > res below, because i is unsigned and res may be zero.
381 381 for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
382 382 i -= IndexSetStride) {
383 383 if (_indexedFreeList[i].head() != NULL) {
384 384 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
385 385 return i;
386 386 }
387 387 }
388 388 return res;
389 389 }
390 390
391 391 void CompactibleFreeListSpace::reportFreeListStatistics() const {
392 392 assert_lock_strong(&_freelistLock);
393 393 assert(PrintFLSStatistics != 0, "Reporting error");
394 394 _dictionary->reportStatistics();
395 395 if (PrintFLSStatistics > 1) {
396 396 reportIndexedFreeListStatistics();
397 397 size_t totalSize = totalSizeInIndexedFreeLists() +
398 398 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
399 399 gclog_or_tty->print(" free=%ld frag=%1.4f\n", totalSize, flsFrag());
400 400 }
401 401 }
402 402
403 403 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
404 404 assert_lock_strong(&_freelistLock);
405 405 gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
406 406 "--------------------------------\n");
407 407 size_t totalSize = totalSizeInIndexedFreeLists();
408 408 size_t freeBlocks = numFreeBlocksInIndexedFreeLists();
409 409 gclog_or_tty->print("Total Free Space: %d\n", totalSize);
410 410 gclog_or_tty->print("Max Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
411 411 gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks);
412 412 if (freeBlocks != 0) {
413 413 gclog_or_tty->print("Av. Block Size: %d\n", totalSize/freeBlocks);
414 414 }
415 415 }
416 416
417 417 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
418 418 size_t res = 0;
419 419 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
420 420 debug_only(
421 421 ssize_t recount = 0;
422 422 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
423 423 fc = fc->next()) {
424 424 recount += 1;
425 425 }
426 426 assert(recount == _indexedFreeList[i].count(),
427 427 "Incorrect count in list");
428 428 )
429 429 res += _indexedFreeList[i].count();
430 430 }
431 431 return res;
432 432 }
433 433
434 434 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
435 435 for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
436 436 if (_indexedFreeList[i].head() != NULL) {
437 437 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
438 438 return (size_t)i;
439 439 }
440 440 }
441 441 return 0;
442 442 }
443 443
444 444 void CompactibleFreeListSpace::set_end(HeapWord* value) {
445 445 HeapWord* prevEnd = end();
446 446 assert(prevEnd != value, "unnecessary set_end call");
447 447 assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block");
448 448 _end = value;
449 449 if (prevEnd != NULL) {
450 450 // Resize the underlying block offset table.
451 451 _bt.resize(pointer_delta(value, bottom()));
452 452 if (value <= prevEnd) {
453 453 assert(value >= unallocated_block(), "New end is below unallocated block");
454 454 } else {
455 455 // Now, take this new chunk and add it to the free blocks.
456 456 // Note that the BOT has not yet been updated for this block.
457 457 size_t newFcSize = pointer_delta(value, prevEnd);
458 458 // XXX This is REALLY UGLY and should be fixed up. XXX
459 459 if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
460 460 // Mark the boundary of the new block in BOT
461 461 _bt.mark_block(prevEnd, value);
462 462 // put it all in the linAB
463 463 if (ParallelGCThreads == 0) {
464 464 _smallLinearAllocBlock._ptr = prevEnd;
465 465 _smallLinearAllocBlock._word_size = newFcSize;
466 466 repairLinearAllocBlock(&_smallLinearAllocBlock);
467 467 } else { // ParallelGCThreads > 0
468 468 MutexLockerEx x(parDictionaryAllocLock(),
469 469 Mutex::_no_safepoint_check_flag);
470 470 _smallLinearAllocBlock._ptr = prevEnd;
471 471 _smallLinearAllocBlock._word_size = newFcSize;
472 472 repairLinearAllocBlock(&_smallLinearAllocBlock);
473 473 }
474 474 // Births of chunks put into a LinAB are not recorded. Births
475 475 // of chunks as they are allocated out of a LinAB are.
476 476 } else {
477 477 // Add the block to the free lists, if possible coalescing it
478 478 // with the last free block, and update the BOT and census data.
479 479 addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
480 480 }
481 481 }
482 482 }
483 483 }
484 484
485 485 class FreeListSpace_DCTOC : public Filtering_DCTOC {
486 486 CompactibleFreeListSpace* _cfls;
487 487 CMSCollector* _collector;
488 488 protected:
489 489 // Override.
490 490 #define walk_mem_region_with_cl_DECL(ClosureType) \
491 491 virtual void walk_mem_region_with_cl(MemRegion mr, \
492 492 HeapWord* bottom, HeapWord* top, \
493 493 ClosureType* cl); \
494 494 void walk_mem_region_with_cl_par(MemRegion mr, \
495 495 HeapWord* bottom, HeapWord* top, \
496 496 ClosureType* cl); \
497 497 void walk_mem_region_with_cl_nopar(MemRegion mr, \
498 498 HeapWord* bottom, HeapWord* top, \
499 499 ClosureType* cl)
500 500 walk_mem_region_with_cl_DECL(OopClosure);
501 501 walk_mem_region_with_cl_DECL(FilteringClosure);
502 502
503 503 public:
504 504 FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
505 505 CMSCollector* collector,
506 506 OopClosure* cl,
507 507 CardTableModRefBS::PrecisionStyle precision,
508 508 HeapWord* boundary) :
509 509 Filtering_DCTOC(sp, cl, precision, boundary),
510 510 _cfls(sp), _collector(collector) {}
511 511 };
512 512
513 513 // We de-virtualize the block-related calls below, since we know that our
514 514 // space is a CompactibleFreeListSpace.
515 515 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
516 516 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \
517 517 HeapWord* bottom, \
518 518 HeapWord* top, \
519 519 ClosureType* cl) { \
520 520 if (SharedHeap::heap()->n_par_threads() > 0) { \
521 521 walk_mem_region_with_cl_par(mr, bottom, top, cl); \
522 522 } else { \
523 523 walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
524 524 } \
525 525 } \
526 526 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr, \
527 527 HeapWord* bottom, \
528 528 HeapWord* top, \
529 529 ClosureType* cl) { \
530 530 /* Skip parts that are before "mr", in case "block_start" sent us \
531 531 back too far. */ \
532 532 HeapWord* mr_start = mr.start(); \
533 533 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
534 534 HeapWord* next = bottom + bot_size; \
535 535 while (next < mr_start) { \
536 536 bottom = next; \
537 537 bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
538 538 next = bottom + bot_size; \
539 539 } \
540 540 \
541 541 while (bottom < top) { \
542 542 if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) && \
543 543 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
544 544 oop(bottom)) && \
545 545 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
546 546 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
547 547 bottom += _cfls->adjustObjectSize(word_sz); \
548 548 } else { \
549 549 bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \
550 550 } \
551 551 } \
552 552 } \
553 553 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \
554 554 HeapWord* bottom, \
555 555 HeapWord* top, \
556 556 ClosureType* cl) { \
557 557 /* Skip parts that are before "mr", in case "block_start" sent us \
558 558 back too far. */ \
559 559 HeapWord* mr_start = mr.start(); \
560 560 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
561 561 HeapWord* next = bottom + bot_size; \
562 562 while (next < mr_start) { \
563 563 bottom = next; \
564 564 bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
565 565 next = bottom + bot_size; \
566 566 } \
567 567 \
568 568 while (bottom < top) { \
569 569 if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) && \
570 570 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
571 571 oop(bottom)) && \
572 572 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
573 573 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
574 574 bottom += _cfls->adjustObjectSize(word_sz); \
575 575 } else { \
576 576 bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
577 577 } \
578 578 } \
579 579 }
580 580
581 581 // (There are only two of these, rather than N, because the split is due
582 582 // only to the introduction of the FilteringClosure, a local part of the
583 583 // impl of this abstraction.)
584 584 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
585 585 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
586 586
587 587 DirtyCardToOopClosure*
588 588 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
589 589 CardTableModRefBS::PrecisionStyle precision,
590 590 HeapWord* boundary) {
591 591 return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
592 592 }
593 593
594 594
595 595 // Note on locking for the space iteration functions:
596 596 // since the collector's iteration activities are concurrent with
597 597 // allocation activities by mutators, absent a suitable mutual exclusion
598 598 // mechanism the iterators may go awry. For instance, a block being iterated
599 599 // may suddenly be allocated or divided up and part of it allocated and
600 600 // so on.
601 601
602 602 // Apply the given closure to each block in the space.
603 603 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
604 604 assert_lock_strong(freelistLock());
605 605 HeapWord *cur, *limit;
606 606 for (cur = bottom(), limit = end(); cur < limit;
607 607 cur += cl->do_blk_careful(cur));
608 608 }
609 609
610 610 // Apply the given closure to each block in the space.
611 611 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
612 612 assert_lock_strong(freelistLock());
613 613 HeapWord *cur, *limit;
614 614 for (cur = bottom(), limit = end(); cur < limit;
615 615 cur += cl->do_blk(cur));
616 616 }
617 617
618 618 // Apply the given closure to each oop in the space.
619 619 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
620 620 assert_lock_strong(freelistLock());
621 621 HeapWord *cur, *limit;
622 622 size_t curSize;
623 623 for (cur = bottom(), limit = end(); cur < limit;
624 624 cur += curSize) {
625 625 curSize = block_size(cur);
626 626 if (block_is_obj(cur)) {
627 627 oop(cur)->oop_iterate(cl);
628 628 }
629 629 }
630 630 }
631 631
632 632 // Apply the given closure to each oop in the space \intersect memory region.
633 633 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
634 634 assert_lock_strong(freelistLock());
635 635 if (is_empty()) {
636 636 return;
637 637 }
638 638 MemRegion cur = MemRegion(bottom(), end());
639 639 mr = mr.intersection(cur);
640 640 if (mr.is_empty()) {
641 641 return;
642 642 }
643 643 if (mr.equals(cur)) {
644 644 oop_iterate(cl);
645 645 return;
646 646 }
647 647 assert(mr.end() <= end(), "just took an intersection above");
648 648 HeapWord* obj_addr = block_start(mr.start());
649 649 HeapWord* t = mr.end();
650 650
651 651 SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
652 652 if (block_is_obj(obj_addr)) {
653 653 // Handle first object specially.
654 654 oop obj = oop(obj_addr);
655 655 obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
656 656 } else {
657 657 FreeChunk* fc = (FreeChunk*)obj_addr;
658 658 obj_addr += fc->size();
659 659 }
660 660 while (obj_addr < t) {
661 661 HeapWord* obj = obj_addr;
662 662 obj_addr += block_size(obj_addr);
663 663 // If "obj_addr" is not greater than top, then the
664 664 // entire object "obj" is within the region.
665 665 if (obj_addr <= t) {
666 666 if (block_is_obj(obj)) {
667 667 oop(obj)->oop_iterate(cl);
668 668 }
669 669 } else {
670 670 // "obj" extends beyond end of region
671 671 if (block_is_obj(obj)) {
672 672 oop(obj)->oop_iterate(&smr_blk);
673 673 }
674 674 break;
675 675 }
676 676 }
677 677 }
678 678
679 679 // NOTE: In the following methods, in order to safely be able to
680 680 // apply the closure to an object, we need to be sure that the
681 681 // object has been initialized. We are guaranteed that an object
682 682 // is initialized if we are holding the Heap_lock with the
683 683 // world stopped.
684 684 void CompactibleFreeListSpace::verify_objects_initialized() const {
685 685 if (is_init_completed()) {
686 686 assert_locked_or_safepoint(Heap_lock);
687 687 if (Universe::is_fully_initialized()) {
688 688 guarantee(SafepointSynchronize::is_at_safepoint(),
689 689 "Required for objects to be initialized");
690 690 }
691 691 } // else make a concession at vm start-up
692 692 }
693 693
694 694 // Apply the given closure to each object in the space
695 695 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
696 696 assert_lock_strong(freelistLock());
697 697 NOT_PRODUCT(verify_objects_initialized());
698 698 HeapWord *cur, *limit;
699 699 size_t curSize;
700 700 for (cur = bottom(), limit = end(); cur < limit;
701 701 cur += curSize) {
702 702 curSize = block_size(cur);
703 703 if (block_is_obj(cur)) {
704 704 blk->do_object(oop(cur));
705 705 }
706 706 }
707 707 }
708 708
709 709 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
710 710 UpwardsObjectClosure* cl) {
711 711 assert_locked();
712 712 NOT_PRODUCT(verify_objects_initialized());
713 713 Space::object_iterate_mem(mr, cl);
714 714 }
715 715
716 716 // Callers of this iterator beware: The closure application should
717 717 // be robust in the face of uninitialized objects and should (always)
718 718 // return a correct size so that the next addr + size below gives us a
719 719 // valid block boundary. [See for instance,
720 720 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
721 721 // in ConcurrentMarkSweepGeneration.cpp.]
722 722 HeapWord*
723 723 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
724 724 assert_lock_strong(freelistLock());
725 725 HeapWord *addr, *last;
726 726 size_t size;
727 727 for (addr = bottom(), last = end();
728 728 addr < last; addr += size) {
729 729 FreeChunk* fc = (FreeChunk*)addr;
730 730 if (fc->isFree()) {
731 731 // Since we hold the free list lock, which protects direct
732 732 // allocation in this generation by mutators, a free object
733 733 // will remain free throughout this iteration code.
734 734 size = fc->size();
735 735 } else {
736 736 // Note that the object need not necessarily be initialized,
737 737 // because (for instance) the free list lock does NOT protect
738 738 // object initialization. The closure application below must
739 739 // therefore be correct in the face of uninitialized objects.
740 740 size = cl->do_object_careful(oop(addr));
741 741 if (size == 0) {
742 742 // An unparsable object found. Signal early termination.
743 743 return addr;
744 744 }
745 745 }
746 746 }
747 747 return NULL;
748 748 }
749 749
750 750 // Callers of this iterator beware: The closure application should
751 751 // be robust in the face of uninitialized objects and should (always)
752 752 // return a correct size so that the next addr + size below gives us a
753 753 // valid block boundary. [See for instance,
754 754 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
755 755 // in ConcurrentMarkSweepGeneration.cpp.]
756 756 HeapWord*
757 757 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
758 758 ObjectClosureCareful* cl) {
759 759 assert_lock_strong(freelistLock());
760 760 // Can't use used_region() below because it may not necessarily
761 761 // be the same as [bottom(),end()); although we could
762 762 // use [used_region().start(),round_to(used_region().end(),CardSize)),
763 763 // that appears too cumbersome, so we just do the simpler check
764 764 // in the assertion below.
765 765 assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
766 766 "mr should be non-empty and within used space");
767 767 HeapWord *addr, *end;
768 768 size_t size;
769 769 for (addr = block_start_careful(mr.start()), end = mr.end();
770 770 addr < end; addr += size) {
771 771 FreeChunk* fc = (FreeChunk*)addr;
772 772 if (fc->isFree()) {
773 773 // Since we hold the free list lock, which protects direct
774 774 // allocation in this generation by mutators, a free object
775 775 // will remain free throughout this iteration code.
776 776 size = fc->size();
777 777 } else {
778 778 // Note that the object need not necessarily be initialized,
779 779 // because (for instance) the free list lock does NOT protect
780 780 // object initialization. The closure application below must
781 781 // therefore be correct in the face of uninitialized objects.
782 782 size = cl->do_object_careful_m(oop(addr), mr);
783 783 if (size == 0) {
784 784 // An unparsable object found. Signal early termination.
785 785 return addr;
786 786 }
787 787 }
788 788 }
789 789 return NULL;
790 790 }
791 791
792 792
793 793 HeapWord* CompactibleFreeListSpace::block_start(const void* p) const {
794 794 NOT_PRODUCT(verify_objects_initialized());
795 795 return _bt.block_start(p);
796 796 }
797 797
798 798 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
799 799 return _bt.block_start_careful(p);
800 800 }
801 801
802 802 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
803 803 NOT_PRODUCT(verify_objects_initialized());
804 804 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
805 805 // This must be volatile, or else there is a danger that the compiler
806 806 // will compile the code below into a sometimes-infinite loop, by keeping
807 807 // the value read the first time in a register.
808 808 oop o = (oop)p;
809 809 volatile oop* second_word_addr = o->klass_addr();
810 810 while (true) {
811 811 klassOop k = (klassOop)(*second_word_addr);
812 812 // We must do this until we get a consistent view of the object.
813 813 if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) {
814 814 FreeChunk* fc = (FreeChunk*)p;
815 815 volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr());
816 816 size_t res = (*sz_addr);
817 817 klassOop k2 = (klassOop)(*second_word_addr); // Read to confirm.
818 818 if (k == k2) {
819 819 assert(res != 0, "Block size should not be 0");
820 820 return res;
821 821 }
822 822 } else if (k != NULL) {
823 823 assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
824 824 assert(o->is_parsable(), "Should be parsable");
825 825 assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
826 826 size_t res = o->size_given_klass(k->klass_part());
827 827 res = adjustObjectSize(res);
828 828 assert(res != 0, "Block size should not be 0");
829 829 return res;
830 830 }
831 831 }
832 832 }
833 833
834 834 // A variant of the above that uses the Printezis bits for
835 835 // unparsable but allocated objects. This avoids any possible
836 836 // stalls waiting for mutators to initialize objects, and is
837 837 // thus potentially faster than the variant above. However,
838 838 // this variant may return a zero size for a block that is
839 839 // under mutation and for which a consistent size cannot be
840 840 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
841 841 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
842 842 const CMSCollector* c)
843 843 const {
844 844 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
845 845 // This must be volatile, or else there is a danger that the compiler
846 846 // will compile the code below into a sometimes-infinite loop, by keeping
847 847 // the value read the first time in a register.
848 848 oop o = (oop)p;
849 849 volatile oop* second_word_addr = o->klass_addr();
850 850 DEBUG_ONLY(uint loops = 0;)
851 851 while (true) {
852 852 klassOop k = (klassOop)(*second_word_addr);
853 853 // We must do this until we get a consistent view of the object.
854 854 if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) {
855 855 FreeChunk* fc = (FreeChunk*)p;
856 856 volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr());
857 857 size_t res = (*sz_addr);
858 858 klassOop k2 = (klassOop)(*second_word_addr); // Read to confirm.
859 859 if (k == k2) {
860 860 assert(res != 0, "Block size should not be 0");
861 861 assert(loops == 0, "Should be 0");
862 862 return res;
863 863 }
864 864 } else if (k != NULL && o->is_parsable()) {
865 865 assert(k->is_oop(), "Should really be klass oop.");
866 866 assert(o->is_oop(), "Should be an oop");
867 867 size_t res = o->size_given_klass(k->klass_part());
868 868 res = adjustObjectSize(res);
869 869 assert(res != 0, "Block size should not be 0");
870 870 return res;
871 871 } else {
872 872 return c->block_size_if_printezis_bits(p);
873 873 }
874 874 assert(loops == 0, "Can loop at most once");
875 875 DEBUG_ONLY(loops++;)
876 876 }
877 877 }
878 878
879 879 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
880 880 NOT_PRODUCT(verify_objects_initialized());
881 881 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
882 882 FreeChunk* fc = (FreeChunk*)p;
883 883 if (fc->isFree()) {
884 884 return fc->size();
885 885 } else {
886 886 // Ignore mark word because this may be a recently promoted
887 887 // object whose mark word is used to chain together grey
888 888 // objects (the last one would have a null value).
889 889 assert(oop(p)->is_oop(true), "Should be an oop");
890 890 return adjustObjectSize(oop(p)->size());
891 891 }
892 892 }
893 893
894 894 // This implementation assumes that the property of "being an object" is
895 895 // stable. But being a free chunk may not be (because of parallel
896 896 // promotion).
897 897 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
898 898 FreeChunk* fc = (FreeChunk*)p;
899 899 assert(is_in_reserved(p), "Should be in space");
900 900 // When doing a mark-sweep-compact of the CMS generation, this
901 901 // assertion may fail because prepare_for_compaction() uses
902 902 // space that is garbage to maintain information on ranges of
903 903 // live objects so that these live ranges can be moved as a whole.
904 904 // Comment out this assertion until that problem can be solved
905 905 // (i.e., that the block start calculation may look at objects
906 906   // at addresses below "p" in finding the object that contains "p",
907 907   // and those objects (if garbage) may have been modified to hold
908 908   // live range information.)
909 909 // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
910 910 klassOop k = oop(p)->klass();
911 911 intptr_t ki = (intptr_t)k;
912 912 if (FreeChunk::secondWordIndicatesFreeChunk(ki)) return false;
913 913 if (k != NULL) {
914 914 // Ignore mark word because it may have been used to
915 915 // chain together promoted objects (the last one
916 916 // would have a null value).
917 917 assert(oop(p)->is_oop(true), "Should be an oop");
918 918 return true;
919 919 } else {
920 920 return false; // Was not an object at the start of collection.
921 921 }
922 922 }
923 923
924 924 // Check if the object is alive. This fact is checked either by consulting
925 925 // the main marking bitmap in the sweeping phase or, if it's a permanent
926 926 // generation and we're not in the sweeping phase, by checking the
927 927 // perm_gen_verify_bit_map where we store the "deadness" information if
928 928 // we did not sweep the perm gen in the most recent previous GC cycle.
929 929 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
930 930 assert (block_is_obj(p), "The address should point to an object");
931 931
932 932 // If we're sweeping, we use object liveness information from the main bit map
933 933 // for both perm gen and old gen.
934 934 // We don't need to lock the bitmap (live_map or dead_map below), because
935 935 // EITHER we are in the middle of the sweeping phase, and the
936 936 // main marking bit map (live_map below) is locked,
937 937 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
938 938 // is stable, because it's mutated only in the sweeping phase.
939 939 if (_collector->abstract_state() == CMSCollector::Sweeping) {
940 940 CMSBitMap* live_map = _collector->markBitMap();
941 941 return live_map->isMarked((HeapWord*) p);
942 942 } else {
943 943 // If we're not currently sweeping and we haven't swept the perm gen in
944 944 // the previous concurrent cycle then we may have dead but unswept objects
945 945 // in the perm gen. In this case, we use the "deadness" information
946 946 // that we had saved in perm_gen_verify_bit_map at the last sweep.
947 947 if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
948 948 if (_collector->verifying()) {
949 949 CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
950 950 // Object is marked in the dead_map bitmap at the previous sweep
951 951 // when we know that it's dead; if the bitmap is not allocated then
952 952 // the object is alive.
953 953       return (dead_map->sizeInBits() == 0) // bit_map has not been allocated
954 954 || !dead_map->par_isMarked((HeapWord*) p);
955 955 } else {
956 956 return false; // We can't say for sure if it's live, so we say that it's dead.
957 957 }
958 958 }
959 959 }
960 960 return true;
961 961 }
962 962
963 963 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
964 964 FreeChunk* fc = (FreeChunk*)p;
965 965 assert(is_in_reserved(p), "Should be in space");
966 966 assert(_bt.block_start(p) == p, "Should be a block boundary");
967 967 if (!fc->isFree()) {
968 968 // Ignore mark word because it may have been used to
969 969 // chain together promoted objects (the last one
970 970 // would have a null value).
971 971 assert(oop(p)->is_oop(true), "Should be an oop");
972 972 return true;
973 973 }
974 974 return false;
975 975 }
976 976
977 977 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
978 978 // approximate answer if you don't hold the freelistlock when you call this.
979 979 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
980 980 size_t size = 0;
981 981 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
982 982 debug_only(
983 983 // We may be calling here without the lock in which case we
984 984 // won't do this modest sanity check.
985 985 if (freelistLock()->owned_by_self()) {
986 986 size_t total_list_size = 0;
987 987 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
988 988 fc = fc->next()) {
989 989 total_list_size += i;
990 990 }
991 991 assert(total_list_size == i * _indexedFreeList[i].count(),
992 992 "Count in list is incorrect");
993 993 }
994 994 )
995 995 size += i * _indexedFreeList[i].count();
996 996 }
997 997 return size;
998 998 }
999 999
1000 1000 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1001 1001 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1002 1002 return allocate(size);
1003 1003 }
1004 1004
1005 1005 HeapWord*
1006 1006 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1007 1007 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1008 1008 }
1009 1009
1010 1010 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1011 1011 assert_lock_strong(freelistLock());
1012 1012 HeapWord* res = NULL;
1013 1013 assert(size == adjustObjectSize(size),
1014 1014 "use adjustObjectSize() before calling into allocate()");
1015 1015
1016 1016 if (_adaptive_freelists) {
1017 1017 res = allocate_adaptive_freelists(size);
1018 1018 } else { // non-adaptive free lists
1019 1019 res = allocate_non_adaptive_freelists(size);
1020 1020 }
1021 1021
1022 1022 if (res != NULL) {
1023 1023 // check that res does lie in this space!
1024 1024 assert(is_in_reserved(res), "Not in this space!");
1025 1025 assert(is_aligned((void*)res), "alignment check");
1026 1026
1027 1027 FreeChunk* fc = (FreeChunk*)res;
1028 1028 fc->markNotFree();
1029 1029 assert(!fc->isFree(), "shouldn't be marked free");
1030 1030 assert(oop(fc)->klass() == NULL, "should look uninitialized");
1031 1031 // Verify that the block offset table shows this to
1032 1032 // be a single block, but not one which is unallocated.
1033 1033 _bt.verify_single_block(res, size);
1034 1034 _bt.verify_not_unallocated(res, size);
1035 1035 // mangle a just allocated object with a distinct pattern.
1036 1036 debug_only(fc->mangleAllocated(size));
1037 1037 }
1038 1038
1039 1039 return res;
1040 1040 }
1041 1041
1042 1042 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1043 1043 HeapWord* res = NULL;
1044 1044 // try and use linear allocation for smaller blocks
1045 1045 if (size < _smallLinearAllocBlock._allocation_size_limit) {
1046 1046 // if successful, the following also adjusts block offset table
1047 1047 res = getChunkFromSmallLinearAllocBlock(size);
1048 1048 }
1049 1049 // Else triage to indexed lists for smaller sizes
1050 1050 if (res == NULL) {
1051 1051 if (size < SmallForDictionary) {
1052 1052 res = (HeapWord*) getChunkFromIndexedFreeList(size);
1053 1053 } else {
1054 1054 // else get it from the big dictionary; if even this doesn't
1055 1055 // work we are out of luck.
1056 1056 res = (HeapWord*)getChunkFromDictionaryExact(size);
1057 1057 }
1058 1058 }
1059 1059
1060 1060 return res;
1061 1061 }
1062 1062
1063 1063 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1064 1064 assert_lock_strong(freelistLock());
1065 1065 HeapWord* res = NULL;
1066 1066 assert(size == adjustObjectSize(size),
1067 1067 "use adjustObjectSize() before calling into allocate()");
1068 1068
1069 1069 // Strategy
1070 1070 // if small
1071 1071 // exact size from small object indexed list if small
1072 1072 // small or large linear allocation block (linAB) as appropriate
1073 1073 // take from lists of greater sized chunks
1074 1074 // else
1075 1075 // dictionary
1076 1076 // small or large linear allocation block if it has the space
1077 1077 // Try allocating exact size from indexTable first
1078 1078 if (size < IndexSetSize) {
1079 1079 res = (HeapWord*) getChunkFromIndexedFreeList(size);
1080 1080 if(res != NULL) {
1081 1081 assert(res != (HeapWord*)_indexedFreeList[size].head(),
1082 1082 "Not removed from free list");
1083 1083 // no block offset table adjustment is necessary on blocks in
1084 1084 // the indexed lists.
1085 1085
1086 1086 // Try allocating from the small LinAB
1087 1087 } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1088 1088 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1089 1089 // if successful, the above also adjusts block offset table
1090 1090 // Note that this call will refill the LinAB to
1091 1091       // satisfy the request. This is different from
1092 1092 // evm.
1093 1093 // Don't record chunk off a LinAB? smallSplitBirth(size);
1094 1094
1095 1095 } else {
1096 1096 // Raid the exact free lists larger than size, even if they are not
1097 1097 // overpopulated.
1098 1098 res = (HeapWord*) getChunkFromGreater(size);
1099 1099 }
1100 1100 } else {
1101 1101 // Big objects get allocated directly from the dictionary.
1102 1102 res = (HeapWord*) getChunkFromDictionaryExact(size);
1103 1103 if (res == NULL) {
1104 1104 // Try hard not to fail since an allocation failure will likely
1105 1105 // trigger a synchronous GC. Try to get the space from the
1106 1106 // allocation blocks.
1107 1107 res = getChunkFromSmallLinearAllocBlockRemainder(size);
1108 1108 }
1109 1109 }
1110 1110
1111 1111 return res;
1112 1112 }
1113 1113
1114 1114 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1115 1115 // when promoting obj.
1116 1116 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1117 1117 // Depending on the object size, expansion may require refilling either a
1118 1118 // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize
1119 1119 // is added because the dictionary may over-allocate to avoid fragmentation.
1120 1120 size_t space = obj_size;
1121 1121 if (!_adaptive_freelists) {
1122 1122 space = MAX2(space, _smallLinearAllocBlock._refillSize);
1123 1123 }
1124 1124 space += _promoInfo.refillSize() + 2 * MinChunkSize;
1125 1125 return space;
1126 1126 }
1127 1127
1128 1128 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1129 1129 FreeChunk* ret;
1130 1130
1131 1131 assert(numWords >= MinChunkSize, "Size is less than minimum");
1132 1132 assert(linearAllocationWouldFail() || bestFitFirst(),
1133 1133 "Should not be here");
1134 1134
1135 1135 size_t i;
1136 1136 size_t currSize = numWords + MinChunkSize;
1137 1137 assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1138 1138 for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1139 1139 FreeList* fl = &_indexedFreeList[i];
1140 1140 if (fl->head()) {
1141 1141 ret = getFromListGreater(fl, numWords);
1142 1142 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
1143 1143 return ret;
1144 1144 }
1145 1145 }
1146 1146
1147 1147 currSize = MAX2((size_t)SmallForDictionary,
1148 1148 (size_t)(numWords + MinChunkSize));
1149 1149
1150 1150 /* Try to get a chunk that satisfies request, while avoiding
1151 1151 fragmentation that can't be handled. */
1152 1152 {
1153 1153 ret = dictionary()->getChunk(currSize);
1154 1154 if (ret != NULL) {
1155 1155 assert(ret->size() - numWords >= MinChunkSize,
1156 1156 "Chunk is too small");
1157 1157 _bt.allocated((HeapWord*)ret, ret->size());
1158 1158 /* Carve returned chunk. */
1159 1159 (void) splitChunkAndReturnRemainder(ret, numWords);
1160 1160 /* Label this as no longer a free chunk. */
1161 1161 assert(ret->isFree(), "This chunk should be free");
1162 1162 ret->linkPrev(NULL);
1163 1163 }
1164 1164 assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
1165 1165 return ret;
1166 1166 }
1167 1167 ShouldNotReachHere();
1168 1168 }
1169 1169
1170 1170 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
1171 1171 const {
1172 1172 assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1173 1173 return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
1174 1174 }
1175 1175
1176 1176 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
1177 1177 if (fc->size() >= IndexSetSize) {
1178 1178 return dictionary()->verifyChunkInFreeLists(fc);
1179 1179 } else {
1180 1180 return verifyChunkInIndexedFreeLists(fc);
1181 1181 }
1182 1182 }
1183 1183
1184 1184 #ifndef PRODUCT
1185 1185 void CompactibleFreeListSpace::assert_locked() const {
1186 1186 CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1187 1187 }
1188 1188 #endif
1189 1189
1190 1190 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1191 1191 // In the parallel case, the main thread holds the free list lock
1192 1192   // on behalf of the parallel threads.
1193 1193 assert_locked();
1194 1194 FreeChunk* fc;
1195 1195 {
1196 1196 // If GC is parallel, this might be called by several threads.
1197 1197 // This should be rare enough that the locking overhead won't affect
1198 1198 // the sequential code.
1199 1199 MutexLockerEx x(parDictionaryAllocLock(),
1200 1200 Mutex::_no_safepoint_check_flag);
1201 1201 fc = getChunkFromDictionary(size);
1202 1202 }
1203 1203 if (fc != NULL) {
1204 1204 fc->dontCoalesce();
1205 1205 assert(fc->isFree(), "Should be free, but not coalescable");
1206 1206 // Verify that the block offset table shows this to
1207 1207 // be a single block, but not one which is unallocated.
1208 1208 _bt.verify_single_block((HeapWord*)fc, fc->size());
1209 1209 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1210 1210 }
1211 1211 return fc;
1212 1212 }
1213 1213
1214 1214 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size, oop* ref) {
1215 1215 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1216 1216 assert_locked();
1217 1217
1218 1218 // if we are tracking promotions, then first ensure space for
1219 1219 // promotion (including spooling space for saving header if necessary).
1220 1220 // then allocate and copy, then track promoted info if needed.
1221 1221 // When tracking (see PromotionInfo::track()), the mark word may
1222 1222 // be displaced and in this case restoration of the mark word
1223 1223 // occurs in the (oop_since_save_marks_)iterate phase.
1224 1224 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1225 1225 return NULL;
1226 1226 }
1227 1227 // Call the allocate(size_t, bool) form directly to avoid the
1228 1228 // additional call through the allocate(size_t) form. Having
1229 1229   // the compiler inline the call is problematic because allocate(size_t)
1230 1230 // is a virtual method.
1231 1231 HeapWord* res = allocate(adjustObjectSize(obj_size));
1232 1232 if (res != NULL) {
1233 1233 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1234 1234 // if we should be tracking promotions, do so.
1235 1235 if (_promoInfo.tracking()) {
1236 1236 _promoInfo.track((PromotedObject*)res);
1237 1237 }
1238 1238 }
1239 1239 return oop(res);
1240 1240 }
1241 1241
1242 1242 HeapWord*
1243 1243 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1244 1244 assert_locked();
1245 1245 assert(size >= MinChunkSize, "minimum chunk size");
1246 1246 assert(size < _smallLinearAllocBlock._allocation_size_limit,
1247 1247 "maximum from smallLinearAllocBlock");
1248 1248 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1249 1249 }
1250 1250
1251 1251 HeapWord*
1252 1252 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1253 1253 size_t size) {
1254 1254 assert_locked();
1255 1255 assert(size >= MinChunkSize, "too small");
1256 1256 HeapWord* res = NULL;
1257 1257 // Try to do linear allocation from blk, making sure that
1258 1258 if (blk->_word_size == 0) {
1259 1259 // We have probably been unable to fill this either in the prologue or
1260 1260 // when it was exhausted at the last linear allocation. Bail out until
1261 1261 // next time.
1262 1262 assert(blk->_ptr == NULL, "consistency check");
1263 1263 return NULL;
1264 1264 }
1265 1265 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1266 1266 res = getChunkFromLinearAllocBlockRemainder(blk, size);
1267 1267 if (res != NULL) return res;
1268 1268
1269 1269 // about to exhaust this linear allocation block
1270 1270 if (blk->_word_size == size) { // exactly satisfied
1271 1271 res = blk->_ptr;
1272 1272 _bt.allocated(res, blk->_word_size);
1273 1273 } else if (size + MinChunkSize <= blk->_refillSize) {
1274 1274 // Update _unallocated_block if the size is such that chunk would be
1275 1275 // returned to the indexed free list. All other chunks in the indexed
1276 1276 // free lists are allocated from the dictionary so that _unallocated_block
1277 1277 // has already been adjusted for them. Do it here so that the cost
1278 1278 // is incurred uniformly for all chunks added back to the indexed free lists.
1279 1279 if (blk->_word_size < SmallForDictionary) {
1280 1280 _bt.allocated(blk->_ptr, blk->_word_size);
1281 1281 }
1282 1282 // Return the chunk that isn't big enough, and then refill below.
1283 1283 addChunkToFreeLists(blk->_ptr, blk->_word_size);
1284 1284 _bt.verify_single_block(blk->_ptr, (blk->_ptr + blk->_word_size));
1285 1285 // Don't keep statistics on adding back chunk from a LinAB.
1286 1286 } else {
1287 1287 // A refilled block would not satisfy the request.
1288 1288 return NULL;
1289 1289 }
1290 1290
1291 1291 blk->_ptr = NULL; blk->_word_size = 0;
1292 1292 refillLinearAllocBlock(blk);
1293 1293 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1294 1294 "block was replenished");
1295 1295 if (res != NULL) {
1296 1296 splitBirth(size);
1297 1297 repairLinearAllocBlock(blk);
1298 1298 } else if (blk->_ptr != NULL) {
1299 1299 res = blk->_ptr;
1300 1300 size_t blk_size = blk->_word_size;
1301 1301 blk->_word_size -= size;
1302 1302 blk->_ptr += size;
1303 1303 splitBirth(size);
1304 1304 repairLinearAllocBlock(blk);
1305 1305 // Update BOT last so that other (parallel) GC threads see a consistent
1306 1306 // view of the BOT and free blocks.
1307 1307 // Above must occur before BOT is updated below.
1308 1308 _bt.split_block(res, blk_size, size); // adjust block offset table
1309 1309 }
1310 1310 return res;
1311 1311 }
1312 1312
1313 1313 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1314 1314 LinearAllocBlock* blk,
1315 1315 size_t size) {
1316 1316 assert_locked();
1317 1317 assert(size >= MinChunkSize, "too small");
1318 1318
1319 1319 HeapWord* res = NULL;
1320 1320 // This is the common case. Keep it simple.
1321 1321 if (blk->_word_size >= size + MinChunkSize) {
1322 1322 assert(blk->_ptr != NULL, "consistency check");
1323 1323 res = blk->_ptr;
1324 1324 // Note that the BOT is up-to-date for the linAB before allocation. It
1325 1325 // indicates the start of the linAB. The split_block() updates the
1326 1326 // BOT for the linAB after the allocation (indicates the start of the
1327 1327 // next chunk to be allocated).
1328 1328 size_t blk_size = blk->_word_size;
1329 1329 blk->_word_size -= size;
1330 1330 blk->_ptr += size;
1331 1331 splitBirth(size);
1332 1332 repairLinearAllocBlock(blk);
1333 1333 // Update BOT last so that other (parallel) GC threads see a consistent
1334 1334 // view of the BOT and free blocks.
1335 1335 // Above must occur before BOT is updated below.
1336 1336 _bt.split_block(res, blk_size, size); // adjust block offset table
1337 1337 _bt.allocated(res, size);
1338 1338 }
1339 1339 return res;
1340 1340 }
1341 1341
1342 1342 FreeChunk*
1343 1343 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1344 1344 assert_locked();
1345 1345 assert(size < SmallForDictionary, "just checking");
1346 1346 FreeChunk* res;
1347 1347 res = _indexedFreeList[size].getChunkAtHead();
1348 1348 if (res == NULL) {
1349 1349 res = getChunkFromIndexedFreeListHelper(size);
1350 1350 }
1351 1351 _bt.verify_not_unallocated((HeapWord*) res, size);
1352 1352 return res;
1353 1353 }
1354 1354
1355 1355 FreeChunk*
1356 1356 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size) {
1357 1357 assert_locked();
1358 1358 FreeChunk* fc = NULL;
1359 1359 if (size < SmallForDictionary) {
1360 1360 assert(_indexedFreeList[size].head() == NULL ||
1361 1361 _indexedFreeList[size].surplus() <= 0,
1362 1362 "List for this size should be empty or under populated");
1363 1363 // Try best fit in exact lists before replenishing the list
1364 1364 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1365 1365 // Replenish list.
1366 1366 //
1367 1367 // Things tried that failed.
1368 1368 // Tried allocating out of the two LinAB's first before
1369 1369 // replenishing lists.
1370 1370 // Tried small linAB of size 256 (size in indexed list)
1371 1371 // and replenishing indexed lists from the small linAB.
1372 1372 //
1373 1373 FreeChunk* newFc = NULL;
1374 1374 size_t replenish_size = CMSIndexedFreeListReplenish * size;
1375 1375 if (replenish_size < SmallForDictionary) {
1376 1376 // Do not replenish from an underpopulated size.
1377 1377 if (_indexedFreeList[replenish_size].surplus() > 0 &&
1378 1378 _indexedFreeList[replenish_size].head() != NULL) {
1379 1379 newFc =
1380 1380 _indexedFreeList[replenish_size].getChunkAtHead();
1381 1381 } else {
1382 1382 newFc = bestFitSmall(replenish_size);
1383 1383 }
1384 1384 }
1385 1385 if (newFc != NULL) {
1386 1386 splitDeath(replenish_size);
1387 1387 } else if (replenish_size > size) {
1388 1388 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1389 1389 newFc =
1390 1390 getChunkFromIndexedFreeListHelper(replenish_size);
1391 1391 }
1392 1392 if (newFc != NULL) {
1393 1393 assert(newFc->size() == replenish_size, "Got wrong size");
1394 1394 size_t i;
1395 1395 FreeChunk *curFc, *nextFc;
1396 1396 // carve up and link blocks 0, ..., CMSIndexedFreeListReplenish - 2
1397 1397 // The last chunk is not added to the lists but is returned as the
1398 1398 // free chunk.
1399 1399 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1400 1400 i = 0;
1401 1401 i < (CMSIndexedFreeListReplenish - 1);
1402 1402 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1403 1403 i++) {
1404 1404 curFc->setSize(size);
1405 1405 // Don't record this as a return in order to try and
1406 1406 // determine the "returns" from a GC.
1407 1407 _bt.verify_not_unallocated((HeapWord*) curFc, size);
1408 1408 _indexedFreeList[size].returnChunkAtTail(curFc, false);
1409 1409 _bt.mark_block((HeapWord*)curFc, size);
1410 1410 splitBirth(size);
1411 1411 // Don't record the initial population of the indexed list
1412 1412 // as a split birth.
1413 1413 }
1414 1414
1415 1415 // check that the arithmetic was OK above
1416 1416 assert((HeapWord*)nextFc == (HeapWord*)newFc + replenish_size,
1417 1417 "inconsistency in carving newFc");
1418 1418 curFc->setSize(size);
1419 1419 _bt.mark_block((HeapWord*)curFc, size);
1420 1420 splitBirth(size);
1421 1421 return curFc;
1422 1422 }
1423 1423 }
1424 1424 } else {
1425 1425 // Get a free chunk from the free chunk dictionary to be returned to
1426 1426 // replenish the indexed free list.
1427 1427 fc = getChunkFromDictionaryExact(size);
1428 1428 }
1429 1429 assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
1430 1430 return fc;
1431 1431 }
1432 1432
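/* The carving step above can be read in isolation: a replenishment block of
   n_chunks * chunk_words words is cut into n_chunks equal pieces; the first
   n_chunks - 1 are linked onto the indexed free list and the last one is the
   chunk handed back to the caller.  A minimal standalone sketch, using
   illustrative stand-in types and names (not HotSpot code):

     #include <cstddef>
     #include <vector>

     typedef size_t Word;   // stand-in for HeapWord

     static Word* carve_equal_chunks(Word* block, size_t chunk_words,
                                     size_t n_chunks,
                                     std::vector<Word*>* free_list) {
       Word* cur = block;
       for (size_t i = 0; i + 1 < n_chunks; i++) {  // chunks 0 .. n_chunks-2
         free_list->push_back(cur);                 // like returnChunkAtTail()
         cur += chunk_words;                        // advance one chunk
       }
       return cur;                                  // last chunk is returned
     }
*/
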
1433 1433 FreeChunk*
1434 1434 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1435 1435 assert_locked();
1436 1436 FreeChunk* fc = _dictionary->getChunk(size);
1437 1437 if (fc == NULL) {
1438 1438 return NULL;
1439 1439 }
1440 1440 _bt.allocated((HeapWord*)fc, fc->size());
1441 1441 if (fc->size() >= size + MinChunkSize) {
1442 1442 fc = splitChunkAndReturnRemainder(fc, size);
1443 1443 }
1444 1444 assert(fc->size() >= size, "chunk too small");
1445 1445 assert(fc->size() < size + MinChunkSize, "chunk too big");
1446 1446 _bt.verify_single_block((HeapWord*)fc, fc->size());
1447 1447 return fc;
1448 1448 }
1449 1449
1450 1450 FreeChunk*
1451 1451 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1452 1452 assert_locked();
1453 1453 FreeChunk* fc = _dictionary->getChunk(size);
1454 1454 if (fc == NULL) {
1455 1455 return fc;
1456 1456 }
1457 1457 _bt.allocated((HeapWord*)fc, fc->size());
1458 1458 if (fc->size() == size) {
1459 1459 _bt.verify_single_block((HeapWord*)fc, size);
1460 1460 return fc;
1461 1461 }
1462 1462 assert(fc->size() > size, "getChunk() guarantee");
1463 1463 if (fc->size() < size + MinChunkSize) {
1464 1464 // Return the chunk to the dictionary and go get a bigger one.
1465 1465 returnChunkToDictionary(fc);
1466 1466 fc = _dictionary->getChunk(size + MinChunkSize);
1467 1467 if (fc == NULL) {
1468 1468 return NULL;
1469 1469 }
1470 1470 _bt.allocated((HeapWord*)fc, fc->size());
1471 1471 }
1472 1472 assert(fc->size() >= size + MinChunkSize, "tautology");
1473 1473 fc = splitChunkAndReturnRemainder(fc, size);
1474 1474 assert(fc->size() == size, "chunk is wrong size");
1475 1475 _bt.verify_single_block((HeapWord*)fc, size);
1476 1476 return fc;
1477 1477 }
1478 1478
1479 1479 void
1480 1480 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1481 1481 assert_locked();
1482 1482
1483 1483 size_t size = chunk->size();
1484 1484 _bt.verify_single_block((HeapWord*)chunk, size);
1485 1485 // adjust _unallocated_block downward, as necessary
1486 1486 _bt.freed((HeapWord*)chunk, size);
1487 1487 _dictionary->returnChunk(chunk);
1488 1488 }
1489 1489
1490 1490 void
1491 1491 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1492 1492 assert_locked();
1493 1493 size_t size = fc->size();
1494 1494 _bt.verify_single_block((HeapWord*) fc, size);
1495 1495 _bt.verify_not_unallocated((HeapWord*) fc, size);
1496 1496 if (_adaptive_freelists) {
1497 1497 _indexedFreeList[size].returnChunkAtTail(fc);
1498 1498 } else {
1499 1499 _indexedFreeList[size].returnChunkAtHead(fc);
1500 1500 }
1501 1501 }
1502 1502
1503 1503 // Add the chunk at the end of the space, coalescing it with the largest
1504 1504 // block if that block is coterminal with it, and update BOT and census
1505 1505 // data. We would of course have preferred to coalesce it with the
1506 1506 // last block, but it's currently less expensive to find the
1507 1507 // largest block than it is to find the last.
1508 1508 void
1509 1509 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1510 1510 HeapWord* chunk, size_t size) {
1511 1511 // check that the chunk does lie in this space!
1512 1512 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1513 1513 assert_locked();
1514 1514 // One of the parallel gc task threads may be here
1515 1515 // whilst others are allocating.
1516 1516 Mutex* lock = NULL;
1517 1517 if (ParallelGCThreads != 0) {
1518 1518 lock = &_parDictionaryAllocLock;
1519 1519 }
1520 1520 FreeChunk* ec;
1521 1521 {
1522 1522 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1523 1523 ec = dictionary()->findLargestDict(); // get largest block
1524 1524 if (ec != NULL && ec->end() == chunk) {
1525 1525 // It's a coterminal block - we can coalesce.
1526 1526 size_t old_size = ec->size();
1527 1527 coalDeath(old_size);
1528 1528 removeChunkFromDictionary(ec);
1529 1529 size += old_size;
1530 1530 } else {
1531 1531 ec = (FreeChunk*)chunk;
1532 1532 }
1533 1533 }
1534 1534 ec->setSize(size);
1535 1535 debug_only(ec->mangleFreed(size));
1536 1536 if (size < SmallForDictionary) {
1537 1537 lock = _indexedFreeListParLocks[size];
1538 1538 }
1539 1539 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1540 1540 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1541 1541 // record the birth under the lock since the recording involves
1542 1542 // manipulation of the list on which the chunk lives and
1543 1543 // if the chunk is allocated and is the last on the list,
1544 1544 // the list can go away.
1545 1545 coalBirth(size);
1546 1546 }
1547 1547
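/* The test "ec->end() == chunk" above is plain pointer arithmetic: the largest
   dictionary block can absorb the incoming chunk only when it ends exactly
   where the chunk begins.  A minimal sketch with illustrative stand-in types:

     #include <cstddef>

     typedef size_t Word;   // stand-in for HeapWord

     struct Block { Word* start; size_t words; };

     // True when `largest` is coterminal with `chunk`, i.e. coalescable into
     // one block of largest.words + chunk_words words starting at largest.start.
     static bool is_coterminal(const Block& largest, const Word* chunk) {
       return largest.start + largest.words == chunk;
     }
*/
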
1548 1548 void
1549 1549 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1550 1550 size_t size) {
1551 1551 // check that the chunk does lie in this space!
1552 1552 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1553 1553 assert_locked();
1554 1554 _bt.verify_single_block(chunk, size);
1555 1555
1556 1556 FreeChunk* fc = (FreeChunk*) chunk;
1557 1557 fc->setSize(size);
1558 1558 debug_only(fc->mangleFreed(size));
1559 1559 if (size < SmallForDictionary) {
1560 1560 returnChunkToFreeList(fc);
1561 1561 } else {
1562 1562 returnChunkToDictionary(fc);
1563 1563 }
1564 1564 }
1565 1565
1566 1566 void
1567 1567 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1568 1568 size_t size, bool coalesced) {
1569 1569 assert_locked();
1570 1570 assert(chunk != NULL, "null chunk");
1571 1571 if (coalesced) {
1572 1572 // repair BOT
1573 1573 _bt.single_block(chunk, size);
1574 1574 }
1575 1575 addChunkToFreeLists(chunk, size);
1576 1576 }
1577 1577
1578 1578 // We _must_ find the purported chunk on our free lists;
1579 1579 // we assert if we don't.
1580 1580 void
1581 1581 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1582 1582 size_t size = fc->size();
1583 1583 assert_locked();
1584 1584 debug_only(verifyFreeLists());
1585 1585 if (size < SmallForDictionary) {
1586 1586 removeChunkFromIndexedFreeList(fc);
1587 1587 } else {
1588 1588 removeChunkFromDictionary(fc);
1589 1589 }
1590 1590 _bt.verify_single_block((HeapWord*)fc, size);
1591 1591 debug_only(verifyFreeLists());
1592 1592 }
1593 1593
1594 1594 void
1595 1595 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1596 1596 size_t size = fc->size();
1597 1597 assert_locked();
1598 1598 assert(fc != NULL, "null chunk");
1599 1599 _bt.verify_single_block((HeapWord*)fc, size);
1600 1600 _dictionary->removeChunk(fc);
1601 1601 // adjust _unallocated_block upward, as necessary
1602 1602 _bt.allocated((HeapWord*)fc, size);
1603 1603 }
1604 1604
1605 1605 void
1606 1606 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1607 1607 assert_locked();
1608 1608 size_t size = fc->size();
1609 1609 _bt.verify_single_block((HeapWord*)fc, size);
1610 1610 NOT_PRODUCT(
1611 1611 if (FLSVerifyIndexTable) {
1612 1612 verifyIndexedFreeList(size);
1613 1613 }
1614 1614 )
1615 1615 _indexedFreeList[size].removeChunk(fc);
1616 1616 debug_only(fc->clearNext());
1617 1617 debug_only(fc->clearPrev());
1618 1618 NOT_PRODUCT(
1619 1619 if (FLSVerifyIndexTable) {
1620 1620 verifyIndexedFreeList(size);
1621 1621 }
1622 1622 )
1623 1623 }
1624 1624
1625 1625 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1626 1626 /* A hint is the next larger size that has a surplus.
1627 1627 Start search at a size large enough to guarantee that
1628 1628 the excess is >= MIN_CHUNK. */
1629 1629 size_t start = align_object_size(numWords + MinChunkSize);
1630 1630 if (start < IndexSetSize) {
1631 1631 FreeList* it = _indexedFreeList;
1632 1632 size_t hint = _indexedFreeList[start].hint();
1633 1633 while (hint < IndexSetSize) {
1634 1634 assert(hint % MinObjAlignment == 0, "hint should be aligned");
1635 1635 FreeList *fl = &_indexedFreeList[hint];
1636 1636 if (fl->surplus() > 0 && fl->head() != NULL) {
1637 1637 // Found a list with surplus, reset original hint
1638 1638 // and split out a free chunk which is returned.
1639 1639 _indexedFreeList[start].set_hint(hint);
1640 1640 FreeChunk* res = getFromListGreater(fl, numWords);
1641 1641 assert(res == NULL || res->isFree(),
1642 1642 "Should be returning a free chunk");
1643 1643 return res;
1644 1644 }
1645 1645 hint = fl->hint(); /* keep looking */
1646 1646 }
1647 1647 /* None found. */
1648 1648 it[start].set_hint(IndexSetSize);
1649 1649 }
1650 1650 return NULL;
1651 1651 }
1652 1652
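/* The loop above walks a chain of hints: each list records, as its hint, the
   index of the next larger size that had a surplus at the last census.  A
   standalone sketch of the same walk over plain arrays (illustrative names,
   not the FreeList API):

     #include <cstddef>

     static const size_t kTableSize = 257;   // illustrative, like IndexSetSize

     // Returns the first size reachable from hint[start] whose list has both a
     // positive surplus and a non-zero count, or kTableSize if none is found.
     static size_t follow_hints(const size_t* hint, const long* surplus,
                                const size_t* count, size_t start) {
       size_t h = hint[start];
       while (h < kTableSize) {
         if (surplus[h] > 0 && count[h] > 0) return h;   // usable list found
         h = hint[h];                                    // keep looking
       }
       return kTableSize;                                // caller resets hint
     }
*/
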
1653 1653 /* Requires fl->size >= numWords + MinChunkSize */
1654 1654 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
1655 1655 size_t numWords) {
1656 1656 FreeChunk *curr = fl->head();
1657 1657 assert(curr != NULL, "List is empty");
1658 1658 size_t oldNumWords = curr->size();
1659 1659 assert(numWords >= MinChunkSize, "Word size is too small");
1660 1660 assert(oldNumWords >= numWords + MinChunkSize,
1661 1661 "Size of chunks in the list is too small");
1662 1662
1663 1663 fl->removeChunk(curr);
1664 1664 // recorded indirectly by splitChunkAndReturnRemainder -
1665 1665 // smallSplit(oldNumWords, numWords);
1666 1666 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1667 1667 // Does anything have to be done for the remainder in terms of
1668 1668 // fixing the card table?
1669 1669 assert(new_chunk == NULL || new_chunk->isFree(),
1670 1670 "Should be returning a free chunk");
1671 1671 return new_chunk;
1672 1672 }
1673 1673
1674 1674 FreeChunk*
1675 1675 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1676 1676 size_t new_size) {
1677 1677 assert_locked();
1678 1678 size_t size = chunk->size();
1679 1679 assert(size > new_size, "Split from a smaller block?");
1680 1680 assert(is_aligned(chunk), "alignment problem");
1681 1681 assert(size == adjustObjectSize(size), "alignment problem");
1682 1682 size_t rem_size = size - new_size;
1683 1683 assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
1684 1684 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
1685 1685 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1686 1686 assert(is_aligned(ffc), "alignment problem");
1687 1687 ffc->setSize(rem_size);
1688 1688 ffc->linkNext(NULL);
1689 1689 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
1690 1690 // Above must occur before BOT is updated below.
1691 1691 // adjust block offset table
1692 1692 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1693 1693 if (rem_size < SmallForDictionary) {
1694 1694 bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1695 1695 if (is_par) _indexedFreeListParLocks[rem_size]->lock();
1696 1696 returnChunkToFreeList(ffc);
1697 1697 split(size, rem_size);
1698 1698 if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
1699 1699 } else {
1700 1700 returnChunkToDictionary(ffc);
1701 1701 split(size, rem_size);
1702 1702 }
1703 1703 chunk->setSize(new_size);
1704 1704 return chunk;
1705 1705 }
1706 1706
1707 1707 void
1708 1708 CompactibleFreeListSpace::sweep_completed() {
1709 1709 // Now that space is probably plentiful, refill linear
1710 1710 // allocation blocks as needed.
1711 1711 refillLinearAllocBlocksIfNeeded();
1712 1712 }
1713 1713
1714 1714 void
1715 1715 CompactibleFreeListSpace::gc_prologue() {
1716 1716 assert_locked();
1717 1717 if (PrintFLSStatistics != 0) {
1718 1718 gclog_or_tty->print("Before GC:\n");
1719 1719 reportFreeListStatistics();
1720 1720 }
1721 1721 refillLinearAllocBlocksIfNeeded();
1722 1722 }
1723 1723
1724 1724 void
1725 1725 CompactibleFreeListSpace::gc_epilogue() {
1726 1726 assert_locked();
1727 1727 if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1728 1728 if (_smallLinearAllocBlock._word_size == 0)
1729 1729 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1730 1730 }
1731 1731 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1732 1732 _promoInfo.stopTrackingPromotions();
1733 1733 repairLinearAllocationBlocks();
1734 1734 // Print Space's stats
1735 1735 if (PrintFLSStatistics != 0) {
1736 1736 gclog_or_tty->print("After GC:\n");
1737 1737 reportFreeListStatistics();
1738 1738 }
1739 1739 }
1740 1740
1741 1741 // Iteration support, mostly delegated from a CMS generation
1742 1742
1743 1743 void CompactibleFreeListSpace::save_marks() {
1744 1744 // mark the "end" of the used space at the time of this call;
1745 1745 // note, however, that promoted objects from this point
1746 1746 // on are tracked in the _promoInfo below.
1747 1747 set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ?
1748 1748 unallocated_block() : end());
1749 1749 // inform allocator that promotions should be tracked.
1750 1750 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1751 1751 _promoInfo.startTrackingPromotions();
1752 1752 }
1753 1753
1754 1754 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1755 1755 assert(_promoInfo.tracking(), "No preceding save_marks?");
1756 1756 guarantee(SharedHeap::heap()->n_par_threads() == 0,
1757 1757 "Shouldn't be called (yet) during parallel part of gc.");
1758 1758 return _promoInfo.noPromotions();
1759 1759 }
1760 1760
1761 1761 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
1762 1762 \
1763 1763 void CompactibleFreeListSpace:: \
1764 1764 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
1765 1765 assert(SharedHeap::heap()->n_par_threads() == 0, \
1766 1766 "Shouldn't be called (yet) during parallel part of gc."); \
1767 1767 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \
1768 1768 /* \
1769 1769 * This also restores any displaced headers and removes the elements from \
1770 1770 * the iteration set as they are processed, so that we have a clean slate \
1771 1771 * at the end of the iteration. Note, thus, that if new objects are \
1772 1772 * promoted as a result of the iteration they are iterated over as well. \
1773 1773 */ \
1774 1774 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \
1775 1775 }
1776 1776
1777 1777 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1778 1778
1779 1779 //////////////////////////////////////////////////////////////////////////////
1780 1780 // We go over the list of promoted objects, removing each from the list,
1781 1781 // and applying the closure (this may, in turn, add more elements to
1782 1782 // the tail of the promoted list, and these newly added objects will
1783 1783 // also be processed) until the list is empty.
1784 1784 // To aid verification and debugging, in the non-product builds
1785 1785 // we actually forward _promoHead each time we process a promoted oop.
1786 1786 // Note that this is not necessary in general (i.e. when we don't need to
1787 1787 // call PromotionInfo::verify()) because oop_iterate can only add to the
1788 1788 // end of _promoTail, and never needs to look at _promoHead.
1789 1789
1790 1790 #define PROMOTED_OOPS_ITERATE_DEFN(OopClosureType, nv_suffix) \
1791 1791 \
1792 1792 void PromotionInfo::promoted_oops_iterate##nv_suffix(OopClosureType* cl) { \
1793 1793 NOT_PRODUCT(verify()); \
1794 1794 PromotedObject *curObj, *nextObj; \
1795 1795 for (curObj = _promoHead; curObj != NULL; curObj = nextObj) { \
1796 1796 if ((nextObj = curObj->next()) == NULL) { \
1797 1797 /* protect ourselves against additions due to closure application \
1798 1798 below by resetting the list. */ \
1799 1799 assert(_promoTail == curObj, "Should have been the tail"); \
1800 1800 _promoHead = _promoTail = NULL; \
1801 1801 } \
1802 1802 if (curObj->hasDisplacedMark()) { \
1803 1803 /* restore displaced header */ \
1804 1804 oop(curObj)->set_mark(nextDisplacedHeader()); \
1805 1805 } else { \
1806 1806 /* restore prototypical header */ \
1807 1807 oop(curObj)->init_mark(); \
1808 1808 } \
1809 1809 /* The "promoted_mark" should now not be set */ \
1810 1810 assert(!curObj->hasPromotedMark(), \
1811 1811 "Should have been cleared by restoring displaced mark-word"); \
1812 1812 NOT_PRODUCT(_promoHead = nextObj); \
1813 1813 if (cl != NULL) oop(curObj)->oop_iterate(cl); \
1814 1814 if (nextObj == NULL) { /* start at head of list reset above */ \
1815 1815 nextObj = _promoHead; \
1816 1816 } \
1817 1817 } \
1818 1818 assert(noPromotions(), "post-condition violation"); \
1819 1819 assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");\
1820 1820 assert(_spoolHead == _spoolTail, "emptied spooling buffers"); \
1821 1821 assert(_firstIndex == _nextIndex, "empty buffer"); \
1822 1822 }
1823 1823
1824 1824 // This should have been ALL_SINCE_...() just like the others,
1825 1825 // but, because the body of the method above is somewhat longer,
1826 1826 // the MSVC compiler cannot cope; as a workaround, we split the
1827 1827 // macro into its 3 constituent parts below (see original macro
1828 1828 // definition in specializedOopClosures.hpp).
1829 1829 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(PROMOTED_OOPS_ITERATE_DEFN)
1830 1830 PROMOTED_OOPS_ITERATE_DEFN(OopsInGenClosure,_v)
1831 1831
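/* The macro above has one subtle property: applying the closure can append
   newly promoted objects to the tail of the very list being walked, so the
   walk must re-read the head after it thinks it has reached the end.  A
   simplified standalone sketch of just that control flow (illustrative types;
   the header-restoration details are omitted):

     #include <cstddef>

     struct Node { Node* next; };

     // process() may append nodes by updating *head and *tail.
     static void drain(Node** head, Node** tail,
                       void (*process)(Node*, Node**, Node**)) {
       for (Node* cur = *head; cur != NULL; ) {
         Node* next = cur->next;
         if (next == NULL) {            // cur is currently the last node:
           *head = *tail = NULL;        // reset so later appends are visible
         }
         process(cur, head, tail);      // may append, making *head non-NULL
         cur = (next != NULL) ? next : *head;   // pick up fresh appends
       }
     }
*/
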
1832 1832
1833 1833 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
1834 1834 // ugghh... how would one do this efficiently for a non-contiguous space?
1835 1835 guarantee(false, "NYI");
1836 1836 }
1837 1837
1838 -bool CompactibleFreeListSpace::linearAllocationWouldFail() {
1838 +bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1839 1839 return _smallLinearAllocBlock._word_size == 0;
1840 1840 }
1841 1841
1842 1842 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1843 1843 // Fix up linear allocation blocks to look like free blocks
1844 1844 repairLinearAllocBlock(&_smallLinearAllocBlock);
1845 1845 }
1846 1846
1847 1847 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
1848 1848 assert_locked();
1849 1849 if (blk->_ptr != NULL) {
1850 1850 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
1851 1851 "Minimum block size requirement");
1852 1852 FreeChunk* fc = (FreeChunk*)(blk->_ptr);
1853 1853 fc->setSize(blk->_word_size);
1854 1854 fc->linkPrev(NULL); // mark as free
1855 1855 fc->dontCoalesce();
1856 1856 assert(fc->isFree(), "just marked it free");
1857 1857 assert(fc->cantCoalesce(), "just marked it uncoalescable");
1858 1858 }
1859 1859 }
1860 1860
1861 1861 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
1862 1862 assert_locked();
1863 1863 if (_smallLinearAllocBlock._ptr == NULL) {
1864 1864 assert(_smallLinearAllocBlock._word_size == 0,
1865 1865 "Size of linAB should be zero if the ptr is NULL");
1866 1866 // Reset the linAB refill and allocation size limit.
1867 1867 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
1868 1868 }
1869 1869 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
1870 1870 }
1871 1871
1872 1872 void
1873 1873 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
1874 1874 assert_locked();
1875 1875 assert((blk->_ptr == NULL && blk->_word_size == 0) ||
1876 1876 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
1877 1877 "blk invariant");
1878 1878 if (blk->_ptr == NULL) {
1879 1879 refillLinearAllocBlock(blk);
1880 1880 }
1881 1881 if (PrintMiscellaneous && Verbose) {
1882 1882 if (blk->_word_size == 0) {
1883 1883 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
1884 1884 }
1885 1885 }
1886 1886 }
1887 1887
1888 1888 void
1889 1889 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
1890 1890 assert_locked();
1891 1891 assert(blk->_word_size == 0 && blk->_ptr == NULL,
1892 1892 "linear allocation block should be empty");
1893 1893 FreeChunk* fc;
1894 1894 if (blk->_refillSize < SmallForDictionary &&
1895 1895 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
1896 1896 // A linAB's strategy might be to use small sizes to reduce
1897 1897 // fragmentation but still get the benefits of allocation from a
1898 1898 // linAB.
1899 1899 } else {
1900 1900 fc = getChunkFromDictionary(blk->_refillSize);
1901 1901 }
1902 1902 if (fc != NULL) {
1903 1903 blk->_ptr = (HeapWord*)fc;
1904 1904 blk->_word_size = fc->size();
1905 1905 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
1906 1906 }
1907 1907 }
1908 1908
1909 +// Support for concurrent collection policy decisions.
1910 +bool CompactibleFreeListSpace::should_concurrent_collect() const {
1911 + // In the future we might want to add in fragmentation stats --
1912 + // including erosion of the "mountain" into this decision as well.
1913 + return !adaptive_freelists() && linearAllocationWouldFail();
1914 +}
1915 +
1909 1916 // Support for compaction
1910 1917
1911 1918 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
1912 1919 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
1913 1920 // prepare_for_compaction() uses the space between live objects
1914 1921 // so that later phase can skip dead space quickly. So verification
1915 1922 // of the free lists doesn't work after.
1916 1923 }
1917 1924
1918 1925 #define obj_size(q) adjustObjectSize(oop(q)->size())
1919 1926 #define adjust_obj_size(s) adjustObjectSize(s)
1920 1927
1921 1928 void CompactibleFreeListSpace::adjust_pointers() {
1922 1929 // In other versions of adjust_pointers(), a bail out
1923 1930 // based on the amount of live data in the generation
1924 1931 // (i.e., if 0, bail out) may be used.
1925 1932 // Cannot test used() == 0 here because the free lists have already
1926 1933 // been mangled by the compaction.
1927 1934
1928 1935 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
1929 1936 // See note about verification in prepare_for_compaction().
1930 1937 }
1931 1938
1932 1939 void CompactibleFreeListSpace::compact() {
1933 1940 SCAN_AND_COMPACT(obj_size);
1934 1941 }
1935 1942
1936 1943 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
1937 1944 // where fbs is free block sizes
1938 1945 double CompactibleFreeListSpace::flsFrag() const {
1939 1946 size_t itabFree = totalSizeInIndexedFreeLists();
1940 1947 double frag = 0.0;
1941 1948 size_t i;
1942 1949
1943 1950 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1944 1951 double sz = i;
1945 1952 frag += _indexedFreeList[i].count() * (sz * sz);
1946 1953 }
1947 1954
1948 1955 double totFree = itabFree +
1949 1956 _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
1950 1957 if (totFree > 0) {
1951 1958 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
1952 1959 (totFree * totFree));
1953 1960 frag = (double)1.0 - frag;
1954 1961 } else {
1955 1962 assert(frag == 0.0, "Follows from totFree == 0");
1956 1963 }
1957 1964 return frag;
1958 1965 }
1959 1966
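/* Worked form of the metric above: with free blocks of sizes s_1 .. s_n,
   flsFrag() computes 1 - (sum of s_i^2) / (sum of s_i)^2, which is 0.0 when
   all free space is one block and approaches 1.0 as the space shatters into
   many small pieces.  A standalone sketch over a plain array of sizes:

     #include <cstddef>

     static double fls_frag_sketch(const size_t* sizes, size_t n) {
       double sum = 0.0, sum_sq = 0.0;
       for (size_t i = 0; i < n; i++) {
         double s = (double)sizes[i];
         sum    += s;
         sum_sq += s * s;
       }
       return (sum > 0.0) ? 1.0 - sum_sq / (sum * sum) : 0.0;
     }

     // e.g. one 100-word block : 1 - 10000/10000 = 0.0
     //      ten  10-word blocks: 1 - 1000/10000  = 0.9
*/
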
1960 1967 #define CoalSurplusPercent 1.05
1961 1968 #define SplitSurplusPercent 1.10
1962 1969
1963 1970 void CompactibleFreeListSpace::beginSweepFLCensus(
1964 1971 float inter_sweep_current,
1965 1972 float inter_sweep_estimate) {
1966 1973 assert_locked();
1967 1974 size_t i;
1968 1975 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1969 1976 FreeList* fl = &_indexedFreeList[i];
1970 1977 fl->compute_desired(inter_sweep_current, inter_sweep_estimate);
1971 1978 fl->set_coalDesired((ssize_t)((double)fl->desired() * CoalSurplusPercent));
1972 1979 fl->set_beforeSweep(fl->count());
1973 1980 fl->set_bfrSurp(fl->surplus());
1974 1981 }
1975 1982 _dictionary->beginSweepDictCensus(CoalSurplusPercent,
1976 1983 inter_sweep_current,
1977 1984 inter_sweep_estimate);
1978 1985 }
1979 1986
1980 1987 void CompactibleFreeListSpace::setFLSurplus() {
1981 1988 assert_locked();
1982 1989 size_t i;
1983 1990 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1984 1991 FreeList *fl = &_indexedFreeList[i];
1985 1992 fl->set_surplus(fl->count() -
1986 1993 (ssize_t)((double)fl->desired() * SplitSurplusPercent));
1987 1994 }
1988 1995 }
1989 1996
1990 1997 void CompactibleFreeListSpace::setFLHints() {
1991 1998 assert_locked();
1992 1999 size_t i;
1993 2000 size_t h = IndexSetSize;
1994 2001 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
1995 2002 FreeList *fl = &_indexedFreeList[i];
1996 2003 fl->set_hint(h);
1997 2004 if (fl->surplus() > 0) {
1998 2005 h = i;
1999 2006 }
2000 2007 }
2001 2008 }
2002 2009
2003 2010 void CompactibleFreeListSpace::clearFLCensus() {
2004 2011 assert_locked();
2005 2012 int i;
2006 2013 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2007 2014 FreeList *fl = &_indexedFreeList[i];
2008 2015 fl->set_prevSweep(fl->count());
2009 2016 fl->set_coalBirths(0);
2010 2017 fl->set_coalDeaths(0);
2011 2018 fl->set_splitBirths(0);
2012 2019 fl->set_splitDeaths(0);
2013 2020 }
2014 2021 }
2015 2022
2016 -void CompactibleFreeListSpace::endSweepFLCensus(int sweepCt) {
2023 +void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2017 2024 setFLSurplus();
2018 2025 setFLHints();
2019 2026 if (PrintGC && PrintFLSCensus > 0) {
2020 - printFLCensus(sweepCt);
2027 + printFLCensus(sweep_count);
2021 2028 }
2022 2029 clearFLCensus();
2023 2030 assert_locked();
2024 2031 _dictionary->endSweepDictCensus(SplitSurplusPercent);
2025 2032 }
2026 2033
2027 2034 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2028 2035 if (size < SmallForDictionary) {
2029 2036 FreeList *fl = &_indexedFreeList[size];
2030 2037 return (fl->coalDesired() < 0) ||
2031 2038 ((int)fl->count() > fl->coalDesired());
2032 2039 } else {
2033 2040 return dictionary()->coalDictOverPopulated(size);
2034 2041 }
2035 2042 }
2036 2043
2037 2044 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2038 2045 assert(size < SmallForDictionary, "Size too large for indexed list");
2039 2046 FreeList *fl = &_indexedFreeList[size];
2040 2047 fl->increment_coalBirths();
2041 2048 fl->increment_surplus();
2042 2049 }
2043 2050
2044 2051 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2045 2052 assert(size < SmallForDictionary, "Size too large for indexed list");
2046 2053 FreeList *fl = &_indexedFreeList[size];
2047 2054 fl->increment_coalDeaths();
2048 2055 fl->decrement_surplus();
2049 2056 }
2050 2057
2051 2058 void CompactibleFreeListSpace::coalBirth(size_t size) {
2052 2059 if (size < SmallForDictionary) {
2053 2060 smallCoalBirth(size);
2054 2061 } else {
2055 2062 dictionary()->dictCensusUpdate(size,
2056 2063 false /* split */,
2057 2064 true /* birth */);
2058 2065 }
2059 2066 }
2060 2067
2061 2068 void CompactibleFreeListSpace::coalDeath(size_t size) {
2062 2069 if (size < SmallForDictionary) {
2063 2070 smallCoalDeath(size);
2064 2071 } else {
2065 2072 dictionary()->dictCensusUpdate(size,
2066 2073 false /* split */,
2067 2074 false /* birth */);
2068 2075 }
2069 2076 }
2070 2077
2071 2078 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2072 2079 assert(size < SmallForDictionary, "Size too large for indexed list");
2073 2080 FreeList *fl = &_indexedFreeList[size];
2074 2081 fl->increment_splitBirths();
2075 2082 fl->increment_surplus();
2076 2083 }
2077 2084
2078 2085 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2079 2086 assert(size < SmallForDictionary, "Size too large for indexed list");
2080 2087 FreeList *fl = &_indexedFreeList[size];
2081 2088 fl->increment_splitDeaths();
2082 2089 fl->decrement_surplus();
2083 2090 }
2084 2091
2085 2092 void CompactibleFreeListSpace::splitBirth(size_t size) {
2086 2093 if (size < SmallForDictionary) {
2087 2094 smallSplitBirth(size);
2088 2095 } else {
2089 2096 dictionary()->dictCensusUpdate(size,
2090 2097 true /* split */,
2091 2098 true /* birth */);
2092 2099 }
2093 2100 }
2094 2101
2095 2102 void CompactibleFreeListSpace::splitDeath(size_t size) {
2096 2103 if (size < SmallForDictionary) {
2097 2104 smallSplitDeath(size);
2098 2105 } else {
2099 2106 dictionary()->dictCensusUpdate(size,
2100 2107 true /* split */,
2101 2108 false /* birth */);
2102 2109 }
2103 2110 }
2104 2111
2105 2112 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2106 2113 size_t to2 = from - to1;
2107 2114 splitDeath(from);
2108 2115 splitBirth(to1);
2109 2116 splitBirth(to2);
2110 2117 }
2111 2118
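/* Worked example of the census bookkeeping above (illustrative sizes): calling
   split(24, 16) when a 24-word chunk is split into a 16-word allocation and an
   8-word remainder records splitDeath(24), splitBirth(16) and splitBirth(8) --
   one death at the original size and one birth at each resulting size. */
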
2112 2119
2113 2120 void CompactibleFreeListSpace::print() const {
2114 2121 tty->print(" CompactibleFreeListSpace");
2115 2122 Space::print();
2116 2123 }
2117 2124
2118 2125 void CompactibleFreeListSpace::prepare_for_verify() {
2119 2126 assert_locked();
2120 2127 repairLinearAllocationBlocks();
2121 2128 // Verify that the SpoolBlocks look like free blocks of
2122 2129 // appropriate sizes... To be done ...
2123 2130 }
2124 2131
2125 2132 class VerifyAllBlksClosure: public BlkClosure {
2126 2133 const CompactibleFreeListSpace* _sp;
2127 2134 const MemRegion _span;
2128 2135
2129 2136 public:
2130 2137 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2131 2138 MemRegion span) : _sp(sp), _span(span) { }
2132 2139
2133 2140 size_t do_blk(HeapWord* addr) {
2134 2141 size_t res;
2135 2142 if (_sp->block_is_obj(addr)) {
2136 2143 oop p = oop(addr);
2137 2144 guarantee(p->is_oop(), "Should be an oop");
2138 2145 res = _sp->adjustObjectSize(p->size());
2139 2146 if (_sp->obj_is_alive(addr)) {
2140 2147 p->verify();
2141 2148 }
2142 2149 } else {
2143 2150 FreeChunk* fc = (FreeChunk*)addr;
2144 2151 res = fc->size();
2145 2152 if (FLSVerifyLists && !fc->cantCoalesce()) {
2146 2153 guarantee(_sp->verifyChunkInFreeLists(fc),
2147 2154 "Chunk should be on a free list");
2148 2155 }
2149 2156 }
2150 2157 guarantee(res != 0, "Livelock: no rank reduction!");
2151 2158 return res;
2152 2159 }
2153 2160 };
2154 2161
2155 2162 class VerifyAllOopsClosure: public OopClosure {
2156 2163 const CMSCollector* _collector;
2157 2164 const CompactibleFreeListSpace* _sp;
2158 2165 const MemRegion _span;
2159 2166 const bool _past_remark;
2160 2167 const CMSBitMap* _bit_map;
2161 2168
2162 2169 public:
2163 2170 VerifyAllOopsClosure(const CMSCollector* collector,
2164 2171 const CompactibleFreeListSpace* sp, MemRegion span,
2165 2172 bool past_remark, CMSBitMap* bit_map) :
2166 2173 OopClosure(), _collector(collector), _sp(sp), _span(span),
2167 2174 _past_remark(past_remark), _bit_map(bit_map) { }
2168 2175
2169 2176 void do_oop(oop* ptr) {
2170 2177 oop p = *ptr;
2171 2178 if (p != NULL) {
2172 2179 if (_span.contains(p)) { // the interior oop points into CMS heap
2173 2180 if (!_span.contains(ptr)) { // reference from outside CMS heap
2174 2181 // Should be a valid object; the first disjunct below allows
2175 2182 // us to sidestep an assertion in block_is_obj() that insists
2176 2183 // that p be in _sp. Note that several generations (and spaces)
2177 2184 // are spanned by _span (CMS heap) above.
2178 2185 guarantee(!_sp->is_in_reserved(p) || _sp->block_is_obj((HeapWord*)p),
2179 2186 "Should be an object");
2180 2187 guarantee(p->is_oop(), "Should be an oop");
2181 2188 p->verify();
2182 2189 if (_past_remark) {
2183 2190 // Remark has been completed, the object should be marked
2184 2191 guarantee(_bit_map->isMarked((HeapWord*)p), "Should be marked");
2185 2192 }
2186 2193 }
2187 2194 else { // reference within CMS heap
2188 2195 if (_past_remark) {
2189 2196 // Remark has been completed -- so the referent should have
2190 2197 // been marked, if referring object is.
2191 2198 if (_bit_map->isMarked(_collector->block_start(ptr))) {
2192 2199 guarantee(_bit_map->isMarked((HeapWord*)p), "Marking error?");
2193 2200 }
2194 2201 }
2195 2202 }
2196 2203 } else if (_sp->is_in_reserved(ptr)) {
2197 2204 // the reference is from FLS, and points out of FLS
2198 2205 guarantee(p->is_oop(), "Should be an oop");
2199 2206 p->verify();
2200 2207 }
2201 2208 }
2202 2209 }
2203 2210 };
2204 2211
2205 2212 void CompactibleFreeListSpace::verify(bool ignored) const {
2206 2213 assert_lock_strong(&_freelistLock);
2207 2214 verify_objects_initialized();
2208 2215 MemRegion span = _collector->_span;
2209 2216 bool past_remark = (_collector->abstract_state() ==
2210 2217 CMSCollector::Sweeping);
2211 2218
2212 2219 ResourceMark rm;
2213 2220 HandleMark hm;
2214 2221
2215 2222 // Check integrity of CFL data structures
2216 2223 _promoInfo.verify();
2217 2224 _dictionary->verify();
2218 2225 if (FLSVerifyIndexTable) {
2219 2226 verifyIndexedFreeLists();
2220 2227 }
2221 2228 // Check integrity of all objects and free blocks in space
2222 2229 {
2223 2230 VerifyAllBlksClosure cl(this, span);
2224 2231 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const
2225 2232 }
2226 2233 // Check that all references in the heap to FLS
2227 2234 // are to valid objects in FLS or that references in
2228 2235 // FLS are to valid objects elsewhere in the heap
2229 2236 if (FLSVerifyAllHeapReferences)
2230 2237 {
2231 2238 VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2232 2239 _collector->markBitMap());
2233 2240 CollectedHeap* ch = Universe::heap();
2234 2241 ch->oop_iterate(&cl); // all oops in generations
2235 2242 ch->permanent_oop_iterate(&cl); // all oops in perm gen
2236 2243 }
2237 2244
2238 2245 if (VerifyObjectStartArray) {
2239 2246 // Verify the block offset table
2240 2247 _bt.verify();
2241 2248 }
2242 2249 }
2243 2250
2244 2251 #ifndef PRODUCT
2245 2252 void CompactibleFreeListSpace::verifyFreeLists() const {
2246 2253 if (FLSVerifyLists) {
2247 2254 _dictionary->verify();
2248 2255 verifyIndexedFreeLists();
2249 2256 } else {
2250 2257 if (FLSVerifyDictionary) {
2251 2258 _dictionary->verify();
2252 2259 }
2253 2260 if (FLSVerifyIndexTable) {
2254 2261 verifyIndexedFreeLists();
2255 2262 }
2256 2263 }
2257 2264 }
2258 2265 #endif
2259 2266
2260 2267 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2261 2268 size_t i = 0;
2262 2269 for (; i < MinChunkSize; i++) {
2263 2270 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2264 2271 }
2265 2272 for (; i < IndexSetSize; i++) {
2266 2273 verifyIndexedFreeList(i);
2267 2274 }
2268 2275 }
2269 2276
2270 2277 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2271 2278 guarantee(size % 2 == 0 || _indexedFreeList[size].head() == NULL, "Odd slots should be empty");
2272 2279 for (FreeChunk* fc = _indexedFreeList[size].head(); fc != NULL;
2273 2280 fc = fc->next()) {
2274 2281 guarantee(fc->size() == size, "Size inconsistency");
2275 2282 guarantee(fc->isFree(), "!free?");
2276 2283 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2277 2284 }
2278 2285 }
2279 2286
2280 2287 #ifndef PRODUCT
2281 2288 void CompactibleFreeListSpace::checkFreeListConsistency() const {
2282 2289 assert(_dictionary->minSize() <= IndexSetSize,
2283 2290 "Some sizes can't be allocated without recourse to"
2284 2291 " linear allocation buffers");
2285 2292 assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
2286 2293 "else MIN_TREE_CHUNK_SIZE is wrong");
2287 2294 assert((IndexSetStride == 2 && IndexSetStart == 2) ||
2288 2295 (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
2289 2296 assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
2290 2297 "Some for-loops may be incorrectly initialized");
2291 2298 assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
2292 2299 "For-loops that iterate over IndexSet with stride 2 may be wrong");
2293 2300 }
2294 2301 #endif
2295 2302
2296 -void CompactibleFreeListSpace::printFLCensus(int sweepCt) const {
2303 +void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2297 2304 assert_lock_strong(&_freelistLock);
2298 - ssize_t bfrSurp = 0;
2299 - ssize_t surplus = 0;
2300 - ssize_t desired = 0;
2301 - ssize_t prevSweep = 0;
2302 - ssize_t beforeSweep = 0;
2303 - ssize_t count = 0;
2304 - ssize_t coalBirths = 0;
2305 - ssize_t coalDeaths = 0;
2306 - ssize_t splitBirths = 0;
2307 - ssize_t splitDeaths = 0;
2308 - gclog_or_tty->print("end sweep# %d\n", sweepCt);
2309 - gclog_or_tty->print("%4s\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t"
2310 - "%7s\t" "%7s\t" "%7s\t" "%7s\t" "%7s\t"
2311 - "%7s\t" "\n",
2312 - "size", "bfrsurp", "surplus", "desired", "prvSwep",
2313 - "bfrSwep", "count", "cBirths", "cDeaths", "sBirths",
2314 - "sDeaths");
2315 -
2305 + FreeList total;
2306 + gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2307 + FreeList::print_labels_on(gclog_or_tty, "size");
2316 2308 size_t totalFree = 0;
2317 2309 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2318 2310 const FreeList *fl = &_indexedFreeList[i];
2319 - totalFree += fl->count() * fl->size();
2320 -
2321 - gclog_or_tty->print("%4d\t" "%7d\t" "%7d\t" "%7d\t"
2322 - "%7d\t" "%7d\t" "%7d\t" "%7d\t"
2323 - "%7d\t" "%7d\t" "%7d\t" "\n",
2324 - fl->size(), fl->bfrSurp(), fl->surplus(), fl->desired(),
2325 - fl->prevSweep(), fl->beforeSweep(), fl->count(), fl->coalBirths(),
2326 - fl->coalDeaths(), fl->splitBirths(), fl->splitDeaths());
2327 - bfrSurp += fl->bfrSurp();
2328 - surplus += fl->surplus();
2329 - desired += fl->desired();
2330 - prevSweep += fl->prevSweep();
2331 - beforeSweep += fl->beforeSweep();
2332 - count += fl->count();
2333 - coalBirths += fl->coalBirths();
2334 - coalDeaths += fl->coalDeaths();
2335 - splitBirths += fl->splitBirths();
2336 - splitDeaths += fl->splitDeaths();
2311 + totalFree += fl->count() * fl->size();
2312 + if (i % (40*IndexSetStride) == 0) {
2313 + FreeList::print_labels_on(gclog_or_tty, "size");
2314 + }
2315 + fl->print_on(gclog_or_tty);
2316 + total.set_bfrSurp( total.bfrSurp() + fl->bfrSurp() );
2317 + total.set_surplus( total.surplus() + fl->surplus() );
2318 + total.set_desired( total.desired() + fl->desired() );
2319 + total.set_prevSweep( total.prevSweep() + fl->prevSweep() );
2320 + total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
2321 + total.set_count( total.count() + fl->count() );
2322 + total.set_coalBirths( total.coalBirths() + fl->coalBirths() );
2323 + total.set_coalDeaths( total.coalDeaths() + fl->coalDeaths() );
2324 + total.set_splitBirths(total.splitBirths() + fl->splitBirths());
2325 + total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
2337 2326 }
2338 - gclog_or_tty->print("%4s\t"
2339 - "%7d\t" "%7d\t" "%7d\t" "%7d\t" "%7d\t"
2340 - "%7d\t" "%7d\t" "%7d\t" "%7d\t" "%7d\t" "\n",
2341 - "totl",
2342 - bfrSurp, surplus, desired, prevSweep, beforeSweep,
2343 - count, coalBirths, coalDeaths, splitBirths, splitDeaths);
2344 - gclog_or_tty->print_cr("Total free in indexed lists %d words", totalFree);
2327 + total.print_on(gclog_or_tty, "TOTAL");
2328 + gclog_or_tty->print_cr("Total free in indexed lists "
2329 + SIZE_FORMAT " words", totalFree);
2345 2330 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
2346 - (double)(splitBirths+coalBirths-splitDeaths-coalDeaths)/
2347 - (prevSweep != 0 ? (double)prevSweep : 1.0),
2348 - (double)(desired - count)/(desired != 0 ? (double)desired : 1.0));
2331 + (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
2332 + (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
2333 + (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2349 2334 _dictionary->printDictCensus();
2350 2335 }
2351 2336
2352 2337 // Return the next displaced header, incrementing the pointer and
2353 2338 // recycling spool area as necessary.
2354 2339 markOop PromotionInfo::nextDisplacedHeader() {
2355 2340 assert(_spoolHead != NULL, "promotionInfo inconsistency");
2356 2341 assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
2357 2342 "Empty spool space: no displaced header can be fetched");
2358 2343 assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
2359 2344 markOop hdr = _spoolHead->displacedHdr[_firstIndex];
2360 2345 // Spool forward
2361 2346 if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
2362 2347 // forward to next block, recycling this block into spare spool buffer
2363 2348 SpoolBlock* tmp = _spoolHead->nextSpoolBlock;
2364 2349 assert(_spoolHead != _spoolTail, "Spooling storage mix-up");
2365 2350 _spoolHead->nextSpoolBlock = _spareSpool;
2366 2351 _spareSpool = _spoolHead;
2367 2352 _spoolHead = tmp;
2368 2353 _firstIndex = 1;
2369 2354 NOT_PRODUCT(
2370 2355 if (_spoolHead == NULL) { // all buffers fully consumed
2371 2356 assert(_spoolTail == NULL && _nextIndex == 1,
2372 2357 "spool buffers processing inconsistency");
2373 2358 }
2374 2359 )
2375 2360 }
2376 2361 return hdr;
2377 2362 }
2378 2363
2379 2364 void PromotionInfo::track(PromotedObject* trackOop) {
2380 2365 track(trackOop, oop(trackOop)->klass());
2381 2366 }
2382 2367
2383 2368 void PromotionInfo::track(PromotedObject* trackOop, klassOop klassOfOop) {
2384 2369 // make a copy of header as it may need to be spooled
2385 2370 markOop mark = oop(trackOop)->mark();
2386 2371 trackOop->clearNext();
2387 2372 if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
2388 2373 // save non-prototypical header, and mark oop
2389 2374 saveDisplacedHeader(mark);
2390 2375 trackOop->setDisplacedMark();
2391 2376 } else {
2392 2377 // we'd like to assert something like the following:
2393 2378 // assert(mark == markOopDesc::prototype(), "consistency check");
2394 2379 // ... but the above won't work because the age bits have not (yet) been
2395 2380 // cleared. The remainder of the check would be identical to the
2396 2381 // condition checked in must_be_preserved() above, so we don't really
2397 2382 // have anything useful to check here!
2398 2383 }
2399 2384 if (_promoTail != NULL) {
2400 2385 assert(_promoHead != NULL, "List consistency");
2401 2386 _promoTail->setNext(trackOop);
2402 2387 _promoTail = trackOop;
2403 2388 } else {
2404 2389 assert(_promoHead == NULL, "List consistency");
2405 2390 _promoHead = _promoTail = trackOop;
2406 2391 }
2407 2392 // Mark as newly promoted, so we can skip over such objects
2408 2393 // when scanning dirty cards
2409 2394 assert(!trackOop->hasPromotedMark(), "Should not have been marked");
2410 2395 trackOop->setPromotedMark();
2411 2396 }
2412 2397
2413 2398 // Save the given displaced header, incrementing the pointer and
2414 2399 // obtaining more spool area as necessary.
2415 2400 void PromotionInfo::saveDisplacedHeader(markOop hdr) {
2416 2401 assert(_spoolHead != NULL && _spoolTail != NULL,
2417 2402 "promotionInfo inconsistency");
2418 2403 assert(_spoolTail->bufferSize > _nextIndex, "Off by one error at tail?");
2419 2404 _spoolTail->displacedHdr[_nextIndex] = hdr;
2420 2405 // Spool forward
2421 2406 if (++_nextIndex == _spoolTail->bufferSize) { // last location in this block
2422 2407 // get a new spooling block
2423 2408 assert(_spoolTail->nextSpoolBlock == NULL, "tail should terminate spool list");
2424 2409 _splice_point = _spoolTail; // save for splicing
2425 2410 _spoolTail->nextSpoolBlock = getSpoolBlock(); // might fail
2426 2411 _spoolTail = _spoolTail->nextSpoolBlock; // might become NULL ...
2427 2412 // ... but will attempt filling before next promotion attempt
2428 2413 _nextIndex = 1;
2429 2414 }
2430 2415 }
2431 2416
2432 2417 // Ensure that spooling space exists. Return false if spooling space
2433 2418 // could not be obtained.
2434 2419 bool PromotionInfo::ensure_spooling_space_work() {
2435 2420 assert(!has_spooling_space(), "Only call when there is no spooling space");
2436 2421 // Try and obtain more spooling space
2437 2422 SpoolBlock* newSpool = getSpoolBlock();
2438 2423 assert(newSpool == NULL ||
2439 2424 (newSpool->bufferSize != 0 && newSpool->nextSpoolBlock == NULL),
2440 2425 "getSpoolBlock() sanity check");
2441 2426 if (newSpool == NULL) {
2442 2427 return false;
2443 2428 }
2444 2429 _nextIndex = 1;
2445 2430 if (_spoolTail == NULL) {
2446 2431 _spoolTail = newSpool;
2447 2432 if (_spoolHead == NULL) {
2448 2433 _spoolHead = newSpool;
2449 2434 _firstIndex = 1;
2450 2435 } else {
2451 2436 assert(_splice_point != NULL && _splice_point->nextSpoolBlock == NULL,
2452 2437 "Splice point invariant");
2453 2438 // Extra check that _splice_point is connected to list
2454 2439 #ifdef ASSERT
2455 2440 {
2456 2441 SpoolBlock* blk = _spoolHead;
2457 2442 for (; blk->nextSpoolBlock != NULL;
2458 2443 blk = blk->nextSpoolBlock);
2459 2444 assert(blk != NULL && blk == _splice_point,
2460 2445 "Splice point incorrect");
2461 2446 }
2462 2447 #endif // ASSERT
2463 2448 _splice_point->nextSpoolBlock = newSpool;
2464 2449 }
2465 2450 } else {
2466 2451 assert(_spoolHead != NULL, "spool list consistency");
2467 2452 _spoolTail->nextSpoolBlock = newSpool;
2468 2453 _spoolTail = newSpool;
2469 2454 }
2470 2455 return true;
2471 2456 }
2472 2457
2473 2458 // Get a free spool buffer from the free pool, getting a new block
2474 2459 // from the heap if necessary.
2475 2460 SpoolBlock* PromotionInfo::getSpoolBlock() {
2476 2461 SpoolBlock* res;
2477 2462 if ((res = _spareSpool) != NULL) {
2478 2463 _spareSpool = _spareSpool->nextSpoolBlock;
2479 2464 res->nextSpoolBlock = NULL;
2480 2465 } else { // spare spool exhausted, get some from heap
2481 2466 res = (SpoolBlock*)(space()->allocateScratch(refillSize()));
2482 2467 if (res != NULL) {
2483 2468 res->init();
2484 2469 }
2485 2470 }
2486 2471 assert(res == NULL || res->nextSpoolBlock == NULL, "postcondition");
2487 2472 return res;
2488 2473 }
2489 2474
2490 2475 void PromotionInfo::startTrackingPromotions() {
2491 2476 assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
2492 2477 "spooling inconsistency?");
2493 2478 _firstIndex = _nextIndex = 1;
2494 2479 _tracking = true;
2495 2480 }
2496 2481
2497 2482 void PromotionInfo::stopTrackingPromotions() {
2498 2483 assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
2499 2484 "spooling inconsistency?");
2500 2485 _firstIndex = _nextIndex = 1;
2501 2486 _tracking = false;
2502 2487 }
2503 2488
2504 2489 // When _spoolTail is not NULL, then the slot <_spoolTail, _nextIndex>
2505 2490 // points to the next slot available for filling.
2506 2491 // The set of slots holding displaced headers are then all those in the
2507 2492 // right-open interval denoted by:
2508 2493 //
2509 2494 // [ <_spoolHead, _firstIndex>, <_spoolTail, _nextIndex> )
2510 2495 //
2511 2496 // When _spoolTail is NULL, then the set of slots with displaced headers
2512 2497 // is all those starting at the slot <_spoolHead, _firstIndex> and
2513 2498 // going up to the last slot of last block in the linked list.
2514 2499 // In this latter case, _splice_point points to the tail block of
2515 2500 // this linked list of blocks holding displaced headers.
2516 2501 void PromotionInfo::verify() const {
2517 2502 // Verify the following:
2518 2503 // 1. the number of displaced headers matches the number of promoted
2519 2504 // objects that have displaced headers
2520 2505 // 2. each promoted object lies in this space
2521 2506 debug_only(
2522 2507 PromotedObject* junk = NULL;
2523 2508 assert(junk->next_addr() == (void*)(oop(junk)->mark_addr()),
2524 2509 "Offset of PromotedObject::_next is expected to align with "
2525 2510 " the OopDesc::_mark within OopDesc");
2526 2511 )
2527 2512 // FIXME: guarantee????
2528 2513 guarantee(_spoolHead == NULL || _spoolTail != NULL ||
2529 2514 _splice_point != NULL, "list consistency");
2530 2515 guarantee(_promoHead == NULL || _promoTail != NULL, "list consistency");
2531 2516 // count the number of objects with displaced headers
2532 2517 size_t numObjsWithDisplacedHdrs = 0;
2533 2518 for (PromotedObject* curObj = _promoHead; curObj != NULL; curObj = curObj->next()) {
2534 2519 guarantee(space()->is_in_reserved((HeapWord*)curObj), "Containment");
2535 2520 // the last promoted object may fail the mark() != NULL test of is_oop().
2536 2521 guarantee(curObj->next() == NULL || oop(curObj)->is_oop(), "must be an oop");
2537 2522 if (curObj->hasDisplacedMark()) {
2538 2523 numObjsWithDisplacedHdrs++;
2539 2524 }
2540 2525 }
2541 2526 // Count the number of displaced headers
2542 2527 size_t numDisplacedHdrs = 0;
2543 2528 for (SpoolBlock* curSpool = _spoolHead;
2544 2529 curSpool != _spoolTail && curSpool != NULL;
2545 2530 curSpool = curSpool->nextSpoolBlock) {
2546 2531 // the first entry is just a self-pointer; indices 1 through
2547 2532 // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
2548 2533 guarantee((void*)curSpool->displacedHdr == (void*)&curSpool->displacedHdr,
2549 2534 "first entry of displacedHdr should be self-referential");
2550 2535 numDisplacedHdrs += curSpool->bufferSize - 1;
2551 2536 }
2552 2537 guarantee((_spoolHead == _spoolTail) == (numDisplacedHdrs == 0),
2553 2538 "internal consistency");
2554 2539 guarantee(_spoolTail != NULL || _nextIndex == 1,
2555 2540 "Inconsistency between _spoolTail and _nextIndex");
2556 2541 // We overcounted (_firstIndex-1) worth of slots in block
2557 2542 // _spoolHead and we undercounted (_nextIndex-1) worth of
2558 2543 // slots in block _spoolTail. We make an appropriate
2559 2544 // adjustment by subtracting the first and adding the
2560 2545 // second: - (_firstIndex - 1) + (_nextIndex - 1)
2561 2546 numDisplacedHdrs += (_nextIndex - _firstIndex);
2562 2547 guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count");
2563 2548 }
2564 2549
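/* The slot arithmetic at the end of verify() can be checked in isolation:
   every spool block walked before _spoolTail contributes bufferSize - 1 slots
   (slot 0 is the self-pointer), the head block really starts at _firstIndex,
   and the tail block stops just before _nextIndex -- hence the final
   "+ (_nextIndex - _firstIndex)" correction.  A standalone sketch with
   illustrative parameter names:

     #include <cstddef>

     // blocks_before_tail: spool blocks from the head up to, but not
     // including, the tail; buffer_size: slots per block (slot 0 unused).
     static size_t displaced_header_count(size_t blocks_before_tail,
                                          size_t buffer_size,
                                          long first_index,
                                          long next_index) {
       long n = (long)(blocks_before_tail * (buffer_size - 1));
       return (size_t)(n + (next_index - first_index));
     }

     // e.g. 2 blocks of 5 slots before the tail, first_index = 3,
     //      next_index = 2:  2*4 + (2 - 3) = 7 occupied slots
     //      (2 in the head block, 4 in the middle block, 1 in the tail).
*/
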
2565 2550
2566 2551 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2567 2552 _cfls(cfls)
2568 2553 {
2569 2554 _blocks_to_claim = CMSParPromoteBlocksToClaim;
2570 2555 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2571 2556 i < CompactibleFreeListSpace::IndexSetSize;
2572 2557 i += CompactibleFreeListSpace::IndexSetStride) {
2573 2558 _indexedFreeList[i].set_size(i);
2574 2559 }
2575 2560 }
2576 2561
2577 2562 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2578 2563 FreeChunk* res;
2579 2564 word_sz = _cfls->adjustObjectSize(word_sz);
2580 2565 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
2581 2566 // This locking manages sync with other large object allocations.
2582 2567 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2583 2568 Mutex::_no_safepoint_check_flag);
2584 2569 res = _cfls->getChunkFromDictionaryExact(word_sz);
2585 2570 if (res == NULL) return NULL;
2586 2571 } else {
2587 2572 FreeList* fl = &_indexedFreeList[word_sz];
2588 2573 bool filled = false; //TRAP
2589 2574 if (fl->count() == 0) {
2590 2575 filled = true; //TRAP
2591 2576 // Attempt to refill this local free list.
2592 2577 _cfls->par_get_chunk_of_blocks(word_sz, _blocks_to_claim, fl);
2593 2578 // If it didn't work, give up.
2594 2579 if (fl->count() == 0) return NULL;
2595 2580 }
2596 2581 res = fl->getChunkAtHead();
2597 2582 assert(res != NULL, "Why was count non-zero?");
2598 2583 }
2599 2584 res->markNotFree();
2600 2585 assert(!res->isFree(), "shouldn't be marked free");
2601 2586 assert(oop(res)->klass() == NULL, "should look uninitialized");
2602 2587 // mangle a just allocated object with a distinct pattern.
2603 2588 debug_only(res->mangleAllocated(word_sz));
2604 2589 return (HeapWord*)res;
2605 2590 }
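// A minimal usage sketch for the LAB above (hypothetical caller code, not part
// of this file); one CFLS_LAB is assumed per promoting GC thread:
//
//   CFLS_LAB lab(cfls);                      // cfls: the old-gen CompactibleFreeListSpace
//   HeapWord* p = lab.alloc(obj_word_size);  // small sizes come from the local free lists,
//                                            // large ones from the shared dictionary
//   if (p != NULL) {
//     // ... copy the promoted object to p ...
//   }
//   lab.retire();                            // return unused local chunks to the space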
2606 2591
2607 2592 void CFLS_LAB::retire() {
2608 2593 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2609 2594 i < CompactibleFreeListSpace::IndexSetSize;
2610 2595 i += CompactibleFreeListSpace::IndexSetStride) {
2611 2596 if (_indexedFreeList[i].count() > 0) {
2612 2597 MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2613 2598 Mutex::_no_safepoint_check_flag);
2614 2599 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2615 2600 // Reset this list.
2616 2601 _indexedFreeList[i] = FreeList();
2617 2602 _indexedFreeList[i].set_size(i);
2618 2603 }
2619 2604 }
2620 2605 }
2621 2606
2622 2607 void
2623 2608 CompactibleFreeListSpace::
2624 2609 par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
2625 2610 assert(fl->count() == 0, "Precondition.");
2626 2611 assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2627 2612 "Precondition");
2628 2613
2629 2614 // We'll try word_sz itself in the indexed set (the loop below is currently
2630 2615 // limited to k == 1); failing that, we get a big chunk and split it up.
2631 2616 int k = 1;
2632 2617 size_t cur_sz = k * word_sz;
2633 2618 bool found = false;
2634 2619 while (cur_sz < CompactibleFreeListSpace::IndexSetSize && k == 1) {
2635 2620 FreeList* gfl = &_indexedFreeList[cur_sz];
2636 2621 FreeList fl_for_cur_sz; // Empty.
2637 2622 fl_for_cur_sz.set_size(cur_sz);
2638 2623 {
2639 2624 MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2640 2625 Mutex::_no_safepoint_check_flag);
2641 2626 if (gfl->count() != 0) {
2642 2627 size_t nn = MAX2(n/k, (size_t)1);
2643 2628 gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2644 2629 found = true;
2645 2630 }
2646 2631 }
2647 2632 // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
2648 2633 if (found) {
2649 2634 if (k == 1) {
2650 2635 fl->prepend(&fl_for_cur_sz);
2651 2636 } else {
2652 2637 // Divide each block on fl_for_cur_sz up k ways.
2653 2638 FreeChunk* fc;
2654 2639 while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
2655 2640 // Must do this in reverse order, so that anybody attempting to
2656 2641 // access the main chunk sees it as a single free block until we
2657 2642 // change it.
2658 2643 size_t fc_size = fc->size();
2659 2644 for (int i = k-1; i >= 0; i--) {
2660 2645 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2661 2646 ffc->setSize(word_sz);
2662 2647 ffc->linkNext(NULL);
2663 2648 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2664 2649 // Above must occur before BOT is updated below.
2665 2650 // splitting from the right: at this point fc_size == (i + 1) * word_sz
2666 2651 _bt.mark_block((HeapWord*)ffc, word_sz);
2667 2652 fc_size -= word_sz;
2668 2653 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2669 2654 _bt.verify_single_block((HeapWord*)fc, fc_size);
2670 2655 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2671 2656 // Push this on "fl".
2672 2657 fl->returnChunkAtHead(ffc);
2673 2658 }
2674 2659 // TRAP
2675 2660 assert(fl->tail()->next() == NULL, "List invariant.");
2676 2661 }
2677 2662 }
2678 2663 return;
2679 2664 }
2680 2665 k++; cur_sz = k * word_sz;
2681 2666 }
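// For illustration, with hypothetical values word_sz == 4 and IndexSetSize == 257:
// the loop above inspects only _indexedFreeList[4] (k == 1, cur_sz == 4).  If
// nothing is found there, k becomes 2 and the "k == 1" test fails, so larger
// multiples of word_sz are never tried and control falls through to the
// dictionary path below.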
2682 2667 // Otherwise, we'll split a block from the dictionary.
2683 2668 FreeChunk* fc = NULL;
2684 2669 FreeChunk* rem_fc = NULL;
2685 2670 size_t rem;
2686 2671 {
2687 2672 MutexLockerEx x(parDictionaryAllocLock(),
2688 2673 Mutex::_no_safepoint_check_flag);
2689 2674 while (n > 0) {
2690 2675 fc = dictionary()->getChunk(MAX2(n * word_sz,
2691 2676 _dictionary->minSize()),
2692 2677 FreeBlockDictionary::atLeast);
2693 2678 if (fc != NULL) {
2694 2679 _bt.allocated((HeapWord*)fc, fc->size()); // update _unallocated_blk
2695 2680 dictionary()->dictCensusUpdate(fc->size(),
2696 2681 true /*split*/,
2697 2682 false /*birth*/);
2698 2683 break;
2699 2684 } else {
2700 2685 n--;
2701 2686 }
2702 2687 }
2703 2688 if (fc == NULL) return;
2704 2689 // Otherwise, split up that block.
2705 2690 size_t nn = fc->size() / word_sz;
2706 2691 n = MIN2(nn, n);
2707 2692 rem = fc->size() - n * word_sz;
2708 2693 // If there is a remainder, and it's too small, allocate one fewer.
2709 2694 if (rem > 0 && rem < MinChunkSize) {
2710 2695 n--; rem += word_sz;
2711 2696 }
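// For illustration, with hypothetical numbers word_sz == 8, n == 10 and a
// dictionary chunk of size 87: rem == 87 - 10 * 8 == 7.  If MinChunkSize were 8,
// that remainder could not stand alone as a free chunk, so n drops to 9 and
// rem grows to 15, which can be handed back as a free chunk below.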
2712 2697 // First return the remainder, if any.
2713 2698 // Note that we hold the lock until we decide if we're going to give
2714 2699 // back the remainder to the dictionary, since a contending allocator
2715 2700 // may otherwise see the heap as empty. (We're willing to take that
2716 2701 // hit if the block is a small block.)
2717 2702 if (rem > 0) {
2718 2703 size_t prefix_size = n * word_sz;
2719 2704 rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2720 2705 rem_fc->setSize(rem);
2721 2706 rem_fc->linkNext(NULL);
2722 2707 rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2723 2708 // Above must occur before BOT is updated below.
2724 2709 _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2725 2710 if (rem >= IndexSetSize) {
2726 2711 returnChunkToDictionary(rem_fc);
2727 2712 dictionary()->dictCensusUpdate(rem,
2728 2713 true /*split*/,
2729 2714 true /*birth*/);
2730 2715 rem_fc = NULL;
2731 2716 }
2732 2717 // Otherwise, return it to the small list below.
2733 2718 }
2734 2719 }
2735 2720 //
2736 2721 if (rem_fc != NULL) {
2737 2722 MutexLockerEx x(_indexedFreeListParLocks[rem],
2738 2723 Mutex::_no_safepoint_check_flag);
2739 2724 _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2740 2725 _indexedFreeList[rem].returnChunkAtHead(rem_fc);
2741 2726 smallSplitBirth(rem);
2742 2727 }
2743 2728
2744 2729 // Now do the splitting up.
2745 2730 // Must do this in reverse order, so that anybody attempting to
2746 2731 // access the main chunk sees it as a single free block until we
2747 2732 // change it.
2748 2733 size_t fc_size = n * word_sz;
2749 2734 // All but first chunk in this loop
2750 2735 for (ssize_t i = n-1; i > 0; i--) {
2751 2736 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2752 2737 ffc->setSize(word_sz);
2753 2738 ffc->linkNext(NULL);
2754 2739 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2755 2740 // Above must occur before BOT is updated below.
2756 2741 // splitting from the right: at this point fc_size == (i + 1) * word_sz
2757 2742 _bt.mark_block((HeapWord*)ffc, word_sz);
2758 2743 fc_size -= word_sz;
2759 2744 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2760 2745 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2761 2746 _bt.verify_single_block((HeapWord*)fc, fc_size);
2762 2747 // Push this on "fl".
2763 2748 fl->returnChunkAtHead(ffc);
2764 2749 }
2765 2750 // First chunk
2766 2751 fc->setSize(word_sz);
2767 2752 fc->linkNext(NULL);
2768 2753 fc->linkPrev(NULL);
2769 2754 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2770 2755 _bt.verify_single_block((HeapWord*)fc, fc->size());
2771 2756 fl->returnChunkAtHead(fc);
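// For illustration of the right-to-left carving above, with a hypothetical
// n == 4 and fc at address A: blocks are created at A + 3*word_sz, A + 2*word_sz
// and A + word_sz, in that order, and only then is fc itself shrunk to word_sz.
// A parallel GC thread that inspects the main chunk at A during this window
// still sees a single free block, which is the reason for the reverse order.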
2772 2757
2773 2758 {
2774 2759 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2775 2760 Mutex::_no_safepoint_check_flag);
2776 2761 ssize_t new_births = _indexedFreeList[word_sz].splitBirths() + n;
2777 2762 _indexedFreeList[word_sz].set_splitBirths(new_births);
2778 2763 ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2779 2764 _indexedFreeList[word_sz].set_surplus(new_surplus);
2780 2765 }
2781 2766
2782 2767 // TRAP
2783 2768 assert(fl->tail()->next() == NULL, "List invariant.");
2784 2769 }
2785 2770
2786 2771 // Set up the space's par_seq_tasks structure for work claiming
2787 2772 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2788 2773 // XXX Need to suitably abstract and generalize this and the next
2789 2774 // method into one.
2790 2775 void
2791 2776 CompactibleFreeListSpace::
2792 2777 initialize_sequential_subtasks_for_rescan(int n_threads) {
2793 2778 // The "size" of each task is fixed according to rescan_task_size.
2794 2779 assert(n_threads > 0, "Unexpected n_threads argument");
2795 2780 const size_t task_size = rescan_task_size();
2796 2781 size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2797 2782 assert((used_region().start() + (n_tasks - 1)*task_size <
2798 2783 used_region().end()) &&
2799 2784 (used_region().start() + n_tasks*task_size >=
2800 2785 used_region().end()), "n_task calculation incorrect");
2801 2786 SequentialSubTasksDone* pst = conc_par_seq_tasks();
2802 2787 assert(!pst->valid(), "Clobbering existing data?");
2803 2788 pst->set_par_threads(n_threads);
2804 2789 pst->set_n_tasks((int)n_tasks);
2805 2790 }
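// For illustration, with hypothetical numbers used_region().word_size() == 10000
// and rescan_task_size() == 4096: n_tasks == (10000 + 4095) / 4096 == 3, so the
// last task starts inside the used region and the three tasks together cover at
// least 10000 words, which is what the assertion above checks.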
2806 2791
2807 2792 // Set up the space's par_seq_tasks structure for work claiming
2808 2793 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2809 2794 void
2810 2795 CompactibleFreeListSpace::
2811 2796 initialize_sequential_subtasks_for_marking(int n_threads,
2812 2797 HeapWord* low) {
2813 2798 // The "size" of each task is fixed according to marking_task_size.
2814 2799 assert(n_threads > 0, "Unexpected n_threads argument");
2815 2800 const size_t task_size = marking_task_size();
2816 2801 assert(task_size > CardTableModRefBS::card_size_in_words &&
2817 2802 (task_size % CardTableModRefBS::card_size_in_words == 0),
2818 2803 "Otherwise arithmetic below would be incorrect");
2819 2804 MemRegion span = _gen->reserved();
2820 2805 if (low != NULL) {
2821 2806 if (span.contains(low)) {
2822 2807 // Align low down to a card boundary so that
2823 2808 // we can use block_offset_careful() on span boundaries.
2824 2809 HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
2825 2810 CardTableModRefBS::card_size);
2826 2811 // Clip span prefix at aligned_low
2827 2812 span = span.intersection(MemRegion(aligned_low, span.end()));
2828 2813 } else if (low > span.end()) {
2829 2814 span = MemRegion(low, low); // Null region
2830 2815 } // else use entire span
2831 2816 }
2832 2817 assert(span.is_empty() ||
2833 2818 ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
2834 2819 "span should start at a card boundary");
2835 2820 size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
2836 2821 assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
2837 2822 assert(n_tasks == 0 ||
2838 2823 ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
2839 2824 (span.start() + n_tasks*task_size >= span.end())),
2840 2825 "n_task calculation incorrect");
2841 2826 SequentialSubTasksDone* pst = conc_par_seq_tasks();
2842 2827 assert(!pst->valid(), "Clobbering existing data?");
2843 2828 pst->set_par_threads(n_threads);
2844 2829 pst->set_n_tasks((int)n_tasks);
2845 2830 }
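// For illustration, assuming the usual 512-byte card size: a low address such as
// (HeapWord*)0x12340 would be aligned down to 0x12200 before the span is clipped,
// so the clipped span starts on a card boundary as the assertion requires, and
// n_tasks is again the rounded-up quotient of span.word_size() over task_size.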
(487 lines elided)