--- old/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
+++ new/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
1 1 /*
2 2 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 // ConcurrentMarkSweepGeneration is in support of a concurrent
26 26 // mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
27 27 // style. We assume, for now, that this generation is always the
28 28 // seniormost generation (modulo the PermGeneration), and for simplicity
29 29 // in the first implementation, that this generation is a single compactible
30 30 // space. Neither of these restrictions appears essential; both may be
31 31 // relaxed in the future when more time is available to implement the
32 32 // greater generality (and there's a need for it).
33 33 //
34 34 // Concurrent mode failures are currently handled by
35 35 // means of a sliding mark-compact.
36 36
37 37 class CMSAdaptiveSizePolicy;
38 38 class CMSConcMarkingTask;
39 39 class CMSGCAdaptivePolicyCounters;
40 40 class ConcurrentMarkSweepGeneration;
41 41 class ConcurrentMarkSweepPolicy;
42 42 class ConcurrentMarkSweepThread;
43 43 class CompactibleFreeListSpace;
44 44 class FreeChunk;
45 45 class PromotionInfo;
46 46 class ScanMarkedObjectsAgainCarefullyClosure;
47 47
48 48 // A generic CMS bit map. It's the basis for both the CMS marking bit map
49 49 // as well as for the mod union table (in each case only a subset of the
50 50 // methods are used). This is essentially a wrapper around the BitMap class,
51 51 // with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
52 52 // we have _shifter == 0, and for the mod union table we have
53 53 // _shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
54 54 // XXX 64-bit issues in BitMap?
55 55 class CMSBitMap VALUE_OBJ_CLASS_SPEC {
56 56 friend class VMStructs;
57 57
58 58 HeapWord* _bmStartWord; // base address of range covered by map
59 59 size_t _bmWordSize; // map size (in #HeapWords covered)
60 60 const int _shifter; // shifts to convert HeapWord to bit position
61 61 VirtualSpace _virtual_space; // underlying the bit map
62 62 BitMap _bm; // the bit map itself
63 63 public:
64 64 Mutex* const _lock; // mutex protecting _bm;
65 65
66 66 public:
67 67 // constructor
68 68 CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);
69 69
70 70 // allocates the actual storage for the map
71 71 bool allocate(MemRegion mr);
72 72 // field getter
73 73 Mutex* lock() const { return _lock; }
74 74 // locking verifier convenience function
75 75 void assert_locked() const PRODUCT_RETURN;
76 76
77 77 // inquiries
78 78 HeapWord* startWord() const { return _bmStartWord; }
79 79 size_t sizeInWords() const { return _bmWordSize; }
80 80 size_t sizeInBits() const { return _bm.size(); }
81 81 // the following is one past the last word in space
82 82 HeapWord* endWord() const { return _bmStartWord + _bmWordSize; }
83 83
84 84 // reading marks
85 85 bool isMarked(HeapWord* addr) const;
86 86 bool par_isMarked(HeapWord* addr) const; // does not do locking checks
87 87 bool isUnmarked(HeapWord* addr) const;
88 88 bool isAllClear() const;
89 89
90 90 // writing marks
91 91 void mark(HeapWord* addr);
92 92 // For marking by parallel GC threads;
93 93 // returns true if we did, false if another thread did
94 94 bool par_mark(HeapWord* addr);
95 95
96 96 void mark_range(MemRegion mr);
97 97 void par_mark_range(MemRegion mr);
98 98 void mark_large_range(MemRegion mr);
99 99 void par_mark_large_range(MemRegion mr);
100 100 void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
101 101 void clear_range(MemRegion mr);
102 102 void par_clear_range(MemRegion mr);
103 103 void clear_large_range(MemRegion mr);
104 104 void par_clear_large_range(MemRegion mr);
105 105 void clear_all();
106 106 void clear_all_incrementally(); // Not yet implemented!!
107 107
108 108 NOT_PRODUCT(
109 109 // checks the memory region for validity
110 110 void region_invariant(MemRegion mr);
111 111 )
112 112
113 113 // iteration
114 114 void iterate(BitMapClosure* cl) {
115 115 _bm.iterate(cl);
116 116 }
117 117 void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
118 118 void dirty_range_iterate_clear(MemRegionClosure* cl);
119 119 void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);
120 120
121 121 // auxiliary support for iteration
122 122 HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
123 123 HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
124 124 HeapWord* end_addr) const;
125 125 HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
126 126 HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
127 127 HeapWord* end_addr) const;
128 128 MemRegion getAndClearMarkedRegion(HeapWord* addr);
129 129 MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
130 130 HeapWord* end_addr);
131 131
132 132 // conversion utilities
133 133 HeapWord* offsetToHeapWord(size_t offset) const;
134 134 size_t heapWordToOffset(HeapWord* addr) const;
135 135 size_t heapWordDiffToOffsetDiff(size_t diff) const;
136 136
137 137 // debugging
138 138 // is this address range covered by the bit-map?
139 139 NOT_PRODUCT(
140 140 bool covers(MemRegion mr) const;
141 141 bool covers(HeapWord* start, size_t size = 0) const;
142 142 )
143 143 void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
144 144 };
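To make the _shifter comment at the top of CMSBitMap concrete, here is a minimal, self-contained sketch (illustrative only, not part of this change) of the address-to-bit mapping it describes, where one bit covers (1 << shifter) HeapWords; the type and helper names are assumptions.

    #include <cstddef>
    #include <cstdint>

    typedef uintptr_t HeapWord_t;   // stand-in for HotSpot's HeapWord

    // One bit per (1 << shifter) HeapWords: shifter == 0 for the marking bit
    // map, shifter == card_shift - LogHeapWordSize for the mod union table.
    inline size_t heap_word_to_bit(const HeapWord_t* start,
                                   const HeapWord_t* addr, int shifter) {
      return (size_t)(addr - start) >> shifter;   // pointer diff is in words
    }

    inline const HeapWord_t* bit_to_heap_word(const HeapWord_t* start,
                                              size_t bit, int shifter) {
      return start + (bit << shifter);
    }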
145 145
146 146 // Represents a marking stack used by the CMS collector.
147 147 // Ideally this should be GrowableArray<> just like MSC's marking stack(s).
148 148 class CMSMarkStack: public CHeapObj {
149 149 //
150 150 friend class CMSCollector; // to get at expasion stats further below
151 151 //
152 152
153 153 VirtualSpace _virtual_space; // space for the stack
154 154 oop* _base; // bottom of stack
155 155 size_t _index; // one more than last occupied index
156 156 size_t _capacity; // max #elements
157 157 Mutex _par_lock; // an advisory lock used in case of parallel access
158 158 NOT_PRODUCT(size_t _max_depth;) // max depth plumbed during run
159 159
160 160 protected:
161 161 size_t _hit_limit; // we hit max stack size limit
162 162 size_t _failed_double; // we failed expansion before hitting limit
163 163
164 164 public:
165 165 CMSMarkStack():
166 166 _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
167 167 _hit_limit(0),
168 168 _failed_double(0) {}
169 169
170 170 bool allocate(size_t size);
171 171
172 172 size_t capacity() const { return _capacity; }
173 173
174 174 oop pop() {
175 175 if (!isEmpty()) {
176 176 return _base[--_index] ;
177 177 }
178 178 return NULL;
179 179 }
180 180
181 181 bool push(oop ptr) {
182 182 if (isFull()) {
183 183 return false;
184 184 } else {
185 185 _base[_index++] = ptr;
186 186 NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
187 187 return true;
188 188 }
189 189 }
190 190
191 191 bool isEmpty() const { return _index == 0; }
192 192 bool isFull() const {
193 193 assert(_index <= _capacity, "buffer overflow");
194 194 return _index == _capacity;
195 195 }
196 196
197 197 size_t length() { return _index; }
198 198
199 199 // "Parallel versions" of some of the above
200 200 oop par_pop() {
201 201 // lock and pop
202 202 MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
203 203 return pop();
204 204 }
205 205
206 206 bool par_push(oop ptr) {
207 207 // lock and push
208 208 MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
209 209 return push(ptr);
210 210 }
211 211
212 212 // Forcibly reset the stack, losing all of its contents.
213 213 void reset() {
214 214 _index = 0;
215 215 }
216 216
217 217 // Expand the stack, typically in response to an overflow condition
218 218 void expand();
219 219
220 220 // Compute the least valued stack element.
221 221 oop least_value(HeapWord* low) {
222 222 oop least = (oop)low;
223 223 for (size_t i = 0; i < _index; i++) {
224 224 least = MIN2(least, _base[i]);
225 225 }
226 226 return least;
227 227 }
228 228
229 229 // Exposed here to allow stack expansion in || case
230 230 Mutex* par_lock() { return &_par_lock; }
231 231 };
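A rough usage sketch of the overflow protocol implied by the interface above: push() reports failure when the stack is full, and a caller may expand() and retry. The helper name is illustrative only; the collector's real overflow handling (via the overflow list) is more involved.

    // Illustrative only; assumes the CMSMarkStack declaration above.
    bool push_with_expansion(CMSMarkStack* stack, oop obj) {
      if (stack->push(obj)) {
        return true;
      }
      stack->expand();            // typically done in response to overflow
      return stack->push(obj);    // may still fail if expansion was not possible
    }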
232 232
233 233 class CardTableRS;
234 234 class CMSParGCThreadState;
235 235
236 236 class ModUnionClosure: public MemRegionClosure {
237 237 protected:
238 238 CMSBitMap* _t;
239 239 public:
240 240 ModUnionClosure(CMSBitMap* t): _t(t) { }
241 241 void do_MemRegion(MemRegion mr);
242 242 };
243 243
244 244 class ModUnionClosurePar: public ModUnionClosure {
245 245 public:
246 246 ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
247 247 void do_MemRegion(MemRegion mr);
248 248 };
249 249
250 250 // Survivor Chunk Array in support of parallelization of
251 251 // Survivor Space rescan.
252 252 class ChunkArray: public CHeapObj {
253 253 size_t _index;
254 254 size_t _capacity;
255 255 HeapWord** _array; // storage for array
256 256
257 257 public:
258 258 ChunkArray() : _index(0), _capacity(0), _array(NULL) {}
259 259 ChunkArray(HeapWord** a, size_t c):
260 260 _index(0), _capacity(c), _array(a) {}
261 261
262 262 HeapWord** array() { return _array; }
263 263 void set_array(HeapWord** a) { _array = a; }
264 264
265 265 size_t capacity() { return _capacity; }
266 266 void set_capacity(size_t c) { _capacity = c; }
267 267
268 268 size_t end() {
269 269 assert(_index < capacity(), "_index out of bounds");
270 270 return _index;
271 271 } // exclusive
272 272
273 273 HeapWord* nth(size_t n) {
274 274 assert(n < end(), "Out of bounds access");
275 275 return _array[n];
276 276 }
277 277
278 278 void reset() {
279 279 _index = 0;
280 280 }
281 281
282 282 void record_sample(HeapWord* p, size_t sz) {
283 283 // For now we do not do anything with the size
284 284 if (_index < _capacity) {
285 285 _array[_index++] = p;
286 286 }
287 287 }
288 288 };
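As a rough illustration of how the array above might be consumed (the helper is hypothetical, not part of this change): samples beyond the capacity are silently dropped by record_sample(), and the recorded boundaries are read back with nth() to partition the survivor space for parallel rescan.

    // Illustrative only; assumes the ChunkArray interface declared above.
    void visit_recorded_chunks(ChunkArray& ca) {
      for (size_t i = 0; i < ca.end(); i++) {    // end() is exclusive
        HeapWord* boundary = ca.nth(i);
        // ... hand the chunk ending at 'boundary' to a worker for rescan ...
      }
    }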
289 289
290 290 //
291 291 // Timing, allocation and promotion statistics for gc scheduling and incremental
292 292 // mode pacing. Most statistics are exponential averages.
293 293 //
294 294 class CMSStats VALUE_OBJ_CLASS_SPEC {
295 295 private:
296 296 ConcurrentMarkSweepGeneration* const _cms_gen; // The cms (old) gen.
297 297
298 298 // The following are exponential averages with factor alpha:
299 299 // avg = (100 - alpha) * avg + alpha * cur_sample
300 300 //
301 301 // The durations measure: end_time[n] - start_time[n]
302 302 // The periods measure: start_time[n] - start_time[n-1]
303 303 //
304 304 // The cms period and duration include only concurrent collections; time spent
305 305 // in foreground cms collections due to System.gc() or because of a failure to
306 306 // keep up are not included.
307 307 //
308 308 // There are 3 alphas to "bootstrap" the statistics. The _saved_alpha is the
309 309 // real value, but is used only after the first period. A value of 100 is
310 310 // used for the first sample so it gets the entire weight.
311 311 unsigned int _saved_alpha; // 0-100
312 312 unsigned int _gc0_alpha;
313 313 unsigned int _cms_alpha;
314 314
315 315 double _gc0_duration;
316 316 double _gc0_period;
317 317 size_t _gc0_promoted; // bytes promoted per gc0
318 318 double _cms_duration;
319 319 double _cms_duration_pre_sweep; // time from initiation to start of sweep
320 320 double _cms_duration_per_mb;
321 321 double _cms_period;
322 322 size_t _cms_allocated; // bytes of direct allocation per gc0 period
323 323
324 324 // Timers.
325 325 elapsedTimer _cms_timer;
326 326 TimeStamp _gc0_begin_time;
327 327 TimeStamp _cms_begin_time;
328 328 TimeStamp _cms_end_time;
329 329
330 330 // Snapshots of the amount used in the CMS generation.
331 331 size_t _cms_used_at_gc0_begin;
332 332 size_t _cms_used_at_gc0_end;
333 333 size_t _cms_used_at_cms_begin;
334 334
335 335 // Used to prevent the duty cycle from being reduced in the middle of a cms
336 336 // cycle.
337 337 bool _allow_duty_cycle_reduction;
338 338
339 339 enum {
340 340 _GC0_VALID = 0x1,
341 341 _CMS_VALID = 0x2,
342 342 _ALL_VALID = _GC0_VALID | _CMS_VALID
343 343 };
344 344
345 345 unsigned int _valid_bits;
346 346
347 347 unsigned int _icms_duty_cycle; // icms duty cycle (0-100).
348 348
349 349 protected:
350 350
351 351 // Return a duty cycle that avoids wild oscillations, by limiting the amount
352 352 // of change between old_duty_cycle and new_duty_cycle (the latter is treated
353 353 // as a recommended value).
354 354 static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
355 355 unsigned int new_duty_cycle);
356 356 unsigned int icms_update_duty_cycle_impl();
357 357
358 358 public:
359 359 CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
360 360 unsigned int alpha = CMSExpAvgFactor);
361 361
362 362 // Whether or not the statistics contain valid data; higher level statistics
363 363 // cannot be called until this returns true (they require at least one young
364 364 // gen and one cms cycle to have completed).
365 365 bool valid() const;
366 366
367 367 // Record statistics.
368 368 void record_gc0_begin();
369 369 void record_gc0_end(size_t cms_gen_bytes_used);
370 370 void record_cms_begin();
371 371 void record_cms_end();
372 372
373 373 // Allow management of the cms timer, which must be stopped/started around
374 374 // yield points.
375 375 elapsedTimer& cms_timer() { return _cms_timer; }
376 376 void start_cms_timer() { _cms_timer.start(); }
377 377 void stop_cms_timer() { _cms_timer.stop(); }
378 378
379 379 // Basic statistics; units are seconds or bytes.
380 380 double gc0_period() const { return _gc0_period; }
381 381 double gc0_duration() const { return _gc0_duration; }
382 382 size_t gc0_promoted() const { return _gc0_promoted; }
383 383 double cms_period() const { return _cms_period; }
384 384 double cms_duration() const { return _cms_duration; }
385 385 double cms_duration_per_mb() const { return _cms_duration_per_mb; }
386 386 size_t cms_allocated() const { return _cms_allocated; }
387 387
388 388 size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}
389 389
390 390 // Seconds since the last background cms cycle began or ended.
391 391 double cms_time_since_begin() const;
392 392 double cms_time_since_end() const;
393 393
394 394 // Higher level statistics--caller must check that valid() returns true before
395 395 // calling.
396 396
397 397 // Returns bytes promoted per second of wall clock time.
398 398 double promotion_rate() const;
399 399
400 400 // Returns bytes directly allocated per second of wall clock time.
401 401 double cms_allocation_rate() const;
402 402
403 403 // Rate at which space in the cms generation is being consumed (sum of the
404 404 // above two).
405 405 double cms_consumption_rate() const;
406 406
407 407 // Returns an estimate of the number of seconds until the cms generation will
408 408 // fill up, assuming no collection work is done.
409 409 double time_until_cms_gen_full() const;
410 410
411 411 // Returns an estimate of the number of seconds remaining until
412 412 // the cms generation collection should start.
413 413 double time_until_cms_start() const;
414 414
415 415 // End of higher level statistics.
416 416
417 417 // Returns the cms incremental mode duty cycle, as a percentage (0-100).
418 418 unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }
419 419
420 420 // Update the duty cycle and return the new value.
421 421 unsigned int icms_update_duty_cycle();
422 422
423 423 // Debugging.
424 424 void print_on(outputStream* st) const PRODUCT_RETURN;
425 425 void print() const { print_on(gclog_or_tty); }
426 426 };
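To make the averaging comment near the top of CMSStats concrete: with alpha expressed as a percentage (0-100), the update presumably normalizes by 100, which also explains why a first-sample alpha of 100 gives that sample the entire weight. A minimal sketch, not the HotSpot implementation:

    // avg = ((100 - alpha) * avg + alpha * cur_sample) / 100, alpha in [0, 100]
    inline double exp_avg_percent(double avg, double cur_sample,
                                  unsigned int alpha) {
      return ((100.0 - alpha) * avg + alpha * cur_sample) / 100.0;
    }
    // e.g. alpha == 100 returns cur_sample; smaller alphas weight history more.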
427 427
428 428 // A closure related to weak references processing which
429 429 // we embed in the CMSCollector, since we need to pass
430 430 // it to the reference processor for secondary filtering
431 431 // of references based on reachability of referent;
432 432 // see role of _is_alive_non_header closure in the
433 433 // ReferenceProcessor class.
434 434 // For objects in the CMS generation, this closure checks
435 435 // if the object is "live" (reachable). Used in weak
436 436 // reference processing.
437 437 class CMSIsAliveClosure: public BoolObjectClosure {
438 438 MemRegion _span;
439 439 const CMSBitMap* _bit_map;
440 440
441 441 friend class CMSCollector;
442 442 protected:
443 443 void set_span(MemRegion span) { _span = span; }
444 444 public:
445 445 CMSIsAliveClosure(CMSBitMap* bit_map):
446 446 _bit_map(bit_map) { }
447 447
448 448 CMSIsAliveClosure(MemRegion span,
449 449 CMSBitMap* bit_map):
450 450 _span(span),
451 451 _bit_map(bit_map) { }
452 452 void do_object(oop obj) {
453 453 assert(false, "not to be invoked");
454 454 }
455 455 bool do_object_b(oop obj);
456 456 };
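Given the class comment (an object in the CMS generation is considered live if it is reachable, i.e. marked), do_object_b presumably reduces to a span check plus a bit-map query. A hedged sketch, not necessarily the exact implementation:

    // Sketch only: objects outside the span are treated as live; inside the
    // span, liveness is given by the marking bit map.
    bool CMSIsAliveClosure::do_object_b(oop obj) {
      HeapWord* addr = (HeapWord*)obj;
      return addr != NULL &&
             (!_span.contains(addr) || _bit_map->isMarked(addr));
    }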
457 457
458 458
459 459 // Implements AbstractRefProcTaskExecutor for CMS.
460 460 class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
461 461 public:
462 462
463 463 CMSRefProcTaskExecutor(CMSCollector& collector)
464 464 : _collector(collector)
465 465 { }
466 466
467 467 // Executes a task using worker threads.
468 468 virtual void execute(ProcessTask& task);
469 469 virtual void execute(EnqueueTask& task);
470 470 private:
471 471 CMSCollector& _collector;
472 472 };
473 473
474 474
475 475 class CMSCollector: public CHeapObj {
476 476 friend class VMStructs;
477 477 friend class ConcurrentMarkSweepThread;
478 478 friend class ConcurrentMarkSweepGeneration;
479 479 friend class CompactibleFreeListSpace;
480 480 friend class CMSParRemarkTask;
481 481 friend class CMSConcMarkingTask;
482 482 friend class CMSRefProcTaskProxy;
483 483 friend class CMSRefProcTaskExecutor;
484 484 friend class ScanMarkedObjectsAgainCarefullyClosure; // for sampling eden
485 485 friend class SurvivorSpacePrecleanClosure; // --- ditto -------
486 486 friend class PushOrMarkClosure; // to access _restart_addr
487 487 friend class Par_PushOrMarkClosure; // to access _restart_addr
488 488 friend class MarkFromRootsClosure; // -- ditto --
489 489 // ... and for clearing cards
490 490 friend class Par_MarkFromRootsClosure; // to access _restart_addr
491 491 // ... and for clearing cards
492 492 friend class Par_ConcMarkingClosure; // to access _restart_addr etc.
493 493 friend class MarkFromRootsVerifyClosure; // to access _restart_addr
494 494 friend class PushAndMarkVerifyClosure; // -- ditto --
495 495 friend class MarkRefsIntoAndScanClosure; // to access _overflow_list
496 496 friend class PushAndMarkClosure; // -- ditto --
497 497 friend class Par_PushAndMarkClosure; // -- ditto --
498 498 friend class CMSKeepAliveClosure; // -- ditto --
499 499 friend class CMSDrainMarkingStackClosure; // -- ditto --
500 500 friend class CMSInnerParMarkAndPushClosure; // -- ditto --
501 501 NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list
502 502 friend class ReleaseForegroundGC; // to access _foregroundGCShouldWait
503 503 friend class VM_CMS_Operation;
504 504 friend class VM_CMS_Initial_Mark;
505 505 friend class VM_CMS_Final_Remark;
506 506
507 507 private:
508 508 jlong _time_of_last_gc;
509 509 void update_time_of_last_gc(jlong now) {
510 510 _time_of_last_gc = now;
511 511 }
512 512
513 513 OopTaskQueueSet* _task_queues;
514 514
515 515 // Overflow list of grey objects, threaded through mark-word
516 516 // Manipulated with CAS in the parallel/multi-threaded case.
517 517 oop _overflow_list;
518 518 // The following array-pair keeps track of mark words
519 519 // displaced for accommodating the overflow list above.
520 520 // This code will likely be revisited under RFE#4922830.
521 521 GrowableArray<oop>* _preserved_oop_stack;
522 522 GrowableArray<markOop>* _preserved_mark_stack;
523 523
524 524 int* _hash_seed;
525 525
526 526 // In support of multi-threaded concurrent phases
527 527 YieldingFlexibleWorkGang* _conc_workers;
528 528
529 529 // Performance Counters
530 530 CollectorCounters* _gc_counters;
531 531
532 532 // Initialization Errors
533 533 bool _completed_initialization;
534 534
535 535 // In support of ExplicitGCInvokesConcurrent
536 536 static bool _full_gc_requested;
537 537 unsigned int _collection_count_start;
538 +
538 539 // Should we unload classes this concurrent cycle?
539 - // Set in response to a concurrent full gc request.
540 - bool _unload_classes;
541 - bool _unloaded_classes_last_cycle;
540 + bool _should_unload_classes;
541 + unsigned int _concurrent_cycles_since_last_unload;
542 + unsigned int concurrent_cycles_since_last_unload() const {
543 + return _concurrent_cycles_since_last_unload;
544 + }
542 545 // Did we (allow) unload classes in the previous concurrent cycle?
543 - bool cms_unloaded_classes_last_cycle() const {
544 - return _unloaded_classes_last_cycle || CMSClassUnloadingEnabled;
546 + bool unloaded_classes_last_cycle() const {
547 + return concurrent_cycles_since_last_unload() == 0;
545 548 }
546 549
547 550 // Verification support
548 551 CMSBitMap _verification_mark_bm;
549 552 void verify_after_remark_work_1();
550 553 void verify_after_remark_work_2();
551 554
552 555 // true if any verification flag is on.
553 556 bool _verifying;
554 557 bool verifying() const { return _verifying; }
555 558 void set_verifying(bool v) { _verifying = v; }
556 559
557 560 // Collector policy
558 561 ConcurrentMarkSweepPolicy* _collector_policy;
559 562 ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
560 563
561 564 // Check whether the gc time limit has been
562 565 // exceeded and set the size policy flag
563 566 // appropriately.
564 567 void check_gc_time_limit();
565 568 // XXX Move these to CMSStats ??? FIX ME !!!
566 569 elapsedTimer _sweep_timer;
567 570 AdaptivePaddedAverage _sweep_estimate;
568 571
569 572 protected:
570 573 ConcurrentMarkSweepGeneration* _cmsGen; // old gen (CMS)
571 574 ConcurrentMarkSweepGeneration* _permGen; // perm gen
572 575 MemRegion _span; // span covering above two
573 576 CardTableRS* _ct; // card table
574 577
575 578 // CMS marking support structures
576 579 CMSBitMap _markBitMap;
577 580 CMSBitMap _modUnionTable;
578 581 CMSMarkStack _markStack;
579 582 CMSMarkStack _revisitStack; // used to keep track of klassKlass objects
580 583 // to revisit
581 584 CMSBitMap _perm_gen_verify_bit_map; // Mark bit map for perm gen verification support.
582 585
583 586 HeapWord* _restart_addr; // in support of marking stack overflow
584 587 void lower_restart_addr(HeapWord* low);
585 588
586 589 // Counters in support of marking stack / work queue overflow handling:
587 590 // a non-zero value indicates certain types of overflow events during
588 591 // the current CMS cycle and could lead to stack resizing efforts at
589 592 // an opportune future time.
590 593 size_t _ser_pmc_preclean_ovflw;
591 594 size_t _ser_pmc_remark_ovflw;
592 595 size_t _par_pmc_remark_ovflw;
593 596 size_t _ser_kac_ovflw;
594 597 size_t _par_kac_ovflw;
595 598 NOT_PRODUCT(size_t _num_par_pushes;)
596 599
597 600 // ("Weak") Reference processing support
598 601 ReferenceProcessor* _ref_processor;
599 602 CMSIsAliveClosure _is_alive_closure;
600 603 // keep this textually after _markBitMap; c'tor dependency
601 604
602 605 ConcurrentMarkSweepThread* _cmsThread; // the thread doing the work
603 606 ModUnionClosure _modUnionClosure;
604 607 ModUnionClosurePar _modUnionClosurePar;
605 608
606 609 // CMS abstract state machine
607 610 // initial_state: Idling
608 611 // next_state(Idling) = {Marking}
609 612 // next_state(Marking) = {Precleaning, Sweeping}
610 613 // next_state(Precleaning) = {AbortablePreclean, FinalMarking}
611 614 // next_state(AbortablePreclean) = {FinalMarking}
612 615 // next_state(FinalMarking) = {Sweeping}
613 616 // next_state(Sweeping) = {Resizing}
614 617 // next_state(Resizing) = {Resetting}
615 618 // next_state(Resetting) = {Idling}
616 619 // The numeric values below are chosen so that:
617 620 // . _collectorState <= Idling == post-sweep && pre-mark
618 621 // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
619 622 // precleaning || abortablePreclean
620 623 enum CollectorState {
621 624 Resizing = 0,
622 625 Resetting = 1,
623 626 Idling = 2,
624 627 InitialMarking = 3,
625 628 Marking = 4,
626 629 Precleaning = 5,
627 630 AbortablePreclean = 6,
628 631 FinalMarking = 7,
629 632 Sweeping = 8
630 633 };
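The ordering note above can be read as two simple predicates over the numeric values just defined; a small illustrative sketch (the helper names are not part of this change):

    // With the values above: states at or before Idling are post-sweep and
    // pre-mark; states strictly between Idling and Sweeping cover initial and
    // final marking, precleaning and abortable precleaning.
    static bool post_sweep_pre_mark(CollectorState s)    { return s <= Idling; }
    static bool marking_or_precleaning(CollectorState s) {
      return s > Idling && s < Sweeping;
    }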
631 634 static CollectorState _collectorState;
632 635
633 636 // State related to prologue/epilogue invocation for my generations
634 637 bool _between_prologue_and_epilogue;
635 638
636 639 // Signalling/State related to coordination between fore- and background GC
637 640 // Note: When the baton has been passed from background GC to foreground GC,
638 641 // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
639 642 static bool _foregroundGCIsActive; // true iff foreground collector is active or
640 643 // wants to go active
641 644 static bool _foregroundGCShouldWait; // true iff background GC is active and has not
642 645 // yet passed the baton to the foreground GC
643 646
644 647 // Support for CMSScheduleRemark (abortable preclean)
645 648 bool _abort_preclean;
646 649 bool _start_sampling;
647 650
648 651 int _numYields;
649 652 size_t _numDirtyCards;
650 - uint _sweepCount;
653 + size_t _sweep_count;
651 654 // number of full gc's since the last concurrent gc.
652 655 uint _full_gcs_since_conc_gc;
653 656
654 - // if occupancy exceeds this, start a new gc cycle
655 - double _initiatingOccupancy;
656 657 // occupancy used for bootstrapping stats
657 658 double _bootstrap_occupancy;
658 659
659 660 // timer
660 661 elapsedTimer _timer;
661 662
662 663 // Timing, allocation and promotion statistics, used for scheduling.
663 664 CMSStats _stats;
664 665
665 666 // Allocation limits installed in the young gen, used only in
666 667 // CMSIncrementalMode. When an allocation in the young gen would cross one of
667 668 // these limits, the cms generation is notified and the cms thread is started
668 669 // or stopped, respectively.
669 670 HeapWord* _icms_start_limit;
670 671 HeapWord* _icms_stop_limit;
671 672
672 673 enum CMS_op_type {
673 674 CMS_op_checkpointRootsInitial,
674 675 CMS_op_checkpointRootsFinal
675 676 };
676 677
677 678 void do_CMS_operation(CMS_op_type op);
678 679 bool stop_world_and_do(CMS_op_type op);
679 680
680 681 OopTaskQueueSet* task_queues() { return _task_queues; }
681 682 int* hash_seed(int i) { return &_hash_seed[i]; }
682 683 YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }
683 684
684 685 // Support for parallelizing Eden rescan in CMS remark phase
685 686 void sample_eden(); // ... sample Eden space top
686 687
687 688 private:
688 689 // Support for parallelizing young gen rescan in CMS remark phase
689 690 Generation* _young_gen; // the younger gen
690 691 HeapWord** _top_addr; // ... Top of Eden
691 692 HeapWord** _end_addr; // ... End of Eden
692 693 HeapWord** _eden_chunk_array; // ... Eden partitioning array
693 694 size_t _eden_chunk_index; // ... top (exclusive) of array
694 695 size_t _eden_chunk_capacity; // ... max entries in array
695 696
696 697 // Support for parallelizing survivor space rescan
697 698 HeapWord** _survivor_chunk_array;
698 699 size_t _survivor_chunk_index;
699 700 size_t _survivor_chunk_capacity;
700 701 size_t* _cursor;
701 702 ChunkArray* _survivor_plab_array;
702 703
703 704 // Support for marking stack overflow handling
704 705 bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
705 706 bool par_take_from_overflow_list(size_t num, OopTaskQueue* to_work_q);
706 707 void push_on_overflow_list(oop p);
707 708 void par_push_on_overflow_list(oop p);
708 709 // the following is, obviously, not, in general, "MT-stable"
709 710 bool overflow_list_is_empty() const;
710 711
711 712 void preserve_mark_if_necessary(oop p);
712 713 void par_preserve_mark_if_necessary(oop p);
713 714 void preserve_mark_work(oop p, markOop m);
714 715 void restore_preserved_marks_if_any();
715 716 NOT_PRODUCT(bool no_preserved_marks() const;)
716 717 // in support of testing overflow code
717 718 NOT_PRODUCT(int _overflow_counter;)
718 719 NOT_PRODUCT(bool simulate_overflow();) // sequential
719 720 NOT_PRODUCT(bool par_simulate_overflow();) // MT version
720 721
721 722 int _roots_scanning_options;
722 723 int roots_scanning_options() const { return _roots_scanning_options; }
723 724 void add_root_scanning_option(int o) { _roots_scanning_options |= o; }
724 725 void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o; }
725 726
726 727 // CMS work methods
727 728 void checkpointRootsInitialWork(bool asynch); // initial checkpoint work
728 729
729 730 // a return value of false indicates failure due to stack overflow
730 731 bool markFromRootsWork(bool asynch); // concurrent marking work
731 732
732 733 public: // FIX ME!!! only for testing
733 734 bool do_marking_st(bool asynch); // single-threaded marking
734 735 bool do_marking_mt(bool asynch); // multi-threaded marking
735 736
736 737 private:
737 738
738 739 // concurrent precleaning work
739 740 size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
740 741 ScanMarkedObjectsAgainCarefullyClosure* cl);
741 742 size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
742 743 ScanMarkedObjectsAgainCarefullyClosure* cl);
743 744 // Does precleaning work, returning a quantity indicative of
744 745 // the amount of "useful work" done.
745 746 size_t preclean_work(bool clean_refs, bool clean_survivors);
746 747 void abortable_preclean(); // Preclean while looking for possible abort
747 748 void initialize_sequential_subtasks_for_young_gen_rescan(int i);
748 749 // Helper function for above; merge-sorts the per-thread plab samples
749 750 void merge_survivor_plab_arrays(ContiguousSpace* surv);
750 751 // Resets (i.e. clears) the per-thread plab sample vectors
751 752 void reset_survivor_plab_arrays();
752 753
753 754 // final (second) checkpoint work
754 755 void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
755 756 bool init_mark_was_synchronous);
756 757 // work routine for parallel version of remark
757 758 void do_remark_parallel();
758 759 // work routine for non-parallel version of remark
759 760 void do_remark_non_parallel();
760 761 // reference processing work routine (during second checkpoint)
761 762 void refProcessingWork(bool asynch, bool clear_all_soft_refs);
762 763
763 764 // concurrent sweeping work
764 765 void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);
765 766
766 767 // (concurrent) resetting of support data structures
767 768 void reset(bool asynch);
768 769
769 770 // Clear _expansion_cause fields of constituent generations
770 771 void clear_expansion_cause();
771 772
772 773 // An auxiliary method used to record the ends of
773 774 // used regions of each generation to limit the extent of sweep
774 775 void save_sweep_limits();
775 776
776 777 // Resize the generations included in the collector.
777 778 void compute_new_size();
778 779
779 780 // A work method used by foreground collection to determine
780 781 // what type of collection (compacting or not, continuing or fresh)
781 782 // it should do.
782 783 void decide_foreground_collection_type(bool clear_all_soft_refs,
783 784 bool* should_compact, bool* should_start_over);
784 785
785 786 // A work method used by the foreground collector to do
786 787 // a mark-sweep-compact.
787 788 void do_compaction_work(bool clear_all_soft_refs);
788 789
789 790 // A work method used by the foreground collector to do
790 791 // a mark-sweep, after taking over from a possibly on-going
791 792 // concurrent mark-sweep collection.
792 793 void do_mark_sweep_work(bool clear_all_soft_refs,
793 794 CollectorState first_state, bool should_start_over);
794 795
795 796 // If the background GC is active, acquire control from the background
796 797 // GC and do the collection.
797 798 void acquire_control_and_collect(bool full, bool clear_all_soft_refs);
798 799
799 800 // For synchronizing passing of control from background to foreground
800 801 // GC. waitForForegroundGC() is called by the background
801 802 // collector. If it had to wait for a foreground collection,
802 803 // it returns true and the background collection should assume
803 804 // that the collection was finished by the foreground
804 805 // collector.
805 806 bool waitForForegroundGC();
806 807
807 808 // Incremental mode triggering: recompute the icms duty cycle and set the
808 809 // allocation limits in the young gen.
809 810 void icms_update_allocation_limits();
810 811
811 812 size_t block_size_using_printezis_bits(HeapWord* addr) const;
812 813 size_t block_size_if_printezis_bits(HeapWord* addr) const;
813 814 HeapWord* next_card_start_after_block(HeapWord* addr) const;
814 815
815 816 void setup_cms_unloading_and_verification_state();
816 817 public:
817 818 CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
818 819 ConcurrentMarkSweepGeneration* permGen,
819 820 CardTableRS* ct,
820 821 ConcurrentMarkSweepPolicy* cp);
821 822 ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
822 823
823 824 ReferenceProcessor* ref_processor() { return _ref_processor; }
824 825 void ref_processor_init();
825 826
826 827 Mutex* bitMapLock() const { return _markBitMap.lock(); }
827 828 static CollectorState abstract_state() { return _collectorState; }
828 - double initiatingOccupancy() const { return _initiatingOccupancy; }
829 829
830 830 bool should_abort_preclean() const; // Whether preclean should be aborted.
831 831 size_t get_eden_used() const;
832 832 size_t get_eden_capacity() const;
833 833
834 834 ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
835 835
836 836 // locking checks
837 837 NOT_PRODUCT(static bool have_cms_token();)
838 838
839 839 // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
840 840 bool shouldConcurrentCollect();
841 841
842 842 void collect(bool full,
843 843 bool clear_all_soft_refs,
844 844 size_t size,
845 845 bool tlab);
846 846 void collect_in_background(bool clear_all_soft_refs);
847 847 void collect_in_foreground(bool clear_all_soft_refs);
848 848
849 849 // In support of ExplicitGCInvokesConcurrent
850 850 static void request_full_gc(unsigned int full_gc_count);
851 851 // Should we unload classes in a particular concurrent cycle?
852 - bool cms_should_unload_classes() const {
853 - assert(!_unload_classes || ExplicitGCInvokesConcurrentAndUnloadsClasses,
854 - "Inconsistency; see CR 6541037");
855 - return _unload_classes || CMSClassUnloadingEnabled;
852 + bool should_unload_classes() const {
853 + return _should_unload_classes;
856 854 }
855 + bool update_should_unload_classes();
857 856
858 857 void direct_allocated(HeapWord* start, size_t size);
859 858
860 859 // Object is dead if not marked and current phase is sweeping.
861 860 bool is_dead_obj(oop obj) const;
862 861
863 862 // After a promotion (of "start"), do any necessary marking.
864 863 // If "par", then it's being done by a parallel GC thread.
865 864 // The last two args indicate if we need precise marking
866 865 // and if so the size of the object so it can be dirtied
867 866 // in its entirety.
868 867 void promoted(bool par, HeapWord* start,
869 868 bool is_obj_array, size_t obj_size);
870 869
871 870 HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
872 871 size_t word_size);
873 872
874 873 void getFreelistLocks() const;
875 874 void releaseFreelistLocks() const;
876 875 bool haveFreelistLocks() const;
877 876
878 877 // GC prologue and epilogue
879 878 void gc_prologue(bool full);
880 879 void gc_epilogue(bool full);
881 880
882 881 jlong time_of_last_gc(jlong now) {
883 882 if (_collectorState <= Idling) {
884 883 // gc not in progress
885 884 return _time_of_last_gc;
886 885 } else {
887 886 // collection in progress
888 887 return now;
889 888 }
890 889 }
891 890
892 891 // Support for parallel remark of survivor space
893 892 void* get_data_recorder(int thr_num);
894 893
895 894 CMSBitMap* markBitMap() { return &_markBitMap; }
896 895 void directAllocated(HeapWord* start, size_t size);
897 896
898 897 // main CMS steps and related support
899 898 void checkpointRootsInitial(bool asynch);
900 899 bool markFromRoots(bool asynch); // a return value of false indicates failure
901 900 // due to stack overflow
902 901 void preclean();
903 902 void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
904 903 bool init_mark_was_synchronous);
905 904 void sweep(bool asynch);
906 905
907 906 // Check that the currently executing thread is the expected
908 907 // one (foreground collector or background collector).
909 908 void check_correct_thread_executing() PRODUCT_RETURN;
910 909 // XXXPERM void print_statistics() PRODUCT_RETURN;
911 910
912 911 bool is_cms_reachable(HeapWord* addr);
913 912
914 913 // Performance Counter Support
915 914 CollectorCounters* counters() { return _gc_counters; }
916 915
917 916 // timer stuff
918 917 void startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); }
919 918 void stopTimer() { assert( _timer.is_active(), "Error"); _timer.stop(); }
920 919 void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); }
921 920 double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }
922 921
923 922 int yields() { return _numYields; }
924 923 void resetYields() { _numYields = 0; }
925 924 void incrementYields() { _numYields++; }
926 925 void resetNumDirtyCards() { _numDirtyCards = 0; }
927 926 void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
928 927 size_t numDirtyCards() { return _numDirtyCards; }
929 928
930 929 static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
931 930 static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
932 931 static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
933 932 static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
934 - uint sweepCount() const { return _sweepCount; }
935 - void incrementSweepCount() { _sweepCount++; }
933 + size_t sweep_count() const { return _sweep_count; }
934 + void increment_sweep_count() { _sweep_count++; }
936 935
937 936 // Timers/stats for gc scheduling and incremental mode pacing.
938 937 CMSStats& stats() { return _stats; }
939 938
940 939 // Convenience methods that check whether CMSIncrementalMode is enabled and
941 940 // forward to the corresponding methods in ConcurrentMarkSweepThread.
942 941 static void start_icms();
943 942 static void stop_icms(); // Called at the end of the cms cycle.
944 943 static void disable_icms(); // Called before a foreground collection.
945 944 static void enable_icms(); // Called after a foreground collection.
946 945 void icms_wait(); // Called at yield points.
947 946
948 947 // Adaptive size policy
949 948 CMSAdaptiveSizePolicy* size_policy();
950 949 CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
951 950
952 951 // debugging
953 952 void verify(bool);
954 953 bool verify_after_remark();
955 954 void verify_ok_to_terminate() const PRODUCT_RETURN;
956 955 void verify_work_stacks_empty() const PRODUCT_RETURN;
957 956 void verify_overflow_empty() const PRODUCT_RETURN;
958 957
959 958 // convenience methods in support of debugging
960 959 static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
961 960 HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
962 961
963 962 // accessors
964 963 CMSMarkStack* verification_mark_stack() { return &_markStack; }
965 964 CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; }
966 965
967 966 // Get the bit map with a perm gen "deadness" information.
968 967 CMSBitMap* perm_gen_verify_bit_map() { return &_perm_gen_verify_bit_map; }
969 968
970 969 // Initialization errors
971 970 bool completed_initialization() { return _completed_initialization; }
972 971 };
973 972
974 973 class CMSExpansionCause : public AllStatic {
975 974 public:
976 975 enum Cause {
977 976 _no_expansion,
978 977 _satisfy_free_ratio,
979 978 _satisfy_promotion,
980 979 _satisfy_allocation,
981 980 _allocate_par_lab,
982 981 _allocate_par_spooling_space,
983 982 _adaptive_size_policy
984 983 };
985 984 // Return a string describing the cause of the expansion.
986 985 static const char* to_string(CMSExpansionCause::Cause cause);
987 986 };
988 987
989 988 class ConcurrentMarkSweepGeneration: public CardGeneration {
990 989 friend class VMStructs;
991 990 friend class ConcurrentMarkSweepThread;
992 991 friend class ConcurrentMarkSweep;
993 992 friend class CMSCollector;
994 993 protected:
995 994 static CMSCollector* _collector; // the collector that collects us
996 995 CompactibleFreeListSpace* _cmsSpace; // underlying space (only one for now)
997 996
998 997 // Performance Counters
999 998 GenerationCounters* _gen_counters;
1000 999 GSpaceCounters* _space_counters;
1001 1000
1002 1001 // Words directly allocated, used by CMSStats.
1003 1002 size_t _direct_allocated_words;
1004 1003
1005 1004 // Non-product stat counters
1006 1005 NOT_PRODUCT(
1007 1006 int _numObjectsPromoted;
1008 1007 int _numWordsPromoted;
1009 1008 int _numObjectsAllocated;
1010 1009 int _numWordsAllocated;
1011 1010 )
1012 1011
1013 1012 // Used for sizing decisions
1014 1013 bool _incremental_collection_failed;
1015 1014 bool incremental_collection_failed() {
1016 1015 return _incremental_collection_failed;
1017 1016 }
1018 1017 void set_incremental_collection_failed() {
1019 1018 _incremental_collection_failed = true;
1020 1019 }
1021 1020 void clear_incremental_collection_failed() {
1022 1021 _incremental_collection_failed = false;
1023 1022 }
1024 1023
1024 + // accessors
1025 + void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
1026 + CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }
1027 +
1025 1028 private:
1026 1029 // For parallel young-gen GC support.
1027 1030 CMSParGCThreadState** _par_gc_thread_states;
1028 1031
1029 1032 // Reason generation was expanded
1030 1033 CMSExpansionCause::Cause _expansion_cause;
1031 1034
1032 - // accessors
1033 - void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
1034 - CMSExpansionCause::Cause expansion_cause() { return _expansion_cause; }
1035 -
1036 1035 // In support of MinChunkSize being larger than min object size
1037 1036 const double _dilatation_factor;
1038 1037
1039 1038 enum CollectionTypes {
1040 1039 Concurrent_collection_type = 0,
1041 1040 MS_foreground_collection_type = 1,
1042 1041 MSC_foreground_collection_type = 2,
1043 1042 Unknown_collection_type = 3
1044 1043 };
1045 1044
1046 1045 CollectionTypes _debug_collection_type;
1047 1046
1047 + // Fraction of current occupancy at which to start a CMS collection which
1048 + // will collect this generation (at least).
1049 + double _initiating_occupancy;
1050 +
1048 1051 protected:
1049 1052 // Grow generation by specified size (returns false if unable to grow)
1050 1053 bool grow_by(size_t bytes);
1051 1054 // Grow generation to reserved size.
1052 1055 bool grow_to_reserved();
1053 1056 // Shrink generation by specified size (returns false if unable to shrink)
1054 1057 virtual void shrink_by(size_t bytes);
1055 1058
1056 1059 // Update statistics for GC
1057 1060 virtual void update_gc_stats(int level, bool full);
1058 1061
1059 1062 // Maximum available space in the generation (including uncommitted)
1060 1063 // space.
1061 1064 size_t max_available() const;
1062 1065
1066 + // getter and initializer for _initiating_occupancy field.
1067 + double initiating_occupancy() const { return _initiating_occupancy; }
1068 + void init_initiating_occupancy(intx io, intx tr);
1069 +
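The initializer declared just above is defined in the corresponding .cpp file, which is not part of this header diff. As a hedged sketch of the intent (assuming io is a user-specified occupancy percentage such as CMSInitiatingOccupancyFraction, negative when unset, and tr a trigger ratio such as CMSTriggerRatio), it might look like:

    // Hedged sketch only; flag names are assumptions, not taken from this diff.
    void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
      if (io >= 0) {
        _initiating_occupancy = (double)io / 100.0;     // explicit percentage
      } else {
        // otherwise derive a threshold from the free-ratio and trigger-ratio
        _initiating_occupancy = ((100.0 - MinHeapFreeRatio) +
                                 (double)tr * MinHeapFreeRatio / 100.0) / 100.0;
      }
    }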
1063 1070 public:
1064 1071 ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
1065 1072 int level, CardTableRS* ct,
1066 1073 bool use_adaptive_freelists,
1067 1074 FreeBlockDictionary::DictionaryChoice);
1068 1075
1069 1076 // Accessors
1070 1077 CMSCollector* collector() const { return _collector; }
1071 1078 static void set_collector(CMSCollector* collector) {
1072 1079 assert(_collector == NULL, "already set");
1073 1080 _collector = collector;
1074 1081 }
1075 1082 CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; }
1076 1083
1077 1084 Mutex* freelistLock() const;
1078 1085
1079 1086 virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
1080 1087
1081 1088 // Adaptive size policy
1082 1089 CMSAdaptiveSizePolicy* size_policy();
1083 1090
1084 1091 bool refs_discovery_is_atomic() const { return false; }
1085 1092 bool refs_discovery_is_mt() const {
1086 1093 // Note: CMS does MT-discovery during the parallel-remark
1087 1094 // phases. Use ReferenceProcessorMTMutator to make refs
1088 1095 // discovery MT-safe during such phases or other parallel
1089 1096 // discovery phases in the future. This may all go away
1090 1097 // if/when we decide that refs discovery is sufficiently
1091 1098 // rare that the cost of the CAS's involved is in the
1092 1099 // noise. That's a measurement that should be done, and
1093 1100 // the code simplified if that turns out to be the case.
1094 1101 return false;
1095 1102 }
1096 1103
1097 1104 // Override
1098 1105 virtual void ref_processor_init();
1099 1106
1100 1107 void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }
1101 1108
1102 1109 // Space enquiries
1103 1110 size_t capacity() const;
1104 1111 size_t used() const;
1105 1112 size_t free() const;
1106 - double occupancy() { return ((double)used())/((double)capacity()); }
1113 + double occupancy() const { return ((double)used())/((double)capacity()); }
1107 1114 size_t contiguous_available() const;
1108 1115 size_t unsafe_max_alloc_nogc() const;
1109 1116
1110 1117 // over-rides
1111 1118 MemRegion used_region() const;
1112 1119 MemRegion used_region_at_save_marks() const;
1113 1120
1114 1121 // Does a "full" (forced) collection invoked on this generation collect
1115 1122 // all younger generations as well? Note that the second conjunct is a
1116 1123 // hack to allow the collection of the younger gen first if the flag is
1117 1124 // set. This is better than using the policy's should_collect_gen0_first()
1118 1125 // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
1119 1126 virtual bool full_collects_younger_generations() const {
1120 1127 return UseCMSCompactAtFullCollection && !CollectGen0First;
1121 1128 }
1122 1129
1123 1130 void space_iterate(SpaceClosure* blk, bool usedOnly = false);
1124 1131
1125 1132 // Support for compaction
1126 1133 CompactibleSpace* first_compaction_space() const;
1127 1134 // Adjust quantities in the generation affected by
1128 1135 // the compaction.
1129 1136 void reset_after_compaction();
1130 1137
1131 1138 // Allocation support
1132 1139 HeapWord* allocate(size_t size, bool tlab);
1133 1140 HeapWord* have_lock_and_allocate(size_t size, bool tlab);
1134 1141 oop promote(oop obj, size_t obj_size, oop* ref);
1135 1142 HeapWord* par_allocate(size_t size, bool tlab) {
1136 1143 return allocate(size, tlab);
1137 1144 }
1138 1145
1139 1146 // Incremental mode triggering.
1140 1147 HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
1141 1148 size_t word_size);
1142 1149
1143 1150 // Used by CMSStats to track direct allocation. The value is sampled and
1144 1151 // reset after each young gen collection.
1145 1152 size_t direct_allocated_words() const { return _direct_allocated_words; }
1146 1153 void reset_direct_allocated_words() { _direct_allocated_words = 0; }
1147 1154
1148 1155 // Overrides for parallel promotion.
1149 1156 virtual oop par_promote(int thread_num,
1150 1157 oop obj, markOop m, size_t word_sz);
1151 1158 // This one should not be called for CMS.
1152 1159 virtual void par_promote_alloc_undo(int thread_num,
1153 1160 HeapWord* obj, size_t word_sz);
1154 1161 virtual void par_promote_alloc_done(int thread_num);
1155 1162 virtual void par_oop_since_save_marks_iterate_done(int thread_num);
1156 1163
1157 1164 virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
1158 1165 bool younger_handles_promotion_failure) const;
1159 1166
1160 1167 bool should_collect(bool full, size_t size, bool tlab);
1161 - // XXXPERM
1162 - bool shouldConcurrentCollect(double initiatingOccupancy); // XXXPERM
1168 + virtual bool should_concurrent_collect() const;
1169 + virtual bool is_too_full() const;
1163 1170 void collect(bool full,
1164 1171 bool clear_all_soft_refs,
1165 1172 size_t size,
1166 1173 bool tlab);
1167 1174
1168 1175 HeapWord* expand_and_allocate(size_t word_size,
1169 1176 bool tlab,
1170 1177 bool parallel = false);
1171 1178
1172 1179 // GC prologue and epilogue
1173 1180 void gc_prologue(bool full);
1174 1181 void gc_prologue_work(bool full, bool registerClosure,
1175 1182 ModUnionClosure* modUnionClosure);
1176 1183 void gc_epilogue(bool full);
1177 1184 void gc_epilogue_work(bool full);
1178 1185
1179 1186 // Time since last GC of this generation
1180 1187 jlong time_of_last_gc(jlong now) {
1181 1188 return collector()->time_of_last_gc(now);
1182 1189 }
1183 1190 void update_time_of_last_gc(jlong now) {
1184 1191 collector()-> update_time_of_last_gc(now);
1185 1192 }
1186 1193
1187 1194 // Allocation failure
1188 1195 void expand(size_t bytes, size_t expand_bytes,
1189 1196 CMSExpansionCause::Cause cause);
1190 1197 void shrink(size_t bytes);
1191 1198 HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
1192 1199 bool expand_and_ensure_spooling_space(PromotionInfo* promo);
1193 1200
1194 1201 // Iteration support and related enquiries
1195 1202 void save_marks();
1196 1203 bool no_allocs_since_save_marks();
1197 1204 void object_iterate_since_last_GC(ObjectClosure* cl);
1198 1205 void younger_refs_iterate(OopsInGenClosure* cl);
1199 1206
1200 1207 // Iteration support specific to CMS generations
1201 1208 void save_sweep_limit();
1202 1209
1203 1210 // More iteration support
1204 1211 virtual void oop_iterate(MemRegion mr, OopClosure* cl);
1205 1212 virtual void oop_iterate(OopClosure* cl);
1206 1213 virtual void object_iterate(ObjectClosure* cl);
1207 1214
1208 1215 // Need to declare the full complement of closures, whether we'll
1209 1216 // override them or not, or get message from the compiler:
1210 1217 // oop_since_save_marks_iterate_nv hides virtual function...
1211 1218 #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
1212 1219 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
1213 1220 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)
1214 1221
1215 1222 // Smart allocation XXX -- move to CFLSpace?
1216 1223 void setNearLargestChunk();
1217 1224 bool isNearLargestChunk(HeapWord* addr);
1218 1225
1219 1226 // Get the chunk at the end of the space. Delegates to
1220 1227 // the space.
1221 1228 FreeChunk* find_chunk_at_end();
1222 1229
1223 1230 // Overriding of unused functionality (sharing not yet supported with CMS)
1224 1231 void pre_adjust_pointers();
1225 1232 void post_compact();
1226 1233
1227 1234 // Debugging
1228 1235 void prepare_for_verify();
1229 1236 void verify(bool allow_dirty);
1230 1237 void print_statistics() PRODUCT_RETURN;
1231 1238
1232 1239 // Performance Counters support
1233 1240 virtual void update_counters();
1234 1241 virtual void update_counters(size_t used);
1235 1242 void initialize_performance_counters();
1236 1243 CollectorCounters* counters() { return collector()->counters(); }
1237 1244
1238 1245 // Support for parallel remark of survivor space
1239 1246 void* get_data_recorder(int thr_num) {
1240 1247 //Delegate to collector
1241 1248 return collector()->get_data_recorder(thr_num);
1242 1249 }
1243 1250
1244 1251 // Printing
1245 1252 const char* name() const;
1246 1253 virtual const char* short_name() const { return "CMS"; }
1247 1254 void print() const;
1248 1255 void printOccupancy(const char* s);
1249 1256 bool must_be_youngest() const { return false; }
1250 1257 bool must_be_oldest() const { return true; }
1251 1258
1252 1259 void compute_new_size();
1253 1260
1254 1261 CollectionTypes debug_collection_type() { return _debug_collection_type; }
1255 1262 void rotate_debug_collection_type();
1256 1263 };
1257 1264
1258 1265 class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {
1259 1266
1260 1267 // Return the size policy from the heap's collector
1261 1268 // policy casted to CMSAdaptiveSizePolicy*.
1262 1269 CMSAdaptiveSizePolicy* cms_size_policy() const;
1263 1270
1264 1271 // Resize the generation based on the adaptive size
1265 1272 // policy.
1266 1273 void resize(size_t cur_promo, size_t desired_promo);
1267 1274
1268 1275 // Return the GC counters from the collector policy
1269 1276 CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
1270 1277
1271 1278 virtual void shrink_by(size_t bytes);
1272 1279
1273 1280 public:
1274 1281 virtual void compute_new_size();
1275 1282 ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
1276 1283 int level, CardTableRS* ct,
1277 1284 bool use_adaptive_freelists,
1278 1285 FreeBlockDictionary::DictionaryChoice
1279 1286 dictionaryChoice) :
1280 1287 ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
1281 1288 use_adaptive_freelists, dictionaryChoice) {}
1282 1289
1283 1290 virtual const char* short_name() const { return "ASCMS"; }
1284 1291 virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }
1285 1292
1286 1293 virtual void update_counters();
1287 1294 virtual void update_counters(size_t used);
1288 1295 };
1289 1296
1290 1297 //
1291 1298 // Closures of various sorts used by CMS to accomplish its work
1292 1299 //
1293 1300
1294 1301 // This closure is used to check that a certain set of oops is empty.
1295 1302 class FalseClosure: public OopClosure {
1296 1303 public:
1297 1304 void do_oop(oop* p) {
1298 1305 guarantee(false, "Should be an empty set");
1299 1306 }
1300 1307 };
1301 1308
1302 1309 // This closure is used to do concurrent marking from the roots
1303 1310 // following the first checkpoint.
1304 1311 class MarkFromRootsClosure: public BitMapClosure {
1305 1312 CMSCollector* _collector;
1306 1313 MemRegion _span;
1307 1314 CMSBitMap* _bitMap;
1308 1315 CMSBitMap* _mut;
1309 1316 CMSMarkStack* _markStack;
1310 1317 CMSMarkStack* _revisitStack;
1311 1318 bool _yield;
1312 1319 int _skipBits;
1313 1320 HeapWord* _finger;
1314 1321 HeapWord* _threshold;
1315 1322 DEBUG_ONLY(bool _verifying;)
1316 1323
1317 1324 public:
1318 1325 MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
1319 1326 CMSBitMap* bitMap,
1320 1327 CMSMarkStack* markStack,
1321 1328 CMSMarkStack* revisitStack,
1322 1329 bool should_yield, bool verifying = false);
1323 1330 void do_bit(size_t offset);
1324 1331 void reset(HeapWord* addr);
1325 1332 inline void do_yield_check();
1326 1333
1327 1334 private:
1328 1335 void scanOopsInOop(HeapWord* ptr);
1329 1336 void do_yield_work();
1330 1337 };
1331 1338
1332 1339 // This closure is used to do concurrent multi-threaded
1333 1340 // marking from the roots following the first checkpoint.
1334 1341 // XXX This should really be a subclass of the serial version
1335 1342 // above, but I have not had the time to refactor things cleanly.
1336 1343 // That will be done for Dolphin.
1337 1344 class Par_MarkFromRootsClosure: public BitMapClosure {
1338 1345 CMSCollector* _collector;
1339 1346 MemRegion _whole_span;
1340 1347 MemRegion _span;
1341 1348 CMSBitMap* _bit_map;
1342 1349 CMSBitMap* _mut;
1343 1350 OopTaskQueue* _work_queue;
1344 1351 CMSMarkStack* _overflow_stack;
1345 1352 CMSMarkStack* _revisit_stack;
1346 1353 bool _yield;
1347 1354 int _skip_bits;
1348 1355 HeapWord* _finger;
1349 1356 HeapWord* _threshold;
1350 1357 CMSConcMarkingTask* _task;
1351 1358 public:
1352 1359 Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
1353 1360 MemRegion span,
1354 1361 CMSBitMap* bit_map,
1355 1362 OopTaskQueue* work_queue,
1356 1363 CMSMarkStack* overflow_stack,
1357 1364 CMSMarkStack* revisit_stack,
1358 1365 bool should_yield);
1359 1366 void do_bit(size_t offset);
1360 1367 inline void do_yield_check();
1361 1368
1362 1369 private:
1363 1370 void scan_oops_in_oop(HeapWord* ptr);
1364 1371 void do_yield_work();
1365 1372 bool get_work_from_overflow_stack();
1366 1373 };
1367 1374
1368 1375 // The following closures are used to do certain kinds of verification of
1369 1376 // CMS marking.
1370 1377 class PushAndMarkVerifyClosure: public OopClosure {
1371 1378 CMSCollector* _collector;
1372 1379 MemRegion _span;
1373 1380 CMSBitMap* _verification_bm;
1374 1381 CMSBitMap* _cms_bm;
1375 1382 CMSMarkStack* _mark_stack;
1376 1383 public:
1377 1384 PushAndMarkVerifyClosure(CMSCollector* cms_collector,
1378 1385 MemRegion span,
1379 1386 CMSBitMap* verification_bm,
1380 1387 CMSBitMap* cms_bm,
1381 1388 CMSMarkStack* mark_stack);
1382 1389 void do_oop(oop* p);
1383 1390 // Deal with a stack overflow condition
1384 1391 void handle_stack_overflow(HeapWord* lost);
1385 1392 };
1386 1393
1387 1394 class MarkFromRootsVerifyClosure: public BitMapClosure {
1388 1395 CMSCollector* _collector;
1389 1396 MemRegion _span;
1390 1397 CMSBitMap* _verification_bm;
1391 1398 CMSBitMap* _cms_bm;
1392 1399 CMSMarkStack* _mark_stack;
1393 1400 HeapWord* _finger;
1394 1401 PushAndMarkVerifyClosure _pam_verify_closure;
1395 1402 public:
1396 1403 MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
1397 1404 CMSBitMap* verification_bm,
1398 1405 CMSBitMap* cms_bm,
1399 1406 CMSMarkStack* mark_stack);
1400 1407 void do_bit(size_t offset);
1401 1408 void reset(HeapWord* addr);
1402 1409 };
1403 1410
1404 1411
1405 1412 // This closure is used to check that a certain set of bits is
1406 1413 // "empty" (i.e. the bit vector doesn't have any 1-bits).
1407 1414 class FalseBitMapClosure: public BitMapClosure {
1408 1415 public:
1409 1416 void do_bit(size_t offset) {
1410 1417 guarantee(false, "Should not have a 1 bit");
1411 1418 }
1412 1419 };
1413 1420
1414 1421 // This closure is used during the second checkpointing phase
1415 1422 // to rescan the marked objects on the dirty cards in the mod
1416 1423 // union table and the card table proper. It's invoked via
1417 1424 // MarkFromDirtyCardsClosure below. It uses either
1418 1425 // [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
1419 1426 // declared in genOopClosures.hpp to accomplish some of its work.
1420 1427 // In the parallel case the bitMap is shared, so access to
1421 1428 // it needs to be suitably synchronized for updates by embedded
1422 1429 // closures that update it; however, this closure itself only
1423 1430 // reads the bit_map and, because it is idempotent, is immune to
1424 1431 // reading stale values.
1425 1432 class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
1426 1433 #ifdef ASSERT
1427 1434 CMSCollector* _collector;
1428 1435 MemRegion _span;
1429 1436 union {
1430 1437 CMSMarkStack* _mark_stack;
1431 1438 OopTaskQueue* _work_queue;
1432 1439 };
1433 1440 #endif // ASSERT
1434 1441 bool _parallel;
1435 1442 CMSBitMap* _bit_map;
1436 1443 union {
1437 1444 MarkRefsIntoAndScanClosure* _scan_closure;
1438 1445 Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
1439 1446 };
1440 1447
1441 1448 public:
1442 1449 ScanMarkedObjectsAgainClosure(CMSCollector* collector,
1443 1450 MemRegion span,
1444 1451 ReferenceProcessor* rp,
1445 1452 CMSBitMap* bit_map,
1446 1453 CMSMarkStack* mark_stack,
1447 1454 CMSMarkStack* revisit_stack,
1448 1455 MarkRefsIntoAndScanClosure* cl):
1449 1456 #ifdef ASSERT
1450 1457 _collector(collector),
1451 1458 _span(span),
1452 1459 _mark_stack(mark_stack),
1453 1460 #endif // ASSERT
1454 1461 _parallel(false),
1455 1462 _bit_map(bit_map),
1456 1463 _scan_closure(cl) { }
1457 1464
1458 1465 ScanMarkedObjectsAgainClosure(CMSCollector* collector,
1459 1466 MemRegion span,
1460 1467 ReferenceProcessor* rp,
1461 1468 CMSBitMap* bit_map,
1462 1469 OopTaskQueue* work_queue,
1463 1470 CMSMarkStack* revisit_stack,
1464 1471 Par_MarkRefsIntoAndScanClosure* cl):
1465 1472 #ifdef ASSERT
1466 1473 _collector(collector),
1467 1474 _span(span),
1468 1475 _work_queue(work_queue),
1469 1476 #endif // ASSERT
1470 1477 _parallel(true),
1471 1478 _bit_map(bit_map),
1472 1479 _par_scan_closure(cl) { }
1473 1480
1474 1481 void do_object(oop obj) {
1475 1482 guarantee(false, "Call do_object_bm(oop, MemRegion) instead");
1476 1483 }
1477 1484 bool do_object_b(oop obj) {
1478 1485 guarantee(false, "Call do_object_bm(oop, MemRegion) form instead");
1479 1486 return false;
1480 1487 }
1481 1488 bool do_object_bm(oop p, MemRegion mr);
1482 1489 };
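// [Editorial sketch, not part of this header] The class above keeps either a
// serial or a parallel scan closure in an anonymous union and dispatches on a
// _parallel flag. A minimal standalone rendering of that pattern, using the
// hypothetical names SerialScan, ParallelScan and DualModeScan:

struct SerialScan   { void scan(int* p) { ++*p; } };   // stand-in for the serial scan closure
struct ParallelScan { void scan(int* p) { ++*p; } };   // stand-in for the parallel scan closure

class DualModeScan {
  bool _parallel;
  union {
    SerialScan*   _scan_closure;
    ParallelScan* _par_scan_closure;
  };
 public:
  explicit DualModeScan(SerialScan* cl)   : _parallel(false), _scan_closure(cl)     {}
  explicit DualModeScan(ParallelScan* cl) : _parallel(true),  _par_scan_closure(cl) {}
  void do_object(int* p) {
    if (_parallel) {
      _par_scan_closure->scan(p);   // parallel case: shared state must be synchronized elsewhere
    } else {
      _scan_closure->scan(p);       // serial case: no synchronization needed
    }
  }
};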
1483 1490
1484 1491 // This closure is used during the second checkpointing phase
1485 1492 // to rescan the marked objects on the dirty cards in the mod
1486 1493 // union table and the card table proper. It invokes
1487 1494 // ScanMarkedObjectsAgainClosure above to accomplish much of its work.
1488 1495 // In the parallel case, the bit map is shared and requires
1489 1496 // synchronized access.
1490 1497 class MarkFromDirtyCardsClosure: public MemRegionClosure {
1491 1498 CompactibleFreeListSpace* _space;
1492 1499 ScanMarkedObjectsAgainClosure _scan_cl;
1493 1500 size_t _num_dirty_cards;
1494 1501
1495 1502 public:
1496 1503 MarkFromDirtyCardsClosure(CMSCollector* collector,
1497 1504 MemRegion span,
1498 1505 CompactibleFreeListSpace* space,
1499 1506 CMSBitMap* bit_map,
1500 1507 CMSMarkStack* mark_stack,
1501 1508 CMSMarkStack* revisit_stack,
1502 1509 MarkRefsIntoAndScanClosure* cl):
1503 1510 _space(space),
1504 1511 _num_dirty_cards(0),
1505 1512 _scan_cl(collector, span, collector->ref_processor(), bit_map,
1506 1513 mark_stack, revisit_stack, cl) { }
1507 1514
1508 1515 MarkFromDirtyCardsClosure(CMSCollector* collector,
1509 1516 MemRegion span,
1510 1517 CompactibleFreeListSpace* space,
1511 1518 CMSBitMap* bit_map,
1512 1519 OopTaskQueue* work_queue,
1513 1520 CMSMarkStack* revisit_stack,
1514 1521 Par_MarkRefsIntoAndScanClosure* cl):
1515 1522 _space(space),
1516 1523 _num_dirty_cards(0),
1517 1524 _scan_cl(collector, span, collector->ref_processor(), bit_map,
1518 1525 work_queue, revisit_stack, cl) { }
1519 1526
1520 1527 void do_MemRegion(MemRegion mr);
1521 1528 void set_space(CompactibleFreeListSpace* space) { _space = space; }
1522 1529 size_t num_dirty_cards() { return _num_dirty_cards; }
1523 1530 };
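// [Editorial sketch, not part of this header] The shape of the dirty-card
// rescan above: a MemRegion-style closure that counts the dirty cards it
// visits and delegates the object-level work to an embedded scan closure.
// ToyRegion, ToyRescanClosure and ToyDirtyCardsClosure are hypothetical names.

#include <cstddef>

struct ToyRegion { size_t start_card; size_t num_cards; };

struct ToyRescanClosure {
  size_t objects_rescanned;
  ToyRescanClosure() : objects_rescanned(0) {}
  void rescan(const ToyRegion& r) { objects_rescanned += r.num_cards; }  // pretend one object per card
};

class ToyDirtyCardsClosure {
  ToyRescanClosure _scan_cl;          // embedded closure, like _scan_cl above
  size_t           _num_dirty_cards;
 public:
  ToyDirtyCardsClosure() : _num_dirty_cards(0) {}
  void do_MemRegion(const ToyRegion& mr) {
    _num_dirty_cards += mr.num_cards;   // bookkeeping behind num_dirty_cards()
    _scan_cl.rescan(mr);                // delegate the actual rescanning
  }
  size_t num_dirty_cards() const { return _num_dirty_cards; }
};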
1524 1531
1525 1532 // This closure is used in the non-product build to check
1526 1533 // that there are no MemRegions with a certain property.
1527 1534 class FalseMemRegionClosure: public MemRegionClosure {
1528 1535 void do_MemRegion(MemRegion mr) {
1529 1536 guarantee(!mr.is_empty(), "Shouldn't be empty");
1530 1537 guarantee(false, "Should never be here");
1531 1538 }
1532 1539 };
1533 1540
1534 1541 // This closure is used during the precleaning phase
1535 1542 // to "carefully" rescan marked objects on dirty cards.
1536 1543 // It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
1537 1544 // to accomplish some of its work.
1538 1545 class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
1539 1546 CMSCollector* _collector;
1540 1547 MemRegion _span;
1541 1548 bool _yield;
1542 1549 Mutex* _freelistLock;
1543 1550 CMSBitMap* _bitMap;
1544 1551 CMSMarkStack* _markStack;
1545 1552 MarkRefsIntoAndScanClosure* _scanningClosure;
1546 1553
1547 1554 public:
1548 1555 ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
1549 1556 MemRegion span,
1550 1557 CMSBitMap* bitMap,
1551 1558 CMSMarkStack* markStack,
1552 1559 CMSMarkStack* revisitStack,
1553 1560 MarkRefsIntoAndScanClosure* cl,
1554 1561 bool should_yield):
1555 1562 _collector(collector),
1556 1563 _span(span),
1557 1564 _yield(should_yield),
1558 1565 _bitMap(bitMap),
1559 1566 _markStack(markStack),
1560 1567 _scanningClosure(cl) {
1561 1568 }
1562 1569
1563 1570 void do_object(oop p) {
1564 1571 guarantee(false, "call do_object_careful instead");
1565 1572 }
1566 1573
1567 1574 size_t do_object_careful(oop p) {
1568 1575 guarantee(false, "Unexpected caller");
1569 1576 return 0;
1570 1577 }
1571 1578
1572 1579 size_t do_object_careful_m(oop p, MemRegion mr);
1573 1580
1574 1581 void setFreelistLock(Mutex* m) {
1575 1582 _freelistLock = m;
1576 1583 _scanningClosure->set_freelistLock(m);
1577 1584 }
1578 1585
1579 1586 private:
1580 1587 inline bool do_yield_check();
1581 1588
1582 1589 void do_yield_work();
1583 1590 };
1584 1591
1585 1592 class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
1586 1593 CMSCollector* _collector;
1587 1594 MemRegion _span;
1588 1595 bool _yield;
1589 1596 CMSBitMap* _bit_map;
1590 1597 CMSMarkStack* _mark_stack;
1591 1598 PushAndMarkClosure* _scanning_closure;
1592 1599 unsigned int _before_count;
1593 1600
1594 1601 public:
1595 1602 SurvivorSpacePrecleanClosure(CMSCollector* collector,
1596 1603 MemRegion span,
1597 1604 CMSBitMap* bit_map,
1598 1605 CMSMarkStack* mark_stack,
1599 1606 PushAndMarkClosure* cl,
1600 1607 unsigned int before_count,
1601 1608 bool should_yield):
1602 1609 _collector(collector),
1603 1610 _span(span),
1604 1611 _yield(should_yield),
1605 1612 _bit_map(bit_map),
1606 1613 _mark_stack(mark_stack),
1607 1614 _scanning_closure(cl),
1608 1615 _before_count(before_count)
1609 1616 { }
1610 1617
1611 1618 void do_object(oop p) {
1612 1619 guarantee(false, "call do_object_careful instead");
1613 1620 }
1614 1621
1615 1622 size_t do_object_careful(oop p);
1616 1623
1617 1624 size_t do_object_careful_m(oop p, MemRegion mr) {
1618 1625 guarantee(false, "Unexpected caller");
1619 1626 return 0;
1620 1627 }
1621 1628
1622 1629 private:
1623 1630 inline void do_yield_check();
1624 1631 void do_yield_work();
1625 1632 };
1626 1633
1627 1634 // This closure is used to accomplish the sweeping work
1628 1635 // after the second checkpoint but before the concurrent reset
1629 1636 // phase.
1630 1637 //
1631 1638 // Terminology
1632 1639 // left hand chunk (LHC) - block of one or more chunks currently being
1633 1640 // coalesced. The LHC is available for coalescing with a new chunk.
1634 1641 // right hand chunk (RHC) - block that is currently being swept that is
1635 1642 // free or garbage that can be coalesced with the LHC.
1636 1643 // _inFreeRange is true if there is currently a LHC
1637 1644 // _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
1638 1645 // _freeRangeInFreeLists is true if the LHC is in the free lists.
1639 1646 // _freeFinger is the address of the current LHC
1640 1647 class SweepClosure: public BlkClosureCareful {
1641 1648 CMSCollector* _collector; // collector doing the work
1642 1649 ConcurrentMarkSweepGeneration* _g; // Generation being swept
1643 1650 CompactibleFreeListSpace* _sp; // Space being swept
1644 1651 HeapWord* _limit;
1645 1652 Mutex* _freelistLock; // Free list lock (in space)
1646 1653 CMSBitMap* _bitMap; // Marking bit map (in
1647 1654 // generation)
1648 1655 bool _inFreeRange; // Indicates if we are in the
1649 1656 // midst of a free run
1650 1657 bool _freeRangeInFreeLists;
1651 1658 // Often, we have just found
1652 1659 // a free chunk and started
1653 1660 // a new free range; we do not
1654 1661 // eagerly remove this chunk from
1655 1662 // the free lists unless there is
1656 1663 // a possibility of coalescing.
1657 1664 // When true, this flag indicates
1658 1665 // that the _freeFinger below
1659 1666 // points to a potentially free chunk
1660 1667 // that may still be in the free lists
1661 1668 bool _lastFreeRangeCoalesced;
1662 1669 // free range contains chunks
1663 1670 // coalesced
1664 1671 bool _yield;
1665 1672 // Whether sweeping should be
1666 1673 // done with yields. For instance
1667 1674 // when done by the foreground
1668 1675 // collector we shouldn't yield.
1669 1676 HeapWord* _freeFinger; // When _inFreeRange is set, the
1670 1677 // pointer to the "left hand
1671 1678 // chunk"
1672 1679 size_t _freeRangeSize;
1673 1680 // When _inFreeRange is set, this
1674 1681 // indicates the accumulated size
1675 1682 // of the "left hand chunk"
1676 1683 NOT_PRODUCT(
1677 1684 size_t _numObjectsFreed;
1678 1685 size_t _numWordsFreed;
1679 1686 size_t _numObjectsLive;
1680 1687 size_t _numWordsLive;
1681 1688 size_t _numObjectsAlreadyFree;
1682 1689 size_t _numWordsAlreadyFree;
1683 1690 FreeChunk* _last_fc;
1684 1691 )
1685 1692 private:
1686 1693 // Code that is common to a free chunk or garbage when
1687 1694 // encountered during sweeping.
1688 1695 void doPostIsFreeOrGarbageChunk(FreeChunk *fc,
1689 1696 size_t chunkSize);
1690 1697 // Process a free chunk during sweeping.
1691 1698 void doAlreadyFreeChunk(FreeChunk *fc);
1692 1699 // Process a garbage chunk during sweeping.
1693 1700 size_t doGarbageChunk(FreeChunk *fc);
1694 1701 // Process a live chunk during sweeping.
1695 1702 size_t doLiveChunk(FreeChunk* fc);
1696 1703
1697 1704 // Accessors.
1698 1705 HeapWord* freeFinger() const { return _freeFinger; }
1699 1706 void set_freeFinger(HeapWord* v) { _freeFinger = v; }
1700 1707 size_t freeRangeSize() const { return _freeRangeSize; }
1701 1708 void set_freeRangeSize(size_t v) { _freeRangeSize = v; }
1702 1709 bool inFreeRange() const { return _inFreeRange; }
1703 1710 void set_inFreeRange(bool v) { _inFreeRange = v; }
1704 1711 bool lastFreeRangeCoalesced() const { return _lastFreeRangeCoalesced; }
1705 1712 void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
1706 1713 bool freeRangeInFreeLists() const { return _freeRangeInFreeLists; }
1707 1714 void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; }
1708 1715
1709 1716 // Initialize a free range.
1710 1717 void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
1711 1718 // Return this chunk to the free lists.
1712 1719 void flushCurFreeChunk(HeapWord* chunk, size_t size);
1713 1720
1714 1721 // Check if we should yield and do so when necessary.
1715 1722 inline void do_yield_check(HeapWord* addr);
1716 1723
1717 1724 // Yield
1718 1725 void do_yield_work(HeapWord* addr);
1719 1726
1720 1727 // Debugging/Printing
1721 1728 void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;
1722 1729
1723 1730 public:
1724 1731 SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
1725 1732 CMSBitMap* bitMap, bool should_yield);
1726 1733 ~SweepClosure();
1727 1734
1728 1735 size_t do_blk_careful(HeapWord* addr);
1729 1736 };
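// [Editorial sketch, not part of this header] The left-hand-chunk /
// right-hand-chunk coalescing described in the terminology comment above,
// reduced to a standalone state machine over a toy chunk sequence: adjacent
// free or garbage chunks are merged into one free range (the LHC), which is
// flushed to the free lists when a live chunk ends the run. All names below
// are hypothetical.

#include <cstddef>
#include <vector>

enum ToyChunkKind { ToyFree, ToyGarbage, ToyLive };
struct ToyChunk { ToyChunkKind kind; size_t words; };
struct ToyRange { size_t start; size_t words; };

inline std::vector<ToyRange> toy_sweep(const std::vector<ToyChunk>& chunks) {
  std::vector<ToyRange> free_lists;
  bool   in_free_range   = false;  // plays the role of _inFreeRange
  size_t free_finger     = 0;      // plays the role of _freeFinger (start of the LHC)
  size_t free_range_size = 0;      // plays the role of _freeRangeSize
  size_t addr            = 0;

  for (size_t i = 0; i < chunks.size(); ++i) {
    const ToyChunk& rhc = chunks[i];             // the "right hand chunk"
    if (rhc.kind == ToyLive) {
      if (in_free_range) {                       // a live chunk ends the free run:
        ToyRange r = { free_finger, free_range_size };
        free_lists.push_back(r);                 // flush the coalesced LHC
        in_free_range = false;
      }
    } else {                                     // free or garbage: coalesce into the LHC
      if (!in_free_range) {
        in_free_range   = true;
        free_finger     = addr;
        free_range_size = 0;
      }
      free_range_size += rhc.words;
    }
    addr += rhc.words;
  }
  if (in_free_range) {                           // flush a trailing free range
    ToyRange r = { free_finger, free_range_size };
    free_lists.push_back(r);
  }
  return free_lists;
}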
1730 1737
1731 1738 // Closures related to weak references processing
1732 1739
1733 1740 // During CMS' weak reference processing, this is a
1734 1741 // work-routine/closure used to complete transitive
1735 1742 // marking of objects as live after a certain point
1736 1743 // in which an initial set has been completely accumulated.
1737 1744 class CMSDrainMarkingStackClosure: public VoidClosure {
1738 1745 CMSCollector* _collector;
1739 1746 MemRegion _span;
1740 1747 CMSMarkStack* _mark_stack;
1741 1748 CMSBitMap* _bit_map;
1742 1749 CMSKeepAliveClosure* _keep_alive;
1743 1750 public:
1744 1751 CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
1745 1752 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
1746 1753 CMSKeepAliveClosure* keep_alive):
1747 1754 _collector(collector),
1748 1755 _span(span),
1749 1756 _bit_map(bit_map),
1750 1757 _mark_stack(mark_stack),
1751 1758 _keep_alive(keep_alive) { }
1752 1759
1753 1760 void do_void();
1754 1761 };
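// [Editorial sketch, not part of this header] The drain loop performed by a
// closure like the one above: repeatedly pop from the mark stack and push any
// not-yet-marked references of the popped object until the stack is empty,
// completing the transitive closure of reachability. ToyObject and
// drain_marking_stack are hypothetical names.

#include <cstddef>
#include <set>
#include <stack>
#include <vector>

struct ToyObject { std::vector<ToyObject*> refs; };

inline void drain_marking_stack(std::stack<ToyObject*>& mark_stack,
                                std::set<ToyObject*>&   marked) {
  while (!mark_stack.empty()) {
    ToyObject* obj = mark_stack.top();
    mark_stack.pop();
    for (size_t i = 0; i < obj->refs.size(); ++i) {
      ToyObject* ref = obj->refs[i];
      if (ref != NULL && marked.insert(ref).second) {  // newly marked reference
        mark_stack.push(ref);                          // keep marking from it transitively
      }
    }
  }
}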
1755 1762
1756 1763 // A parallel version of CMSDrainMarkingStackClosure above.
1757 1764 class CMSParDrainMarkingStackClosure: public VoidClosure {
1758 1765 CMSCollector* _collector;
1759 1766 MemRegion _span;
1760 1767 OopTaskQueue* _work_queue;
1761 1768 CMSBitMap* _bit_map;
1762 1769 CMSInnerParMarkAndPushClosure _mark_and_push;
1763 1770
1764 1771 public:
1765 1772 CMSParDrainMarkingStackClosure(CMSCollector* collector,
1766 1773 MemRegion span, CMSBitMap* bit_map,
1767 1774 OopTaskQueue* work_queue):
1768 1775 _collector(collector),
1769 1776 _span(span),
1770 1777 _bit_map(bit_map),
1771 1778 _work_queue(work_queue),
1772 1779 _mark_and_push(collector, span, bit_map, work_queue) { }
1773 1780
1774 1781 public:
1775 1782 void trim_queue(uint max);
1776 1783 void do_void();
1777 1784 };
1778 1785
1779 1786 // Allow yielding or short-circuiting of reference list
1780 1787 // precleaning work.
1781 1788 class CMSPrecleanRefsYieldClosure: public YieldClosure {
1782 1789 CMSCollector* _collector;
1783 1790 void do_yield_work();
1784 1791 public:
1785 1792 CMSPrecleanRefsYieldClosure(CMSCollector* collector):
1786 1793 _collector(collector) {}
1787 1794 virtual bool should_return();
1788 1795 };
1789 1796
1790 1797
1791 1798 // Convenience class that locks free list locks for given CMS collector
1792 1799 class FreelistLocker: public StackObj {
1793 1800 private:
1794 1801 CMSCollector* _collector;
1795 1802 public:
1796 1803 FreelistLocker(CMSCollector* collector):
1797 1804 _collector(collector) {
1798 1805 _collector->getFreelistLocks();
1799 1806 }
1800 1807
1801 1808 ~FreelistLocker() {
1802 1809 _collector->releaseFreelistLocks();
1803 1810 }
1804 1811 };
1805 1812
1806 1813 // Mark all dead objects in a given space.
1807 1814 class MarkDeadObjectsClosure: public BlkClosure {
1808 1815 const CMSCollector* _collector;
1809 1816 const CompactibleFreeListSpace* _sp;
1810 1817 CMSBitMap* _live_bit_map;
1811 1818 CMSBitMap* _dead_bit_map;
1812 1819 public:
1813 1820 MarkDeadObjectsClosure(const CMSCollector* collector,
1814 1821 const CompactibleFreeListSpace* sp,
1815 1822 CMSBitMap *live_bit_map,
1816 1823 CMSBitMap *dead_bit_map) :
1817 1824 _collector(collector),
1818 1825 _sp(sp),
1819 1826 _live_bit_map(live_bit_map),
1820 1827 _dead_bit_map(dead_bit_map) {}
1821 1828 size_t do_blk(HeapWord* addr);
1822 1829 };
650 lines elided