1 /* 2 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, 20 * CA 95054 USA or visit www.sun.com if you need additional information or 21 * have any questions. 22 * 23 */ 24 25 // ConcurrentMarkSweepGeneration is in support of a concurrent 26 // mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker 27 // style. We assume, for now, that this generation is always the 28 // seniormost generation (modulo the PermGeneration), and for simplicity 29 // in the first implementation, that this generation is a single compactible 30 // space. Neither of these restrictions appears essential, and will be 31 // relaxed in the future when more time is available to implement the 32 // greater generality (and there's a need for it). 33 // 34 // Concurrent mode failures are currently handled by 35 // means of a sliding mark-compact. 36 37 class CMSAdaptiveSizePolicy; 38 class CMSConcMarkingTask; 39 class CMSGCAdaptivePolicyCounters; 40 class ConcurrentMarkSweepGeneration; 41 class ConcurrentMarkSweepPolicy; 42 class ConcurrentMarkSweepThread; 43 class CompactibleFreeListSpace; 44 class FreeChunk; 45 class PromotionInfo; 46 class ScanMarkedObjectsAgainCarefullyClosure; 47 48 // A generic CMS bit map. It's the basis for both the CMS marking bit map 49 // as well as for the mod union table (in each case only a subset of the 50 // methods are used). This is essentially a wrapper around the BitMap class, 51 // with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map, 52 // we have _shifter == 0. and for the mod union table we have 53 // shifter == CardTableModRefBS::card_shift - LogHeapWordSize.) 54 // XXX 64-bit issues in BitMap? 
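// (Illustrative only, not part of this header's definitions: under the
// conventions above, mapping a heap address to its bit index is essentially a
// shift of the word offset, i.e. something like
//    size_t bit = pointer_delta(addr, _bmStartWord) >> _shifter;
// so with _shifter == 0 every HeapWord gets its own bit, while for the mod
// union table a single bit summarizes an entire card's worth of HeapWords.)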
55 class CMSBitMap VALUE_OBJ_CLASS_SPEC { 56 friend class VMStructs; 57 58 HeapWord* _bmStartWord; // base address of range covered by map 59 size_t _bmWordSize; // map size (in #HeapWords covered) 60 const int _shifter; // shifts to convert HeapWord to bit position 61 VirtualSpace _virtual_space; // underlying the bit map 62 BitMap _bm; // the bit map itself 63 public: 64 Mutex* const _lock; // mutex protecting _bm; 65 66 public: 67 // constructor 68 CMSBitMap(int shifter, int mutex_rank, const char* mutex_name); 69 70 // allocates the actual storage for the map 71 bool allocate(MemRegion mr); 72 // field getter 73 Mutex* lock() const { return _lock; } 74 // locking verifier convenience function 75 void assert_locked() const PRODUCT_RETURN; 76 77 // inquiries 78 HeapWord* startWord() const { return _bmStartWord; } 79 size_t sizeInWords() const { return _bmWordSize; } 80 size_t sizeInBits() const { return _bm.size(); } 81 // the following is one past the last word in space 82 HeapWord* endWord() const { return _bmStartWord + _bmWordSize; } 83 84 // reading marks 85 bool isMarked(HeapWord* addr) const; 86 bool par_isMarked(HeapWord* addr) const; // do not lock checks 87 bool isUnmarked(HeapWord* addr) const; 88 bool isAllClear() const; 89 90 // writing marks 91 void mark(HeapWord* addr); 92 // For marking by parallel GC threads; 93 // returns true if we did, false if another thread did 94 bool par_mark(HeapWord* addr); 95 96 void mark_range(MemRegion mr); 97 void par_mark_range(MemRegion mr); 98 void mark_large_range(MemRegion mr); 99 void par_mark_large_range(MemRegion mr); 100 void par_clear(HeapWord* addr); // For unmarking by parallel GC threads. 101 void clear_range(MemRegion mr); 102 void par_clear_range(MemRegion mr); 103 void clear_large_range(MemRegion mr); 104 void par_clear_large_range(MemRegion mr); 105 void clear_all(); 106 void clear_all_incrementally(); // Not yet implemented!! 107 108 NOT_PRODUCT( 109 // checks the memory region for validity 110 void region_invariant(MemRegion mr); 111 ) 112 113 // iteration 114 void iterate(BitMapClosure* cl) { 115 _bm.iterate(cl); 116 } 117 void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right); 118 void dirty_range_iterate_clear(MemRegionClosure* cl); 119 void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl); 120 121 // auxiliary support for iteration 122 HeapWord* getNextMarkedWordAddress(HeapWord* addr) const; 123 HeapWord* getNextMarkedWordAddress(HeapWord* start_addr, 124 HeapWord* end_addr) const; 125 HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const; 126 HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr, 127 HeapWord* end_addr) const; 128 MemRegion getAndClearMarkedRegion(HeapWord* addr); 129 MemRegion getAndClearMarkedRegion(HeapWord* start_addr, 130 HeapWord* end_addr); 131 132 // conversion utilities 133 HeapWord* offsetToHeapWord(size_t offset) const; 134 size_t heapWordToOffset(HeapWord* addr) const; 135 size_t heapWordDiffToOffsetDiff(size_t diff) const; 136 137 // debugging 138 // is this address range covered by the bit-map? 139 NOT_PRODUCT( 140 bool covers(MemRegion mr) const; 141 bool covers(HeapWord* start, size_t size = 0) const; 142 ) 143 void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN; 144 }; 145 146 // Represents a marking stack used by the CMS collector. 147 // Ideally this should be GrowableArray<> just like MSC's marking stack(s). 
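// (A rough usage sketch, not code from this file: the marking loop drains the
// stack by popping a grey object and pushing its not-yet-marked references,
// e.g. something like
//    while (!stack->isEmpty()) {
//      oop obj = stack->pop();
//      obj->oop_iterate(&push_unmarked_children);  // hypothetical closure
//    }
// When push() fails because the stack is full, the object is diverted to the
// collector's overflow list (see CMSCollector::_overflow_list below) and the
// stack may later be expand()ed.)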
class CMSMarkStack: public CHeapObj {
  //
  friend class CMSCollector; // to get at expansion stats further below
  //

  VirtualSpace _virtual_space;  // space for the stack
  oop*   _base;                 // bottom of stack
  size_t _index;                // one more than last occupied index
  size_t _capacity;             // max #elements
  Mutex  _par_lock;             // an advisory lock used in case of parallel access
  NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run

 protected:
  size_t _hit_limit;      // we hit max stack size limit
  size_t _failed_double;  // we failed expansion before hitting limit

 public:
  CMSMarkStack():
    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
    _hit_limit(0),
    _failed_double(0) {}

  bool allocate(size_t size);

  size_t capacity() const { return _capacity; }

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index] ;
    }
    return NULL;
  }

  bool push(oop ptr) {
    if (isFull()) {
      return false;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
      return true;
    }
  }

  bool isEmpty() const { return _index == 0; }
  bool isFull()  const {
    assert(_index <= _capacity, "buffer overflow");
    return _index == _capacity;
  }

  size_t length() { return _index; }

  // "Parallel versions" of some of the above
  oop par_pop() {
    // lock and pop
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return pop();
  }

  bool par_push(oop ptr) {
    // lock and push
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return push(ptr);
  }

  // Forcibly reset the stack, losing all of its contents.
  void reset() {
    _index = 0;
  }

  // Expand the stack, typically in response to an overflow condition
  void expand();

  // Compute the least valued stack element.
  oop least_value(HeapWord* low) {
    oop least = (oop)low;
    for (size_t i = 0; i < _index; i++) {
      least = MIN2(least, _base[i]);
    }
    return least;
  }

  // Exposed here to allow stack expansion in || case
  Mutex* par_lock() { return &_par_lock; }
};

class CardTableRS;
class CMSParGCThreadState;

class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};

class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};

// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
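// (As background, and only as an informal sketch: each young-gen GC worker is
// handed a per-thread recorder -- see CMSCollector::get_data_recorder() below --
// and records survivor PLAB boundaries via record_sample(); at remark time
// merge_survivor_plab_arrays() merge-sorts these per-thread samples into the
// chunk boundaries that initialize_sequential_subtasks_for_young_gen_rescan()
// uses to parcel out the survivor space rescan work.)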
252 class ChunkArray: public CHeapObj { 253 size_t _index; 254 size_t _capacity; 255 HeapWord** _array; // storage for array 256 257 public: 258 ChunkArray() : _index(0), _capacity(0), _array(NULL) {} 259 ChunkArray(HeapWord** a, size_t c): 260 _index(0), _capacity(c), _array(a) {} 261 262 HeapWord** array() { return _array; } 263 void set_array(HeapWord** a) { _array = a; } 264 265 size_t capacity() { return _capacity; } 266 void set_capacity(size_t c) { _capacity = c; } 267 268 size_t end() { 269 assert(_index < capacity(), "_index out of bounds"); 270 return _index; 271 } // exclusive 272 273 HeapWord* nth(size_t n) { 274 assert(n < end(), "Out of bounds access"); 275 return _array[n]; 276 } 277 278 void reset() { 279 _index = 0; 280 } 281 282 void record_sample(HeapWord* p, size_t sz) { 283 // For now we do not do anything with the size 284 if (_index < _capacity) { 285 _array[_index++] = p; 286 } 287 } 288 }; 289 290 // 291 // Timing, allocation and promotion statistics for gc scheduling and incremental 292 // mode pacing. Most statistics are exponential averages. 293 // 294 class CMSStats VALUE_OBJ_CLASS_SPEC { 295 private: 296 ConcurrentMarkSweepGeneration* const _cms_gen; // The cms (old) gen. 297 298 // The following are exponential averages with factor alpha: 299 // avg = (100 - alpha) * avg + alpha * cur_sample 300 // 301 // The durations measure: end_time[n] - start_time[n] 302 // The periods measure: start_time[n] - start_time[n-1] 303 // 304 // The cms period and duration include only concurrent collections; time spent 305 // in foreground cms collections due to System.gc() or because of a failure to 306 // keep up are not included. 307 // 308 // There are 3 alphas to "bootstrap" the statistics. The _saved_alpha is the 309 // real value, but is used only after the first period. A value of 100 is 310 // used for the first sample so it gets the entire weight. 311 unsigned int _saved_alpha; // 0-100 312 unsigned int _gc0_alpha; 313 unsigned int _cms_alpha; 314 315 double _gc0_duration; 316 double _gc0_period; 317 size_t _gc0_promoted; // bytes promoted per gc0 318 double _cms_duration; 319 double _cms_duration_pre_sweep; // time from initiation to start of sweep 320 double _cms_duration_per_mb; 321 double _cms_period; 322 size_t _cms_allocated; // bytes of direct allocation per gc0 period 323 324 // Timers. 325 elapsedTimer _cms_timer; 326 TimeStamp _gc0_begin_time; 327 TimeStamp _cms_begin_time; 328 TimeStamp _cms_end_time; 329 330 // Snapshots of the amount used in the CMS generation. 331 size_t _cms_used_at_gc0_begin; 332 size_t _cms_used_at_gc0_end; 333 size_t _cms_used_at_cms_begin; 334 335 // Used to prevent the duty cycle from being reduced in the middle of a cms 336 // cycle. 337 bool _allow_duty_cycle_reduction; 338 339 enum { 340 _GC0_VALID = 0x1, 341 _CMS_VALID = 0x2, 342 _ALL_VALID = _GC0_VALID | _CMS_VALID 343 }; 344 345 unsigned int _valid_bits; 346 347 unsigned int _icms_duty_cycle; // icms duty cycle (0-100). 348 349 protected: 350 351 // Return a duty cycle that avoids wild oscillations, by limiting the amount 352 // of change between old_duty_cycle and new_duty_cycle (the latter is treated 353 // as a recommended value). 
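// For example (illustrative numbers only): if old_duty_cycle is 10 and the
// recommended new_duty_cycle is 90, the damped result lies somewhere between
// the two rather than jumping straight to 90, so successive adjustments move
// the duty cycle gradually.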
354 static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle, 355 unsigned int new_duty_cycle); 356 unsigned int icms_update_duty_cycle_impl(); 357 358 public: 359 CMSStats(ConcurrentMarkSweepGeneration* cms_gen, 360 unsigned int alpha = CMSExpAvgFactor); 361 362 // Whether or not the statistics contain valid data; higher level statistics 363 // cannot be called until this returns true (they require at least one young 364 // gen and one cms cycle to have completed). 365 bool valid() const; 366 367 // Record statistics. 368 void record_gc0_begin(); 369 void record_gc0_end(size_t cms_gen_bytes_used); 370 void record_cms_begin(); 371 void record_cms_end(); 372 373 // Allow management of the cms timer, which must be stopped/started around 374 // yield points. 375 elapsedTimer& cms_timer() { return _cms_timer; } 376 void start_cms_timer() { _cms_timer.start(); } 377 void stop_cms_timer() { _cms_timer.stop(); } 378 379 // Basic statistics; units are seconds or bytes. 380 double gc0_period() const { return _gc0_period; } 381 double gc0_duration() const { return _gc0_duration; } 382 size_t gc0_promoted() const { return _gc0_promoted; } 383 double cms_period() const { return _cms_period; } 384 double cms_duration() const { return _cms_duration; } 385 double cms_duration_per_mb() const { return _cms_duration_per_mb; } 386 size_t cms_allocated() const { return _cms_allocated; } 387 388 size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;} 389 390 // Seconds since the last background cms cycle began or ended. 391 double cms_time_since_begin() const; 392 double cms_time_since_end() const; 393 394 // Higher level statistics--caller must check that valid() returns true before 395 // calling. 396 397 // Returns bytes promoted per second of wall clock time. 398 double promotion_rate() const; 399 400 // Returns bytes directly allocated per second of wall clock time. 401 double cms_allocation_rate() const; 402 403 // Rate at which space in the cms generation is being consumed (sum of the 404 // above two). 405 double cms_consumption_rate() const; 406 407 // Returns an estimate of the number of seconds until the cms generation will 408 // fill up, assuming no collection work is done. 409 double time_until_cms_gen_full() const; 410 411 // Returns an estimate of the number of seconds remaining until 412 // the cms generation collection should start. 413 double time_until_cms_start() const; 414 415 // End of higher level statistics. 416 417 // Returns the cms incremental mode duty cycle, as a percentage (0-100). 418 unsigned int icms_duty_cycle() const { return _icms_duty_cycle; } 419 420 // Update the duty cycle and return the new value. 421 unsigned int icms_update_duty_cycle(); 422 423 // Debugging. 424 void print_on(outputStream* st) const PRODUCT_RETURN; 425 void print() const { print_on(gclog_or_tty); } 426 }; 427 428 // A closure related to weak references processing which 429 // we embed in the CMSCollector, since we need to pass 430 // it to the reference processor for secondary filtering 431 // of references based on reachability of referent; 432 // see role of _is_alive_non_header closure in the 433 // ReferenceProcessor class. 434 // For objects in the CMS generation, this closure checks 435 // if the object is "live" (reachable). Used in weak 436 // reference processing. 
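// (An informal sketch of the intended predicate, not the definition, which
// lives in the .cpp file: an object outside _span is treated as live, and an
// object inside _span is live iff its address is marked in _bit_map; roughly
//    return !_span.contains(addr) || _bit_map->isMarked(addr);
// where addr is the object's address.)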
class CMSIsAliveClosure: public BoolObjectClosure {
  MemRegion        _span;
  const CMSBitMap* _bit_map;

  friend class CMSCollector;
 protected:
  void set_span(MemRegion span) { _span = span; }
 public:
  CMSIsAliveClosure(CMSBitMap* bit_map):
    _bit_map(bit_map) { }

  CMSIsAliveClosure(MemRegion span,
                    CMSBitMap* bit_map):
    _span(span),
    _bit_map(bit_map) { }
  void do_object(oop obj) {
    assert(false, "not to be invoked");
  }
  bool do_object_b(oop obj);
};


// Implements AbstractRefProcTaskExecutor for CMS.
class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
public:

  CMSRefProcTaskExecutor(CMSCollector& collector)
    : _collector(collector)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
private:
  CMSCollector& _collector;
};


class CMSCollector: public CHeapObj {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweepGeneration;
  friend class CompactibleFreeListSpace;
  friend class CMSParRemarkTask;
  friend class CMSConcMarkingTask;
  friend class CMSRefProcTaskProxy;
  friend class CMSRefProcTaskExecutor;
  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
  friend class PushOrMarkClosure;             // to access _restart_addr
  friend class Par_PushOrMarkClosure;         // to access _restart_addr
  friend class MarkFromRootsClosure;          //  -- ditto --
                                              // ... and for clearing cards
  friend class Par_MarkFromRootsClosure;      //  to access _restart_addr
                                              // ... and for clearing cards
  friend class Par_ConcMarkingClosure;        //  to access _restart_addr etc.
  friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
  friend class PushAndMarkVerifyClosure;      //  -- ditto --
  friend class MarkRefsIntoAndScanClosure;    //  to access _overflow_list
  friend class PushAndMarkClosure;            //  -- ditto --
  friend class Par_PushAndMarkClosure;        //  -- ditto --
  friend class CMSKeepAliveClosure;           //  -- ditto --
  friend class CMSDrainMarkingStackClosure;   //  -- ditto --
  friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list
  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
  friend class VM_CMS_Operation;
  friend class VM_CMS_Initial_Mark;
  friend class VM_CMS_Final_Remark;

 private:
  jlong _time_of_last_gc;
  void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  OopTaskQueueSet* _task_queues;

  // Overflow list of grey objects, threaded through mark-word
  // Manipulated with CAS in the parallel/multi-threaded case.
  oop _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced for accommodating overflow list above.
  // This code will likely be revisited under RFE#4922830.
521 GrowableArray<oop>* _preserved_oop_stack; 522 GrowableArray<markOop>* _preserved_mark_stack; 523 524 int* _hash_seed; 525 526 // In support of multi-threaded concurrent phases 527 YieldingFlexibleWorkGang* _conc_workers; 528 529 // Performance Counters 530 CollectorCounters* _gc_counters; 531 532 // Initialization Errors 533 bool _completed_initialization; 534 535 // In support of ExplicitGCInvokesConcurrent 536 static bool _full_gc_requested; 537 unsigned int _collection_count_start; 538 // Should we unload classes this concurrent cycle? 539 // Set in response to a concurrent full gc request. 540 bool _unload_classes; 541 bool _unloaded_classes_last_cycle; 542 // Did we (allow) unload classes in the previous concurrent cycle? 543 bool cms_unloaded_classes_last_cycle() const { 544 return _unloaded_classes_last_cycle || CMSClassUnloadingEnabled; 545 } 546 547 // Verification support 548 CMSBitMap _verification_mark_bm; 549 void verify_after_remark_work_1(); 550 void verify_after_remark_work_2(); 551 552 // true if any verification flag is on. 553 bool _verifying; 554 bool verifying() const { return _verifying; } 555 void set_verifying(bool v) { _verifying = v; } 556 557 // Collector policy 558 ConcurrentMarkSweepPolicy* _collector_policy; 559 ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; } 560 561 // Check whether the gc time limit has been 562 // exceeded and set the size policy flag 563 // appropriately. 564 void check_gc_time_limit(); 565 // XXX Move these to CMSStats ??? FIX ME !!! 566 elapsedTimer _sweep_timer; 567 AdaptivePaddedAverage _sweep_estimate; 568 569 protected: 570 ConcurrentMarkSweepGeneration* _cmsGen; // old gen (CMS) 571 ConcurrentMarkSweepGeneration* _permGen; // perm gen 572 MemRegion _span; // span covering above two 573 CardTableRS* _ct; // card table 574 575 // CMS marking support structures 576 CMSBitMap _markBitMap; 577 CMSBitMap _modUnionTable; 578 CMSMarkStack _markStack; 579 CMSMarkStack _revisitStack; // used to keep track of klassKlass objects 580 // to revisit 581 CMSBitMap _perm_gen_verify_bit_map; // Mark bit map for perm gen verification support. 582 583 HeapWord* _restart_addr; // in support of marking stack overflow 584 void lower_restart_addr(HeapWord* low); 585 586 // Counters in support of marking stack / work queue overflow handling: 587 // a non-zero value indicates certain types of overflow events during 588 // the current CMS cycle and could lead to stack resizing efforts at 589 // an opportune future time. 
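// (A note on the abbreviations below, presumably: "ser"/"par" distinguish the
// serial and parallel code paths, "pmc" refers to the push-and-mark closures,
// and "kac" to the keep-alive closures used during reference processing.)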
  size_t _ser_pmc_preclean_ovflw;
  size_t _ser_pmc_remark_ovflw;
  size_t _par_pmc_remark_ovflw;
  size_t _ser_kac_ovflw;
  size_t _par_kac_ovflw;
  NOT_PRODUCT(size_t _num_par_pushes;)

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;
  CMSIsAliveClosure   _is_alive_closure;
      // keep this textually after _markBitMap; c'tor dependency

  ConcurrentMarkSweepThread* _cmsThread;   // the thread doing the work
  ModUnionClosure    _modUnionClosure;
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling)            = {Marking}
  // next_state(Marking)           = {Precleaning, Sweeping}
  // next_state(Precleaning)       = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking)      = {Sweeping}
  // next_state(Sweeping)          = {Resizing}
  // next_state(Resizing)          = {Resetting}
  // next_state(Resetting)         = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling == post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                            precleaning || abortablePreclean
  enum CollectorState {
    Resizing            = 0,
    Resetting           = 1,
    Idling              = 2,
    InitialMarking      = 3,
    Marking             = 4,
    Precleaning         = 5,
    AbortablePreclean   = 6,
    FinalMarking        = 7,
    Sweeping            = 8
  };
  static CollectorState _collectorState;

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signalling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
                                        // wants to go active
  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
                                        // yet passed the baton to the foreground GC

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  uint   _sweepCount;
  // number of full gc's since the last concurrent gc.
  uint   _full_gcs_since_conc_gc;

  // if occupancy exceeds this, start a new gc cycle
  double _initiatingOccupancy;
  // occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats _stats;

  // Allocation limits installed in the young gen, used only in
  // CMSIncrementalMode. When an allocation in the young gen would cross one of
  // these limits, the cms generation is notified and the cms thread is started
  // or stopped, respectively.
  HeapWord* _icms_start_limit;
  HeapWord* _icms_stop_limit;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues()  { return _task_queues; }
  int*  hash_seed(int i)          { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ...
sample Eden space top 686 687 private: 688 // Support for parallelizing young gen rescan in CMS remark phase 689 Generation* _young_gen; // the younger gen 690 HeapWord** _top_addr; // ... Top of Eden 691 HeapWord** _end_addr; // ... End of Eden 692 HeapWord** _eden_chunk_array; // ... Eden partitioning array 693 size_t _eden_chunk_index; // ... top (exclusive) of array 694 size_t _eden_chunk_capacity; // ... max entries in array 695 696 // Support for parallelizing survivor space rescan 697 HeapWord** _survivor_chunk_array; 698 size_t _survivor_chunk_index; 699 size_t _survivor_chunk_capacity; 700 size_t* _cursor; 701 ChunkArray* _survivor_plab_array; 702 703 // Support for marking stack overflow handling 704 bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack); 705 bool par_take_from_overflow_list(size_t num, OopTaskQueue* to_work_q); 706 void push_on_overflow_list(oop p); 707 void par_push_on_overflow_list(oop p); 708 // the following is, obviously, not, in general, "MT-stable" 709 bool overflow_list_is_empty() const; 710 711 void preserve_mark_if_necessary(oop p); 712 void par_preserve_mark_if_necessary(oop p); 713 void preserve_mark_work(oop p, markOop m); 714 void restore_preserved_marks_if_any(); 715 NOT_PRODUCT(bool no_preserved_marks() const;) 716 // in support of testing overflow code 717 NOT_PRODUCT(int _overflow_counter;) 718 NOT_PRODUCT(bool simulate_overflow();) // sequential 719 NOT_PRODUCT(bool par_simulate_overflow();) // MT version 720 721 int _roots_scanning_options; 722 int roots_scanning_options() const { return _roots_scanning_options; } 723 void add_root_scanning_option(int o) { _roots_scanning_options |= o; } 724 void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o; } 725 726 // CMS work methods 727 void checkpointRootsInitialWork(bool asynch); // initial checkpoint work 728 729 // a return value of false indicates failure due to stack overflow 730 bool markFromRootsWork(bool asynch); // concurrent marking work 731 732 public: // FIX ME!!! only for testing 733 bool do_marking_st(bool asynch); // single-threaded marking 734 bool do_marking_mt(bool asynch); // multi-threaded marking 735 736 private: 737 738 // concurrent precleaning work 739 size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen, 740 ScanMarkedObjectsAgainCarefullyClosure* cl); 741 size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen, 742 ScanMarkedObjectsAgainCarefullyClosure* cl); 743 // Does precleaning work, returning a quantity indicative of 744 // the amount of "useful work" done. 745 size_t preclean_work(bool clean_refs, bool clean_survivors); 746 void abortable_preclean(); // Preclean while looking for possible abort 747 void initialize_sequential_subtasks_for_young_gen_rescan(int i); 748 // Helper function for above; merge-sorts the per-thread plab samples 749 void merge_survivor_plab_arrays(ContiguousSpace* surv); 750 // Resets (i.e. 
// clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // final (second) checkpoint work
  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                bool init_mark_was_synchronous);
  // work routine for parallel version of remark
  void do_remark_parallel();
  // work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // reference processing work routine (during second checkpoint)
  void refProcessingWork(bool asynch, bool clear_all_soft_refs);

  // concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);

  // (concurrent) resetting of support data structures
  void reset(bool asynch);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // Resize the generations included in the collector.
  void compute_new_size();

  // A work method used by foreground collection to determine
  // what type of collection (compacting or not, continuing or fresh)
  // it should do.
  void decide_foreground_collection_type(bool clear_all_soft_refs,
    bool* should_compact, bool* should_start_over);

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // A work method used by the foreground collector to do
  // a mark-sweep, after taking over from a possibly on-going
  // concurrent mark-sweep collection.
  void do_mark_sweep_work(bool clear_all_soft_refs,
    CollectorState first_state, bool should_start_over);

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC.  waitForForegroundGC() is called by the background
  // collector.  If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  // Incremental mode triggering: recompute the icms duty cycle and set the
  // allocation limits in the young gen.
  void icms_update_allocation_limits();

  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               ConcurrentMarkSweepGeneration* permGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock()        const { return _markBitMap.lock(); }
  static CollectorState abstract_state() { return _collectorState; }
  double initiatingOccupancy() const { return _initiatingOccupancy; }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
831 size_t get_eden_used() const; 832 size_t get_eden_capacity() const; 833 834 ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; } 835 836 // locking checks 837 NOT_PRODUCT(static bool have_cms_token();) 838 839 // XXXPERM bool should_collect(bool full, size_t size, bool tlab); 840 bool shouldConcurrentCollect(); 841 842 void collect(bool full, 843 bool clear_all_soft_refs, 844 size_t size, 845 bool tlab); 846 void collect_in_background(bool clear_all_soft_refs); 847 void collect_in_foreground(bool clear_all_soft_refs); 848 849 // In support of ExplicitGCInvokesConcurrent 850 static void request_full_gc(unsigned int full_gc_count); 851 // Should we unload classes in a particular concurrent cycle? 852 bool cms_should_unload_classes() const { 853 assert(!_unload_classes || ExplicitGCInvokesConcurrentAndUnloadsClasses, 854 "Inconsistency; see CR 6541037"); 855 return _unload_classes || CMSClassUnloadingEnabled; 856 } 857 858 void direct_allocated(HeapWord* start, size_t size); 859 860 // Object is dead if not marked and current phase is sweeping. 861 bool is_dead_obj(oop obj) const; 862 863 // After a promotion (of "start"), do any necessary marking. 864 // If "par", then it's being done by a parallel GC thread. 865 // The last two args indicate if we need precise marking 866 // and if so the size of the object so it can be dirtied 867 // in its entirety. 868 void promoted(bool par, HeapWord* start, 869 bool is_obj_array, size_t obj_size); 870 871 HeapWord* allocation_limit_reached(Space* space, HeapWord* top, 872 size_t word_size); 873 874 void getFreelistLocks() const; 875 void releaseFreelistLocks() const; 876 bool haveFreelistLocks() const; 877 878 // GC prologue and epilogue 879 void gc_prologue(bool full); 880 void gc_epilogue(bool full); 881 882 jlong time_of_last_gc(jlong now) { 883 if (_collectorState <= Idling) { 884 // gc not in progress 885 return _time_of_last_gc; 886 } else { 887 // collection in progress 888 return now; 889 } 890 } 891 892 // Support for parallel remark of survivor space 893 void* get_data_recorder(int thr_num); 894 895 CMSBitMap* markBitMap() { return &_markBitMap; } 896 void directAllocated(HeapWord* start, size_t size); 897 898 // main CMS steps and related support 899 void checkpointRootsInitial(bool asynch); 900 bool markFromRoots(bool asynch); // a return value of false indicates failure 901 // due to stack overflow 902 void preclean(); 903 void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs, 904 bool init_mark_was_synchronous); 905 void sweep(bool asynch); 906 907 // Check that the currently executing thread is the expected 908 // one (foreground collector or background collector). 
909 void check_correct_thread_executing() PRODUCT_RETURN; 910 // XXXPERM void print_statistics() PRODUCT_RETURN; 911 912 bool is_cms_reachable(HeapWord* addr); 913 914 // Performance Counter Support 915 CollectorCounters* counters() { return _gc_counters; } 916 917 // timer stuff 918 void startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); } 919 void stopTimer() { assert( _timer.is_active(), "Error"); _timer.stop(); } 920 void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); } 921 double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); } 922 923 int yields() { return _numYields; } 924 void resetYields() { _numYields = 0; } 925 void incrementYields() { _numYields++; } 926 void resetNumDirtyCards() { _numDirtyCards = 0; } 927 void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; } 928 size_t numDirtyCards() { return _numDirtyCards; } 929 930 static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; } 931 static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; } 932 static bool foregroundGCIsActive() { return _foregroundGCIsActive; } 933 static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; } 934 uint sweepCount() const { return _sweepCount; } 935 void incrementSweepCount() { _sweepCount++; } 936 937 // Timers/stats for gc scheduling and incremental mode pacing. 938 CMSStats& stats() { return _stats; } 939 940 // Convenience methods that check whether CMSIncrementalMode is enabled and 941 // forward to the corresponding methods in ConcurrentMarkSweepThread. 942 static void start_icms(); 943 static void stop_icms(); // Called at the end of the cms cycle. 944 static void disable_icms(); // Called before a foreground collection. 945 static void enable_icms(); // Called after a foreground collection. 946 void icms_wait(); // Called at yield points. 947 948 // Adaptive size policy 949 CMSAdaptiveSizePolicy* size_policy(); 950 CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters(); 951 952 // debugging 953 void verify(bool); 954 bool verify_after_remark(); 955 void verify_ok_to_terminate() const PRODUCT_RETURN; 956 void verify_work_stacks_empty() const PRODUCT_RETURN; 957 void verify_overflow_empty() const PRODUCT_RETURN; 958 959 // convenience methods in support of debugging 960 static const size_t skip_header_HeapWords() PRODUCT_RETURN0; 961 HeapWord* block_start(const void* p) const PRODUCT_RETURN0; 962 963 // accessors 964 CMSMarkStack* verification_mark_stack() { return &_markStack; } 965 CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; } 966 967 // Get the bit map with a perm gen "deadness" information. 968 CMSBitMap* perm_gen_verify_bit_map() { return &_perm_gen_verify_bit_map; } 969 970 // Initialization errors 971 bool completed_initialization() { return _completed_initialization; } 972 }; 973 974 class CMSExpansionCause : public AllStatic { 975 public: 976 enum Cause { 977 _no_expansion, 978 _satisfy_free_ratio, 979 _satisfy_promotion, 980 _satisfy_allocation, 981 _allocate_par_lab, 982 _allocate_par_spooling_space, 983 _adaptive_size_policy 984 }; 985 // Return a string describing the cause of the expansion. 
986 static const char* to_string(CMSExpansionCause::Cause cause); 987 }; 988 989 class ConcurrentMarkSweepGeneration: public CardGeneration { 990 friend class VMStructs; 991 friend class ConcurrentMarkSweepThread; 992 friend class ConcurrentMarkSweep; 993 friend class CMSCollector; 994 protected: 995 static CMSCollector* _collector; // the collector that collects us 996 CompactibleFreeListSpace* _cmsSpace; // underlying space (only one for now) 997 998 // Performance Counters 999 GenerationCounters* _gen_counters; 1000 GSpaceCounters* _space_counters; 1001 1002 // Words directly allocated, used by CMSStats. 1003 size_t _direct_allocated_words; 1004 1005 // Non-product stat counters 1006 NOT_PRODUCT( 1007 int _numObjectsPromoted; 1008 int _numWordsPromoted; 1009 int _numObjectsAllocated; 1010 int _numWordsAllocated; 1011 ) 1012 1013 // Used for sizing decisions 1014 bool _incremental_collection_failed; 1015 bool incremental_collection_failed() { 1016 return _incremental_collection_failed; 1017 } 1018 void set_incremental_collection_failed() { 1019 _incremental_collection_failed = true; 1020 } 1021 void clear_incremental_collection_failed() { 1022 _incremental_collection_failed = false; 1023 } 1024 1025 private: 1026 // For parallel young-gen GC support. 1027 CMSParGCThreadState** _par_gc_thread_states; 1028 1029 // Reason generation was expanded 1030 CMSExpansionCause::Cause _expansion_cause; 1031 1032 // accessors 1033 void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;} 1034 CMSExpansionCause::Cause expansion_cause() { return _expansion_cause; } 1035 1036 // In support of MinChunkSize being larger than min object size 1037 const double _dilatation_factor; 1038 1039 enum CollectionTypes { 1040 Concurrent_collection_type = 0, 1041 MS_foreground_collection_type = 1, 1042 MSC_foreground_collection_type = 2, 1043 Unknown_collection_type = 3 1044 }; 1045 1046 CollectionTypes _debug_collection_type; 1047 1048 protected: 1049 // Grow generation by specified size (returns false if unable to grow) 1050 bool grow_by(size_t bytes); 1051 // Grow generation to reserved size. 1052 bool grow_to_reserved(); 1053 // Shrink generation by specified size (returns false if unable to shrink) 1054 virtual void shrink_by(size_t bytes); 1055 1056 // Update statistics for GC 1057 virtual void update_gc_stats(int level, bool full); 1058 1059 // Maximum available space in the generation (including uncommitted) 1060 // space. 1061 size_t max_available() const; 1062 1063 public: 1064 ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, 1065 int level, CardTableRS* ct, 1066 bool use_adaptive_freelists, 1067 FreeBlockDictionary::DictionaryChoice); 1068 1069 // Accessors 1070 CMSCollector* collector() const { return _collector; } 1071 static void set_collector(CMSCollector* collector) { 1072 assert(_collector == NULL, "already set"); 1073 _collector = collector; 1074 } 1075 CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; } 1076 1077 Mutex* freelistLock() const; 1078 1079 virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; } 1080 1081 // Adaptive size policy 1082 CMSAdaptiveSizePolicy* size_policy(); 1083 1084 bool refs_discovery_is_atomic() const { return false; } 1085 bool refs_discovery_is_mt() const { 1086 // Note: CMS does MT-discovery during the parallel-remark 1087 // phases. Use ReferenceProcessorMTMutator to make refs 1088 // discovery MT-safe during such phases or other parallel 1089 // discovery phases in the future. 
// This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return false;
  }

  // Override
  virtual void ref_processor_init();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  double occupancy() { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;

  // over-rides
  MemRegion used_region() const;
  MemRegion used_region_at_save_marks() const;

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well? Note that the second conjunct is a
  // hack to allow the collection of the younger gen first if the flag is
  // set. This is better than using the policy's should_collect_gen0_first()
  // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
  virtual bool full_collects_younger_generations() const {
    return UseCMSCompactAtFullCollection && !CollectGen0First;
  }

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Support for compaction
  CompactibleSpace* first_compaction_space() const;
  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size, oop* ref);
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }

  // Incremental mode triggering.
  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  // Used by CMSStats to track direct allocation.  The value is sampled and
  // reset after each young gen collection.
  size_t direct_allocated_words() const { return _direct_allocated_words; }
  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }

  // Overrides for parallel promotion.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  // This one should not be called for CMS.
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num);
  virtual void par_oop_since_save_marks_iterate_done(int thread_num);

  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
    bool younger_handles_promotion_failure) const;

  bool should_collect(bool full, size_t size, bool tlab);
    // XXXPERM
  bool shouldConcurrentCollect(double initiatingOccupancy); // XXXPERM
  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);

  HeapWord* expand_and_allocate(size_t word_size,
                                bool tlab,
                                bool parallel = false);

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_prologue_work(bool full, bool registerClosure,
                        ModUnionClosure* modUnionClosure);
  void gc_epilogue(bool full);
  void gc_epilogue_work(bool full);

  // Time since last GC of this generation
  jlong time_of_last_gc(jlong now) {
    return collector()->time_of_last_gc(now);
  }
  void update_time_of_last_gc(jlong now) {
    collector()-> update_time_of_last_gc(now);
  }

  // Allocation failure
  void expand(size_t bytes, size_t expand_bytes,
    CMSExpansionCause::Cause cause);
  void shrink(size_t bytes);
  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
  bool expand_and_ensure_spooling_space(PromotionInfo* promo);

  // Iteration support and related enquiries
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);
  void younger_refs_iterate(OopsInGenClosure* cl);

  // Iteration support specific to CMS generations
  void save_sweep_limit();

  // More iteration support
  virtual void oop_iterate(MemRegion mr, OopClosure* cl);
  virtual void oop_iterate(OopClosure* cl);
  virtual void object_iterate(ObjectClosure* cl);

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get message from the compiler:
  // oop_since_save_marks_iterate_nv hides virtual function...
  #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)

  // Smart allocation  XXX -- move to CFLSpace?
  void setNearLargestChunk();
  bool isNearLargestChunk(HeapWord* addr);

  // Get the chunk at the end of the space.  Delegates to
  // the space.
1221 FreeChunk* find_chunk_at_end(); 1222 1223 // Overriding of unused functionality (sharing not yet supported with CMS) 1224 void pre_adjust_pointers(); 1225 void post_compact(); 1226 1227 // Debugging 1228 void prepare_for_verify(); 1229 void verify(bool allow_dirty); 1230 void print_statistics() PRODUCT_RETURN; 1231 1232 // Performance Counters support 1233 virtual void update_counters(); 1234 virtual void update_counters(size_t used); 1235 void initialize_performance_counters(); 1236 CollectorCounters* counters() { return collector()->counters(); } 1237 1238 // Support for parallel remark of survivor space 1239 void* get_data_recorder(int thr_num) { 1240 //Delegate to collector 1241 return collector()->get_data_recorder(thr_num); 1242 } 1243 1244 // Printing 1245 const char* name() const; 1246 virtual const char* short_name() const { return "CMS"; } 1247 void print() const; 1248 void printOccupancy(const char* s); 1249 bool must_be_youngest() const { return false; } 1250 bool must_be_oldest() const { return true; } 1251 1252 void compute_new_size(); 1253 1254 CollectionTypes debug_collection_type() { return _debug_collection_type; } 1255 void rotate_debug_collection_type(); 1256 }; 1257 1258 class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration { 1259 1260 // Return the size policy from the heap's collector 1261 // policy casted to CMSAdaptiveSizePolicy*. 1262 CMSAdaptiveSizePolicy* cms_size_policy() const; 1263 1264 // Resize the generation based on the adaptive size 1265 // policy. 1266 void resize(size_t cur_promo, size_t desired_promo); 1267 1268 // Return the GC counters from the collector policy 1269 CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters(); 1270 1271 virtual void shrink_by(size_t bytes); 1272 1273 public: 1274 virtual void compute_new_size(); 1275 ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, 1276 int level, CardTableRS* ct, 1277 bool use_adaptive_freelists, 1278 FreeBlockDictionary::DictionaryChoice 1279 dictionaryChoice) : 1280 ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct, 1281 use_adaptive_freelists, dictionaryChoice) {} 1282 1283 virtual const char* short_name() const { return "ASCMS"; } 1284 virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; } 1285 1286 virtual void update_counters(); 1287 virtual void update_counters(size_t used); 1288 }; 1289 1290 // 1291 // Closures of various sorts used by CMS to accomplish its work 1292 // 1293 1294 // This closure is used to check that a certain set of oops is empty. 1295 class FalseClosure: public OopClosure { 1296 public: 1297 void do_oop(oop* p) { 1298 guarantee(false, "Should be an empty set"); 1299 } 1300 }; 1301 1302 // This closure is used to do concurrent marking from the roots 1303 // following the first checkpoint. 
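// (A sketch of the finger discipline used here, hedged since the details live
// in the .cpp file and in the PushOrMark closures declared elsewhere: the bit
// map is walked from low to high addresses behind a _finger. A newly
// discovered object above the finger is merely marked, since the walk will
// reach it later; an object below the finger must additionally be pushed on
// the mark stack, roughly
//    bitMap->mark(addr);
//    if (addr < finger) markStack->push(obj);
// If the push fails because of stack overflow, _restart_addr is lowered so
// marking can restart from that point; see CMSCollector::lower_restart_addr().)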
class MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _bitMap;
  CMSBitMap*     _mut;
  CMSMarkStack*  _markStack;
  CMSMarkStack*  _revisitStack;
  bool           _yield;
  int            _skipBits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  DEBUG_ONLY(bool _verifying;)

 public:
  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bitMap,
                       CMSMarkStack* markStack,
                       CMSMarkStack* revisitStack,
                       bool should_yield, bool verifying = false);
  void do_bit(size_t offset);
  void reset(HeapWord* addr);
  inline void do_yield_check();

 private:
  void scanOopsInOop(HeapWord* ptr);
  void do_yield_work();
};

// This closure is used to do concurrent multi-threaded
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of the serial version
// above, but I have not had the time to refactor things cleanly.
// That will be done for Dolphin.
class Par_MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _whole_span;
  MemRegion      _span;
  CMSBitMap*     _bit_map;
  CMSBitMap*     _mut;
  OopTaskQueue*  _work_queue;
  CMSMarkStack*  _overflow_stack;
  CMSMarkStack*  _revisit_stack;
  bool           _yield;
  int            _skip_bits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  CMSConcMarkingTask* _task;
 public:
  Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
                           MemRegion span,
                           CMSBitMap* bit_map,
                           OopTaskQueue* work_queue,
                           CMSMarkStack* overflow_stack,
                           CMSMarkStack* revisit_stack,
                           bool should_yield);
  void do_bit(size_t offset);
  inline void do_yield_check();

 private:
  void scan_oops_in_oop(HeapWord* ptr);
  void do_yield_work();
  bool get_work_from_overflow_stack();
};

// The following closures are used to do certain kinds of verification of
// CMS marking.
class PushAndMarkVerifyClosure: public OopClosure {
  CMSCollector*    _collector;
  MemRegion        _span;
  CMSBitMap*       _verification_bm;
  CMSBitMap*       _cms_bm;
  CMSMarkStack*    _mark_stack;
 public:
  PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                           MemRegion span,
                           CMSBitMap* verification_bm,
                           CMSBitMap* cms_bm,
                           CMSMarkStack* mark_stack);
  void do_oop(oop* p);
  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
};

class MarkFromRootsVerifyClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
  HeapWord*      _finger;
  PushAndMarkVerifyClosure _pam_verify_closure;
 public:
  MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
                             CMSBitMap* verification_bm,
                             CMSBitMap* cms_bm,
                             CMSMarkStack* mark_stack);
  void do_bit(size_t offset);
  void reset(HeapWord* addr);
};


// This closure is used to check that a certain set of bits is
// "empty" (i.e. the bit vector doesn't have any 1-bits).
1407 class FalseBitMapClosure: public BitMapClosure { 1408 public: 1409 void do_bit(size_t offset) { 1410 guarantee(false, "Should not have a 1 bit"); 1411 } 1412 }; 1413 1414 // This closure is used during the second checkpointing phase 1415 // to rescan the marked objects on the dirty cards in the mod 1416 // union table and the card table proper. It's invoked via 1417 // MarkFromDirtyCardsClosure below. It uses either 1418 // [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case) 1419 // declared in genOopClosures.hpp to accomplish some of its work. 1420 // In the parallel case the bitMap is shared, so access to 1421 // it needs to be suitably synchronized for updates by embedded 1422 // closures that update it; however, this closure itself only 1423 // reads the bit_map and because it is idempotent, is immune to 1424 // reading stale values. 1425 class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure { 1426 #ifdef ASSERT 1427 CMSCollector* _collector; 1428 MemRegion _span; 1429 union { 1430 CMSMarkStack* _mark_stack; 1431 OopTaskQueue* _work_queue; 1432 }; 1433 #endif // ASSERT 1434 bool _parallel; 1435 CMSBitMap* _bit_map; 1436 union { 1437 MarkRefsIntoAndScanClosure* _scan_closure; 1438 Par_MarkRefsIntoAndScanClosure* _par_scan_closure; 1439 }; 1440 1441 public: 1442 ScanMarkedObjectsAgainClosure(CMSCollector* collector, 1443 MemRegion span, 1444 ReferenceProcessor* rp, 1445 CMSBitMap* bit_map, 1446 CMSMarkStack* mark_stack, 1447 CMSMarkStack* revisit_stack, 1448 MarkRefsIntoAndScanClosure* cl): 1449 #ifdef ASSERT 1450 _collector(collector), 1451 _span(span), 1452 _mark_stack(mark_stack), 1453 #endif // ASSERT 1454 _parallel(false), 1455 _bit_map(bit_map), 1456 _scan_closure(cl) { } 1457 1458 ScanMarkedObjectsAgainClosure(CMSCollector* collector, 1459 MemRegion span, 1460 ReferenceProcessor* rp, 1461 CMSBitMap* bit_map, 1462 OopTaskQueue* work_queue, 1463 CMSMarkStack* revisit_stack, 1464 Par_MarkRefsIntoAndScanClosure* cl): 1465 #ifdef ASSERT 1466 _collector(collector), 1467 _span(span), 1468 _work_queue(work_queue), 1469 #endif // ASSERT 1470 _parallel(true), 1471 _bit_map(bit_map), 1472 _par_scan_closure(cl) { } 1473 1474 void do_object(oop obj) { 1475 guarantee(false, "Call do_object_b(oop, MemRegion) instead"); 1476 } 1477 bool do_object_b(oop obj) { 1478 guarantee(false, "Call do_object_b(oop, MemRegion) form instead"); 1479 return false; 1480 } 1481 bool do_object_bm(oop p, MemRegion mr); 1482 }; 1483 1484 // This closure is used during the second checkpointing phase 1485 // to rescan the marked objects on the dirty cards in the mod 1486 // union table and the card table proper. It invokes 1487 // ScanMarkedObjectsAgainClosure above to accomplish much of its work. 1488 // In the parallel case, the bit map is shared and requires 1489 // synchronized access. 
1490 class MarkFromDirtyCardsClosure: public MemRegionClosure { 1491 CompactibleFreeListSpace* _space; 1492 ScanMarkedObjectsAgainClosure _scan_cl; 1493 size_t _num_dirty_cards; 1494 1495 public: 1496 MarkFromDirtyCardsClosure(CMSCollector* collector, 1497 MemRegion span, 1498 CompactibleFreeListSpace* space, 1499 CMSBitMap* bit_map, 1500 CMSMarkStack* mark_stack, 1501 CMSMarkStack* revisit_stack, 1502 MarkRefsIntoAndScanClosure* cl): 1503 _space(space), 1504 _num_dirty_cards(0), 1505 _scan_cl(collector, span, collector->ref_processor(), bit_map, 1506 mark_stack, revisit_stack, cl) { } 1507 1508 MarkFromDirtyCardsClosure(CMSCollector* collector, 1509 MemRegion span, 1510 CompactibleFreeListSpace* space, 1511 CMSBitMap* bit_map, 1512 OopTaskQueue* work_queue, 1513 CMSMarkStack* revisit_stack, 1514 Par_MarkRefsIntoAndScanClosure* cl): 1515 _space(space), 1516 _num_dirty_cards(0), 1517 _scan_cl(collector, span, collector->ref_processor(), bit_map, 1518 work_queue, revisit_stack, cl) { } 1519 1520 void do_MemRegion(MemRegion mr); 1521 void set_space(CompactibleFreeListSpace* space) { _space = space; } 1522 size_t num_dirty_cards() { return _num_dirty_cards; } 1523 }; 1524 1525 // This closure is used in the non-product build to check 1526 // that there are no MemRegions with a certain property. 1527 class FalseMemRegionClosure: public MemRegionClosure { 1528 void do_MemRegion(MemRegion mr) { 1529 guarantee(!mr.is_empty(), "Shouldn't be empty"); 1530 guarantee(false, "Should never be here"); 1531 } 1532 }; 1533 1534 // This closure is used during the precleaning phase 1535 // to "carefully" rescan marked objects on dirty cards. 1536 // It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp 1537 // to accomplish some of its work. 1538 class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful { 1539 CMSCollector* _collector; 1540 MemRegion _span; 1541 bool _yield; 1542 Mutex* _freelistLock; 1543 CMSBitMap* _bitMap; 1544 CMSMarkStack* _markStack; 1545 MarkRefsIntoAndScanClosure* _scanningClosure; 1546 1547 public: 1548 ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector, 1549 MemRegion span, 1550 CMSBitMap* bitMap, 1551 CMSMarkStack* markStack, 1552 CMSMarkStack* revisitStack, 1553 MarkRefsIntoAndScanClosure* cl, 1554 bool should_yield): 1555 _collector(collector), 1556 _span(span), 1557 _yield(should_yield), 1558 _bitMap(bitMap), 1559 _markStack(markStack), 1560 _scanningClosure(cl) { 1561 } 1562 1563 void do_object(oop p) { 1564 guarantee(false, "call do_object_careful instead"); 1565 } 1566 1567 size_t do_object_careful(oop p) { 1568 guarantee(false, "Unexpected caller"); 1569 return 0; 1570 } 1571 1572 size_t do_object_careful_m(oop p, MemRegion mr); 1573 1574 void setFreelistLock(Mutex* m) { 1575 _freelistLock = m; 1576 _scanningClosure->set_freelistLock(m); 1577 } 1578 1579 private: 1580 inline bool do_yield_check(); 1581 1582 void do_yield_work(); 1583 }; 1584 1585 class SurvivorSpacePrecleanClosure: public ObjectClosureCareful { 1586 CMSCollector* _collector; 1587 MemRegion _span; 1588 bool _yield; 1589 CMSBitMap* _bit_map; 1590 CMSMarkStack* _mark_stack; 1591 PushAndMarkClosure* _scanning_closure; 1592 unsigned int _before_count; 1593 1594 public: 1595 SurvivorSpacePrecleanClosure(CMSCollector* collector, 1596 MemRegion span, 1597 CMSBitMap* bit_map, 1598 CMSMarkStack* mark_stack, 1599 PushAndMarkClosure* cl, 1600 unsigned int before_count, 1601 bool should_yield): 1602 _collector(collector), 1603 _span(span), 1604 _yield(should_yield), 1605 
class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
  CMSCollector*                  _collector;
  MemRegion                      _span;
  bool                           _yield;
  CMSBitMap*                     _bit_map;
  CMSMarkStack*                  _mark_stack;
  PushAndMarkClosure*            _scanning_closure;
  unsigned int                   _before_count;

 public:
  SurvivorSpacePrecleanClosure(CMSCollector* collector,
                               MemRegion span,
                               CMSBitMap* bit_map,
                               CMSMarkStack* mark_stack,
                               PushAndMarkClosure* cl,
                               unsigned int before_count,
                               bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _scanning_closure(cl),
    _before_count(before_count)
  { }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p);

  size_t do_object_careful_m(oop p, MemRegion mr) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
};

// This closure is used to accomplish the sweeping work
// after the second checkpoint but before the concurrent reset
// phase.
//
// Terminology
//   left hand chunk (LHC) - block of one or more chunks currently being
//      coalesced. The LHC is available for coalescing with a new chunk.
//   right hand chunk (RHC) - block that is currently being swept; if it is
//      free or garbage, it can be coalesced with the LHC.
// _inFreeRange is true if there is currently a LHC.
// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
// _freeRangeInFreeLists is true if the LHC is in the free lists.
// _freeFinger is the address of the current LHC.
class SweepClosure: public BlkClosureCareful {
  CMSCollector*                  _collector;  // collector doing the work
  ConcurrentMarkSweepGeneration* _g;          // Generation being swept
  CompactibleFreeListSpace*      _sp;         // Space being swept
  HeapWord*                      _limit;
  Mutex*                         _freelistLock; // Free list lock (in space)
  CMSBitMap*                     _bitMap;       // Marking bit map (in
                                                // generation)
  bool                           _inFreeRange;  // Indicates if we are in the
                                                // midst of a free run
  bool                           _freeRangeInFreeLists;
                                     // Often, we have just found
                                     // a free chunk and started
                                     // a new free range; we do not
                                     // eagerly remove this chunk from
                                     // the free lists unless there is
                                     // a possibility of coalescing.
                                     // When true, this flag indicates
                                     // that the _freeFinger below
                                     // points to a potentially free chunk
                                     // that may still be in the free lists
  bool                           _lastFreeRangeCoalesced;
                                     // free range contains chunks
                                     // coalesced
  bool                           _yield;
                                     // Whether sweeping should be
                                     // done with yields. For instance
                                     // when done by the foreground
                                     // collector we shouldn't yield.
  HeapWord*                      _freeFinger;   // When _inFreeRange is set, the
                                                // pointer to the "left hand
                                                // chunk"
  size_t                         _freeRangeSize;
                                     // When _inFreeRange is set, this
                                     // indicates the accumulated size
                                     // of the "left hand chunk"
  NOT_PRODUCT(
    size_t                       _numObjectsFreed;
    size_t                       _numWordsFreed;
    size_t                       _numObjectsLive;
    size_t                       _numWordsLive;
    size_t                       _numObjectsAlreadyFree;
    size_t                       _numWordsAlreadyFree;
    FreeChunk*                   _last_fc;
  )
 private:
  // Code that is common to a free chunk or garbage when
  // encountered during sweeping.
  void doPostIsFreeOrGarbageChunk(FreeChunk* fc, size_t chunkSize);
  // Process a free chunk during sweeping.
  void doAlreadyFreeChunk(FreeChunk* fc);
  // Process a garbage chunk during sweeping.
  size_t doGarbageChunk(FreeChunk* fc);
  // Process a live chunk during sweeping.
  size_t doLiveChunk(FreeChunk* fc);

  // Accessors.
  HeapWord* freeFinger() const            { return _freeFinger; }
  void set_freeFinger(HeapWord* v)        { _freeFinger = v; }
  size_t freeRangeSize() const            { return _freeRangeSize; }
  void set_freeRangeSize(size_t v)        { _freeRangeSize = v; }
  bool inFreeRange() const                { return _inFreeRange; }
  void set_inFreeRange(bool v)            { _inFreeRange = v; }
  bool lastFreeRangeCoalesced() const     { return _lastFreeRangeCoalesced; }
  void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
  bool freeRangeInFreeLists() const       { return _freeRangeInFreeLists; }
  void set_freeRangeInFreeLists(bool v)   { _freeRangeInFreeLists = v; }

  // Initialize a free range.
  void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
  // Return this chunk to the free lists.
  void flushCurFreeChunk(HeapWord* chunk, size_t size);

  // Check if we should yield and do so when necessary.
  inline void do_yield_check(HeapWord* addr);

  // Yield.
  void do_yield_work(HeapWord* addr);

  // Debugging/Printing
  void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;

 public:
  SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
               CMSBitMap* bitMap, bool should_yield);
  ~SweepClosure();

  size_t do_blk_careful(HeapWord* addr);
};
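// A rough, self-contained sketch of the left-hand-chunk accumulation that the
// terminology block above describes: as the sweep walks the space in address
// order, adjacent free or garbage blocks are merged into one growing "left
// hand chunk", which is flushed only when a live block ends the run. For
// illustration only; CoalesceSketch and its flush callback are hypothetical
// names, not HotSpot code.
class CoalesceSketch {
  bool   _inFreeRange;       // are we currently growing an LHC?
  size_t _freeFinger;        // start offset of the current LHC
  size_t _freeRangeSize;     // accumulated size of the current LHC
  void (*_flush)(size_t start, size_t size);  // stand-in for a free-list insert
 public:
  explicit CoalesceSketch(void (*flush)(size_t, size_t))
    : _inFreeRange(false), _freeFinger(0), _freeRangeSize(0), _flush(flush) { }
  // Called for each block in address order; 'free_or_garbage' says whether
  // the block can be reclaimed.
  void do_block(size_t start, size_t size, bool free_or_garbage) {
    if (free_or_garbage) {
      if (!_inFreeRange) {             // start a new LHC at this block
        _inFreeRange   = true;
        _freeFinger    = start;
        _freeRangeSize = 0;
      }
      _freeRangeSize += size;          // coalesce the RHC into the LHC
    } else if (_inFreeRange) {         // a live block ends the free run
      _flush(_freeFinger, _freeRangeSize);
      _inFreeRange = false;
    }
  }
  void finish() {                      // flush a trailing free run, if any
    if (_inFreeRange) {
      _flush(_freeFinger, _freeRangeSize);
      _inFreeRange = false;
    }
  }
};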
// Closures related to weak references processing.

// During CMS' weak reference processing, this is a
// work-routine/closure used to complete transitive
// marking of objects as live after an initial set of
// such objects has been completely accumulated.
class CMSDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*        _collector;
  MemRegion            _span;
  CMSMarkStack*        _mark_stack;
  CMSBitMap*           _bit_map;
  CMSKeepAliveClosure* _keep_alive;
 public:
  CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
                              CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                              CMSKeepAliveClosure* keep_alive):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _keep_alive(keep_alive) { }

  void do_void();
};

// A parallel version of CMSDrainMarkingStackClosure above.
class CMSParDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*                 _collector;
  MemRegion                     _span;
  OopTaskQueue*                 _work_queue;
  CMSBitMap*                    _bit_map;
  CMSInnerParMarkAndPushClosure _mark_and_push;

 public:
  CMSParDrainMarkingStackClosure(CMSCollector* collector,
                                 MemRegion span, CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _work_queue(work_queue),
    _mark_and_push(collector, span, bit_map, work_queue) { }

 public:
  void trim_queue(uint max);
  void do_void();
};

// Allow yielding or short-circuiting of reference list
// precleaning work.
class CMSPrecleanRefsYieldClosure: public YieldClosure {
  CMSCollector* _collector;
  void do_yield_work();
 public:
  CMSPrecleanRefsYieldClosure(CMSCollector* collector):
    _collector(collector) {}
  virtual bool should_return();
};


// Convenience class that locks the free list locks for the given CMS collector.
class FreelistLocker: public StackObj {
 private:
  CMSCollector* _collector;
 public:
  FreelistLocker(CMSCollector* collector):
    _collector(collector) {
    _collector->getFreelistLocks();
  }

  ~FreelistLocker() {
    _collector->releaseFreelistLocks();
  }
};

// Mark all dead objects in a given space.
class MarkDeadObjectsClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  CMSBitMap*                      _live_bit_map;
  CMSBitMap*                      _dead_bit_map;
 public:
  MarkDeadObjectsClosure(const CMSCollector* collector,
                         const CompactibleFreeListSpace* sp,
                         CMSBitMap* live_bit_map,
                         CMSBitMap* dead_bit_map) :
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _dead_bit_map(dead_bit_map) {}
  size_t do_blk(HeapWord* addr);
};
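// FreelistLocker above follows the scoped (RAII) lock-holder idiom: its
// constructor takes the collector's free list locks and its destructor
// releases them when the object goes out of scope, on any exit path. Below is
// a minimal, self-contained sketch of the same idiom for illustration only;
// ToyLock, ScopedToyLocker and guarded_work are hypothetical names, not
// HotSpot code.
class ToyLock {
  bool _held;
 public:
  ToyLock() : _held(false) { }
  void acquire()    { _held = true;  }
  void release()    { _held = false; }
  bool held() const { return _held;  }
};

class ScopedToyLocker {
  ToyLock* _lock;
 public:
  explicit ScopedToyLocker(ToyLock* lock) : _lock(lock) { _lock->acquire(); }
  ~ScopedToyLocker() { _lock->release(); }  // released on any exit path
};

// Usage: the lock is held exactly for the lifetime of the locker object.
inline void guarded_work(ToyLock* lock) {
  ScopedToyLocker guard(lock);   // acquired here
  // ... work that requires the lock ...
}                                // released here, when 'guard' is destroyed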