/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// ConcurrentMarkSweepGeneration is in support of a concurrent
// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
// style. We assume, for now, that this generation is always the
// seniormost generation (modulo the PermGeneration), and for simplicity
// in the first implementation, that this generation is a single compactible
// space. Neither of these restrictions appears essential, and will be
// relaxed in the future when more time is available to implement the
// greater generality (and there's a need for it).
//
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.

class CMSAdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
class CompactibleFreeListSpace;
class FreeChunk;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;

// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
// we have _shifter == 0, and for the mod union table we have
// _shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
// XXX 64-bit issues in BitMap?
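//
// Illustrative sketch (an assumption for exposition, not part of the original
// comment): with _shifter == 0 each bit covers exactly one HeapWord, so the
// bit offset of an address in the covered range is roughly
//   heapWordToOffset(addr) == pointer_delta(addr, _bmStartWord) >> _shifter;
// For the mod union table on a typical 64-bit build (card_shift == 9,
// LogHeapWordSize == 3) the shifter would be 6, i.e. one bit per 64 HeapWords,
// which is one 512-byte card. The exact constants are platform-dependent.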
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  HeapWord*    _bmStartWord;   // base address of range covered by map
  size_t       _bmWordSize;    // map size (in #HeapWords covered)
  const int    _shifter;       // shifts to convert HeapWord to bit position
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap       _bm;            // the bit map itself
 public:
  Mutex* const _lock;          // mutex protecting _bm;

 public:
  // constructor
  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);

  // allocates the actual storage for the map
  bool allocate(MemRegion mr);
  // field getter
  Mutex* lock() const { return _lock; }
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize; }
  size_t    sizeInBits()  const { return _bm.size(); }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // reading marks
  bool isMarked(HeapWord* addr) const;
  bool par_isMarked(HeapWord* addr) const; // do not lock checks
  bool isUnmarked(HeapWord* addr) const;
  bool isAllClear() const;

  // writing marks
  void mark(HeapWord* addr);
  // For marking by parallel GC threads;
  // returns true if we did, false if another thread did
  bool par_mark(HeapWord* addr);

  void mark_range(MemRegion mr);
  void par_mark_range(MemRegion mr);
  void mark_large_range(MemRegion mr);
  void par_mark_large_range(MemRegion mr);
  void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
  void clear_range(MemRegion mr);
  void par_clear_range(MemRegion mr);
  void clear_large_range(MemRegion mr);
  void par_clear_large_range(MemRegion mr);
  void clear_all();
  void clear_all_incrementally();  // Not yet implemented!!

  NOT_PRODUCT(
    // checks the memory region for validity
    void region_invariant(MemRegion mr);
  )

  // iteration
  void iterate(BitMapClosure* cl) {
    _bm.iterate(cl);
  }
  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
  void dirty_range_iterate_clear(MemRegionClosure* cl);
  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);

  // auxiliary support for iteration
  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
                                     HeapWord* end_addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
                                       HeapWord* end_addr) const;
  MemRegion getAndClearMarkedRegion(HeapWord* addr);
  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
                                    HeapWord* end_addr);

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const;
  size_t    heapWordToOffset(HeapWord* addr) const;
  size_t    heapWordDiffToOffsetDiff(size_t diff) const;

  // debugging
  // is this address range covered by the bit-map?
  NOT_PRODUCT(
    bool covers(MemRegion mr) const;
    bool covers(HeapWord* start, size_t size = 0) const;
  )
  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
};

// Represents a marking stack used by the CMS collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
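//
// Illustrative usage sketch (an assumption drawn from the interface below,
// not a quote of the marking code): a marking step typically attempts push()
// and falls back to some overflow mechanism when the stack is full, e.g.
//   if (!mark_stack->push(obj)) {
//     // stack full: record obj elsewhere (cf. CMSCollector::push_on_overflow_list)
//   }
// The par_pop()/par_push() variants simply take _par_lock around the serial
// operations so several GC threads can share one stack.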
class CMSMarkStack: public CHeapObj {
  //
  friend class CMSCollector;   // to get at expansion stats further below
  //

  VirtualSpace _virtual_space;     // space for the stack
  oop*    _base;                   // bottom of stack
  size_t  _index;                  // one more than last occupied index
  size_t  _capacity;               // max #elements
  Mutex   _par_lock;               // an advisory lock used in case of parallel access
  NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run

 protected:
  size_t _hit_limit;      // we hit max stack size limit
  size_t _failed_double;  // we failed expansion before hitting limit

 public:
  CMSMarkStack():
    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
    _hit_limit(0),
    _failed_double(0) {}

  bool allocate(size_t size);

  size_t capacity() const { return _capacity; }

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index] ;
    }
    return NULL;
  }

  bool push(oop ptr) {
    if (isFull()) {
      return false;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
      return true;
    }
  }

  bool isEmpty() const { return _index == 0; }
  bool isFull()  const {
    assert(_index <= _capacity, "buffer overflow");
    return _index == _capacity;
  }

  size_t length() { return _index; }

  // "Parallel versions" of some of the above
  oop par_pop() {
    // lock and pop
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return pop();
  }

  bool par_push(oop ptr) {
    // lock and push
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return push(ptr);
  }

  // Forcibly reset the stack, losing all of its contents.
  void reset() {
    _index = 0;
  }

  // Expand the stack, typically in response to an overflow condition
  void expand();

  // Compute the least valued stack element.
  oop least_value(HeapWord* low) {
    oop least = (oop)low;
    for (size_t i = 0; i < _index; i++) {
      least = MIN2(least, _base[i]);
    }
    return least;
  }

  // Exposed here to allow stack expansion in || case
  Mutex* par_lock() { return &_par_lock; }
};

class CardTableRS;
class CMSParGCThreadState;

class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};

class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};

// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
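//
// Illustrative note (an assumption based on the surrounding declarations, not
// part of the original comment): each parallel GC thread records its PLAB
// boundaries with record_sample(); the collector later merge-sorts the
// per-thread arrays (see CMSCollector::merge_survivor_plab_arrays) so that
// the survivor space can be split at those boundaries into chunks that worker
// threads rescan independently during remark.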
class ChunkArray: public CHeapObj {
  size_t _index;
  size_t _capacity;
  HeapWord** _array;   // storage for array

 public:
  ChunkArray() : _index(0), _capacity(0), _array(NULL) {}
  ChunkArray(HeapWord** a, size_t c):
    _index(0), _capacity(c), _array(a) {}

  HeapWord** array() { return _array; }
  void set_array(HeapWord** a) { _array = a; }

  size_t capacity() { return _capacity; }
  void set_capacity(size_t c) { _capacity = c; }

  size_t end() {
    assert(_index < capacity(), "_index out of bounds");
    return _index;
  }  // exclusive

  HeapWord* nth(size_t n) {
    assert(n < end(), "Out of bounds access");
    return _array[n];
  }

  void reset() {
    _index = 0;
  }

  void record_sample(HeapWord* p, size_t sz) {
    // For now we do not do anything with the size
    if (_index < _capacity) {
      _array[_index++] = p;
    }
  }
};

//
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing.  Most statistics are exponential averages.
//
class CMSStats VALUE_OBJ_CLASS_SPEC {
 private:
  ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.

  // The following are exponential averages with factor alpha:
  //   avg = (100 - alpha) * avg + alpha * cur_sample
  //
  //   The durations measure:  end_time[n] - start_time[n]
  //   The periods measure:    start_time[n] - start_time[n-1]
  //
  // The cms period and duration include only concurrent collections; time spent
  // in foreground cms collections due to System.gc() or because of a failure to
  // keep up are not included.
  //
  // There are 3 alphas to "bootstrap" the statistics.  The _saved_alpha is the
  // real value, but is used only after the first period.  A value of 100 is
  // used for the first sample so it gets the entire weight.
  unsigned int _saved_alpha; // 0-100
  unsigned int _gc0_alpha;
  unsigned int _cms_alpha;

  double _gc0_duration;
  double _gc0_period;
  size_t _gc0_promoted;           // bytes promoted per gc0
  double _cms_duration;
  double _cms_duration_pre_sweep; // time from initiation to start of sweep
  double _cms_duration_per_mb;
  double _cms_period;
  size_t _cms_allocated;          // bytes of direct allocation per gc0 period

  // Timers.
  elapsedTimer _cms_timer;
  TimeStamp    _gc0_begin_time;
  TimeStamp    _cms_begin_time;
  TimeStamp    _cms_end_time;

  // Snapshots of the amount used in the CMS generation.
  size_t _cms_used_at_gc0_begin;
  size_t _cms_used_at_gc0_end;
  size_t _cms_used_at_cms_begin;

  // Used to prevent the duty cycle from being reduced in the middle of a cms
  // cycle.
  bool _allow_duty_cycle_reduction;

  enum {
    _GC0_VALID = 0x1,
    _CMS_VALID = 0x2,
    _ALL_VALID = _GC0_VALID | _CMS_VALID
  };

  unsigned int _valid_bits;

  unsigned int _icms_duty_cycle;  // icms duty cycle (0-100).

 protected:

  // Return a duty cycle that avoids wild oscillations, by limiting the amount
  // of change between old_duty_cycle and new_duty_cycle (the latter is treated
  // as a recommended value).
  static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                             unsigned int new_duty_cycle);
  unsigned int icms_update_duty_cycle_impl();

 public:
  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
           unsigned int alpha = CMSExpAvgFactor);

  // Whether or not the statistics contain valid data; higher level statistics
  // cannot be called until this returns true (they require at least one young
  // gen and one cms cycle to have completed).
  bool valid() const;

  // Record statistics.
  void record_gc0_begin();
  void record_gc0_end(size_t cms_gen_bytes_used);
  void record_cms_begin();
  void record_cms_end();

  // Allow management of the cms timer, which must be stopped/started around
  // yield points.
  elapsedTimer& cms_timer()     { return _cms_timer; }
  void start_cms_timer()        { _cms_timer.start(); }
  void stop_cms_timer()         { _cms_timer.stop(); }

  // Basic statistics; units are seconds or bytes.
  double gc0_period() const     { return _gc0_period; }
  double gc0_duration() const   { return _gc0_duration; }
  size_t gc0_promoted() const   { return _gc0_promoted; }
  double cms_period() const     { return _cms_period; }
  double cms_duration() const   { return _cms_duration; }
  double cms_duration_per_mb() const { return _cms_duration_per_mb; }
  size_t cms_allocated() const  { return _cms_allocated; }

  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}

  // Seconds since the last background cms cycle began or ended.
  double cms_time_since_begin() const;
  double cms_time_since_end() const;

  // Higher level statistics--caller must check that valid() returns true before
  // calling.

  // Returns bytes promoted per second of wall clock time.
  double promotion_rate() const;

  // Returns bytes directly allocated per second of wall clock time.
  double cms_allocation_rate() const;

  // Rate at which space in the cms generation is being consumed (sum of the
  // above two).
  double cms_consumption_rate() const;

  // Returns an estimate of the number of seconds until the cms generation will
  // fill up, assuming no collection work is done.
  double time_until_cms_gen_full() const;

  // Returns an estimate of the number of seconds remaining until
  // the cms generation collection should start.
  double time_until_cms_start() const;

  // End of higher level statistics.

  // Returns the cms incremental mode duty cycle, as a percentage (0-100).
  unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }

  // Update the duty cycle and return the new value.
  unsigned int icms_update_duty_cycle();

  // Debugging.
  void print_on(outputStream* st) const PRODUCT_RETURN;
  void print() const { print_on(gclog_or_tty); }
};

// A closure related to weak references processing which
// we embed in the CMSCollector, since we need to pass
// it to the reference processor for secondary filtering
// of references based on reachability of referent;
// see role of _is_alive_non_header closure in the
// ReferenceProcessor class.
// For objects in the CMS generation, this closure checks
// if the object is "live" (reachable). Used in weak
// reference processing.
class CMSIsAliveClosure: public BoolObjectClosure {
  MemRegion        _span;
  const CMSBitMap* _bit_map;

  friend class CMSCollector;
 protected:
  void set_span(MemRegion span) { _span = span; }
 public:
  CMSIsAliveClosure(CMSBitMap* bit_map):
    _bit_map(bit_map) { }

  CMSIsAliveClosure(MemRegion span,
                    CMSBitMap* bit_map):
    _span(span),
    _bit_map(bit_map) { }
  void do_object(oop obj) {
    assert(false, "not to be invoked");
  }
  bool do_object_b(oop obj);
};


// Implements AbstractRefProcTaskExecutor for CMS.
class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 public:

  CMSRefProcTaskExecutor(CMSCollector& collector)
    : _collector(collector)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
 private:
  CMSCollector& _collector;
};


class CMSCollector: public CHeapObj {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweepGeneration;
  friend class CompactibleFreeListSpace;
  friend class CMSParRemarkTask;
  friend class CMSConcMarkingTask;
  friend class CMSRefProcTaskProxy;
  friend class CMSRefProcTaskExecutor;
  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
  friend class PushOrMarkClosure;             // to access _restart_addr
  friend class Par_PushOrMarkClosure;         // to access _restart_addr
  friend class MarkFromRootsClosure;          //  -- ditto --
                                              // ... and for clearing cards
  friend class Par_MarkFromRootsClosure;      //  to access _restart_addr
                                              // ... and for clearing cards
  friend class Par_ConcMarkingClosure;        //  to access _restart_addr etc.
  friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
  friend class PushAndMarkVerifyClosure;      //  -- ditto --
  friend class MarkRefsIntoAndScanClosure;    //  to access _overflow_list
  friend class PushAndMarkClosure;            //  -- ditto --
  friend class Par_PushAndMarkClosure;        //  -- ditto --
  friend class CMSKeepAliveClosure;           //  -- ditto --
  friend class CMSDrainMarkingStackClosure;   //  -- ditto --
  friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) //  assertion on _overflow_list
  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
  friend class VM_CMS_Operation;
  friend class VM_CMS_Initial_Mark;
  friend class VM_CMS_Final_Remark;

 private:
  jlong _time_of_last_gc;
  void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  OopTaskQueueSet* _task_queues;

  // Overflow list of grey objects, threaded through mark-word
  // Manipulated with CAS in the parallel/multi-threaded case.
  oop _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced for accommodating overflow list above.
  // This code will likely be revisited under RFE#4922830.
  GrowableArray<oop>*     _preserved_oop_stack;
  GrowableArray<markOop>* _preserved_mark_stack;

  int* _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static bool _full_gc_requested;
  unsigned int _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }

  // Verification support
  CMSBitMap _verification_mark_bm;
  void verify_after_remark_work_1();
  void verify_after_remark_work_2();

  // true if any verification flag is on.
  bool _verifying;
  bool verifying() const { return _verifying; }
  void set_verifying(bool v) { _verifying = v; }

  // Collector policy
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  // Check whether the gc time limit has been
  // exceeded and set the size policy flag
  // appropriately.
  void check_gc_time_limit();
  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _sweep_timer;
  AdaptivePaddedAverage _sweep_estimate;

 protected:
  ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
  ConcurrentMarkSweepGeneration* _permGen; // perm gen
  MemRegion                      _span;    // span covering above two
  CardTableRS*                   _ct;      // card table

  // CMS marking support structures
  CMSBitMap    _markBitMap;
  CMSBitMap    _modUnionTable;
  CMSMarkStack _markStack;
  CMSMarkStack _revisitStack;             // used to keep track of klassKlass objects
                                          // to revisit
  CMSBitMap    _perm_gen_verify_bit_map;  // Mark bit map for perm gen verification support.

  HeapWord*    _restart_addr;             // in support of marking stack overflow
  void         lower_restart_addr(HeapWord* low);

  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time.
  size_t _ser_pmc_preclean_ovflw;
  size_t _ser_pmc_remark_ovflw;
  size_t _par_pmc_remark_ovflw;
  size_t _ser_kac_ovflw;
  size_t _par_kac_ovflw;
  NOT_PRODUCT(size_t _num_par_pushes;)

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;
  CMSIsAliveClosure   _is_alive_closure;
      // keep this textually after _markBitMap; c'tor dependency

  ConcurrentMarkSweepThread* _cmsThread;  // the thread doing the work
  ModUnionClosure    _modUnionClosure;
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling) = {Marking}
  // next_state(Marking) = {Precleaning, Sweeping}
  // next_state(Precleaning) = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking) = {Sweeping}
  // next_state(Sweeping) = {Resizing}
  // next_state(Resizing) = {Resetting}
  // next_state(Resetting) = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling == post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                            precleaning || abortablePreclean
  enum CollectorState {
    Resizing          = 0,
    Resetting         = 1,
    Idling            = 2,
    InitialMarking    = 3,
    Marking           = 4,
    Precleaning       = 5,
    AbortablePreclean = 6,
    FinalMarking      = 7,
    Sweeping          = 8
  };
  static CollectorState _collectorState;

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signalling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
                                        // wants to go active
  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
                                        // yet passed the baton to the foreground GC

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  size_t _sweep_count;
  // number of full gc's since the last concurrent gc.
  uint   _full_gcs_since_conc_gc;

  // occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats _stats;

  // Allocation limits installed in the young gen, used only in
  // CMSIncrementalMode.  When an allocation in the young gen would cross one of
  // these limits, the cms generation is notified and the cms thread is started
  // or stopped, respectively.
  HeapWord* _icms_start_limit;
  HeapWord* _icms_stop_limit;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues()    { return _task_queues; }
  int*             hash_seed(int i) { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ... sample Eden space top
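
  // Illustrative note (an assumption based on the fields and helpers declared
  // below, not part of the original comment): sample_eden() records successive
  // values of Eden's top in _eden_chunk_array while precleaning runs, so that
  // at remark time Eden can be split at the recorded boundaries into subtasks
  // for parallel rescan (see
  // initialize_sequential_subtasks_for_young_gen_rescan()).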

 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  Generation* _young_gen;           // the younger gen
  HeapWord**  _top_addr;            // ... Top of Eden
  HeapWord**  _end_addr;            // ... End of Eden
  HeapWord**  _eden_chunk_array;    // ... Eden partitioning array
  size_t      _eden_chunk_index;    // ... top (exclusive) of array
  size_t      _eden_chunk_capacity; // ... max entries in array

  // Support for parallelizing survivor space rescan
  HeapWord** _survivor_chunk_array;
  size_t     _survivor_chunk_index;
  size_t     _survivor_chunk_capacity;
  size_t*    _cursor;
  ChunkArray* _survivor_plab_array;

  // Support for marking stack overflow handling
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num, OopTaskQueue* to_work_q);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // the following is, obviously, not, in general, "MT-stable"
  bool overflow_list_is_empty() const;

  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // in support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)     // sequential
  NOT_PRODUCT(bool par_simulate_overflow();) // MT version

  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }

  // CMS work methods
  void checkpointRootsInitialWork(bool asynch); // initial checkpoint work

  // a return value of false indicates failure due to stack overflow
  bool markFromRootsWork(bool asynch); // concurrent marking work

 public:   // FIX ME!!! only for testing
  bool do_marking_st(bool asynch);     // single-threaded marking
  bool do_marking_mt(bool asynch);     // multi-threaded  marking

 private:

  // concurrent precleaning work
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void abortable_preclean(); // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv);
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // final (second) checkpoint work
  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                bool init_mark_was_synchronous);
  // work routine for parallel version of remark
  void do_remark_parallel();
  // work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // reference processing work routine (during second checkpoint)
  void refProcessingWork(bool asynch, bool clear_all_soft_refs);

  // concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);

  // (concurrent) resetting of support data structures
  void reset(bool asynch);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // Resize the generations included in the collector.
  void compute_new_size();

  // A work method used by foreground collection to determine
  // what type of collection (compacting or not, continuing or fresh)
  // it should do.
  void decide_foreground_collection_type(bool clear_all_soft_refs,
    bool* should_compact, bool* should_start_over);

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // A work method used by the foreground collector to do
  // a mark-sweep, after taking over from a possibly on-going
  // concurrent mark-sweep collection.
  void do_mark_sweep_work(bool clear_all_soft_refs,
    CollectorState first_state, bool should_start_over);

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC.  waitForForegroundGC() is called by the background
  // collector.  If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  // Incremental mode triggering:  recompute the icms duty cycle and set the
  // allocation limits in the young gen.
  void icms_update_allocation_limits();

  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               ConcurrentMarkSweepGeneration* permGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock() const { return _markBitMap.lock(); }
  static CollectorState abstract_state() { return _collectorState; }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(bool clear_all_soft_refs);
  void collect_in_foreground(bool clear_all_soft_refs);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  bool update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);

  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  void getFreelistLocks() const;
  void releaseFreelistLocks() const;
  bool haveFreelistLocks() const;

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  jlong time_of_last_gc(jlong now) {
    if (_collectorState <= Idling) {
      // gc not in progress
      return _time_of_last_gc;
    } else {
      // collection in progress
      return now;
    }
  }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num);

  CMSBitMap* markBitMap() { return &_markBitMap; }
  void directAllocated(HeapWord* start, size_t size);

  // main CMS steps and related support
  void checkpointRootsInitial(bool asynch);
  bool markFromRoots(bool asynch);  // a return value of false indicates failure
                                    // due to stack overflow
  void preclean();
  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
                            bool init_mark_was_synchronous);
  void sweep(bool asynch);

  // Check that the currently executing thread is the expected
  // one (foreground collector or background collector).
  void check_correct_thread_executing() PRODUCT_RETURN;
  // XXXPERM void print_statistics() PRODUCT_RETURN;

  bool is_cms_reachable(HeapWord* addr);

  // Performance Counter Support
  CollectorCounters* counters() { return _gc_counters; }

  // timer stuff
  void   startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();   }
  void   stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();    }
  void   resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();   }
  double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }

  int  yields()          { return _numYields; }
  void resetYields()     { _numYields = 0;    }
  void incrementYields() { _numYields++;      }
  void resetNumDirtyCards()               { _numDirtyCards = 0; }
  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
  size_t numDirtyCards()                  { return _numDirtyCards; }

  static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
  static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
  static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
  size_t sweep_count() const     { return _sweep_count; }
  void   increment_sweep_count() { _sweep_count++; }

  // Timers/stats for gc scheduling and incremental mode pacing.
  CMSStats& stats() { return _stats; }

  // Convenience methods that check whether CMSIncrementalMode is enabled and
  // forward to the corresponding methods in ConcurrentMarkSweepThread.
  static void start_icms();
  static void stop_icms();    // Called at the end of the cms cycle.
  static void disable_icms(); // Called before a foreground collection.
  static void enable_icms();  // Called after a foreground collection.
  void icms_wait();           // Called at yield points.

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  // debugging
  void verify(bool);
  bool verify_after_remark();
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;

  // convenience methods in support of debugging
  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;

  // accessors
  CMSMarkStack* verification_mark_stack() { return &_markStack; }
  CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; }

  // Get the bit map with a perm gen "deadness" information.
  CMSBitMap* perm_gen_verify_bit_map() { return &_perm_gen_verify_bit_map; }

  // Initialization errors
  bool completed_initialization() { return _completed_initialization; }
};

class CMSExpansionCause : public AllStatic  {
 public:
  enum Cause {
    _no_expansion,
    _satisfy_free_ratio,
    _satisfy_promotion,
    _satisfy_allocation,
    _allocate_par_lab,
    _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  // Return a string describing the cause of the expansion.
  static const char* to_string(CMSExpansionCause::Cause cause);
};

class ConcurrentMarkSweepGeneration: public CardGeneration {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweep;
  friend class CMSCollector;
 protected:
  static CMSCollector*      _collector; // the collector that collects us
  CompactibleFreeListSpace* _cmsSpace;  // underlying space (only one for now)

  // Performance Counters
  GenerationCounters* _gen_counters;
  GSpaceCounters*     _space_counters;

  // Words directly allocated, used by CMSStats.
  size_t _direct_allocated_words;

  // Non-product stat counters
  NOT_PRODUCT(
    int _numObjectsPromoted;
    int _numWordsPromoted;
    int _numObjectsAllocated;
    int _numWordsAllocated;
  )

  // Used for sizing decisions
  bool _incremental_collection_failed;
  bool incremental_collection_failed() {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // accessors
  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }

 private:
  // For parallel young-gen GC support.
  CMSParGCThreadState** _par_gc_thread_states;

  // Reason generation was expanded
  CMSExpansionCause::Cause _expansion_cause;

  // In support of MinChunkSize being larger than min object size
  const double _dilatation_factor;

  enum CollectionTypes {
    Concurrent_collection_type     = 0,
    MS_foreground_collection_type  = 1,
    MSC_foreground_collection_type = 2,
    Unknown_collection_type        = 3
  };

  CollectionTypes _debug_collection_type;

  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
  double _initiating_occupancy;

 protected:
  // Grow generation by specified size (returns false if unable to grow)
  bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  bool grow_to_reserved();
  // Shrink generation by specified size (returns false if unable to shrink)
  virtual void shrink_by(size_t bytes);

  // Update statistics for GC
  virtual void update_gc_stats(int level, bool full);

  // Maximum available space in the generation (including uncommitted)
  // space.
  size_t max_available() const;

  // getter and initializer for _initiating_occupancy field.
  double initiating_occupancy() const { return _initiating_occupancy; }
  void   init_initiating_occupancy(intx io, intx tr);

 public:
  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                int level, CardTableRS* ct,
                                bool use_adaptive_freelists,
                                FreeBlockDictionary::DictionaryChoice);

  // Accessors
  CMSCollector* collector() const { return _collector; }
  static void set_collector(CMSCollector* collector) {
    assert(_collector == NULL, "already set");
    _collector = collector;
  }
  CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; }

  Mutex* freelistLock() const;

  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt()     const {
    // Note: CMS does MT-discovery during the parallel-remark
    // phases. Use ReferenceProcessorMTMutator to make refs
    // discovery MT-safe during such phases or other parallel
    // discovery phases in the future. This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return false;
  }

  // Override
  virtual void ref_processor_init();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  double occupancy() const { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;

  // over-rides
  MemRegion used_region() const;
  MemRegion used_region_at_save_marks() const;

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well? Note that the second conjunct is a
  // hack to allow the collection of the younger gen first if the flag is
  // set. This is better than using the policy's should_collect_gen0_first()
  // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
  virtual bool full_collects_younger_generations() const {
    return UseCMSCompactAtFullCollection && !CollectGen0First;
  }

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Support for compaction
  CompactibleSpace* first_compaction_space() const;
  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size, oop* ref);
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }

  // Incremental mode triggering.
  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  // Used by CMSStats to track direct allocation.  The value is sampled and
  // reset after each young gen collection.
  size_t direct_allocated_words() const { return _direct_allocated_words; }
  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }

  // Overrides for parallel promotion.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  // This one should not be called for CMS.
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num);
  virtual void par_oop_since_save_marks_iterate_done(int thread_num);

  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
    bool younger_handles_promotion_failure) const;

  bool should_collect(bool full, size_t size, bool tlab);
  virtual bool should_concurrent_collect() const;
  virtual bool is_too_full() const;
  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);

  HeapWord* expand_and_allocate(size_t word_size,
                                bool tlab,
                                bool parallel = false);

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_prologue_work(bool full, bool registerClosure,
                        ModUnionClosure* modUnionClosure);
  void gc_epilogue(bool full);
  void gc_epilogue_work(bool full);

  // Time since last GC of this generation
  jlong time_of_last_gc(jlong now) {
    return collector()->time_of_last_gc(now);
  }
  void update_time_of_last_gc(jlong now) {
    collector()-> update_time_of_last_gc(now);
  }

  // Allocation failure
  void expand(size_t bytes, size_t expand_bytes,
    CMSExpansionCause::Cause cause);
  void shrink(size_t bytes);
  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
  bool expand_and_ensure_spooling_space(PromotionInfo* promo);

  // Iteration support and related enquiries
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);
  void younger_refs_iterate(OopsInGenClosure* cl);

  // Iteration support specific to CMS generations
  void save_sweep_limit();

  // More iteration support
  virtual void oop_iterate(MemRegion mr, OopClosure* cl);
  virtual void oop_iterate(OopClosure* cl);
  virtual void object_iterate(ObjectClosure* cl);

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
  #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)

  // Smart allocation  XXX -- move to CFLSpace?
  void setNearLargestChunk();
  bool isNearLargestChunk(HeapWord* addr);

  // Get the chunk at the end of the space.  Delegates to
  // the space.
  FreeChunk* find_chunk_at_end();

  // Overriding of unused functionality (sharing not yet supported with CMS)
  void pre_adjust_pointers();
  void post_compact();

  // Debugging
  void prepare_for_verify();
  void verify(bool allow_dirty);
  void print_statistics() PRODUCT_RETURN;

  // Performance Counters support
  virtual void update_counters();
  virtual void update_counters(size_t used);
  void initialize_performance_counters();
  CollectorCounters* counters() { return collector()->counters(); }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num) {
    //Delegate to collector
    return collector()->get_data_recorder(thr_num);
  }

  // Printing
  const char* name() const;
  virtual const char* short_name() const { return "CMS"; }
  void        print() const;
  void printOccupancy(const char* s);
  bool must_be_youngest() const { return false; }
  bool must_be_oldest()   const { return true; }

  void compute_new_size();

  CollectionTypes debug_collection_type() { return _debug_collection_type; }
  void rotate_debug_collection_type();
};

class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {

  // Return the size policy from the heap's collector
  // policy casted to CMSAdaptiveSizePolicy*.
  CMSAdaptiveSizePolicy* cms_size_policy() const;

  // Resize the generation based on the adaptive size
  // policy.
  void resize(size_t cur_promo, size_t desired_promo);

  // Return the GC counters from the collector policy
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  virtual void shrink_by(size_t bytes);

 public:
  virtual void compute_new_size();
  ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                  int level, CardTableRS* ct,
                                  bool use_adaptive_freelists,
                                  FreeBlockDictionary::DictionaryChoice
                                    dictionaryChoice) :
    ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
      use_adaptive_freelists, dictionaryChoice) {}

  virtual const char* short_name() const { return "ASCMS"; }
  virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }

  virtual void update_counters();
  virtual void update_counters(size_t used);
};

//
// Closures of various sorts used by CMS to accomplish its work
//

// This closure is used to check that a certain set of oops is empty.
class FalseClosure: public OopClosure {
 public:
  void do_oop(oop* p) {
    guarantee(false, "Should be an empty set");
  }
};

// This closure is used to do concurrent marking from the roots
// following the first checkpoint.
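//
// Illustrative note (an assumption for exposition, not part of the original
// comment): do_bit(offset) is invoked for each set bit in the marking bit map
// at or beyond the current _finger; the closure scans the corresponding
// object's fields, marking newly discovered objects and pushing those that lie
// behind the finger onto _markStack so they get scanned later, yielding
// periodically when _yield is set.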
class MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _bitMap;
  CMSBitMap*     _mut;
  CMSMarkStack*  _markStack;
  CMSMarkStack*  _revisitStack;
  bool           _yield;
  int            _skipBits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  DEBUG_ONLY(bool _verifying;)

 public:
  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bitMap,
                       CMSMarkStack*  markStack,
                       CMSMarkStack*  revisitStack,
                       bool should_yield, bool verifying = false);
  void do_bit(size_t offset);
  void reset(HeapWord* addr);
  inline void do_yield_check();

 private:
  void scanOopsInOop(HeapWord* ptr);
  void do_yield_work();
};

// This closure is used to do concurrent multi-threaded
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of the serial version
// above, but I have not had the time to refactor things cleanly.
// That will be done for Dolphin.
class Par_MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _whole_span;
  MemRegion      _span;
  CMSBitMap*     _bit_map;
  CMSBitMap*     _mut;
  OopTaskQueue*  _work_queue;
  CMSMarkStack*  _overflow_stack;
  CMSMarkStack*  _revisit_stack;
  bool           _yield;
  int            _skip_bits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  CMSConcMarkingTask* _task;
 public:
  Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
                           MemRegion span,
                           CMSBitMap* bit_map,
                           OopTaskQueue* work_queue,
                           CMSMarkStack*  overflow_stack,
                           CMSMarkStack*  revisit_stack,
                           bool should_yield);
  void do_bit(size_t offset);
  inline void do_yield_check();

 private:
  void scan_oops_in_oop(HeapWord* ptr);
  void do_yield_work();
  bool get_work_from_overflow_stack();
};

// The following closures are used to do certain kinds of verification of
// CMS marking.
class PushAndMarkVerifyClosure: public OopClosure {
  CMSCollector*    _collector;
  MemRegion        _span;
  CMSBitMap*       _verification_bm;
  CMSBitMap*       _cms_bm;
  CMSMarkStack*    _mark_stack;
 public:
  PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                           MemRegion span,
                           CMSBitMap* verification_bm,
                           CMSBitMap* cms_bm,
                           CMSMarkStack* mark_stack);
  void do_oop(oop* p);
  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
};

class MarkFromRootsVerifyClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
  HeapWord*      _finger;
  PushAndMarkVerifyClosure _pam_verify_closure;
 public:
  MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
                             CMSBitMap* verification_bm,
                             CMSBitMap* cms_bm,
                             CMSMarkStack* mark_stack);
  void do_bit(size_t offset);
  void reset(HeapWord* addr);
};


// This closure is used to check that a certain set of bits is
// "empty" (i.e. the bit vector doesn't have any 1-bits).
class FalseBitMapClosure: public BitMapClosure {
 public:
  void do_bit(size_t offset) {
    guarantee(false, "Should not have a 1 bit");
  }
};

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It's invoked via
// MarkFromDirtyCardsClosure below. It uses either
// [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
// declared in genOopClosures.hpp to accomplish some of its work.
// In the parallel case the bitMap is shared, so access to
// it needs to be suitably synchronized for updates by embedded
// closures that update it; however, this closure itself only
// reads the bit_map and because it is idempotent, is immune to
// reading stale values.
class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
  #ifdef ASSERT
    CMSCollector*   _collector;
    MemRegion       _span;
    union {
      CMSMarkStack* _mark_stack;
      OopTaskQueue* _work_queue;
    };
  #endif // ASSERT
  bool       _parallel;
  CMSBitMap* _bit_map;
  union {
    MarkRefsIntoAndScanClosure*     _scan_closure;
    Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
  };

 public:
  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                CMSMarkStack*  mark_stack,
                                CMSMarkStack*  revisit_stack,
                                MarkRefsIntoAndScanClosure* cl):
    #ifdef ASSERT
      _collector(collector),
      _span(span),
      _mark_stack(mark_stack),
    #endif // ASSERT
    _parallel(false),
    _bit_map(bit_map),
    _scan_closure(cl) { }

  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                OopTaskQueue* work_queue,
                                CMSMarkStack* revisit_stack,
                                Par_MarkRefsIntoAndScanClosure* cl):
    #ifdef ASSERT
      _collector(collector),
      _span(span),
      _work_queue(work_queue),
    #endif // ASSERT
    _parallel(true),
    _bit_map(bit_map),
    _par_scan_closure(cl) { }

  void do_object(oop obj) {
    guarantee(false, "Call do_object_b(oop, MemRegion) instead");
  }
  bool do_object_b(oop obj) {
    guarantee(false, "Call do_object_b(oop, MemRegion) form instead");
    return false;
  }
  bool do_object_bm(oop p, MemRegion mr);
};

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It invokes
// ScanMarkedObjectsAgainClosure above to accomplish much of its work.
// In the parallel case, the bit map is shared and requires
// synchronized access.
class MarkFromDirtyCardsClosure: public MemRegionClosure {
  CompactibleFreeListSpace*     _space;
  ScanMarkedObjectsAgainClosure _scan_cl;
  size_t                        _num_dirty_cards;

 public:
  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            CMSMarkStack* mark_stack,
                            CMSMarkStack* revisit_stack,
                            MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             mark_stack, revisit_stack, cl) { }

  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            OopTaskQueue* work_queue,
                            CMSMarkStack* revisit_stack,
                            Par_MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             work_queue, revisit_stack, cl) { }

  void do_MemRegion(MemRegion mr);
  void set_space(CompactibleFreeListSpace* space) { _space = space; }
  size_t num_dirty_cards() { return _num_dirty_cards; }
};

// This closure is used in the non-product build to check
// that there are no MemRegions with a certain property.
class FalseMemRegionClosure: public MemRegionClosure {
  void do_MemRegion(MemRegion mr) {
    guarantee(!mr.is_empty(), "Shouldn't be empty");
    guarantee(false, "Should never be here");
  }
};

// This closure is used during the precleaning phase
// to "carefully" rescan marked objects on dirty cards.
// It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
// to accomplish some of its work.
class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
  CMSCollector*               _collector;
  MemRegion                   _span;
  bool                        _yield;
  Mutex*                      _freelistLock;
  CMSBitMap*                  _bitMap;
  CMSMarkStack*               _markStack;
  MarkRefsIntoAndScanClosure* _scanningClosure;

 public:
  ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
                                         MemRegion     span,
                                         CMSBitMap* bitMap,
                                         CMSMarkStack*  markStack,
                                         CMSMarkStack*  revisitStack,
                                         MarkRefsIntoAndScanClosure* cl,
                                         bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bitMap(bitMap),
    _markStack(markStack),
    _scanningClosure(cl) {
  }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

  size_t do_object_careful_m(oop p, MemRegion mr);

  void setFreelistLock(Mutex* m) {
    _freelistLock = m;
    _scanningClosure->set_freelistLock(m);
  }

 private:
  inline bool do_yield_check();

  void do_yield_work();
};

class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
  CMSCollector*       _collector;
  MemRegion           _span;
  bool                _yield;
  CMSBitMap*          _bit_map;
  CMSMarkStack*       _mark_stack;
  PushAndMarkClosure* _scanning_closure;
  unsigned int        _before_count;

 public:
  SurvivorSpacePrecleanClosure(CMSCollector* collector,
                               MemRegion     span,
                               CMSBitMap*    bit_map,
                               CMSMarkStack* mark_stack,
                               PushAndMarkClosure* cl,
                               unsigned int  before_count,
                               bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
// Like ScanMarkedObjectsAgainCarefullyClosure above, but used to
// preclean objects in the survivor spaces.
class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
  CMSCollector*       _collector;
  MemRegion           _span;
  bool                _yield;
  CMSBitMap*          _bit_map;
  CMSMarkStack*       _mark_stack;
  PushAndMarkClosure* _scanning_closure;
  unsigned int        _before_count;

 public:
  SurvivorSpacePrecleanClosure(CMSCollector* collector,
                               MemRegion span,
                               CMSBitMap* bit_map,
                               CMSMarkStack* mark_stack,
                               PushAndMarkClosure* cl,
                               unsigned int before_count,
                               bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _scanning_closure(cl),
    _before_count(before_count)
  { }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p);

  size_t do_object_careful_m(oop p, MemRegion mr) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
};

// This closure is used to accomplish the sweeping work
// after the second checkpoint but before the concurrent reset
// phase.
//
// Terminology
//   left hand chunk (LHC) - block of one or more chunks currently being
//     coalesced. The LHC is available for coalescing with a new chunk.
//   right hand chunk (RHC) - the block currently being swept, which is
//     free or garbage and can be coalesced with the LHC.
// _inFreeRange is true if there is currently a LHC.
// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
// _freeRangeInFreeLists is true if the LHC is in the free lists.
// _freeFinger is the address of the current LHC.
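//
// The sketch below is an illustration only, not the collector's code: it
// gives a rough picture of how the flags named above evolve as the sweep
// walks the space from left to right, using the accessors and helpers
// declared in the class that follows. The predicates "chunk_is_free",
// "chunk_is_garbage" and "chunk_is_live", and the locals "addr" and "size",
// are assumed stand-ins for the real tests done while sweeping.
//
//   if (!inFreeRange() && (chunk_is_free || chunk_is_garbage)) {
//     // Start a new left hand chunk at this address.
//     initialize_free_range(addr, chunk_is_free /* already on free lists */);
//   } else if (inFreeRange() && (chunk_is_free || chunk_is_garbage)) {
//     // Coalesce the right hand chunk into the growing LHC.
//     set_lastFreeRangeCoalesced(true);
//   } else if (inFreeRange() && chunk_is_live) {
//     // A live object ends the free run: return the LHC to the free lists.
//     flushCurFreeChunk(freeFinger(), freeRangeSize());
//     set_inFreeRange(false);
//   }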
class SweepClosure: public BlkClosureCareful {
  CMSCollector*                  _collector;    // collector doing the work
  ConcurrentMarkSweepGeneration* _g;            // Generation being swept
  CompactibleFreeListSpace*      _sp;           // Space being swept
  HeapWord*                      _limit;
  Mutex*                         _freelistLock; // Free list lock (in space)
  CMSBitMap*                     _bitMap;       // Marking bit map (in generation)
  bool                           _inFreeRange;  // Indicates if we are in the
                                                // midst of a free run
  bool                           _freeRangeInFreeLists;
                                        // Often, we have just found
                                        // a free chunk and started
                                        // a new free range; we do not
                                        // eagerly remove this chunk from
                                        // the free lists unless there is
                                        // a possibility of coalescing.
                                        // When true, this flag indicates
                                        // that the _freeFinger below
                                        // points to a potentially free chunk
                                        // that may still be in the free lists
  bool                           _lastFreeRangeCoalesced;
                                        // free range contains chunks
                                        // coalesced
  bool                           _yield;
                                        // Whether sweeping should be
                                        // done with yields. For instance
                                        // when done by the foreground
                                        // collector we shouldn't yield.
  HeapWord*                      _freeFinger;   // When _inFreeRange is set,
                                                // the pointer to the "left
                                                // hand chunk"
  size_t                         _freeRangeSize;
                                        // When _inFreeRange is set, this
                                        // indicates the accumulated size
                                        // of the "left hand chunk"
  NOT_PRODUCT(
    size_t                       _numObjectsFreed;
    size_t                       _numWordsFreed;
    size_t                       _numObjectsLive;
    size_t                       _numWordsLive;
    size_t                       _numObjectsAlreadyFree;
    size_t                       _numWordsAlreadyFree;
    FreeChunk*                   _last_fc;
  )
 private:
  // Code that is common to a free chunk or garbage when
  // encountered during sweeping.
  void doPostIsFreeOrGarbageChunk(FreeChunk* fc, size_t chunkSize);
  // Process a free chunk during sweeping.
  void doAlreadyFreeChunk(FreeChunk* fc);
  // Process a garbage chunk during sweeping.
  size_t doGarbageChunk(FreeChunk* fc);
  // Process a live chunk during sweeping.
  size_t doLiveChunk(FreeChunk* fc);

  // Accessors.
  HeapWord* freeFinger() const            { return _freeFinger; }
  void set_freeFinger(HeapWord* v)        { _freeFinger = v; }
  size_t freeRangeSize() const            { return _freeRangeSize; }
  void set_freeRangeSize(size_t v)        { _freeRangeSize = v; }
  bool inFreeRange() const                { return _inFreeRange; }
  void set_inFreeRange(bool v)            { _inFreeRange = v; }
  bool lastFreeRangeCoalesced() const     { return _lastFreeRangeCoalesced; }
  void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
  bool freeRangeInFreeLists() const       { return _freeRangeInFreeLists; }
  void set_freeRangeInFreeLists(bool v)   { _freeRangeInFreeLists = v; }

  // Initialize a free range.
  void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
  // Return this chunk to the free lists.
  void flushCurFreeChunk(HeapWord* chunk, size_t size);

  // Check if we should yield and do so when necessary.
  inline void do_yield_check(HeapWord* addr);

  // Yield.
  void do_yield_work(HeapWord* addr);

  // Debugging/Printing
  void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;

 public:
  SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
               CMSBitMap* bitMap, bool should_yield);
  ~SweepClosure();

  size_t do_blk_careful(HeapWord* addr);
};

// Closures related to weak references processing

// During CMS' weak reference processing, this is a
// work-routine/closure used to complete transitive
// marking of objects as live after an initial set of
// live objects has been completely accumulated.
class CMSDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*        _collector;
  MemRegion            _span;
  CMSMarkStack*        _mark_stack;
  CMSBitMap*           _bit_map;
  CMSKeepAliveClosure* _keep_alive;
 public:
  CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
                              CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                              CMSKeepAliveClosure* keep_alive):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _keep_alive(keep_alive) { }

  void do_void();
};

// A parallel version of CMSDrainMarkingStackClosure above.
class CMSParDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*                 _collector;
  MemRegion                     _span;
  OopTaskQueue*                 _work_queue;
  CMSBitMap*                    _bit_map;
  CMSInnerParMarkAndPushClosure _mark_and_push;

 public:
  CMSParDrainMarkingStackClosure(CMSCollector* collector,
                                 MemRegion span, CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _work_queue(work_queue),
    _mark_and_push(collector, span, bit_map, work_queue) { }

  void trim_queue(uint max);
  void do_void();
};
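
// Illustrative sketch only (the actual call sites are in the CMSCollector's
// reference-processing code): the drain closures above are typically paired
// with a keep-alive closure and handed to the reference processor, which
// calls do_void() whenever the set of objects kept alive so far needs to be
// closed under transitive marking. The locals "collector", "span", "bit_map",
// "mark_stack" and "keep_alive" are assumed to be set up by the caller.
//
//   CMSDrainMarkingStackClosure complete_gc(collector, span, bit_map,
//                                           mark_stack, keep_alive);
//   ...
//   complete_gc.do_void();  // drain the marking stack, marking transitively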
// Allow yielding or short-circuiting of reference list
// precleaning work.
class CMSPrecleanRefsYieldClosure: public YieldClosure {
  CMSCollector* _collector;
  void do_yield_work();
 public:
  CMSPrecleanRefsYieldClosure(CMSCollector* collector):
    _collector(collector) {}
  virtual bool should_return();
};


// Convenience class that locks the free list locks of a given CMS collector.
class FreelistLocker: public StackObj {
 private:
  CMSCollector* _collector;
 public:
  FreelistLocker(CMSCollector* collector):
    _collector(collector) {
    _collector->getFreelistLocks();
  }

  ~FreelistLocker() {
    _collector->releaseFreelistLocks();
  }
};

// Mark all dead objects in a given space.
class MarkDeadObjectsClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  CMSBitMap*                      _live_bit_map;
  CMSBitMap*                      _dead_bit_map;
 public:
  MarkDeadObjectsClosure(const CMSCollector* collector,
                         const CompactibleFreeListSpace* sp,
                         CMSBitMap* live_bit_map,
                         CMSBitMap* dead_bit_map) :
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _dead_bit_map(dead_bit_map) {}
  size_t do_blk(HeapWord* addr);
};
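
// Illustrative sketch only (not an actual call site): the closure above is
// constructed from a live and a dead bit map and then applied block by block
// to an address range of the space; do_blk() is assumed, as with other block
// closures, to return the size in heap words of the block it just examined,
// so the caller can step to the next block. The locals "collector", "cfls",
// "live_bm", "dead_bm", "bottom" and "top" are assumed.
//
//   MarkDeadObjectsClosure mdo(collector, cfls, live_bm, dead_bm);
//   for (HeapWord* cur = bottom; cur < top; ) {
//     cur += mdo.do_blk(cur);
//   }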