--- 1.300 ---
Merged changes between child workspace "/net/spot/workspaces/ysr/cms_bugs" and
 parent workspace "/net/jano2/export2/hotspot/ws/main/gc_baseline".
--- 1.297.1.1 ---
6621144 CMS: assertion failure "is_cms_thread == Thread::current()->is_ConcurrentGC_thread()"

*** 223,232 ****
--- 223,260 ----
    // note that all arithmetic is in units of HeapWords.
    assert(MinChunkSize >= oopDesc::header_size(), "just checking");
    assert(_dilatation_factor >= 1.0, "from previous assert");
  }
+ 
+ // The field "_initiating_occupancy" represents the occupancy percentage
+ // at which we trigger a new collection cycle.  Unless explicitly specified
+ // via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
+ // is calculated by:
+ //
+ //   Let "f" be MinHeapFreeRatio in
+ //
+ //    _initiating_occupancy = 100-f +
+ //                            f * (CMSTrigger[Perm]Ratio/100)
+ //   where CMSTrigger[Perm]Ratio is the argument "tr" below.
+ //
+ // That is, if we assume the heap is at its desired maximum occupancy at the
+ // end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
+ // space be allocated before initiating a new collection cycle.
+ //
+ void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
+   assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
+   if (io >= 0) {
+     _initiating_occupancy = (double)io / 100.0;
+   } else {
+     _initiating_occupancy = ((100 - MinHeapFreeRatio) +
+                              (double)(tr * MinHeapFreeRatio) / 100.0)
+                             / 100.0;
+   }
+ }
+ 
  void ConcurrentMarkSweepGeneration::ref_processor_init() {
    assert(collector() != NULL, "no collector");
    collector()->ref_processor_init();
  }
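As a sanity check on the formula above, here is a small standalone sketch (not HotSpot code; the helper name is mine, and the use of the common defaults MinHeapFreeRatio = 40 and CMSTriggerRatio = 80 is an illustrative assumption):

    #include <cstdio>

    // Mirrors init_initiating_occupancy(): an explicit io in [0,100] wins,
    // otherwise the value is derived from MinHeapFreeRatio ("f") and the
    // trigger ratio ("tr").
    static double initiating_occupancy(long io, long tr, long min_heap_free_ratio) {
      if (io >= 0) {
        return (double)io / 100.0;
      }
      return ((100 - min_heap_free_ratio) +
              (double)(tr * min_heap_free_ratio) / 100.0) / 100.0;
    }

    int main() {
      // (100 - 40) + 80 * 40 / 100 = 60 + 32 = 92, i.e. 0.92
      std::printf("%.2f\n", initiating_occupancy(-1, 80, 40));
      // An explicit CMSInitiatingOccupancyFraction=70 simply yields 0.70.
      std::printf("%.2f\n", initiating_occupancy(70, 80, 40));
      return 0;
    }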
*** 518,529 ****
    _icms_start_limit(NULL),
    _icms_stop_limit(NULL),
    _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
    _completed_initialization(false),
    _collector_policy(cp),
!   _unload_classes(false),
!   _unloaded_classes_last_cycle(false),
    _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
  {
    if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
      ExplicitGCInvokesConcurrent = true;
    }
--- 546,558 ----
    _icms_start_limit(NULL),
    _icms_stop_limit(NULL),
    _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
    _completed_initialization(false),
    _collector_policy(cp),
!   _should_unload_classes(false),
!   _concurrent_cycles_since_last_unload(0),
!   _sweep_count(0),
    _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
  {
    if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
      ExplicitGCInvokesConcurrent = true;
    }
*** 640,669 ****
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }
!   // "initiatingOccupancy" is the occupancy ratio at which we trigger
!   // a new collection cycle.  Unless explicitly specified via
!   // CMSTriggerRatio, it is calculated by:
!   //   Let "f" be MinHeapFreeRatio in
!   //
!   //    intiatingOccupancy = 100-f +
!   //                         f * (CMSTriggerRatio/100)
!   // That is, if we assume the heap is at its desired maximum occupancy at the
!   // end of a collection, we let CMSTriggerRatio of the (purported) free
!   // space be allocated before initiating a new collection cycle.
!   if (CMSInitiatingOccupancyFraction > 0) {
!     _initiatingOccupancy = (double)CMSInitiatingOccupancyFraction / 100.0;
!   } else {
!     _initiatingOccupancy = ((100 - MinHeapFreeRatio) +
!                            (double)(CMSTriggerRatio *
!                                     MinHeapFreeRatio) / 100.0)
!                           / 100.0;
!   }
    // Clip CMSBootstrapOccupancy between 0 and 100.
!   _bootstrap_occupancy = ((double)MIN2((intx)100, MAX2((intx)0, CMSBootstrapOccupancy))) /(double)100;
    _full_gcs_since_conc_gc = 0;
  
    // Now tell CMS generations the identity of their collector
--- 669,683 ----
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }
!   _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
!   _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);
!   // Clip CMSBootstrapOccupancy between 0 and 100.
!   _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy))) /(double)100;
    _full_gcs_since_conc_gc = 0;
  
    // Now tell CMS generations the identity of their collector
*** 1411,1421 ****
    gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT, _cmsGen->contiguous_available());
    gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
    gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
    gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
!   gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", initiatingOccupancy());
  }
  
  // ------------------------------------------------------------------
  // If the estimated time to complete a cms collection (cms_duration())
  // is less than the estimated time remaining until the cms generation
--- 1425,1436 ----
    gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT, _cmsGen->contiguous_available());
    gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
    gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
    gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
!   gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
!   gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
  }
  
  // ------------------------------------------------------------------
  // If the estimated time to complete a cms collection (cms_duration())
  // is less than the estimated time remaining until the cms generation
*** 1444,1504 ****
    // Otherwise, we start a collection cycle if either the perm gen or
    // old gen want a collection cycle started. Each may use
    // an appropriate criterion for making this decision.
    // XXX We need to make sure that the gen expansion
!   // criterion dovetails well with this.
!   if (_cmsGen->shouldConcurrentCollect(initiatingOccupancy())) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print_cr("CMS old gen initiated");
      }
      return true;
    }
  
!   if (cms_should_unload_classes() &&
!       _permGen->shouldConcurrentCollect(initiatingOccupancy())) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print_cr("CMS perm gen initiated");
      }
      return true;
    }
  
!   return false;
  }
  
  // Clear _expansion_cause fields of constituent generations
  void CMSCollector::clear_expansion_cause() {
    _cmsGen->clear_expansion_cause();
    _permGen->clear_expansion_cause();
  }
  
! bool ConcurrentMarkSweepGeneration::shouldConcurrentCollect(
!   double initiatingOccupancy) {
!   // We should be conservative in starting a collection cycle. To
!   // start too eagerly runs the risk of collecting too often in the
!   // extreme. To collect too rarely falls back on full collections,
!   // which works, even if not optimum in terms of concurrent work.
!   // As a work around for too eagerly collecting, use the flag
!   // UseCMSInitiatingOccupancyOnly. This also has the advantage of
!   // giving the user an easily understandable way of controlling the
!   // collections.
!   // We want to start a new collection cycle if any of the following
!   // conditions hold:
!   // . our current occupancy exceeds the initiating occupancy, or
!   // . we recently needed to expand and have not since that expansion,
!   //   collected, or
!   // . we are not using adaptive free lists and linear allocation is
!   //   going to fail, or
!   // . (for old gen) incremental collection has already failed or
!   //   may soon fail in the near future as we may not be able to absorb
!   //   promotions.
!   assert_lock_strong(freelistLock());
!   if (occupancy() > initiatingOccupancy) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
!         short_name(), occupancy(), initiatingOccupancy);
      }
      return true;
    }
    if (UseCMSInitiatingOccupancyOnly) {
      return false;
--- 1459,1537 ----
    // Otherwise, we start a collection cycle if either the perm gen or
    // old gen want a collection cycle started. Each may use
    // an appropriate criterion for making this decision.
    // XXX We need to make sure that the gen expansion
!   // criterion dovetails well with this. XXX NEED TO FIX THIS
!   if (_cmsGen->should_concurrent_collect()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print_cr("CMS old gen initiated");
      }
      return true;
    }
  
!   // We start a collection if we believe an incremental collection may fail;
!   // this is not likely to be productive in practice because it's probably too
!   // late anyway.
!   GenCollectedHeap* gch = GenCollectedHeap::heap();
!   assert(gch->collector_policy()->is_two_generation_policy(),
!          "You may want to check the correctness of the following");
!   if (gch->incremental_collection_will_fail()) {
!     if (PrintGCDetails && Verbose) {
!       gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
!     }
!     return true;
!   }
!
!   if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
!     bool res = update_should_unload_classes();
!     if (res) {
        if (Verbose && PrintGCDetails) {
          gclog_or_tty->print_cr("CMS perm gen initiated");
        }
        return true;
      }
!   }
  
    return false;
  }
  
  // Clear _expansion_cause fields of constituent generations
  void CMSCollector::clear_expansion_cause() {
    _cmsGen->clear_expansion_cause();
    _permGen->clear_expansion_cause();
  }
  
! // We should be conservative in starting a collection cycle. To
! // start too eagerly runs the risk of collecting too often in the
! // extreme. To collect too rarely falls back on full collections,
! // which works, even if not optimum in terms of concurrent work.
! // As a work around for too eagerly collecting, use the flag
! // UseCMSInitiatingOccupancyOnly. This also has the advantage of
! // giving the user an easily understandable way of controlling the
! // collections.
! // We want to start a new collection cycle if any of the following
! // conditions hold:
! // . our current occupancy exceeds the configured initiating occupancy
! //   for this generation, or
! // . we recently needed to expand this space and have not, since that
! //   expansion, done a collection of this generation, or
! // . the underlying space believes that it may be a good idea to initiate
! //   a concurrent collection (this may be based on criteria such as the
! //   following: the space uses linear allocation and linear allocation is
! //   going to fail, or there is believed to be excessive fragmentation in
! //   the generation, etc... or ...
! // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
! //   the case of the old generation, not the perm generation; see CR 6543076):
! //   we may be approaching a point at which allocation requests may fail because
! //   we will be out of sufficient free space given allocation rate estimates.]
! bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
!   assert_lock_strong(freelistLock());
!   if (occupancy() > initiating_occupancy()) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
!         short_name(), occupancy(), initiating_occupancy());
      }
      return true;
    }
    if (UseCMSInitiatingOccupancyOnly) {
      return false;
*** 1508,1535 ****
      gclog_or_tty->print(" %s: collect because expanded for allocation ", short_name());
    }
    return true;
  }
! GenCollectedHeap* gch = GenCollectedHeap::heap();
! assert(gch->collector_policy()->is_two_generation_policy(),
!        "You may want to check the correctness of the following");
! if (gch->incremental_collection_will_fail()) {
    if (PrintGCDetails && Verbose) {
!     gclog_or_tty->print(" %s: collect because incremental collection will fail ", short_name());
    }
    return true;
  }
- if (!_cmsSpace->adaptive_freelists() &&
-     _cmsSpace->linearAllocationWouldFail()) {
-   if (PrintGCDetails && Verbose) {
-     gclog_or_tty->print(" %s: collect because of linAB ",
-       short_name());
-   }
-   return true;
- }
  return false;
  }
  
  void ConcurrentMarkSweepGeneration::collect(bool full, bool clear_all_soft_refs,
--- 1541,1557 ----
      gclog_or_tty->print(" %s: collect because expanded for allocation ", short_name());
    }
    return true;
  }
! if (_cmsSpace->should_concurrent_collect()) {
    if (PrintGCDetails && Verbose) {
!     gclog_or_tty->print(" %s: collect because cmsSpace says so ", short_name());
    }
    return true;
  }
  return false;
  }
  
  void ConcurrentMarkSweepGeneration::collect(bool full, bool clear_all_soft_refs,
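Taken together, the reworked per-generation predicate reduces to a short chain of checks. The sketch below restates it with explicit parameters for readability; the struct and parameter names are illustrative assumptions, not the real HotSpot types (in HotSpot the inputs come from the generation, its space, and the VM flags):

    // Simplified restatement of ConcurrentMarkSweepGeneration::should_concurrent_collect().
    struct GenState {
      double occupancy;                 // current occupancy fraction
      double initiating_occupancy;      // from init_initiating_occupancy()
      bool   expanded_for_allocation;   // expanded since the last collection of this gen
      bool   space_wants_collection;    // cmsSpace()->should_concurrent_collect()
    };

    static bool should_concurrent_collect(const GenState& g,
                                          bool use_initiating_occupancy_only) {
      if (g.occupancy > g.initiating_occupancy) return true;   // primary trigger
      if (use_initiating_occupancy_only)        return false;  // occupancy is the only criterion
      if (g.expanded_for_allocation)            return true;   // expanded, not yet collected
      return g.space_wants_collection;                         // "cmsSpace says so"
    }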
*** 1968,1989 ****
    _collectorState = Resetting;
    assert(_restart_addr == NULL, "Should have been NULL'd before baton was passed");
    reset(false /* == !asynch */);
    _cmsGen->reset_after_compaction();
  
!   if (verifying() && !cms_should_unload_classes()) {
      perm_gen_verify_bit_map()->clear_all();
    }
  
    // Clear any data recorded in the PLAB chunk arrays.
    if (_survivor_plab_array != NULL) {
      reset_survivor_plab_arrays();
    }
  
    // Adjust the per-size allocation stats for the next epoch.
!   _cmsGen->cmsSpace()->endSweepFLCensus(sweepCount() /* fake */);
    // Restart the "sweep timer" for next epoch.
    _sweep_timer.reset();
    _sweep_timer.start();
  
    // Sample collection pause time and reset for collection interval.
--- 1990,2012 ----
    _collectorState = Resetting;
    assert(_restart_addr == NULL, "Should have been NULL'd before baton was passed");
    reset(false /* == !asynch */);
    _cmsGen->reset_after_compaction();
+   _concurrent_cycles_since_last_unload = 0;
  
!   if (verifying() && !should_unload_classes()) {
      perm_gen_verify_bit_map()->clear_all();
    }
  
    // Clear any data recorded in the PLAB chunk arrays.
    if (_survivor_plab_array != NULL) {
      reset_survivor_plab_arrays();
    }
  
    // Adjust the per-size allocation stats for the next epoch.
!   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
    // Restart the "sweep timer" for next epoch.
    _sweep_timer.reset();
    _sweep_timer.start();
  
    // Sample collection pause time and reset for collection interval.
*** 2096,2105 ****
--- 2119,2129 ----
    GenCollectedHeap* gch = GenCollectedHeap::heap();
  
    {
      bool safepoint_check = Mutex::_no_safepoint_check_flag;
      MutexLockerEx hl(Heap_lock, safepoint_check);
+     FreelistLocker fll(this);
      MutexLockerEx x(CGC_lock, safepoint_check);
      if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
        // The foreground collector is active or we're
        // not using asynchronous collections.  Skip this
        // background collection.
*** 2110,2126 ****
      _collectorState = InitialMarking;
      // Reset the expansion cause, now that we are about to begin
      // a new cycle.
      clear_expansion_cause();
    }
!   _unloaded_classes_last_cycle = cms_should_unload_classes(); // ... from last cycle
!   // This controls class unloading in response to an explicit gc request.
!   // If ExplicitGCInvokesConcurrentAndUnloadsClasses is set, then
!   // we will unload classes even if CMSClassUnloadingEnabled is not set.
!   // See CR 6541037 and related CRs.
!   _unload_classes = _full_gc_requested                      // ... for this cycle
!                     && ExplicitGCInvokesConcurrentAndUnloadsClasses;
    _full_gc_requested = false;   // acks all outstanding full gc requests
    // Signal that we are about to start a collection
    gch->increment_total_full_collections();  // ... starting a collection cycle
    _collection_count_start = gch->total_full_collections();
  }
--- 2134,2146 ----
      _collectorState = InitialMarking;
      // Reset the expansion cause, now that we are about to begin
      // a new cycle.
      clear_expansion_cause();
    }
!   // Decide if we want to enable class unloading as part of the
!   // ensuing concurrent GC cycle.
!   update_should_unload_classes();
    _full_gc_requested = false;   // acks all outstanding full gc requests
    // Signal that we are about to start a collection
    gch->increment_total_full_collections();  // ... starting a collection cycle
    _collection_count_start = gch->total_full_collections();
  }
*** 3045,3069 ****
    assert(overflow_list_is_empty(), "Overflow list should be empty");
    assert(no_preserved_marks(), "No preserved marks");
  }
  #endif // PRODUCT
  
  void CMSCollector::setup_cms_unloading_and_verification_state() {
    const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC || VerifyBeforeExit;
    const int  rso           = SharedHeap::SO_Symbols | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
  
!   if (cms_should_unload_classes()) {   // Should unload classes this cycle
      remove_root_scanning_option(rso);  // Shrink the root set appropriately
      set_verifying(should_verify);      // Set verification state for this cycle
      return;                            // Nothing else needs to be done at this time
    }
  
    // Not unloading classes this cycle
!   assert(!cms_should_unload_classes(), "Inconsitency!");
!   if ((!verifying() || cms_unloaded_classes_last_cycle()) && should_verify) {
      // We were not verifying, or we _were_ unloading classes in the last cycle,
      // AND some verification options are enabled this cycle; in this case,
      // we must make sure that the deadness map is allocated if not already so,
      // and cleared (if already allocated previously --
      // CMSBitMap::sizeInBits() is used to determine if it's allocated).
--- 3065,3130 ----
    assert(overflow_list_is_empty(), "Overflow list should be empty");
    assert(no_preserved_marks(), "No preserved marks");
  }
  #endif // PRODUCT
  
+ // Decide if we want to enable class unloading as part of the
+ // ensuing concurrent GC cycle. We will collect the perm gen and
+ // unload classes if it's the case that:
+ // (1) an explicit gc request has been made and the flag
+ //     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
+ // (2) (a) class unloading is enabled at the command line, and
+ //     (b) (i)   perm gen threshold has been crossed, or
+ //         (ii)  old gen is getting really full, or
+ //         (iii) the previous N CMS collections did not collect the
+ //               perm gen
+ // NOTE: Provided there is no change in the state of the heap between
+ // calls to this method, it should have idempotent results. Moreover,
+ // its results should be monotonically increasing (i.e. going from 0 to 1,
+ // but not 1 to 0) between successive calls between which the heap was
+ // not collected. For the implementation below, it must thus rely on
+ // the property that concurrent_cycles_since_last_unload()
+ // will not decrease unless a collection cycle happened and that
+ // _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
+ // themselves also monotonic in that sense. See check_monotonicity()
+ // below.
+ bool CMSCollector::update_should_unload_classes() {
+   // Condition 1 above
+   if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
+     _should_unload_classes = true;
+   } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
+     // Disjuncts 2.b.(i,ii,iii) above
+     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
+                               CMSClassUnloadingMaxInterval)
+                            || _permGen->should_concurrent_collect()
+                            || _cmsGen->is_too_full();
+   }
+   return _should_unload_classes;
+ }
+ 
+ bool ConcurrentMarkSweepGeneration::is_too_full() const {
+   bool res = should_concurrent_collect();
+ #define CMSIsTooFullPercentage 98
+   res = res && occupancy() > (double)CMSIsTooFullPercentage/100.0;
+   return res;
+ }
+ 
  void CMSCollector::setup_cms_unloading_and_verification_state() {
    const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC || VerifyBeforeExit;
    const int  rso           = SharedHeap::SO_Symbols | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
  
!   if (should_unload_classes()) {       // Should unload classes this cycle
      remove_root_scanning_option(rso);  // Shrink the root set appropriately
      set_verifying(should_verify);      // Set verification state for this cycle
      return;                            // Nothing else needs to be done at this time
    }
  
    // Not unloading classes this cycle
!   assert(!should_unload_classes(), "Inconsitency!");
!   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
      // We were not verifying, or we _were_ unloading classes in the last cycle,
      // AND some verification options are enabled this cycle; in this case,
      // we must make sure that the deadness map is allocated if not already so,
      // and cleared (if already allocated previously --
      // CMSBitMap::sizeInBits() is used to determine if it's allocated).
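The unloading decision introduced above can be read as a small boolean function. Here is a condensed, hedged restatement (parameter names are mine; in the real method the result is the _should_unload_classes field, which keeps its previous value when neither branch applies, hence the reference parameter):

    // Condensed sketch of the decision in CMSCollector::update_should_unload_classes().
    static bool decide_unload_classes(bool& should_unload,              // sticky field stand-in
                                      bool full_gc_requested,
                                      bool explicit_gc_unloads_classes, // ExplicitGCInvokesConcurrentAndUnloadsClasses
                                      bool class_unloading_enabled,     // CMSClassUnloadingEnabled
                                      int  cycles_since_last_unload,
                                      int  max_interval,                // CMSClassUnloadingMaxInterval
                                      bool perm_wants_collect,
                                      bool old_gen_too_full) {
      if (full_gc_requested && explicit_gc_unloads_classes) {
        should_unload = true;                                  // condition (1)
      } else if (class_unloading_enabled) {                    // condition (2a)
        should_unload = (cycles_since_last_unload >= max_interval)  // (2b.iii)
                     || perm_wants_collect                          // (2b.i)
                     || old_gen_too_full;                           // (2b.ii)
      }
      return should_unload;
    }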
*** 4696,4706 ****
    ResourceMark rm;
    HandleMark   hm;
  
    GenCollectedHeap* gch = GenCollectedHeap::heap();
  
!   if (cms_should_unload_classes()) {
      CodeCache::gc_prologue();
    }
    assert(haveFreelistLocks(), "must have free list locks");
    assert_lock_strong(bitMapLock());
--- 4757,4767 ----
    ResourceMark rm;
    HandleMark   hm;
  
    GenCollectedHeap* gch = GenCollectedHeap::heap();
  
!   if (should_unload_classes()) {
      CodeCache::gc_prologue();
    }
    assert(haveFreelistLocks(), "must have free list locks");
    assert_lock_strong(bitMapLock());
*** 4756,4766 ****
      refProcessingWork(asynch, clear_all_soft_refs);
    }
    verify_work_stacks_empty();
    verify_overflow_empty();
  
!   if (cms_should_unload_classes()) {
      CodeCache::gc_epilogue();
    }
  
    // If we encountered any (marking stack / work queue) overflow
    // events during the current CMS cycle, take appropriate
--- 4817,4827 ----
      refProcessingWork(asynch, clear_all_soft_refs);
    }
    verify_work_stacks_empty();
    verify_overflow_empty();
  
!   if (should_unload_classes()) {
      CodeCache::gc_epilogue();
    }
  
    // If we encountered any (marking stack / work queue) overflow
    // events during the current CMS cycle, take appropriate
*** 5626,5636 ****
                                    NULL);
      }
      verify_work_stacks_empty();
    }
  
!   if (cms_should_unload_classes()) {
      {
        TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
  
        // Follow SystemDictionary roots and unload classes
        bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
--- 5687,5697 ----
                                    NULL);
      }
      verify_work_stacks_empty();
    }
  
!   if (should_unload_classes()) {
      {
        TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
  
        // Follow SystemDictionary roots and unload classes
        bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
*** 5720,5747 ****
  void CMSCollector::sweep(bool asynch) {
    assert(_collectorState == Sweeping, "just checking");
    check_correct_thread_executing();
    verify_work_stacks_empty();
    verify_overflow_empty();
!   incrementSweepCount();
    _sweep_timer.stop();
    _sweep_estimate.sample(_sweep_timer.seconds());
    size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
  
    // PermGen verification support: If perm gen sweeping is disabled in
    // this cycle, we preserve the perm gen object "deadness" information
    // in the perm_gen_verify_bit_map. In order to do that we traverse
    // all blocks in perm gen and mark all dead objects.
!   if (verifying() && !cms_should_unload_classes()) {
!     CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
!                              bitMapLock());
      assert(perm_gen_verify_bit_map()->sizeInBits() != 0, "Should have already been allocated");
      MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(), markBitMap(), perm_gen_verify_bit_map());
      _permGen->cmsSpace()->blk_iterate(&mdo);
    }
  
    if (asynch) {
      TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
      CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
      // First sweep the old gen then the perm gen
--- 5781,5814 ----
  void CMSCollector::sweep(bool asynch) {
    assert(_collectorState == Sweeping, "just checking");
    check_correct_thread_executing();
    verify_work_stacks_empty();
    verify_overflow_empty();
!   increment_sweep_count();
    _sweep_timer.stop();
    _sweep_estimate.sample(_sweep_timer.seconds());
    size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
  
    // PermGen verification support: If perm gen sweeping is disabled in
    // this cycle, we preserve the perm gen object "deadness" information
    // in the perm_gen_verify_bit_map. In order to do that we traverse
    // all blocks in perm gen and mark all dead objects.
!   if (verifying() && !should_unload_classes()) {
      assert(perm_gen_verify_bit_map()->sizeInBits() != 0, "Should have already been allocated");
      MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(), markBitMap(), perm_gen_verify_bit_map());
+     if (asynch) {
+       CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
+                                bitMapLock());
        _permGen->cmsSpace()->blk_iterate(&mdo);
+     } else {
+       // In the case of synchronous sweep, we already have
+       // the requisite locks/tokens.
+       _permGen->cmsSpace()->blk_iterate(&mdo);
      }
+   }
  
    if (asynch) {
      TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
      CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
      // First sweep the old gen then the perm gen
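The asynch/else split above is the part that addresses 6621144: the CMS token (and the freelist/bitmap locks) may only be claimed on the concurrent path, while the synchronous (foreground) path enters with them already held. A minimal sketch of that conditional-locking pattern, with illustrative stand-in types rather than the real HotSpot classes:

    // Stand-in for CMSTokenSyncWithLocks: acquire the CMS token plus the
    // needed locks in the constructor, release them in the destructor.
    struct TokenAndLocksGuard {
      TokenAndLocksGuard()  { /* acquire token and locks (illustrative) */ }
      ~TokenAndLocksGuard() { /* release in reverse order (illustrative) */ }
    };

    // Run 'work' with proper synchronization: only the asynchronous
    // (concurrent-thread) path takes the guard; the synchronous path is
    // assumed to be entered with the requisite locks/tokens already held.
    template <typename Work>
    void iterate_perm_gen_blocks(bool asynch, Work work) {
      if (asynch) {
        TokenAndLocksGuard guard;
        work();
      } else {
        work();
      }
    }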
*** 5750,5760 ****
                               bitMapLock());
        sweepWork(_cmsGen, asynch);
      }
  
      // Now repeat for perm gen
!     if (cms_should_unload_classes()) {
        CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
                                 bitMapLock());
        sweepWork(_permGen, asynch);
      }
--- 5817,5827 ----
                               bitMapLock());
        sweepWork(_cmsGen, asynch);
      }
  
      // Now repeat for perm gen
!     if (should_unload_classes()) {
        CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
                                 bitMapLock());
        sweepWork(_permGen, asynch);
      }
*** 5772,5782 ****
      }
    } else {
      // already have needed locks
      sweepWork(_cmsGen, asynch);
  
!     if (cms_should_unload_classes()) {
        sweepWork(_permGen, asynch);
      }
      // Update heap occupancy information which is used as
      // input to soft ref clearing policy at the next gc.
      Universe::update_heap_info_at_gc();
--- 5839,5849 ----
      }
    } else {
      // already have needed locks
      sweepWork(_cmsGen, asynch);
  
!     if (should_unload_classes()) {
        sweepWork(_permGen, asynch);
      }
      // Update heap occupancy information which is used as
      // input to soft ref clearing policy at the next gc.
      Universe::update_heap_info_at_gc();
*** 5933,5943 ****
      // co-terminal free run. This is done in the SweepClosure
      // destructor; so, do not remove this scope, else the
      // end-of-sweep-census below will be off by a little bit.
    }
    gen->cmsSpace()->sweep_completed();
!   gen->cmsSpace()->endSweepFLCensus(sweepCount());
  }
  
  // Reset CMS data structures (for now just the marking bit map)
  // preparatory for the next cycle.
  void CMSCollector::reset(bool asynch) {
--- 6000,6015 ----
      // co-terminal free run. This is done in the SweepClosure
      // destructor; so, do not remove this scope, else the
      // end-of-sweep-census below will be off by a little bit.
    }
    gen->cmsSpace()->sweep_completed();
!   gen->cmsSpace()->endSweepFLCensus(sweep_count());
!   if (should_unload_classes()) {                // unloaded classes this cycle,
!     _concurrent_cycles_since_last_unload = 0;   // ... reset count
!   } else {                                      // did not unload classes,
!     _concurrent_cycles_since_last_unload++;     // ... increment count
!   }
  }
  
  // Reset CMS data structures (for now just the marking bit map)
  // preparatory for the next cycle.
  void CMSCollector::reset(bool asynch) {
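The counter maintained above feeds disjunct 2.b.(iii) of update_should_unload_classes(): once CMSClassUnloadingMaxInterval consecutive cycles have completed without unloading, the next cycle unloads and the count resets. A toy simulation of that interplay (illustrative only; the interval value of 5 is an assumed example, not the flag's default, and all other unloading triggers are ignored):

    #include <cstdio>

    int main() {
      const int max_interval = 5;           // assumed example for CMSClassUnloadingMaxInterval
      int cycles_since_last_unload = 0;
      for (int cycle = 1; cycle <= 12; cycle++) {
        // Decision at the start of the cycle (other triggers ignored here).
        bool unload = (cycles_since_last_unload >= max_interval);
        // Bookkeeping at the end of the sweep, as in sweepWork() above.
        if (unload) {
          cycles_since_last_unload = 0;     // unloaded classes this cycle: reset
        } else {
          cycles_since_last_unload++;       // did not unload: count the cycle
        }
        std::printf("cycle %2d: unload=%d, count=%d\n",
                    cycle, unload ? 1 : 0, cycles_since_last_unload);
      }
      return 0;
    }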
*** 7191,7201 ****
    _bitMap(bitMap),
    _markStack(markStack),
    _revisitStack(revisitStack),
    _finger(finger),
    _parent(parent),
!   _should_remember_klasses(collector->cms_should_unload_classes())
  { }
  
  Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
                       MemRegion span,
                       CMSBitMap* bit_map,
--- 7263,7273 ----
    _bitMap(bitMap),
    _markStack(markStack),
    _revisitStack(revisitStack),
    _finger(finger),
    _parent(parent),
!   _should_remember_klasses(collector->should_unload_classes())
  { }
  
  Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
                       MemRegion span,
                       CMSBitMap* bit_map,
*** 7214,7224 ****
    _overflow_stack(overflow_stack),
    _revisit_stack(revisit_stack),
    _finger(finger),
    _global_finger_addr(global_finger_addr),
    _parent(parent),
!   _should_remember_klasses(collector->cms_should_unload_classes())
  { }
  
  void CMSCollector::lower_restart_addr(HeapWord* low) {
    assert(_span.contains(low), "Out of bounds addr");
--- 7286,7296 ----
    _overflow_stack(overflow_stack),
    _revisit_stack(revisit_stack),
    _finger(finger),
    _global_finger_addr(global_finger_addr),
    _parent(parent),
!   _should_remember_klasses(collector->should_unload_classes())
  { }
  
  void CMSCollector::lower_restart_addr(HeapWord* low) {
    assert(_span.contains(low), "Out of bounds addr");
*** 7357,7367 ****
    _bit_map(bit_map),
    _mod_union_table(mod_union_table),
    _mark_stack(mark_stack),
    _revisit_stack(revisit_stack),
    _concurrent_precleaning(concurrent_precleaning),
!   _should_remember_klasses(collector->cms_should_unload_classes())
  {
    assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
  }
  
  // Grey object rescan during pre-cleaning and second checkpoint phases --
--- 7429,7439 ----
    _bit_map(bit_map),
    _mod_union_table(mod_union_table),
    _mark_stack(mark_stack),
    _revisit_stack(revisit_stack),
    _concurrent_precleaning(concurrent_precleaning),
!   _should_remember_klasses(collector->should_unload_classes())
  {
    assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
  }
  
  // Grey object rescan during pre-cleaning and second checkpoint phases --
*** 7419,7429 ****
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _work_queue(work_queue),
    _revisit_stack(revisit_stack),
!   _should_remember_klasses(collector->cms_should_unload_classes())
  {
    assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
  }
  
  // Grey object rescan during second checkpoint phase --
--- 7491,7501 ----
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _work_queue(work_queue),
    _revisit_stack(revisit_stack),
!   _should_remember_klasses(collector->should_unload_classes())
  {
    assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
  }
  
  // Grey object rescan during second checkpoint phase --
*** 7941,7951 ****
      assert(size == CompactibleFreeListSpace::adjustObjectSize(size), "alignment problem");
  #ifdef DEBUG
      if (oop(addr)->klass() != NULL &&
!         ( !_collector->cms_should_unload_classes() || oop(addr)->is_parsable())) {
        // Ignore mark word because we are running concurrent with mutators
        assert(oop(addr)->is_oop(true), "live block should be an oop");
        assert(size == CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
--- 8013,8023 ----
      assert(size == CompactibleFreeListSpace::adjustObjectSize(size), "alignment problem");
  #ifdef DEBUG
      if (oop(addr)->klass() != NULL &&
!         ( !_collector->should_unload_classes() || oop(addr)->is_parsable())) {
        // Ignore mark word because we are running concurrent with mutators
        assert(oop(addr)->is_oop(true), "live block should be an oop");
        assert(size == CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
*** 7954,7964 ****
  #endif
    } else {
      // This should be an initialized object that's alive.
      assert(oop(addr)->klass() != NULL &&
!            (!_collector->cms_should_unload_classes() || oop(addr)->is_parsable()),
             "Should be an initialized object");
      // Ignore mark word because we are running concurrent with mutators
      assert(oop(addr)->is_oop(true), "live block should be an oop");
      // Verify that the bit map has no bits marked between
--- 8026,8036 ----
  #endif
    } else {
      // This should be an initialized object that's alive.
      assert(oop(addr)->klass() != NULL &&
!            (!_collector->should_unload_classes() || oop(addr)->is_parsable()),
             "Should be an initialized object");
      // Ignore mark word because we are running concurrent with mutators
      assert(oop(addr)->is_oop(true), "live block should be an oop");
      // Verify that the bit map has no bits marked between