--- 1.300 ---
Merged changes between child workspace "/net/spot/workspaces/ysr/cms_bugs" and
 parent workspace "/net/jano2/export2/hotspot/ws/main/gc_baseline".
--- 1.297.1.1 ---
6621144 CMS: assertion failure "is_cms_thread == Thread::current()->is_ConcurrentGC_thread()"


 208       }
 209     }
 210   } else {
 211     _par_gc_thread_states = NULL;
 212   }
 213   _incremental_collection_failed = false;
 214   // The "dilatation_factor" is the expansion that can occur on
 215   // account of the fact that the minimum object size in the CMS
 216   // generation may be larger than that in, say, a contiguous young
 217   //  generation.
 218   // Ideally, in the calculation below, we'd compute the dilatation
 219   // factor as: MinChunkSize/(promoting_gen's min object size)
 220   // Since we do not have such a general query interface for the
 221   // promoting generation, we'll instead just use the minimum
 222   // object size (which today is a header's worth of space);
 223   // note that all arithmetic is in units of HeapWords.
 224   assert(MinChunkSize >= oopDesc::header_size(), "just checking");
 225   assert(_dilatation_factor >= 1.0, "from previous assert");
 226 }
 227 
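A quick worked example of the dilatation factor described in the comment above, using hypothetical values (the real MinChunkSize and header size are platform-dependent): if MinChunkSize were 4 HeapWords and the promoting generation's minimum object size were a 2-HeapWord header, an object occupying 2 words before promotion could occupy up to 4 words in the CMS generation, giving _dilatation_factor = 4/2 = 2.0; since MinChunkSize >= header size, the factor can never drop below 1.0, which is what the final assert checks.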


 228 void ConcurrentMarkSweepGeneration::ref_processor_init() {
 229   assert(collector() != NULL, "no collector");
 230   collector()->ref_processor_init();
 231 }
 232 
 233 void CMSCollector::ref_processor_init() {
 234   if (_ref_processor == NULL) {
 235     // Allocate and initialize a reference processor
 236     _ref_processor = ReferenceProcessor::create_ref_processor(
 237         _span,                               // span
 238         _cmsGen->refs_discovery_is_atomic(), // atomic_discovery
 239         _cmsGen->refs_discovery_is_mt(),     // mt_discovery
 240         &_is_alive_closure,
 241         ParallelGCThreads,
 242         ParallelRefProcEnabled);
 243     // Initialize the _ref_processor field of CMSGen
 244     _cmsGen->set_ref_processor(_ref_processor);
 245 
 246     // Allocate a dummy ref processor for perm gen.
 247     ReferenceProcessor* rp2 = new ReferenceProcessor();


 503   _eden_chunk_index(0),        // -- ditto --
 504   _survivor_plab_array(NULL),  // -- ditto --
 505   _survivor_chunk_array(NULL), // -- ditto --
 506   _survivor_chunk_capacity(0), // -- ditto --
 507   _survivor_chunk_index(0),    // -- ditto --
 508   _ser_pmc_preclean_ovflw(0),
 509   _ser_pmc_remark_ovflw(0),
 510   _par_pmc_remark_ovflw(0),
 511   _ser_kac_ovflw(0),
 512   _par_kac_ovflw(0),
 513 #ifndef PRODUCT
 514   _num_par_pushes(0),
 515 #endif
 516   _collection_count_start(0),
 517   _verifying(false),
 518   _icms_start_limit(NULL),
 519   _icms_stop_limit(NULL),
 520   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 521   _completed_initialization(false),
 522   _collector_policy(cp),
 523   _unload_classes(false),
 524   _unloaded_classes_last_cycle(false),
 525   _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
 526 {
 527   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
 528     ExplicitGCInvokesConcurrent = true;
 529   }
 530   // Now expand the span and allocate the collection support structures
 531   // (MUT, marking bit map etc.) to cover both generations subject to
 532   // collection.
 533 
 534   // First check that _permGen is adjacent to _cmsGen and above it.
 535   assert(   _cmsGen->reserved().word_size()  > 0
 536          && _permGen->reserved().word_size() > 0,
 537          "generations should not be of zero size");
 538   assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
 539          "_cmsGen and _permGen should not overlap");
 540   assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
 541          "_cmsGen->end() different from _permGen->start()");
 542 
 543   // For use by dirty card to oop closures.
 544   _cmsGen->cmsSpace()->set_collector(this);


 625       typedef struct OopTaskQueuePadded {
 626         OopTaskQueue work_queue;
 627         char pad[64 - sizeof(OopTaskQueue)];  // prevent false sharing
 628       } OopTaskQueuePadded;
 629 
 630       for (i = 0; i < num_queues; i++) {
 631         OopTaskQueuePadded *q_padded = new OopTaskQueuePadded();
 632         if (q_padded == NULL) {
 633           warning("work_queue allocation failure.");
 634           return;
 635         }
 636         _task_queues->register_queue(i, &q_padded->work_queue);
 637       }
 638       for (i = 0; i < num_queues; i++) {
 639         _task_queues->queue(i)->initialize();
 640         _hash_seed[i] = 17;  // copied from ParNew
 641       }
 642     }
 643   }
 644 
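A minimal, generic sketch of the cache-line padding idea used by OopTaskQueuePadded above (not HotSpot code; the 64-byte line size and the field layout are assumptions):

    // Give each worker queue its own cache line so that frequent updates to one
    // queue's indices do not invalidate the line holding a neighbouring queue.
    struct Queue {
      volatile int head;
      volatile int tail;
    };

    struct PaddedQueue {
      Queue q;
      char  pad[64 - sizeof(Queue)];  // only compiles while sizeof(Queue) is smaller than 64
    };

    PaddedQueue queues[8];            // one padded slot per worker

A modern alternative with the same effect is alignas(64) on the queue type, which avoids the negative-array-size pitfall if the queue ever grows past a cache line.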
 645   // "initiatingOccupancy" is the occupancy ratio at which we trigger
 646   // a new collection cycle.  Unless explicitly specified via
 647   // CMSInitiatingOccupancyFraction, it is calculated from CMSTriggerRatio by:
 648   //   Let "f" be MinHeapFreeRatio in
 649   //
 650   //    initiatingOccupancy = 100-f +
 651   //                         f * (CMSTriggerRatio/100)
 652   // That is, if we assume the heap is at its desired maximum occupancy at the
 653   // end of a collection, we let CMSTriggerRatio of the (purported) free
 654   // space be allocated before initiating a new collection cycle.
 655   if (CMSInitiatingOccupancyFraction > 0) {
 656     _initiatingOccupancy = (double)CMSInitiatingOccupancyFraction / 100.0;
 657   } else {
 658     _initiatingOccupancy = ((100 - MinHeapFreeRatio) +
 659                            (double)(CMSTriggerRatio *
 660                                     MinHeapFreeRatio) / 100.0)
 661                            / 100.0;
 662   }
 663   // Clip CMSBootstrapOccupancy between 0 and 100.
 664   _bootstrap_occupancy = ((double)MIN2((intx)100, MAX2((intx)0, CMSBootstrapOccupancy)))
 665                          /(double)100;
 666 
 667   _full_gcs_since_conc_gc = 0;
 668 
 669   // Now tell CMS generations the identity of their collector
 670   ConcurrentMarkSweepGeneration::set_collector(this);
 671 
 672   // Create & start a CMS thread for this CMS collector
 673   _cmsThread = ConcurrentMarkSweepThread::start(this);
 674   assert(cmsThread() != NULL, "CMS Thread should have been created");
 675   assert(cmsThread()->collector() == this,
 676          "CMS Thread should refer to this gen");
 677   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 678 
 679   // Support for parallelizing young gen rescan
 680   GenCollectedHeap* gch = GenCollectedHeap::heap();
 681   _young_gen = gch->prev_gen(_cmsGen);
 682   if (gch->supports_inline_contig_alloc()) {
 683     _top_addr = gch->top_addr();
 684     _end_addr = gch->end_addr();


1396     }
1397   )
1398 
1399   FreelistLocker x(this);
1400   // ------------------------------------------------------------------
1401   // Print out lots of information which affects the initiation of
1402   // a collection.
1403   if (PrintCMSInitiationStatistics && stats().valid()) {
1404     gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1405     gclog_or_tty->stamp();
1406     gclog_or_tty->print_cr("");
1407     stats().print_on(gclog_or_tty);
1408     gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1409       stats().time_until_cms_gen_full());
1410     gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1411     gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1412                            _cmsGen->contiguous_available());
1413     gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1414     gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1415     gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1416     gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", initiatingOccupancy());

1417   }
1418   // ------------------------------------------------------------------
1419 
1420   // If the estimated time to complete a cms collection (cms_duration())
1421   // is less than the estimated time remaining until the cms generation
1422   // is full, start a collection.
1423   if (!UseCMSInitiatingOccupancyOnly) {
1424     if (stats().valid()) {
1425       if (stats().time_until_cms_start() == 0.0) {
1426         return true;
1427       }
1428     } else {
1429       // We want to conservatively collect somewhat early in order
1430       // to try and "bootstrap" our CMS/promotion statistics;
1431       // this branch will not fire after the first successful CMS
1432       // collection because the stats should then be valid.
1433       if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1434         if (Verbose && PrintGCDetails) {
1435           gclog_or_tty->print_cr(
1436             " CMSCollector: collect for bootstrapping statistics:"
1437             " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1438             _bootstrap_occupancy);
1439         }
1440         return true;
1441       }
1442     }
1443   }
1444 
1445   // Otherwise, we start a collection cycle if either the perm gen or
1446   // old gen want a collection cycle started. Each may use
1447   // an appropriate criterion for making this decision.
1448   // XXX We need to make sure that the gen expansion
1449   // criterion dovetails well with this.
1450   if (_cmsGen->shouldConcurrentCollect(initiatingOccupancy())) {
1451     if (Verbose && PrintGCDetails) {
1452       gclog_or_tty->print_cr("CMS old gen initiated");
1453     }
1454     return true;
1455   }
1456 
1457   if (cms_should_unload_classes() &&
1458       _permGen->shouldConcurrentCollect(initiatingOccupancy())) {



1459     if (Verbose && PrintGCDetails) {
1460      gclog_or_tty->print_cr("CMS perm gen initiated");
1461     }
1462     return true;
1463   }
1464 
1465   return false;
1466 }
1467 
1468 // Clear _expansion_cause fields of constituent generations
1469 void CMSCollector::clear_expansion_cause() {
1470   _cmsGen->clear_expansion_cause();
1471   _permGen->clear_expansion_cause();
1472 }
1473 
1474 bool ConcurrentMarkSweepGeneration::shouldConcurrentCollect(
1475   double initiatingOccupancy) {
1476   // We should be conservative in starting a collection cycle.  To
1477   // start too eagerly runs the risk of collecting too often in the
1478   // extreme.  To collect too rarely falls back on full collections,
1479   // which works, even if not optimum in terms of concurrent work.
1480   // As a workaround for collecting too eagerly, use the flag
1481   // UseCMSInitiatingOccupancyOnly.  This also has the advantage of
1482   // giving the user an easily understandable way of controlling the
1483   // collections.
1484   // We want to start a new collection cycle if any of the following
1485   // conditions hold:
1486   // . our current occupancy exceeds the initiating occupancy, or
1487   // . we recently needed to expand and have not since that expansion,
1488   //   collected, or
1489   // . we are not using adaptive free lists and linear allocation is
1490   //   going to fail, or
1491   // . (for old gen) incremental collection has already failed or
1492   //   may soon fail as we may not be able to absorb
1493   //   promotions.
1494   assert_lock_strong(freelistLock());



1495 
1496   if (occupancy() > initiatingOccupancy) {

1497     if (PrintGCDetails && Verbose) {
1498       gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
1499         short_name(), occupancy(), initiatingOccupancy);
1500     }
1501     return true;
1502   }
1503   if (UseCMSInitiatingOccupancyOnly) {
1504     return false;
1505   }
1506   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1507     if (PrintGCDetails && Verbose) {
1508       gclog_or_tty->print(" %s: collect because expanded for allocation ",
1509         short_name());
1510     }
1511     return true;
1512   }
1513   GenCollectedHeap* gch = GenCollectedHeap::heap();
1514   assert(gch->collector_policy()->is_two_generation_policy(),
1515          "You may want to check the correctness of the following");
1516   if (gch->incremental_collection_will_fail()) {
1517     if (PrintGCDetails && Verbose) {
1518       gclog_or_tty->print(" %s: collect because incremental collection will fail ",
1519         short_name());
1520     }
1521     return true;
1522   }
1523   if (!_cmsSpace->adaptive_freelists() &&
1524       _cmsSpace->linearAllocationWouldFail()) {
1525     if (PrintGCDetails && Verbose) {
1526       gclog_or_tty->print(" %s: collect because of linAB ",
1527         short_name());
1528     }
1529     return true;
1530   }
1531   return false;
1532 }
1533 
1534 void ConcurrentMarkSweepGeneration::collect(bool   full,
1535                                             bool   clear_all_soft_refs,
1536                                             size_t size,
1537                                             bool   tlab)
1538 {
1539   collector()->collect(full, clear_all_soft_refs, size, tlab);
1540 }
1541 
1542 void CMSCollector::collect(bool   full,
1543                            bool   clear_all_soft_refs,
1544                            size_t size,
1545                            bool   tlab)
1546 {
1547   if (!UseCMSCollectionPassing && _collectorState > Idling) {
1548     // For debugging purposes skip the collection if the state
1549     // is not currently idle
1550     if (TraceCMSState) {


1953     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1954     size_t free_size = cms_space->free();
1955     assert(free_size ==
1956            pointer_delta(cms_space->end(), cms_space->compaction_top())
1957            * HeapWordSize,
1958       "All the free space should be compacted into one chunk at top");
1959     assert(cms_space->dictionary()->totalChunkSize(
1960                                       debug_only(cms_space->freelistLock())) == 0 ||
1961            cms_space->totalSizeInIndexedFreeLists() == 0,
1962       "All the free space should be in a single chunk");
1963     size_t num = cms_space->totalCount();
1964     assert((free_size == 0 && num == 0) ||
1965            (free_size > 0  && (num == 1 || num == 2)),
1966          "There should be at most 2 free chunks after compaction");
1967   #endif // ASSERT
1968   _collectorState = Resetting;
1969   assert(_restart_addr == NULL,
1970          "Should have been NULL'd before baton was passed");
1971   reset(false /* == !asynch */);
1972   _cmsGen->reset_after_compaction();

1973 
1974   if (verifying() && !cms_should_unload_classes()) {
1975     perm_gen_verify_bit_map()->clear_all();
1976   }
1977 
1978   // Clear any data recorded in the PLAB chunk arrays.
1979   if (_survivor_plab_array != NULL) {
1980     reset_survivor_plab_arrays();
1981   }
1982 
1983   // Adjust the per-size allocation stats for the next epoch.
1984   _cmsGen->cmsSpace()->endSweepFLCensus(sweepCount() /* fake */);
1985   // Restart the "sweep timer" for next epoch.
1986   _sweep_timer.reset();
1987   _sweep_timer.start();
1988 
1989   // Sample collection pause time and reset for collection interval.
1990   if (UseAdaptiveSizePolicy) {
1991     size_policy()->msc_collection_end(gch->gc_cause());
1992   }
1993 
1994   // For a mark-sweep-compact, compute_new_size() will be called


2081     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2082     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2083     _c->_foregroundGCShouldWait = true;
2084   }
2085 };
2086 
2087 // There are separate collect_in_background and collect_in_foreground because of
2088 // the different locking requirements of the background collector and the
2089 // foreground collector.  There was originally an attempt to share
2090 // one "collect" method between the background collector and the foreground
2091 // collector but the if-then-else required made it cleaner to have
2092 // separate methods.
2093 void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
2094   assert(Thread::current()->is_ConcurrentGC_thread(),
2095     "A CMS asynchronous collection is only allowed on a CMS thread.");
2096 
2097   GenCollectedHeap* gch = GenCollectedHeap::heap();
2098   {
2099     bool safepoint_check = Mutex::_no_safepoint_check_flag;
2100     MutexLockerEx hl(Heap_lock, safepoint_check);

2101     MutexLockerEx x(CGC_lock, safepoint_check);
2102     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2103       // The foreground collector is active or we're
2104       // not using asynchronous collections.  Skip this
2105       // background collection.
2106       assert(!_foregroundGCShouldWait, "Should be clear");
2107       return;
2108     } else {
2109       assert(_collectorState == Idling, "Should be idling before start.");
2110       _collectorState = InitialMarking;
2111       // Reset the expansion cause, now that we are about to begin
2112       // a new cycle.
2113       clear_expansion_cause();
2114     }
2115     _unloaded_classes_last_cycle = cms_should_unload_classes(); // ... from last cycle
2116     // This controls class unloading in response to an explicit gc request.
2117     // If ExplicitGCInvokesConcurrentAndUnloadsClasses is set, then
2118     // we will unload classes even if CMSClassUnloadingEnabled is not set.
2119     // See CR 6541037 and related CRs.
2120     _unload_classes = _full_gc_requested                      // ... for this cycle
2121                       && ExplicitGCInvokesConcurrentAndUnloadsClasses;
2122     _full_gc_requested = false;           // acks all outstanding full gc requests
2123     // Signal that we are about to start a collection
2124     gch->increment_total_full_collections();  // ... starting a collection cycle
2125     _collection_count_start = gch->total_full_collections();
2126   }
2127 
2128   // Used for PrintGC
2129   size_t prev_used;
2130   if (PrintGC && Verbose) {
2131     prev_used = _cmsGen->used(); // XXXPERM
2132   }
2133 
2134   // The change of the collection state is normally done at this level;
2135   // the exceptions are phases that are executed while the world is
2136   // stopped.  For those phases the change of state is done while the
2137   // world is stopped.  For baton passing purposes this allows the
2138   // background collector to finish the phase and change state atomically.
2139   // The foreground collector cannot wait on a phase that is done
2140   // while the world is stopped because the foreground collector already
2141   // has the world stopped and would deadlock.


3030   assert(_num_par_pushes >= 0, "Inconsistency");
3031   if (_overflow_list == NULL) {
3032     assert(_num_par_pushes == 0, "Inconsistency");
3033   }
3034   return _overflow_list == NULL;
3035 }
3036 
3037 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3038 // merely consolidate assertion checks that appear to occur together frequently.
3039 void CMSCollector::verify_work_stacks_empty() const {
3040   assert(_markStack.isEmpty(), "Marking stack should be empty");
3041   assert(overflow_list_is_empty(), "Overflow list should be empty");
3042 }
3043 
3044 void CMSCollector::verify_overflow_empty() const {
3045   assert(overflow_list_is_empty(), "Overflow list should be empty");
3046   assert(no_preserved_marks(), "No preserved marks");
3047 }
3048 #endif // PRODUCT
3049 


3050 void CMSCollector::setup_cms_unloading_and_verification_state() {
3051   const  bool should_verify =    VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3052                              || VerifyBeforeExit;
3053   const  int  rso           =    SharedHeap::SO_Symbols | SharedHeap::SO_Strings
3054                              |   SharedHeap::SO_CodeCache;
3055 
3056   if (cms_should_unload_classes()) {   // Should unload classes this cycle
3057     remove_root_scanning_option(rso);  // Shrink the root set appropriately
3058     set_verifying(should_verify);    // Set verification state for this cycle
3059     return;                            // Nothing else needs to be done at this time
3060   }
3061 
3062   // Not unloading classes this cycle
3063   assert(!cms_should_unload_classes(), "Inconsistency!");
3064   if ((!verifying() || cms_unloaded_classes_last_cycle()) && should_verify) {
3065     // We were not verifying, or we _were_ unloading classes in the last cycle,
3066     // AND some verification options are enabled this cycle; in this case,
3067     // we must make sure that the deadness map is allocated if not already so,
3068     // and cleared (if already allocated previously --
3069     // CMSBitMap::sizeInBits() is used to determine if it's allocated).
3070     if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
3071       if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
3072         warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
3073                 "permanent generation verification disabled");
3074         return;  // Note that we leave verification disabled, so we'll retry this
3075                  // allocation next cycle. We _could_ remember this failure
3076                  // and skip further attempts and permanently disable verification
3077                  // attempts if that is considered more desirable.
3078       }
3079       assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()),
3080               "_perm_gen_ver_bit_map inconsistency?");
3081     } else {
3082       perm_gen_verify_bit_map()->clear_all();
3083     }
3084     // Include symbols, strings and code cache elements to prevent their resurrection.


4681   SpecializationStats::print();
4682 }
4683 
4684 void CMSCollector::checkpointRootsFinalWork(bool asynch,
4685   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4686 
4687   NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)
4688 
4689   assert(haveFreelistLocks(), "must have free list locks");
4690   assert_lock_strong(bitMapLock());
4691 
4692   if (UseAdaptiveSizePolicy) {
4693     size_policy()->checkpoint_roots_final_begin();
4694   }
4695 
4696   ResourceMark rm;
4697   HandleMark   hm;
4698 
4699   GenCollectedHeap* gch = GenCollectedHeap::heap();
4700 
4701   if (cms_should_unload_classes()) {
4702     CodeCache::gc_prologue();
4703   }
4704   assert(haveFreelistLocks(), "must have free list locks");
4705   assert_lock_strong(bitMapLock());
4706 
4707   if (!init_mark_was_synchronous) {
4708     // We might assume that we need not fill TLAB's when
4709     // CMSScavengeBeforeRemark is set, because we may have just done
4710     // a scavenge which would have filled all TLAB's -- and besides
4711     // Eden would be empty. This however may not always be the case --
4712     // for instance although we asked for a scavenge, it may not have
4713     // happened because of a JNI critical section. We probably need
4714     // a policy for deciding whether we can in that case wait until
4715     // the critical section releases and then do the remark following
4716     // the scavenge, and skip it here. In the absence of that policy,
4717     // or of an indication of whether the scavenge did indeed occur,
4718     // we cannot rely on TLAB's having been filled and must do
4719     // so here just in case a scavenge did not happen.
4720     gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4721     // Update the saved marks which may affect the root scans.


4741         TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
4742                     gclog_or_tty);
4743         do_remark_non_parallel();
4744       }
4745     }
4746   } else {
4747     assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
4748     // The initial mark was stop-world, so there's no rescanning to
4749     // do; go straight on to the next step below.
4750   }
4751   verify_work_stacks_empty();
4752   verify_overflow_empty();
4753 
4754   {
4755     NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
4756     refProcessingWork(asynch, clear_all_soft_refs);
4757   }
4758   verify_work_stacks_empty();
4759   verify_overflow_empty();
4760 
4761   if (cms_should_unload_classes()) {
4762     CodeCache::gc_epilogue();
4763   }
4764 
4765   // If we encountered any (marking stack / work queue) overflow
4766   // events during the current CMS cycle, take appropriate
4767   // remedial measures, where possible, so as to try and avoid
4768   // recurrence of that condition.
4769   assert(_markStack.isEmpty(), "No grey objects");
4770   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4771                      _ser_kac_ovflw;
4772   if (ser_ovflw > 0) {
4773     if (PrintCMSStatistics != 0) {
4774       gclog_or_tty->print_cr("Marking stack overflow (benign) "
4775         "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
4776         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
4777         _ser_kac_ovflw);
4778     }
4779     _markStack.expand();
4780     _ser_pmc_remark_ovflw = 0;
4781     _ser_pmc_preclean_ovflw = 0;


5611                                 &cmsKeepAliveClosure);
5612   {
5613     TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
5614     if (rp->processing_is_mt()) {
5615       CMSRefProcTaskExecutor task_executor(*this);
5616       rp->process_discovered_references(soft_ref_policy,
5617                                         &_is_alive_closure,
5618                                         &cmsKeepAliveClosure,
5619                                         &cmsDrainMarkingStackClosure,
5620                                         &task_executor);
5621     } else {
5622       rp->process_discovered_references(soft_ref_policy,
5623                                         &_is_alive_closure,
5624                                         &cmsKeepAliveClosure,
5625                                         &cmsDrainMarkingStackClosure,
5626                                         NULL);
5627     }
5628     verify_work_stacks_empty();
5629   }
5630 
5631   if (cms_should_unload_classes()) {
5632     {
5633       TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
5634 
5635       // Follow SystemDictionary roots and unload classes
5636       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5637 
5638       // Follow CodeCache roots and unload any methods marked for unloading
5639       CodeCache::do_unloading(&_is_alive_closure,
5640                               &cmsKeepAliveClosure,
5641                               purged_class);
5642 
5643       cmsDrainMarkingStackClosure.do_void();
5644       verify_work_stacks_empty();
5645 
5646       // Update subklass/sibling/implementor links in KlassKlass descendants
5647       assert(!_revisitStack.isEmpty(), "revisit stack should not be empty");
5648       oop k;
5649       while ((k = _revisitStack.pop()) != NULL) {
5650         ((Klass*)(oopDesc*)k)->follow_weak_klass_links(
5651                        &_is_alive_closure,


5714              "executing VM thread");
5715     }
5716   }
5717 }
5718 #endif
5719 
5720 void CMSCollector::sweep(bool asynch) {
5721   assert(_collectorState == Sweeping, "just checking");
5722   check_correct_thread_executing();
5723   verify_work_stacks_empty();
5724   verify_overflow_empty();
5725   incrementSweepCount();
5726   _sweep_timer.stop();
5727   _sweep_estimate.sample(_sweep_timer.seconds());
5728   size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
5729 
5730   // PermGen verification support: If perm gen sweeping is disabled in
5731   // this cycle, we preserve the perm gen object "deadness" information
5732   // in the perm_gen_verify_bit_map. In order to do that we traverse
5733   // all blocks in perm gen and mark all dead objects.
5734   if (verifying() && !cms_should_unload_classes()) {
5735     CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5736                              bitMapLock());
5737     assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
5738            "Should have already been allocated");
5739     MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
5740                                markBitMap(), perm_gen_verify_bit_map());



5741     _permGen->cmsSpace()->blk_iterate(&mdo);




5742   }

5743 
5744   if (asynch) {
5745     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5746     CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
5747     // First sweep the old gen then the perm gen
5748     {
5749       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5750                                bitMapLock());
5751       sweepWork(_cmsGen, asynch);
5752     }
5753 
5754     // Now repeat for perm gen
5755     if (cms_should_unload_classes()) {
5756       CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5757                              bitMapLock());
5758       sweepWork(_permGen, asynch);
5759     }
5760 
5761     // Update Universe::_heap_*_at_gc figures.
5762     // We need all the free list locks to make the abstract state
5763     // transition from Sweeping to Resetting. See detailed note
5764     // further below.
5765     {
5766       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5767                                _permGen->freelistLock());
5768       // Update heap occupancy information which is used as
5769       // input to soft ref clearing policy at the next gc.
5770       Universe::update_heap_info_at_gc();
5771       _collectorState = Resizing;
5772     }
5773   } else {
5774     // already have needed locks
5775     sweepWork(_cmsGen,  asynch);
5776 
5777     if (cms_should_unload_classes()) {
5778       sweepWork(_permGen, asynch);
5779     }
5780     // Update heap occupancy information which is used as
5781     // input to soft ref clearing policy at the next gc.
5782     Universe::update_heap_info_at_gc();
5783     _collectorState = Resizing;
5784   }
5785   verify_work_stacks_empty();
5786   verify_overflow_empty();
5787 
5788   _sweep_timer.reset();
5789   _sweep_timer.start();
5790 
5791   update_time_of_last_gc(os::javaTimeMillis());
5792 
5793   // NOTE on abstract state transitions:
5794   // Mutators allocate-live and/or mark the mod-union table dirty
5795   // based on the state of the collection.  The former is done in
5796   // the interval [Marking, Sweeping] and the latter in the interval
5797   // [Marking, Sweeping).  Thus the transitions into the Marking state


5919         "Should possess CMS token to sweep");
5920   assert_lock_strong(gen->freelistLock());
5921   assert_lock_strong(bitMapLock());
5922 
5923   assert(!_sweep_timer.is_active(), "Was switched off in an outer context");
5924   gen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
5925                                       _sweep_estimate.padded_average());
5926   gen->setNearLargestChunk();
5927 
5928   {
5929     SweepClosure sweepClosure(this, gen, &_markBitMap,
5930                             CMSYield && asynch);
5931     gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5932     // We need to free-up/coalesce garbage/blocks from a
5933     // co-terminal free run. This is done in the SweepClosure
5934     // destructor; so, do not remove this scope, else the
5935     // end-of-sweep-census below will be off by a little bit.
5936   }
5937   gen->cmsSpace()->sweep_completed();
5938   gen->cmsSpace()->endSweepFLCensus(sweepCount());





5939 }
5940 
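A generic sketch (not HotSpot code) of the scoping idiom that the comment inside sweepWork above relies on: the sweep closure finishes coalescing the trailing free run in its destructor, so the census that follows must run after the closing brace that destroys the closure.

    struct ScopedSweep {
      ~ScopedSweep() {
        // coalesce the trailing ("co-terminal") free run here
      }
    };

    void sweep_then_census() {
      {
        ScopedSweep s;
        // ... iterate carefully over the blocks of the space ...
      }                  // destructor runs here, completing the sweep
      // the end-of-sweep census taken here now sees the coalesced state
    }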
5941 // Reset CMS data structures (for now just the marking bit map)
5942 // preparatory for the next cycle.
5943 void CMSCollector::reset(bool asynch) {
5944   GenCollectedHeap* gch = GenCollectedHeap::heap();
5945   CMSAdaptiveSizePolicy* sp = size_policy();
5946   AdaptiveSizePolicyOutput(sp, gch->total_collections());
5947   if (asynch) {
5948     CMSTokenSyncWithLocks ts(true, bitMapLock());
5949 
5950     // If the state is not "Resetting", the foreground thread
5951     // has done a collection and the resetting.
5952     if (_collectorState != Resetting) {
5953       assert(_collectorState == Idling, "The state should only change"
5954         " because the foreground collector has finished the collection");
5955       return;
5956     }
5957 
5958     // Clear the mark bitmap (no grey objects to start with)


7176     }
7177     // anything including and to the right of _finger
7178     // will be scanned as we iterate over the remainder of the
7179     // bit map
7180   }
7181 }
7182 
7183 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7184                      MemRegion span,
7185                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
7186                      CMSMarkStack*  revisitStack,
7187                      HeapWord* finger, MarkFromRootsClosure* parent) :
7188   OopClosure(collector->ref_processor()),
7189   _collector(collector),
7190   _span(span),
7191   _bitMap(bitMap),
7192   _markStack(markStack),
7193   _revisitStack(revisitStack),
7194   _finger(finger),
7195   _parent(parent),
7196   _should_remember_klasses(collector->cms_should_unload_classes())
7197 { }
7198 
7199 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7200                      MemRegion span,
7201                      CMSBitMap* bit_map,
7202                      OopTaskQueue* work_queue,
7203                      CMSMarkStack*  overflow_stack,
7204                      CMSMarkStack*  revisit_stack,
7205                      HeapWord* finger,
7206                      HeapWord** global_finger_addr,
7207                      Par_MarkFromRootsClosure* parent) :
7208   OopClosure(collector->ref_processor()),
7209   _collector(collector),
7210   _whole_span(collector->_span),
7211   _span(span),
7212   _bit_map(bit_map),
7213   _work_queue(work_queue),
7214   _overflow_stack(overflow_stack),
7215   _revisit_stack(revisit_stack),
7216   _finger(finger),
7217   _global_finger_addr(global_finger_addr),
7218   _parent(parent),
7219   _should_remember_klasses(collector->cms_should_unload_classes())
7220 { }
7221 
7222 
7223 void CMSCollector::lower_restart_addr(HeapWord* low) {
7224   assert(_span.contains(low), "Out of bounds addr");
7225   if (_restart_addr == NULL) {
7226     _restart_addr = low;
7227   } else {
7228     _restart_addr = MIN2(_restart_addr, low);
7229   }
7230 }
7231 
7232 // Upon stack overflow, we discard (part of) the stack,
7233 // remembering the least address amongst those discarded
7234 // in CMSCollector's _restart_address.
7235 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7236   // Remember the least grey address discarded
7237   HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7238   _collector->lower_restart_addr(ra);
7239   _markStack->reset();  // discard stack contents


7342   }
7343 }
7344 
7345 
7346 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7347                                        MemRegion span,
7348                                        ReferenceProcessor* rp,
7349                                        CMSBitMap* bit_map,
7350                                        CMSBitMap* mod_union_table,
7351                                        CMSMarkStack*  mark_stack,
7352                                        CMSMarkStack*  revisit_stack,
7353                                        bool           concurrent_precleaning):
7354   OopClosure(rp),
7355   _collector(collector),
7356   _span(span),
7357   _bit_map(bit_map),
7358   _mod_union_table(mod_union_table),
7359   _mark_stack(mark_stack),
7360   _revisit_stack(revisit_stack),
7361   _concurrent_precleaning(concurrent_precleaning),
7362   _should_remember_klasses(collector->cms_should_unload_classes())
7363 {
7364   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7365 }
7366 
7367 // Grey object rescan during pre-cleaning and second checkpoint phases --
7368 // the non-parallel version (the parallel version appears further below.)
7369 void PushAndMarkClosure::do_oop(oop* p) {
7370   oop    this_oop = *p;
7371   // Ignore mark word verification. During concurrent precleaning,
7372   // the object monitor may be locked; during the checkpoint
7373   // phases, the object may already have been reached by a different
7374   // path and may be at the end of the global overflow list (so
7375   // the mark word may be NULL).
7376   assert(this_oop->is_oop_or_null(true/* ignore mark word */),
7377          "expected an oop or NULL");
7378   HeapWord* addr = (HeapWord*)this_oop;
7379   // Check if oop points into the CMS generation
7380   // and is not marked
7381   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7382     // a white object ...


7404          // in the overflow list.
7405          _collector->push_on_overflow_list(this_oop);
7406          _collector->_ser_pmc_remark_ovflw++;
7407       }
7408     }
7409   }
7410 }
7411 
7412 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7413                                                MemRegion span,
7414                                                ReferenceProcessor* rp,
7415                                                CMSBitMap* bit_map,
7416                                                OopTaskQueue* work_queue,
7417                                                CMSMarkStack* revisit_stack):
7418   OopClosure(rp),
7419   _collector(collector),
7420   _span(span),
7421   _bit_map(bit_map),
7422   _work_queue(work_queue),
7423   _revisit_stack(revisit_stack),
7424   _should_remember_klasses(collector->cms_should_unload_classes())
7425 {
7426   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7427 }
7428 
7429 // Grey object rescan during second checkpoint phase --
7430 // the parallel version.
7431 void Par_PushAndMarkClosure::do_oop(oop* p) {
7432   oop    this_oop = *p;
7433   // In the assert below, we ignore the mark word because
7434   // this oop may point to an already visited object that is
7435   // on the overflow stack (in which case the mark word has
7436   // been hijacked for chaining into the overflow stack --
7437   // if this is the last object in the overflow stack then
7438   // its mark word will be NULL). Because this object may
7439   // have been subsequently popped off the global overflow
7440   // stack, and the mark word possibly restored to the prototypical
7441   // value, by the time we get to examine this failing assert in
7442   // the debugger, is_oop_or_null(false) may subsequently start
7443   // to hold.
7444   assert(this_oop->is_oop_or_null(true),


7926 
7927   // Common code path for original and adaptive free lists.
7928 
7929   // this object is live: we'd normally expect this to be
7930   // an oop, and like to assert the following:
7931   // assert(oop(addr)->is_oop(), "live block should be an oop");
7932   // However, as we commented above, this may be an object whose
7933   // header hasn't yet been initialized.
7934   size_t size;
7935   assert(_bitMap->isMarked(addr), "Tautology for this control point");
7936   if (_bitMap->isMarked(addr + 1)) {
7937     // Determine the size from the bit map, rather than trying to
7938     // compute it from the object header.
7939     HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7940     size = pointer_delta(nextOneAddr + 1, addr);
7941     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7942            "alignment problem");
7943 
7944     #ifdef DEBUG
7945       if (oop(addr)->klass() != NULL &&
7946           (   !_collector->cms_should_unload_classes()
7947            || oop(addr)->is_parsable())) {
7948         // Ignore mark word because we are running concurrent with mutators
7949         assert(oop(addr)->is_oop(true), "live block should be an oop");
7950         assert(size ==
7951                CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7952                "P-mark and computed size do not agree");
7953       }
7954     #endif
7955 
7956   } else {
7957     // This should be an initialized object that's alive.
7958     assert(oop(addr)->klass() != NULL &&
7959            (!_collector->cms_should_unload_classes()
7960             || oop(addr)->is_parsable()),
7961            "Should be an initialized object");
7962     // Ignore mark word because we are running concurrent with mutators
7963     assert(oop(addr)->is_oop(true), "live block should be an oop");
7964     // Verify that the bit map has no bits marked between
7965     // addr and purported end of this block.
7966     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7967     assert(size >= 3, "Necessary for Printezis marks to work");
7968     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7969     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7970   }
7971   return size;
7972 }
7973 
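As the bit-map arithmetic above suggests (an inference from the code, not from separate documentation): an object whose header is not yet parsable is described entirely by the mark bit map, with two consecutive bits set at its first two words and one bit set at its last word. For example, an object occupying HeapWords [100, 105) would have bits 100, 101 and 104 set; getNextMarkedWordAddress(addr + 2) then returns 104, and pointer_delta(104 + 1, 100) yields the 5-word size. This also motivates the "size >= 3" assert in the else branch: for objects smaller than 3 words the start pair and the end bit could not remain distinct, and the encoding would be ambiguous.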
7974 void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
7975                                             size_t chunkSize) {
7976   // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
7977   // scheme.
7978   bool fcInFreeLists = fc->isFree();
7979   assert(_sp->adaptive_freelists(), "Should only be used in this case.");




 208       }
 209     }
 210   } else {
 211     _par_gc_thread_states = NULL;
 212   }
 213   _incremental_collection_failed = false;
 214   // The "dilatation_factor" is the expansion that can occur on
 215   // account of the fact that the minimum object size in the CMS
 216   // generation may be larger than that in, say, a contiguous young
 217   //  generation.
 218   // Ideally, in the calculation below, we'd compute the dilatation
 219   // factor as: MinChunkSize/(promoting_gen's min object size)
 220   // Since we do not have such a general query interface for the
 221   // promoting generation, we'll instead just use the minimum
 222   // object size (which today is a header's worth of space);
 223   // note that all arithmetic is in units of HeapWords.
 224   assert(MinChunkSize >= oopDesc::header_size(), "just checking");
 225   assert(_dilatation_factor >= 1.0, "from previous assert");
 226 }
 227 
 228 
 229 // The field "_initiating_occupancy" represents the occupancy percentage
 230 // at which we trigger a new collection cycle.  Unless explicitly specified
 231 // via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
 232 // is calculated by:
 233 //
 234 //   Let "f" be MinHeapFreeRatio in
 235 //
 236 //    _initiating_occupancy = 100-f +
 237 //                           f * (CMSTrigger[Perm]Ratio/100)
 238 //   where CMSTrigger[Perm]Ratio is the argument "tr" below.
 239 //
 240 // That is, if we assume the heap is at its desired maximum occupancy at the
 241 // end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
 242 // space be allocated before initiating a new collection cycle.
 243 //
 244 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
 245   assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
 246   if (io > 0) {
 247     _initiating_occupancy = (double)io / 100.0;
 248   } else {
 249     _initiating_occupancy = ((100 - MinHeapFreeRatio) +
 250                              (double)(tr * MinHeapFreeRatio) / 100.0)
 251                             / 100.0;
 252   }
 253 }
 254 
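A quick worked example of the formula implemented above, assuming the common defaults of that era (an assumption, not taken from this changeset): with io not set (io <= 0), MinHeapFreeRatio = 40 and tr = 80, _initiating_occupancy = ((100 - 40) + 80 * 40 / 100) / 100 = (60 + 32) / 100 = 0.92, so a concurrent cycle is initiated once the generation is roughly 92% full; an explicit io of, say, 70 would instead give 0.70 directly.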
 255 
 256 void ConcurrentMarkSweepGeneration::ref_processor_init() {
 257   assert(collector() != NULL, "no collector");
 258   collector()->ref_processor_init();
 259 }
 260 
 261 void CMSCollector::ref_processor_init() {
 262   if (_ref_processor == NULL) {
 263     // Allocate and initialize a reference processor
 264     _ref_processor = ReferenceProcessor::create_ref_processor(
 265         _span,                               // span
 266         _cmsGen->refs_discovery_is_atomic(), // atomic_discovery
 267         _cmsGen->refs_discovery_is_mt(),     // mt_discovery
 268         &_is_alive_closure,
 269         ParallelGCThreads,
 270         ParallelRefProcEnabled);
 271     // Initialize the _ref_processor field of CMSGen
 272     _cmsGen->set_ref_processor(_ref_processor);
 273 
 274     // Allocate a dummy ref processor for perm gen.
 275     ReferenceProcessor* rp2 = new ReferenceProcessor();


 531   _eden_chunk_index(0),        // -- ditto --
 532   _survivor_plab_array(NULL),  // -- ditto --
 533   _survivor_chunk_array(NULL), // -- ditto --
 534   _survivor_chunk_capacity(0), // -- ditto --
 535   _survivor_chunk_index(0),    // -- ditto --
 536   _ser_pmc_preclean_ovflw(0),
 537   _ser_pmc_remark_ovflw(0),
 538   _par_pmc_remark_ovflw(0),
 539   _ser_kac_ovflw(0),
 540   _par_kac_ovflw(0),
 541 #ifndef PRODUCT
 542   _num_par_pushes(0),
 543 #endif
 544   _collection_count_start(0),
 545   _verifying(false),
 546   _icms_start_limit(NULL),
 547   _icms_stop_limit(NULL),
 548   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 549   _completed_initialization(false),
 550   _collector_policy(cp),
 551   _should_unload_classes(false),
 552   _concurrent_cycles_since_last_unload(0),
 553   _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
 554 {
 555   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
 556     ExplicitGCInvokesConcurrent = true;
 557   }
 558   // Now expand the span and allocate the collection support structures
 559   // (MUT, marking bit map etc.) to cover both generations subject to
 560   // collection.
 561 
 562   // First check that _permGen is adjacent to _cmsGen and above it.
 563   assert(   _cmsGen->reserved().word_size()  > 0
 564          && _permGen->reserved().word_size() > 0,
 565          "generations should not be of zero size");
 566   assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
 567          "_cmsGen and _permGen should not overlap");
 568   assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
 569          "_cmsGen->end() different from _permGen->start()");
 570 
 571   // For use by dirty card to oop closures.
 572   _cmsGen->cmsSpace()->set_collector(this);


 653       typedef struct OopTaskQueuePadded {
 654         OopTaskQueue work_queue;
 655         char pad[64 - sizeof(OopTaskQueue)];  // prevent false sharing
 656       } OopTaskQueuePadded;
 657 
 658       for (i = 0; i < num_queues; i++) {
 659         OopTaskQueuePadded *q_padded = new OopTaskQueuePadded();
 660         if (q_padded == NULL) {
 661           warning("work_queue allocation failure.");
 662           return;
 663         }
 664         _task_queues->register_queue(i, &q_padded->work_queue);
 665       }
 666       for (i = 0; i < num_queues; i++) {
 667         _task_queues->queue(i)->initialize();
 668         _hash_seed[i] = 17;  // copied from ParNew
 669       }
 670     }
 671   }
 672 
 673   _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
 674   _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);
 675 


 676   // Clip CMSBootstrapOccupancy between 0 and 100.
 677   _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
 678                          /(double)100;
 679 
 680   _full_gcs_since_conc_gc = 0;
 681 
 682   // Now tell CMS generations the identity of their collector
 683   ConcurrentMarkSweepGeneration::set_collector(this);
 684 
 685   // Create & start a CMS thread for this CMS collector
 686   _cmsThread = ConcurrentMarkSweepThread::start(this);
 687   assert(cmsThread() != NULL, "CMS Thread should have been created");
 688   assert(cmsThread()->collector() == this,
 689          "CMS Thread should refer to this gen");
 690   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 691 
 692   // Support for parallelizing young gen rescan
 693   GenCollectedHeap* gch = GenCollectedHeap::heap();
 694   _young_gen = gch->prev_gen(_cmsGen);
 695   if (gch->supports_inline_contig_alloc()) {
 696     _top_addr = gch->top_addr();
 697     _end_addr = gch->end_addr();


1409     }
1410   )
1411 
1412   FreelistLocker x(this);
1413   // ------------------------------------------------------------------
1414   // Print out lots of information which affects the initiation of
1415   // a collection.
1416   if (PrintCMSInitiationStatistics && stats().valid()) {
1417     gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1418     gclog_or_tty->stamp();
1419     gclog_or_tty->print_cr("");
1420     stats().print_on(gclog_or_tty);
1421     gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1422       stats().time_until_cms_gen_full());
1423     gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1424     gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1425                            _cmsGen->contiguous_available());
1426     gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1427     gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1428     gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1429     gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1430     gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
1431   }
1432   // ------------------------------------------------------------------
1433 
1434   // If the estimated time to complete a cms collection (cms_duration())
1435   // is less than the estimated time remaining until the cms generation
1436   // is full, start a collection.
1437   if (!UseCMSInitiatingOccupancyOnly) {
1438     if (stats().valid()) {
1439       if (stats().time_until_cms_start() == 0.0) {
1440         return true;
1441       }
1442     } else {
1443       // We want to conservatively collect somewhat early in order
1444       // to try and "bootstrap" our CMS/promotion statistics;
1445       // this branch will not fire after the first successful CMS
1446       // collection because the stats should then be valid.
1447       if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1448         if (Verbose && PrintGCDetails) {
1449           gclog_or_tty->print_cr(
1450             " CMSCollector: collect for bootstrapping statistics:"
1451             " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1452             _bootstrap_occupancy);
1453         }
1454         return true;
1455       }
1456     }
1457   }
1458 
1459   // Otherwise, we start a collection cycle if either the perm gen or
1460   // old gen want a collection cycle started. Each may use
1461   // an appropriate criterion for making this decision.
1462   // XXX We need to make sure that the gen expansion
1463   // criterion dovetails well with this. XXX NEED TO FIX THIS
1464   if (_cmsGen->should_concurrent_collect()) {
1465     if (Verbose && PrintGCDetails) {
1466       gclog_or_tty->print_cr("CMS old gen initiated");
1467     }
1468     return true;
1469   }
1470 
1471   // We start a collection if we believe an incremental collection may fail;
1472   // this is not likely to be productive in practice because it's probably too
1473   // late anyway.
1474   GenCollectedHeap* gch = GenCollectedHeap::heap();
1475   assert(gch->collector_policy()->is_two_generation_policy(),
1476          "You may want to check the correctness of the following");
1477   if (gch->incremental_collection_will_fail()) {
1478     if (PrintGCDetails && Verbose) {
1479       gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1480     }
1481     return true;
1482   }
1483 
1484   if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
1485     bool res = update_should_unload_classes();
1486     if (res) {
1487       if (Verbose && PrintGCDetails) {
1488         gclog_or_tty->print_cr("CMS perm gen initiated");
1489       }
1490       return true;
1491     }
1492   }
1493   return false;
1494 }
1495 
1496 // Clear _expansion_cause fields of constituent generations
1497 void CMSCollector::clear_expansion_cause() {
1498   _cmsGen->clear_expansion_cause();
1499   _permGen->clear_expansion_cause();
1500 }
1501 
1502 // We should be conservative in starting a collection cycle.  To
1503 // start too eagerly runs the risk of collecting too often in the
1504 // extreme.  To collect too rarely falls back on full collections,
1505 // which works, even if not optimum in terms of concurrent work.
1506 // As a workaround for collecting too eagerly, use the flag
1507 // UseCMSInitiatingOccupancyOnly.  This also has the advantage of
1508 // giving the user an easily understandable way of controlling the
1509 // collections.
1510 // We want to start a new collection cycle if any of the following
1511 // conditions hold:
1512 // . our current occupancy exceeds the configured initiating occupancy
1513 //   for this generation, or
1514 // . we recently needed to expand this space and have not, since that
1515 //   expansion, done a collection of this generation, or
1516 // . the underlying space believes that it may be a good idea to initiate
1517 //   a concurrent collection (this may be based on criteria such as the
1518 //   following: the space uses linear allocation and linear allocation is
1519 //   going to fail, or there is believed to be excessive fragmentation in
1520 //   the generation, etc... or ...
1521 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1522 //   the case of the old generation, not the perm generation; see CR 6543076):
1523 //   we may be approaching a point at which allocation requests may fail because
1524 //   we will be out of sufficient free space given allocation rate estimates.]
1525 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1526 
1527   assert_lock_strong(freelistLock());
1528   if (occupancy() > initiating_occupancy()) {
1529     if (PrintGCDetails && Verbose) {
1530       gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
1531         short_name(), occupancy(), initiating_occupancy());
1532     }
1533     return true;
1534   }
1535   if (UseCMSInitiatingOccupancyOnly) {
1536     return false;
1537   }
1538   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1539     if (PrintGCDetails && Verbose) {
1540       gclog_or_tty->print(" %s: collect because expanded for allocation ",
1541         short_name());
1542     }
1543     return true;
1544   }
1545   if (_cmsSpace->should_concurrent_collect()) {



1546     if (PrintGCDetails && Verbose) {
1547       gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1548         short_name());
1549     }
1550     return true;
1551   }








1552   return false;
1553 }
1554 
1555 void ConcurrentMarkSweepGeneration::collect(bool   full,
1556                                             bool   clear_all_soft_refs,
1557                                             size_t size,
1558                                             bool   tlab)
1559 {
1560   collector()->collect(full, clear_all_soft_refs, size, tlab);
1561 }
1562 
1563 void CMSCollector::collect(bool   full,
1564                            bool   clear_all_soft_refs,
1565                            size_t size,
1566                            bool   tlab)
1567 {
1568   if (!UseCMSCollectionPassing && _collectorState > Idling) {
1569     // For debugging purposes skip the collection if the state
1570     // is not currently idle
1571     if (TraceCMSState) {


1974     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1975     size_t free_size = cms_space->free();
1976     assert(free_size ==
1977            pointer_delta(cms_space->end(), cms_space->compaction_top())
1978            * HeapWordSize,
1979       "All the free space should be compacted into one chunk at top");
1980     assert(cms_space->dictionary()->totalChunkSize(
1981                                       debug_only(cms_space->freelistLock())) == 0 ||
1982            cms_space->totalSizeInIndexedFreeLists() == 0,
1983       "All the free space should be in a single chunk");
1984     size_t num = cms_space->totalCount();
1985     assert((free_size == 0 && num == 0) ||
1986            (free_size > 0  && (num == 1 || num == 2)),
1987          "There should be at most 2 free chunks after compaction");
1988   #endif // ASSERT
1989   _collectorState = Resetting;
1990   assert(_restart_addr == NULL,
1991          "Should have been NULL'd before baton was passed");
1992   reset(false /* == !asynch */);
1993   _cmsGen->reset_after_compaction();
1994   _concurrent_cycles_since_last_unload = 0;
1995 
1996   if (verifying() && !should_unload_classes()) {
1997     perm_gen_verify_bit_map()->clear_all();
1998   }
1999 
2000   // Clear any data recorded in the PLAB chunk arrays.
2001   if (_survivor_plab_array != NULL) {
2002     reset_survivor_plab_arrays();
2003   }
2004 
2005   // Adjust the per-size allocation stats for the next epoch.
2006   _cmsGen->cmsSpace()->endSweepFLCensus(sweepCount() /* fake */);
2007   // Restart the "sweep timer" for next epoch.
2008   _sweep_timer.reset();
2009   _sweep_timer.start();
2010 
2011   // Sample collection pause time and reset for collection interval.
2012   if (UseAdaptiveSizePolicy) {
2013     size_policy()->msc_collection_end(gch->gc_cause());
2014   }
2015 
2016   // For a mark-sweep-compact, compute_new_size() will be called


2103     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2104     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2105     _c->_foregroundGCShouldWait = true;
2106   }
2107 };
2108 
2109 // There are separate collect_in_background and collect_in_foreground methods
2110 // because of the different locking requirements of the background collector
2111 // and the foreground collector.  There was originally an attempt to share
2112 // one "collect" method between the background collector and the foreground
2113 // collector, but the if-then-else logic it required made it cleaner to have
2114 // separate methods.
2115 void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
2116   assert(Thread::current()->is_ConcurrentGC_thread(),
2117     "A CMS asynchronous collection is only allowed on a CMS thread.");
2118 
2119   GenCollectedHeap* gch = GenCollectedHeap::heap();
2120   {
2121     bool safepoint_check = Mutex::_no_safepoint_check_flag;
2122     MutexLockerEx hl(Heap_lock, safepoint_check);
2123     FreelistLocker fll(this);
2124     MutexLockerEx x(CGC_lock, safepoint_check);
2125     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2126       // The foreground collector is active or we're
2127       // not using asynchronous collections.  Skip this
2128       // background collection.
2129       assert(!_foregroundGCShouldWait, "Should be clear");
2130       return;
2131     } else {
2132       assert(_collectorState == Idling, "Should be idling before start.");
2133       _collectorState = InitialMarking;
2134       // Reset the expansion cause, now that we are about to begin
2135       // a new cycle.
2136       clear_expansion_cause();
2137     }
2138     // Decide if we want to enable class unloading as part of the
2139     // ensuing concurrent GC cycle.
2140     update_should_unload_classes(); 
2141     _full_gc_requested = false;           // acks all outstanding full gc requests
2142     // Signal that we are about to start a collection
2143     gch->increment_total_full_collections();  // ... starting a collection cycle
2144     _collection_count_start = gch->total_full_collections();
2145   }
2146 
2147   // Used for PrintGC
2148   size_t prev_used;
2149   if (PrintGC && Verbose) {
2150     prev_used = _cmsGen->used(); // XXXPERM
2151   }
2152 
2153   // The change of the collection state is normally done at this level;
2154   // the exceptions are phases that are executed while the world is
2155   // stopped.  For those phases the change of state is done while the
2156   // world is stopped.  For baton passing purposes this allows the
2157   // background collector to finish the phase and change state atomically.
2158   // The foreground collector cannot wait on a phase that is done
2159   // while the world is stopped because the foreground collector already
2160   // has the world stopped and would deadlock.
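       // An illustrative sketch (hypothetical, simplified; names as used
       // elsewhere in this file) of the baton-passing wait alluded to above:
       // the foreground collector waits for the background collector to reach
       // a state-change point roughly as follows (it cannot do so for phases
       // executed at a safepoint, per the note above):
       //
       //   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
       //   while (_foregroundGCShouldWait) {
       //     CGC_lock->wait(Mutex::_no_safepoint_check_flag);
       //   }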


3049   assert(_num_par_pushes >= 0, "Inconsistency");
3050   if (_overflow_list == NULL) {
3051     assert(_num_par_pushes == 0, "Inconsistency");
3052   }
3053   return _overflow_list == NULL;
3054 }
3055 
3056 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3057 // merely consolidate assertion checks that appear to occur together frequently.
3058 void CMSCollector::verify_work_stacks_empty() const {
3059   assert(_markStack.isEmpty(), "Marking stack should be empty");
3060   assert(overflow_list_is_empty(), "Overflow list should be empty");
3061 }
3062 
3063 void CMSCollector::verify_overflow_empty() const {
3064   assert(overflow_list_is_empty(), "Overflow list should be empty");
3065   assert(no_preserved_marks(), "No preserved marks");
3066 }
3067 #endif // PRODUCT
3068 
3069 // Decide if we want to enable class unloading as part of the
3070 // ensuing concurrent GC cycle. We will collect the perm gen and
3071 // unload classes if it's the case that:
3072 // (1) an explicit gc request has been made and the flag
3073 //     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3074 // (2) (a) class unloading is enabled at the command line, and
3075 //     (b) (i)   perm gen threshold has been crossed, or  
3076 //         (ii)  old gen is getting really full, or
3077 //         (iii) the previous N CMS collections did not collect the
3078 //               perm gen
3079 // NOTE: Provided there is no change in the state of the heap between
3080 // calls to this method, it should have idempotent results. Moreover,
3081 // its result should be monotonically non-decreasing (i.e. it may go from
3082 // false to true, but not from true to false) across successive calls
3083 // between which the heap was not collected. The implementation below
3084 // thus relies on the property that concurrent_cycles_since_last_unload()
3085 // will not decrease unless a collection cycle happened, and that
3086 // _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
3087 // themselves also monotonic in that sense. See check_monotonicity()
3088 // below.
3089 bool CMSCollector::update_should_unload_classes() {
3090   // Condition 1 above
3091   if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3092     _should_unload_classes = true;
3093   } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3094     // Disjuncts 2.b.(i,ii,iii) above
3095     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3096                               CMSClassUnloadingMaxInterval)
3097                            || _permGen->should_concurrent_collect()
3098                            || _cmsGen->is_too_full();
3099   }
3100   return _should_unload_classes;
3101 }
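     // An illustrative sketch (hypothetical, simplified) of the monotonicity
     // property documented above: between two calls with no intervening
     // collection, the answer may go from false to true but never back, e.g.
     //
     //   bool prev = _should_unload_classes;
     //   bool curr = update_should_unload_classes();
     //   assert(!prev || curr,
     //          "should_unload_classes should not revert to false"
     //          " without an intervening collection cycle");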
3102 
3103 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3104   bool res = should_concurrent_collect();
3105 #define CMSIsTooFullPercentage 98
3106   res = res && occupancy() > (double)CMSIsTooFullPercentage/100.0;
3107   return res;
3108 }
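     // Worked example (illustrative): with CMSIsTooFullPercentage at 98, an
     // occupancy of 0.985 reports "too full" only if should_concurrent_collect()
     // also answers true; an occupancy of 0.97 never does, whatever the other
     // triggers say.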
3109 
3110 void CMSCollector::setup_cms_unloading_and_verification_state() {
3111   const  bool should_verify =    VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3112                              || VerifyBeforeExit;
3113   const  int  rso           =    SharedHeap::SO_Symbols | SharedHeap::SO_Strings
3114                              |   SharedHeap::SO_CodeCache;
3115 
3116   if (should_unload_classes()) {   // Should unload classes this cycle
3117     remove_root_scanning_option(rso);  // Shrink the root set appropriately
3118     set_verifying(should_verify);    // Set verification state for this cycle
3119     return;                            // Nothing else needs to be done at this time
3120   }
3121 
3122   // Not unloading classes this cycle
3123   assert(!should_unload_classes(), "Inconsistency!");
3124   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3125     // We were not verifying, or we _were_ unloading classes in the last cycle,
3126     // AND some verification options are enabled this cycle; in this case,
3127     // we must make sure that the deadness map is allocated if not already so,
3128     // and cleared (if already allocated previously --
3129     // CMSBitMap::sizeInBits() is used to determine if it's allocated).
3130     if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
3131       if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
3132         warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
3133                 "permanent generation verification disabled");
3134         return;  // Note that we leave verification disabled, so we'll retry this
3135                  // allocation next cycle. We _could_ remember this failure
3136                  // and skip further attempts and permanently disable verification
3137                  // attempts if that is considered more desirable.
3138       }
3139       assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()),
3140               "_perm_gen_ver_bit_map inconsistency?");
3141     } else {
3142       perm_gen_verify_bit_map()->clear_all();
3143     }
3144     // Include symbols, strings and code cache elements to prevent their resurrection.


4741   SpecializationStats::print();
4742 }
4743 
4744 void CMSCollector::checkpointRootsFinalWork(bool asynch,
4745   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4746 
4747   NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)
4748 
4749   assert(haveFreelistLocks(), "must have free list locks");
4750   assert_lock_strong(bitMapLock());
4751 
4752   if (UseAdaptiveSizePolicy) {
4753     size_policy()->checkpoint_roots_final_begin();
4754   }
4755 
4756   ResourceMark rm;
4757   HandleMark   hm;
4758 
4759   GenCollectedHeap* gch = GenCollectedHeap::heap();
4760 
4761   if (should_unload_classes()) {
4762     CodeCache::gc_prologue();
4763   }
4764   assert(haveFreelistLocks(), "must have free list locks");
4765   assert_lock_strong(bitMapLock());
4766 
4767   if (!init_mark_was_synchronous) {
4768     // We might assume that we need not fill TLAB's when
4769     // CMSScavengeBeforeRemark is set, because we may have just done
4770     // a scavenge which would have filled all TLAB's -- and besides
4771     // Eden would be empty. This however may not always be the case --
4772     // for instance although we asked for a scavenge, it may not have
4773     // happened because of a JNI critical section. We probably need
4774     // a policy for deciding whether we can in that case wait until
4775     // the critical section releases and then do the remark following
4776     // the scavenge, and skip it here. In the absence of that policy,
4777     // or of an indication of whether the scavenge did indeed occur,
4778     // we cannot rely on TLAB's having been filled and must do
4779     // so here just in case a scavenge did not happen.
4780     gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4781     // Update the saved marks which may affect the root scans.


4801         TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
4802                     gclog_or_tty);
4803         do_remark_non_parallel();
4804       }
4805     }
4806   } else {
4807     assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
4808     // The initial mark was stop-world, so there's no rescanning to
4809     // do; go straight on to the next step below.
4810   }
4811   verify_work_stacks_empty();
4812   verify_overflow_empty();
4813 
4814   {
4815     NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
4816     refProcessingWork(asynch, clear_all_soft_refs);
4817   }
4818   verify_work_stacks_empty();
4819   verify_overflow_empty();
4820 
4821   if (should_unload_classes()) {
4822     CodeCache::gc_epilogue();
4823   }
4824 
4825   // If we encountered any (marking stack / work queue) overflow
4826   // events during the current CMS cycle, take appropriate
4827   // remedial measures, where possible, so as to try and avoid
4828   // recurrence of that condition.
4829   assert(_markStack.isEmpty(), "No grey objects");
4830   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4831                      _ser_kac_ovflw;
4832   if (ser_ovflw > 0) {
4833     if (PrintCMSStatistics != 0) {
4834       gclog_or_tty->print_cr("Marking stack overflow (benign) "
4835         "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
4836         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
4837         _ser_kac_ovflw);
4838     }
4839     _markStack.expand();
4840     _ser_pmc_remark_ovflw = 0;
4841     _ser_pmc_preclean_ovflw = 0;


5671                                 &cmsKeepAliveClosure);
5672   {
5673     TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
5674     if (rp->processing_is_mt()) {
5675       CMSRefProcTaskExecutor task_executor(*this);
5676       rp->process_discovered_references(soft_ref_policy,
5677                                         &_is_alive_closure,
5678                                         &cmsKeepAliveClosure,
5679                                         &cmsDrainMarkingStackClosure,
5680                                         &task_executor);
5681     } else {
5682       rp->process_discovered_references(soft_ref_policy,
5683                                         &_is_alive_closure,
5684                                         &cmsKeepAliveClosure,
5685                                         &cmsDrainMarkingStackClosure,
5686                                         NULL);
5687     }
5688     verify_work_stacks_empty();
5689   }
5690 
5691   if (should_unload_classes()) {
5692     {
5693       TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
5694 
5695       // Follow SystemDictionary roots and unload classes
5696       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5697 
5698       // Follow CodeCache roots and unload any methods marked for unloading
5699       CodeCache::do_unloading(&_is_alive_closure,
5700                               &cmsKeepAliveClosure,
5701                               purged_class);
5702 
5703       cmsDrainMarkingStackClosure.do_void();
5704       verify_work_stacks_empty();
5705 
5706       // Update subklass/sibling/implementor links in KlassKlass descendants
5707       assert(!_revisitStack.isEmpty(), "revisit stack should not be empty");
5708       oop k;
5709       while ((k = _revisitStack.pop()) != NULL) {
5710         ((Klass*)(oopDesc*)k)->follow_weak_klass_links(
5711                        &_is_alive_closure,


5774              "executing VM thread");
5775     }
5776   }
5777 }
5778 #endif
5779 
5780 void CMSCollector::sweep(bool asynch) {
5781   assert(_collectorState == Sweeping, "just checking");
5782   check_correct_thread_executing();
5783   verify_work_stacks_empty();
5784   verify_overflow_empty();
5785   incrementSweepCount();
5786   _sweep_timer.stop();
5787   _sweep_estimate.sample(_sweep_timer.seconds());
5788   size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
5789 
5790   // PermGen verification support: If perm gen sweeping is disabled in
5791   // this cycle, we preserve the perm gen object "deadness" information
5792   // in the perm_gen_verify_bit_map. In order to do that we traverse
5793   // all blocks in perm gen and mark all dead objects.
5794   if (verifying() && !should_unload_classes()) {
5795     assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
5796            "Should have already been allocated");
5797     MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
5798                                markBitMap(), perm_gen_verify_bit_map());
5799     if (asynch) {
5800       CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5801                                bitMapLock());
5802       _permGen->cmsSpace()->blk_iterate(&mdo);
5803     } else {
5804       // In the case of synchronous sweep, we already have
5805       // the requisite locks/tokens.
5806       _permGen->cmsSpace()->blk_iterate(&mdo);
5807     }
5808   }
5809 
5810   if (asynch) {
5811     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5812     CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
5813     // First sweep the old gen then the perm gen
5814     {
5815       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5816                                bitMapLock());
5817       sweepWork(_cmsGen, asynch);
5818     }
5819 
5820     // Now repeat for perm gen
5821     if (should_unload_classes()) {
5822       CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5823                              bitMapLock());
5824       sweepWork(_permGen, asynch);
5825     }
5826 
5827     // Update Universe::_heap_*_at_gc figures.
5828     // We need all the free list locks to make the abstract state
5829     // transition from Sweeping to Resetting. See detailed note
5830     // further below.
5831     {
5832       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5833                                _permGen->freelistLock());
5834       // Update heap occupancy information which is used as
5835       // input to soft ref clearing policy at the next gc.
5836       Universe::update_heap_info_at_gc();
5837       _collectorState = Resizing;
5838     }
5839   } else {
5840     // already have needed locks
5841     sweepWork(_cmsGen,  asynch);
5842 
5843     if (should_unload_classes()) {
5844       sweepWork(_permGen, asynch);
5845     }
5846     // Update heap occupancy information which is used as
5847     // input to soft ref clearing policy at the next gc.
5848     Universe::update_heap_info_at_gc();
5849     _collectorState = Resizing;
5850   }
5851   verify_work_stacks_empty();
5852   verify_overflow_empty();
5853 
5854   _sweep_timer.reset();
5855   _sweep_timer.start();
5856 
5857   update_time_of_last_gc(os::javaTimeMillis());
5858 
5859   // NOTE on abstract state transitions:
5860   // Mutators allocate-live and/or mark the mod-union table dirty
5861   // based on the state of the collection.  The former is done in
5862   // the interval [Marking, Sweeping] and the latter in the interval
5863   // [Marking, Sweeping).  Thus the transitions into the Marking state


5985         "Should possess CMS token to sweep");
5986   assert_lock_strong(gen->freelistLock());
5987   assert_lock_strong(bitMapLock());
5988 
5989   assert(!_sweep_timer.is_active(), "Was switched off in an outer context");
5990   gen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
5991                                       _sweep_estimate.padded_average());
5992   gen->setNearLargestChunk();
5993 
5994   {
5995     SweepClosure sweepClosure(this, gen, &_markBitMap,
5996                             CMSYield && asynch);
5997     gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5998     // We need to free-up/coalesce garbage/blocks from a
5999     // co-terminal free run. This is done in the SweepClosure
6000     // destructor; so, do not remove this scope, else the
6001     // end-of-sweep-census below will be off by a little bit.
6002   }
6003   gen->cmsSpace()->sweep_completed();
6004   gen->cmsSpace()->endSweepFLCensus(sweepCount());
6005   if (should_unload_classes()) {                // unloaded classes this cycle,
6006     _concurrent_cycles_since_last_unload = 0;   // ... reset count
6007   } else {                                      // did not unload classes,
6008     _concurrent_cycles_since_last_unload++;     // ... increment count
6009   }
6010 }
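     // Illustrative note: this counter feeds update_should_unload_classes()
     // above; with a hypothetical CMSClassUnloadingMaxInterval of 5, for
     // example, five consecutive cycles without unloading drive the counter to
     // 5, so the next cycle will request class unloading (assuming
     // CMSClassUnloadingEnabled is set).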
6011 
6012 // Reset CMS data structures (for now just the marking bit map)
6013 // preparatory for the next cycle.
6014 void CMSCollector::reset(bool asynch) {
6015   GenCollectedHeap* gch = GenCollectedHeap::heap();
6016   CMSAdaptiveSizePolicy* sp = size_policy();
6017   AdaptiveSizePolicyOutput(sp, gch->total_collections());
6018   if (asynch) {
6019     CMSTokenSyncWithLocks ts(true, bitMapLock());
6020 
6021     // If the state is not "Resetting", the foreground thread
6022     // has done a collection and the resetting.
6023     if (_collectorState != Resetting) {
6024       assert(_collectorState == Idling, "The state should only change"
6025         " because the foreground collector has finished the collection");
6026       return;
6027     }
6028 
6029     // Clear the mark bitmap (no grey objects to start with)


7247     }
7248     // anything including and to the right of _finger
7249     // will be scanned as we iterate over the remainder of the
7250     // bit map
7251   }
7252 }
7253 
7254 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7255                      MemRegion span,
7256                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
7257                      CMSMarkStack*  revisitStack,
7258                      HeapWord* finger, MarkFromRootsClosure* parent) :
7259   OopClosure(collector->ref_processor()),
7260   _collector(collector),
7261   _span(span),
7262   _bitMap(bitMap),
7263   _markStack(markStack),
7264   _revisitStack(revisitStack),
7265   _finger(finger),
7266   _parent(parent),
7267   _should_remember_klasses(collector->should_unload_classes())
7268 { }
7269 
7270 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7271                      MemRegion span,
7272                      CMSBitMap* bit_map,
7273                      OopTaskQueue* work_queue,
7274                      CMSMarkStack*  overflow_stack,
7275                      CMSMarkStack*  revisit_stack,
7276                      HeapWord* finger,
7277                      HeapWord** global_finger_addr,
7278                      Par_MarkFromRootsClosure* parent) :
7279   OopClosure(collector->ref_processor()),
7280   _collector(collector),
7281   _whole_span(collector->_span),
7282   _span(span),
7283   _bit_map(bit_map),
7284   _work_queue(work_queue),
7285   _overflow_stack(overflow_stack),
7286   _revisit_stack(revisit_stack),
7287   _finger(finger),
7288   _global_finger_addr(global_finger_addr),
7289   _parent(parent),
7290   _should_remember_klasses(collector->should_unload_classes())
7291 { }
7292 
7293 
7294 void CMSCollector::lower_restart_addr(HeapWord* low) {
7295   assert(_span.contains(low), "Out of bounds addr");
7296   if (_restart_addr == NULL) {
7297     _restart_addr = low;
7298   } else {
7299     _restart_addr = MIN2(_restart_addr, low);
7300   }
7301 }
7302 
7303 // Upon stack overflow, we discard (part of) the stack,
7304 // remembering the least address amongst those discarded
7305 // in CMSCollector's _restart_address.
7306 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7307   // Remember the least grey address discarded
7308   HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7309   _collector->lower_restart_addr(ra);
7310   _markStack->reset();  // discard stack contents


7413   }
7414 }
7415 
7416 
7417 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7418                                        MemRegion span,
7419                                        ReferenceProcessor* rp,
7420                                        CMSBitMap* bit_map,
7421                                        CMSBitMap* mod_union_table,
7422                                        CMSMarkStack*  mark_stack,
7423                                        CMSMarkStack*  revisit_stack,
7424                                        bool           concurrent_precleaning):
7425   OopClosure(rp),
7426   _collector(collector),
7427   _span(span),
7428   _bit_map(bit_map),
7429   _mod_union_table(mod_union_table),
7430   _mark_stack(mark_stack),
7431   _revisit_stack(revisit_stack),
7432   _concurrent_precleaning(concurrent_precleaning),
7433   _should_remember_klasses(collector->should_unload_classes())
7434 {
7435   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7436 }
7437 
7438 // Grey object rescan during pre-cleaning and second checkpoint phases --
7439 // the non-parallel version (the parallel version appears further below.)
7440 void PushAndMarkClosure::do_oop(oop* p) {
7441   oop    this_oop = *p;
7442   // Ignore the mark word during verification: during concurrent precleaning
7443   // the object monitor may be locked, and during the checkpoint
7444   // phases the object may already have been reached by a different
7445   // path and may be at the end of the global overflow list (so
7446   // the mark word may be NULL).
7447   assert(this_oop->is_oop_or_null(true/* ignore mark word */),
7448          "expected an oop or NULL");
7449   HeapWord* addr = (HeapWord*)this_oop;
7450   // Check if oop points into the CMS generation
7451   // and is not marked
7452   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7453     // a white object ...


7475          // in the overflow list.
7476          _collector->push_on_overflow_list(this_oop);
7477          _collector->_ser_pmc_remark_ovflw++;
7478       }
7479     }
7480   }
7481 }
7482 
7483 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7484                                                MemRegion span,
7485                                                ReferenceProcessor* rp,
7486                                                CMSBitMap* bit_map,
7487                                                OopTaskQueue* work_queue,
7488                                                CMSMarkStack* revisit_stack):
7489   OopClosure(rp),
7490   _collector(collector),
7491   _span(span),
7492   _bit_map(bit_map),
7493   _work_queue(work_queue),
7494   _revisit_stack(revisit_stack),
7495   _should_remember_klasses(collector->should_unload_classes())
7496 {
7497   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7498 }
7499 
7500 // Grey object rescan during second checkpoint phase --
7501 // the parallel version.
7502 void Par_PushAndMarkClosure::do_oop(oop* p) {
7503   oop    this_oop = *p;
7504   // In the assert below, we ignore the mark word because
7505   // this oop may point to an already visited object that is
7506   // on the overflow stack (in which case the mark word has
7507   // been hijacked for chaining into the overflow stack --
7508   // if this is the last object in the overflow stack then
7509   // its mark word will be NULL). Because this object may
7510   // have been subsequently popped off the global overflow
7511   // stack, and the mark word possibly restored to the prototypical
7512   // value, by the time we get to examine this failing assert in
7513   // the debugger, is_oop_or_null(false) may already have started
7514   // to hold.
7515   assert(this_oop->is_oop_or_null(true),


7997 
7998   // Common code path for original and adaptive free lists.
7999 
8000   // this object is live: we'd normally expect this to be
8001   // an oop, and would like to assert the following:
8002   // assert(oop(addr)->is_oop(), "live block should be an oop");
8003   // However, as we commented above, this may be an object whose
8004   // header hasn't yet been initialized.
8005   size_t size;
8006   assert(_bitMap->isMarked(addr), "Tautology for this control point");
8007   if (_bitMap->isMarked(addr + 1)) {
8008     // Determine the size from the bit map, rather than trying to
8009     // compute it from the object header.
8010     HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8011     size = pointer_delta(nextOneAddr + 1, addr);
8012     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8013            "alignment problem");
8014 
8015     #ifdef DEBUG
8016       if (oop(addr)->klass() != NULL &&
8017           (   !_collector->should_unload_classes()
8018            || oop(addr)->is_parsable())) {
8019         // Ignore mark word because we are running concurrent with mutators
8020         assert(oop(addr)->is_oop(true), "live block should be an oop");
8021         assert(size ==
8022                CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8023                "P-mark and computed size do not agree");
8024       }
8025     #endif
8026 
8027   } else {
8028     // This should be an initialized object that's alive.
8029     assert(oop(addr)->klass() != NULL &&
8030            (!_collector->should_unload_classes()
8031             || oop(addr)->is_parsable()),
8032            "Should be an initialized object");
8033     // Ignore mark word because we are running concurrent with mutators
8034     assert(oop(addr)->is_oop(true), "live block should be an oop");
8035     // Verify that the bit map has no bits marked between
8036     // addr and purported end of this block.
8037     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8038     assert(size >= 3, "Necessary for Printezis marks to work");
8039     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8040     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8041   }
8042   return size;
8043 }
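     // Worked example (illustrative) of the two branches above: in the
     // Printezis-marked case, bits are set at addr, addr+1 and at the last
     // word of the block; if the next marked word at or after addr+2 is found
     // at addr+15, then size = pointer_delta((addr+15) + 1, addr) = 16 words.
     // This is also why the other branch asserts size >= 3: a block of fewer
     // than 3 words could not carry the two leading bits plus a distinct
     // trailing bit.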
8044 
8045 void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
8046                                             size_t chunkSize) {
8047   // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
8048   // scheme.
8049   bool fcInFreeLists = fc->isFree();
8050   assert(_sp->adaptive_freelists(), "Should only be used in this case.");