--- 1.300 ---
Merged changes between child workspace "/net/spot/workspaces/ysr/cms_bugs" and parent workspace "/net/jano2/export2/hotspot/ws/main/gc_baseline".
--- 1.297.1.1 ---
6621144 CMS: assertion failure "is_cms_thread == Thread::current()->is_ConcurrentGC_thread()"


 208       }
 209     }
 210   } else {
 211     _par_gc_thread_states = NULL;
 212   }
 213   _incremental_collection_failed = false;
 214   // The "dilatation_factor" is the expansion that can occur on
 215   // account of the fact that the minimum object size in the CMS
 216   // generation may be larger than that in, say, a contiguous young
 217   //  generation.
 218   // Ideally, in the calculation below, we'd compute the dilatation
 219   // factor as: MinChunkSize/(promoting_gen's min object size)
 220   // Since we do not have such a general query interface for the
 221   // promoting generation, we'll instead just use the minimum
 222   // object size (which today is a header's worth of space);
 223   // note that all arithmetic is in units of HeapWords.
 224   assert(MinChunkSize >= oopDesc::header_size(), "just checking");
 225   assert(_dilatation_factor >= 1.0, "from previous assert");
 226 }
 227 
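A minimal standalone sketch of the relationship the comment above describes, using assumed HeapWord counts rather than this build's actual MinChunkSize and header size (the assignment of _dilatation_factor itself is in lines elided from this excerpt):

    // Illustration only: the inputs below are assumptions, not HotSpot values.
    constexpr double kMinChunkSizeWords = 4.0;  // stand-in for MinChunkSize (HeapWords)
    constexpr double kMinObjSizeWords   = 2.0;  // stand-in for the promoting gen's min object (header) size
    constexpr double kDilatationFactor  = kMinChunkSizeWords / kMinObjSizeWords;  // 2.0
    static_assert(kDilatationFactor >= 1.0, "holds whenever MinChunkSize >= header size");

Because MinChunkSize is asserted to be at least a header's worth of space (line 224), the computed factor can never drop below 1.0, which is exactly what the assert on line 225 checks.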
 228 void ConcurrentMarkSweepGeneration::ref_processor_init() {
 229   assert(collector() != NULL, "no collector");
 230   collector()->ref_processor_init();
 231 }
 232 
 233 void CMSCollector::ref_processor_init() {
 234   if (_ref_processor == NULL) {
 235     // Allocate and initialize a reference processor
 236     _ref_processor = ReferenceProcessor::create_ref_processor(
 237         _span,                               // span
 238         _cmsGen->refs_discovery_is_atomic(), // atomic_discovery
 239         _cmsGen->refs_discovery_is_mt(),     // mt_discovery
 240         &_is_alive_closure,
 241         ParallelGCThreads,
 242         ParallelRefProcEnabled);
 243     // Initialize the _ref_processor field of CMSGen
 244     _cmsGen->set_ref_processor(_ref_processor);
 245 
 246     // Allocate a dummy ref processor for perm gen.
 247     ReferenceProcessor* rp2 = new ReferenceProcessor();


 503   _eden_chunk_index(0),        // -- ditto --
 504   _survivor_plab_array(NULL),  // -- ditto --
 505   _survivor_chunk_array(NULL), // -- ditto --
 506   _survivor_chunk_capacity(0), // -- ditto --
 507   _survivor_chunk_index(0),    // -- ditto --
 508   _ser_pmc_preclean_ovflw(0),
 509   _ser_pmc_remark_ovflw(0),
 510   _par_pmc_remark_ovflw(0),
 511   _ser_kac_ovflw(0),
 512   _par_kac_ovflw(0),
 513 #ifndef PRODUCT
 514   _num_par_pushes(0),
 515 #endif
 516   _collection_count_start(0),
 517   _verifying(false),
 518   _icms_start_limit(NULL),
 519   _icms_stop_limit(NULL),
 520   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 521   _completed_initialization(false),
 522   _collector_policy(cp),
 523   _unload_classes(false),
 524   _unloaded_classes_last_cycle(false),

 525   _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
 526 {
 527   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
 528     ExplicitGCInvokesConcurrent = true;
 529   }
 530   // Now expand the span and allocate the collection support structures
 531   // (MUT, marking bit map etc.) to cover both generations subject to
 532   // collection.
 533 
 534   // First check that _permGen is adjacent to _cmsGen and above it.
 535   assert(   _cmsGen->reserved().word_size()  > 0
 536          && _permGen->reserved().word_size() > 0,
 537          "generations should not be of zero size");
 538   assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
 539          "_cmsGen and _permGen should not overlap");
 540   assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
 541          "_cmsGen->end() different from _permGen->start()");
 542 
 543   // For use by dirty card to oop closures.
 544   _cmsGen->cmsSpace()->set_collector(this);


 625       typedef struct OopTaskQueuePadded {
 626         OopTaskQueue work_queue;
 627         char pad[64 - sizeof(OopTaskQueue)];  // prevent false sharing
 628       } OopTaskQueuePadded;
 629 
 630       for (i = 0; i < num_queues; i++) {
 631         OopTaskQueuePadded *q_padded = new OopTaskQueuePadded();
 632         if (q_padded == NULL) {
 633           warning("work_queue allocation failure.");
 634           return;
 635         }
 636         _task_queues->register_queue(i, &q_padded->work_queue);
 637       }
 638       for (i = 0; i < num_queues; i++) {
 639         _task_queues->queue(i)->initialize();
 640         _hash_seed[i] = 17;  // copied from ParNew
 641       }
 642     }
 643   }
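The 64-byte pad in OopTaskQueuePadded above keeps each worker's queue on its own cache line, so GC threads updating adjacent queues do not invalidate each other's cache lines. A generic sketch of the same idiom, assuming a 64-byte cache line and a stand-in queue type (ExampleQueue below is illustrative, not a HotSpot type):

    struct ExampleQueue {
      int bottom;                           // e.g. index of the next push
      int age;                              // e.g. packed top/tag word
    };
    struct ExampleQueuePadded {
      ExampleQueue work_queue;
      char pad[64 - sizeof(ExampleQueue)];  // round each queue up to a full cache line
    };
    static_assert(sizeof(ExampleQueuePadded) == 64, "one queue per cache line");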
 644 
 645   // "initiatingOccupancy" is the occupancy ratio at which we trigger
 646   // a new collection cycle.  Unless explicitly specified via
 647   // CMSTriggerRatio, it is calculated by:
 648   //   Let "f" be MinHeapFreeRatio in
 649   //
 650   //    initiatingOccupancy = 100-f +
 651   //                         f * (CMSTriggerRatio/100)
 652   // That is, if we assume the heap is at its desired maximum occupancy at the
 653   // end of a collection, we let CMSTriggerRatio of the (purported) free
 654   // space be allocated before initiating a new collection cycle.
 655   if (CMSInitiatingOccupancyFraction > 0) {
 656     _initiatingOccupancy = (double)CMSInitiatingOccupancyFraction / 100.0;
 657   } else {
 658     _initiatingOccupancy = ((100 - MinHeapFreeRatio) +
 659                            (double)(CMSTriggerRatio *
 660                                     MinHeapFreeRatio) / 100.0)
 661                            / 100.0;
 662   }
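For illustration only, assuming MinHeapFreeRatio = 40 and CMSTriggerRatio = 80 (values assumed for the example, not necessarily this build's defaults), the else-branch above evaluates as follows:

    #include <cstdio>
    int main() {
      const double f  = 40.0;  // assumed MinHeapFreeRatio
      const double tr = 80.0;  // assumed CMSTriggerRatio
      const double occ = ((100.0 - f) + (tr * f) / 100.0) / 100.0;
      std::printf("initiatingOccupancy = %.2f\n", occ);  // prints 0.92
      return 0;
    }

That is, with those ratios a concurrent cycle is first triggered at roughly 92% occupancy of the CMS generation.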
 663   // Clip CMSBootstrapOccupancy between 0 and 100.
 664   _bootstrap_occupancy = ((double)MIN2((intx)100, MAX2((intx)0, CMSBootstrapOccupancy)))
 665                          /(double)100;
 666 
 667   _full_gcs_since_conc_gc = 0;
 668 
 669   // Now tell CMS generations the identity of their collector
 670   ConcurrentMarkSweepGeneration::set_collector(this);
 671 
 672   // Create & start a CMS thread for this CMS collector
 673   _cmsThread = ConcurrentMarkSweepThread::start(this);
 674   assert(cmsThread() != NULL, "CMS Thread should have been created");
 675   assert(cmsThread()->collector() == this,
 676          "CMS Thread should refer to this gen");
 677   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 678 
 679   // Support for parallelizing young gen rescan
 680   GenCollectedHeap* gch = GenCollectedHeap::heap();
 681   _young_gen = gch->prev_gen(_cmsGen);
 682   if (gch->supports_inline_contig_alloc()) {
 683     _top_addr = gch->top_addr();
 684     _end_addr = gch->end_addr();


1396     }
1397   )
1398 
1399   FreelistLocker x(this);
1400   // ------------------------------------------------------------------
1401   // Print out lots of information which affects the initiation of
1402   // a collection.
1403   if (PrintCMSInitiationStatistics && stats().valid()) {
1404     gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1405     gclog_or_tty->stamp();
1406     gclog_or_tty->print_cr("");
1407     stats().print_on(gclog_or_tty);
1408     gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1409       stats().time_until_cms_gen_full());
1410     gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1411     gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1412                            _cmsGen->contiguous_available());
1413     gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1414     gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1415     gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1416     gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", initiatingOccupancy());

1417   }
1418   // ------------------------------------------------------------------
1419 
1420   // If the estimated time to complete a cms collection (cms_duration())
1421   // is less than the estimated time remaining until the cms generation
1422   // is full, start a collection.
1423   if (!UseCMSInitiatingOccupancyOnly) {
1424     if (stats().valid()) {
1425       if (stats().time_until_cms_start() == 0.0) {
1426         return true;
1427       }
1428     } else {
1429       // We want to conservatively collect somewhat early in order
1430       // to try and "bootstrap" our CMS/promotion statistics;
1431       // this branch will not fire after the first successful CMS
1432       // collection because the stats should then be valid.
1433       if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1434         if (Verbose && PrintGCDetails) {
1435           gclog_or_tty->print_cr(
1436             " CMSCollector: collect for bootstrapping statistics:"
1437             " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1438             _bootstrap_occupancy);
1439         }
1440         return true;
1441       }
1442     }
1443   }
1444 
1445   // Otherwise, we start a collection cycle if either the perm gen or
1446   // old gen want a collection cycle started. Each may use
1447   // an appropriate criterion for making this decision.
1448   // XXX We need to make sure that the gen expansion
1449   // criterion dovetails well with this.
1450   if (_cmsGen->shouldConcurrentCollect(initiatingOccupancy())) {
1451     if (Verbose && PrintGCDetails) {
1452       gclog_or_tty->print_cr("CMS old gen initiated");
1453     }
1454     return true;
1455   }
1456 
1457   if (cms_should_unload_classes() &&
1458       _permGen->shouldConcurrentCollect(initiatingOccupancy())) {
1459     if (Verbose && PrintGCDetails) {
1460      gclog_or_tty->print_cr("CMS perm gen initiated");
1461     }
1462     return true;
1463   }
1464 
1465   return false;
1466 }
1467 
1468 // Clear _expansion_cause fields of constituent generations
1469 void CMSCollector::clear_expansion_cause() {
1470   _cmsGen->clear_expansion_cause();
1471   _permGen->clear_expansion_cause();
1472 }
1473 
1474 bool ConcurrentMarkSweepGeneration::shouldConcurrentCollect(
1475   double initiatingOccupancy) {
1476   // We should be conservative in starting a collection cycle.  To
1477   // start too eagerly runs the risk of collecting too often in the
1478   // extreme.  To collect too rarely falls back on full collections,
1479   // which works, even if not optimum in terms of concurrent work.
1480   // As a workaround for too eagerly collecting, use the flag
1481   // UseCMSInitiatingOccupancyOnly.  This also has the advantage of
1482   // giving the user an easily understandable way of controlling the
1483   // collections.
1484   // We want to start a new collection cycle if any of the following
1485   // conditions hold:
1486   // . our current occupancy exceeds the initiating occupancy, or
1487   // . we recently needed to expand and have not, since that expansion,
1488   //   collected, or
1489   // . we are not using adaptive free lists and linear allocation is
1490   //   going to fail, or
1491   // . (for old gen) incremental collection has already failed or
1492   //   may soon fail as we may not be able to absorb
1493   //   promotions.
1494   assert_lock_strong(freelistLock());
1495 
1496   if (occupancy() > initiatingOccupancy) {

1497     if (PrintGCDetails && Verbose) {
1498       gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
1499         short_name(), occupancy(), initiatingOccupancy);
1500     }
1501     return true;
1502   }
1503   if (UseCMSInitiatingOccupancyOnly) {
1504     return false;
1505   }
1506   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1507     if (PrintGCDetails && Verbose) {
1508       gclog_or_tty->print(" %s: collect because expanded for allocation ",
1509         short_name());
1510     }
1511     return true;
1512   }
1513   GenCollectedHeap* gch = GenCollectedHeap::heap();
1514   assert(gch->collector_policy()->is_two_generation_policy(),
1515          "You may want to check the correctness of the following");
1516   if (gch->incremental_collection_will_fail()) {
1517     if (PrintGCDetails && Verbose) {
1518       gclog_or_tty->print(" %s: collect because incremental collection will fail ",
1519         short_name());
1520     }
1521     return true;
1522   }
1523   if (!_cmsSpace->adaptive_freelists() &&
1524       _cmsSpace->linearAllocationWouldFail()) {
1525     if (PrintGCDetails && Verbose) {
1526       gclog_or_tty->print(" %s: collect because of linAB ",
1527         short_name());
1528     }
1529     return true;
1530   }
1531   return false;
1532 }
1533 
1534 void ConcurrentMarkSweepGeneration::collect(bool   full,
1535                                             bool   clear_all_soft_refs,
1536                                             size_t size,
1537                                             bool   tlab)
1538 {
1539   collector()->collect(full, clear_all_soft_refs, size, tlab);
1540 }
1541 
1542 void CMSCollector::collect(bool   full,
1543                            bool   clear_all_soft_refs,
1544                            size_t size,
1545                            bool   tlab)
1546 {
1547   if (!UseCMSCollectionPassing && _collectorState > Idling) {
1548     // For debugging purposes skip the collection if the state
1549     // is not currently idle
1550     if (TraceCMSState) {


1953     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1954     size_t free_size = cms_space->free();
1955     assert(free_size ==
1956            pointer_delta(cms_space->end(), cms_space->compaction_top())
1957            * HeapWordSize,
1958       "All the free space should be compacted into one chunk at top");
1959     assert(cms_space->dictionary()->totalChunkSize(
1960                                       debug_only(cms_space->freelistLock())) == 0 ||
1961            cms_space->totalSizeInIndexedFreeLists() == 0,
1962       "All the free space should be in a single chunk");
1963     size_t num = cms_space->totalCount();
1964     assert((free_size == 0 && num == 0) ||
1965            (free_size > 0  && (num == 1 || num == 2)),
1966          "There should be at most 2 free chunks after compaction");
1967   #endif // ASSERT
1968   _collectorState = Resetting;
1969   assert(_restart_addr == NULL,
1970          "Should have been NULL'd before baton was passed");
1971   reset(false /* == !asynch */);
1972   _cmsGen->reset_after_compaction();

1973 
1974   if (verifying() && !cms_should_unload_classes()) {
1975     perm_gen_verify_bit_map()->clear_all();
1976   }
1977 
1978   // Clear any data recorded in the PLAB chunk arrays.
1979   if (_survivor_plab_array != NULL) {
1980     reset_survivor_plab_arrays();
1981   }
1982 
1983   // Adjust the per-size allocation stats for the next epoch.
1984   _cmsGen->cmsSpace()->endSweepFLCensus(sweepCount() /* fake */);
1985   // Restart the "sweep timer" for next epoch.
1986   _sweep_timer.reset();
1987   _sweep_timer.start();
1988 
1989   // Sample collection pause time and reset for collection interval.
1990   if (UseAdaptiveSizePolicy) {
1991     size_policy()->msc_collection_end(gch->gc_cause());
1992   }
1993 
1994   // For a mark-sweep-compact, compute_new_size() will be called
1995   // in the heap's do_collection() method.
1996 }
1997 
1998 // A work method used by the foreground collector to do
1999 // a mark-sweep, after taking over from a possibly on-going
2000 // concurrent mark-sweep collection.
2001 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2002   CollectorState first_state, bool should_start_over) {
2003   if (PrintGC && Verbose) {
2004     gclog_or_tty->print_cr("Pass concurrent collection to foreground "


2081     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2082     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2083     _c->_foregroundGCShouldWait = true;
2084   }
2085 };
2086 
2087 // There are separate collect_in_background and collect_in_foreground because of
2088 // the different locking requirements of the background collector and the
2089 // foreground collector.  There was originally an attempt to share
2090 // one "collect" method between the background collector and the foreground
2091   // collector, but the amount of if-then-else required made it cleaner to have
2092 // separate methods.
2093 void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
2094   assert(Thread::current()->is_ConcurrentGC_thread(),
2095     "A CMS asynchronous collection is only allowed on a CMS thread.");
2096 
2097   GenCollectedHeap* gch = GenCollectedHeap::heap();
2098   {
2099     bool safepoint_check = Mutex::_no_safepoint_check_flag;
2100     MutexLockerEx hl(Heap_lock, safepoint_check);

2101     MutexLockerEx x(CGC_lock, safepoint_check);
2102     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2103       // The foreground collector is active or we're
2104       // not using asynchronous collections.  Skip this
2105       // background collection.
2106       assert(!_foregroundGCShouldWait, "Should be clear");
2107       return;
2108     } else {
2109       assert(_collectorState == Idling, "Should be idling before start.");
2110       _collectorState = InitialMarking;
2111       // Reset the expansion cause, now that we are about to begin
2112       // a new cycle.
2113       clear_expansion_cause();
2114     }
2115     _unloaded_classes_last_cycle = cms_should_unload_classes(); // ... from last cycle
2116     // This controls class unloading in response to an explicit gc request.
2117     // If ExplicitGCInvokesConcurrentAndUnloadsClasses is set, then
2118     // we will unload classes even if CMSClassUnloadingEnabled is not set.
2119     // See CR 6541037 and related CRs.
2120     _unload_classes = _full_gc_requested                      // ... for this cycle
2121                       && ExplicitGCInvokesConcurrentAndUnloadsClasses;
2122     _full_gc_requested = false;           // acks all outstanding full gc requests
2123     // Signal that we are about to start a collection
2124     gch->increment_total_full_collections();  // ... starting a collection cycle
2125     _collection_count_start = gch->total_full_collections();
2126   }
2127 
2128   // Used for PrintGC
2129   size_t prev_used;
2130   if (PrintGC && Verbose) {
2131     prev_used = _cmsGen->used(); // XXXPERM
2132   }
2133 
2134   // The change of the collection state is normally done at this level;
2135   // the exceptions are phases that are executed while the world is
2136   // stopped.  For those phases the change of state is done while the
2137   // world is stopped.  For baton passing purposes this allows the
2138   // background collector to finish the phase and change state atomically.
2139   // The foreground collector cannot wait on a phase that is done
2140   // while the world is stopped because the foreground collector already
2141   // has the world stopped and would deadlock.


3030   assert(_num_par_pushes >= 0, "Inconsistency");
3031   if (_overflow_list == NULL) {
3032     assert(_num_par_pushes == 0, "Inconsistency");
3033   }
3034   return _overflow_list == NULL;
3035 }
3036 
3037 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3038 // merely consolidate assertion checks that appear to occur together frequently.
3039 void CMSCollector::verify_work_stacks_empty() const {
3040   assert(_markStack.isEmpty(), "Marking stack should be empty");
3041   assert(overflow_list_is_empty(), "Overflow list should be empty");
3042 }
3043 
3044 void CMSCollector::verify_overflow_empty() const {
3045   assert(overflow_list_is_empty(), "Overflow list should be empty");
3046   assert(no_preserved_marks(), "No preserved marks");
3047 }
3048 #endif // PRODUCT
3049 
3050 void CMSCollector::setup_cms_unloading_and_verification_state() {
3051   const  bool should_verify =    VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3052                              || VerifyBeforeExit;
3053   const  int  rso           =    SharedHeap::SO_Symbols | SharedHeap::SO_Strings
3054                              |   SharedHeap::SO_CodeCache;
3055 
3056   if (cms_should_unload_classes()) {   // Should unload classes this cycle
3057     remove_root_scanning_option(rso);  // Shrink the root set appropriately
3058     set_verifying(should_verify);    // Set verification state for this cycle
3059     return;                            // Nothing else needs to be done at this time
3060   }
3061 
3062   // Not unloading classes this cycle
 3063   assert(!cms_should_unload_classes(), "Inconsistency!");
3064   if ((!verifying() || cms_unloaded_classes_last_cycle()) && should_verify) {
3065     // We were not verifying, or we _were_ unloading classes in the last cycle,
3066     // AND some verification options are enabled this cycle; in this case,
3067     // we must make sure that the deadness map is allocated if not already so,
3068     // and cleared (if already allocated previously --
3069     // CMSBitMap::sizeInBits() is used to determine if it's allocated).
3070     if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
3071       if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
3072         warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
3073                 "permanent generation verification disabled");
3074         return;  // Note that we leave verification disabled, so we'll retry this
3075                  // allocation next cycle. We _could_ remember this failure
3076                  // and skip further attempts and permanently disable verification
3077                  // attempts if that is considered more desirable.
3078       }
3079       assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()),
3080               "_perm_gen_ver_bit_map inconsistency?");
3081     } else {
3082       perm_gen_verify_bit_map()->clear_all();
3083     }
3084     // Include symbols, strings and code cache elements to prevent their resurrection.


4681   SpecializationStats::print();
4682 }
4683 
4684 void CMSCollector::checkpointRootsFinalWork(bool asynch,
4685   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4686 
4687   NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)
4688 
4689   assert(haveFreelistLocks(), "must have free list locks");
4690   assert_lock_strong(bitMapLock());
4691 
4692   if (UseAdaptiveSizePolicy) {
4693     size_policy()->checkpoint_roots_final_begin();
4694   }
4695 
4696   ResourceMark rm;
4697   HandleMark   hm;
4698 
4699   GenCollectedHeap* gch = GenCollectedHeap::heap();
4700 
4701   if (cms_should_unload_classes()) {
4702     CodeCache::gc_prologue();
4703   }
4704   assert(haveFreelistLocks(), "must have free list locks");
4705   assert_lock_strong(bitMapLock());
4706 
4707   if (!init_mark_was_synchronous) {
4708     // We might assume that we need not fill TLAB's when
4709     // CMSScavengeBeforeRemark is set, because we may have just done
4710     // a scavenge which would have filled all TLAB's -- and besides
4711     // Eden would be empty. This however may not always be the case --
4712     // for instance although we asked for a scavenge, it may not have
4713     // happened because of a JNI critical section. We probably need
4714     // a policy for deciding whether we can in that case wait until
4715     // the critical section releases and then do the remark following
4716     // the scavenge, and skip it here. In the absence of that policy,
4717     // or of an indication of whether the scavenge did indeed occur,
4718     // we cannot rely on TLAB's having been filled and must do
4719     // so here just in case a scavenge did not happen.
4720     gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4721     // Update the saved marks which may affect the root scans.


4741         TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
4742                     gclog_or_tty);
4743         do_remark_non_parallel();
4744       }
4745     }
4746   } else {
4747     assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
4748     // The initial mark was stop-world, so there's no rescanning to
4749     // do; go straight on to the next step below.
4750   }
4751   verify_work_stacks_empty();
4752   verify_overflow_empty();
4753 
4754   {
4755     NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
4756     refProcessingWork(asynch, clear_all_soft_refs);
4757   }
4758   verify_work_stacks_empty();
4759   verify_overflow_empty();
4760 
4761   if (cms_should_unload_classes()) {
4762     CodeCache::gc_epilogue();
4763   }
4764 
4765   // If we encountered any (marking stack / work queue) overflow
4766   // events during the current CMS cycle, take appropriate
4767   // remedial measures, where possible, so as to try and avoid
4768   // recurrence of that condition.
4769   assert(_markStack.isEmpty(), "No grey objects");
4770   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4771                      _ser_kac_ovflw;
4772   if (ser_ovflw > 0) {
4773     if (PrintCMSStatistics != 0) {
4774       gclog_or_tty->print_cr("Marking stack overflow (benign) "
4775         "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
4776         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
4777         _ser_kac_ovflw);
4778     }
4779     _markStack.expand();
4780     _ser_pmc_remark_ovflw = 0;
4781     _ser_pmc_preclean_ovflw = 0;


5611                                 &cmsKeepAliveClosure);
5612   {
5613     TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
5614     if (rp->processing_is_mt()) {
5615       CMSRefProcTaskExecutor task_executor(*this);
5616       rp->process_discovered_references(soft_ref_policy,
5617                                         &_is_alive_closure,
5618                                         &cmsKeepAliveClosure,
5619                                         &cmsDrainMarkingStackClosure,
5620                                         &task_executor);
5621     } else {
5622       rp->process_discovered_references(soft_ref_policy,
5623                                         &_is_alive_closure,
5624                                         &cmsKeepAliveClosure,
5625                                         &cmsDrainMarkingStackClosure,
5626                                         NULL);
5627     }
5628     verify_work_stacks_empty();
5629   }
5630 
5631   if (cms_should_unload_classes()) {
5632     {
5633       TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
5634 
5635       // Follow SystemDictionary roots and unload classes
5636       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5637 
5638       // Follow CodeCache roots and unload any methods marked for unloading
5639       CodeCache::do_unloading(&_is_alive_closure,
5640                               &cmsKeepAliveClosure,
5641                               purged_class);
5642 
5643       cmsDrainMarkingStackClosure.do_void();
5644       verify_work_stacks_empty();
5645 
5646       // Update subklass/sibling/implementor links in KlassKlass descendants
5647       assert(!_revisitStack.isEmpty(), "revisit stack should not be empty");
5648       oop k;
5649       while ((k = _revisitStack.pop()) != NULL) {
5650         ((Klass*)(oopDesc*)k)->follow_weak_klass_links(
5651                        &_is_alive_closure,


5705     // We can be the CMS thread only if we are in a stop-world
5706     // phase of CMS collection.
5707     if (t->is_ConcurrentGC_thread()) {
5708       assert(_collectorState == InitialMarking ||
5709              _collectorState == FinalMarking,
5710              "Should be a stop-world phase");
5711       // The CMS thread should be holding the CMS_token.
5712       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5713              "Potential interference with concurrently "
5714              "executing VM thread");
5715     }
5716   }
5717 }
5718 #endif
5719 
5720 void CMSCollector::sweep(bool asynch) {
5721   assert(_collectorState == Sweeping, "just checking");
5722   check_correct_thread_executing();
5723   verify_work_stacks_empty();
5724   verify_overflow_empty();
5725   incrementSweepCount();
5726   _sweep_timer.stop();
5727   _sweep_estimate.sample(_sweep_timer.seconds());
5728   size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
5729 
5730   // PermGen verification support: If perm gen sweeping is disabled in
5731   // this cycle, we preserve the perm gen object "deadness" information
5732   // in the perm_gen_verify_bit_map. In order to do that we traverse
5733   // all blocks in perm gen and mark all dead objects.
5734   if (verifying() && !cms_should_unload_classes()) {
5735     CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5736                              bitMapLock());
5737     assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
5738            "Should have already been allocated");
5739     MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
5740                                markBitMap(), perm_gen_verify_bit_map());
5741     _permGen->cmsSpace()->blk_iterate(&mdo);
5742   }

5743 
5744   if (asynch) {
5745     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5746     CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
5747     // First sweep the old gen then the perm gen
5748     {
5749       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5750                                bitMapLock());
5751       sweepWork(_cmsGen, asynch);
5752     }
5753 
5754     // Now repeat for perm gen
5755     if (cms_should_unload_classes()) {
5756       CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5757                              bitMapLock());
5758       sweepWork(_permGen, asynch);
5759     }
5760 
5761     // Update Universe::_heap_*_at_gc figures.
5762     // We need all the free list locks to make the abstract state
5763     // transition from Sweeping to Resetting. See detailed note
5764     // further below.
5765     {
5766       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5767                                _permGen->freelistLock());
5768       // Update heap occupancy information which is used as
5769       // input to soft ref clearing policy at the next gc.
5770       Universe::update_heap_info_at_gc();
5771       _collectorState = Resizing;
5772     }
5773   } else {
5774     // already have needed locks
5775     sweepWork(_cmsGen,  asynch);
5776 
5777     if (cms_should_unload_classes()) {
5778       sweepWork(_permGen, asynch);
5779     }
5780     // Update heap occupancy information which is used as
5781     // input to soft ref clearing policy at the next gc.
5782     Universe::update_heap_info_at_gc();
5783     _collectorState = Resizing;
5784   }
5785   verify_work_stacks_empty();
5786   verify_overflow_empty();
5787 
5788   _sweep_timer.reset();
5789   _sweep_timer.start();
5790 
5791   update_time_of_last_gc(os::javaTimeMillis());
5792 
5793   // NOTE on abstract state transitions:
5794   // Mutators allocate-live and/or mark the mod-union table dirty
5795   // based on the state of the collection.  The former is done in
5796   // the interval [Marking, Sweeping] and the latter in the interval
5797   // [Marking, Sweeping).  Thus the transitions into the Marking state


5918          || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
5919         "Should possess CMS token to sweep");
5920   assert_lock_strong(gen->freelistLock());
5921   assert_lock_strong(bitMapLock());
5922 
5923   assert(!_sweep_timer.is_active(), "Was switched off in an outer context");
5924   gen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
5925                                       _sweep_estimate.padded_average());
5926   gen->setNearLargestChunk();
5927 
5928   {
5929     SweepClosure sweepClosure(this, gen, &_markBitMap,
5930                             CMSYield && asynch);
5931     gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5932     // We need to free-up/coalesce garbage/blocks from a
5933     // co-terminal free run. This is done in the SweepClosure
5934     // destructor; so, do not remove this scope, else the
5935     // end-of-sweep-census below will be off by a little bit.
5936   }
5937   gen->cmsSpace()->sweep_completed();
5938   gen->cmsSpace()->endSweepFLCensus(sweepCount());
5939 }
5940 
5941 // Reset CMS data structures (for now just the marking bit map)
5942 // preparatory for the next cycle.
5943 void CMSCollector::reset(bool asynch) {
5944   GenCollectedHeap* gch = GenCollectedHeap::heap();
5945   CMSAdaptiveSizePolicy* sp = size_policy();
5946   AdaptiveSizePolicyOutput(sp, gch->total_collections());
5947   if (asynch) {
5948     CMSTokenSyncWithLocks ts(true, bitMapLock());
5949 
5950     // If the state is not "Resetting", the foreground  thread
5951     // has done a collection and the resetting.
5952     if (_collectorState != Resetting) {
5953       assert(_collectorState == Idling, "The state should only change"
5954         " because the foreground collector has finished the collection");
5955       return;
5956     }
5957 
5958     // Clear the mark bitmap (no grey objects to start with)


7176     }
7177     // anything including and to the right of _finger
7178     // will be scanned as we iterate over the remainder of the
7179     // bit map
7180   }
7181 }
7182 
7183 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7184                      MemRegion span,
7185                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
7186                      CMSMarkStack*  revisitStack,
7187                      HeapWord* finger, MarkFromRootsClosure* parent) :
7188   OopClosure(collector->ref_processor()),
7189   _collector(collector),
7190   _span(span),
7191   _bitMap(bitMap),
7192   _markStack(markStack),
7193   _revisitStack(revisitStack),
7194   _finger(finger),
7195   _parent(parent),
7196   _should_remember_klasses(collector->cms_should_unload_classes())
7197 { }
7198 
7199 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7200                      MemRegion span,
7201                      CMSBitMap* bit_map,
7202                      OopTaskQueue* work_queue,
7203                      CMSMarkStack*  overflow_stack,
7204                      CMSMarkStack*  revisit_stack,
7205                      HeapWord* finger,
7206                      HeapWord** global_finger_addr,
7207                      Par_MarkFromRootsClosure* parent) :
7208   OopClosure(collector->ref_processor()),
7209   _collector(collector),
7210   _whole_span(collector->_span),
7211   _span(span),
7212   _bit_map(bit_map),
7213   _work_queue(work_queue),
7214   _overflow_stack(overflow_stack),
7215   _revisit_stack(revisit_stack),
7216   _finger(finger),
7217   _global_finger_addr(global_finger_addr),
7218   _parent(parent),
7219   _should_remember_klasses(collector->cms_should_unload_classes())
7220 { }
7221 
7222 
7223 void CMSCollector::lower_restart_addr(HeapWord* low) {
7224   assert(_span.contains(low), "Out of bounds addr");
7225   if (_restart_addr == NULL) {
7226     _restart_addr = low;
7227   } else {
7228     _restart_addr = MIN2(_restart_addr, low);
7229   }
7230 }
7231 
7232 // Upon stack overflow, we discard (part of) the stack,
7233 // remembering the least address amongst those discarded
7234 // in CMSCollector's _restart_address.
7235 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7236   // Remember the least grey address discarded
7237   HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7238   _collector->lower_restart_addr(ra);
7239   _markStack->reset();  // discard stack contents


7342   }
7343 }
7344 
7345 
7346 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7347                                        MemRegion span,
7348                                        ReferenceProcessor* rp,
7349                                        CMSBitMap* bit_map,
7350                                        CMSBitMap* mod_union_table,
7351                                        CMSMarkStack*  mark_stack,
7352                                        CMSMarkStack*  revisit_stack,
7353                                        bool           concurrent_precleaning):
7354   OopClosure(rp),
7355   _collector(collector),
7356   _span(span),
7357   _bit_map(bit_map),
7358   _mod_union_table(mod_union_table),
7359   _mark_stack(mark_stack),
7360   _revisit_stack(revisit_stack),
7361   _concurrent_precleaning(concurrent_precleaning),
7362   _should_remember_klasses(collector->cms_should_unload_classes())
7363 {
7364   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7365 }
7366 
7367 // Grey object rescan during pre-cleaning and second checkpoint phases --
7368 // the non-parallel version (the parallel version appears further below.)
7369 void PushAndMarkClosure::do_oop(oop* p) {
7370   oop    this_oop = *p;
7371   // Ignore mark word verification. If during concurrent precleaning
7372   // the object monitor may be locked. If during the checkpoint
7373   // phases, the object may already have been reached by a  different
7374   // path and may be at the end of the global overflow list (so
7375   // the mark word may be NULL).
7376   assert(this_oop->is_oop_or_null(true/* ignore mark word */),
7377          "expected an oop or NULL");
7378   HeapWord* addr = (HeapWord*)this_oop;
7379   // Check if oop points into the CMS generation
7380   // and is not marked
7381   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7382     // a white object ...


7404          // in the overflow list.
7405          _collector->push_on_overflow_list(this_oop);
7406          _collector->_ser_pmc_remark_ovflw++;
7407       }
7408     }
7409   }
7410 }
7411 
7412 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7413                                                MemRegion span,
7414                                                ReferenceProcessor* rp,
7415                                                CMSBitMap* bit_map,
7416                                                OopTaskQueue* work_queue,
7417                                                CMSMarkStack* revisit_stack):
7418   OopClosure(rp),
7419   _collector(collector),
7420   _span(span),
7421   _bit_map(bit_map),
7422   _work_queue(work_queue),
7423   _revisit_stack(revisit_stack),
7424   _should_remember_klasses(collector->cms_should_unload_classes())
7425 {
7426   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7427 }
7428 
7429 // Grey object rescan during second checkpoint phase --
7430 // the parallel version.
7431 void Par_PushAndMarkClosure::do_oop(oop* p) {
7432   oop    this_oop = *p;
7433   // In the assert below, we ignore the mark word because
7434   // this oop may point to an already visited object that is
7435   // on the overflow stack (in which case the mark word has
7436   // been hijacked for chaining into the overflow stack --
7437   // if this is the last object in the overflow stack then
7438   // its mark word will be NULL). Because this object may
7439   // have been subsequently popped off the global overflow
7440   // stack, and the mark word possibly restored to the prototypical
7441   // value, by the time we get to examine this failing assert in
7442   // the debugger, is_oop_or_null(false) may subsequently start
7443   // to hold.
7444   assert(this_oop->is_oop_or_null(true),


7926 
7927   // Common code path for original and adaptive free lists.
7928 
7929   // this object is live: we'd normally expect this to be
7930   // an oop, and like to assert the following:
7931   // assert(oop(addr)->is_oop(), "live block should be an oop");
7932   // However, as we commented above, this may be an object whose
7933   // header hasn't yet been initialized.
7934   size_t size;
7935   assert(_bitMap->isMarked(addr), "Tautology for this control point");
7936   if (_bitMap->isMarked(addr + 1)) {
7937     // Determine the size from the bit map, rather than trying to
7938     // compute it from the object header.
7939     HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7940     size = pointer_delta(nextOneAddr + 1, addr);
7941     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7942            "alignment problem");
7943 
7944     #ifdef DEBUG
7945       if (oop(addr)->klass() != NULL &&
7946           (   !_collector->cms_should_unload_classes()
7947            || oop(addr)->is_parsable())) {
7948         // Ignore mark word because we are running concurrent with mutators
7949         assert(oop(addr)->is_oop(true), "live block should be an oop");
7950         assert(size ==
7951                CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7952                "P-mark and computed size do not agree");
7953       }
7954     #endif
7955 
7956   } else {
7957     // This should be an initialized object that's alive.
7958     assert(oop(addr)->klass() != NULL &&
7959            (!_collector->cms_should_unload_classes()
7960             || oop(addr)->is_parsable()),
7961            "Should be an initialized object");
7962     // Ignore mark word because we are running concurrent with mutators
7963     assert(oop(addr)->is_oop(true), "live block should be an oop");
7964     // Verify that the bit map has no bits marked between
7965     // addr and purported end of this block.
7966     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7967     assert(size >= 3, "Necessary for Printezis marks to work");
7968     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7969     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7970   }
7971   return size;
7972 }
7973 
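An illustrative reading of the bit-map sizing path above, using hypothetical HeapWord offsets rather than real heap addresses: suppose the bit map has bits set at addr and addr+1, and the next marked bit found at or beyond addr+2 lies at addr+9. Then nextOneAddr is addr+9 and size = pointer_delta(nextOneAddr + 1, addr) = 10 HeapWords; the trailing mark sits on the last word of the block, so its extent can be recovered without reading a header that may not yet be initialized.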
7974 void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
7975                                             size_t chunkSize) {
7976   // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
7977   // scheme.
7978   bool fcInFreeLists = fc->isFree();
7979   assert(_sp->adaptive_freelists(), "Should only be used in this case.");




 208       }
 209     }
 210   } else {
 211     _par_gc_thread_states = NULL;
 212   }
 213   _incremental_collection_failed = false;
 214   // The "dilatation_factor" is the expansion that can occur on
 215   // account of the fact that the minimum object size in the CMS
 216   // generation may be larger than that in, say, a contiguous young
 217   //  generation.
 218   // Ideally, in the calculation below, we'd compute the dilatation
 219   // factor as: MinChunkSize/(promoting_gen's min object size)
 220   // Since we do not have such a general query interface for the
 221   // promoting generation, we'll instead just use the minimum
 222   // object size (which today is a header's worth of space);
 223   // note that all arithmetic is in units of HeapWords.
 224   assert(MinChunkSize >= oopDesc::header_size(), "just checking");
 225   assert(_dilatation_factor >= 1.0, "from previous assert");
 226 }
 227 
 228 
 229 // The field "_initiating_occupancy" represents the occupancy percentage
 230 // at which we trigger a new collection cycle.  Unless explicitly specified
 231 // via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
 232 // is calculated by:
 233 //
 234 //   Let "f" be MinHeapFreeRatio in
 235 //
 236   //    _initiating_occupancy = 100-f +
 237 //                           f * (CMSTrigger[Perm]Ratio/100)
 238 //   where CMSTrigger[Perm]Ratio is the argument "tr" below.
 239 //
 240 // That is, if we assume the heap is at its desired maximum occupancy at the
 241 // end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
 242 // space be allocated before initiating a new collection cycle.
 243 //
 244 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
 245   assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
 246   if (io >= 0) {
 247     _initiating_occupancy = (double)io / 100.0;
 248   } else {
 249     _initiating_occupancy = ((100 - MinHeapFreeRatio) +
 250                              (double)(tr * MinHeapFreeRatio) / 100.0)
 251                             / 100.0;
 252   }
 253 }
 254 
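A standalone sketch of the selection logic in init_initiating_occupancy() above, convenient for checking both branches by hand (the free function and values below are local to this example, not HotSpot code):

    // io: an explicit CMSInitiating[Perm]OccupancyFraction-style value, or negative if unset.
    // tr: a CMSTrigger[Perm]Ratio-style value in [0, 100].
    double initiating_occupancy(long io, long tr, long min_heap_free_ratio) {
      if (io >= 0) {
        return (double)io / 100.0;                 // an explicit fraction wins
      }
      return ((100 - min_heap_free_ratio) +
              (double)(tr * min_heap_free_ratio) / 100.0) / 100.0;
    }
    // e.g. initiating_occupancy(75, 80, 40) == 0.75
    //      initiating_occupancy(-1, 80, 40) == 0.92

The constructor later calls the real method once for the CMS generation and once for the perm generation with their respective flags (source lines 674-675 below).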
 255 
 256 void ConcurrentMarkSweepGeneration::ref_processor_init() {
 257   assert(collector() != NULL, "no collector");
 258   collector()->ref_processor_init();
 259 }
 260 
 261 void CMSCollector::ref_processor_init() {
 262   if (_ref_processor == NULL) {
 263     // Allocate and initialize a reference processor
 264     _ref_processor = ReferenceProcessor::create_ref_processor(
 265         _span,                               // span
 266         _cmsGen->refs_discovery_is_atomic(), // atomic_discovery
 267         _cmsGen->refs_discovery_is_mt(),     // mt_discovery
 268         &_is_alive_closure,
 269         ParallelGCThreads,
 270         ParallelRefProcEnabled);
 271     // Initialize the _ref_processor field of CMSGen
 272     _cmsGen->set_ref_processor(_ref_processor);
 273 
 274     // Allocate a dummy ref processor for perm gen.
 275     ReferenceProcessor* rp2 = new ReferenceProcessor();


 531   _eden_chunk_index(0),        // -- ditto --
 532   _survivor_plab_array(NULL),  // -- ditto --
 533   _survivor_chunk_array(NULL), // -- ditto --
 534   _survivor_chunk_capacity(0), // -- ditto --
 535   _survivor_chunk_index(0),    // -- ditto --
 536   _ser_pmc_preclean_ovflw(0),
 537   _ser_pmc_remark_ovflw(0),
 538   _par_pmc_remark_ovflw(0),
 539   _ser_kac_ovflw(0),
 540   _par_kac_ovflw(0),
 541 #ifndef PRODUCT
 542   _num_par_pushes(0),
 543 #endif
 544   _collection_count_start(0),
 545   _verifying(false),
 546   _icms_start_limit(NULL),
 547   _icms_stop_limit(NULL),
 548   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 549   _completed_initialization(false),
 550   _collector_policy(cp),
 551   _should_unload_classes(false),
 552   _concurrent_cycles_since_last_unload(0),
 553   _sweep_count(0),
 554   _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
 555 {
 556   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
 557     ExplicitGCInvokesConcurrent = true;
 558   }
 559   // Now expand the span and allocate the collection support structures
 560   // (MUT, marking bit map etc.) to cover both generations subject to
 561   // collection.
 562 
 563   // First check that _permGen is adjacent to _cmsGen and above it.
 564   assert(   _cmsGen->reserved().word_size()  > 0
 565          && _permGen->reserved().word_size() > 0,
 566          "generations should not be of zero size");
 567   assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
 568          "_cmsGen and _permGen should not overlap");
 569   assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
 570          "_cmsGen->end() different from _permGen->start()");
 571 
 572   // For use by dirty card to oop closures.
 573   _cmsGen->cmsSpace()->set_collector(this);


 654       typedef struct OopTaskQueuePadded {
 655         OopTaskQueue work_queue;
 656         char pad[64 - sizeof(OopTaskQueue)];  // prevent false sharing
 657       } OopTaskQueuePadded;
 658 
 659       for (i = 0; i < num_queues; i++) {
 660         OopTaskQueuePadded *q_padded = new OopTaskQueuePadded();
 661         if (q_padded == NULL) {
 662           warning("work_queue allocation failure.");
 663           return;
 664         }
 665         _task_queues->register_queue(i, &q_padded->work_queue);
 666       }
 667       for (i = 0; i < num_queues; i++) {
 668         _task_queues->queue(i)->initialize();
 669         _hash_seed[i] = 17;  // copied from ParNew
 670       }
 671     }
 672   }
 673 
 674   _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
 675   _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);
 676 
 677   // Clip CMSBootstrapOccupancy between 0 and 100.
 678   _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
 679                          /(double)100;
 680 
 681   _full_gcs_since_conc_gc = 0;
 682 
 683   // Now tell CMS generations the identity of their collector
 684   ConcurrentMarkSweepGeneration::set_collector(this);
 685 
 686   // Create & start a CMS thread for this CMS collector
 687   _cmsThread = ConcurrentMarkSweepThread::start(this);
 688   assert(cmsThread() != NULL, "CMS Thread should have been created");
 689   assert(cmsThread()->collector() == this,
 690          "CMS Thread should refer to this gen");
 691   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 692 
 693   // Support for parallelizing young gen rescan
 694   GenCollectedHeap* gch = GenCollectedHeap::heap();
 695   _young_gen = gch->prev_gen(_cmsGen);
 696   if (gch->supports_inline_contig_alloc()) {
 697     _top_addr = gch->top_addr();
 698     _end_addr = gch->end_addr();


1410     }
1411   )
1412 
1413   FreelistLocker x(this);
1414   // ------------------------------------------------------------------
1415   // Print out lots of information which affects the initiation of
1416   // a collection.
1417   if (PrintCMSInitiationStatistics && stats().valid()) {
1418     gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1419     gclog_or_tty->stamp();
1420     gclog_or_tty->print_cr("");
1421     stats().print_on(gclog_or_tty);
1422     gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1423       stats().time_until_cms_gen_full());
1424     gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1425     gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1426                            _cmsGen->contiguous_available());
1427     gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1428     gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1429     gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1430     gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1431     gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
1432   }
1433   // ------------------------------------------------------------------
1434 
1435   // If the estimated time to complete a cms collection (cms_duration())
1436   // is less than the estimated time remaining until the cms generation
1437   // is full, start a collection.
1438   if (!UseCMSInitiatingOccupancyOnly) {
1439     if (stats().valid()) {
1440       if (stats().time_until_cms_start() == 0.0) {
1441         return true;
1442       }
1443     } else {
1444       // We want to conservatively collect somewhat early in order
1445       // to try and "bootstrap" our CMS/promotion statistics;
1446       // this branch will not fire after the first successful CMS
1447       // collection because the stats should then be valid.
1448       if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1449         if (Verbose && PrintGCDetails) {
1450           gclog_or_tty->print_cr(
1451             " CMSCollector: collect for bootstrapping statistics:"
1452             " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1453             _bootstrap_occupancy);
1454         }
1455         return true;
1456       }
1457     }
1458   }
1459 
1460   // Otherwise, we start a collection cycle if either the perm gen or
1461   // old gen want a collection cycle started. Each may use
1462   // an appropriate criterion for making this decision.
1463   // XXX We need to make sure that the gen expansion
1464   // criterion dovetails well with this. XXX NEED TO FIX THIS
1465   if (_cmsGen->should_concurrent_collect()) {
1466     if (Verbose && PrintGCDetails) {
1467       gclog_or_tty->print_cr("CMS old gen initiated");
1468     }
1469     return true;
1470   }
1471 
1472   // We start a collection if we believe an incremental collection may fail;
1473   // this is not likely to be productive in practice because it's probably too
1474   // late anyway.
1475   GenCollectedHeap* gch = GenCollectedHeap::heap();
1476   assert(gch->collector_policy()->is_two_generation_policy(),
1477          "You may want to check the correctness of the following");
1478   if (gch->incremental_collection_will_fail()) {
1479     if (PrintGCDetails && Verbose) {
1480       gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1481     }
1482     return true;
1483   }
1484 
1485   if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
1486     bool res = update_should_unload_classes();
1487     if (res) {
1488       if (Verbose && PrintGCDetails) {
1489         gclog_or_tty->print_cr("CMS perm gen initiated");
1490       }
1491       return true;
1492     }
1493   }
1494   return false;
1495 }
1496 
1497 // Clear _expansion_cause fields of constituent generations
1498 void CMSCollector::clear_expansion_cause() {
1499   _cmsGen->clear_expansion_cause();
1500   _permGen->clear_expansion_cause();
1501 }
1502 
1503 // We should be conservative in starting a collection cycle.  To
1504 // start too eagerly runs the risk of collecting too often in the
1505 // extreme.  To collect too rarely falls back on full collections,
1506 // which works, even if not optimum in terms of concurrent work.
1507   // As a workaround for too eagerly collecting, use the flag
1508 // UseCMSInitiatingOccupancyOnly.  This also has the advantage of
1509 // giving the user an easily understandable way of controlling the
1510 // collections.
1511 // We want to start a new collection cycle if any of the following
1512 // conditions hold:
1513 // . our current occupancy exceeds the configured initiating occupancy
1514 //   for this generation, or
1515 // . we recently needed to expand this space and have not, since that
1516 //   expansion, done a collection of this generation, or
1517 // . the underlying space believes that it may be a good idea to initiate
1518 //   a concurrent collection (this may be based on criteria such as the
1519 //   following: the space uses linear allocation and linear allocation is
1520 //   going to fail, or there is believed to be excessive fragmentation in
1521 //   the generation, etc... or ...
1522 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1523 //   the case of the old generation, not the perm generation; see CR 6543076):
1524 //   we may be approaching a point at which allocation requests may fail because
1525 //   we will be out of sufficient free space given allocation rate estimates.]
1526 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1527 
1528   assert_lock_strong(freelistLock());
1529   if (occupancy() > initiating_occupancy()) {
1530     if (PrintGCDetails && Verbose) {
1531       gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
1532         short_name(), occupancy(), initiating_occupancy());
1533     }
1534     return true;
1535   }
1536   if (UseCMSInitiatingOccupancyOnly) {
1537     return false;
1538   }
1539   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1540     if (PrintGCDetails && Verbose) {
1541       gclog_or_tty->print(" %s: collect because expanded for allocation ",
1542         short_name());
1543     }
1544     return true;
1545   }
1546   if (_cmsSpace->should_concurrent_collect()) {
1547     if (PrintGCDetails && Verbose) {
1548       gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1549         short_name());
1550     }
1551     return true;
1552   }
1553   return false;
1554 }
1555 
1556 void ConcurrentMarkSweepGeneration::collect(bool   full,
1557                                             bool   clear_all_soft_refs,
1558                                             size_t size,
1559                                             bool   tlab)
1560 {
1561   collector()->collect(full, clear_all_soft_refs, size, tlab);
1562 }
1563 
1564 void CMSCollector::collect(bool   full,
1565                            bool   clear_all_soft_refs,
1566                            size_t size,
1567                            bool   tlab)
1568 {
1569   if (!UseCMSCollectionPassing && _collectorState > Idling) {
1570     // For debugging purposes skip the collection if the state
1571     // is not currently idle
1572     if (TraceCMSState) {


1975     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1976     size_t free_size = cms_space->free();
1977     assert(free_size ==
1978            pointer_delta(cms_space->end(), cms_space->compaction_top())
1979            * HeapWordSize,
1980       "All the free space should be compacted into one chunk at top");
1981     assert(cms_space->dictionary()->totalChunkSize(
1982                                       debug_only(cms_space->freelistLock())) == 0 ||
1983            cms_space->totalSizeInIndexedFreeLists() == 0,
1984       "All the free space should be in a single chunk");
1985     size_t num = cms_space->totalCount();
1986     assert((free_size == 0 && num == 0) ||
1987            (free_size > 0  && (num == 1 || num == 2)),
1988          "There should be at most 2 free chunks after compaction");
1989   #endif // ASSERT
1990   _collectorState = Resetting;
1991   assert(_restart_addr == NULL,
1992          "Should have been NULL'd before baton was passed");
1993   reset(false /* == !asynch */);
1994   _cmsGen->reset_after_compaction();
1995   _concurrent_cycles_since_last_unload = 0;
1996 
1997   if (verifying() && !should_unload_classes()) {
1998     perm_gen_verify_bit_map()->clear_all();
1999   }
2000 
2001   // Clear any data recorded in the PLAB chunk arrays.
2002   if (_survivor_plab_array != NULL) {
2003     reset_survivor_plab_arrays();
2004   }
2005 
2006   // Adjust the per-size allocation stats for the next epoch.
2007   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
2008   // Restart the "sweep timer" for next epoch.
2009   _sweep_timer.reset();
2010   _sweep_timer.start();
2011 
2012   // Sample collection pause time and reset for collection interval.
2013   if (UseAdaptiveSizePolicy) {
2014     size_policy()->msc_collection_end(gch->gc_cause());
2015   }
2016 
2017   // For a mark-sweep-compact, compute_new_size() will be called
2018   // in the heap's do_collection() method.
2019 }
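// Illustrative note (worked arithmetic, not part of the original source):
// pointer_delta() in the post-compaction assert above measures the gap
// between compaction_top() and end() in HeapWords, so it is scaled by
// HeapWordSize to compare against free(), which is in bytes.  On a 64-bit
// VM, where HeapWordSize is 8, a 32 KB tail of free space is
//   pointer_delta == 32768 / 8 == 4096 words, and
//   4096 words * 8 bytes/word == 32768 bytes == cms_space->free().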
2020 
2021 // A work method used by the foreground collector to do
2022 // a mark-sweep, after taking over from a possibly on-going
2023 // concurrent mark-sweep collection.
2024 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2025   CollectorState first_state, bool should_start_over) {
2026   if (PrintGC && Verbose) {
2027     gclog_or_tty->print_cr("Pass concurrent collection to foreground "


2104     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2105     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2106     _c->_foregroundGCShouldWait = true;
2107   }
2108 };
2109 
2110 // There are separate collect_in_background and collect_in_foreground methods
2111 // because of the different locking requirements of the background collector
2112 // and the foreground collector.  There was originally an attempt to share
2113 // one "collect" method between the background collector and the foreground
2114 // collector, but the amount of if-then-else logic that required made it
2115 // cleaner to keep the methods separate.
2116 void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
2117   assert(Thread::current()->is_ConcurrentGC_thread(),
2118     "A CMS asynchronous collection is only allowed on a CMS thread.");
2119 
2120   GenCollectedHeap* gch = GenCollectedHeap::heap();
2121   {
2122     bool safepoint_check = Mutex::_no_safepoint_check_flag;
2123     MutexLockerEx hl(Heap_lock, safepoint_check);
2124     FreelistLocker fll(this);
2125     MutexLockerEx x(CGC_lock, safepoint_check);
2126     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2127       // The foreground collector is active or we're
2128       // not using asynchronous collections.  Skip this
2129       // background collection.
2130       assert(!_foregroundGCShouldWait, "Should be clear");
2131       return;
2132     } else {
2133       assert(_collectorState == Idling, "Should be idling before start.");
2134       _collectorState = InitialMarking;
2135       // Reset the expansion cause, now that we are about to begin
2136       // a new cycle.
2137       clear_expansion_cause();
2138     }
2139     // Decide if we want to enable class unloading as part of the
2140     // ensuing concurrent GC cycle.
2141     update_should_unload_classes(); 
2142     _full_gc_requested = false;           // acks all outstanding full gc requests
2143     // Signal that we are about to start a collection
2144     gch->increment_total_full_collections();  // ... starting a collection cycle
2145     _collection_count_start = gch->total_full_collections();
2146   }
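// Illustrative note (not part of the original source): the scope just closed
// fixes the lock order the background collector uses to claim a new cycle:
// Heap_lock, then the free list lock(s) via FreelistLocker, then CGC_lock.
// The explicit MutexLockerEx acquisitions use _no_safepoint_check_flag,
// presumably because this path runs on the ConcurrentGC thread (asserted at
// the top of this method) rather than a JavaThread.  This ordering is part
// of the "different locking requirements" that keep collect_in_background
// and collect_in_foreground as separate methods.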
2147 
2148   // Used for PrintGC
2149   size_t prev_used;
2150   if (PrintGC && Verbose) {
2151     prev_used = _cmsGen->used(); // XXXPERM
2152   }
2153 
2154   // The change of the collection state is normally done at this level;
2155   // the exceptions are the phases that are executed while the world is
2156   // stopped.  For those phases the state change is made during the
2157   // stop-world pause itself, which, for baton-passing purposes, allows the
2158   // background collector to finish the phase and change state atomically.
2159   // The foreground collector cannot wait on a phase that is done
2160   // while the world is stopped, because the foreground collector already
2161   // has the world stopped and would deadlock.


3050   assert(_num_par_pushes >= 0, "Inconsistency");
3051   if (_overflow_list == NULL) {
3052     assert(_num_par_pushes == 0, "Inconsistency");
3053   }
3054   return _overflow_list == NULL;
3055 }
3056 
3057 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3058 // merely consolidate assertion checks that appear to occur together frequently.
3059 void CMSCollector::verify_work_stacks_empty() const {
3060   assert(_markStack.isEmpty(), "Marking stack should be empty");
3061   assert(overflow_list_is_empty(), "Overflow list should be empty");
3062 }
3063 
3064 void CMSCollector::verify_overflow_empty() const {
3065   assert(overflow_list_is_empty(), "Overflow list should be empty");
3066   assert(no_preserved_marks(), "No preserved marks");
3067 }
3068 #endif // PRODUCT
3069 
3070 // Decide if we want to enable class unloading as part of the
3071 // ensuing concurrent GC cycle. We will collect the perm gen and
3072 // unload classes if it's the case that:
3073 // (1) an explicit gc request has been made and the flag
3074 //     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3075 // (2) (a) class unloading is enabled at the command line, and
3076 //     (b) (i)   perm gen threshold has been crossed, or  
3077 //         (ii)  old gen is getting really full, or
3078 //         (iii) the previous N CMS collections did not collect the
3079 //               perm gen
3080 // NOTE: Provided there is no change in the state of the heap between
3081 // calls to this method, it should have idempotent results. Moreover,
3082 // its results should be monotonically increasing (i.e. they may go from
3083 // false to true, but not from true to false) between successive calls
3084 // that are not separated by a collection. The implementation below thus
3085 // relies on the property that concurrent_cycles_since_last_unload()
3086 // will not decrease unless a collection cycle happened, and that
3087 // _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
3088 // themselves also monotonic in that sense. See check_monotonicity()
3089 // below.
3090 bool CMSCollector::update_should_unload_classes() {
3091   // Condition 1 above
3092   if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3093     _should_unload_classes = true;
3094   } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3095     // Disjuncts 2.b.(i,ii,iii) above
3096     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3097                               CMSClassUnloadingMaxInterval)
3098                            || _permGen->should_concurrent_collect()
3099                            || _cmsGen->is_too_full();
3100   }
3101   return _should_unload_classes;
3102 }
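// Illustrative sketch (hypothetical helper, not part of the original source):
// disjunct 2.b.(i) above in isolation.  For example, with
// CMSClassUnloadingMaxInterval assumed to be 5, class unloading is forced
// once five concurrent cycles have completed without unloading, because
// sweepWork() (later in this file) resets _concurrent_cycles_since_last_unload
// to 0 on an unloading cycle and increments it otherwise -- which is also
// what keeps this disjunct monotone between collections, per the NOTE above.
static bool example_unload_due_to_interval(unsigned cycles_since_last_unload,
                                           unsigned max_interval) {
  return cycles_since_last_unload >= max_interval;  // disjunct 2.b.(i)
}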
3103 
3104 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3105   bool res = should_concurrent_collect();
3106 #define CMSIsTooFullPercentage 98
3107   res = res && occupancy() > (double)CMSIsTooFullPercentage/100.0;
3108   return res;
3109 }
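// Illustrative note (not part of the original source): since
// CMSIsTooFullPercentage/100.0 == 0.98, is_too_full() answers true only
// when the generation both satisfies should_concurrent_collect() and is
// more than 98% occupied.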
3110 
3111 void CMSCollector::setup_cms_unloading_and_verification_state() {
3112   const  bool should_verify =    VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3113                              || VerifyBeforeExit;
3114   const  int  rso           =    SharedHeap::SO_Symbols | SharedHeap::SO_Strings
3115                              |   SharedHeap::SO_CodeCache;
3116 
3117   if (should_unload_classes()) {   // Should unload classes this cycle
3118     remove_root_scanning_option(rso);  // Shrink the root set appropriately
3119     set_verifying(should_verify);    // Set verification state for this cycle
3120     return;                            // Nothing else needs to be done at this time
3121   }
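// Illustrative note (not part of the original source): dropping
// SO_Symbols | SO_Strings | SO_CodeCache from the root-scanning options on
// an unloading cycle means symbols, interned strings and code-cache entries
// are no longer treated as strong roots, so otherwise-unreferenced ones can
// be reclaimed; the non-unloading branch below re-includes them precisely
// "to prevent their resurrection".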
3122 
3123   // Not unloading classes this cycle
3124   assert(!should_unload_classes(), "Inconsistency!");
3125   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3126     // We were not verifying, or we _were_ unloading classes in the last cycle,
3127     // AND some verification options are enabled this cycle; in this case,
3128     // we must make sure that the deadness map is allocated if not already so,
3129     // and cleared (if already allocated previously --
3130     // CMSBitMap::sizeInBits() is used to determine if it's allocated).
3131     if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
3132       if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
3133         warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
3134                 "permanent generation verification disabled");
3135         return;  // Note that we leave verification disabled, so we'll retry this
3136                  // allocation next cycle. We _could_ remember this failure
3137                  // and skip further attempts and permanently disable verification
3138                  // attempts if that is considered more desirable.
3139       }
3140       assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()),
3141               "_perm_gen_ver_bit_map inconsistency?");
3142     } else {
3143       perm_gen_verify_bit_map()->clear_all();
3144     }
3145     // Include symbols, strings and code cache elements to prevent their resurrection.


4742   SpecializationStats::print();
4743 }
4744 
4745 void CMSCollector::checkpointRootsFinalWork(bool asynch,
4746   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4747 
4748   NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)
4749 
4750   assert(haveFreelistLocks(), "must have free list locks");
4751   assert_lock_strong(bitMapLock());
4752 
4753   if (UseAdaptiveSizePolicy) {
4754     size_policy()->checkpoint_roots_final_begin();
4755   }
4756 
4757   ResourceMark rm;
4758   HandleMark   hm;
4759 
4760   GenCollectedHeap* gch = GenCollectedHeap::heap();
4761 
4762   if (should_unload_classes()) {
4763     CodeCache::gc_prologue();
4764   }
4765   assert(haveFreelistLocks(), "must have free list locks");
4766   assert_lock_strong(bitMapLock());
4767 
4768   if (!init_mark_was_synchronous) {
4769     // We might assume that we need not fill TLAB's when
4770     // CMSScavengeBeforeRemark is set, because we may have just done
4771     // a scavenge which would have filled all TLAB's -- and besides
4772     // Eden would be empty. This however may not always be the case --
4773     // for instance although we asked for a scavenge, it may not have
4774     // happened because of a JNI critical section. We probably need
4775     // a policy for deciding whether we can in that case wait until
4776     // the critical section releases and then do the remark following
4777     // the scavenge, and skip it here. In the absence of that policy,
4778     // or of an indication of whether the scavenge did indeed occur,
4779     // we cannot rely on TLAB's having been filled and must do
4780     // so here just in case a scavenge did not happen.
4781     gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4782     // Update the saved marks which may affect the root scans.


4802         TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
4803                     gclog_or_tty);
4804         do_remark_non_parallel();
4805       }
4806     }
4807   } else {
4808     assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
4809     // The initial mark was stop-world, so there's no rescanning to
4810     // do; go straight on to the next step below.
4811   }
4812   verify_work_stacks_empty();
4813   verify_overflow_empty();
4814 
4815   {
4816     NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
4817     refProcessingWork(asynch, clear_all_soft_refs);
4818   }
4819   verify_work_stacks_empty();
4820   verify_overflow_empty();
4821 
4822   if (should_unload_classes()) {
4823     CodeCache::gc_epilogue();
4824   }
4825 
4826   // If we encountered any (marking stack / work queue) overflow
4827   // events during the current CMS cycle, take appropriate
4828   // remedial measures, where possible, so as to try and avoid
4829   // recurrence of that condition.
4830   assert(_markStack.isEmpty(), "No grey objects");
4831   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4832                      _ser_kac_ovflw;
4833   if (ser_ovflw > 0) {
4834     if (PrintCMSStatistics != 0) {
4835       gclog_or_tty->print_cr("Marking stack overflow (benign) "
4836         "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
4837         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
4838         _ser_kac_ovflw);
4839     }
4840     _markStack.expand();
4841     _ser_pmc_remark_ovflw = 0;
4842     _ser_pmc_preclean_ovflw = 0;


5672                                 &cmsKeepAliveClosure);
5673   {
5674     TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
5675     if (rp->processing_is_mt()) {
5676       CMSRefProcTaskExecutor task_executor(*this);
5677       rp->process_discovered_references(soft_ref_policy,
5678                                         &_is_alive_closure,
5679                                         &cmsKeepAliveClosure,
5680                                         &cmsDrainMarkingStackClosure,
5681                                         &task_executor);
5682     } else {
5683       rp->process_discovered_references(soft_ref_policy,
5684                                         &_is_alive_closure,
5685                                         &cmsKeepAliveClosure,
5686                                         &cmsDrainMarkingStackClosure,
5687                                         NULL);
5688     }
5689     verify_work_stacks_empty();
5690   }
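  // Illustrative note (not part of the original source): when the reference
  // processor is configured for multi-threaded processing, a
  // CMSRefProcTaskExecutor is supplied above so the discovered-reference
  // work can be farmed out to worker threads; passing NULL instead means
  // process_discovered_references() performs the work on the calling thread.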
5691 
5692   if (should_unload_classes()) {
5693     {
5694       TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
5695 
5696       // Follow SystemDictionary roots and unload classes
5697       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5698 
5699       // Follow CodeCache roots and unload any methods marked for unloading
5700       CodeCache::do_unloading(&_is_alive_closure,
5701                               &cmsKeepAliveClosure,
5702                               purged_class);
5703 
5704       cmsDrainMarkingStackClosure.do_void();
5705       verify_work_stacks_empty();
5706 
5707       // Update subklass/sibling/implementor links in KlassKlass descendants
5708       assert(!_revisitStack.isEmpty(), "revisit stack should not be empty");
5709       oop k;
5710       while ((k = _revisitStack.pop()) != NULL) {
5711         ((Klass*)(oopDesc*)k)->follow_weak_klass_links(
5712                        &_is_alive_closure,


5766     // We can be the CMS thread only if we are in a stop-world
5767     // phase of CMS collection.
5768     if (t->is_ConcurrentGC_thread()) {
5769       assert(_collectorState == InitialMarking ||
5770              _collectorState == FinalMarking,
5771              "Should be a stop-world phase");
5772       // The CMS thread should be holding the CMS_token.
5773       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5774              "Potential interference with concurrently "
5775              "executing VM thread");
5776     }
5777   }
5778 }
5779 #endif
5780 
5781 void CMSCollector::sweep(bool asynch) {
5782   assert(_collectorState == Sweeping, "just checking");
5783   check_correct_thread_executing();
5784   verify_work_stacks_empty();
5785   verify_overflow_empty();
5786   increment_sweep_count();
5787   _sweep_timer.stop();
5788   _sweep_estimate.sample(_sweep_timer.seconds());
5789   size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
5790 
5791   // PermGen verification support: If perm gen sweeping is disabled in
5792   // this cycle, we preserve the perm gen object "deadness" information
5793   // in the perm_gen_verify_bit_map. In order to do that we traverse
5794   // all blocks in perm gen and mark all dead objects.
5795   if (verifying() && !should_unload_classes()) {
5796     assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
5797            "Should have already been allocated");
5798     MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
5799                                markBitMap(), perm_gen_verify_bit_map());
5800     if (asynch) {
5801       CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5802                                bitMapLock());
5803       _permGen->cmsSpace()->blk_iterate(&mdo);
5804     } else {
5805       // In the case of synchronous sweep, we already have
5806       // the requisite locks/tokens.
5807       _permGen->cmsSpace()->blk_iterate(&mdo);
5808     }
5809   }
5810 
5811   if (asynch) {
5812     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5813     CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
5814     // First sweep the old gen then the perm gen
5815     {
5816       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5817                                bitMapLock());
5818       sweepWork(_cmsGen, asynch);
5819     }
5820 
5821     // Now repeat for perm gen
5822     if (should_unload_classes()) {
5823       CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5824                              bitMapLock());
5825       sweepWork(_permGen, asynch);
5826     }
5827 
5828     // Update Universe::_heap_*_at_gc figures.
5829     // We need all the free list locks to make the abstract state
5830     // transition from Sweeping to Resetting. See detailed note
5831     // further below.
5832     {
5833       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5834                                _permGen->freelistLock());
5835       // Update heap occupancy information which is used as
5836       // input to soft ref clearing policy at the next gc.
5837       Universe::update_heap_info_at_gc();
5838       _collectorState = Resizing;
5839     }
5840   } else {
5841     // already have needed locks
5842     sweepWork(_cmsGen,  asynch);
5843 
5844     if (should_unload_classes()) {
5845       sweepWork(_permGen, asynch);
5846     }
5847     // Update heap occupancy information which is used as
5848     // input to soft ref clearing policy at the next gc.
5849     Universe::update_heap_info_at_gc();
5850     _collectorState = Resizing;
5851   }
5852   verify_work_stacks_empty();
5853   verify_overflow_empty();
5854 
5855   _sweep_timer.reset();
5856   _sweep_timer.start();
5857 
5858   update_time_of_last_gc(os::javaTimeMillis());
5859 
5860   // NOTE on abstract state transitions:
5861   // Mutators allocate-live and/or mark the mod-union table dirty
5862   // based on the state of the collection.  The former is done in
5863   // the interval [Marking, Sweeping] and the latter in the interval
5864   // [Marking, Sweeping).  Thus the transitions into the Marking state


5985          || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
5986         "Should possess CMS token to sweep");
5987   assert_lock_strong(gen->freelistLock());
5988   assert_lock_strong(bitMapLock());
5989 
5990   assert(!_sweep_timer.is_active(), "Was switched off in an outer context");
5991   gen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
5992                                       _sweep_estimate.padded_average());
5993   gen->setNearLargestChunk();
5994 
5995   {
5996     SweepClosure sweepClosure(this, gen, &_markBitMap,
5997                             CMSYield && asynch);
5998     gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5999     // We need to free up/coalesce garbage/blocks from a
6000     // co-terminal free run. This is done in the SweepClosure
6001     // destructor; so, do not remove this scope, else the
6002     // end-of-sweep-census below will be off by a little bit.
6003   }
6004   gen->cmsSpace()->sweep_completed();
6005   gen->cmsSpace()->endSweepFLCensus(sweep_count());
6006   if (should_unload_classes()) {                // unloaded classes this cycle,
6007     _concurrent_cycles_since_last_unload = 0;   // ... reset count
6008   } else {                                      // did not unload classes,
6009     _concurrent_cycles_since_last_unload++;     // ... increment count
6010   }
6011 }
6012 
6013 // Reset CMS data structures (for now just the marking bit map)
6014 // preparatory for the next cycle.
6015 void CMSCollector::reset(bool asynch) {
6016   GenCollectedHeap* gch = GenCollectedHeap::heap();
6017   CMSAdaptiveSizePolicy* sp = size_policy();
6018   AdaptiveSizePolicyOutput(sp, gch->total_collections());
6019   if (asynch) {
6020     CMSTokenSyncWithLocks ts(true, bitMapLock());
6021 
6022     // If the state is not "Resetting", the foreground thread
6023     // has already done a collection, including the resetting.
6024     if (_collectorState != Resetting) {
6025       assert(_collectorState == Idling, "The state should only change"
6026         " because the foreground collector has finished the collection");
6027       return;
6028     }
6029 
6030     // Clear the mark bitmap (no grey objects to start with)


7248     }
7249     // anything including and to the right of _finger
7250     // will be scanned as we iterate over the remainder of the
7251     // bit map
7252   }
7253 }
7254 
7255 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7256                      MemRegion span,
7257                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
7258                      CMSMarkStack*  revisitStack,
7259                      HeapWord* finger, MarkFromRootsClosure* parent) :
7260   OopClosure(collector->ref_processor()),
7261   _collector(collector),
7262   _span(span),
7263   _bitMap(bitMap),
7264   _markStack(markStack),
7265   _revisitStack(revisitStack),
7266   _finger(finger),
7267   _parent(parent),
7268   _should_remember_klasses(collector->should_unload_classes())
7269 { }
7270 
7271 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7272                      MemRegion span,
7273                      CMSBitMap* bit_map,
7274                      OopTaskQueue* work_queue,
7275                      CMSMarkStack*  overflow_stack,
7276                      CMSMarkStack*  revisit_stack,
7277                      HeapWord* finger,
7278                      HeapWord** global_finger_addr,
7279                      Par_MarkFromRootsClosure* parent) :
7280   OopClosure(collector->ref_processor()),
7281   _collector(collector),
7282   _whole_span(collector->_span),
7283   _span(span),
7284   _bit_map(bit_map),
7285   _work_queue(work_queue),
7286   _overflow_stack(overflow_stack),
7287   _revisit_stack(revisit_stack),
7288   _finger(finger),
7289   _global_finger_addr(global_finger_addr),
7290   _parent(parent),
7291   _should_remember_klasses(collector->should_unload_classes())
7292 { }
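// Illustrative note (not part of the original source): both constructors
// above latch collector->should_unload_classes() into
// _should_remember_klasses (which presumably gates pushing klasses onto the
// revisit stack during marking) -- klasses only need to be remembered when
// this cycle will unload classes, since the revisit stack is what later
// drives follow_weak_klass_links() in the class-unloading step of
// refProcessingWork() above.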
7293 
7294 
7295 void CMSCollector::lower_restart_addr(HeapWord* low) {
7296   assert(_span.contains(low), "Out of bounds addr");
7297   if (_restart_addr == NULL) {
7298     _restart_addr = low;
7299   } else {
7300     _restart_addr = MIN2(_restart_addr, low);
7301   }
7302 }
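// Illustrative note (not part of the original source): lower_restart_addr()
// keeps the minimum of every address handed to it, so if two overflow
// events discard grey objects starting at, say, 0x3000 and 0x1000
// (addresses chosen purely for illustration), _restart_addr ends up at
// 0x1000 and a later rescan from that address covers both discarded
// regions.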
7303 
7304 // Upon stack overflow, we discard (part of) the stack,
7305 // remembering the least address amongst those discarded
7306 // in CMSCollector's _restart_addr.
7307 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7308   // Remember the least grey address discarded
7309   HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7310   _collector->lower_restart_addr(ra);
7311   _markStack->reset();  // discard stack contents


7414   }
7415 }
7416 
7417 
7418 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7419                                        MemRegion span,
7420                                        ReferenceProcessor* rp,
7421                                        CMSBitMap* bit_map,
7422                                        CMSBitMap* mod_union_table,
7423                                        CMSMarkStack*  mark_stack,
7424                                        CMSMarkStack*  revisit_stack,
7425                                        bool           concurrent_precleaning):
7426   OopClosure(rp),
7427   _collector(collector),
7428   _span(span),
7429   _bit_map(bit_map),
7430   _mod_union_table(mod_union_table),
7431   _mark_stack(mark_stack),
7432   _revisit_stack(revisit_stack),
7433   _concurrent_precleaning(concurrent_precleaning),
7434   _should_remember_klasses(collector->should_unload_classes())
7435 {
7436   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7437 }
7438 
7439 // Grey object rescan during pre-cleaning and second checkpoint phases --
7440 // the non-parallel version (the parallel version appears further below).
7441 void PushAndMarkClosure::do_oop(oop* p) {
7442   oop    this_oop = *p;
7443   // Ignore mark word verification: during concurrent precleaning
7444   // the object monitor may be locked, and during the checkpoint
7445   // phases the object may already have been reached by a different
7446   // path and may be at the end of the global overflow list (so
7447   // the mark word may be NULL).
7448   assert(this_oop->is_oop_or_null(true/* ignore mark word */),
7449          "expected an oop or NULL");
7450   HeapWord* addr = (HeapWord*)this_oop;
7451   // Check if oop points into the CMS generation
7452   // and is not marked
7453   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7454     // a white object ...


7476          // in the overflow list.
7477          _collector->push_on_overflow_list(this_oop);
7478          _collector->_ser_pmc_remark_ovflw++;
7479       }
7480     }
7481   }
7482 }
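// Illustrative note (not part of the original source): when the mark stack
// is full, push_on_overflow_list() above chains the object onto the
// collector's global overflow list instead.  As the comment in the parallel
// variant below spells out, that chaining hijacks the object's mark word as
// the link, which is why the asserts in these closures ignore the mark
// word: the last object on the list carries a NULL mark word until it is
// popped and its mark word is restored.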
7483 
7484 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7485                                                MemRegion span,
7486                                                ReferenceProcessor* rp,
7487                                                CMSBitMap* bit_map,
7488                                                OopTaskQueue* work_queue,
7489                                                CMSMarkStack* revisit_stack):
7490   OopClosure(rp),
7491   _collector(collector),
7492   _span(span),
7493   _bit_map(bit_map),
7494   _work_queue(work_queue),
7495   _revisit_stack(revisit_stack),
7496   _should_remember_klasses(collector->should_unload_classes())
7497 {
7498   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7499 }
7500 
7501 // Grey object rescan during second checkpoint phase --
7502 // the parallel version.
7503 void Par_PushAndMarkClosure::do_oop(oop* p) {
7504   oop    this_oop = *p;
7505   // In the assert below, we ignore the mark word because
7506   // this oop may point to an already visited object that is
7507   // on the overflow stack (in which case the mark word has
7508   // been hijacked for chaining into the overflow stack --
7509   // if this is the last object in the overflow stack then
7510   // its mark word will be NULL). Because this object may
7511   // have been subsequently popped off the global overflow
7512   // stack, and the mark word possibly restored to the prototypical
7513   // value, by the time we get to examine this failing assert in
7514   // the debugger, is_oop_or_null(false) may subsequently start
7515   // to hold.
7516   assert(this_oop->is_oop_or_null(true),


7998 
7999   // Common code path for original and adaptive free lists.
8000 
8001   // this object is live: we'd normally expect this to be
8002   // an oop, and would like to assert the following:
8003   // assert(oop(addr)->is_oop(), "live block should be an oop");
8004   // However, as we commented above, this may be an object whose
8005   // header hasn't yet been initialized.
8006   size_t size;
8007   assert(_bitMap->isMarked(addr), "Tautology for this control point");
8008   if (_bitMap->isMarked(addr + 1)) {
8009     // Determine the size from the bit map, rather than trying to
8010     // compute it from the object header.
8011     HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8012     size = pointer_delta(nextOneAddr + 1, addr);
8013     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8014            "alignment problem");
8015 
8016     #ifdef DEBUG
8017       if (oop(addr)->klass() != NULL &&
8018           (   !_collector->should_unload_classes()
8019            || oop(addr)->is_parsable())) {
8020         // Ignore mark word because we are running concurrent with mutators
8021         assert(oop(addr)->is_oop(true), "live block should be an oop");
8022         assert(size ==
8023                CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8024                "P-mark and computed size do not agree");
8025       }
8026     #endif
8027 
8028   } else {
8029     // This should be an initialized object that's alive.
8030     assert(oop(addr)->klass() != NULL &&
8031            (!_collector->should_unload_classes()
8032             || oop(addr)->is_parsable()),
8033            "Should be an initialized object");
8034     // Ignore mark word because we are running concurrent with mutators
8035     assert(oop(addr)->is_oop(true), "live block should be an oop");
8036     // Verify that the bit map has no bits marked between
8037     // addr and purported end of this block.
8038     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8039     assert(size >= 3, "Necessary for Printezis marks to work");
8040     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8041     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8042   }
8043   return size;
8044 }
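// Illustrative sketch (hypothetical helper, not part of the original source):
// the isMarked(addr + 1) branch above decodes a block whose extent was
// recorded with "Printezis" bits -- from the computation shown, the second
// extra bit must sit on the last word of the block (addr + size - 1), so
// the size is recoverable from the bit map alone:
static size_t example_printezis_size(size_t addr_word,          // word index of block start
                                     size_t next_marked_word) { // first marked word >= addr_word + 2
  // next_marked_word corresponds to addr_word + size - 1, hence:
  return next_marked_word + 1 - addr_word;
}
// e.g. a 5-word block starting at word 100 has extra bits at 101 and 104;
// the first marked word at or after 102 is 104, giving 104 + 1 - 100 == 5.
// The assert(size >= 3, ...) in the else-branch above ensures the two extra
// bits are distinct and that the search starting at addr + 2 cannot miss
// the end bit.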
8045 
8046 void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
8047                                             size_t chunkSize) {
8048   // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
8049   // scheme.
8050   bool fcInFreeLists = fc->isFree();
8051   assert(_sp->adaptive_freelists(), "Should only be used in this case.");