--- 1.300 ---
Merged changes between child workspace "/net/spot/workspaces/ysr/cms_bugs" and
 parent workspace "/net/jano2/export2/hotspot/ws/main/gc_baseline".
--- 1.297.1.1 ---
6621144 CMS: assertion failure "is_cms_thread == Thread::current()->is_ConcurrentGC_thread()"

          --- old/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
          +++ new/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
... 217 lines elided ...
 218  218    // Ideally, in the calculation below, we'd compute the dilatation
 219  219    // factor as: MinChunkSize/(promoting_gen's min object size)
 220  220    // Since we do not have such a general query interface for the
 221  221    // promoting generation, we'll instead just use the mimimum
 222  222    // object size (which today is a header's worth of space);
 223  223    // note that all arithmetic is in units of HeapWords.
 224  224    assert(MinChunkSize >= oopDesc::header_size(), "just checking");
 225  225    assert(_dilatation_factor >= 1.0, "from previous assert");
 226  226  }
 227  227  
      228 +
      229 +// The field "_initiating_occupancy" represents the occupancy percentage
      230 +// at which we trigger a new collection cycle.  Unless explicitly specified
      231 +// via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
      232 +// is calculated by:
      233 +//
      234 +//   Let "f" be MinHeapFreeRatio in
      235 +//
      236 +//    _initiating_occupancy = 100-f +
      237 +//                           f * (CMSTrigger[Perm]Ratio/100)
      238 +//   where CMSTrigger[Perm]Ratio is the argument "tr" below.
      239 +//
      240 +// That is, if we assume the heap is at its desired maximum occupancy at the
      241 +// end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
      242 +// space be allocated before initiating a new collection cycle.
      243 +//
      244 +void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
      245 +  assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
      246 +  if (io >= 0) {
      247 +    _initiating_occupancy = (double)io / 100.0;
      248 +  } else {
      249 +    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
      250 +                             (double)(tr * MinHeapFreeRatio) / 100.0)
      251 +                            / 100.0;
      252 +  }
      253 +}
      254 +
      255 +
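As a sanity check of the formula in the comment above, here is a small standalone sketch (not part of this change; the HotSpot defaults MinHeapFreeRatio = 40 and CMSTriggerRatio = 80 are assumed) showing that leaving CMSInitiatingOccupancyFraction unset (-1) yields an initiating occupancy of 0.92, i.e. a cycle starts at roughly 92% occupancy:

    #include <cstdio>

    // Mirrors init_initiating_occupancy() above: io < 0 means "not explicitly set".
    static double initiating_occupancy(long io, long tr, long min_heap_free_ratio) {
      if (io >= 0) {
        return (double)io / 100.0;
      }
      return ((100 - min_heap_free_ratio) +
              (double)(tr * min_heap_free_ratio) / 100.0) / 100.0;
    }

    int main() {
      // Assumed defaults: MinHeapFreeRatio = 40, CMSTriggerRatio = 80.
      printf("%.2f\n", initiating_occupancy(-1, 80, 40));  // prints 0.92
      return 0;
    }
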
 228  256  void ConcurrentMarkSweepGeneration::ref_processor_init() {
 229  257    assert(collector() != NULL, "no collector");
 230  258    collector()->ref_processor_init();
 231  259  }
 232  260  
 233  261  void CMSCollector::ref_processor_init() {
 234  262    if (_ref_processor == NULL) {
 235  263      // Allocate and initialize a reference processor
 236  264      _ref_processor = ReferenceProcessor::create_ref_processor(
 237  265          _span,                               // span
... 275 lines elided ...
 513  541  #ifndef PRODUCT
 514  542    _num_par_pushes(0),
 515  543  #endif
 516  544    _collection_count_start(0),
 517  545    _verifying(false),
 518  546    _icms_start_limit(NULL),
 519  547    _icms_stop_limit(NULL),
 520  548    _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 521  549    _completed_initialization(false),
 522  550    _collector_policy(cp),
 523      -  _unload_classes(false),
 524      -  _unloaded_classes_last_cycle(false),
      551 +  _should_unload_classes(false),
      552 +  _concurrent_cycles_since_last_unload(0),
      553 +  _sweep_count(0),
 525  554    _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
 526  555  {
 527  556    if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
 528  557      ExplicitGCInvokesConcurrent = true;
 529  558    }
 530  559    // Now expand the span and allocate the collection support structures
 531  560    // (MUT, marking bit map etc.) to cover both generations subject to
 532  561    // collection.
 533  562  
 534  563    // First check that _permGen is adjacent to _cmsGen and above it.
... 100 lines elided ...
 635  664          }
 636  665          _task_queues->register_queue(i, &q_padded->work_queue);
 637  666        }
 638  667        for (i = 0; i < num_queues; i++) {
 639  668          _task_queues->queue(i)->initialize();
 640  669          _hash_seed[i] = 17;  // copied from ParNew
 641  670        }
 642  671      }
 643  672    }
 644  673  
 645      -  // "initiatingOccupancy" is the occupancy ratio at which we trigger
 646      -  // a new collection cycle.  Unless explicitly specified via
 647      -  // CMSTriggerRatio, it is calculated by:
 648      -  //   Let "f" be MinHeapFreeRatio in
 649      -  //
 650      -  //    intiatingOccupancy = 100-f +
 651      -  //                         f * (CMSTriggerRatio/100)
 652      -  // That is, if we assume the heap is at its desired maximum occupancy at the
 653      -  // end of a collection, we let CMSTriggerRatio of the (purported) free
 654      -  // space be allocated before initiating a new collection cycle.
 655      -  if (CMSInitiatingOccupancyFraction > 0) {
 656      -    _initiatingOccupancy = (double)CMSInitiatingOccupancyFraction / 100.0;
 657      -  } else {
 658      -    _initiatingOccupancy = ((100 - MinHeapFreeRatio) +
 659      -                           (double)(CMSTriggerRatio *
 660      -                                    MinHeapFreeRatio) / 100.0)
 661      -                           / 100.0;
 662      -  }
      674 +  _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
      675 +  _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);
      676 +
 663  677    // Clip CMSBootstrapOccupancy between 0 and 100.
 664      -  _bootstrap_occupancy = ((double)MIN2((intx)100, MAX2((intx)0, CMSBootstrapOccupancy)))
      678 +  _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
 665  679                           /(double)100;
 666  680  
 667  681    _full_gcs_since_conc_gc = 0;
 668  682  
 669  683    // Now tell CMS generations the identity of their collector
 670  684    ConcurrentMarkSweepGeneration::set_collector(this);
 671  685  
 672  686    // Create & start a CMS thread for this CMS collector
 673  687    _cmsThread = ConcurrentMarkSweepThread::start(this);
 674  688    assert(cmsThread() != NULL, "CMS Thread should have been created");
... 731 lines elided ...
1406 1420      gclog_or_tty->print_cr("");
1407 1421      stats().print_on(gclog_or_tty);
1408 1422      gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1409 1423        stats().time_until_cms_gen_full());
1410 1424      gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1411 1425      gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1412 1426                             _cmsGen->contiguous_available());
1413 1427      gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1414 1428      gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1415 1429      gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1416      -    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", initiatingOccupancy());
     1430 +    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
     1431 +    gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
1417 1432    }
1418 1433    // ------------------------------------------------------------------
1419 1434  
1420 1435    // If the estimated time to complete a cms collection (cms_duration())
1421 1436    // is less than the estimated time remaining until the cms generation
1422 1437    // is full, start a collection.
1423 1438    if (!UseCMSInitiatingOccupancyOnly) {
1424 1439      if (stats().valid()) {
1425 1440        if (stats().time_until_cms_start() == 0.0) {
1426 1441          return true;
... 12 lines elided ...
1439 1454          }
1440 1455          return true;
1441 1456        }
1442 1457      }
1443 1458    }
1444 1459  
1445 1460    // Otherwise, we start a collection cycle if either the perm gen or
1446 1461    // old gen want a collection cycle started. Each may use
1447 1462    // an appropriate criterion for making this decision.
1448 1463    // XXX We need to make sure that the gen expansion
1449      -  // criterion dovetails well with this.
1450      -  if (_cmsGen->shouldConcurrentCollect(initiatingOccupancy())) {
     1464 +  // criterion dovetails well with this. XXX NEED TO FIX THIS
     1465 +  if (_cmsGen->should_concurrent_collect()) {
1451 1466      if (Verbose && PrintGCDetails) {
1452 1467        gclog_or_tty->print_cr("CMS old gen initiated");
1453 1468      }
1454 1469      return true;
1455 1470    }
1456 1471  
1457      -  if (cms_should_unload_classes() &&
1458      -      _permGen->shouldConcurrentCollect(initiatingOccupancy())) {
1459      -    if (Verbose && PrintGCDetails) {
1460      -     gclog_or_tty->print_cr("CMS perm gen initiated");
     1472 +  // We start a collection if we believe an incremental collection may fail;
     1473 +  // this is not likely to be productive in practice because it's probably too
     1474 +  // late anyway.
     1475 +  GenCollectedHeap* gch = GenCollectedHeap::heap();
     1476 +  assert(gch->collector_policy()->is_two_generation_policy(),
     1477 +         "You may want to check the correctness of the following");
     1478 +  if (gch->incremental_collection_will_fail()) {
     1479 +    if (PrintGCDetails && Verbose) {
     1480 +      gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1461 1481      }
1462 1482      return true;
1463 1483    }
1464 1484  
     1485 +  if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
     1486 +    bool res = update_should_unload_classes();
     1487 +    if (res) {
     1488 +      if (Verbose && PrintGCDetails) {
     1489 +        gclog_or_tty->print_cr("CMS perm gen initiated");
     1490 +      }
     1491 +      return true;
     1492 +    }
     1493 +  }
1465 1494    return false;
1466 1495  }
1467 1496  
1468 1497  // Clear _expansion_cause fields of constituent generations
1469 1498  void CMSCollector::clear_expansion_cause() {
1470 1499    _cmsGen->clear_expansion_cause();
1471 1500    _permGen->clear_expansion_cause();
1472 1501  }
1473 1502  
1474      -bool ConcurrentMarkSweepGeneration::shouldConcurrentCollect(
1475      -  double initiatingOccupancy) {
1476      -  // We should be conservative in starting a collection cycle.  To
1477      -  // start too eagerly runs the risk of collecting too often in the
1478      -  // extreme.  To collect too rarely falls back on full collections,
1479      -  // which works, even if not optimum in terms of concurrent work.
1480      -  // As a work around for too eagerly collecting, use the flag
1481      -  // UseCMSInitiatingOccupancyOnly.  This also has the advantage of
1482      -  // giving the user an easily understandable way of controlling the
1483      -  // collections.
1484      -  // We want to start a new collection cycle if any of the following
1485      -  // conditions hold:
1486      -  // . our current occupancy exceeds the initiating occupancy, or
1487      -  // . we recently needed to expand and have not since that expansion,
1488      -  //   collected, or
1489      -  // . we are not using adaptive free lists and linear allocation is
1490      -  //   going to fail, or
1491      -  // . (for old gen) incremental collection has already failed or
1492      -  //   may soon fail in the near future as we may not be able to absorb
1493      -  //   promotions.
1494      -  assert_lock_strong(freelistLock());
     1503 +// We should be conservative in starting a collection cycle.  To
     1504 +// start too eagerly runs the risk of collecting too often in the
     1505 +// extreme.  To collect too rarely falls back on full collections,
     1506 +// which works, even if not optimum in terms of concurrent work.
     1507 +// As a work around for too eagerly collecting, use the flag
     1508 +// UseCMSInitiatingOccupancyOnly.  This also has the advantage of
     1509 +// giving the user an easily understandable way of controlling the
     1510 +// collections.
     1511 +// We want to start a new collection cycle if any of the following
     1512 +// conditions hold:
     1513 +// . our current occupancy exceeds the configured initiating occupancy
     1514 +//   for this generation, or
     1515 +// . we recently needed to expand this space and have not, since that
     1516 +//   expansion, done a collection of this generation, or
     1517 +// . the underlying space believes that it may be a good idea to initiate
     1518 +//   a concurrent collection (this may be based on criteria such as the
     1519 +//   following: the space uses linear allocation and linear allocation is
     1520 +//   going to fail, or there is believed to be excessive fragmentation in
     1521 +//   the generation, etc... or ...
     1522 +// [.(currently done by CMSCollector::shouldConcurrentCollect() only for
     1523 +//   the case of the old generation, not the perm generation; see CR 6543076):
     1524 +//   we may be approaching a point at which allocation requests may fail because
     1525 +//   we will be out of sufficient free space given allocation rate estimates.]
     1526 +bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1495 1527  
1496      -  if (occupancy() > initiatingOccupancy) {
     1528 +  assert_lock_strong(freelistLock());
     1529 +  if (occupancy() > initiating_occupancy()) {
1497 1530      if (PrintGCDetails && Verbose) {
1498 1531        gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
1499      -        short_name(), occupancy(), initiatingOccupancy);
     1532 +        short_name(), occupancy(), initiating_occupancy());
1500 1533      }
1501 1534      return true;
1502 1535    }
1503 1536    if (UseCMSInitiatingOccupancyOnly) {
1504 1537      return false;
1505 1538    }
1506 1539    if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1507 1540      if (PrintGCDetails && Verbose) {
1508 1541        gclog_or_tty->print(" %s: collect because expanded for allocation ",
1509 1542          short_name());
1510 1543      }
1511 1544      return true;
1512 1545    }
1513      -  GenCollectedHeap* gch = GenCollectedHeap::heap();
1514      -  assert(gch->collector_policy()->is_two_generation_policy(),
1515      -         "You may want to check the correctness of the following");
1516      -  if (gch->incremental_collection_will_fail()) {
     1546 +  if (_cmsSpace->should_concurrent_collect()) {
1517 1547      if (PrintGCDetails && Verbose) {
1518      -      gclog_or_tty->print(" %s: collect because incremental collection will fail ",
     1548 +      gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1519 1549          short_name());
1520 1550      }
1521 1551      return true;
1522 1552    }
1523      -  if (!_cmsSpace->adaptive_freelists() &&
1524      -      _cmsSpace->linearAllocationWouldFail()) {
1525      -    if (PrintGCDetails && Verbose) {
1526      -      gclog_or_tty->print(" %s: collect because of linAB ",
1527      -        short_name());
1528      -    }
1529      -    return true;
1530      -  }
1531 1553    return false;
1532 1554  }
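The decision above reads as a short-circuit cascade. A minimal standalone model (hypothetical names, not HotSpot code) makes the effect of UseCMSInitiatingOccupancyOnly explicit: the occupancy threshold is always honoured, and the remaining heuristics apply only when the user has not restricted initiation to that threshold:

    #include <cstdio>

    // Hypothetical stand-in for the predicate above.
    static bool model_should_concurrent_collect(double occupancy,
                                                double initiating_occupancy,
                                                bool use_initiating_occupancy_only,
                                                bool expanded_for_allocation,
                                                bool space_hint) {
      if (occupancy > initiating_occupancy) return true;   // threshold crossed
      if (use_initiating_occupancy_only)    return false;  // user wants the threshold test only
      if (expanded_for_allocation)          return true;   // grew to satisfy allocation, not yet collected
      return space_hint;                                   // space-level heuristic (e.g. fragmentation)
    }

    int main() {
      // Below threshold, occupancy-only mode: no cycle even though the space hints otherwise.
      printf("%d\n", model_should_concurrent_collect(0.60, 0.92, true,  false, true));   // 0
      // Same inputs without the restriction: the space hint may trigger a cycle.
      printf("%d\n", model_should_concurrent_collect(0.60, 0.92, false, false, true));   // 1
      return 0;
    }
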
1533 1555  
1534 1556  void ConcurrentMarkSweepGeneration::collect(bool   full,
1535 1557                                              bool   clear_all_soft_refs,
1536 1558                                              size_t size,
1537 1559                                              bool   tlab)
1538 1560  {
1539 1561    collector()->collect(full, clear_all_soft_refs, size, tlab);
1540 1562  }
... 422 lines elided ...
1963 1985      size_t num = cms_space->totalCount();
1964 1986      assert((free_size == 0 && num == 0) ||
1965 1987             (free_size > 0  && (num == 1 || num == 2)),
1966 1988           "There should be at most 2 free chunks after compaction");
1967 1989    #endif // ASSERT
1968 1990    _collectorState = Resetting;
1969 1991    assert(_restart_addr == NULL,
1970 1992           "Should have been NULL'd before baton was passed");
1971 1993    reset(false /* == !asynch */);
1972 1994    _cmsGen->reset_after_compaction();
     1995 +  _concurrent_cycles_since_last_unload = 0;
1973 1996  
1974      -  if (verifying() && !cms_should_unload_classes()) {
     1997 +  if (verifying() && !should_unload_classes()) {
1975 1998      perm_gen_verify_bit_map()->clear_all();
1976 1999    }
1977 2000  
1978 2001    // Clear any data recorded in the PLAB chunk arrays.
1979 2002    if (_survivor_plab_array != NULL) {
1980 2003      reset_survivor_plab_arrays();
1981 2004    }
1982 2005  
1983 2006    // Adjust the per-size allocation stats for the next epoch.
1984      -  _cmsGen->cmsSpace()->endSweepFLCensus(sweepCount() /* fake */);
     2007 +  _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
1985 2008    // Restart the "sweep timer" for next epoch.
1986 2009    _sweep_timer.reset();
1987 2010    _sweep_timer.start();
1988 2011  
1989 2012    // Sample collection pause time and reset for collection interval.
1990 2013    if (UseAdaptiveSizePolicy) {
1991 2014      size_policy()->msc_collection_end(gch->gc_cause());
1992 2015    }
1993 2016  
1994 2017    // For a mark-sweep-compact, compute_new_size() will be called
... 96 lines elided ...
2091 2114  // collector but the if-then-else required made it cleaner to have
2092 2115  // separate methods.
2093 2116  void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
2094 2117    assert(Thread::current()->is_ConcurrentGC_thread(),
2095 2118      "A CMS asynchronous collection is only allowed on a CMS thread.");
2096 2119  
2097 2120    GenCollectedHeap* gch = GenCollectedHeap::heap();
2098 2121    {
2099 2122      bool safepoint_check = Mutex::_no_safepoint_check_flag;
2100 2123      MutexLockerEx hl(Heap_lock, safepoint_check);
     2124 +    FreelistLocker fll(this);
2101 2125      MutexLockerEx x(CGC_lock, safepoint_check);
2102 2126      if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2103 2127        // The foreground collector is active or we're
2104 2128        // not using asynchronous collections.  Skip this
2105 2129        // background collection.
2106 2130        assert(!_foregroundGCShouldWait, "Should be clear");
2107 2131        return;
2108 2132      } else {
2109 2133        assert(_collectorState == Idling, "Should be idling before start.");
2110 2134        _collectorState = InitialMarking;
2111 2135        // Reset the expansion cause, now that we are about to begin
2112 2136        // a new cycle.
2113 2137        clear_expansion_cause();
2114 2138      }
2115      -    _unloaded_classes_last_cycle = cms_should_unload_classes(); // ... from last cycle
2116      -    // This controls class unloading in response to an explicit gc request.
2117      -    // If ExplicitGCInvokesConcurrentAndUnloadsClasses is set, then
2118      -    // we will unload classes even if CMSClassUnloadingEnabled is not set.
2119      -    // See CR 6541037 and related CRs.
2120      -    _unload_classes = _full_gc_requested                      // ... for this cycle
2121      -                      && ExplicitGCInvokesConcurrentAndUnloadsClasses;
     2139 +    // Decide if we want to enable class unloading as part of the
     2140 +    // ensuing concurrent GC cycle.
     2141 +    update_should_unload_classes(); 
2122 2142      _full_gc_requested = false;           // acks all outstanding full gc requests
2123 2143      // Signal that we are about to start a collection
2124 2144      gch->increment_total_full_collections();  // ... starting a collection cycle
2125 2145      _collection_count_start = gch->total_full_collections();
2126 2146    }
2127 2147  
2128 2148    // Used for PrintGC
2129 2149    size_t prev_used;
2130 2150    if (PrintGC && Verbose) {
2131 2151      prev_used = _cmsGen->used(); // XXXPERM
... 908 lines elided ...
3040 3060    assert(_markStack.isEmpty(), "Marking stack should be empty");
3041 3061    assert(overflow_list_is_empty(), "Overflow list should be empty");
3042 3062  }
3043 3063  
3044 3064  void CMSCollector::verify_overflow_empty() const {
3045 3065    assert(overflow_list_is_empty(), "Overflow list should be empty");
3046 3066    assert(no_preserved_marks(), "No preserved marks");
3047 3067  }
3048 3068  #endif // PRODUCT
3049 3069  
     3070 +// Decide if we want to enable class unloading as part of the
     3071 +// ensuing concurrent GC cycle. We will collect the perm gen and
     3072 +// unload classes if it's the case that:
     3073 +// (1) an explicit gc request has been made and the flag
     3074 +//     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
     3075 +// (2) (a) class unloading is enabled at the command line, and
     3076 +//     (b) (i)   perm gen threshold has been crossed, or  
     3077 +//         (ii)  old gen is getting really full, or
     3078 +//         (iii) the previous N CMS collections did not collect the
     3079 +//               perm gen
     3080 +// NOTE: Provided there is no change in the state of the heap between
     3081 +// calls to this method, it should have idempotent results. Moreover,
     3082 +// its results should be monotonically increasing (i.e. going from 0 to 1,
     3083 +// but not 1 to 0) between successive calls between which the heap was
     3084 +// not collected. For the implementation below, it must thus rely on
     3085 +// the property that concurrent_cycles_since_last_unload()
     3086 +// will not decrease unless a collection cycle happened and that
     3087 +// _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
     3088 +// themselves also monotonic in that sense. See check_monotonicity()
     3089 +// below.
     3090 +bool CMSCollector::update_should_unload_classes() {
     3091 +  // Condition 1 above
     3092 +  if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
     3093 +    _should_unload_classes = true;
     3094 +  } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
     3095 +    // Disjuncts 2.b.(i,ii,iii) above
     3096 +    _should_unload_classes = (concurrent_cycles_since_last_unload() >=
     3097 +                              CMSClassUnloadingMaxInterval)
     3098 +                           || _permGen->should_concurrent_collect()
     3099 +                           || _cmsGen->is_too_full();
     3100 +  }
     3101 +  return _should_unload_classes;
     3102 +}
     3103 +
     3104 +bool ConcurrentMarkSweepGeneration::is_too_full() const {
     3105 +  bool res = should_concurrent_collect();
     3106 +#define CMSIsTooFullPercentage 98
     3107 +  res = res && occupancy() > (double)CMSIsTooFullPercentage/100.0;
     3108 +  return res;
     3109 +}
     3110 +
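A small sketch of the monotonicity property the comment relies on (illustrative names only, not part of the webrev): provided the inputs in disjunct (2) do not flip back from true to false between collections, successive calls can only move the answer from false to true:

    // Minimal model of update_should_unload_classes(); names are illustrative only.
    struct UnloadDecision {
      bool should_unload = false;

      // Condition (1) forces unloading; otherwise, with class unloading enabled,
      // any of the three disjuncts in (2.b) enables it.
      bool update(bool explicit_gc_requests_unload,
                  bool class_unloading_enabled,
                  bool interval_exceeded,
                  bool perm_wants_collection,
                  bool old_gen_too_full) {
        if (explicit_gc_requests_unload) {
          should_unload = true;
        } else if (class_unloading_enabled) {
          should_unload = interval_exceeded || perm_wants_collection || old_gen_too_full;
        }
        return should_unload;
      }
    };
    // If the inputs themselves are monotone between collections, the value returned
    // by successive update() calls can go false -> true but never true -> false.
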
3050 3111  void CMSCollector::setup_cms_unloading_and_verification_state() {
3051 3112    const  bool should_verify =    VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3052 3113                               || VerifyBeforeExit;
3053 3114    const  int  rso           =    SharedHeap::SO_Symbols | SharedHeap::SO_Strings
3054 3115                               |   SharedHeap::SO_CodeCache;
3055 3116  
3056      -  if (cms_should_unload_classes()) {   // Should unload classes this cycle
     3117 +  if (should_unload_classes()) {   // Should unload classes this cycle
3057 3118      remove_root_scanning_option(rso);  // Shrink the root set appropriately
3058 3119      set_verifying(should_verify);    // Set verification state for this cycle
3059 3120      return;                            // Nothing else needs to be done at this time
3060 3121    }
3061 3122  
3062 3123    // Not unloading classes this cycle
3063      -  assert(!cms_should_unload_classes(), "Inconsitency!");
3064      -  if ((!verifying() || cms_unloaded_classes_last_cycle()) && should_verify) {
     3124 +  assert(!should_unload_classes(), "Inconsitency!");
     3125 +  if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3065 3126      // We were not verifying, or we _were_ unloading classes in the last cycle,
3066 3127      // AND some verification options are enabled this cycle; in this case,
3067 3128      // we must make sure that the deadness map is allocated if not already so,
3068 3129      // and cleared (if already allocated previously --
3069 3130      // CMSBitMap::sizeInBits() is used to determine if it's allocated).
3070 3131      if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
3071 3132        if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
3072 3133          warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
3073 3134                  "permanent generation verification disabled");
3074 3135          return;  // Note that we leave verification disabled, so we'll retry this
... 1616 lines elided ...
4691 4752  
4692 4753    if (UseAdaptiveSizePolicy) {
4693 4754      size_policy()->checkpoint_roots_final_begin();
4694 4755    }
4695 4756  
4696 4757    ResourceMark rm;
4697 4758    HandleMark   hm;
4698 4759  
4699 4760    GenCollectedHeap* gch = GenCollectedHeap::heap();
4700 4761  
4701      -  if (cms_should_unload_classes()) {
     4762 +  if (should_unload_classes()) {
4702 4763      CodeCache::gc_prologue();
4703 4764    }
4704 4765    assert(haveFreelistLocks(), "must have free list locks");
4705 4766    assert_lock_strong(bitMapLock());
4706 4767  
4707 4768    if (!init_mark_was_synchronous) {
4708 4769      // We might assume that we need not fill TLAB's when
4709 4770      // CMSScavengeBeforeRemark is set, because we may have just done
4710 4771      // a scavenge which would have filled all TLAB's -- and besides
4711 4772      // Eden would be empty. This however may not always be the case --
... 39 lines elided ...
4751 4812    verify_work_stacks_empty();
4752 4813    verify_overflow_empty();
4753 4814  
4754 4815    {
4755 4816      NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
4756 4817      refProcessingWork(asynch, clear_all_soft_refs);
4757 4818    }
4758 4819    verify_work_stacks_empty();
4759 4820    verify_overflow_empty();
4760 4821  
4761      -  if (cms_should_unload_classes()) {
     4822 +  if (should_unload_classes()) {
4762 4823      CodeCache::gc_epilogue();
4763 4824    }
4764 4825  
4765 4826    // If we encountered any (marking stack / work queue) overflow
4766 4827    // events during the current CMS cycle, take appropriate
4767 4828    // remedial measures, where possible, so as to try and avoid
4768 4829    // recurrence of that condition.
4769 4830    assert(_markStack.isEmpty(), "No grey objects");
4770 4831    size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4771 4832                       _ser_kac_ovflw;
... 849 lines elided ...
5621 5682      } else {
5622 5683        rp->process_discovered_references(soft_ref_policy,
5623 5684                                          &_is_alive_closure,
5624 5685                                          &cmsKeepAliveClosure,
5625 5686                                          &cmsDrainMarkingStackClosure,
5626 5687                                          NULL);
5627 5688      }
5628 5689      verify_work_stacks_empty();
5629 5690    }
5630 5691  
5631      -  if (cms_should_unload_classes()) {
     5692 +  if (should_unload_classes()) {
5632 5693      {
5633 5694        TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
5634 5695  
5635 5696        // Follow SystemDictionary roots and unload classes
5636 5697        bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5637 5698  
5638 5699        // Follow CodeCache roots and unload any methods marked for unloading
5639 5700        CodeCache::do_unloading(&_is_alive_closure,
5640 5701                                &cmsKeepAliveClosure,
5641 5702                                purged_class);
... 73 lines elided ...
5715 5776      }
5716 5777    }
5717 5778  }
5718 5779  #endif
5719 5780  
5720 5781  void CMSCollector::sweep(bool asynch) {
5721 5782    assert(_collectorState == Sweeping, "just checking");
5722 5783    check_correct_thread_executing();
5723 5784    verify_work_stacks_empty();
5724 5785    verify_overflow_empty();
5725      -  incrementSweepCount();
     5786 +  increment_sweep_count();
5726 5787    _sweep_timer.stop();
5727 5788    _sweep_estimate.sample(_sweep_timer.seconds());
5728 5789    size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
5729 5790  
5730 5791    // PermGen verification support: If perm gen sweeping is disabled in
5731 5792    // this cycle, we preserve the perm gen object "deadness" information
5732 5793    // in the perm_gen_verify_bit_map. In order to do that we traverse
5733 5794    // all blocks in perm gen and mark all dead objects.
5734      -  if (verifying() && !cms_should_unload_classes()) {
5735      -    CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5736      -                             bitMapLock());
     5795 +  if (verifying() && !should_unload_classes()) {
5737 5796      assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
5738 5797             "Should have already been allocated");
5739 5798      MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
5740 5799                                 markBitMap(), perm_gen_verify_bit_map());
5741      -    _permGen->cmsSpace()->blk_iterate(&mdo);
     5800 +    if (asynch) {
     5801 +      CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
     5802 +                               bitMapLock());
     5803 +      _permGen->cmsSpace()->blk_iterate(&mdo);
     5804 +    } else {
     5805 +      // In the case of synchronous sweep, we already have
     5806 +      // the requisite locks/tokens.
     5807 +      _permGen->cmsSpace()->blk_iterate(&mdo);
     5808 +    }
5742 5809    }
5743 5810  
5744 5811    if (asynch) {
5745 5812      TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5746 5813      CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
5747 5814      // First sweep the old gen then the perm gen
5748 5815      {
5749 5816        CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5750 5817                                 bitMapLock());
5751 5818        sweepWork(_cmsGen, asynch);
5752 5819      }
5753 5820  
5754 5821      // Now repeat for perm gen
5755      -    if (cms_should_unload_classes()) {
     5822 +    if (should_unload_classes()) {
5756 5823        CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5757 5824                               bitMapLock());
5758 5825        sweepWork(_permGen, asynch);
5759 5826      }
5760 5827  
5761 5828      // Update Universe::_heap_*_at_gc figures.
5762 5829      // We need all the free list locks to make the abstract state
5763 5830      // transition from Sweeping to Resetting. See detailed note
5764 5831      // further below.
5765 5832      {
... 1 line elided ...
5767 5834                                 _permGen->freelistLock());
5768 5835        // Update heap occupancy information which is used as
5769 5836        // input to soft ref clearing policy at the next gc.
5770 5837        Universe::update_heap_info_at_gc();
5771 5838        _collectorState = Resizing;
5772 5839      }
5773 5840    } else {
5774 5841      // already have needed locks
5775 5842      sweepWork(_cmsGen,  asynch);
5776 5843  
5777      -    if (cms_should_unload_classes()) {
     5844 +    if (should_unload_classes()) {
5778 5845        sweepWork(_permGen, asynch);
5779 5846      }
5780 5847      // Update heap occupancy information which is used as
5781 5848      // input to soft ref clearing policy at the next gc.
5782 5849      Universe::update_heap_info_at_gc();
5783 5850      _collectorState = Resizing;
5784 5851    }
5785 5852    verify_work_stacks_empty();
5786 5853    verify_overflow_empty();
5787 5854  
... 140 lines elided ...
5928 5995    {
5929 5996      SweepClosure sweepClosure(this, gen, &_markBitMap,
5930 5997                              CMSYield && asynch);
5931 5998      gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5932 5999      // We need to free-up/coalesce garbage/blocks from a
5933 6000      // co-terminal free run. This is done in the SweepClosure
5934 6001      // destructor; so, do not remove this scope, else the
5935 6002      // end-of-sweep-census below will be off by a little bit.
5936 6003    }
5937 6004    gen->cmsSpace()->sweep_completed();
5938      -  gen->cmsSpace()->endSweepFLCensus(sweepCount());
     6005 +  gen->cmsSpace()->endSweepFLCensus(sweep_count());
     6006 +  if (should_unload_classes()) {                // unloaded classes this cycle,
     6007 +    _concurrent_cycles_since_last_unload = 0;   // ... reset count
     6008 +  } else {                                      // did not unload classes,
     6009 +    _concurrent_cycles_since_last_unload++;     // ... increment count
     6010 +  }
5939 6011  }
5940 6012  
5941 6013  // Reset CMS data structures (for now just the marking bit map)
5942 6014  // preparatory for the next cycle.
5943 6015  void CMSCollector::reset(bool asynch) {
5944 6016    GenCollectedHeap* gch = GenCollectedHeap::heap();
5945 6017    CMSAdaptiveSizePolicy* sp = size_policy();
5946 6018    AdaptiveSizePolicyOutput(sp, gch->total_collections());
5947 6019    if (asynch) {
5948 6020      CMSTokenSyncWithLocks ts(true, bitMapLock());
... 1237 lines elided ...
7186 7258                       CMSMarkStack*  revisitStack,
7187 7259                       HeapWord* finger, MarkFromRootsClosure* parent) :
7188 7260    OopClosure(collector->ref_processor()),
7189 7261    _collector(collector),
7190 7262    _span(span),
7191 7263    _bitMap(bitMap),
7192 7264    _markStack(markStack),
7193 7265    _revisitStack(revisitStack),
7194 7266    _finger(finger),
7195 7267    _parent(parent),
7196      -  _should_remember_klasses(collector->cms_should_unload_classes())
     7268 +  _should_remember_klasses(collector->should_unload_classes())
7197 7269  { }
7198 7270  
7199 7271  Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7200 7272                       MemRegion span,
7201 7273                       CMSBitMap* bit_map,
7202 7274                       OopTaskQueue* work_queue,
7203 7275                       CMSMarkStack*  overflow_stack,
7204 7276                       CMSMarkStack*  revisit_stack,
7205 7277                       HeapWord* finger,
7206 7278                       HeapWord** global_finger_addr,
... 2 lines elided ...
7209 7281    _collector(collector),
7210 7282    _whole_span(collector->_span),
7211 7283    _span(span),
7212 7284    _bit_map(bit_map),
7213 7285    _work_queue(work_queue),
7214 7286    _overflow_stack(overflow_stack),
7215 7287    _revisit_stack(revisit_stack),
7216 7288    _finger(finger),
7217 7289    _global_finger_addr(global_finger_addr),
7218 7290    _parent(parent),
7219      -  _should_remember_klasses(collector->cms_should_unload_classes())
     7291 +  _should_remember_klasses(collector->should_unload_classes())
7220 7292  { }
7221 7293  
7222 7294  
7223 7295  void CMSCollector::lower_restart_addr(HeapWord* low) {
7224 7296    assert(_span.contains(low), "Out of bounds addr");
7225 7297    if (_restart_addr == NULL) {
7226 7298      _restart_addr = low;
7227 7299    } else {
7228 7300      _restart_addr = MIN2(_restart_addr, low);
7229 7301    }
... 122 lines elided ...
7352 7424                                         CMSMarkStack*  revisit_stack,
7353 7425                                         bool           concurrent_precleaning):
7354 7426    OopClosure(rp),
7355 7427    _collector(collector),
7356 7428    _span(span),
7357 7429    _bit_map(bit_map),
7358 7430    _mod_union_table(mod_union_table),
7359 7431    _mark_stack(mark_stack),
7360 7432    _revisit_stack(revisit_stack),
7361 7433    _concurrent_precleaning(concurrent_precleaning),
7362      -  _should_remember_klasses(collector->cms_should_unload_classes())
     7434 +  _should_remember_klasses(collector->should_unload_classes())
7363 7435  {
7364 7436    assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7365 7437  }
7366 7438  
7367 7439  // Grey object rescan during pre-cleaning and second checkpoint phases --
7368 7440  // the non-parallel version (the parallel version appears further below.)
7369 7441  void PushAndMarkClosure::do_oop(oop* p) {
7370 7442    oop    this_oop = *p;
7371 7443    // Ignore mark word verification. If during concurrent precleaning
7372 7444    // the object monitor may be locked. If during the checkpoint
... 41 lines elided ...
7414 7486                                                 ReferenceProcessor* rp,
7415 7487                                                 CMSBitMap* bit_map,
7416 7488                                                 OopTaskQueue* work_queue,
7417 7489                                                 CMSMarkStack* revisit_stack):
7418 7490    OopClosure(rp),
7419 7491    _collector(collector),
7420 7492    _span(span),
7421 7493    _bit_map(bit_map),
7422 7494    _work_queue(work_queue),
7423 7495    _revisit_stack(revisit_stack),
7424      -  _should_remember_klasses(collector->cms_should_unload_classes())
     7496 +  _should_remember_klasses(collector->should_unload_classes())
7425 7497  {
7426 7498    assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7427 7499  }
7428 7500  
7429 7501  // Grey object rescan during second checkpoint phase --
7430 7502  // the parallel version.
7431 7503  void Par_PushAndMarkClosure::do_oop(oop* p) {
7432 7504    oop    this_oop = *p;
7433 7505    // In the assert below, we ignore the mark word because
7434 7506    // this oop may point to an already visited object that is
... 501 lines elided ...
7936 8008    if (_bitMap->isMarked(addr + 1)) {
7937 8009      // Determine the size from the bit map, rather than trying to
7938 8010      // compute it from the object header.
7939 8011      HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7940 8012      size = pointer_delta(nextOneAddr + 1, addr);
7941 8013      assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7942 8014             "alignment problem");
7943 8015  
7944 8016      #ifdef DEBUG
7945 8017        if (oop(addr)->klass() != NULL &&
7946      -          (   !_collector->cms_should_unload_classes()
     8018 +          (   !_collector->should_unload_classes()
7947 8019             || oop(addr)->is_parsable())) {
7948 8020          // Ignore mark word because we are running concurrent with mutators
7949 8021          assert(oop(addr)->is_oop(true), "live block should be an oop");
7950 8022          assert(size ==
7951 8023                 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7952 8024                 "P-mark and computed size do not agree");
7953 8025        }
7954 8026      #endif
7955 8027  
7956 8028    } else {
7957 8029      // This should be an initialized object that's alive.
7958 8030      assert(oop(addr)->klass() != NULL &&
7959      -           (!_collector->cms_should_unload_classes()
     8031 +           (!_collector->should_unload_classes()
7960 8032              || oop(addr)->is_parsable()),
7961 8033             "Should be an initialized object");
7962 8034      // Ignore mark word because we are running concurrent with mutators
7963 8035      assert(oop(addr)->is_oop(true), "live block should be an oop");
7964 8036      // Verify that the bit map has no bits marked between
7965 8037      // addr and purported end of this block.
7966 8038      size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7967 8039      assert(size >= 3, "Necessary for Printezis marks to work");
7968 8040      assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7969 8041      DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
... 818 lines elided ...