--- 1.300 ---
Merged changes between child workspace "/net/spot/workspaces/ysr/cms_bugs" and
 parent workspace "/net/jano2/export2/hotspot/ws/main/gc_baseline".
--- 1.297.1.1 ---
6621144 CMS: assertion failure "is_cms_thread == Thread::current()->is_ConcurrentGC_thread()"

          --- old/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
          +++ new/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
... 217 lines elided ...
 218  218    // Ideally, in the calculation below, we'd compute the dilatation
 219  219    // factor as: MinChunkSize/(promoting_gen's min object size)
 220  220    // Since we do not have such a general query interface for the
 221  221    // promoting generation, we'll instead just use the minimum
 222  222    // object size (which today is a header's worth of space);
 223  223    // note that all arithmetic is in units of HeapWords.
 224  224    assert(MinChunkSize >= oopDesc::header_size(), "just checking");
 225  225    assert(_dilatation_factor >= 1.0, "from previous assert");
 226  226  }
 227  227  
      228 +
      229 +// The field "_initiating_occupancy" represents the occupancy percentage
      230 +// at which we trigger a new collection cycle.  Unless explicitly specified
      231 +// via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
      232 +// is calculated by:
      233 +//
      234 +//   Let "f" be MinHeapFreeRatio in
      235 +//
      236 +//    _initiating_occupancy = 100-f +
      237 +//                           f * (CMSTrigger[Perm]Ratio/100)
      238 +//   where CMSTrigger[Perm]Ratio is the argument "tr" below.
      239 +//
      240 +// That is, if we assume the heap is at its desired maximum occupancy at the
      241 +// end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
      242 +// space be allocated before initiating a new collection cycle.
      243 +//
      244 +void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
      245 +  assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
      246 +  if (io > 0) {
      247 +    _initiating_occupancy = (double)io / 100.0;
      248 +  } else {
      249 +    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
      250 +                             (double)(tr * MinHeapFreeRatio) / 100.0)
      251 +                            / 100.0;
      252 +  }
      253 +}
      254 +
      255 +
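
As an illustration of the formula implemented above, here is a minimal standalone sketch (not HotSpot code); the MinHeapFreeRatio and CMSTriggerRatio values are assumed for the example, not necessarily any build's defaults:

// Standalone restatement of init_initiating_occupancy() for illustration only.
#include <cassert>
#include <cstdio>

static double initiating_occupancy(long io, long tr, long min_heap_free_ratio) {
  assert(io <= 100 && tr >= 0 && tr <= 100);
  if (io > 0) {
    // An explicit CMSInitiating[Perm]OccupancyFraction takes precedence.
    return (double)io / 100.0;
  }
  // Otherwise allow "tr" percent of the free ratio to be used up before a cycle starts.
  return ((100 - min_heap_free_ratio) +
          (double)(tr * min_heap_free_ratio) / 100.0) / 100.0;
}

int main() {
  // e.g. MinHeapFreeRatio = 40, CMSTriggerRatio = 80:
  //   (100 - 40) + 80 * 40 / 100 = 92, i.e. an initiating occupancy of 0.92
  printf("%.2f\n", initiating_occupancy(0, 80, 40));   // prints 0.92
  printf("%.2f\n", initiating_occupancy(75, 80, 40));  // explicit fraction wins: 0.75
  return 0;
}
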
 228  256  void ConcurrentMarkSweepGeneration::ref_processor_init() {
 229  257    assert(collector() != NULL, "no collector");
 230  258    collector()->ref_processor_init();
 231  259  }
 232  260  
 233  261  void CMSCollector::ref_processor_init() {
 234  262    if (_ref_processor == NULL) {
 235  263      // Allocate and initialize a reference processor
 236  264      _ref_processor = ReferenceProcessor::create_ref_processor(
 237  265          _span,                               // span
... 275 lines elided ...
 513  541  #ifndef PRODUCT
 514  542    _num_par_pushes(0),
 515  543  #endif
 516  544    _collection_count_start(0),
 517  545    _verifying(false),
 518  546    _icms_start_limit(NULL),
 519  547    _icms_stop_limit(NULL),
 520  548    _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 521  549    _completed_initialization(false),
 522  550    _collector_policy(cp),
 523      -  _unload_classes(false),
 524      -  _unloaded_classes_last_cycle(false),
      551 +  _should_unload_classes(false),
      552 +  _concurrent_cycles_since_last_unload(0),
 525  553    _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
 526  554  {
 527  555    if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
 528  556      ExplicitGCInvokesConcurrent = true;
 529  557    }
 530  558    // Now expand the span and allocate the collection support structures
 531  559    // (MUT, marking bit map etc.) to cover both generations subject to
 532  560    // collection.
 533  561  
 534  562    // First check that _permGen is adjacent to _cmsGen and above it.
... 100 lines elided ...
 635  663          }
 636  664          _task_queues->register_queue(i, &q_padded->work_queue);
 637  665        }
 638  666        for (i = 0; i < num_queues; i++) {
 639  667          _task_queues->queue(i)->initialize();
 640  668          _hash_seed[i] = 17;  // copied from ParNew
 641  669        }
 642  670      }
 643  671    }
 644  672  
 645      -  // "initiatingOccupancy" is the occupancy ratio at which we trigger
 646      -  // a new collection cycle.  Unless explicitly specified via
 647      -  // CMSTriggerRatio, it is calculated by:
 648      -  //   Let "f" be MinHeapFreeRatio in
 649      -  //
 650      -  //    intiatingOccupancy = 100-f +
 651      -  //                         f * (CMSTriggerRatio/100)
 652      -  // That is, if we assume the heap is at its desired maximum occupancy at the
 653      -  // end of a collection, we let CMSTriggerRatio of the (purported) free
 654      -  // space be allocated before initiating a new collection cycle.
 655      -  if (CMSInitiatingOccupancyFraction > 0) {
 656      -    _initiatingOccupancy = (double)CMSInitiatingOccupancyFraction / 100.0;
 657      -  } else {
 658      -    _initiatingOccupancy = ((100 - MinHeapFreeRatio) +
 659      -                           (double)(CMSTriggerRatio *
 660      -                                    MinHeapFreeRatio) / 100.0)
 661      -                           / 100.0;
 662      -  }
      673 +  _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
      674 +  _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);
      675 +
 663  676    // Clip CMSBootstrapOccupancy between 0 and 100.
 664      -  _bootstrap_occupancy = ((double)MIN2((intx)100, MAX2((intx)0, CMSBootstrapOccupancy)))
      677 +  _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
 665  678                           /(double)100;
 666  679  
 667  680    _full_gcs_since_conc_gc = 0;
 668  681  
 669  682    // Now tell CMS generations the identity of their collector
 670  683    ConcurrentMarkSweepGeneration::set_collector(this);
 671  684  
 672  685    // Create & start a CMS thread for this CMS collector
 673  686    _cmsThread = ConcurrentMarkSweepThread::start(this);
 674  687    assert(cmsThread() != NULL, "CMS Thread should have been created");
... 731 lines elided ...
1406 1419      gclog_or_tty->print_cr("");
1407 1420      stats().print_on(gclog_or_tty);
1408 1421      gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1409 1422        stats().time_until_cms_gen_full());
1410 1423      gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1411 1424      gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1412 1425                             _cmsGen->contiguous_available());
1413 1426      gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1414 1427      gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1415 1428      gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1416      -    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", initiatingOccupancy());
     1429 +    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
     1430 +    gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
1417 1431    }
1418 1432    // ------------------------------------------------------------------
1419 1433  
1420 1434    // If the estimated time to complete a cms collection (cms_duration())
1421 1435    // is less than the estimated time remaining until the cms generation
1422 1436    // is full, start a collection.
1423 1437    if (!UseCMSInitiatingOccupancyOnly) {
1424 1438      if (stats().valid()) {
1425 1439        if (stats().time_until_cms_start() == 0.0) {
1426 1440          return true;
... 12 lines elided ...
1439 1453          }
1440 1454          return true;
1441 1455        }
1442 1456      }
1443 1457    }
1444 1458  
1445 1459    // Otherwise, we start a collection cycle if either the perm gen or
1446 1460    // old gen want a collection cycle started. Each may use
1447 1461    // an appropriate criterion for making this decision.
1448 1462    // XXX We need to make sure that the gen expansion
1449      -  // criterion dovetails well with this.
1450      -  if (_cmsGen->shouldConcurrentCollect(initiatingOccupancy())) {
     1463 +  // criterion dovetails well with this. XXX NEED TO FIX THIS
     1464 +  if (_cmsGen->should_concurrent_collect()) {
1451 1465      if (Verbose && PrintGCDetails) {
1452 1466        gclog_or_tty->print_cr("CMS old gen initiated");
1453 1467      }
1454 1468      return true;
1455 1469    }
1456 1470  
1457      -  if (cms_should_unload_classes() &&
1458      -      _permGen->shouldConcurrentCollect(initiatingOccupancy())) {
1459      -    if (Verbose && PrintGCDetails) {
1460      -     gclog_or_tty->print_cr("CMS perm gen initiated");
     1471 +  // We start a collection if we believe an incremental collection may fail;
     1472 +  // this is not likely to be productive in practice because it's probably too
     1473 +  // late anyway.
     1474 +  GenCollectedHeap* gch = GenCollectedHeap::heap();
     1475 +  assert(gch->collector_policy()->is_two_generation_policy(),
     1476 +         "You may want to check the correctness of the following");
     1477 +  if (gch->incremental_collection_will_fail()) {
     1478 +    if (PrintGCDetails && Verbose) {
     1479 +      gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1461 1480      }
1462 1481      return true;
1463 1482    }
1464 1483  
     1484 +  if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
     1485 +    bool res = update_should_unload_classes();
     1486 +    if (res) {
     1487 +      if (Verbose && PrintGCDetails) {
     1488 +        gclog_or_tty->print_cr("CMS perm gen initiated");
     1489 +      }
     1490 +      return true;
     1491 +    }
     1492 +  }
1465 1493    return false;
1466 1494  }
1467 1495  
1468 1496  // Clear _expansion_cause fields of constituent generations
1469 1497  void CMSCollector::clear_expansion_cause() {
1470 1498    _cmsGen->clear_expansion_cause();
1471 1499    _permGen->clear_expansion_cause();
1472 1500  }
1473 1501  
1474      -bool ConcurrentMarkSweepGeneration::shouldConcurrentCollect(
1475      -  double initiatingOccupancy) {
1476      -  // We should be conservative in starting a collection cycle.  To
1477      -  // start too eagerly runs the risk of collecting too often in the
1478      -  // extreme.  To collect too rarely falls back on full collections,
1479      -  // which works, even if not optimum in terms of concurrent work.
1480      -  // As a work around for too eagerly collecting, use the flag
1481      -  // UseCMSInitiatingOccupancyOnly.  This also has the advantage of
1482      -  // giving the user an easily understandable way of controlling the
1483      -  // collections.
1484      -  // We want to start a new collection cycle if any of the following
1485      -  // conditions hold:
1486      -  // . our current occupancy exceeds the initiating occupancy, or
1487      -  // . we recently needed to expand and have not since that expansion,
1488      -  //   collected, or
1489      -  // . we are not using adaptive free lists and linear allocation is
1490      -  //   going to fail, or
1491      -  // . (for old gen) incremental collection has already failed or
1492      -  //   may soon fail in the near future as we may not be able to absorb
1493      -  //   promotions.
1494      -  assert_lock_strong(freelistLock());
     1502 +// We should be conservative in starting a collection cycle.  To
     1503 +// start too eagerly runs the risk of collecting too often in the
     1504 +// extreme.  To collect too rarely falls back on full collections,
     1505 +// which works, even if not optimum in terms of concurrent work.
      1506 +// As a workaround for too eagerly collecting, use the flag
     1507 +// UseCMSInitiatingOccupancyOnly.  This also has the advantage of
     1508 +// giving the user an easily understandable way of controlling the
     1509 +// collections.
     1510 +// We want to start a new collection cycle if any of the following
     1511 +// conditions hold:
     1512 +// . our current occupancy exceeds the configured initiating occupancy
     1513 +//   for this generation, or
     1514 +// . we recently needed to expand this space and have not, since that
     1515 +//   expansion, done a collection of this generation, or
     1516 +// . the underlying space believes that it may be a good idea to initiate
     1517 +//   a concurrent collection (this may be based on criteria such as the
     1518 +//   following: the space uses linear allocation and linear allocation is
     1519 +//   going to fail, or there is believed to be excessive fragmentation in
     1520 +//   the generation, etc... or ...
     1521 +// [.(currently done by CMSCollector::shouldConcurrentCollect() only for
     1522 +//   the case of the old generation, not the perm generation; see CR 6543076):
     1523 +//   we may be approaching a point at which allocation requests may fail because
     1524 +//   we will be out of sufficient free space given allocation rate estimates.]
     1525 +bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1495 1526  
1496      -  if (occupancy() > initiatingOccupancy) {
     1527 +  assert_lock_strong(freelistLock());
     1528 +  if (occupancy() > initiating_occupancy()) {
1497 1529      if (PrintGCDetails && Verbose) {
1498 1530        gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
1499      -        short_name(), occupancy(), initiatingOccupancy);
     1531 +        short_name(), occupancy(), initiating_occupancy());
1500 1532      }
1501 1533      return true;
1502 1534    }
1503 1535    if (UseCMSInitiatingOccupancyOnly) {
1504 1536      return false;
1505 1537    }
1506 1538    if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1507 1539      if (PrintGCDetails && Verbose) {
1508 1540        gclog_or_tty->print(" %s: collect because expanded for allocation ",
1509 1541          short_name());
1510 1542      }
1511 1543      return true;
1512 1544    }
1513      -  GenCollectedHeap* gch = GenCollectedHeap::heap();
1514      -  assert(gch->collector_policy()->is_two_generation_policy(),
1515      -         "You may want to check the correctness of the following");
1516      -  if (gch->incremental_collection_will_fail()) {
     1545 +  if (_cmsSpace->should_concurrent_collect()) {
1517 1546      if (PrintGCDetails && Verbose) {
1518      -      gclog_or_tty->print(" %s: collect because incremental collection will fail ",
     1547 +      gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1519 1548          short_name());
1520 1549      }
1521 1550      return true;
1522 1551    }
1523      -  if (!_cmsSpace->adaptive_freelists() &&
1524      -      _cmsSpace->linearAllocationWouldFail()) {
1525      -    if (PrintGCDetails && Verbose) {
1526      -      gclog_or_tty->print(" %s: collect because of linAB ",
1527      -        short_name());
1528      -    }
1529      -    return true;
1530      -  }
1531 1552    return false;
1532 1553  }
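
A condensed restatement of the decision order above (illustration only, not the HotSpot method itself); note how UseCMSInitiatingOccupancyOnly short-circuits every heuristic except the occupancy threshold:

// Checks appear in the same order as in should_concurrent_collect() above.
static bool should_start_cycle(double occupancy, double initiating_occupancy,
                               bool occupancy_only,      // UseCMSInitiatingOccupancyOnly
                               bool expanded_for_alloc,  // expansion_cause() == _satisfy_allocation
                               bool space_wants_collect) // _cmsSpace->should_concurrent_collect()
{
  if (occupancy > initiating_occupancy) return true;   // threshold crossed
  if (occupancy_only)                   return false;  // user restricted us to the threshold test
  if (expanded_for_alloc)               return true;   // expanded for allocation, not yet collected
  if (space_wants_collect)              return true;   // space-level heuristic (e.g. fragmentation)
  return false;
}
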
1533 1554  
1534 1555  void ConcurrentMarkSweepGeneration::collect(bool   full,
1535 1556                                              bool   clear_all_soft_refs,
1536 1557                                              size_t size,
1537 1558                                              bool   tlab)
1538 1559  {
1539 1560    collector()->collect(full, clear_all_soft_refs, size, tlab);
1540 1561  }
... 422 lines elided ...
1963 1984      size_t num = cms_space->totalCount();
1964 1985      assert((free_size == 0 && num == 0) ||
1965 1986             (free_size > 0  && (num == 1 || num == 2)),
1966 1987           "There should be at most 2 free chunks after compaction");
1967 1988    #endif // ASSERT
1968 1989    _collectorState = Resetting;
1969 1990    assert(_restart_addr == NULL,
1970 1991           "Should have been NULL'd before baton was passed");
1971 1992    reset(false /* == !asynch */);
1972 1993    _cmsGen->reset_after_compaction();
     1994 +  _concurrent_cycles_since_last_unload = 0;
1973 1995  
1974      -  if (verifying() && !cms_should_unload_classes()) {
     1996 +  if (verifying() && !should_unload_classes()) {
1975 1997      perm_gen_verify_bit_map()->clear_all();
1976 1998    }
1977 1999  
1978 2000    // Clear any data recorded in the PLAB chunk arrays.
1979 2001    if (_survivor_plab_array != NULL) {
1980 2002      reset_survivor_plab_arrays();
1981 2003    }
1982 2004  
1983 2005    // Adjust the per-size allocation stats for the next epoch.
1984 2006    _cmsGen->cmsSpace()->endSweepFLCensus(sweepCount() /* fake */);
... 106 lines elided ...
2091 2113  // collector but the if-then-else required made it cleaner to have
2092 2114  // separate methods.
2093 2115  void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
2094 2116    assert(Thread::current()->is_ConcurrentGC_thread(),
2095 2117      "A CMS asynchronous collection is only allowed on a CMS thread.");
2096 2118  
2097 2119    GenCollectedHeap* gch = GenCollectedHeap::heap();
2098 2120    {
2099 2121      bool safepoint_check = Mutex::_no_safepoint_check_flag;
2100 2122      MutexLockerEx hl(Heap_lock, safepoint_check);
     2123 +    FreelistLocker fll(this);
2101 2124      MutexLockerEx x(CGC_lock, safepoint_check);
2102 2125      if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2103 2126        // The foreground collector is active or we're
2104 2127        // not using asynchronous collections.  Skip this
2105 2128        // background collection.
2106 2129        assert(!_foregroundGCShouldWait, "Should be clear");
2107 2130        return;
2108 2131      } else {
2109 2132        assert(_collectorState == Idling, "Should be idling before start.");
2110 2133        _collectorState = InitialMarking;
2111 2134        // Reset the expansion cause, now that we are about to begin
2112 2135        // a new cycle.
2113 2136        clear_expansion_cause();
2114 2137      }
2115      -    _unloaded_classes_last_cycle = cms_should_unload_classes(); // ... from last cycle
2116      -    // This controls class unloading in response to an explicit gc request.
2117      -    // If ExplicitGCInvokesConcurrentAndUnloadsClasses is set, then
2118      -    // we will unload classes even if CMSClassUnloadingEnabled is not set.
2119      -    // See CR 6541037 and related CRs.
2120      -    _unload_classes = _full_gc_requested                      // ... for this cycle
2121      -                      && ExplicitGCInvokesConcurrentAndUnloadsClasses;
     2138 +    // Decide if we want to enable class unloading as part of the
     2139 +    // ensuing concurrent GC cycle.
     2140 +    update_should_unload_classes(); 
2122 2141      _full_gc_requested = false;           // acks all outstanding full gc requests
2123 2142      // Signal that we are about to start a collection
2124 2143      gch->increment_total_full_collections();  // ... starting a collection cycle
2125 2144      _collection_count_start = gch->total_full_collections();
2126 2145    }
2127 2146  
2128 2147    // Used for PrintGC
2129 2148    size_t prev_used;
2130 2149    if (PrintGC && Verbose) {
2131 2150      prev_used = _cmsGen->used(); // XXXPERM
... 908 lines elided ...
3040 3059    assert(_markStack.isEmpty(), "Marking stack should be empty");
3041 3060    assert(overflow_list_is_empty(), "Overflow list should be empty");
3042 3061  }
3043 3062  
3044 3063  void CMSCollector::verify_overflow_empty() const {
3045 3064    assert(overflow_list_is_empty(), "Overflow list should be empty");
3046 3065    assert(no_preserved_marks(), "No preserved marks");
3047 3066  }
3048 3067  #endif // PRODUCT
3049 3068  
     3069 +// Decide if we want to enable class unloading as part of the
     3070 +// ensuing concurrent GC cycle. We will collect the perm gen and
     3071 +// unload classes if it's the case that:
     3072 +// (1) an explicit gc request has been made and the flag
     3073 +//     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
     3074 +// (2) (a) class unloading is enabled at the command line, and
     3075 +//     (b) (i)   perm gen threshold has been crossed, or  
     3076 +//         (ii)  old gen is getting really full, or
     3077 +//         (iii) the previous N CMS collections did not collect the
     3078 +//               perm gen
     3079 +// NOTE: Provided there is no change in the state of the heap between
     3080 +// calls to this method, it should have idempotent results. Moreover,
     3081 +// its results should be monotonically increasing (i.e. going from 0 to 1,
     3082 +// but not 1 to 0) between successive calls between which the heap was
     3083 +// not collected. For the implementation below, it must thus rely on
     3084 +// the property that concurrent_cycles_since_last_unload()
     3085 +// will not decrease unless a collection cycle happened and that
     3086 +// _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
     3087 +// themselves also monotonic in that sense. See check_monotonicity()
     3088 +// below.
     3089 +bool CMSCollector::update_should_unload_classes() {
     3090 +  // Condition 1 above
     3091 +  if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
     3092 +    _should_unload_classes = true;
     3093 +  } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
     3094 +    // Disjuncts 2.b.(i,ii,iii) above
     3095 +    _should_unload_classes = (concurrent_cycles_since_last_unload() >=
     3096 +                              CMSClassUnloadingMaxInterval)
     3097 +                           || _permGen->should_concurrent_collect()
     3098 +                           || _cmsGen->is_too_full();
     3099 +  }
     3100 +  return _should_unload_classes;
     3101 +}
     3102 +
     3103 +bool ConcurrentMarkSweepGeneration::is_too_full() const {
     3104 +  bool res = should_concurrent_collect();
     3105 +#define CMSIsTooFullPercentage 98
     3106 +  res = res && occupancy() > (double)CMSIsTooFullPercentage/100.0;
     3107 +  return res;
     3108 +}
     3109 +
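
To make condition 2.b.(ii) above concrete, a small sketch of how is_too_full() narrows should_concurrent_collect(); the occupancy numbers are assumed for illustration, and should_concurrent_collect() is approximated by its occupancy test alone:

// 0.98 corresponds to CMSIsTooFullPercentage above.
static bool is_too_full(double occupancy, double initiating_occupancy) {
  bool wants_collection = occupancy > initiating_occupancy;  // simplified should_concurrent_collect()
  return wants_collection && occupancy > 0.98;
}
// With an assumed initiating occupancy of 0.92:
//   is_too_full(0.95, 0.92) == false  -- a cycle is wanted, but class unloading is not forced
//   is_too_full(0.99, 0.92) == true   -- the old gen is nearly full, so classes are unloaded too
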
3050 3110  void CMSCollector::setup_cms_unloading_and_verification_state() {
3051 3111    const  bool should_verify =    VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3052 3112                               || VerifyBeforeExit;
3053 3113    const  int  rso           =    SharedHeap::SO_Symbols | SharedHeap::SO_Strings
3054 3114                               |   SharedHeap::SO_CodeCache;
3055 3115  
3056      -  if (cms_should_unload_classes()) {   // Should unload classes this cycle
     3116 +  if (should_unload_classes()) {   // Should unload classes this cycle
3057 3117      remove_root_scanning_option(rso);  // Shrink the root set appropriately
3058 3118      set_verifying(should_verify);    // Set verification state for this cycle
3059 3119      return;                            // Nothing else needs to be done at this time
3060 3120    }
3061 3121  
3062 3122    // Not unloading classes this cycle
3063      -  assert(!cms_should_unload_classes(), "Inconsitency!");
3064      -  if ((!verifying() || cms_unloaded_classes_last_cycle()) && should_verify) {
      3123 +  assert(!should_unload_classes(), "Inconsistency!");
     3124 +  if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3065 3125      // We were not verifying, or we _were_ unloading classes in the last cycle,
3066 3126      // AND some verification options are enabled this cycle; in this case,
3067 3127      // we must make sure that the deadness map is allocated if not already so,
3068 3128      // and cleared (if already allocated previously --
3069 3129      // CMSBitMap::sizeInBits() is used to determine if it's allocated).
3070 3130      if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
3071 3131        if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
3072 3132          warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
3073 3133                  "permanent generation verification disabled");
3074 3134          return;  // Note that we leave verification disabled, so we'll retry this
... 1616 lines elided ...
4691 4751  
4692 4752    if (UseAdaptiveSizePolicy) {
4693 4753      size_policy()->checkpoint_roots_final_begin();
4694 4754    }
4695 4755  
4696 4756    ResourceMark rm;
4697 4757    HandleMark   hm;
4698 4758  
4699 4759    GenCollectedHeap* gch = GenCollectedHeap::heap();
4700 4760  
4701      -  if (cms_should_unload_classes()) {
     4761 +  if (should_unload_classes()) {
4702 4762      CodeCache::gc_prologue();
4703 4763    }
4704 4764    assert(haveFreelistLocks(), "must have free list locks");
4705 4765    assert_lock_strong(bitMapLock());
4706 4766  
4707 4767    if (!init_mark_was_synchronous) {
4708 4768      // We might assume that we need not fill TLAB's when
4709 4769      // CMSScavengeBeforeRemark is set, because we may have just done
4710 4770      // a scavenge which would have filled all TLAB's -- and besides
4711 4771      // Eden would be empty. This however may not always be the case --
... 39 lines elided ...
4751 4811    verify_work_stacks_empty();
4752 4812    verify_overflow_empty();
4753 4813  
4754 4814    {
4755 4815      NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
4756 4816      refProcessingWork(asynch, clear_all_soft_refs);
4757 4817    }
4758 4818    verify_work_stacks_empty();
4759 4819    verify_overflow_empty();
4760 4820  
4761      -  if (cms_should_unload_classes()) {
     4821 +  if (should_unload_classes()) {
4762 4822      CodeCache::gc_epilogue();
4763 4823    }
4764 4824  
4765 4825    // If we encountered any (marking stack / work queue) overflow
4766 4826    // events during the current CMS cycle, take appropriate
4767 4827    // remedial measures, where possible, so as to try and avoid
4768 4828    // recurrence of that condition.
4769 4829    assert(_markStack.isEmpty(), "No grey objects");
4770 4830    size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4771 4831                       _ser_kac_ovflw;
... 849 lines elided ...
5621 5681      } else {
5622 5682        rp->process_discovered_references(soft_ref_policy,
5623 5683                                          &_is_alive_closure,
5624 5684                                          &cmsKeepAliveClosure,
5625 5685                                          &cmsDrainMarkingStackClosure,
5626 5686                                          NULL);
5627 5687      }
5628 5688      verify_work_stacks_empty();
5629 5689    }
5630 5690  
5631      -  if (cms_should_unload_classes()) {
     5691 +  if (should_unload_classes()) {
5632 5692      {
5633 5693        TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
5634 5694  
5635 5695        // Follow SystemDictionary roots and unload classes
5636 5696        bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5637 5697  
5638 5698        // Follow CodeCache roots and unload any methods marked for unloading
5639 5699        CodeCache::do_unloading(&_is_alive_closure,
5640 5700                                &cmsKeepAliveClosure,
5641 5701                                purged_class);
... 82 lines elided ...
5724 5784    verify_overflow_empty();
5725 5785    incrementSweepCount();
5726 5786    _sweep_timer.stop();
5727 5787    _sweep_estimate.sample(_sweep_timer.seconds());
5728 5788    size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
5729 5789  
5730 5790    // PermGen verification support: If perm gen sweeping is disabled in
5731 5791    // this cycle, we preserve the perm gen object "deadness" information
5732 5792    // in the perm_gen_verify_bit_map. In order to do that we traverse
5733 5793    // all blocks in perm gen and mark all dead objects.
5734      -  if (verifying() && !cms_should_unload_classes()) {
5735      -    CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5736      -                             bitMapLock());
     5794 +  if (verifying() && !should_unload_classes()) {
5737 5795      assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
5738 5796             "Should have already been allocated");
5739 5797      MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
5740 5798                                 markBitMap(), perm_gen_verify_bit_map());
5741      -    _permGen->cmsSpace()->blk_iterate(&mdo);
     5799 +    if (asynch) {
     5800 +      CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
     5801 +                               bitMapLock());
     5802 +      _permGen->cmsSpace()->blk_iterate(&mdo);
     5803 +    } else {
     5804 +      // In the case of synchronous sweep, we already have
     5805 +      // the requisite locks/tokens.
     5806 +      _permGen->cmsSpace()->blk_iterate(&mdo);
     5807 +    }
5742 5808    }
5743 5809  
5744 5810    if (asynch) {
5745 5811      TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5746 5812      CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
5747 5813      // First sweep the old gen then the perm gen
5748 5814      {
5749 5815        CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5750 5816                                 bitMapLock());
5751 5817        sweepWork(_cmsGen, asynch);
5752 5818      }
5753 5819  
5754 5820      // Now repeat for perm gen
5755      -    if (cms_should_unload_classes()) {
     5821 +    if (should_unload_classes()) {
5756 5822        CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5757 5823                               bitMapLock());
5758 5824        sweepWork(_permGen, asynch);
5759 5825      }
5760 5826  
5761 5827      // Update Universe::_heap_*_at_gc figures.
5762 5828      // We need all the free list locks to make the abstract state
5763 5829      // transition from Sweeping to Resetting. See detailed note
5764 5830      // further below.
5765 5831      {
... 1 line elided ...
5767 5833                                 _permGen->freelistLock());
5768 5834        // Update heap occupancy information which is used as
5769 5835        // input to soft ref clearing policy at the next gc.
5770 5836        Universe::update_heap_info_at_gc();
5771 5837        _collectorState = Resizing;
5772 5838      }
5773 5839    } else {
5774 5840      // already have needed locks
5775 5841      sweepWork(_cmsGen,  asynch);
5776 5842  
5777      -    if (cms_should_unload_classes()) {
     5843 +    if (should_unload_classes()) {
5778 5844        sweepWork(_permGen, asynch);
5779 5845      }
5780 5846      // Update heap occupancy information which is used as
5781 5847      // input to soft ref clearing policy at the next gc.
5782 5848      Universe::update_heap_info_at_gc();
5783 5849      _collectorState = Resizing;
5784 5850    }
5785 5851    verify_work_stacks_empty();
5786 5852    verify_overflow_empty();
5787 5853  
... 141 lines elided ...
5929 5995      SweepClosure sweepClosure(this, gen, &_markBitMap,
5930 5996                              CMSYield && asynch);
5931 5997      gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5932 5998      // We need to free-up/coalesce garbage/blocks from a
5933 5999      // co-terminal free run. This is done in the SweepClosure
5934 6000      // destructor; so, do not remove this scope, else the
5935 6001      // end-of-sweep-census below will be off by a little bit.
5936 6002    }
5937 6003    gen->cmsSpace()->sweep_completed();
5938 6004    gen->cmsSpace()->endSweepFLCensus(sweepCount());
     6005 +  if (should_unload_classes()) {                // unloaded classes this cycle,
     6006 +    _concurrent_cycles_since_last_unload = 0;   // ... reset count
     6007 +  } else {                                      // did not unload classes,
     6008 +    _concurrent_cycles_since_last_unload++;     // ... increment count
     6009 +  }
5939 6010  }
5940 6011  
5941 6012  // Reset CMS data structures (for now just the marking bit map)
5942 6013  // preparatory for the next cycle.
5943 6014  void CMSCollector::reset(bool asynch) {
5944 6015    GenCollectedHeap* gch = GenCollectedHeap::heap();
5945 6016    CMSAdaptiveSizePolicy* sp = size_policy();
5946 6017    AdaptiveSizePolicyOutput(sp, gch->total_collections());
5947 6018    if (asynch) {
5948 6019      CMSTokenSyncWithLocks ts(true, bitMapLock());
... 1237 lines elided ...
7186 7257                       CMSMarkStack*  revisitStack,
7187 7258                       HeapWord* finger, MarkFromRootsClosure* parent) :
7188 7259    OopClosure(collector->ref_processor()),
7189 7260    _collector(collector),
7190 7261    _span(span),
7191 7262    _bitMap(bitMap),
7192 7263    _markStack(markStack),
7193 7264    _revisitStack(revisitStack),
7194 7265    _finger(finger),
7195 7266    _parent(parent),
7196      -  _should_remember_klasses(collector->cms_should_unload_classes())
     7267 +  _should_remember_klasses(collector->should_unload_classes())
7197 7268  { }
7198 7269  
7199 7270  Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7200 7271                       MemRegion span,
7201 7272                       CMSBitMap* bit_map,
7202 7273                       OopTaskQueue* work_queue,
7203 7274                       CMSMarkStack*  overflow_stack,
7204 7275                       CMSMarkStack*  revisit_stack,
7205 7276                       HeapWord* finger,
7206 7277                       HeapWord** global_finger_addr,
... 2 lines elided ...
7209 7280    _collector(collector),
7210 7281    _whole_span(collector->_span),
7211 7282    _span(span),
7212 7283    _bit_map(bit_map),
7213 7284    _work_queue(work_queue),
7214 7285    _overflow_stack(overflow_stack),
7215 7286    _revisit_stack(revisit_stack),
7216 7287    _finger(finger),
7217 7288    _global_finger_addr(global_finger_addr),
7218 7289    _parent(parent),
7219      -  _should_remember_klasses(collector->cms_should_unload_classes())
     7290 +  _should_remember_klasses(collector->should_unload_classes())
7220 7291  { }
7221 7292  
7222 7293  
7223 7294  void CMSCollector::lower_restart_addr(HeapWord* low) {
7224 7295    assert(_span.contains(low), "Out of bounds addr");
7225 7296    if (_restart_addr == NULL) {
7226 7297      _restart_addr = low;
7227 7298    } else {
7228 7299      _restart_addr = MIN2(_restart_addr, low);
7229 7300    }
... 122 lines elided ...
7352 7423                                         CMSMarkStack*  revisit_stack,
7353 7424                                         bool           concurrent_precleaning):
7354 7425    OopClosure(rp),
7355 7426    _collector(collector),
7356 7427    _span(span),
7357 7428    _bit_map(bit_map),
7358 7429    _mod_union_table(mod_union_table),
7359 7430    _mark_stack(mark_stack),
7360 7431    _revisit_stack(revisit_stack),
7361 7432    _concurrent_precleaning(concurrent_precleaning),
7362      -  _should_remember_klasses(collector->cms_should_unload_classes())
     7433 +  _should_remember_klasses(collector->should_unload_classes())
7363 7434  {
7364 7435    assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7365 7436  }
7366 7437  
7367 7438  // Grey object rescan during pre-cleaning and second checkpoint phases --
7368 7439  // the non-parallel version (the parallel version appears further below.)
7369 7440  void PushAndMarkClosure::do_oop(oop* p) {
7370 7441    oop    this_oop = *p;
7371 7442    // Ignore mark word verification. If during concurrent precleaning
7372 7443    // the object monitor may be locked. If during the checkpoint
... 41 lines elided ...
7414 7485                                                 ReferenceProcessor* rp,
7415 7486                                                 CMSBitMap* bit_map,
7416 7487                                                 OopTaskQueue* work_queue,
7417 7488                                                 CMSMarkStack* revisit_stack):
7418 7489    OopClosure(rp),
7419 7490    _collector(collector),
7420 7491    _span(span),
7421 7492    _bit_map(bit_map),
7422 7493    _work_queue(work_queue),
7423 7494    _revisit_stack(revisit_stack),
7424      -  _should_remember_klasses(collector->cms_should_unload_classes())
     7495 +  _should_remember_klasses(collector->should_unload_classes())
7425 7496  {
7426 7497    assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7427 7498  }
7428 7499  
7429 7500  // Grey object rescan during second checkpoint phase --
7430 7501  // the parallel version.
7431 7502  void Par_PushAndMarkClosure::do_oop(oop* p) {
7432 7503    oop    this_oop = *p;
7433 7504    // In the assert below, we ignore the mark word because
7434 7505    // this oop may point to an already visited object that is
... 501 lines elided ...
7936 8007    if (_bitMap->isMarked(addr + 1)) {
7937 8008      // Determine the size from the bit map, rather than trying to
7938 8009      // compute it from the object header.
7939 8010      HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7940 8011      size = pointer_delta(nextOneAddr + 1, addr);
7941 8012      assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7942 8013             "alignment problem");
7943 8014  
7944 8015      #ifdef DEBUG
7945 8016        if (oop(addr)->klass() != NULL &&
7946      -          (   !_collector->cms_should_unload_classes()
     8017 +          (   !_collector->should_unload_classes()
7947 8018             || oop(addr)->is_parsable())) {
7948 8019          // Ignore mark word because we are running concurrent with mutators
7949 8020          assert(oop(addr)->is_oop(true), "live block should be an oop");
7950 8021          assert(size ==
7951 8022                 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7952 8023                 "P-mark and computed size do not agree");
7953 8024        }
7954 8025      #endif
7955 8026  
7956 8027    } else {
7957 8028      // This should be an initialized object that's alive.
7958 8029      assert(oop(addr)->klass() != NULL &&
7959      -           (!_collector->cms_should_unload_classes()
     8030 +           (!_collector->should_unload_classes()
7960 8031              || oop(addr)->is_parsable()),
7961 8032             "Should be an initialized object");
7962 8033      // Ignore mark word because we are running concurrent with mutators
7963 8034      assert(oop(addr)->is_oop(true), "live block should be an oop");
7964 8035      // Verify that the bit map has no bits marked between
7965 8036      // addr and purported end of this block.
7966 8037      size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7967 8038      assert(size >= 3, "Necessary for Printezis marks to work");
7968 8039      assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7969 8040      DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
... 818 lines elided ...