/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_concurrentMarkSweepGeneration.cpp.incl"

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either of the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it were holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
           "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};
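
// Illustrative usage sketch (hypothetical, not code from this file): a CMS
// phase that must exclude the VM thread brackets its work with a
// stack-allocated CMSTokenSync, e.g.:
//
//   {
//     CMSTokenSync ts(true /* is_cms_thread */);
//     ... work that must not interleave with the VM thread ...
//   }  // token relinquished here by the destructor
//
// The RAII pattern guarantees the token is released on every exit path,
// which condition 1 in the comment block above depends on.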

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLockerEx _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};
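
// For example (a hypothetical use, mirroring ones later in this file), a
// phase needing the CMS token plus one lock could be written as:
//
//   CMSTokenSyncWithLocks ts(true /* is_cms_thread */, bitMapLock());
//
// Unused mutex arguments default to NULL; MutexLockerEx treats a NULL
// mutex as a no-op, so only the locks actually supplied are acquired.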


// Wrapper class to temporarily disable icms during a foreground cms collection.
class ICMSDisabler: public StackObj {
 public:
  // The ctor disables icms and wakes up the thread so it notices the change;
  // the dtor re-enables icms.  Note that the CMSCollector methods will check
  // CMSIncrementalMode.
  ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
  ~ICMSDisabler() { CMSCollector::enable_icms(); }
};

//////////////////////////////////////////////////////////////////
//  Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj {
 public:
  CFLS_LAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs, size_t initial_byte_size, int level,
     CardTableRS* ct, bool use_adaptive_freelists,
     FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
  CardGeneration(rs, initial_byte_size, level, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(oopDesc::header_size()))),
  _debug_collection_type(Concurrent_collection_type)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
                                           use_adaptive_freelists,
                                           dictionaryChoice);
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  if (_cmsSpace == NULL) {
    vm_exit_during_initialization(
      "CompactibleFreeListSpace allocation failure");
  }
  _cmsSpace->_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           " that of OopDesc::_klass within OopDesc");
  )
  if (ParallelGCThreads > 0) {
    typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
    _par_gc_thread_states =
      NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads);
    if (_par_gc_thread_states == NULL) {
      vm_exit_during_initialization("Could not allocate par gc structs");
    }
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
      if (_par_gc_thread_states[i] == NULL) {
        vm_exit_during_initialization("Could not allocate par gc structs");
      }
    }
  } else {
    _par_gc_thread_states = NULL;
  }
  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
  assert(MinChunkSize >= oopDesc::header_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");
}


// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle.  Unless explicitly specified
// via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio in
//
//    _initiating_occupancy = 100-f +
//                            f * (CMSTrigger[Perm]Ratio/100)
//   where CMSTrigger[Perm]Ratio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
// space be allocated before initiating a new collection cycle.
//
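// Worked example (illustrative values, not necessarily the defaults):
// with f = MinHeapFreeRatio = 40 and tr = CMSTriggerRatio = 80,
//
//    _initiating_occupancy = (100 - 40) + 40 * (80/100)   [percent]
//                          = 60 + 32 = 92
//
// i.e. a new cycle is initiated once the generation is 92% occupied
// (stored by the code below as the fraction 0.92).
//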
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
  assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}


void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor = ReferenceProcessor::create_ref_processor(
        _span,                               // span
        _cmsGen->refs_discovery_is_atomic(), // atomic_discovery
        _cmsGen->refs_discovery_is_mt(),     // mt_discovery
        &_is_alive_closure,
        ParallelGCThreads,
        ParallelRefProcEnabled);
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);

    // Allocate a dummy ref processor for perm gen.
    ReferenceProcessor* rp2 = new ReferenceProcessor();
    if (rp2 == NULL) {
      vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
    }
    _permGen->set_ref_processor(rp2);
  }
}

CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "Wrong type of heap");
  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
    gch->gen_policy()->size_policy();
  assert(sp->is_gc_cms_adaptive_size_policy(),
         "Wrong type of size policy");
  return sp;
}

CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
  CMSGCAdaptivePolicyCounters* results =
    (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
  assert(
    results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
    "Wrong gc policy counter kind");
  return results;
}


void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";

  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
  _icms_duty_cycle = CMSIncrementalDutyCycle;
}
// If promotion failure handling is on, use
// the padded average size of the promotion for each
// young generation collection.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t expected_promotion = gch->get_gen(0)->capacity();
  if (HandlePromotionFailure) {
    expected_promotion = MIN2(
        (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(),
        expected_promotion);
  }
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next minor collection.  Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    cms_free_dbl = cms_free_dbl * (100.0 - CMSIncrementalSafetyFactor) / 100.0;

    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
        SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
        cms_free, expected_promotion);
      gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
        cms_free_dbl, cms_consumption_rate() + 1.0);
    }
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}
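
// Illustrative arithmetic for the estimate above (made-up numbers): with
// cms_free = 120M, expected_promotion = 20M and
// CMSIncrementalSafetyFactor = 10, the adjusted free space is
// (120M - 20M) * 0.90 = 90M; with a consumption rate of 9M/sec the
// returned estimate is 90M / (9M + 1) ~= 10 seconds until the
// generation is (conservatively) expected to be full.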

// Compare the duration of the cms collection to the
// time remaining before the cms generation is empty.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) can be used.  This
// has been tried and some applications experienced
// promotion failures early in execution.  This was
// possibly because the averages were not accurate
// enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  if (work > deadline) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(
        " CMSCollector: collect because of anticipated promotion "
        "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
        gc0_period(), time_until_cms_gen_full());
    }
    return 0.0;
  }
  return work - deadline;
}

// Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
// amount of change to prevent wild oscillation.
unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                              unsigned int new_duty_cycle) {
  assert(old_duty_cycle <= 100, "bad input value");
  assert(new_duty_cycle <= 100, "bad input value");

  // Note:  use subtraction with caution since it may underflow (values are
  // unsigned).  Addition is safe since we're in the range 0-100.
  unsigned int damped_duty_cycle = new_duty_cycle;
  if (new_duty_cycle < old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
    if (new_duty_cycle + largest_delta < old_duty_cycle) {
      damped_duty_cycle = old_duty_cycle - largest_delta;
    }
  } else if (new_duty_cycle > old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
    if (new_duty_cycle > old_duty_cycle + largest_delta) {
      damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
    }
  }
  assert(damped_duty_cycle <= 100, "invalid duty cycle computed");

  if (CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
                        old_duty_cycle, new_duty_cycle, damped_duty_cycle);
  }
  return damped_duty_cycle;
}
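
// Worked example (hypothetical inputs): old_duty_cycle = 60,
// new_duty_cycle = 10.  Since the new value is lower,
// largest_delta = MAX2(60/4, 5U) = 15, and because 10 + 15 < 60 the
// decrease is clipped: the result is 60 - 15 = 45.  An increase from 60
// to 100 would use largest_delta = MAX2(60/4, 15U) = 15 and be clipped
// to 75, so the duty cycle ramps both up and down gradually.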

unsigned int CMSStats::icms_update_duty_cycle_impl() {
  assert(CMSIncrementalPacing && valid(),
         "should be handled in icms_update_duty_cycle()");

  double cms_time_so_far = cms_timer().seconds();
  double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
  double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);

  // Avoid division by 0.
  double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
  double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;

  unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
  if (new_duty_cycle > _icms_duty_cycle) {
    // Avoid very small duty cycles (1 or 2); 0 is allowed.
    if (new_duty_cycle > 2) {
      _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
                                                new_duty_cycle);
    }
  } else if (_allow_duty_cycle_reduction) {
    // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
    new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
    // Respect the minimum duty cycle.
    unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
    _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
  }

  if (PrintGCDetails || CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
  }

  _allow_duty_cycle_reduction = false;
  return _icms_duty_cycle;
}
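
// Illustrative computation (made-up values): if the scaled duration
// remaining is 2.0 seconds and time_until_full is 8.0 seconds, then
// duty_cycle_dbl = 100 * 2.0 / 8.0 = 25, i.e. roughly a 25% duty cycle
// (subject to the damping above) keeps the remaining concurrent work on
// pace to finish before the generation fills up.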

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
            gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_duration_per_mb(),
            cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
  if (CMSIncrementalMode) {
    st->print(",dc=%d", icms_duty_cycle());
  }

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->print(" ");
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           ConcurrentMarkSweepGeneration* permGen,
                           CardTableRS*                   ct,
                           ConcurrentMarkSweepPolicy*     cp):
  _cmsGen(cmsGen),
  _permGen(permGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosure(&_modUnionTable),
  _modUnionClosurePar(&_modUnionTable),
  _is_alive_closure(&_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _preserved_oop_stack(NULL),
  _preserved_mark_stack(NULL),
  _stats(cmsGen),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _icms_start_limit(NULL),
  _icms_stop_limit(NULL),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _should_unload_classes(false),
  _concurrent_cycles_since_last_unload(0),
  _sweep_count(0),
  _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
{
  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    ExplicitGCInvokesConcurrent = true;
  }
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // First check that _permGen is adjacent to _cmsGen and above it.
  assert(   _cmsGen->reserved().word_size()  > 0
         && _permGen->reserved().word_size() > 0,
         "generations should not be of zero size");
  assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
         "_cmsGen and _permGen should not overlap");
  assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
         "_cmsGen->end() different from _permGen->start()");

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);
  _permGen->cmsSpace()->set_collector(this);

  // Adjust my span to cover old (cms) gen and perm gen
  _span = _cmsGen->reserved()._union(_permGen->reserved());
  // Initialize the span of is_alive_closure
  _is_alive_closure.set_span(_span);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      warning("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(CMSMarkStackSize)) {
    warning("Failed to allocate CMS Marking Stack");
    return;
  }
  if (!_revisitStack.allocate(CMSRevisitStackSize)) {
    warning("Failed to allocate CMS Revisit Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ParallelCMSThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ParallelCMSThreads, (ParallelGCThreads + 3)/4);
    }
    if (ParallelCMSThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
                                                   ParallelCMSThreads, true);
      if (_conc_workers == NULL) {
        warning("GC/CMS: _conc_workers allocation failure: "
                "forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ParallelCMSThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ParallelCMSThreads > 1),
         "Inconsistency");

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = (uint) MAX2(ParallelGCThreads, ParallelCMSThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        warning("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues);
      if (_hash_seed == NULL) {
        warning("_hash_seed array allocation failure");
        return;
      }

      // XXX use a global constant instead of 64!
      typedef struct OopTaskQueuePadded {
        OopTaskQueue work_queue;
        char pad[64 - sizeof(OopTaskQueue)];  // prevent false sharing
      } OopTaskQueuePadded;

      for (i = 0; i < num_queues; i++) {
        OopTaskQueuePadded *q_padded = new OopTaskQueuePadded();
        if (q_padded == NULL) {
          warning("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, &q_padded->work_queue);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
  _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);

  // Clip CMSBootstrapOccupancy between 0 and 100.
  _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
                         /(double)100;

  _full_gcs_since_conc_gc = 0;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _young_gen = gch->prev_gen(_cmsGen);
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity);
    if (_eden_chunk_array == NULL) {
      _eden_chunk_capacity = 0;
      warning("GC/CMS: _eden_chunk_array allocation failure");
    }
  }
  assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");

  // Support for parallelizing survivor space rescan
  if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
    size_t max_plab_samples = MaxNewSize/((SurvivorRatio+2)*MinTLABSize);
    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
    _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
    if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
        || _cursor == NULL) {
      warning("Failed to allocate survivor plab/chunk array");
      if (_survivor_plab_array  != NULL) {
        FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
        _survivor_plab_array = NULL;
      }
      if (_survivor_chunk_array != NULL) {
        FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
        _survivor_chunk_array = NULL;
      }
      if (_cursor != NULL) {
        FREE_C_HEAP_ARRAY(size_t, _cursor);
        _cursor = NULL;
      }
    } else {
      _survivor_chunk_capacity = 2*max_plab_samples;
      for (uint i = 0; i < ParallelGCThreads; i++) {
        HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples);
        if (vec == NULL) {
          warning("Failed to allocate survivor plab array");
          for (int j = i; j > 0; j--) {
            FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
          }
          FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
          FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
          _survivor_plab_array = NULL;
          _survivor_chunk_array = NULL;
          _survivor_chunk_capacity = 0;
          break;
        } else {
          ChunkArray* cur =
            ::new (&_survivor_plab_array[i]) ChunkArray(vec,
                                                        max_plab_samples);
          assert(cur->end() == 0, "Should be 0");
          assert(cur->array() == vec, "Should be vec");
          assert(cur->capacity() == max_plab_samples, "Error");
        }
      }
    }
  }
  assert(   (   _survivor_plab_array  != NULL
             && _survivor_chunk_array != NULL)
         || (   _survivor_chunk_capacity == 0
             && _survivor_chunk_index == 0),
         "Error");

  // Choose what strong roots should be scanned depending on verification options
  // and perm gen collection mode.
  if (!CMSClassUnloadingEnabled) {
    // If class unloading is disabled we want to include all classes into the root set.
    add_root_scanning_option(SharedHeap::SO_AllClasses);
  } else {
    add_root_scanning_option(SharedHeap::SO_SystemClasses);
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _sweep_timer.start();  // start of time
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// This is an optimized version of update_counters(). It takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (PrintGCDetails) {
    if (Verbose) {
      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
        level(), short_name(), s, used(), capacity());
    } else {
      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
        level(), short_name(), s, used() / K, capacity() / K);
    }
  }
  if (Verbose) {
    gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
      gch->used(), gch->capacity());
  } else {
    gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
      gch->used() / K, gch->capacity() / K);
  }
}

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(
    size_t max_promotion_in_bytes,
    bool younger_handles_promotion_failure) const {

  // This is the most conservative test.  Full promotion is
  // guaranteed if this is used. The multiplicative factor is to
  // account for the worst case "dilatation".
  double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes;
  if (adjusted_max_promo_bytes > (double)max_uintx) { // larger than size_t
    adjusted_max_promo_bytes = (double)max_uintx;
  }
  bool result = (max_contiguous_available() >= (size_t)adjusted_max_promo_bytes);

  if (younger_handles_promotion_failure && !result) {
    // Full promotion is not guaranteed because fragmentation
    // of the cms generation can prevent the full promotion.
    result = (max_available() >= (size_t)adjusted_max_promo_bytes);

    if (!result) {
      // With promotion failure handling the test for the ability
      // to support the promotion does not have to be guaranteed.
      // Use an average of the amount promoted.
      result = max_available() >= (size_t)
        gc_stats()->avg_promoted()->padded_average();
      if (PrintGC && Verbose && result) {
        gclog_or_tty->print_cr(
          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
          " max_available: " SIZE_FORMAT
          " avg_promoted: " SIZE_FORMAT,
          max_available(), (size_t)
          gc_stats()->avg_promoted()->padded_average());
      }
    } else {
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr(
          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
          " max_available: " SIZE_FORMAT
          " adj_max_promo_bytes: " SIZE_FORMAT,
          max_available(), (size_t)adjusted_max_promo_bytes);
      }
    }
  } else {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(
        "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
        " contiguous_available: " SIZE_FORMAT
        " adj_max_promo_bytes: " SIZE_FORMAT,
        max_contiguous_available(), (size_t)adjusted_max_promo_bytes);
    }
  }
  return result;
}
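
// To summarize the three-stage test above with illustrative numbers: with
// a dilatation factor of 1.5 and max_promotion_in_bytes = 10M,
// adjusted_max_promo_bytes = 15M. Promotion is deemed safe if 15M of
// contiguous space is available; failing that, and only if the younger
// generation handles promotion failure, if 15M is available in total;
// failing that, if the padded average promotion volume still fits.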

CompactibleSpace*
ConcurrentMarkSweepGeneration::first_compaction_space() const {
  return _cmsSpace;
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information.  These pointers can be adjusted
  // along with all the other pointers into the heap but
  // compaction is expected to be a rare event with
  // a heap using cms so don't do it without seeing the need.
  if (ParallelGCThreads > 0) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i]->promo.reset();
    }
  }
}

void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
  blk->do_space(_cmsSpace);
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  size_t expand_bytes = 0;
  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
  }
  if (expand_bytes > 0) {
    if (PrintGCDetails && Verbose) {
      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
      gclog_or_tty->print_cr("\nFrom compute_new_size: ");
      gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
      gclog_or_tty->print_cr("  Desired free fraction %f",
        desired_free_percentage);
      gclog_or_tty->print_cr("  Maximum free fraction %f",
        maximum_free_percentage);
      gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
      gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
        desired_capacity/1000);
      int prev_level = level() - 1;
      if (prev_level >= 0) {
        size_t prev_size = 0;
        GenCollectedHeap* gch = GenCollectedHeap::heap();
        Generation* prev_gen = gch->_gens[prev_level];
        prev_size = prev_gen->capacity();
        gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
          prev_size/1000);
      }
      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
        unsafe_max_alloc_nogc()/1000);
      gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
        contiguous_available()/1000);
      gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
        expand_bytes);
    }
    // safe if expansion fails
    expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  Expanded free fraction %f",
        ((double) free()) / capacity());
    }
  }
}

Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
                                                  bool   tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLockerEx x(freelistLock(),
                  Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                                bool   tlab) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking. This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}

// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLockerEx y(_markBitMap.lock(),
                    Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so sweeper can skip over it
    //    if it's uninitialized when the sweeper reaches it.
    _markBitMap.mark(start);          // object is live
    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1);
                                      // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass() == NULL, "_klass should be NULL");
}
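
// Pictorially (an illustrative sketch for an object of size 4 words at
// 'start'):
//
//   bit index:  start  start+1  start+2  start+3
//   mark bit:     1       1        0        1
//
// The first bit keeps the object live, the second flags "possibly
// uninitialized", and the last lets the sweeper skip to the end of the
// block without parsing its (possibly still missing) header.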

void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the sweeper gets to look at it.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     (HeapWord*)round_to((intptr_t)(start + obj_size),
                      CardTableModRefBS::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}

static inline size_t percent_of_space(Space* space, HeapWord* addr)
{
  size_t delta = pointer_delta(addr, space->bottom());
  return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
}

void CMSCollector::icms_update_allocation_limits()
{
  Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
  EdenSpace* eden = gen0->as_DefNewGeneration()->eden();

  const unsigned int duty_cycle = stats().icms_update_duty_cycle();
  if (CMSTraceIncrementalPacing) {
    stats().print();
  }

  assert(duty_cycle <= 100, "invalid duty cycle");
  if (duty_cycle != 0) {
    // The duty_cycle is a percentage between 0 and 100; convert to words and
    // then compute the offset from the endpoints of the space.
    size_t free_words = eden->free() / HeapWordSize;
    double free_words_dbl = (double)free_words;
    size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
    size_t offset_words = (free_words - duty_cycle_words) / 2;

    _icms_start_limit = eden->top() + offset_words;
    _icms_stop_limit = eden->end() - offset_words;

    // The limits may be adjusted (shifted to the right) by
    // CMSIncrementalOffset, to allow the application more mutator time after a
    // young gen gc (when all mutators were stopped) and before CMS starts and
    // takes away one or more cpus.
    if (CMSIncrementalOffset != 0) {
      double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
      size_t adjustment = (size_t)adjustment_dbl;
      HeapWord* tmp_stop = _icms_stop_limit + adjustment;
      if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
        _icms_start_limit += adjustment;
        _icms_stop_limit = tmp_stop;
      }
    }
  }
  if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
    _icms_start_limit = _icms_stop_limit = eden->end();
  }

  // Install the new start limit.
  eden->set_soft_end(_icms_start_limit);

  if (CMSTraceIncrementalMode) {
    gclog_or_tty->print(" icms alloc limits:  "
                        PTR_FORMAT "," PTR_FORMAT
                        " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
                        _icms_start_limit, _icms_stop_limit,
                        percent_of_space(eden, _icms_start_limit),
                        percent_of_space(eden, _icms_stop_limit));
    if (Verbose) {
      gclog_or_tty->print("eden:  ");
      eden->print_on(gclog_or_tty);
    }
  }
}
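
// Worked example (made-up values): suppose eden has 1000 free words and
// the duty cycle is 40. Then duty_cycle_words = 400 and
// offset_words = (1000 - 400) / 2 = 300, so the start limit sits 300
// words past top() and the stop limit 300 words before end(); the CMS
// thread runs while allocation crosses the middle 40% of the remaining
// region (before any CMSIncrementalOffset shift).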

// Any changes here should try to maintain the invariant
// that if this method is called with _icms_start_limit
// and _icms_stop_limit both NULL, then it should return NULL
// and not notify the icms thread.
HeapWord*
CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
                                       size_t word_size)
{
  // A start_limit equal to end() means the duty cycle is 0, so treat that as a
  // nop.
  if (CMSIncrementalMode && _icms_start_limit != space->end()) {
    if (top <= _icms_start_limit) {
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, _icms_stop_limit,
                               percent_of_space(space, _icms_stop_limit));
      }
      ConcurrentMarkSweepThread::start_icms();
      assert(top < _icms_stop_limit, "Tautology");
      if (word_size < pointer_delta(_icms_stop_limit, top)) {
        return _icms_stop_limit;
      }

      // The allocation will cross both the _start and _stop limits, so do the
      // stop notification also and return end().
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, space->end(),
                               percent_of_space(space, space->end()));
      }
      ConcurrentMarkSweepThread::stop_icms();
      return space->end();
    }

    if (top <= _icms_stop_limit) {
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, space->end(),
                               percent_of_space(space, space->end()));
      }
      ConcurrentMarkSweepThread::stop_icms();
      return space->end();
    }

    if (CMSTraceIncrementalMode) {
      space->print_on(gclog_or_tty);
      gclog_or_tty->stamp();
      gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
                             ", new limit=" PTR_FORMAT,
                             top, NULL);
    }
  }

  return NULL;
}
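
// Summary of the soft limits handed back above (an informal sketch of the
// cases in the code, not additional behavior):
//
//   top <= _icms_start_limit : start icms; return _icms_stop_limit
//                              (or end() if the request crosses it)
//   top <= _icms_stop_limit  : stop icms;  return end()
//   otherwise                : return NULL (no further notification)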

oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size, oop* ref) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  // allocate, copy and if necessary update promoinfo --
  // delegate to underlying space.
  assert_lock_strong(freelistLock());

#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  oop res = _cmsSpace->promote(obj, obj_size, ref);
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand(s*HeapWordSize, MinHeapDeltaBytes,
      CMSExpansionCause::_satisfy_promotion);
    // Since there's currently no next generation, we don't try to promote
    // into a more senior generation.
    assert(next_gen() == NULL, "assumption, based upon which no attempt "
                               "is made to pass on a possibly failing "
                               "promotion to next generation");
    res = _cmsSpace->promote(obj, obj_size, ref);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
    assert(obj->is_oop(), "Will dereference klass pointer below");
    collector()->promoted(false,           // Not parallel
                          (HeapWord*)res, obj->is_objArray(), obj_size);
    // promotion counters
    NOT_PRODUCT(
      _numObjectsPromoted++;
      _numWordsPromoted +=
        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
    )
  }
  return res;
}


HeapWord*
ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
                                                        HeapWord* top,
                                                        size_t word_sz)
{
  return collector()->allocation_limit_reached(space, top, word_sz);
}

// Things to support parallel young-gen collection.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                           oop old, markOop m,
                                           size_t word_sz) {
#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  PromotionInfo* promoInfo = &ps->promo;
  // if we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving header if necessary).
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
    // Out of space for allocating spooling buffers;
    // try expanding and allocating spooling buffers.
    if (!expand_and_ensure_spooling_space(promoInfo)) {
      return NULL;
    }
  }
  assert(promoInfo->has_spooling_space(), "Control point invariant");
  HeapWord* obj_ptr = ps->lab.alloc(word_sz);
  if (obj_ptr == NULL) {
    obj_ptr = expand_and_par_lab_allocate(ps, word_sz);
    if (obj_ptr == NULL) {
      return NULL;
    }
  }
  oop obj = oop(obj_ptr);
  assert(obj->klass() == NULL, "Object should be uninitialized here.");
  // Otherwise, copy the object.  Here we must be careful to insert the
  // klass pointer last, since this marks the block as an allocated object.
  HeapWord* old_ptr = (HeapWord*)old;
  if (word_sz > (size_t)oopDesc::header_size()) {
    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                 obj_ptr + oopDesc::header_size(),
                                 word_sz - oopDesc::header_size());
  }
  // Restore the mark word copied above.
  obj->set_mark(m);
  // Now we can track the promoted object, if necessary.  We take care
  // to delay the transition from uninitialized to full object
  // (i.e., insertion of klass pointer) until after, so that it
  // atomically becomes a promoted object.
  if (promoInfo->tracking()) {
    promoInfo->track((PromotedObject*)obj, old->klass());
  }
  // Finally, install the klass pointer.
  obj->set_klass(old->klass());

  assert(old->is_oop(), "Will dereference klass ptr below");
  collector()->promoted(true,          // parallel
                        obj_ptr, old->is_objArray(), word_sz);

  NOT_PRODUCT(
    Atomic::inc(&_numObjectsPromoted);
    Atomic::add((jint)CompactibleFreeListSpace::adjustObjectSize(obj->size()),
                &_numWordsPromoted);
  )

  return obj;
}
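
// Note (an informal sketch of the publication order used above):
//   1. copy the body words (the block still has no klass, so concurrent
//      readers treat it as raw/uninitialized)
//   2. restore the mark word
//   3. record the object in PromotionInfo while still unpublished
//   4. set_klass(...) -- the block atomically becomes a parseable object
// Concurrent sweepers that observe a NULL klass treat the block as
// uninitialized, which is why step 4 must come last.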

void
ConcurrentMarkSweepGeneration::
par_promote_alloc_undo(int thread_num,
                       HeapWord* obj, size_t word_sz) {
  // CMS does not support promotion undo.
  ShouldNotReachHere();
}

void
ConcurrentMarkSweepGeneration::
par_promote_alloc_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ps->lab.retire();
#if CFLS_LAB_REFILL_STATS
  if (thread_num == 0) {
    _cmsSpace->print_par_alloc_stats();
  }
#endif
}

void
ConcurrentMarkSweepGeneration::
par_oop_since_save_marks_iterate_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ParScanWithoutBarrierClosure* dummy_cl = NULL;
  ps->promo.promoted_oops_iterate_nv(dummy_cl);
}

// XXXPERM
bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
                                                   size_t size,
                                                   bool   tlab)
{
  // We allow a STW collection only if a full
  // collection was requested.
  return full || should_allocate(size, tlab); // FIX ME !!!
  // This and promotion failure handling are connected at the
  // hip and should be fixed by untying them.
}

bool CMSCollector::shouldConcurrentCollect() {
  if (_full_gc_requested) {
    assert(ExplicitGCInvokesConcurrent, "Unexpected state");
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
                             "gc request");
    }
    return true;
  }

  // For debugging purposes, change the type of collection.
  // If the rotation is not on the concurrent collection
  // type, don't start a concurrent collection.
  NOT_PRODUCT(
    if (RotateCMSCollectionTypes &&
        (_cmsGen->debug_collection_type() !=
         ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
      assert(_cmsGen->debug_collection_type() !=
             ConcurrentMarkSweepGeneration::Unknown_collection_type,
             "Bad cms collection type");
      return false;
    }
  )

  FreelistLocker x(this);
  // ------------------------------------------------------------------
  // Print out lots of information which affects the initiation of
  // a collection.
  if (PrintCMSInitiationStatistics && stats().valid()) {
    gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
    gclog_or_tty->stamp();
    gclog_or_tty->print_cr("");
    stats().print_on(gclog_or_tty);
    gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
      stats().time_until_cms_gen_full());
    gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
    gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
      _cmsGen->contiguous_available());
    gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
    gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
    gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
    gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
  }
  // ------------------------------------------------------------------

  // If the estimated time to complete a cms collection (cms_duration())
  // is less than the estimated time remaining until the cms generation
  // is full, start a collection.
  if (!UseCMSInitiatingOccupancyOnly) {
    if (stats().valid()) {
      if (stats().time_until_cms_start() == 0.0) {
        return true;
      }
    } else {
      // We want to conservatively collect somewhat early in order
      // to try and "bootstrap" our CMS/promotion statistics;
      // this branch will not fire after the first successful CMS
      // collection because the stats should then be valid.
      if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
        if (Verbose && PrintGCDetails) {
          gclog_or_tty->print_cr(
            " CMSCollector: collect for bootstrapping statistics:"
            " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
            _bootstrap_occupancy);
        }
        return true;
      }
    }
  }

  // Otherwise, we start a collection cycle if either the perm gen or
  // old gen want a collection cycle started. Each may use
  // an appropriate criterion for making this decision.
  // XXX We need to make sure that the gen expansion
  // criterion dovetails well with this. XXX NEED TO FIX THIS
  if (_cmsGen->should_concurrent_collect()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print_cr("CMS old gen initiated");
    }
    return true;
  }

  // We start a collection if we believe an incremental collection may fail;
  // this is not likely to be productive in practice because it's probably too
  // late anyway.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->collector_policy()->is_two_generation_policy(),
         "You may want to check the correctness of the following");
  if (gch->incremental_collection_will_fail()) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
    }
    return true;
  }

  if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
    bool res = update_should_unload_classes();
    if (res) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print_cr("CMS perm gen initiated");
      }
      return true;
    }
  }
  return false;
}

// Clear _expansion_cause fields of constituent generations
void CMSCollector::clear_expansion_cause() {
  _cmsGen->clear_expansion_cause();
  _permGen->clear_expansion_cause();
}

// We should be conservative in starting a collection cycle.  To
// start too eagerly runs the risk of collecting too often in the
// extreme.  To collect too rarely falls back on full collections,
// which works, even if not optimal in terms of concurrent work.
// As a workaround for collecting too eagerly, use the flag
// UseCMSInitiatingOccupancyOnly.  This also has the advantage of
// giving the user an easily understandable way of controlling the
// collections.
1511 // We want to start a new collection cycle if any of the following
1512 // conditions hold:
1513 // . our current occupancy exceeds the configured initiating occupancy
1514 // for this generation, or
1515 // . we recently needed to expand this space and have not, since that
1516 // expansion, done a collection of this generation, or
// . the underlying space believes that it may be a good idea to initiate
//   a concurrent collection (this may be based on criteria such as the
//   following: the space uses linear allocation and linear allocation is
//   going to fail, or there is believed to be excessive fragmentation in
//   the generation, etc.), or ...
// [. (currently done by CMSCollector::shouldConcurrentCollect() only for
//   the case of the old generation, not the perm generation; see CR 6543076):
//   we may be approaching a point at which allocation requests may fail because
//   we will be out of sufficient free space given allocation rate estimates.]
1526 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1527
1528 assert_lock_strong(freelistLock());
1529 if (occupancy() > initiating_occupancy()) {
1530 if (PrintGCDetails && Verbose) {
1531 gclog_or_tty->print(" %s: collect because of occupancy %f / %f ",
1532 short_name(), occupancy(), initiating_occupancy());
1533 }
1534 return true;
1535 }
1536 if (UseCMSInitiatingOccupancyOnly) {
1537 return false;
1538 }
1539 if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1540 if (PrintGCDetails && Verbose) {
1541 gclog_or_tty->print(" %s: collect because expanded for allocation ",
1542 short_name());
1543 }
1544 return true;
1545 }
1546 if (_cmsSpace->should_concurrent_collect()) {
1547 if (PrintGCDetails && Verbose) {
1548 gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1549 short_name());
1550 }
1551 return true;
1552 }
1553 return false;
1554 }
1555
1556 void ConcurrentMarkSweepGeneration::collect(bool full,
1557 bool clear_all_soft_refs,
1558 size_t size,
1559 bool tlab)
1560 {
1561 collector()->collect(full, clear_all_soft_refs, size, tlab);
1562 }
1563
1564 void CMSCollector::collect(bool full,
1565 bool clear_all_soft_refs,
1566 size_t size,
1567 bool tlab)
1568 {
1569 if (!UseCMSCollectionPassing && _collectorState > Idling) {
    // For debugging purposes, skip the collection if the state
    // is not currently idle.
1572 if (TraceCMSState) {
1573 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
1574 Thread::current(), full, _collectorState);
1575 }
1576 return;
1577 }
1578
1579 // The following "if" branch is present for defensive reasons.
1580 // In the current uses of this interface, it can be replaced with:
  // assert(!GC_locker::is_active(), "Can't be called otherwise");
1582 // But I am not placing that assert here to allow future
1583 // generality in invoking this interface.
1584 if (GC_locker::is_active()) {
1585 // A consistency test for GC_locker
1586 assert(GC_locker::needs_gc(), "Should have been set already");
1587 // Skip this foreground collection, instead
1588 // expanding the heap if necessary.
1589 // Need the free list locks for the call to free() in compute_new_size()
1590 compute_new_size();
1591 return;
1592 }
1593 acquire_control_and_collect(full, clear_all_soft_refs);
1594 _full_gcs_since_conc_gc++;
1595
1596 }
1597
1598 void CMSCollector::request_full_gc(unsigned int full_gc_count) {
1599 GenCollectedHeap* gch = GenCollectedHeap::heap();
1600 unsigned int gc_count = gch->total_full_collections();
1601 if (gc_count == full_gc_count) {
1602 MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1603 _full_gc_requested = true;
1604 CGC_lock->notify(); // nudge CMS thread
1605 }
1606 }
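
// Illustrative (hypothetical) caller pattern for request_full_gc():
// the requester samples the full collection count before deciding that
// a full gc is needed, so that the request becomes a no-op if another
// full collection has intervened:
//
//   unsigned int count = GenCollectedHeap::heap()->total_full_collections();
//   ... decide that a full gc should be requested ...
//   collector->request_full_gc(count); // ignored if counts no longer match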
1607
1608
1609 // The foreground and background collectors need to coordinate in order
1610 // to make sure that they do not mutually interfere with CMS collections.
1611 // When a background collection is active,
1612 // the foreground collector may need to take over (preempt) and
1613 // synchronously complete an ongoing collection. Depending on the
1614 // frequency of the background collections and the heap usage
// of the application, this preemption can be rare or frequent.
// There are only certain
// points in the background collection at which the "collection-baton"
// can be passed to the foreground collector.
1619 //
1620 // The foreground collector will wait for the baton before
1621 // starting any part of the collection. The foreground collector
1622 // will only wait at one location.
1623 //
1624 // The background collector will yield the baton before starting a new
1625 // phase of the collection (e.g., before initial marking, marking from roots,
// precleaning, final re-mark, sweep, etc.). This is normally done at the head
1627 // of the loop which switches the phases. The background collector does some
1628 // of the phases (initial mark, final re-mark) with the world stopped.
1629 // Because of locking involved in stopping the world,
1630 // the foreground collector should not block waiting for the background
1631 // collector when it is doing a stop-the-world phase. The background
1632 // collector will yield the baton at an additional point just before
1633 // it enters a stop-the-world phase. Once the world is stopped, the
1634 // background collector checks the phase of the collection. If the
1635 // phase has not changed, it proceeds with the collection. If the
1636 // phase has changed, it skips that phase of the collection. See
1637 // the comments on the use of the Heap_lock in collect_in_background().
1638 //
// Variables used in baton passing.
1640 // _foregroundGCIsActive - Set to true by the foreground collector when
1641 // it wants the baton. The foreground clears it when it has finished
1642 // the collection.
1643 // _foregroundGCShouldWait - Set to true by the background collector
1644 // when it is running. The foreground collector waits while
1645 // _foregroundGCShouldWait is true.
1646 // CGC_lock - monitor used to protect access to the above variables
1647 // and to notify the foreground and background collectors.
1648 // _collectorState - current state of the CMS collection.
1649 //
1650 // The foreground collector
1651 // acquires the CGC_lock
1652 // sets _foregroundGCIsActive
1653 // waits on the CGC_lock for _foregroundGCShouldWait to be false
1654 // various locks acquired in preparation for the collection
1655 // are released so as not to block the background collector
1656 // that is in the midst of a collection
1657 // proceeds with the collection
1658 // clears _foregroundGCIsActive
1659 // returns
1660 //
1661 // The background collector in a loop iterating on the phases of the
1662 // collection
1663 // acquires the CGC_lock
1664 // sets _foregroundGCShouldWait
1665 // if _foregroundGCIsActive is set
//     clears _foregroundGCShouldWait, notifies CGC_lock
//     waits on CGC_lock for _foregroundGCIsActive to become false
1668 // and exits the loop.
1669 // otherwise
1670 // proceed with that phase of the collection
1671 // if the phase is a stop-the-world phase,
1672 // yield the baton once more just before enqueueing
1673 // the stop-world CMS operation (executed by the VM thread).
1674 // returns after all phases of the collection are done
1675 //
1676
1677 void CMSCollector::acquire_control_and_collect(bool full,
1678 bool clear_all_soft_refs) {
1679 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1680 assert(!Thread::current()->is_ConcurrentGC_thread(),
1681 "shouldn't try to acquire control from self!");
1682
1683 // Start the protocol for acquiring control of the
1684 // collection from the background collector (aka CMS thread).
1685 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1686 "VM thread should have CMS token");
1687 // Remember the possibly interrupted state of an ongoing
1688 // concurrent collection
1689 CollectorState first_state = _collectorState;
1690
1691 // Signal to a possibly ongoing concurrent collection that
1692 // we want to do a foreground collection.
1693 _foregroundGCIsActive = true;
1694
1695 // Disable incremental mode during a foreground collection.
1696 ICMSDisabler icms_disabler;
1697
  // release locks and wait for a notify from the background collector;
  // releasing the locks is only necessary for phases which
  // do yields to improve the granularity of the collection.
1701 assert_lock_strong(bitMapLock());
1702 // We need to lock the Free list lock for the space that we are
1703 // currently collecting.
1704 assert(haveFreelistLocks(), "Must be holding free list locks");
1705 bitMapLock()->unlock();
1706 releaseFreelistLocks();
1707 {
1708 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1709 if (_foregroundGCShouldWait) {
      // We are going to be waiting for action from the CMS thread;
      // it had better not be gone (for instance at shutdown)!
1712 assert(ConcurrentMarkSweepThread::cmst() != NULL,
1713 "CMS thread must be running");
1714 // Wait here until the background collector gives us the go-ahead
1715 ConcurrentMarkSweepThread::clear_CMS_flag(
1716 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token
1717 // Get a possibly blocked CMS thread going:
1718 // Note that we set _foregroundGCIsActive true above,
1719 // without protection of the CGC_lock.
1720 CGC_lock->notify();
1721 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1722 "Possible deadlock");
1723 while (_foregroundGCShouldWait) {
1724 // wait for notification
1725 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
        // Possibility of delay/starvation here, since the CMS token does
        // not know to give priority to the VM thread? Actually, I think
        // there wouldn't be any delay/starvation, but the proof of
        // that "fact" (?) appears non-trivial. XXX 20011219YSR
1730 }
1731 ConcurrentMarkSweepThread::set_CMS_flag(
1732 ConcurrentMarkSweepThread::CMS_vm_has_token);
1733 }
1734 }
1735 // The CMS_token is already held. Get back the other locks.
1736 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1737 "VM thread should have CMS token");
1738 getFreelistLocks();
1739 bitMapLock()->lock_without_safepoint_check();
1740 if (TraceCMSState) {
1741 gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1742 INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1743 gclog_or_tty->print_cr(" gets control with state %d", _collectorState);
1744 }
1745
1746 // Check if we need to do a compaction, or if not, whether
1747 // we need to start the mark-sweep from scratch.
1748 bool should_compact = false;
1749 bool should_start_over = false;
1750 decide_foreground_collection_type(clear_all_soft_refs,
1751 &should_compact, &should_start_over);
1752
1753 NOT_PRODUCT(
1754 if (RotateCMSCollectionTypes) {
1755 if (_cmsGen->debug_collection_type() ==
1756 ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
1757 should_compact = true;
1758 } else if (_cmsGen->debug_collection_type() ==
1759 ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
1760 should_compact = false;
1761 }
1762 }
1763 )
1764
1765 if (PrintGCDetails && first_state > Idling) {
1766 GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1767 if (GCCause::is_user_requested_gc(cause) ||
1768 GCCause::is_serviceability_requested_gc(cause)) {
1769 gclog_or_tty->print(" (concurrent mode interrupted)");
1770 } else {
1771 gclog_or_tty->print(" (concurrent mode failure)");
1772 }
1773 }
1774
1775 if (should_compact) {
1776 // If the collection is being acquired from the background
1777 // collector, there may be references on the discovered
1778 // references lists that have NULL referents (being those
1779 // that were concurrently cleared by a mutator) or
1780 // that are no longer active (having been enqueued concurrently
1781 // by the mutator).
1782 // Scrub the list of those references because Mark-Sweep-Compact
1783 // code assumes referents are not NULL and that all discovered
1784 // Reference objects are active.
1785 ref_processor()->clean_up_discovered_references();
1786
1787 do_compaction_work(clear_all_soft_refs);
1788
1789 // Has the GC time limit been exceeded?
1790 check_gc_time_limit();
1791
1792 } else {
1793 do_mark_sweep_work(clear_all_soft_refs, first_state,
1794 should_start_over);
1795 }
1796 // Reset the expansion cause, now that we just completed
1797 // a collection cycle.
1798 clear_expansion_cause();
1799 _foregroundGCIsActive = false;
1800 return;
1801 }
1802
1803 void CMSCollector::check_gc_time_limit() {
1804
1805 // Ignore explicit GC's. Exiting here does not set the flag and
1806 // does not reset the count. Updating of the averages for system
1807 // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
1808 GCCause::Cause gc_cause = GenCollectedHeap::heap()->gc_cause();
1809 if (GCCause::is_user_requested_gc(gc_cause) ||
1810 GCCause::is_serviceability_requested_gc(gc_cause)) {
1811 return;
1812 }
1813
  // Calculate the fraction of the CMS generation that was freed during
  // the last collection.
1816 // Only consider the STW compacting cost for now.
1817 //
1818 // Note that the gc time limit test only works for the collections
1819 // of the young gen + tenured gen and not for collections of the
1820 // permanent gen. That is because the calculation of the space
1821 // freed by the collection is the free space in the young gen +
1822 // tenured gen.
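  //
  // Worked example (illustrative numbers only): with GCTimeLimit == 98
  // and GCHeapFreeLimit == 2, a compacting_gc_cost() of 0.99 yields
  // 100.0 * 0.99 == 99 > 98, and a fraction_free of 0.015 yields
  // 1.5 < 2; both limits are breached, so the time-limit count is
  // incremented below.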
1823
1824 double fraction_free =
1825 ((double)_cmsGen->free())/((double)_cmsGen->max_capacity());
1826 if ((100.0 * size_policy()->compacting_gc_cost()) >
1827 ((double) GCTimeLimit) &&
1828 ((fraction_free * 100) < GCHeapFreeLimit)) {
1829 size_policy()->inc_gc_time_limit_count();
1830 if (UseGCOverheadLimit &&
1831 (size_policy()->gc_time_limit_count() >
1832 AdaptiveSizePolicyGCTimeLimitThreshold)) {
1833 size_policy()->set_gc_time_limit_exceeded(true);
1834 // Avoid consecutive OOM due to the gc time limit by resetting
1835 // the counter.
1836 size_policy()->reset_gc_time_limit_count();
1837 if (PrintGCDetails) {
1838 gclog_or_tty->print_cr(" GC is exceeding overhead limit "
1839 "of %d%%", GCTimeLimit);
1840 }
1841 } else {
1842 if (PrintGCDetails) {
1843 gclog_or_tty->print_cr(" GC would exceed overhead limit "
1844 "of %d%%", GCTimeLimit);
1845 }
1846 }
1847 } else {
1848 size_policy()->reset_gc_time_limit_count();
1849 }
1850 }
1851
1852 // Resize the perm generation and the tenured generation
1853 // after obtaining the free list locks for the
1854 // two generations.
1855 void CMSCollector::compute_new_size() {
1856 assert_locked_or_safepoint(Heap_lock);
1857 FreelistLocker z(this);
1858 _permGen->compute_new_size();
1859 _cmsGen->compute_new_size();
1860 }
1861
1862 // A work method used by foreground collection to determine
1863 // what type of collection (compacting or not, continuing or fresh)
1864 // it should do.
1865 // NOTE: the intent is to make UseCMSCompactAtFullCollection
1866 // and CMSCompactWhenClearAllSoftRefs the default in the future
1867 // and do away with the flags after a suitable period.
1868 void CMSCollector::decide_foreground_collection_type(
1869 bool clear_all_soft_refs, bool* should_compact,
1870 bool* should_start_over) {
  // Normally, we'll compact only if the UseCMSCompactAtFullCollection
  // flag is set, and either a System.gc() has been requested, or the
  // number of full gc's since the last concurrent cycle has exceeded
  // the threshold set by CMSFullGCsBeforeCompaction, or an incremental
  // collection has failed.
1876 GenCollectedHeap* gch = GenCollectedHeap::heap();
1877 assert(gch->collector_policy()->is_two_generation_policy(),
1878 "You may want to check the correctness of the following");
1879 // Inform cms gen if this was due to partial collection failing.
1880 // The CMS gen may use this fact to determine its expansion policy.
1881 if (gch->incremental_collection_will_fail()) {
1882 assert(!_cmsGen->incremental_collection_failed(),
1883 "Should have been noticed, reacted to and cleared");
1884 _cmsGen->set_incremental_collection_failed();
1885 }
1886 *should_compact =
1887 UseCMSCompactAtFullCollection &&
1888 ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
1889 GCCause::is_user_requested_gc(gch->gc_cause()) ||
1890 gch->incremental_collection_will_fail());
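  // For example (threshold semantics only; the flag's default value may
  // vary by release): with CMSFullGCsBeforeCompaction == 0, every
  // eligible foreground collection compacts, while a setting of N
  // allows up to N non-compacting mark-sweeps between compactions.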
1891 *should_start_over = false;
1892 if (clear_all_soft_refs && !*should_compact) {
1893 // We are about to do a last ditch collection attempt
1894 // so it would normally make sense to do a compaction
1895 // to reclaim as much space as possible.
1896 if (CMSCompactWhenClearAllSoftRefs) {
1897 // Default: The rationale is that in this case either
1898 // we are past the final marking phase, in which case
1899 // we'd have to start over, or so little has been done
1900 // that there's little point in saving that work. Compaction
1901 // appears to be the sensible choice in either case.
1902 *should_compact = true;
1903 } else {
1904 // We have been asked to clear all soft refs, but not to
1905 // compact. Make sure that we aren't past the final checkpoint
1906 // phase, for that is where we process soft refs. If we are already
1907 // past that phase, we'll need to redo the refs discovery phase and
1908 // if necessary clear soft refs that weren't previously
1909 // cleared. We do so by remembering the phase in which
1910 // we came in, and if we are past the refs processing
1911 // phase, we'll choose to just redo the mark-sweep
1912 // collection from scratch.
1913 if (_collectorState > FinalMarking) {
1914 // We are past the refs processing phase;
1915 // start over and do a fresh synchronous CMS cycle
1916 _collectorState = Resetting; // skip to reset to start new cycle
1917 reset(false /* == !asynch */);
1918 *should_start_over = true;
1919 } // else we can continue a possibly ongoing current cycle
1920 }
1921 }
1922 }
1923
1924 // A work method used by the foreground collector to do
1925 // a mark-sweep-compact.
1926 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1927 GenCollectedHeap* gch = GenCollectedHeap::heap();
1928 TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty);
1929 if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
1930 gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
1931 "collections passed to foreground collector", _full_gcs_since_conc_gc);
1932 }
1933
1934 // Sample collection interval time and reset for collection pause.
1935 if (UseAdaptiveSizePolicy) {
1936 size_policy()->msc_collection_begin();
1937 }
1938
1939 // Temporarily widen the span of the weak reference processing to
1940 // the entire heap.
1941 MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1942 ReferenceProcessorSpanMutator x(ref_processor(), new_span);
1943
1944 // Temporarily, clear the "is_alive_non_header" field of the
1945 // reference processor.
1946 ReferenceProcessorIsAliveMutator y(ref_processor(), NULL);
1947
1948 // Temporarily make reference _processing_ single threaded (non-MT).
1949 ReferenceProcessorMTProcMutator z(ref_processor(), false);
1950
1951 // Temporarily make refs discovery atomic
1952 ReferenceProcessorAtomicMutator w(ref_processor(), true);
1953
1954 ref_processor()->set_enqueuing_is_done(false);
1955 ref_processor()->enable_discovery();
  // If an asynchronous collection finishes, the _modUnionTable is
  // all clear.  If we are taking over the collection from an asynchronous
  // collection, clear the _modUnionTable.
1959 assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1960 "_modUnionTable should be clear if the baton was not passed");
1961 _modUnionTable.clear_all();
1962
1963 // We must adjust the allocation statistics being maintained
1964 // in the free list space. We do so by reading and clearing
1965 // the sweep timer and updating the block flux rate estimates below.
1966 assert(_sweep_timer.is_active(), "We should never see the timer inactive");
1967 _sweep_timer.stop();
1968 // Note that we do not use this sample to update the _sweep_estimate.
1969 _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
1970 _sweep_estimate.padded_average());
1971
1972 GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
1973 ref_processor(), clear_all_soft_refs);
1974 #ifdef ASSERT
1975 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1976 size_t free_size = cms_space->free();
1977 assert(free_size ==
1978 pointer_delta(cms_space->end(), cms_space->compaction_top())
1979 * HeapWordSize,
1980 "All the free space should be compacted into one chunk at top");
1981 assert(cms_space->dictionary()->totalChunkSize(
1982 debug_only(cms_space->freelistLock())) == 0 ||
1983 cms_space->totalSizeInIndexedFreeLists() == 0,
1984 "All the free space should be in a single chunk");
1985 size_t num = cms_space->totalCount();
1986 assert((free_size == 0 && num == 0) ||
1987 (free_size > 0 && (num == 1 || num == 2)),
1988 "There should be at most 2 free chunks after compaction");
1989 #endif // ASSERT
1990 _collectorState = Resetting;
1991 assert(_restart_addr == NULL,
1992 "Should have been NULL'd before baton was passed");
1993 reset(false /* == !asynch */);
1994 _cmsGen->reset_after_compaction();
1995 _concurrent_cycles_since_last_unload = 0;
1996
1997 if (verifying() && !should_unload_classes()) {
1998 perm_gen_verify_bit_map()->clear_all();
1999 }
2000
2001 // Clear any data recorded in the PLAB chunk arrays.
2002 if (_survivor_plab_array != NULL) {
2003 reset_survivor_plab_arrays();
2004 }
2005
2006 // Adjust the per-size allocation stats for the next epoch.
2007 _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
2008 // Restart the "sweep timer" for next epoch.
2009 _sweep_timer.reset();
2010 _sweep_timer.start();
2011
2012 // Sample collection pause time and reset for collection interval.
2013 if (UseAdaptiveSizePolicy) {
2014 size_policy()->msc_collection_end(gch->gc_cause());
2015 }
2016
2017 // For a mark-sweep-compact, compute_new_size() will be called
2018 // in the heap's do_collection() method.
2019 }
2020
2021 // A work method used by the foreground collector to do
2022 // a mark-sweep, after taking over from a possibly on-going
2023 // concurrent mark-sweep collection.
2024 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2025 CollectorState first_state, bool should_start_over) {
2026 if (PrintGC && Verbose) {
2027 gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2028 "collector with count %d",
2029 _full_gcs_since_conc_gc);
2030 }
2031 switch (_collectorState) {
2032 case Idling:
2033 if (first_state == Idling || should_start_over) {
      // The background GC was not active, or should be
      // restarted from scratch; start the cycle.
2036 _collectorState = InitialMarking;
2037 }
2038 // If first_state was not Idling, then a background GC
2039 // was in progress and has now finished. No need to do it
2040 // again. Leave the state as Idling.
2041 break;
2042 case Precleaning:
      // In the foreground case, don't do the precleaning, since it
      // is not done concurrently and there is extra work required.
2046 _collectorState = FinalMarking;
2047 }
2048 if (PrintGCDetails &&
2049 (_collectorState > Idling ||
2050 !GCCause::is_user_requested_gc(GenCollectedHeap::heap()->gc_cause()))) {
2051 gclog_or_tty->print(" (concurrent mode failure)");
2052 }
2053 collect_in_foreground(clear_all_soft_refs);
2054
2055 // For a mark-sweep, compute_new_size() will be called
2056 // in the heap's do_collection() method.
2057 }
2058
2059
2060 void CMSCollector::getFreelistLocks() const {
2061 // Get locks for all free lists in all generations that this
2062 // collector is responsible for
2063 _cmsGen->freelistLock()->lock_without_safepoint_check();
2064 _permGen->freelistLock()->lock_without_safepoint_check();
2065 }
2066
2067 void CMSCollector::releaseFreelistLocks() const {
2068 // Release locks for all free lists in all generations that this
2069 // collector is responsible for
2070 _cmsGen->freelistLock()->unlock();
2071 _permGen->freelistLock()->unlock();
2072 }
2073
2074 bool CMSCollector::haveFreelistLocks() const {
2075 // Check locks for all free lists in all generations that this
2076 // collector is responsible for
2077 assert_lock_strong(_cmsGen->freelistLock());
2078 assert_lock_strong(_permGen->freelistLock());
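  // In a product build the assert_lock_strong() checks above compile
  // away, so this method is only meaningful (and should only be
  // called) from within asserts; hence: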
2079 PRODUCT_ONLY(ShouldNotReachHere());
2080 return true;
2081 }
2082
2083 // A utility class that is used by the CMS collector to
2084 // temporarily "release" the foreground collector from its
2085 // usual obligation to wait for the background collector to
2086 // complete an ongoing phase before proceeding.
2087 class ReleaseForegroundGC: public StackObj {
2088 private:
2089 CMSCollector* _c;
2090 public:
2091 ReleaseForegroundGC(CMSCollector* c) : _c(c) {
2092 assert(_c->_foregroundGCShouldWait, "Else should not need to call");
2093 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2094 // allow a potentially blocked foreground collector to proceed
2095 _c->_foregroundGCShouldWait = false;
2096 if (_c->_foregroundGCIsActive) {
2097 CGC_lock->notify();
2098 }
2099 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2100 "Possible deadlock");
2101 }
2102
2103 ~ReleaseForegroundGC() {
2104 assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2105 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2106 _c->_foregroundGCShouldWait = true;
2107 }
2108 };
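
// A minimal usage sketch (mirroring the uses in collect_in_background()
// below): the scoped object lets a blocked foreground collector proceed
// for the duration of a stop-the-world operation, and re-asserts
// _foregroundGCShouldWait on exit:
//
//   {
//     ReleaseForegroundGC x(this);   // foreground may proceed
//     VM_CMS_Initial_Mark op(this);
//     VMThread::execute(&op);        // stop-world work
//   }                                // foreground must wait again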
2109
// There are separate collect_in_background and collect_in_foreground methods
// because of the different locking requirements of the background collector
// and the foreground collector. There was originally an attempt to share
// one "collect" method between the background collector and the foreground
// collector, but the if-then-else branching required made it cleaner to have
// separate methods.
2116 void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
2117 assert(Thread::current()->is_ConcurrentGC_thread(),
2118 "A CMS asynchronous collection is only allowed on a CMS thread.");
2119
2120 GenCollectedHeap* gch = GenCollectedHeap::heap();
2121 {
2122 bool safepoint_check = Mutex::_no_safepoint_check_flag;
2123 MutexLockerEx hl(Heap_lock, safepoint_check);
2124 FreelistLocker fll(this);
2125 MutexLockerEx x(CGC_lock, safepoint_check);
2126 if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2127 // The foreground collector is active or we're
2128 // not using asynchronous collections. Skip this
2129 // background collection.
2130 assert(!_foregroundGCShouldWait, "Should be clear");
2131 return;
2132 } else {
2133 assert(_collectorState == Idling, "Should be idling before start.");
2134 _collectorState = InitialMarking;
2135 // Reset the expansion cause, now that we are about to begin
2136 // a new cycle.
2137 clear_expansion_cause();
2138 }
2139 // Decide if we want to enable class unloading as part of the
2140 // ensuing concurrent GC cycle.
2141 update_should_unload_classes();
2142 _full_gc_requested = false; // acks all outstanding full gc requests
2143 // Signal that we are about to start a collection
2144 gch->increment_total_full_collections(); // ... starting a collection cycle
2145 _collection_count_start = gch->total_full_collections();
2146 }
2147
2148 // Used for PrintGC
2149 size_t prev_used;
2150 if (PrintGC && Verbose) {
2151 prev_used = _cmsGen->used(); // XXXPERM
2152 }
2153
2154 // The change of the collection state is normally done at this level;
2155 // the exceptions are phases that are executed while the world is
2156 // stopped. For those phases the change of state is done while the
2157 // world is stopped. For baton passing purposes this allows the
2158 // background collector to finish the phase and change state atomically.
2159 // The foreground collector cannot wait on a phase that is done
2160 // while the world is stopped because the foreground collector already
2161 // has the world stopped and would deadlock.
2162 while (_collectorState != Idling) {
2163 if (TraceCMSState) {
2164 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2165 Thread::current(), _collectorState);
2166 }
2167 // The foreground collector
2168 // holds the Heap_lock throughout its collection.
2169 // holds the CMS token (but not the lock)
2170 // except while it is waiting for the background collector to yield.
2171 //
2172 // The foreground collector should be blocked (not for long)
2173 // if the background collector is about to start a phase
2174 // executed with world stopped. If the background
2175 // collector has already started such a phase, the
2176 // foreground collector is blocked waiting for the
2177 // Heap_lock. The stop-world phases (InitialMarking and FinalMarking)
2178 // are executed in the VM thread.
2179 //
2180 // The locking order is
2181 // PendingListLock (PLL) -- if applicable (FinalMarking)
2182 // Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue())
2183 // CMS token (claimed in
2184 // stop_world_and_do() -->
2185 // safepoint_synchronize() -->
2186 // CMSThread::synchronize())
2187
2188 {
2189 // Check if the FG collector wants us to yield.
2190 CMSTokenSync x(true); // is cms thread
2191 if (waitForForegroundGC()) {
2192 // We yielded to a foreground GC, nothing more to be
2193 // done this round.
2194 assert(_foregroundGCShouldWait == false, "We set it to false in "
2195 "waitForForegroundGC()");
2196 if (TraceCMSState) {
2197 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2198 " exiting collection CMS state %d",
2199 Thread::current(), _collectorState);
2200 }
2201 return;
2202 } else {
2203 // The background collector can run but check to see if the
2204 // foreground collector has done a collection while the
2205 // background collector was waiting to get the CGC_lock
2206 // above. If yes, break so that _foregroundGCShouldWait
2207 // is cleared before returning.
2208 if (_collectorState == Idling) {
2209 break;
2210 }
2211 }
2212 }
2213
2214 assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2215 "should be waiting");
2216
2217 switch (_collectorState) {
2218 case InitialMarking:
2219 {
2220 ReleaseForegroundGC x(this);
2221 stats().record_cms_begin();
2222
2223 VM_CMS_Initial_Mark initial_mark_op(this);
2224 VMThread::execute(&initial_mark_op);
2225 }
2226 // The collector state may be any legal state at this point
2227 // since the background collector may have yielded to the
2228 // foreground collector.
2229 break;
2230 case Marking:
2231 // initial marking in checkpointRootsInitialWork has been completed
2232 if (markFromRoots(true)) { // we were successful
2233 assert(_collectorState == Precleaning, "Collector state should "
2234 "have changed");
2235 } else {
2236 assert(_foregroundGCIsActive, "Internal state inconsistency");
2237 }
2238 break;
2239 case Precleaning:
2240 if (UseAdaptiveSizePolicy) {
2241 size_policy()->concurrent_precleaning_begin();
2242 }
2243 // marking from roots in markFromRoots has been completed
2244 preclean();
2245 if (UseAdaptiveSizePolicy) {
2246 size_policy()->concurrent_precleaning_end();
2247 }
2248 assert(_collectorState == AbortablePreclean ||
2249 _collectorState == FinalMarking,
2250 "Collector state should have changed");
2251 break;
2252 case AbortablePreclean:
2253 if (UseAdaptiveSizePolicy) {
2254 size_policy()->concurrent_phases_resume();
2255 }
2256 abortable_preclean();
2257 if (UseAdaptiveSizePolicy) {
2258 size_policy()->concurrent_precleaning_end();
2259 }
2260 assert(_collectorState == FinalMarking, "Collector state should "
2261 "have changed");
2262 break;
2263 case FinalMarking:
2264 {
2265 ReleaseForegroundGC x(this);
2266
2267 VM_CMS_Final_Remark final_remark_op(this);
2268 VMThread::execute(&final_remark_op);
2269 }
2270 assert(_foregroundGCShouldWait, "block post-condition");
2271 break;
2272 case Sweeping:
2273 if (UseAdaptiveSizePolicy) {
2274 size_policy()->concurrent_sweeping_begin();
2275 }
2276 // final marking in checkpointRootsFinal has been completed
2277 sweep(true);
2278 assert(_collectorState == Resizing, "Collector state change "
2279 "to Resizing must be done under the free_list_lock");
2280 _full_gcs_since_conc_gc = 0;
2281
2282 // Stop the timers for adaptive size policy for the concurrent phases
2283 if (UseAdaptiveSizePolicy) {
2284 size_policy()->concurrent_sweeping_end();
2285 size_policy()->concurrent_phases_end(gch->gc_cause(),
2286 gch->prev_gen(_cmsGen)->capacity(),
2287 _cmsGen->free());
2288 }
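        // Deliberate fall-through into the Resizing case: sweep() has
        // already moved _collectorState to Resizing (see the assert
        // above), so handle that state without another trip around the
        // loop.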
2289
2290 case Resizing: {
2291 // Sweeping has been completed...
2292 // At this point the background collection has completed.
2293 // Don't move the call to compute_new_size() down
2294 // into code that might be executed if the background
2295 // collection was preempted.
2296 {
2297 ReleaseForegroundGC x(this); // unblock FG collection
2298 MutexLockerEx y(Heap_lock, Mutex::_no_safepoint_check_flag);
2299 CMSTokenSync z(true); // not strictly needed.
2300 if (_collectorState == Resizing) {
2301 compute_new_size();
2302 _collectorState = Resetting;
2303 } else {
2304 assert(_collectorState == Idling, "The state should only change"
2305 " because the foreground collector has finished the collection");
2306 }
2307 }
2308 break;
2309 }
2310 case Resetting:
2311 // CMS heap resizing has been completed
2312 reset(true);
2313 assert(_collectorState == Idling, "Collector state should "
2314 "have changed");
2315 stats().record_cms_end();
2316 // Don't move the concurrent_phases_end() and compute_new_size()
2317 // calls to here because a preempted background collection
        // has its state set to "Resetting".
2319 break;
2320 case Idling:
2321 default:
2322 ShouldNotReachHere();
2323 break;
2324 }
2325 if (TraceCMSState) {
2326 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2327 Thread::current(), _collectorState);
2328 }
2329 assert(_foregroundGCShouldWait, "block post-condition");
2330 }
2331
2332 // Should this be in gc_epilogue?
2333 collector_policy()->counters()->update_counters();
2334
2335 {
2336 // Clear _foregroundGCShouldWait and, in the event that the
2337 // foreground collector is waiting, notify it, before
2338 // returning.
2339 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2340 _foregroundGCShouldWait = false;
2341 if (_foregroundGCIsActive) {
2342 CGC_lock->notify();
2343 }
2344 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2345 "Possible deadlock");
2346 }
2347 if (TraceCMSState) {
2348 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2349 " exiting collection CMS state %d",
2350 Thread::current(), _collectorState);
2351 }
2352 if (PrintGC && Verbose) {
2353 _cmsGen->print_heap_change(prev_used);
2354 }
2355 }
2356
2357 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
2358 assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2359 "Foreground collector should be waiting, not executing");
  assert(Thread::current()->is_VM_thread(), "A foreground collection "
    "may only be done by the VM Thread with the world stopped");
2362 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2363 "VM thread should have CMS token");
2364
2365 NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2366 true, gclog_or_tty);)
2367 if (UseAdaptiveSizePolicy) {
2368 size_policy()->ms_collection_begin();
2369 }
2370 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2371
2372 HandleMark hm; // Discard invalid handles created during verification
2373
2374 if (VerifyBeforeGC &&
2375 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2376 Universe::verify(true);
2377 }
2378
2379 bool init_mark_was_synchronous = false; // until proven otherwise
2380 while (_collectorState != Idling) {
2381 if (TraceCMSState) {
2382 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2383 Thread::current(), _collectorState);
2384 }
2385 switch (_collectorState) {
2386 case InitialMarking:
2387 init_mark_was_synchronous = true; // fact to be exploited in re-mark
2388 checkpointRootsInitial(false);
2389 assert(_collectorState == Marking, "Collector state should have changed"
2390 " within checkpointRootsInitial()");
2391 break;
2392 case Marking:
2393 // initial marking in checkpointRootsInitialWork has been completed
2394 if (VerifyDuringGC &&
2395 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2396 gclog_or_tty->print("Verify before initial mark: ");
2397 Universe::verify(true);
2398 }
2399 {
2400 bool res = markFromRoots(false);
2401 assert(res && _collectorState == FinalMarking, "Collector state should "
2402 "have changed");
2403 break;
2404 }
2405 case FinalMarking:
2406 if (VerifyDuringGC &&
2407 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2408 gclog_or_tty->print("Verify before re-mark: ");
2409 Universe::verify(true);
2410 }
2411 checkpointRootsFinal(false, clear_all_soft_refs,
2412 init_mark_was_synchronous);
        assert(_collectorState == Sweeping, "Collector state should "
          "have changed within checkpointRootsFinal()");
2415 break;
2416 case Sweeping:
2417 // final marking in checkpointRootsFinal has been completed
2418 if (VerifyDuringGC &&
2419 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2420 gclog_or_tty->print("Verify before sweep: ");
2421 Universe::verify(true);
2422 }
2423 sweep(false);
2424 assert(_collectorState == Resizing, "Incorrect state");
2425 break;
2426 case Resizing: {
2427 // Sweeping has been completed; the actual resize in this case
2428 // is done separately; nothing to be done in this state.
2429 _collectorState = Resetting;
2430 break;
2431 }
2432 case Resetting:
2433 // The heap has been resized.
2434 if (VerifyDuringGC &&
2435 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2436 gclog_or_tty->print("Verify before reset: ");
2437 Universe::verify(true);
2438 }
2439 reset(false);
2440 assert(_collectorState == Idling, "Collector state should "
2441 "have changed");
2442 break;
2443 case Precleaning:
2444 case AbortablePreclean:
2445 // Elide the preclean phase
2446 _collectorState = FinalMarking;
2447 break;
2448 default:
2449 ShouldNotReachHere();
2450 }
2451 if (TraceCMSState) {
2452 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2453 Thread::current(), _collectorState);
2454 }
2455 }
2456
2457 if (UseAdaptiveSizePolicy) {
2458 GenCollectedHeap* gch = GenCollectedHeap::heap();
2459 size_policy()->ms_collection_end(gch->gc_cause());
2460 }
2461
2462 if (VerifyAfterGC &&
2463 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2464 Universe::verify(true);
2465 }
2466 if (TraceCMSState) {
2467 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2468 " exiting collection CMS state %d",
2469 Thread::current(), _collectorState);
2470 }
2471 }
2472
2473 bool CMSCollector::waitForForegroundGC() {
2474 bool res = false;
2475 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2476 "CMS thread should have CMS token");
2477 // Block the foreground collector until the
  // background collector decides whether to
2479 // yield.
2480 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2481 _foregroundGCShouldWait = true;
2482 if (_foregroundGCIsActive) {
2483 // The background collector yields to the
2484 // foreground collector and returns a value
2485 // indicating that it has yielded. The foreground
2486 // collector can proceed.
2487 res = true;
2488 _foregroundGCShouldWait = false;
2489 ConcurrentMarkSweepThread::clear_CMS_flag(
2490 ConcurrentMarkSweepThread::CMS_cms_has_token);
2491 ConcurrentMarkSweepThread::set_CMS_flag(
2492 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2493 // Get a possibly blocked foreground thread going
2494 CGC_lock->notify();
2495 if (TraceCMSState) {
2496 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2497 Thread::current(), _collectorState);
2498 }
2499 while (_foregroundGCIsActive) {
2500 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2501 }
2502 ConcurrentMarkSweepThread::set_CMS_flag(
2503 ConcurrentMarkSweepThread::CMS_cms_has_token);
2504 ConcurrentMarkSweepThread::clear_CMS_flag(
2505 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2506 }
2507 if (TraceCMSState) {
2508 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2509 Thread::current(), _collectorState);
2510 }
2511 return res;
2512 }
2513
2514 // Because of the need to lock the free lists and other structures in
2515 // the collector, common to all the generations that the collector is
2516 // collecting, we need the gc_prologues of individual CMS generations
// to delegate to their collector. It may have been simpler had the
2518 // current infrastructure allowed one to call a prologue on a
2519 // collector. In the absence of that we have the generation's
2520 // prologue delegate to the collector, which delegates back
2521 // some "local" work to a worker method in the individual generations
2522 // that it's responsible for collecting, while itself doing any
2523 // work common to all generations it's responsible for. A similar
2524 // comment applies to the gc_epilogue()'s.
// The role of the variable _between_prologue_and_epilogue is to
2526 // enforce the invocation protocol.
2527 void CMSCollector::gc_prologue(bool full) {
2528 // Call gc_prologue_work() for each CMSGen and PermGen that
2529 // we are responsible for.
2530
2531 // The following locking discipline assumes that we are only called
2532 // when the world is stopped.
2533 assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2534
2535 // The CMSCollector prologue must call the gc_prologues for the
2536 // "generations" (including PermGen if any) that it's responsible
2537 // for.
2538
2539 assert( Thread::current()->is_VM_thread()
2540 || ( CMSScavengeBeforeRemark
2541 && Thread::current()->is_ConcurrentGC_thread()),
2542 "Incorrect thread type for prologue execution");
2543
2544 if (_between_prologue_and_epilogue) {
2545 // We have already been invoked; this is a gc_prologue delegation
2546 // from yet another CMS generation that we are responsible for, just
2547 // ignore it since all relevant work has already been done.
2548 return;
2549 }
2550
2551 // set a bit saying prologue has been called; cleared in epilogue
2552 _between_prologue_and_epilogue = true;
2553 // Claim locks for common data structures, then call gc_prologue_work()
2554 // for each CMSGen and PermGen that we are responsible for.
2555
2556 getFreelistLocks(); // gets free list locks on constituent spaces
2557 bitMapLock()->lock_without_safepoint_check();
2558
2559 // Should call gc_prologue_work() for all cms gens we are responsible for
2560 bool registerClosure = _collectorState >= Marking
2561 && _collectorState < Sweeping;
2562 ModUnionClosure* muc = ParallelGCThreads > 0 ? &_modUnionClosurePar
2563 : &_modUnionClosure;
2564 _cmsGen->gc_prologue_work(full, registerClosure, muc);
2565 _permGen->gc_prologue_work(full, registerClosure, muc);
2566
2567 if (!full) {
2568 stats().record_gc0_begin();
2569 }
2570 }
2571
2572 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2573 // Delegate to CMScollector which knows how to coordinate between
2574 // this and any other CMS generations that it is responsible for
2575 // collecting.
2576 collector()->gc_prologue(full);
2577 }
2578
2579 // This is a "private" interface for use by this generation's CMSCollector.
2580 // Not to be called directly by any other entity (for instance,
2581 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2582 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2583 bool registerClosure, ModUnionClosure* modUnionClosure) {
2584 assert(!incremental_collection_failed(), "Shouldn't be set yet");
2585 assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2586 "Should be NULL");
2587 if (registerClosure) {
2588 cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2589 }
2590 cmsSpace()->gc_prologue();
2591 // Clear stat counters
2592 NOT_PRODUCT(
2593 assert(_numObjectsPromoted == 0, "check");
2594 assert(_numWordsPromoted == 0, "check");
2595 if (Verbose && PrintGC) {
2596 gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
2597 SIZE_FORMAT" bytes concurrently",
2598 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2599 }
2600 _numObjectsAllocated = 0;
2601 _numWordsAllocated = 0;
2602 )
2603 }
2604
2605 void CMSCollector::gc_epilogue(bool full) {
2606 // The following locking discipline assumes that we are only called
2607 // when the world is stopped.
2608 assert(SafepointSynchronize::is_at_safepoint(),
2609 "world is stopped assumption");
2610
2611 // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2612 // if linear allocation blocks need to be appropriately marked to allow the
  // blocks to be parsable. We also check here whether we need to nudge the
2614 // CMS collector thread to start a new cycle (if it's not already active).
2615 assert( Thread::current()->is_VM_thread()
2616 || ( CMSScavengeBeforeRemark
2617 && Thread::current()->is_ConcurrentGC_thread()),
2618 "Incorrect thread type for epilogue execution");
2619
2620 if (!_between_prologue_and_epilogue) {
2621 // We have already been invoked; this is a gc_epilogue delegation
2622 // from yet another CMS generation that we are responsible for, just
2623 // ignore it since all relevant work has already been done.
2624 return;
2625 }
2626 assert(haveFreelistLocks(), "must have freelist locks");
2627 assert_lock_strong(bitMapLock());
2628
2629 _cmsGen->gc_epilogue_work(full);
2630 _permGen->gc_epilogue_work(full);
2631
2632 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2633 // in case sampling was not already enabled, enable it
2634 _start_sampling = true;
2635 }
2636 // reset _eden_chunk_array so sampling starts afresh
2637 _eden_chunk_index = 0;
2638
2639 size_t cms_used = _cmsGen->cmsSpace()->used();
2640 size_t perm_used = _permGen->cmsSpace()->used();
2641
2642 // update performance counters - this uses a special version of
2643 // update_counters() that allows the utilization to be passed as a
2644 // parameter, avoiding multiple calls to used().
2645 //
2646 _cmsGen->update_counters(cms_used);
2647 _permGen->update_counters(perm_used);
2648
2649 if (CMSIncrementalMode) {
2650 icms_update_allocation_limits();
2651 }
2652
2653 bitMapLock()->unlock();
2654 releaseFreelistLocks();
2655
2656 _between_prologue_and_epilogue = false; // ready for next cycle
2657 }
2658
2659 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2660 collector()->gc_epilogue(full);
2661
2662 // Also reset promotion tracking in par gc thread states.
2663 if (ParallelGCThreads > 0) {
2664 for (uint i = 0; i < ParallelGCThreads; i++) {
2665 _par_gc_thread_states[i]->promo.stopTrackingPromotions();
2666 }
2667 }
2668 }
2669
2670 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2671 assert(!incremental_collection_failed(), "Should have been cleared");
2672 cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2673 cmsSpace()->gc_epilogue();
2674 // Print stat counters
2675 NOT_PRODUCT(
2676 assert(_numObjectsAllocated == 0, "check");
2677 assert(_numWordsAllocated == 0, "check");
2678 if (Verbose && PrintGC) {
2679 gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
2680 SIZE_FORMAT" bytes",
2681 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2682 }
2683 _numObjectsPromoted = 0;
2684 _numWordsPromoted = 0;
2685 )
2686
2687 if (PrintGC && Verbose) {
    // The call down the chain in contiguous_available() needs the freelistLock,
    // so print this out before releasing the freelistLock.
2690 gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
2691 contiguous_available());
2692 }
2693 }
2694
2695 #ifndef PRODUCT
2696 bool CMSCollector::have_cms_token() {
2697 Thread* thr = Thread::current();
2698 if (thr->is_VM_thread()) {
2699 return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2700 } else if (thr->is_ConcurrentGC_thread()) {
2701 return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2702 } else if (thr->is_GC_task_thread()) {
2703 return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2704 ParGCRareEvent_lock->owned_by_self();
2705 }
2706 return false;
2707 }
2708 #endif
2709
2710 // Check reachability of the given heap address in CMS generation,
2711 // treating all other generations as roots.
2712 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
  // We could "guarantee" below, rather than assert, but I'll
  // leave these as "asserts" so that an adventurous debugger
  // could try this in the product build provided some subset of
  // the conditions were met, provided they were interested in the
2717 // results and knew that the computation below wouldn't interfere
2718 // with other concurrent computations mutating the structures
2719 // being read or written.
2720 assert(SafepointSynchronize::is_at_safepoint(),
2721 "Else mutations in object graph will make answer suspect");
2722 assert(have_cms_token(), "Should hold cms token");
2723 assert(haveFreelistLocks(), "must hold free list locks");
2724 assert_lock_strong(bitMapLock());
2725
2726 // Clear the marking bit map array before starting, but, just
2727 // for kicks, first report if the given address is already marked
2728 gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
2729 _markBitMap.isMarked(addr) ? "" : " not");
2730
2731 if (verify_after_remark()) {
2732 MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2733 bool result = verification_mark_bm()->isMarked(addr);
2734 gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
2735 result ? "IS" : "is NOT");
2736 return result;
2737 } else {
2738 gclog_or_tty->print_cr("Could not compute result");
2739 return false;
2740 }
2741 }
2742
2743 ////////////////////////////////////////////////////////
2744 // CMS Verification Support
2745 ////////////////////////////////////////////////////////
// Following the remark phase, the following invariant
// should hold -- each object in the CMS heap which is
// marked in the verification_mark_bm() should be marked in markBitMap().
2749
2750 class VerifyMarkedClosure: public BitMapClosure {
2751 CMSBitMap* _marks;
2752 bool _failed;
2753
2754 public:
2755 VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2756
2757 void do_bit(size_t offset) {
2758 HeapWord* addr = _marks->offsetToHeapWord(offset);
2759 if (!_marks->isMarked(addr)) {
2760 oop(addr)->print();
2761 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
2762 _failed = true;
2763 }
2764 }
2765
2766 bool failed() { return _failed; }
2767 };
2768
2769 bool CMSCollector::verify_after_remark() {
2770 gclog_or_tty->print(" [Verifying CMS Marking... ");
2771 MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2772 static bool init = false;
2773
2774 assert(SafepointSynchronize::is_at_safepoint(),
2775 "Else mutations in object graph will make answer suspect");
  assert(have_cms_token(),
         "Else there may be mutual interference in use of "
         "verification data structures");
2779 assert(_collectorState > Marking && _collectorState <= Sweeping,
2780 "Else marking info checked here may be obsolete");
2781 assert(haveFreelistLocks(), "must hold free list locks");
2782 assert_lock_strong(bitMapLock());
2783
2784
2785 // Allocate marking bit map if not already allocated
2786 if (!init) { // first time
2787 if (!verification_mark_bm()->allocate(_span)) {
2788 return false;
2789 }
2790 init = true;
2791 }
2792
2793 assert(verification_mark_stack()->isEmpty(), "Should be empty");
2794
2795 // Turn off refs discovery -- so we will be tracing through refs.
2796 // This is as intended, because by this time
2797 // GC must already have cleared any refs that need to be cleared,
2798 // and traced those that need to be marked; moreover,
  // the marking done here is not going to interfere in any
2800 // way with the marking information used by GC.
2801 NoRefDiscovery no_discovery(ref_processor());
2802
2803 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2804
2805 // Clear any marks from a previous round
2806 verification_mark_bm()->clear_all();
2807 assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2808 assert(overflow_list_is_empty(), "overflow list should be empty");
2809
2810 GenCollectedHeap* gch = GenCollectedHeap::heap();
2811 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
2812 // Update the saved marks which may affect the root scans.
2813 gch->save_marks();
2814
2815 if (CMSRemarkVerifyVariant == 1) {
2816 // In this first variant of verification, we complete
    // all marking, then check if the new marks-vector is
2818 // a subset of the CMS marks-vector.
2819 verify_after_remark_work_1();
2820 } else if (CMSRemarkVerifyVariant == 2) {
2821 // In this second variant of verification, we flag an error
2822 // (i.e. an object reachable in the new marks-vector not reachable
2823 // in the CMS marks-vector) immediately, also indicating the
    // identity of an object (A) that references the unmarked object (B) --
2825 // presumably, a mutation to A failed to be picked up by preclean/remark?
2826 verify_after_remark_work_2();
2827 } else {
2828 warning("Unrecognized value %d for CMSRemarkVerifyVariant",
2829 CMSRemarkVerifyVariant);
2830 }
2831 gclog_or_tty->print(" done] ");
2832 return true;
2833 }
2834
2835 void CMSCollector::verify_after_remark_work_1() {
2836 ResourceMark rm;
2837 HandleMark hm;
2838 GenCollectedHeap* gch = GenCollectedHeap::heap();
2839
2840 // Mark from roots one level into CMS
2841 MarkRefsIntoClosure notOlder(_span, verification_mark_bm(), true /* nmethods */);
2842 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2843
2844 gch->gen_process_strong_roots(_cmsGen->level(),
2845 true, // younger gens are roots
2846 true, // collecting perm gen
2847 SharedHeap::ScanningOption(roots_scanning_options()),
2848 NULL, ¬Older);
2849
2850 // Now mark from the roots
2851 assert(_revisitStack.isEmpty(), "Should be empty");
2852 MarkFromRootsClosure markFromRootsClosure(this, _span,
2853 verification_mark_bm(), verification_mark_stack(), &_revisitStack,
2854 false /* don't yield */, true /* verifying */);
2855 assert(_restart_addr == NULL, "Expected pre-condition");
2856 verification_mark_bm()->iterate(&markFromRootsClosure);
2857 while (_restart_addr != NULL) {
    // Deal with stack overflow by restarting at the indicated
2859 // address.
2860 HeapWord* ra = _restart_addr;
2861 markFromRootsClosure.reset(ra);
2862 _restart_addr = NULL;
2863 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2864 }
2865 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2866 verify_work_stacks_empty();
2867 // Should reset the revisit stack above, since no class tree
2868 // surgery is forthcoming.
2869 _revisitStack.reset(); // throwing away all contents
2870
2871 // Marking completed -- now verify that each bit marked in
2872 // verification_mark_bm() is also marked in markBitMap(); flag all
2873 // errors by printing corresponding objects.
2874 VerifyMarkedClosure vcl(markBitMap());
2875 verification_mark_bm()->iterate(&vcl);
2876 if (vcl.failed()) {
2877 gclog_or_tty->print("Verification failed");
2878 Universe::heap()->print();
2879 fatal(" ... aborting");
2880 }
2881 }
2882
2883 void CMSCollector::verify_after_remark_work_2() {
2884 ResourceMark rm;
2885 HandleMark hm;
2886 GenCollectedHeap* gch = GenCollectedHeap::heap();
2887
2888 // Mark from roots one level into CMS
2889 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2890 markBitMap(), true /* nmethods */);
2891 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2892 gch->gen_process_strong_roots(_cmsGen->level(),
2893 true, // younger gens are roots
2894 true, // collecting perm gen
2895 SharedHeap::ScanningOption(roots_scanning_options()),
NULL, &notOlder);
2897
2898 // Now mark from the roots
2899 assert(_revisitStack.isEmpty(), "Should be empty");
2900 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2901 verification_mark_bm(), markBitMap(), verification_mark_stack());
2902 assert(_restart_addr == NULL, "Expected pre-condition");
2903 verification_mark_bm()->iterate(&markFromRootsClosure);
2904 while (_restart_addr != NULL) {
// Deal with stack overflow by restarting at the indicated
// address.
2907 HeapWord* ra = _restart_addr;
2908 markFromRootsClosure.reset(ra);
2909 _restart_addr = NULL;
2910 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2911 }
2912 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2913 verify_work_stacks_empty();
2914 // Should reset the revisit stack above, since no class tree
2915 // surgery is forthcoming.
2916 _revisitStack.reset(); // throwing away all contents
2917
2918 // Marking completed -- now verify that each bit marked in
2919 // verification_mark_bm() is also marked in markBitMap(); flag all
2920 // errors by printing corresponding objects.
2921 VerifyMarkedClosure vcl(markBitMap());
2922 verification_mark_bm()->iterate(&vcl);
2923 assert(!vcl.failed(), "Else verification above should not have succeeded");
2924 }
2925
2926 void ConcurrentMarkSweepGeneration::save_marks() {
2927 // delegate to CMS space
2928 cmsSpace()->save_marks();
2929 for (uint i = 0; i < ParallelGCThreads; i++) {
2930 _par_gc_thread_states[i]->promo.startTrackingPromotions();
2931 }
2932 }
2933
2934 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2935 return cmsSpace()->no_allocs_since_save_marks();
2936 }
2937
2938 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
2939 \
2940 void ConcurrentMarkSweepGeneration:: \
2941 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
2942 cl->set_generation(this); \
2943 cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl); \
2944 cl->reset_generation(); \
2945 save_marks(); \
2946 }
2947
2948 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
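// Illustrative sketch: for each (OopClosureType, nv_suffix) pair supplied
// by ALL_SINCE_SAVE_MARKS_CLOSURES, the macro above emits a definition of
// the following shape (schematic, not the literal expansion):
//
//   void ConcurrentMarkSweepGeneration::
//   oop_since_save_marks_iterate_nv(OopClosureType* cl) {
//     cl->set_generation(this);     // bind the closure to this generation
//     cmsSpace()->oop_since_save_marks_iterate_nv(cl);  // delegate to space
//     cl->reset_generation();
//     save_marks();                 // reset the "since save marks" boundary
//   }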
2949
2950 void
2951 ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
2952 {
2953 // Not currently implemented; need to do the following. -- ysr.
2954 // dld -- I think that is used for some sort of allocation profiler. So it
2955 // really means the objects allocated by the mutator since the last
2956 // GC. We could potentially implement this cheaply by recording only
2957 // the direct allocations in a side data structure.
2958 //
2959 // I think we probably ought not to be required to support these
2960 // iterations at any arbitrary point; I think there ought to be some
2961 // call to enable/disable allocation profiling in a generation/space,
2962 // and the iterator ought to return the objects allocated in the
2963 // gen/space since the enable call, or the last iterator call (which
2964 // will probably be at a GC.) That way, for gens like CM&S that would
2965 // require some extra data structure to support this, we only pay the
2966 // cost when it's in use...
2967 cmsSpace()->object_iterate_since_last_GC(blk);
2968 }
2969
2970 void
2971 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
2972 cl->set_generation(this);
2973 younger_refs_in_space_iterate(_cmsSpace, cl);
2974 cl->reset_generation();
2975 }
2976
2977 void
2978 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, OopClosure* cl) {
2979 if (freelistLock()->owned_by_self()) {
2980 Generation::oop_iterate(mr, cl);
2981 } else {
2982 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2983 Generation::oop_iterate(mr, cl);
2984 }
2985 }
2986
2987 void
2988 ConcurrentMarkSweepGeneration::oop_iterate(OopClosure* cl) {
2989 if (freelistLock()->owned_by_self()) {
2990 Generation::oop_iterate(cl);
2991 } else {
2992 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2993 Generation::oop_iterate(cl);
2994 }
2995 }
2996
2997 void
2998 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
2999 if (freelistLock()->owned_by_self()) {
3000 Generation::object_iterate(cl);
3001 } else {
3002 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3003 Generation::object_iterate(cl);
3004 }
3005 }
3006
3007 void
3008 ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
3009 }
3010
3011 void
3012 ConcurrentMarkSweepGeneration::post_compact() {
3013 }
3014
3015 void
3016 ConcurrentMarkSweepGeneration::prepare_for_verify() {
3017 // Fix the linear allocation blocks to look like free blocks.
3018
3019 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3020 // are not called when the heap is verified during universe initialization and
3021 // at vm shutdown.
3022 if (freelistLock()->owned_by_self()) {
3023 cmsSpace()->prepare_for_verify();
3024 } else {
3025 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3026 cmsSpace()->prepare_for_verify();
3027 }
3028 }
3029
3030 void
3031 ConcurrentMarkSweepGeneration::verify(bool allow_dirty /* ignored */) {
3032 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3033 // are not called when the heap is verified during universe initialization and
3034 // at vm shutdown.
3035 if (freelistLock()->owned_by_self()) {
3036 cmsSpace()->verify(false /* ignored */);
3037 } else {
3038 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3039 cmsSpace()->verify(false /* ignored */);
3040 }
3041 }
3042
3043 void CMSCollector::verify(bool allow_dirty /* ignored */) {
3044 _cmsGen->verify(allow_dirty);
3045 _permGen->verify(allow_dirty);
3046 }
3047
3048 #ifndef PRODUCT
3049 bool CMSCollector::overflow_list_is_empty() const {
3050 assert(_num_par_pushes >= 0, "Inconsistency");
3051 if (_overflow_list == NULL) {
3052 assert(_num_par_pushes == 0, "Inconsistency");
3053 }
3054 return _overflow_list == NULL;
3055 }
3056
3057 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3058 // merely consolidate assertion checks that appear to occur together frequently.
3059 void CMSCollector::verify_work_stacks_empty() const {
3060 assert(_markStack.isEmpty(), "Marking stack should be empty");
3061 assert(overflow_list_is_empty(), "Overflow list should be empty");
3062 }
3063
3064 void CMSCollector::verify_overflow_empty() const {
3065 assert(overflow_list_is_empty(), "Overflow list should be empty");
3066 assert(no_preserved_marks(), "No preserved marks");
3067 }
3068 #endif // PRODUCT
3069
3070 // Decide if we want to enable class unloading as part of the
3071 // ensuing concurrent GC cycle. We will collect the perm gen and
3072 // unload classes if it's the case that:
3073 // (1) an explicit gc request has been made and the flag
3074 // ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3075 // (2) (a) class unloading is enabled at the command line, and
3076 // (b) (i) perm gen threshold has been crossed, or
3077 // (ii) old gen is getting really full, or
3078 // (iii) the previous N CMS collections did not collect the
3079 // perm gen
3080 // NOTE: Provided there is no change in the state of the heap between
3081 // calls to this method, it should have idempotent results. Moreover,
3082 // its results should be monotonically increasing (i.e. going from 0 to 1,
3083 // but not 1 to 0) between successive calls between which the heap was
3084 // not collected. For the implementation below, it must thus rely on
3085 // the property that concurrent_cycles_since_last_unload()
3086 // will not decrease unless a collection cycle happened and that
3087 // _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
3088 // themselves also monotonic in that sense. See check_monotonicity()
3089 // below.
3090 bool CMSCollector::update_should_unload_classes() {
3091 // Condition 1 above
3092 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3093 _should_unload_classes = true;
3094 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3095 // Disjuncts 2.b.(i,ii,iii) above
3096 _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3097 CMSClassUnloadingMaxInterval)
3098 || _permGen->should_concurrent_collect()
3099 || _cmsGen->is_too_full();
3100 }
3101 return _should_unload_classes;
3102 }
3103
3104 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3105 bool res = should_concurrent_collect();
3106 #define CMSIsTooFullPercentage 98
3107 res = res && occupancy() > (double)CMSIsTooFullPercentage/100.0;
3108 return res;
3109 }
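// Worked example: with CMSIsTooFullPercentage == 98 above, a generation
// that wants a concurrent collection but is only, say, 90% occupied is
// not "too full", so it does not by itself trigger class unloading via
// the "old gen is getting really full" disjunct checked above.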
3110
3111 void CMSCollector::setup_cms_unloading_and_verification_state() {
3112 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3113 || VerifyBeforeExit;
3114 const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings
3115 | SharedHeap::SO_CodeCache;
3116
3117 if (should_unload_classes()) { // Should unload classes this cycle
3118 remove_root_scanning_option(rso); // Shrink the root set appropriately
3119 set_verifying(should_verify); // Set verification state for this cycle
3120 return; // Nothing else needs to be done at this time
3121 }
3122
3123 // Not unloading classes this cycle
assert(!should_unload_classes(), "Inconsistency!");
3125 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3126 // We were not verifying, or we _were_ unloading classes in the last cycle,
3127 // AND some verification options are enabled this cycle; in this case,
3128 // we must make sure that the deadness map is allocated if not already so,
3129 // and cleared (if already allocated previously --
3130 // CMSBitMap::sizeInBits() is used to determine if it's allocated).
3131 if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
3132 if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
3133 warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
3134 "permanent generation verification disabled");
3135 return; // Note that we leave verification disabled, so we'll retry this
3136 // allocation next cycle. We _could_ remember this failure
3137 // and skip further attempts and permanently disable verification
3138 // attempts if that is considered more desirable.
3139 }
3140 assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()),
3141 "_perm_gen_ver_bit_map inconsistency?");
3142 } else {
3143 perm_gen_verify_bit_map()->clear_all();
3144 }
3145 // Include symbols, strings and code cache elements to prevent their resurrection.
3146 add_root_scanning_option(rso);
3147 set_verifying(true);
3148 } else if (verifying() && !should_verify) {
3149 // We were verifying, but some verification flags got disabled.
3150 set_verifying(false);
3151 // Exclude symbols, strings and code cache elements from root scanning to
3152 // reduce IM and RM pauses.
3153 remove_root_scanning_option(rso);
3154 }
3155 }
3156
3157
3158 #ifndef PRODUCT
3159 HeapWord* CMSCollector::block_start(const void* p) const {
3160 const HeapWord* addr = (HeapWord*)p;
3161 if (_span.contains(p)) {
3162 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3163 return _cmsGen->cmsSpace()->block_start(p);
3164 } else {
3165 assert(_permGen->cmsSpace()->is_in_reserved(addr),
3166 "Inconsistent _span?");
3167 return _permGen->cmsSpace()->block_start(p);
3168 }
3169 }
3170 return NULL;
3171 }
3172 #endif
3173
3174 HeapWord*
3175 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
3176 bool tlab,
3177 bool parallel) {
3178 assert(!tlab, "Can't deal with TLAB allocation");
3179 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3180 expand(word_size*HeapWordSize, MinHeapDeltaBytes,
3181 CMSExpansionCause::_satisfy_allocation);
3182 if (GCExpandToAllocateDelayMillis > 0) {
3183 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3184 }
3185 size_t adj_word_sz = CompactibleFreeListSpace::adjustObjectSize(word_size);
3186 if (parallel) {
3187 return cmsSpace()->par_allocate(adj_word_sz);
3188 } else {
3189 return cmsSpace()->allocate(adj_word_sz);
3190 }
3191 }
3192
3193 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
3194 // OneContigSpaceCardGeneration, which makes me wonder if we should move this
3195 // to CardGeneration and share it...
3196 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3197 CMSExpansionCause::Cause cause)
3198 {
3199 assert_locked_or_safepoint(Heap_lock);
3200
3201 size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes);
3202 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
3203 bool success = false;
3204 if (aligned_expand_bytes > aligned_bytes) {
3205 success = grow_by(aligned_expand_bytes);
3206 }
3207 if (!success) {
3208 success = grow_by(aligned_bytes);
3209 }
3210 if (!success) {
3211 size_t remaining_bytes = _virtual_space.uncommitted_size();
3212 if (remaining_bytes > 0) {
3213 success = grow_by(remaining_bytes);
3214 }
3215 }
3216 if (GC_locker::is_active()) {
3217 if (PrintGC && Verbose) {
3218 gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
3219 }
3220 }
3221 // remember why we expanded; this information is used
3222 // by shouldConcurrentCollect() when making decisions on whether to start
3223 // a new CMS cycle.
3224 if (success) {
3225 set_expansion_cause(cause);
3226 if (PrintGCDetails && Verbose) {
3227 gclog_or_tty->print_cr("Expanded CMS gen for %s",
3228 CMSExpansionCause::to_string(cause));
3229 }
3230 }
3231 }
3232
3233 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3234 HeapWord* res = NULL;
3235 MutexLocker x(ParGCRareEvent_lock);
3236 while (true) {
3237 // Expansion by some other thread might make alloc OK now:
3238 res = ps->lab.alloc(word_sz);
3239 if (res != NULL) return res;
3240 // If there's not enough expansion space available, give up.
3241 if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3242 return NULL;
3243 }
3244 // Otherwise, we try expansion.
3245 expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3246 CMSExpansionCause::_allocate_par_lab);
3247 // Now go around the loop and try alloc again;
3248 // A competing par_promote might beat us to the expansion space,
// so we may go around the loop again if promotion fails again.
3250 if (GCExpandToAllocateDelayMillis > 0) {
3251 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3252 }
3253 }
3254 }
3255
3256
3257 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3258 PromotionInfo* promo) {
3259 MutexLocker x(ParGCRareEvent_lock);
3260 size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3261 while (true) {
3262 // Expansion by some other thread might make alloc OK now:
3263 if (promo->ensure_spooling_space()) {
3264 assert(promo->has_spooling_space(),
3265 "Post-condition of successful ensure_spooling_space()");
3266 return true;
3267 }
3268 // If there's not enough expansion space available, give up.
3269 if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3270 return false;
3271 }
3272 // Otherwise, we try expansion.
3273 expand(refill_size_bytes, MinHeapDeltaBytes,
3274 CMSExpansionCause::_allocate_par_spooling_space);
3275 // Now go around the loop and try alloc again;
3276 // A competing allocation might beat us to the expansion space,
3277 // so we may go around the loop again if allocation fails again.
3278 if (GCExpandToAllocateDelayMillis > 0) {
3279 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3280 }
3281 }
3282 }
3283
3284
3285
3286 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
3287 assert_locked_or_safepoint(Heap_lock);
3288 size_t size = ReservedSpace::page_align_size_down(bytes);
3289 if (size > 0) {
3290 shrink_by(size);
3291 }
3292 }
3293
3294 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
3295 assert_locked_or_safepoint(Heap_lock);
3296 bool result = _virtual_space.expand_by(bytes);
3297 if (result) {
3298 HeapWord* old_end = _cmsSpace->end();
3299 size_t new_word_size =
3300 heap_word_size(_virtual_space.committed_size());
3301 MemRegion mr(_cmsSpace->bottom(), new_word_size);
3302 _bts->resize(new_word_size); // resize the block offset shared array
3303 Universe::heap()->barrier_set()->resize_covered_region(mr);
3304 // Hmmmm... why doesn't CFLS::set_end verify locking?
3305 // This is quite ugly; FIX ME XXX
3306 _cmsSpace->assert_locked();
3307 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
3308
3309 // update the space and generation capacity counters
3310 if (UsePerfData) {
3311 _space_counters->update_capacity();
3312 _gen_counters->update_all();
3313 }
3314
3315 if (Verbose && PrintGC) {
3316 size_t new_mem_size = _virtual_space.committed_size();
3317 size_t old_mem_size = new_mem_size - bytes;
3318 gclog_or_tty->print_cr("Expanding %s from %ldK by %ldK to %ldK",
3319 name(), old_mem_size/K, bytes/K, new_mem_size/K);
3320 }
3321 }
3322 return result;
3323 }
3324
3325 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
3326 assert_locked_or_safepoint(Heap_lock);
3327 bool success = true;
3328 const size_t remaining_bytes = _virtual_space.uncommitted_size();
3329 if (remaining_bytes > 0) {
3330 success = grow_by(remaining_bytes);
3331 DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
3332 }
3333 return success;
3334 }
3335
3336 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3337 assert_locked_or_safepoint(Heap_lock);
3338 assert_lock_strong(freelistLock());
3339 // XXX Fix when compaction is implemented.
3340 warning("Shrinking of CMS not yet implemented");
3341 return;
3342 }
3343
3344
3345 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
3346 // phases.
3347 class CMSPhaseAccounting: public StackObj {
3348 public:
3349 CMSPhaseAccounting(CMSCollector *collector,
3350 const char *phase,
3351 bool print_cr = true);
3352 ~CMSPhaseAccounting();
3353
3354 private:
3355 CMSCollector *_collector;
3356 const char *_phase;
3357 elapsedTimer _wallclock;
3358 bool _print_cr;
3359
3360 public:
3361 // Not MT-safe; so do not pass around these StackObj's
3362 // where they may be accessed by other threads.
3363 jlong wallclock_millis() {
3364 assert(_wallclock.is_active(), "Wall clock should not stop");
3365 _wallclock.stop(); // to record time
3366 jlong ret = _wallclock.milliseconds();
3367 _wallclock.start(); // restart
3368 return ret;
3369 }
3370 };
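// Typical usage (cf. markFromRoots() and preclean() below):
//
//   CMSTokenSyncWithLocks ts(true, bitMapLock());
//   CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
//   ... do the concurrent phase's work ...
//   // pa's destructor logs "[<gen>-concurrent-mark: <cpu>/<wall> secs]"
//
// The constructor resets and starts the collector's CPU timer and the
// wallclock timer; the destructor stops both and prints the timing line.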
3371
3372 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
3373 const char *phase,
3374 bool print_cr) :
3375 _collector(collector), _phase(phase), _print_cr(print_cr) {
3376
3377 if (PrintCMSStatistics != 0) {
3378 _collector->resetYields();
3379 }
3380 if (PrintGCDetails && PrintGCTimeStamps) {
3381 gclog_or_tty->date_stamp(PrintGCDateStamps);
3382 gclog_or_tty->stamp();
3383 gclog_or_tty->print_cr(": [%s-concurrent-%s-start]",
3384 _collector->cmsGen()->short_name(), _phase);
3385 }
3386 _collector->resetTimer();
3387 _wallclock.start();
3388 _collector->startTimer();
3389 }
3390
3391 CMSPhaseAccounting::~CMSPhaseAccounting() {
3392 assert(_wallclock.is_active(), "Wall clock should not have stopped");
3393 _collector->stopTimer();
3394 _wallclock.stop();
3395 if (PrintGCDetails) {
3396 gclog_or_tty->date_stamp(PrintGCDateStamps);
3397 if (PrintGCTimeStamps) {
3398 gclog_or_tty->stamp();
3399 gclog_or_tty->print(": ");
3400 }
3401 gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3402 _collector->cmsGen()->short_name(),
3403 _phase, _collector->timerValue(), _wallclock.seconds());
3404 if (_print_cr) {
3405 gclog_or_tty->print_cr("");
3406 }
3407 if (PrintCMSStatistics != 0) {
3408 gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3409 _collector->yields());
3410 }
3411 }
3412 }
3413
3414 // CMS work
3415
3416 // Checkpoint the roots into this generation from outside
3417 // this generation. [Note this initial checkpoint need only
3418 // be approximate -- we'll do a catch up phase subsequently.]
3419 void CMSCollector::checkpointRootsInitial(bool asynch) {
3420 assert(_collectorState == InitialMarking, "Wrong collector state");
3421 check_correct_thread_executing();
3422 ReferenceProcessor* rp = ref_processor();
3423 SpecializationStats::clear();
3424 assert(_restart_addr == NULL, "Control point invariant");
3425 if (asynch) {
3426 // acquire locks for subsequent manipulations
3427 MutexLockerEx x(bitMapLock(),
3428 Mutex::_no_safepoint_check_flag);
3429 checkpointRootsInitialWork(asynch);
3430 rp->verify_no_references_recorded();
3431 rp->enable_discovery(); // enable ("weak") refs discovery
3432 _collectorState = Marking;
3433 } else {
3434 // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
3435 // which recognizes if we are a CMS generation, and doesn't try to turn on
3436 // discovery; verify that they aren't meddling.
3437 assert(!rp->discovery_is_atomic(),
3438 "incorrect setting of discovery predicate");
3439 assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3440 "ref discovery for this generation kind");
3441 // already have locks
3442 checkpointRootsInitialWork(asynch);
3443 rp->enable_discovery(); // now enable ("weak") refs discovery
3444 _collectorState = Marking;
3445 }
3446 SpecializationStats::print();
3447 }
3448
3449 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3450 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3451 assert(_collectorState == InitialMarking, "just checking");
3452
3453 // If there has not been a GC[n-1] since last GC[n] cycle completed,
3454 // precede our marking with a collection of all
3455 // younger generations to keep floating garbage to a minimum.
3456 // XXX: we won't do this for now -- it's an optimization to be done later.
3457
3458 // already have locks
3459 assert_lock_strong(bitMapLock());
3460 assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3461
3462 // Setup the verification and class unloading state for this
3463 // CMS collection cycle.
3464 setup_cms_unloading_and_verification_state();
3465
3466 NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork",
3467 PrintGCDetails && Verbose, true, gclog_or_tty);)
3468 if (UseAdaptiveSizePolicy) {
3469 size_policy()->checkpoint_roots_initial_begin();
3470 }
3471
3472 // Reset all the PLAB chunk arrays if necessary.
3473 if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3474 reset_survivor_plab_arrays();
3475 }
3476
3477 ResourceMark rm;
3478 HandleMark hm;
3479
3480 FalseClosure falseClosure;
3481 // In the case of a synchronous collection, we will elide the
3482 // remark step, so it's important to catch all the nmethod oops
// in this step; hence the last argument to the constructor below.
3484 MarkRefsIntoClosure notOlder(_span, &_markBitMap, !asynch /* nmethods */);
3485 GenCollectedHeap* gch = GenCollectedHeap::heap();
3486
3487 verify_work_stacks_empty();
3488 verify_overflow_empty();
3489
3490 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
3491 // Update the saved marks which may affect the root scans.
3492 gch->save_marks();
3493
3494 // weak reference processing has not started yet.
3495 ref_processor()->set_enqueuing_is_done(false);
3496
3497 {
3498 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3499 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3500 gch->gen_process_strong_roots(_cmsGen->level(),
3501 true, // younger gens are roots
3502 true, // collecting perm gen
3503 SharedHeap::ScanningOption(roots_scanning_options()),
NULL, &notOlder);
3505 }
3506
3507 // Clear mod-union table; it will be dirtied in the prologue of
3508 // CMS generation per each younger generation collection.
3509
3510 assert(_modUnionTable.isAllClear(),
3511 "Was cleared in most recent final checkpoint phase"
3512 " or no bits are set in the gc_prologue before the start of the next "
3513 "subsequent marking phase.");
3514
3515 // Temporarily disabled, since pre/post-consumption closures don't
3516 // care about precleaned cards
3517 #if 0
3518 {
3519 MemRegion mr = MemRegion((HeapWord*)_virtual_space.low(),
3520 (HeapWord*)_virtual_space.high());
3521 _ct->ct_bs()->preclean_dirty_cards(mr);
3522 }
3523 #endif
3524
3525 // Save the end of the used_region of the constituent generations
3526 // to be used to limit the extent of sweep in each generation.
3527 save_sweep_limits();
3528 if (UseAdaptiveSizePolicy) {
3529 size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3530 }
3531 verify_overflow_empty();
3532 }
3533
3534 bool CMSCollector::markFromRoots(bool asynch) {
3535 // we might be tempted to assert that:
3536 // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
3537 // "inconsistent argument?");
3538 // However that wouldn't be right, because it's possible that
3539 // a safepoint is indeed in progress as a younger generation
3540 // stop-the-world GC happens even as we mark in this generation.
3541 assert(_collectorState == Marking, "inconsistent state?");
3542 check_correct_thread_executing();
3543 verify_overflow_empty();
3544
3545 bool res;
3546 if (asynch) {
3547
3548 // Start the timers for adaptive size policy for the concurrent phases
3549 // Do it here so that the foreground MS can use the concurrent
// timer since a foreground MS might have the sweep done concurrently
3551 // or STW.
3552 if (UseAdaptiveSizePolicy) {
3553 size_policy()->concurrent_marking_begin();
3554 }
3555
3556 // Weak ref discovery note: We may be discovering weak
// refs in this generation concurrently (but interleaved) with
3558 // weak ref discovery by a younger generation collector.
3559
3560 CMSTokenSyncWithLocks ts(true, bitMapLock());
3561 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3562 CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3563 res = markFromRootsWork(asynch);
3564 if (res) {
3565 _collectorState = Precleaning;
3566 } else { // We failed and a foreground collection wants to take over
3567 assert(_foregroundGCIsActive, "internal state inconsistency");
3568 assert(_restart_addr == NULL, "foreground will restart from scratch");
3569 if (PrintGCDetails) {
3570 gclog_or_tty->print_cr("bailing out to foreground collection");
3571 }
3572 }
3573 if (UseAdaptiveSizePolicy) {
3574 size_policy()->concurrent_marking_end();
3575 }
3576 } else {
3577 assert(SafepointSynchronize::is_at_safepoint(),
3578 "inconsistent with asynch == false");
3579 if (UseAdaptiveSizePolicy) {
3580 size_policy()->ms_collection_marking_begin();
3581 }
3582 // already have locks
3583 res = markFromRootsWork(asynch);
3584 _collectorState = FinalMarking;
3585 if (UseAdaptiveSizePolicy) {
3586 GenCollectedHeap* gch = GenCollectedHeap::heap();
3587 size_policy()->ms_collection_marking_end(gch->gc_cause());
3588 }
3589 }
3590 verify_overflow_empty();
3591 return res;
3592 }
3593
3594 bool CMSCollector::markFromRootsWork(bool asynch) {
3595 // iterate over marked bits in bit map, doing a full scan and mark
3596 // from these roots using the following algorithm:
3597 // . if oop is to the right of the current scan pointer,
3598 // mark corresponding bit (we'll process it later)
3599 // . else (oop is to left of current scan pointer)
3600 // push oop on marking stack
3601 // . drain the marking stack
3602
3603 // Note that when we do a marking step we need to hold the
3604 // bit map lock -- recall that direct allocation (by mutators)
3605 // and promotion (by younger generation collectors) is also
3606 // marking the bit map. [the so-called allocate live policy.]
3607 // Because the implementation of bit map marking is not
3608 // robust wrt simultaneous marking of bits in the same word,
3609 // we need to make sure that there is no such interference
// between such concurrent updates.
3611
3612 // already have locks
3613 assert_lock_strong(bitMapLock());
3614
3615 // Clear the revisit stack, just in case there are any
3616 // obsolete contents from a short-circuited previous CMS cycle.
3617 _revisitStack.reset();
3618 verify_work_stacks_empty();
3619 verify_overflow_empty();
3620 assert(_revisitStack.isEmpty(), "tabula rasa");
3621
3622 bool result = false;
3623 if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
3624 result = do_marking_mt(asynch);
3625 } else {
3626 result = do_marking_st(asynch);
3627 }
3628 return result;
3629 }
3630
3631 // Forward decl
3632 class CMSConcMarkingTask;
3633
3634 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3635 CMSCollector* _collector;
3636 CMSConcMarkingTask* _task;
3637 bool _yield;
3638 protected:
3639 virtual void yield();
3640 public:
3641 // "n_threads" is the number of threads to be terminated.
3642 // "queue_set" is a set of work queues of other threads.
3643 // "collector" is the CMS collector associated with this task terminator.
3644 // "yield" indicates whether we need the gang as a whole to yield.
3645 CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set,
3646 CMSCollector* collector, bool yield) :
3647 ParallelTaskTerminator(n_threads, queue_set),
3648 _collector(collector),
3649 _yield(yield) { }
3650
3651 void set_task(CMSConcMarkingTask* task) {
3652 _task = task;
3653 }
3654 };
3655
3656 // MT Concurrent Marking Task
3657 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3658 CMSCollector* _collector;
3659 YieldingFlexibleWorkGang* _workers; // the whole gang
3660 int _n_workers; // requested/desired # workers
3661 bool _asynch;
3662 bool _result;
3663 CompactibleFreeListSpace* _cms_space;
3664 CompactibleFreeListSpace* _perm_space;
3665 HeapWord* _global_finger;
3666
3667 // Exposed here for yielding support
3668 Mutex* const _bit_map_lock;
3669
3670 // The per thread work queues, available here for stealing
3671 OopTaskQueueSet* _task_queues;
3672 CMSConcMarkingTerminator _term;
3673
3674 public:
3675 CMSConcMarkingTask(CMSCollector* collector,
3676 CompactibleFreeListSpace* cms_space,
3677 CompactibleFreeListSpace* perm_space,
3678 bool asynch, int n_workers,
3679 YieldingFlexibleWorkGang* workers,
3680 OopTaskQueueSet* task_queues):
3681 YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3682 _collector(collector),
3683 _cms_space(cms_space),
3684 _perm_space(perm_space),
3685 _asynch(asynch), _n_workers(n_workers), _result(true),
3686 _workers(workers), _task_queues(task_queues),
3687 _term(n_workers, task_queues, _collector, asynch),
3688 _bit_map_lock(collector->bitMapLock())
3689 {
3690 assert(n_workers <= workers->total_workers(),
3691 "Else termination won't work correctly today"); // XXX FIX ME!
3692 _requested_size = n_workers;
3693 _term.set_task(this);
3694 assert(_cms_space->bottom() < _perm_space->bottom(),
3695 "Finger incorrectly initialized below");
3696 _global_finger = _cms_space->bottom();
3697 }
3698
3699
3700 OopTaskQueueSet* task_queues() { return _task_queues; }
3701
3702 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3703
3704 HeapWord** global_finger_addr() { return &_global_finger; }
3705
3706 CMSConcMarkingTerminator* terminator() { return &_term; }
3707
3708 void work(int i);
3709
3710 virtual void coordinator_yield(); // stuff done by coordinator
3711 bool result() { return _result; }
3712
3713 void reset(HeapWord* ra) {
3714 _term.reset_for_reuse();
3715 }
3716
3717 static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3718 OopTaskQueue* work_q);
3719
3720 private:
3721 void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3722 void do_work_steal(int i);
3723 void bump_global_finger(HeapWord* f);
3724 };
3725
3726 void CMSConcMarkingTerminator::yield() {
3727 if (ConcurrentMarkSweepThread::should_yield() &&
3728 !_collector->foregroundGCIsActive() &&
3729 _yield) {
3730 _task->yield();
3731 } else {
3732 ParallelTaskTerminator::yield();
3733 }
3734 }
3735
3736 ////////////////////////////////////////////////////////////////
3737 // Concurrent Marking Algorithm Sketch
3738 ////////////////////////////////////////////////////////////////
3739 // Until all tasks exhausted (both spaces):
3740 // -- claim next available chunk
3741 // -- bump global finger via CAS
3742 // -- find first object that starts in this chunk
3743 // and start scanning bitmap from that position
3744 // -- scan marked objects for oops
3745 // -- CAS-mark target, and if successful:
3746 // . if target oop is above global finger (volatile read)
3747 // nothing to do
3748 // . if target oop is in chunk and above local finger
3749 // then nothing to do
3750 // . else push on work-queue
3751 // -- Deal with possible overflow issues:
3752 // . local work-queue overflow causes stuff to be pushed on
3753 // global (common) overflow queue
3754 // . always first empty local work queue
3755 // . then get a batch of oops from global work queue if any
3756 // . then do work stealing
3757 // -- When all tasks claimed (both spaces)
3758 // and local work queue empty,
3759 // then in a loop do:
3760 // . check global overflow stack; steal a batch of oops and trace
// . try to steal from other threads if GOS is empty
3762 // . if neither is available, offer termination
3763 // -- Terminate and return result
3764 //
3765 void CMSConcMarkingTask::work(int i) {
3766 elapsedTimer _timer;
3767 ResourceMark rm;
3768 HandleMark hm;
3769
3770 DEBUG_ONLY(_collector->verify_overflow_empty();)
3771
3772 // Before we begin work, our work queue should be empty
3773 assert(work_queue(i)->size() == 0, "Expected to be empty");
3774 // Scan the bitmap covering _cms_space, tracing through grey objects.
3775 _timer.start();
3776 do_scan_and_mark(i, _cms_space);
3777 _timer.stop();
3778 if (PrintCMSStatistics != 0) {
3779 gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
3780 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3781 }
3782
3783 // ... do the same for the _perm_space
3784 _timer.reset();
3785 _timer.start();
3786 do_scan_and_mark(i, _perm_space);
3787 _timer.stop();
3788 if (PrintCMSStatistics != 0) {
3789 gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec",
3790 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3791 }
3792
3793 // ... do work stealing
3794 _timer.reset();
3795 _timer.start();
3796 do_work_steal(i);
3797 _timer.stop();
3798 if (PrintCMSStatistics != 0) {
3799 gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
3800 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
3801 }
3802 assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3803 assert(work_queue(i)->size() == 0, "Should have been emptied");
3804 // Note that under the current task protocol, the
// following assertion is true even if the spaces
3806 // expanded since the completion of the concurrent
3807 // marking. XXX This will likely change under a strict
3808 // ABORT semantics.
3809 assert(_global_finger > _cms_space->end() &&
3810 _global_finger >= _perm_space->end(),
3811 "All tasks have been completed");
3812 DEBUG_ONLY(_collector->verify_overflow_empty();)
3813 }
3814
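// Lock-free, monotonic "max" update of _global_finger: retry the CAS
// below until either we succeed in installing f, or we observe a value
// already >= f published by a competing worker.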
3815 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3816 HeapWord* read = _global_finger;
3817 HeapWord* cur = read;
3818 while (f > read) {
3819 cur = read;
3820 read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3821 if (cur == read) {
3822 // our cas succeeded
3823 assert(_global_finger >= f, "protocol consistency");
3824 break;
3825 }
3826 }
3827 }
3828
3829 // This is really inefficient, and should be redone by
3830 // using (not yet available) block-read and -write interfaces to the
3831 // stack and the work_queue. XXX FIX ME !!!
3832 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3833 OopTaskQueue* work_q) {
3834 // Fast lock-free check
3835 if (ovflw_stk->length() == 0) {
3836 return false;
3837 }
3838 assert(work_q->size() == 0, "Shouldn't steal");
3839 MutexLockerEx ml(ovflw_stk->par_lock(),
3840 Mutex::_no_safepoint_check_flag);
3841 // Grab up to 1/4 the size of the work queue
3842 size_t num = MIN2((size_t)work_q->max_elems()/4,
3843 (size_t)ParGCDesiredObjsFromOverflowList);
3844 num = MIN2(num, ovflw_stk->length());
3845 for (int i = (int) num; i > 0; i--) {
3846 oop cur = ovflw_stk->pop();
3847 assert(cur != NULL, "Counted wrong?");
3848 work_q->push(cur);
3849 }
3850 return num > 0;
3851 }
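// Worked example (hedged, assuming ParGCDesiredObjsFromOverflowList is at
// its assumed default of 20 and the work queue's capacity is well above
// 80): each refill above transfers MIN2(capacity/4, 20) == 20 oops,
// further capped by the overflow stack's current length.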
3852
3853 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3854 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3855 int n_tasks = pst->n_tasks();
3856 // We allow that there may be no tasks to do here because
3857 // we are restarting after a stack overflow.
assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3859 int nth_task = 0;
3860
3861 HeapWord* start = sp->bottom();
3862 size_t chunk_size = sp->marking_task_size();
3863 while (!pst->is_task_claimed(/* reference */ nth_task)) {
3864 // Having claimed the nth task in this space,
3865 // compute the chunk that it corresponds to:
3866 MemRegion span = MemRegion(start + nth_task*chunk_size,
3867 start + (nth_task+1)*chunk_size);
3868 // Try and bump the global finger via a CAS;
3869 // note that we need to do the global finger bump
3870 // _before_ taking the intersection below, because
3871 // the task corresponding to that region will be
3872 // deemed done even if the used_region() expands
3873 // because of allocation -- as it almost certainly will
3874 // during start-up while the threads yield in the
3875 // closure below.
3876 HeapWord* finger = span.end();
3877 bump_global_finger(finger); // atomically
3878 // There are null tasks here corresponding to chunks
3879 // beyond the "top" address of the space.
3880 span = span.intersection(sp->used_region());
3881 if (!span.is_empty()) { // Non-null task
3882 // We want to skip the first object because
3883 // the protocol is to scan any object in its entirety
3884 // that _starts_ in this span; a fortiori, any
3885 // object starting in an earlier span is scanned
3886 // as part of an earlier claimed task.
3887 // Below we use the "careful" version of block_start
3888 // so we do not try to navigate uninitialized objects.
3889 HeapWord* prev_obj = sp->block_start_careful(span.start());
3890 // Below we use a variant of block_size that uses the
3891 // Printezis bits to avoid waiting for allocated
3892 // objects to become initialized/parsable.
3893 while (prev_obj < span.start()) {
3894 size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3895 if (sz > 0) {
3896 prev_obj += sz;
3897 } else {
3898 // In this case we may end up doing a bit of redundant
3899 // scanning, but that appears unavoidable, short of
3900 // locking the free list locks; see bug 6324141.
3901 break;
3902 }
3903 }
3904 if (prev_obj < span.end()) {
3905 MemRegion my_span = MemRegion(prev_obj, span.end());
3906 // Do the marking work within a non-empty span --
3907 // the last argument to the constructor indicates whether the
3908 // iteration should be incremental with periodic yields.
3909 Par_MarkFromRootsClosure cl(this, _collector, my_span,
3910 &_collector->_markBitMap,
3911 work_queue(i),
3912 &_collector->_markStack,
3913 &_collector->_revisitStack,
3914 _asynch);
3915 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3916 } // else nothing to do for this task
3917 } // else nothing to do for this task
3918 }
3919 // We'd be tempted to assert here that since there are no
3920 // more tasks left to claim in this space, the global_finger
3921 // must exceed space->top() and a fortiori space->end(). However,
3922 // that would not quite be correct because the bumping of
3923 // global_finger occurs strictly after the claiming of a task,
3924 // so by the time we reach here the global finger may not yet
3925 // have been bumped up by the thread that claimed the last
3926 // task.
3927 pst->all_tasks_completed();
3928 }
3929
3930 class Par_ConcMarkingClosure: public OopClosure {
3931 CMSCollector* _collector;
3932 MemRegion _span;
3933 CMSBitMap* _bit_map;
3934 CMSMarkStack* _overflow_stack;
3935 CMSMarkStack* _revisit_stack; // XXXXXX Check proper use
3936 OopTaskQueue* _work_queue;
3937
3938 public:
3939 Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
3940 CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
3941 _collector(collector),
3942 _span(_collector->_span),
3943 _work_queue(work_queue),
3944 _bit_map(bit_map),
3945 _overflow_stack(overflow_stack) { } // need to initialize revisit stack etc.
3946
3947 void do_oop(oop* p);
3948 void trim_queue(size_t max);
3949 void handle_stack_overflow(HeapWord* lost);
3950 };
3951
3952 // Grey object rescan during work stealing phase --
3953 // the salient assumption here is that stolen oops must
3954 // always be initialized, so we do not need to check for
3955 // uninitialized objects before scanning here.
3956 void Par_ConcMarkingClosure::do_oop(oop* p) {
3957 oop this_oop = *p;
3958 assert(this_oop->is_oop_or_null(),
3959 "expected an oop or NULL");
3960 HeapWord* addr = (HeapWord*)this_oop;
3961 // Check if oop points into the CMS generation
3962 // and is not marked
3963 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3964 // a white object ...
3965 // If we manage to "claim" the object, by being the
3966 // first thread to mark it, then we push it on our
3967 // marking stack
3968 if (_bit_map->par_mark(addr)) { // ... now grey
3969 // push on work queue (grey set)
3970 bool simulate_overflow = false;
3971 NOT_PRODUCT(
3972 if (CMSMarkStackOverflowALot &&
3973 _collector->simulate_overflow()) {
3974 // simulate a stack overflow
3975 simulate_overflow = true;
3976 }
3977 )
3978 if (simulate_overflow ||
3979 !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
3980 // stack overflow
3981 if (PrintCMSStatistics != 0) {
3982 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
3983 SIZE_FORMAT, _overflow_stack->capacity());
3984 }
3985 // We cannot assert that the overflow stack is full because
3986 // it may have been emptied since.
3987 assert(simulate_overflow ||
3988 _work_queue->size() == _work_queue->max_elems(),
3989 "Else push should have succeeded");
3990 handle_stack_overflow(addr);
3991 }
3992 } // Else, some other thread got there first
3993 }
3994 }
3995
3996 void Par_ConcMarkingClosure::trim_queue(size_t max) {
3997 while (_work_queue->size() > max) {
3998 oop new_oop;
3999 if (_work_queue->pop_local(new_oop)) {
4000 assert(new_oop->is_oop(), "Should be an oop");
4001 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
4002 assert(_span.contains((HeapWord*)new_oop), "Not in span");
4003 assert(new_oop->is_parsable(), "Should be parsable");
4004 new_oop->oop_iterate(this); // do_oop() above
4005 }
4006 }
4007 }
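// Note: trim_queue(0), as used in do_work_steal() below, drains the work
// queue completely; each popped oop is re-scanned via do_oop() above,
// which may push more work, so the loop runs until the queue is empty.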
4008
4009 // Upon stack overflow, we discard (part of) the stack,
4010 // remembering the least address amongst those discarded
// in CMSCollector's _restart_addr.
4012 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
4013 // We need to do this under a mutex to prevent other
4014 // workers from interfering with the expansion below.
4015 MutexLockerEx ml(_overflow_stack->par_lock(),
4016 Mutex::_no_safepoint_check_flag);
4017 // Remember the least grey address discarded
4018 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
4019 _collector->lower_restart_addr(ra);
4020 _overflow_stack->reset(); // discard stack contents
4021 _overflow_stack->expand(); // expand the stack if possible
4022 }
4023
4024
4025 void CMSConcMarkingTask::do_work_steal(int i) {
4026 OopTaskQueue* work_q = work_queue(i);
4027 oop obj_to_scan;
4028 CMSBitMap* bm = &(_collector->_markBitMap);
4029 CMSMarkStack* ovflw = &(_collector->_markStack);
4030 int* seed = _collector->hash_seed(i);
4031 Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw);
4032 while (true) {
4033 cl.trim_queue(0);
4034 assert(work_q->size() == 0, "Should have been emptied above");
4035 if (get_work_from_overflow_stack(ovflw, work_q)) {
4036 // Can't assert below because the work obtained from the
4037 // overflow stack may already have been stolen from us.
4038 // assert(work_q->size() > 0, "Work from overflow stack");
4039 continue;
4040 } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4041 assert(obj_to_scan->is_oop(), "Should be an oop");
4042 assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
4043 obj_to_scan->oop_iterate(&cl);
4044 } else if (terminator()->offer_termination()) {
4045 assert(work_q->size() == 0, "Impossible!");
4046 break;
4047 }
4048 }
4049 }
4050
4051 // This is run by the CMS (coordinator) thread.
4052 void CMSConcMarkingTask::coordinator_yield() {
4053 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4054 "CMS thread should hold CMS token");
4055
4056 // First give up the locks, then yield, then re-lock
4057 // We should probably use a constructor/destructor idiom to
4058 // do this unlock/lock or modify the MutexUnlocker class to
4059 // serve our purpose. XXX
4060 assert_lock_strong(_bit_map_lock);
4061 _bit_map_lock->unlock();
4062 ConcurrentMarkSweepThread::desynchronize(true);
4063 ConcurrentMarkSweepThread::acknowledge_yield_request();
4064 _collector->stopTimer();
4065 if (PrintCMSStatistics != 0) {
4066 _collector->incrementYields();
4067 }
4068 _collector->icms_wait();
4069
4070 // It is possible for whichever thread initiated the yield request
4071 // not to get a chance to wake up and take the bitmap lock between
4072 // this thread releasing it and reacquiring it. So, while the
4073 // should_yield() flag is on, let's sleep for a bit to give the
4074 // other thread a chance to wake up. The limit imposed on the number
// of iterations is defensive, to avoid any unforeseen circumstances
4076 // putting us into an infinite loop. Since it's always been this
4077 // (coordinator_yield()) method that was observed to cause the
4078 // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
4079 // which is by default non-zero. For the other seven methods that
// also perform the yield operation, we are using a different
4081 // parameter (CMSYieldSleepCount) which is by default zero. This way we
4082 // can enable the sleeping for those methods too, if necessary.
4083 // See 6442774.
4084 //
4085 // We really need to reconsider the synchronization between the GC
4086 // thread and the yield-requesting threads in the future and we
4087 // should really use wait/notify, which is the recommended
4088 // way of doing this type of interaction. Additionally, we should
// consolidate the eight nearly identical methods that do the yield
// operation into one for better maintainability and
4091 // readability. See 6445193.
4092 //
4093 // Tony 2006.06.29
4094 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4095 ConcurrentMarkSweepThread::should_yield() &&
4096 !CMSCollector::foregroundGCIsActive(); ++i) {
4097 os::sleep(Thread::current(), 1, false);
4098 ConcurrentMarkSweepThread::acknowledge_yield_request();
4099 }
4100
4101 ConcurrentMarkSweepThread::synchronize(true);
4102 _bit_map_lock->lock_without_safepoint_check();
4103 _collector->startTimer();
4104 }
4105
4106 bool CMSCollector::do_marking_mt(bool asynch) {
4107 assert(ParallelCMSThreads > 0 && conc_workers() != NULL, "precondition");
4108 // In the future this would be determined ergonomically, based
4109 // on #cpu's, # active mutator threads (and load), and mutation rate.
4110 int num_workers = ParallelCMSThreads;
4111
4112 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
4113 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
4114
4115 CMSConcMarkingTask tsk(this, cms_space, perm_space,
4116 asynch, num_workers /* number requested XXX */,
4117 conc_workers(), task_queues());
4118
4119 // Since the actual number of workers we get may be different
4120 // from the number we requested above, do we need to do anything different
// below? In particular, maybe we need to subclass the SequentialSubTasksDone
4122 // class?? XXX
4123 cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
4124 perm_space->initialize_sequential_subtasks_for_marking(num_workers);
4125
4126 // Refs discovery is already non-atomic.
4127 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4128 // Mutate the Refs discovery so it is MT during the
4129 // multi-threaded marking phase.
4130 ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
4131
4132 conc_workers()->start_task(&tsk);
4133 while (tsk.yielded()) {
4134 tsk.coordinator_yield();
4135 conc_workers()->continue_task(&tsk);
4136 }
4137 // If the task was aborted, _restart_addr will be non-NULL
4138 assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
4139 while (_restart_addr != NULL) {
4140 // XXX For now we do not make use of ABORTED state and have not
4141 // yet implemented the right abort semantics (even in the original
4142 // single-threaded CMS case). That needs some more investigation
4143 // and is deferred for now; see CR# TBF. 07252005YSR. XXX
4144 assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
4145 // If _restart_addr is non-NULL, a marking stack overflow
// occurred; we need to do a fresh marking iteration from the
4147 // indicated restart address.
4148 if (_foregroundGCIsActive && asynch) {
4149 // We may be running into repeated stack overflows, having
4150 // reached the limit of the stack size, while making very
4151 // slow forward progress. It may be best to bail out and
4152 // let the foreground collector do its job.
4153 // Clear _restart_addr, so that foreground GC
4154 // works from scratch. This avoids the headache of
4155 // a "rescan" which would otherwise be needed because
4156 // of the dirty mod union table & card table.
4157 _restart_addr = NULL;
4158 return false;
4159 }
4160 // Adjust the task to restart from _restart_addr
4161 tsk.reset(_restart_addr);
4162 cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
4163 _restart_addr);
4164 perm_space->initialize_sequential_subtasks_for_marking(num_workers,
4165 _restart_addr);
4166 _restart_addr = NULL;
4167 // Get the workers going again
4168 conc_workers()->start_task(&tsk);
4169 while (tsk.yielded()) {
4170 tsk.coordinator_yield();
4171 conc_workers()->continue_task(&tsk);
4172 }
4173 }
4174 assert(tsk.completed(), "Inconsistency");
4175 assert(tsk.result() == true, "Inconsistency");
4176 return true;
4177 }
4178
4179 bool CMSCollector::do_marking_st(bool asynch) {
4180 ResourceMark rm;
4181 HandleMark hm;
4182
4183 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
4184 &_markStack, &_revisitStack, CMSYield && asynch);
// the last argument to the closure constructor above indicates whether
// the iteration should be incremental with periodic yields.
4187 _markBitMap.iterate(&markFromRootsClosure);
4188 // If _restart_addr is non-NULL, a marking stack overflow
// occurred; we need to do a fresh iteration from the
4190 // indicated restart address.
4191 while (_restart_addr != NULL) {
4192 if (_foregroundGCIsActive && asynch) {
4193 // We may be running into repeated stack overflows, having
4194 // reached the limit of the stack size, while making very
4195 // slow forward progress. It may be best to bail out and
4196 // let the foreground collector do its job.
4197 // Clear _restart_addr, so that foreground GC
4198 // works from scratch. This avoids the headache of
4199 // a "rescan" which would otherwise be needed because
4200 // of the dirty mod union table & card table.
4201 _restart_addr = NULL;
4202 return false; // indicating failure to complete marking
4203 }
4204 // Deal with stack overflow:
4205 // we restart marking from _restart_addr
4206 HeapWord* ra = _restart_addr;
4207 markFromRootsClosure.reset(ra);
4208 _restart_addr = NULL;
4209 _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4210 }
4211 return true;
4212 }
4213
4214 void CMSCollector::preclean() {
4215 check_correct_thread_executing();
4216 assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4217 verify_work_stacks_empty();
4218 verify_overflow_empty();
4219 _abort_preclean = false;
4220 if (CMSPrecleaningEnabled) {
4221 _eden_chunk_index = 0;
4222 size_t used = get_eden_used();
4223 size_t capacity = get_eden_capacity();
4224 // Don't start sampling unless we will get sufficiently
4225 // many samples.
4226 if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4227 * CMSScheduleRemarkEdenPenetration)) {
4228 _start_sampling = true;
4229 } else {
4230 _start_sampling = false;
4231 }
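// Worked example, assuming the typical defaults
// CMSScheduleRemarkSamplingRatio == 5 and
// CMSScheduleRemarkEdenPenetration == 50: sampling starts only when
// used < capacity/(5*100) * 50, i.e. when Eden is under ~10% full.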
4232 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4233 CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
4234 preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4235 }
4236 CMSTokenSync x(true); // is cms thread
4237 if (CMSPrecleaningEnabled) {
4238 sample_eden();
4239 _collectorState = AbortablePreclean;
4240 } else {
4241 _collectorState = FinalMarking;
4242 }
4243 verify_work_stacks_empty();
4244 verify_overflow_empty();
4245 }
4246
4247 // Try and schedule the remark such that young gen
4248 // occupancy is CMSScheduleRemarkEdenPenetration %.
4249 void CMSCollector::abortable_preclean() {
4250 check_correct_thread_executing();
4251 assert(CMSPrecleaningEnabled, "Inconsistent control state");
4252 assert(_collectorState == AbortablePreclean, "Inconsistent control state");
4253
4254 // If Eden's current occupancy is below this threshold,
4255 // immediately schedule the remark; else preclean
4256 // past the next scavenge in an effort to
// schedule the pause as described above. By choosing
4258 // CMSScheduleRemarkEdenSizeThreshold >= max eden size
4259 // we will never do an actual abortable preclean cycle.
4260 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
4261 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4262 CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
4263 // We need more smarts in the abortable preclean
4264 // loop below to deal with cases where allocation
4265 // in young gen is very very slow, and our precleaning
4266 // is running a losing race against a horde of
4267 // mutators intent on flooding us with CMS updates
4268 // (dirty cards).
4269 // One, admittedly dumb, strategy is to give up
4270 // after a certain number of abortable precleaning loops
4271 // or after a certain maximum time. We want to make
4272 // this smarter in the next iteration.
4273 // XXX FIX ME!!! YSR
4274 size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
4275 while (!(should_abort_preclean() ||
4276 ConcurrentMarkSweepThread::should_terminate())) {
4277 workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
4278 cumworkdone += workdone;
4279 loops++;
4280 // Voluntarily terminate abortable preclean phase if we have
4281 // been at it for too long.
4282 if ((CMSMaxAbortablePrecleanLoops != 0) &&
4283 loops >= CMSMaxAbortablePrecleanLoops) {
4284 if (PrintGCDetails) {
4285 gclog_or_tty->print(" CMS: abort preclean due to loops ");
4286 }
4287 break;
4288 }
4289 if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
4290 if (PrintGCDetails) {
4291 gclog_or_tty->print(" CMS: abort preclean due to time ");
4292 }
4293 break;
4294 }
4295 // If we are doing little work each iteration, we should
4296 // take a short break.
4297 if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
4298 // Sleep for some time, waiting for work to accumulate
4299 stopTimer();
4300 cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
4301 startTimer();
4302 waited++;
4303 }
4304 }
4305 if (PrintCMSStatistics > 0) {
gclog_or_tty->print(" [%d iterations, %d waits, %d cards] ",
4307 loops, waited, cumworkdone);
    }
  }
  CMSTokenSync x(true); // is cms thread
  if (_collectorState != Idling) {
    assert(_collectorState == AbortablePreclean,
           "Spontaneous state transition?");
    _collectorState = FinalMarking;
  } // Else, a foreground collection completed this CMS cycle.
  return;
}

// Respond to an Eden sampling opportunity
void CMSCollector::sample_eden() {
  // Make sure a young gc cannot sneak in between our
  // reading and recording of a sample.
  assert(Thread::current()->is_ConcurrentGC_thread(),
         "Only the cms thread may collect Eden samples");
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "Should collect samples while holding CMS token");
  if (!_start_sampling) {
    return;
  }
  if (_eden_chunk_array) {
    if (_eden_chunk_index < _eden_chunk_capacity) {
      _eden_chunk_array[_eden_chunk_index] = *_top_addr;  // take sample
      assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
             "Unexpected state of Eden");
      // We'd like to check that what we just sampled is an oop-start address;
      // however, we cannot do that here since the object may not yet have been
      // initialized. So we'll instead do the check when we _use_ this sample
      // later.
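      // A worked illustration of the commit test below (values assumed
      // purely for concreteness): if successive sampling opportunities
      // observe Eden top at word offsets 100, 120, 400 and 900 with
      // CMSSamplingGrain == 256, the samples at 100, 400 and 900 are
      // committed, while the one at 120 is simply overwritten at the
      // next opportunity because it lies within the grain of its
      // committed predecessor. Committed samples later serve as chunk
      // boundaries for the parallel rescan of Eden.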
      if (_eden_chunk_index == 0 ||
          (pointer_delta(_eden_chunk_array[_eden_chunk_index],
                         _eden_chunk_array[_eden_chunk_index-1])
           >= CMSSamplingGrain)) {
        _eden_chunk_index++;  // commit sample
      }
    }
  }
  if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
    size_t used = get_eden_used();
    size_t capacity = get_eden_capacity();
    assert(used <= capacity, "Unexpected state of Eden");
    if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
      _abort_preclean = true;
    }
  }
}


size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
  assert(_collectorState == Precleaning ||
         _collectorState == AbortablePreclean, "incorrect state");
  ResourceMark rm;
  HandleMark hm;
  // Do one pass of scrubbing the discovered reference lists
  // to remove any reference objects with strongly-reachable
  // referents.
  if (clean_refs) {
    ReferenceProcessor* rp = ref_processor();
    CMSPrecleanRefsYieldClosure yield_cl(this);
    assert(rp->span().equals(_span), "Spans should be equal");
    CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
                                   &_markStack);
    CMSDrainMarkingStackClosure complete_trace(this,
                                  _span, &_markBitMap, &_markStack,
                                  &keep_alive);

    // We don't want this step to interfere with a young
    // collection because we don't want to take CPU
    // or memory bandwidth away from the young GC threads
    // (which may be as many as there are CPUs).
    // Note that we don't need to protect ourselves from
    // interference with mutators because they can't
    // manipulate the discovered reference lists nor affect
    // the computed reachability of the referents, the
    // only properties manipulated by the precleaning
    // of these reference lists.
    stopTimer();
    CMSTokenSyncWithLocks x(true /* is cms thread */,
                            bitMapLock());
    startTimer();
    sample_eden();
    // The following will yield to allow foreground
    // collection to proceed promptly. XXX YSR:
    // The code in this method may need further
    // tweaking for better performance and some restructuring
    // for cleaner interfaces.
    rp->preclean_discovered_references(
          rp->is_alive_non_header(), &keep_alive, &complete_trace,
          &yield_cl);
  }

  if (clean_survivor) {  // preclean the active survivor space(s)
    assert(_young_gen->kind() == Generation::DefNew ||
           _young_gen->kind() == Generation::ParNew ||
           _young_gen->kind() == Generation::ASParNew,
           "incorrect type for cast");
    DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
    PushAndMarkClosure pam_cl(this, _span, ref_processor(),
                              &_markBitMap, &_modUnionTable,
                              &_markStack, &_revisitStack,
                              true /* precleaning phase */);
    stopTimer();
    CMSTokenSyncWithLocks ts(true /* is cms thread */,
                             bitMapLock());
    startTimer();
    unsigned int before_count =
      GenCollectedHeap::heap()->total_collections();
    SurvivorSpacePrecleanClosure
      sss_cl(this, _span, &_markBitMap, &_markStack,
             &pam_cl, before_count, CMSYield);
    dng->from()->object_iterate_careful(&sss_cl);
    dng->to()->object_iterate_careful(&sss_cl);
  }
  MarkRefsIntoAndScanClosure
    mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
             &_markStack, &_revisitStack, this, CMSYield,
             true /* precleaning phase */);
  // CAUTION: The following closure has persistent state that may need to
  // be reset upon a decrease in the sequence of addresses it
  // processes.
  ScanMarkedObjectsAgainCarefullyClosure
    smoac_cl(this, _span,
      &_markBitMap, &_markStack, &_revisitStack, &mrias_cl, CMSYield);

  // Preclean dirty cards in ModUnionTable and CardTable using
  // appropriate convergence criterion;
  // repeat CMSPrecleanIter times unless we find that
  // we are losing.
  assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
  assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
         "Bad convergence multiplier");
  assert(CMSPrecleanThreshold >= 100,
         "Unreasonably low CMSPrecleanThreshold");
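
  // For concreteness, a sketch of the convergence test in the loop
  // below, assuming (illustratively) CMSPrecleanNumerator == 2 and
  // CMSPrecleanDenominator == 3: after the first pass we keep
  // iterating only while
  //   curNumCards * 3 <= lastNumCards * 2
  // i.e. while each pass shrinks the dirty-card population to at most
  // two-thirds of the previous pass's; we also stop as soon as a pass
  // processes no more than CMSPrecleanThreshold cards, since so small
  // a residue makes for a short remark pause anyway.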

  size_t numIter, cumNumCards, lastNumCards, curNumCards;
  for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
       numIter < CMSPrecleanIter;
       numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
    curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl);
    if (CMSPermGenPrecleaningEnabled) {
      curNumCards += preclean_mod_union_table(_permGen, &smoac_cl);
    }
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards);
    }
    // Either there are very few dirty cards, so re-mark
    // pause will be small anyway, or our pre-cleaning isn't
    // that much faster than the rate at which cards are being
    // dirtied, so we might as well stop and re-mark since
    // precleaning won't improve our re-mark time by much.
    if (curNumCards <= CMSPrecleanThreshold ||
        (numIter > 0 &&
         (curNumCards * CMSPrecleanDenominator >
          lastNumCards * CMSPrecleanNumerator))) {
      numIter++;
      cumNumCards += curNumCards;
      break;
    }
  }
  curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
  if (CMSPermGenPrecleaningEnabled) {
    curNumCards += preclean_card_table(_permGen, &smoac_cl);
  }
  cumNumCards += curNumCards;
  if (PrintGCDetails && PrintCMSStatistics != 0) {
    gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)",
                           curNumCards, cumNumCards, numIter);
  }
  return cumNumCards;  // as a measure of useful work done
}

// PRECLEANING NOTES:
// Precleaning involves:
// . reading the bits of the modUnionTable and clearing the set bits.
// . For the cards corresponding to the set bits, we scan the
//   objects on those cards. This means we need the free_list_lock
//   so that we can safely iterate over the CMS space when scanning
//   for oops.
// . When we scan the objects, we'll be both reading and setting
//   marks in the marking bit map, so we'll need the marking bit map.
// . For protecting _collectorState transitions, we take the CGC_lock.
// Note that any races in the reading of card table entries by the
// CMS thread on the one hand, and the clearing of those entries by the
// VM thread or the setting of those entries by the mutator threads on the
// other, are quite benign. However, for efficiency it makes sense to keep
// the VM thread from racing with the CMS thread while the latter is
// transferring dirty card info to the modUnionTable. We therefore also use the
// CGC_lock to protect the reading of the card table and the mod union
// table by the CMS thread.
// . We run concurrently with mutator updates, so scanning
//   needs to be done carefully -- we should not try to scan
//   potentially uninitialized objects.
//
// Locking strategy: While holding the CGC_lock, we scan over and
// reset a maximal dirty range of the mod union / card tables, then lock
// the free_list_lock and bitmap lock to do a full marking, then
// release these locks; and repeat the cycle. This allows for a
// certain amount of fairness in the sharing of these locks between
// the CMS collector on the one hand, and the VM thread and the
// mutators on the other.
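//
// Schematically, one iteration of the preclean loops below follows
// this pattern (a pseudo-code sketch, not literal code; the helper
// names stand in for the actual calls used in each loop):
//
//   stopTimer();
//   { CMSTokenSync ts(true);          // CMS token, under the CGC_lock
//     startTimer();
//     dirty = get_and_clear_dirty_range();   // MUT / card table
//   }
//   stopTimer();
//   { CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock);
//     startTimer();
//     scan_objects_in(dirty);         // the full marking work
//   }  // locks dropped here, letting others in; then repeat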

// NOTE: preclean_mod_union_table() and preclean_card_table()
// further below are largely identical; if you need to modify
// one of these methods, please check the other method too.

size_t CMSCollector::preclean_mod_union_table(
  ConcurrentMarkSweepGeneration* gen,
  ScanMarkedObjectsAgainCarefullyClosure* cl) {
  verify_work_stacks_empty();
  verify_overflow_empty();

  // strategy: starting with the first card, accumulate contiguous
  // ranges of dirty cards; clear these cards, then scan the region
  // covered by these cards.

  // Since all of the MUT is committed ahead, we can just use
  // that, in case the generations expand while we are precleaning.
  // It might also be fine to just use the committed part of the
  // generation, but we might potentially miss cards when the
  // generation is rapidly expanding while we are in the midst
  // of precleaning.
  HeapWord* startAddr = gen->reserved().start();
  HeapWord* endAddr   = gen->reserved().end();

  cl->setFreelistLock(gen->freelistLock());  // needed for yielding

  size_t numDirtyCards, cumNumDirtyCards;
  HeapWord *nextAddr, *lastAddr;
  for (cumNumDirtyCards = numDirtyCards = 0,
       nextAddr = lastAddr = startAddr;
       nextAddr < endAddr;
       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {

    ResourceMark rm;
    HandleMark hm;

    MemRegion dirtyRegion;
    {
      stopTimer();
      CMSTokenSync ts(true);
      startTimer();
      sample_eden();
      // Get dirty region starting at nextAddr (inclusive),
      // simultaneously clearing it.
      dirtyRegion =
        _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
      assert(dirtyRegion.start() >= nextAddr,
             "returned region inconsistent?");
    }
    // Remember where the next search should begin.
    // The returned region (if non-empty) is a right open interval,
    // so lastAddr is obtained from the right end of that
    // interval.
    lastAddr = dirtyRegion.end();
    // Should do something more transparent and less hacky XXX
    numDirtyCards =
      _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());

    // We'll scan the cards in the dirty region (with periodic
    // yields for foreground GC as needed).
    if (!dirtyRegion.is_empty()) {
      assert(numDirtyCards > 0, "consistency check");
      HeapWord* stop_point = NULL;
      {
        stopTimer();
        CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
                                 bitMapLock());
        startTimer();
        verify_work_stacks_empty();
        verify_overflow_empty();
        sample_eden();
        stop_point =
          gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
      }
      if (stop_point != NULL) {
        // The careful iteration stopped early either because it found an
        // uninitialized object, or because we were in the midst of an
        // "abortable preclean", which should now be aborted. Redirty
        // the bits corresponding to the partially-scanned or unscanned
        // cards. We'll either restart at the next block boundary or
        // abort the preclean.
        assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) ||
               (_collectorState == AbortablePreclean && should_abort_preclean()),
               "Unparsable objects should only be in perm gen.");

        stopTimer();
        CMSTokenSyncWithLocks ts(true, bitMapLock());
        startTimer();
        _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
        if (should_abort_preclean()) {
          break; // out of preclean loop
        } else {
          // Compute the next address at which preclean should pick up;
          // might need bitMapLock in order to read P-bits.
          lastAddr = next_card_start_after_block(stop_point);
        }
      }
    } else {
      assert(lastAddr == endAddr, "consistency check");
      assert(numDirtyCards == 0, "consistency check");
      break;
    }
  }
  verify_work_stacks_empty();
  verify_overflow_empty();
  return cumNumDirtyCards;
}

// NOTE: preclean_mod_union_table() above and preclean_card_table()
// below are largely identical; if you need to modify
// one of these methods, please check the other method too.

size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
  ScanMarkedObjectsAgainCarefullyClosure* cl) {
  // strategy: it's similar to preclean_mod_union_table above, in that
  // we accumulate contiguous ranges of dirty cards, mark these cards
  // precleaned, then scan the region covered by these cards.
  HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
  HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());

  cl->setFreelistLock(gen->freelistLock());  // needed for yielding

  size_t numDirtyCards, cumNumDirtyCards;
  HeapWord *lastAddr, *nextAddr;

  for (cumNumDirtyCards = numDirtyCards = 0,
       nextAddr = lastAddr = startAddr;
       nextAddr < endAddr;
       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {

    ResourceMark rm;
    HandleMark hm;

    MemRegion dirtyRegion;
    {
      // See comments in "Precleaning notes" above on why we
      // do this locking. XXX Could the locking overheads be
      // too high when dirty cards are sparse? [I don't think so.]
      stopTimer();
      CMSTokenSync x(true); // is cms thread
      startTimer();
      sample_eden();
      // Get and clear dirty region from card table
      dirtyRegion = _ct->ct_bs()->dirty_card_range_after_preclean(
                      MemRegion(nextAddr, endAddr));
      assert(dirtyRegion.start() >= nextAddr,
             "returned region inconsistent?");
    }
    lastAddr = dirtyRegion.end();
    numDirtyCards =
      dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;

    if (!dirtyRegion.is_empty()) {
      stopTimer();
      CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
      startTimer();
      sample_eden();
      verify_work_stacks_empty();
      verify_overflow_empty();
      HeapWord* stop_point =
        gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
      if (stop_point != NULL) {
        // The careful iteration stopped early because it found an
        // uninitialized object. Redirty the bits corresponding to the
        // partially-scanned or unscanned cards, and start again at the
        // next block boundary.
        assert(CMSPermGenPrecleaningEnabled ||
               (_collectorState == AbortablePreclean && should_abort_preclean()),
               "Unparsable objects should only be in perm gen.");
        _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
        if (should_abort_preclean()) {
          break; // out of preclean loop
        } else {
          // Compute the next address at which preclean should pick up.
          lastAddr = next_card_start_after_block(stop_point);
        }
      }
    } else {
      break;
    }
  }
  verify_work_stacks_empty();
  verify_overflow_empty();
  return cumNumDirtyCards;
}

void CMSCollector::checkpointRootsFinal(bool asynch,
  bool clear_all_soft_refs, bool init_mark_was_synchronous) {
  assert(_collectorState == FinalMarking, "incorrect state transition?");
  check_correct_thread_executing();
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  verify_work_stacks_empty();
  verify_overflow_empty();

  SpecializationStats::clear();
  if (PrintGCDetails) {
    gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
                        _young_gen->used() / K,
                        _young_gen->capacity() / K);
  }
  if (asynch) {
    if (CMSScavengeBeforeRemark) {
      GenCollectedHeap* gch = GenCollectedHeap::heap();
      // Temporarily set the flag to false; GCH->do_collection expects
      // it to be false and will set it back to true.
      FlagSetting fl(gch->_is_gc_active, false);
      NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark",
        PrintGCDetails && Verbose, true, gclog_or_tty);)
      int level = _cmsGen->level() - 1;
      if (level >= 0) {
        gch->do_collection(true,   // full (i.e. force, see below)
                           false,  // !clear_all_soft_refs
                           0,      // size
                           false,  // is_tlab
                           level   // max_level
                          );
      }
    }
    FreelistLocker x(this);
    MutexLockerEx y(bitMapLock(),
                    Mutex::_no_safepoint_check_flag);
    assert(!init_mark_was_synchronous, "but that's impossible!");
    checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
  } else {
    // already have all the locks
    checkpointRootsFinalWork(asynch, clear_all_soft_refs,
                             init_mark_was_synchronous);
  }
  verify_work_stacks_empty();
  verify_overflow_empty();
  SpecializationStats::print();
}

void CMSCollector::checkpointRootsFinalWork(bool asynch,
  bool clear_all_soft_refs, bool init_mark_was_synchronous) {

  NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)

  assert(haveFreelistLocks(), "must have free list locks");
  assert_lock_strong(bitMapLock());

  if (UseAdaptiveSizePolicy) {
    size_policy()->checkpoint_roots_final_begin();
  }

  ResourceMark rm;
  HandleMark hm;

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  if (should_unload_classes()) {
    CodeCache::gc_prologue();
  }
  assert(haveFreelistLocks(), "must have free list locks");
  assert_lock_strong(bitMapLock());

  if (!init_mark_was_synchronous) {
    // We might assume that we need not fill TLAB's when
    // CMSScavengeBeforeRemark is set, because we may have just done
    // a scavenge which would have filled all TLAB's -- and besides
    // Eden would be empty. This however may not always be the case --
    // for instance although we asked for a scavenge, it may not have
    // happened because of a JNI critical section. We probably need
    // a policy for deciding whether we can in that case wait until
    // the critical section releases and then do the remark following
    // the scavenge, and skip it here. In the absence of that policy,
    // or of an indication of whether the scavenge did indeed occur,
    // we cannot rely on TLAB's having been filled and must do
    // so here just in case a scavenge did not happen.
    gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
    // Update the saved marks which may affect the root scans.
    gch->save_marks();

    {
      COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)

      // Note on the role of the mod union table:
      // Since the marker in "markFromRoots" marks concurrently with
      // mutators, it is possible for some reachable objects not to have been
      // scanned. For instance, an only reference to an object A was
      // placed in object B after the marker scanned B. Unless B is rescanned,
      // A would be collected. Such updates to references in marked objects
      // are detected via the mod union table which is the set of all cards
      // dirtied since the first checkpoint in this GC cycle and prior to
      // the most recent young generation GC, minus those cleaned up by the
      // concurrent precleaning.
      if (CMSParallelRemarkEnabled && ParallelGCThreads > 0) {
        TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty);
        do_remark_parallel();
      } else {
        TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
                    gclog_or_tty);
        do_remark_non_parallel();
      }
    }
  } else {
    assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
    // The initial mark was stop-world, so there's no rescanning to
    // do; go straight on to the next step below.
  }
  verify_work_stacks_empty();
  verify_overflow_empty();

  {
    NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
    refProcessingWork(asynch, clear_all_soft_refs);
  }
  verify_work_stacks_empty();
  verify_overflow_empty();

  if (should_unload_classes()) {
    CodeCache::gc_epilogue();
  }

  // If we encountered any (marking stack / work queue) overflow
  // events during the current CMS cycle, take appropriate
  // remedial measures, where possible, so as to try and avoid
  // recurrence of that condition.
  assert(_markStack.isEmpty(), "No grey objects");
  size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
                     _ser_kac_ovflw;
  if (ser_ovflw > 0) {
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print_cr("Marking stack overflow (benign) "
        "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
        _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
        _ser_kac_ovflw);
    }
    _markStack.expand();
    _ser_pmc_remark_ovflw = 0;
    _ser_pmc_preclean_ovflw = 0;
    _ser_kac_ovflw = 0;
  }
  if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print_cr("Work queue overflow (benign) "
        "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
        _par_pmc_remark_ovflw, _par_kac_ovflw);
    }
    _par_pmc_remark_ovflw = 0;
    _par_kac_ovflw = 0;
  }
  if (PrintCMSStatistics != 0) {
    if (_markStack._hit_limit > 0) {
      gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
                             _markStack._hit_limit);
    }
    if (_markStack._failed_double > 0) {
      gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
                             " current capacity "SIZE_FORMAT,
                             _markStack._failed_double,
                             _markStack.capacity());
    }
  }
  _markStack._hit_limit = 0;
  _markStack._failed_double = 0;

  if ((VerifyAfterGC || VerifyDuringGC) &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    verify_after_remark();
  }

  // Change under the freelistLocks.
  _collectorState = Sweeping;
  // Call isAllClear() under bitMapLock
  assert(_modUnionTable.isAllClear(), "Should be clear by end of the"
         " final marking");
  if (UseAdaptiveSizePolicy) {
    size_policy()->checkpoint_roots_final_end(gch->gc_cause());
  }
}

// Parallel remark task
class CMSParRemarkTask: public AbstractGangTask {
  CMSCollector* _collector;
  WorkGang*     _workers;
  int           _n_workers;
  CompactibleFreeListSpace* _cms_space;
  CompactibleFreeListSpace* _perm_space;

  // The per-thread work queues, available here for stealing.
  OopTaskQueueSet*       _task_queues;
  ParallelTaskTerminator _term;

 public:
  CMSParRemarkTask(CMSCollector* collector,
                   CompactibleFreeListSpace* cms_space,
                   CompactibleFreeListSpace* perm_space,
                   int n_workers, WorkGang* workers,
                   OopTaskQueueSet* task_queues):
    AbstractGangTask("Rescan roots and grey objects in parallel"),
    _collector(collector),
    _cms_space(cms_space), _perm_space(perm_space),
    _n_workers(n_workers),
    _workers(workers),
    _task_queues(task_queues),
    _term(workers->total_workers(), task_queues) { }

  OopTaskQueueSet* task_queues() { return _task_queues; }

  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }

  ParallelTaskTerminator* terminator() { return &_term; }

  void work(int i);

 private:
  // Work method in support of parallel rescan ... of young gen spaces
  void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl,
                             ContiguousSpace* space,
                             HeapWord** chunk_array, size_t chunk_top);

  // ... of dirty cards in old space
  void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
                                  Par_MarkRefsIntoAndScanClosure* cl);

  // ... work stealing for the above
  void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
};

void CMSParRemarkTask::work(int i) {
  elapsedTimer _timer;
  ResourceMark rm;
  HandleMark hm;

  // ---------- rescan from roots --------------
  _timer.start();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
    _collector->_span, _collector->ref_processor(),
    &(_collector->_markBitMap),
    work_queue(i), &(_collector->_revisitStack));

  // Rescan young gen roots first since these are likely
  // coarsely partitioned and may, on that account, constitute
  // the critical path; thus, it's best to start off that
  // work first.
  // ---------- young gen roots --------------
  {
    DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
    EdenSpace* eden_space = dng->eden();
    ContiguousSpace* from_space = dng->from();
    ContiguousSpace* to_space   = dng->to();

    HeapWord** eca = _collector->_eden_chunk_array;
    size_t     ect = _collector->_eden_chunk_index;
    HeapWord** sca = _collector->_survivor_chunk_array;
    size_t     sct = _collector->_survivor_chunk_index;

    assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
    assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");

    do_young_space_rescan(i, &par_mrias_cl, to_space, NULL, 0);
    do_young_space_rescan(i, &par_mrias_cl, from_space, sca, sct);
    do_young_space_rescan(i, &par_mrias_cl, eden_space, eca, ect);

    _timer.stop();
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print_cr(
        "Finished young gen rescan work in %dth thread: %3.3f sec",
        i, _timer.seconds());
    }
  }

  // ---------- remaining roots --------------
  _timer.reset();
  _timer.start();
  gch->gen_process_strong_roots(_collector->_cmsGen->level(),
                                false,  // yg was scanned above
                                true,   // collecting perm gen
                                SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                                NULL, &par_mrias_cl);
  _timer.stop();
  if (PrintCMSStatistics != 0) {
    gclog_or_tty->print_cr(
      "Finished remaining root rescan work in %dth thread: %3.3f sec",
      i, _timer.seconds());
  }

  // ---------- rescan dirty cards ------------
  _timer.reset();
  _timer.start();

  // Do the rescan tasks for each of the two spaces
  // (cms_space and perm_space) in turn.
  do_dirty_card_rescan_tasks(_cms_space, i, &par_mrias_cl);
  do_dirty_card_rescan_tasks(_perm_space, i, &par_mrias_cl);
  _timer.stop();
  if (PrintCMSStatistics != 0) {
    gclog_or_tty->print_cr(
      "Finished dirty card rescan work in %dth thread: %3.3f sec",
      i, _timer.seconds());
  }

  // ---------- steal work from other threads ...
  // ---------- ... and drain overflow list.
  _timer.reset();
  _timer.start();
  do_work_steal(i, &par_mrias_cl, _collector->hash_seed(i));
  _timer.stop();
  if (PrintCMSStatistics != 0) {
    gclog_or_tty->print_cr(
      "Finished work stealing in %dth thread: %3.3f sec",
      i, _timer.seconds());
  }
}

void
CMSParRemarkTask::do_young_space_rescan(int i,
  Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space,
  HeapWord** chunk_array, size_t chunk_top) {
  // Until all tasks completed:
  // . claim an unclaimed task
  // . compute region boundaries corresponding to task claimed
  //   using chunk_array
  // . par_oop_iterate(cl) over that region
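  //
  // For example (hypothetical values): with chunk_array == [a, b] and
  // chunk_top == 2, the claimable tasks below are the regions
  // [bottom, a), [a, b) and [b, top) -- chunk_top + 1 tasks in all,
  // which is why the set-up code sizes n_tasks as chunk index + 1.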

  ResourceMark rm;
  HandleMark hm;

  SequentialSubTasksDone* pst = space->par_seq_tasks();
  assert(pst->valid(), "Uninitialized use?");

  int nth_task = 0;
  int n_tasks  = pst->n_tasks();

  HeapWord *start, *end;
  while (!pst->is_task_claimed(/* reference */ nth_task)) {
    // We claimed task # nth_task; compute its boundaries.
    if (chunk_top == 0) {  // no samples were taken
      assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
      start = space->bottom();
      end   = space->top();
    } else if (nth_task == 0) {
      start = space->bottom();
      end   = chunk_array[nth_task];
    } else if (nth_task < (jint)chunk_top) {
      assert(nth_task >= 1, "Control point invariant");
      start = chunk_array[nth_task - 1];
      end   = chunk_array[nth_task];
    } else {
      assert(nth_task == (jint)chunk_top, "Control point invariant");
      start = chunk_array[chunk_top - 1];
      end   = space->top();
    }
    MemRegion mr(start, end);
    // Verify that mr is in space
    assert(mr.is_empty() || space->used_region().contains(mr),
           "Should be in space");
    // Verify that "start" is an object boundary
    assert(mr.is_empty() || oop(mr.start())->is_oop(),
           "Should be an oop");
    space->par_oop_iterate(mr, cl);
  }
  pst->all_tasks_completed();
}

void
CMSParRemarkTask::do_dirty_card_rescan_tasks(
  CompactibleFreeListSpace* sp, int i,
  Par_MarkRefsIntoAndScanClosure* cl) {
  // Until all tasks completed:
  // . claim an unclaimed task
  // . compute region boundaries corresponding to task claimed
  // . transfer dirty bits ct->mut for that region
  // . apply rescanclosure to dirty mut bits for that region

  ResourceMark rm;
  HandleMark hm;

  OopTaskQueue* work_q = work_queue(i);
  ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
  // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
  // CAUTION: This closure has state that persists across calls to
  // the work method dirty_range_iterate_clear() in that it has
  // embedded in it a (subtype of) UpwardsObjectClosure. The
  // use of that state in the embedded UpwardsObjectClosure instance
  // assumes that the cards are always iterated (even if in parallel
  // by several threads) in monotonically increasing order per
  // thread. This is true of the implementation below which picks
  // card ranges (chunks) in monotonically increasing order globally
  // and, a fortiori, in monotonically increasing order per thread
  // (the latter order being a subsequence of the former).
  // If the work code below is ever reorganized into a more chaotic
  // work-partitioning form than the current "sequential tasks"
  // paradigm, the use of that persistent state will have to be
  // revisited and modified appropriately. See also related
  // bug 4756801 work on which should examine this code to make
  // sure that the changes there do not run counter to the
  // assumptions made here and necessary for correctness and
  // efficiency. Note also that this code might yield inefficient
  // behaviour in the case of very large objects that span one or
  // more work chunks. Such objects would potentially be scanned
  // several times redundantly. Work on 4756801 should try and
  // address that performance anomaly if at all possible. XXX
  MemRegion  full_span  = _collector->_span;
  CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
  CMSMarkStack* rs = &(_collector->_revisitStack);   // shared
  MarkFromDirtyCardsClosure
    greyRescanClosure(_collector, full_span, // entire span of interest
                      sp, bm, work_q, rs, cl);

  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
  assert(pst->valid(), "Uninitialized use?");
  int nth_task = 0;
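  // Why card_size * BitsPerWord below: each word of the mod union
  // table covers BitsPerWord cards, and each card covers card_size
  // bytes of heap, so one MUT word maps to card_size * BitsPerWord
  // bytes of heap (e.g. 32K with 512-byte cards and 64-bit words --
  // assumed, platform-dependent values, for illustration only).
  // Chunking the span at this alignment guarantees that no two
  // workers ever read-modify-write the same MUT word, which is what
  // makes the unsynchronized partition below safe.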
  const int alignment = CardTableModRefBS::card_size * BitsPerWord;
  MemRegion span = sp->used_region();
  HeapWord* start_addr = span.start();
  HeapWord* end_addr   = (HeapWord*)round_to((intptr_t)span.end(),
                                             alignment);
  const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
  assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
         start_addr, "Check alignment");
  assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
         chunk_size, "Check alignment");

  while (!pst->is_task_claimed(/* reference */ nth_task)) {
    // Having claimed the nth_task, compute corresponding mem-region,
    // which is a fortiori aligned correctly (i.e. at a MUT boundary).
    // The alignment restriction ensures that we do not need any
    // synchronization with other gang-workers while setting or
    // clearing bits in this chunk of the MUT.
    MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
                                    start_addr + (nth_task+1)*chunk_size);
    // The last chunk's end might be way beyond end of the
    // used region. In that case pull back appropriately.
    if (this_span.end() > end_addr) {
      this_span.set_end(end_addr);
      assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
    }
    // Iterate over the dirty cards covering this chunk, marking them
    // precleaned, and setting the corresponding bits in the mod union
    // table. Since we have been careful to partition at Card and MUT-word
    // boundaries no synchronization is needed between parallel threads.
    _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
                                                 &modUnionClosure);

    // Having transferred these marks into the modUnionTable,
    // rescan the marked objects on the dirty cards in the modUnionTable.
    // Even if this is at a synchronous collection, the initial marking
    // may have been done during an asynchronous collection so there
    // may be dirty bits in the mod-union table.
    _collector->_modUnionTable.dirty_range_iterate_clear(
                  this_span, &greyRescanClosure);
    _collector->_modUnionTable.verifyNoOneBitsInRange(
                                 this_span.start(),
                                 this_span.end());
  }
  pst->all_tasks_completed();  // declare that i am done
}

// . see if we can share work_queues with ParNew? XXX
void
CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
                                int* seed) {
  OopTaskQueue* work_q = work_queue(i);
  NOT_PRODUCT(int num_steals = 0;)
  oop obj_to_scan;
  CMSBitMap* bm = &(_collector->_markBitMap);
  size_t num_from_overflow_list =
    MIN2((size_t)work_q->max_elems()/4,
         (size_t)ParGCDesiredObjsFromOverflowList);

  while (true) {
    // Completely finish any left over work from (an) earlier round(s)
    cl->trim_queue(0);
    // Now check if there's any work in the overflow list
    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
                                                work_q)) {
      // found something in global overflow list;
      // not yet ready to go stealing work from others.
      // We'd like to assert(work_q->size() != 0, ...)
      // because we just took work from the overflow list,
      // but of course we can't since all of that could have
      // been already stolen from us.
      // "He giveth and He taketh away."
      continue;
    }
    // Verify that we have no work before we resort to stealing
    assert(work_q->size() == 0, "Have work, shouldn't steal");
    // Try to steal from other queues that have work
    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
      NOT_PRODUCT(num_steals++;)
      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
      assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
      // Do scanning work
      obj_to_scan->oop_iterate(cl);
      // Loop around, finish this work, and try to steal some more
    } else if (terminator()->offer_termination()) {
      break;  // nirvana from the infinite cycle
    }
  }
  NOT_PRODUCT(
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
    }
  )
  assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
         "Else our work is not yet done");
}

// Return a thread-local PLAB recording array, as appropriate.
void* CMSCollector::get_data_recorder(int thr_num) {
  if (_survivor_plab_array != NULL &&
      (CMSPLABRecordAlways ||
       (_collectorState > Marking && _collectorState < FinalMarking))) {
    assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
    ChunkArray* ca = &_survivor_plab_array[thr_num];
    ca->reset();  // clear it so that fresh data is recorded
    return (void*) ca;
  } else {
    return NULL;
  }
}

// Reset all the thread-local PLAB recording arrays
void CMSCollector::reset_survivor_plab_arrays() {
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _survivor_plab_array[i].reset();
  }
}

// Merge the per-thread plab arrays into the global survivor chunk
// array which will provide the partitioning of the survivor space
// for CMS rescan.
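// For example (illustrative values only): with ParallelGCThreads == 2
// and per-thread PLAB start arrays [a, c, e] and [b, d], each sorted
// by address, the loop below produces the merged chunk array
// [a, b, c, d, e] -- a classic k-way merge of k sorted sequences,
// advancing _cursor[j] for whichever thread j supplied the smallest
// unconsumed address in each round.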
void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv) {
  assert(_survivor_plab_array  != NULL, "Error");
  assert(_survivor_chunk_array != NULL, "Error");
  assert(_collectorState == FinalMarking, "Error");
  for (uint j = 0; j < ParallelGCThreads; j++) {
    _cursor[j] = 0;
  }
  HeapWord* top = surv->top();
  size_t i;
  for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
    HeapWord* min_val = top;  // Higher than any PLAB address
    uint      min_tid = 0;    // position of min_val this round
    for (uint j = 0; j < ParallelGCThreads; j++) {
      ChunkArray* cur_sca = &_survivor_plab_array[j];
      if (_cursor[j] == cur_sca->end()) {
        continue;
      }
      assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
      HeapWord* cur_val = cur_sca->nth(_cursor[j]);
      assert(surv->used_region().contains(cur_val), "Out of bounds value");
      if (cur_val < min_val) {
        min_tid = j;
        min_val = cur_val;
      } else {
        assert(cur_val < top, "All recorded addresses should be less");
      }
    }
    // At this point min_val and min_tid are respectively
    // the least address in _survivor_plab_array[j]->nth(_cursor[j])
    // and the thread (j) that witnesses that address.
    // We record this address in the _survivor_chunk_array[i]
    // and increment _cursor[min_tid] prior to the next round i.
    if (min_val == top) {
      break;
    }
    _survivor_chunk_array[i] = min_val;
    _cursor[min_tid]++;
  }
  // We are all done; record the size of the _survivor_chunk_array
  _survivor_chunk_index = i; // exclusive: [0, i)
  if (PrintCMSStatistics > 0) {
    gclog_or_tty->print(" (Survivor: " SIZE_FORMAT " chunks) ", i);
  }
  // Verify that we used up all the recorded entries
#ifdef ASSERT
  size_t total = 0;
  for (uint j = 0; j < ParallelGCThreads; j++) {
    assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
    total += _cursor[j];
  }
  assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
  // Check that the merged array is in sorted order
  if (total > 0) {
    for (size_t i = 0; i < total - 1; i++) {
      if (PrintCMSStatistics > 0) {
        gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
                            i, _survivor_chunk_array[i]);
      }
      assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
             "Not sorted");
    }
  }
#endif // ASSERT
}

// Set up the space's par_seq_tasks structure for work claiming
// for parallel rescan of young gen.
// See ParRescanTask where this is currently used.
void
CMSCollector::
initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
  assert(n_threads > 0, "Unexpected n_threads argument");
  DefNewGeneration* dng = (DefNewGeneration*)_young_gen;

  // Eden space
  {
    SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
    assert(!pst->valid(), "Clobbering existing data?");
    // Each valid entry in [0, _eden_chunk_index) represents a task.
    size_t n_tasks = _eden_chunk_index + 1;  // +1 for the tail region above the last sample
    assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
    pst->set_par_threads(n_threads);
    pst->set_n_tasks((int)n_tasks);
  }

  // Merge the survivor plab arrays into _survivor_chunk_array
  if (_survivor_plab_array != NULL) {
    merge_survivor_plab_arrays(dng->from());
  } else {
    assert(_survivor_chunk_index == 0, "Error");
  }

  // To space
  {
    SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
    assert(!pst->valid(), "Clobbering existing data?");
    pst->set_par_threads(n_threads);
    pst->set_n_tasks(1);
    assert(pst->valid(), "Error");
  }

  // From space
  {
    SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
    assert(!pst->valid(), "Clobbering existing data?");
    size_t n_tasks = _survivor_chunk_index + 1;  // +1 for the tail region, as for Eden above
    assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
    pst->set_par_threads(n_threads);
    pst->set_n_tasks((int)n_tasks);
    assert(pst->valid(), "Error");
  }
}

// Parallel version of remark
void CMSCollector::do_remark_parallel() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  int n_workers = workers->total_workers();
  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
  CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();

  CMSParRemarkTask tsk(this,
    cms_space, perm_space,
    n_workers, workers, task_queues());

  // Set up for parallel process_strong_roots work.
  gch->set_par_threads(n_workers);
  gch->change_strong_roots_parity();
  // We won't be iterating over the cards in the card table updating
  // the younger_gen cards, so we shouldn't call the following else
  // the verification code as well as subsequent younger_refs_iterate
  // code would get confused. XXX
  // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel

  // The young gen rescan work will not be done as part of
  // process_strong_roots (which currently doesn't know how to
  // parallelize such a scan), but rather will be broken up into
  // a set of parallel tasks (via the sampling that the [abortable]
  // preclean phase did of EdenSpace, plus the [two] tasks of
  // scanning the [two] survivor spaces). Further fine-grain
  // parallelization of the scanning of the survivor spaces
  // themselves, and of precleaning of the younger gen itself
  // is deferred to the future.
  initialize_sequential_subtasks_for_young_gen_rescan(n_workers);

  // The dirty card rescan work is broken up into a "sequence"
  // of parallel tasks (per constituent space) that are dynamically
  // claimed by the parallel threads.
  cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
  perm_space->initialize_sequential_subtasks_for_rescan(n_workers);

  // It turns out that even when we're using 1 thread, doing the work in a
  // separate thread causes wide variance in run times. We can't help this
  // in the multi-threaded case, but we special-case n=1 here to get
  // repeatable measurements of the 1-thread overhead of the parallel code.
  if (n_workers > 1) {
    // Make refs discovery MT-safe
    ReferenceProcessorMTMutator mt(ref_processor(), true);
    workers->run_task(&tsk);
  } else {
    tsk.work(0);
  }
  gch->set_par_threads(0);  // 0 ==> non-parallel.
  // restore, single-threaded for now, any preserved marks
  // as a result of work_q overflow
  restore_preserved_marks_if_any();
}

// Non-parallel version of remark
void CMSCollector::do_remark_non_parallel() {
  ResourceMark rm;
  HandleMark hm;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  MarkRefsIntoAndScanClosure
    mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
             &_markStack, &_revisitStack, this,
             false /* should_yield */, false /* not precleaning */);
  MarkFromDirtyCardsClosure
    markFromDirtyCardsClosure(this, _span,
                              NULL,  // space is set further below
                              &_markBitMap, &_markStack, &_revisitStack,
                              &mrias_cl);
  {
    TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
    // Iterate over the dirty cards, marking them precleaned, and
    // setting the corresponding bits in the mod union table.
    {
      ModUnionClosure modUnionClosure(&_modUnionTable);
      _ct->ct_bs()->dirty_card_iterate(
                      _cmsGen->used_region(),
                      &modUnionClosure);
      _ct->ct_bs()->dirty_card_iterate(
                      _permGen->used_region(),
                      &modUnionClosure);
    }
    // Having transferred these marks into the modUnionTable, we just need
    // to rescan the marked objects on the dirty cards in the modUnionTable.
    // The initial marking may have been done during an asynchronous
    // collection so there may be dirty bits in the mod-union table.
    const int alignment =
      CardTableModRefBS::card_size * BitsPerWord;
    {
      // ... First handle dirty cards in CMS gen
      markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
      MemRegion ur = _cmsGen->used_region();
      HeapWord* lb = ur.start();
      HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
      MemRegion cms_span(lb, ub);
      _modUnionTable.dirty_range_iterate_clear(cms_span,
                                               &markFromDirtyCardsClosure);
      verify_work_stacks_empty();
      if (PrintCMSStatistics != 0) {
        gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
          markFromDirtyCardsClosure.num_dirty_cards());
      }
    }
    {
      // .. and then repeat for dirty cards in perm gen
      markFromDirtyCardsClosure.set_space(_permGen->cmsSpace());
      MemRegion ur = _permGen->used_region();
      HeapWord* lb = ur.start();
      HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
      MemRegion perm_span(lb, ub);
      _modUnionTable.dirty_range_iterate_clear(perm_span,
                                               &markFromDirtyCardsClosure);
      verify_work_stacks_empty();
      if (PrintCMSStatistics != 0) {
        gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in perm gen) ",
          markFromDirtyCardsClosure.num_dirty_cards());
      }
    }
  }
  if (VerifyDuringGC &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(true);
  }
  {
    TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty);

    verify_work_stacks_empty();

    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
    gch->gen_process_strong_roots(_cmsGen->level(),
                                  true,  // younger gens as roots
                                  true,  // collecting perm gen
                                  SharedHeap::ScanningOption(roots_scanning_options()),
                                  NULL, &mrias_cl);
  }
  verify_work_stacks_empty();
  // Restore evacuated mark words, if any, used for overflow list links
  if (!CMSOverflowEarlyRestoration) {
    restore_preserved_marks_if_any();
  }
  verify_overflow_empty();
}

////////////////////////////////////////////////////////
// Parallel Reference Processing Task Proxy Class
////////////////////////////////////////////////////////
class CMSRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  CMSCollector*          _collector;
  CMSBitMap*             _mark_bit_map;
  MemRegion              _span;
  OopTaskQueueSet*       _task_queues;
  ParallelTaskTerminator _term;
  ProcessTask&           _task;

public:
  CMSRefProcTaskProxy(ProcessTask&     task,
                      CMSCollector*    collector,
                      const MemRegion& span,
                      CMSBitMap*       mark_bit_map,
                      int              total_workers,
                      OopTaskQueueSet* task_queues):
    AbstractGangTask("Process referents by policy in parallel"),
    _task(task),
    _collector(collector), _span(span), _mark_bit_map(mark_bit_map),
    _task_queues(task_queues),
    _term(total_workers, task_queues)
  { }

  OopTaskQueueSet* task_queues() { return _task_queues; }

  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }

  ParallelTaskTerminator* terminator() { return &_term; }

  void do_work_steal(int i,
                     CMSParDrainMarkingStackClosure* drain,
                     CMSParKeepAliveClosure* keep_alive,
                     int* seed);

  virtual void work(int i);
};

void CMSRefProcTaskProxy::work(int i) {
  CMSParKeepAliveClosure par_keep_alive(_collector, _span,
    _mark_bit_map, work_queue(i));
  CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
    _mark_bit_map, work_queue(i));
  CMSIsAliveClosure is_alive_closure(_mark_bit_map);
  _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
  if (_task.marks_oops_alive()) {
    do_work_steal(i, &par_drain_stack, &par_keep_alive,
                  _collector->hash_seed(i));
  }
  assert(work_queue(i)->size() == 0, "work_queue should be empty");
  assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
}

class CMSRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  CMSRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("Enqueue reference objects in parallel"),
      _task(task)
  { }

  virtual void work(int i)
  {
    _task.work(i);
  }
};

CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
  MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
   _collector(collector),
   _span(span),
   _bit_map(bit_map),
   _work_queue(work_queue),
   _mark_and_push(collector, span, bit_map, work_queue),
   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
{ }

// . see if we can share work_queues with ParNew? XXX
void CMSRefProcTaskProxy::do_work_steal(int i,
  CMSParDrainMarkingStackClosure* drain,
  CMSParKeepAliveClosure* keep_alive,
  int* seed) {
  OopTaskQueue* work_q = work_queue(i);
  NOT_PRODUCT(int num_steals = 0;)
  oop obj_to_scan;
  size_t num_from_overflow_list =
    MIN2((size_t)work_q->max_elems()/4,
         (size_t)ParGCDesiredObjsFromOverflowList);

  while (true) {
    // Completely finish any left over work from (an) earlier round(s)
    drain->trim_queue(0);
    // Now check if there's any work in the overflow list
    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
                                                work_q)) {
      // Found something in global overflow list;
      // not yet ready to go stealing work from others.
      // We'd like to assert(work_q->size() != 0, ...)
      // because we just took work from the overflow list,
      // but of course we can't, since all of that might have
      // been already stolen from us.
      continue;
    }
    // Verify that we have no work before we resort to stealing
    assert(work_q->size() == 0, "Have work, shouldn't steal");
    // Try to steal from other queues that have work
    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
      NOT_PRODUCT(num_steals++;)
      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
      assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
      // Do scanning work
      obj_to_scan->oop_iterate(keep_alive);
      // Loop around, finish this work, and try to steal some more
    } else if (terminator()->offer_termination()) {
      break;  // nirvana from the infinite cycle
    }
  }
  NOT_PRODUCT(
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
    }
  )
}

void CMSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  int n_workers = workers->total_workers();
  CMSRefProcTaskProxy rp_task(task, &_collector,
                              _collector.ref_processor()->span(),
                              _collector.markBitMap(),
                              n_workers, _collector.task_queues());
  workers->run_task(&rp_task);
}

void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  CMSRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {

  ResourceMark rm;
  HandleMark hm;
  ReferencePolicy* soft_ref_policy;

  assert(!ref_processor()->enqueuing_is_done(), "Enqueuing should not be complete");
  // Process weak references.
  if (clear_all_soft_refs) {
    soft_ref_policy = new AlwaysClearPolicy();
  } else {
#ifdef COMPILER2
    soft_ref_policy = new LRUMaxHeapPolicy();
#else
    soft_ref_policy = new LRUCurrentHeapPolicy();
#endif // COMPILER2
  }
  verify_work_stacks_empty();

  ReferenceProcessor* rp = ref_processor();
  assert(rp->span().equals(_span), "Spans should be equal");
  CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
                                          &_markStack);
  CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
                                _span, &_markBitMap, &_markStack,
                                &cmsKeepAliveClosure);
  {
    TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
    if (rp->processing_is_mt()) {
      CMSRefProcTaskExecutor task_executor(*this);
      rp->process_discovered_references(soft_ref_policy,
                                        &_is_alive_closure,
                                        &cmsKeepAliveClosure,
                                        &cmsDrainMarkingStackClosure,
                                        &task_executor);
    } else {
      rp->process_discovered_references(soft_ref_policy,
                                        &_is_alive_closure,
                                        &cmsKeepAliveClosure,
                                        &cmsDrainMarkingStackClosure,
                                        NULL);
    }
    verify_work_stacks_empty();
  }

  if (should_unload_classes()) {
    {
      TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);

      // Follow SystemDictionary roots and unload classes
      bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);

      // Follow CodeCache roots and unload any methods marked for unloading
      CodeCache::do_unloading(&_is_alive_closure,
                              &cmsKeepAliveClosure,
                              purged_class);

      cmsDrainMarkingStackClosure.do_void();
      verify_work_stacks_empty();

      // Update subklass/sibling/implementor links in KlassKlass descendants
      assert(!_revisitStack.isEmpty(), "revisit stack should not be empty");
      oop k;
      while ((k = _revisitStack.pop()) != NULL) {
        ((Klass*)(oopDesc*)k)->follow_weak_klass_links(
                        &_is_alive_closure,
                        &cmsKeepAliveClosure);
      }
      assert(!ClassUnloading ||
             (_markStack.isEmpty() && overflow_list_is_empty()),
             "Should not have found new reachable objects");
      assert(_revisitStack.isEmpty(), "revisit stack should have been drained");
      cmsDrainMarkingStackClosure.do_void();
      verify_work_stacks_empty();
    }

    {
      TraceTime t("scrub symbol & string tables", PrintGCDetails, false, gclog_or_tty);
      // Now clean up stale oops in SymbolTable and StringTable
      SymbolTable::unlink(&_is_alive_closure);
      StringTable::unlink(&_is_alive_closure);
    }
  }

  verify_work_stacks_empty();
  // Restore any preserved marks as a result of mark stack or
  // work queue overflow
  restore_preserved_marks_if_any();  // done single-threaded for now

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    CMSRefProcTaskExecutor task_executor(*this);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "should have been disabled");

  // JVMTI object tagging is based on JNI weak refs. If any of these
  // refs were cleared then JVMTI needs to update its maps and
  // maybe post ObjectFrees to agents.
  JvmtiExport::cms_ref_processing_epilogue();
}

#ifndef PRODUCT
void CMSCollector::check_correct_thread_executing() {
  Thread* t = Thread::current();
  // Only the VM thread or the CMS thread should be here.
  assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
         "Unexpected thread type");
5758 // If this is the vm thread, the foreground process
5759 // should not be waiting. Note that _foregroundGCIsActive is
5760 // true while the foreground collector is waiting.
5761 if (_foregroundGCShouldWait) {
5762 // We cannot be the VM thread
5763 assert(t->is_ConcurrentGC_thread(),
5764 "Should be CMS thread");
5765 } else {
5766 // We can be the CMS thread only if we are in a stop-world
5767 // phase of CMS collection.
5768 if (t->is_ConcurrentGC_thread()) {
5769 assert(_collectorState == InitialMarking ||
5770 _collectorState == FinalMarking,
5771 "Should be a stop-world phase");
5772 // The CMS thread should be holding the CMS_token.
5773 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5774 "Potential interference with concurrently "
5775 "executing VM thread");
5776 }
5777 }
5778 }
5779 #endif
5780
5781 void CMSCollector::sweep(bool asynch) {
5782 assert(_collectorState == Sweeping, "just checking");
5783 check_correct_thread_executing();
5784 verify_work_stacks_empty();
5785 verify_overflow_empty();
5786 increment_sweep_count();
5787 _sweep_timer.stop();
5788 _sweep_estimate.sample(_sweep_timer.seconds());
5789 size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
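// Note: _sweep_timer was (re)started at the end of the previous sweep,
// so the sample above records the inter-sweep interval; _sweep_estimate's
// padded average later parameterizes the free list census taken in
// sweepWork() below.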
5790
5791 // PermGen verification support: If perm gen sweeping is disabled in
5792 // this cycle, we preserve the perm gen object "deadness" information
5793 // in the perm_gen_verify_bit_map. In order to do that we traverse
5794 // all blocks in perm gen and mark all dead objects.
5795 if (verifying() && !should_unload_classes()) {
5796 assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
5797 "Should have already been allocated");
5798 MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
5799 markBitMap(), perm_gen_verify_bit_map());
5800 if (asynch) {
5801 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5802 bitMapLock());
5803 _permGen->cmsSpace()->blk_iterate(&mdo);
5804 } else {
5805 // In the case of synchronous sweep, we already have
5806 // the requisite locks/tokens.
5807 _permGen->cmsSpace()->blk_iterate(&mdo);
5808 }
5809 }
5810
5811 if (asynch) {
5812 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5813 CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
5814 // First sweep the old gen then the perm gen
5815 {
5816 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5817 bitMapLock());
5818 sweepWork(_cmsGen, asynch);
5819 }
5820
5821 // Now repeat for perm gen
5822 if (should_unload_classes()) {
5823 CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5824 bitMapLock());
5825 sweepWork(_permGen, asynch);
5826 }
5827
5828 // Update Universe::_heap_*_at_gc figures.
5829 // We need all the free list locks to make the abstract state
5830 // transition from Sweeping to Resetting. See detailed note
5831 // further below.
5832 {
5833 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5834 _permGen->freelistLock());
5835 // Update heap occupancy information which is used as
5836 // input to soft ref clearing policy at the next gc.
5837 Universe::update_heap_info_at_gc();
5838 _collectorState = Resizing;
5839 }
5840 } else {
5841 // already have needed locks
5842 sweepWork(_cmsGen, asynch);
5843
5844 if (should_unload_classes()) {
5845 sweepWork(_permGen, asynch);
5846 }
5847 // Update heap occupancy information which is used as
5848 // input to soft ref clearing policy at the next gc.
5849 Universe::update_heap_info_at_gc();
5850 _collectorState = Resizing;
5851 }
5852 verify_work_stacks_empty();
5853 verify_overflow_empty();
5854
5855 _sweep_timer.reset();
5856 _sweep_timer.start();
5857
5858 update_time_of_last_gc(os::javaTimeMillis());
5859
5860 // NOTE on abstract state transitions:
5861 // Mutators allocate-live and/or mark the mod-union table dirty
5862 // based on the state of the collection. The former is done in
5863 // the interval [Marking, Sweeping] and the latter in the interval
5864 // [Marking, Sweeping). Thus the transitions into the Marking state
5865 // and out of the Sweeping state must be synchronously visible
5866 // globally to the mutators.
5867 // The transition into the Marking state happens with the world
5868 // stopped so the mutators will globally see it. Sweeping is
5869 // done asynchronously by the background collector so the transition
5870 // from the Sweeping state to the Resizing state must be done
5871 // under the freelistLock (as is the check for whether to
5872 // allocate-live and whether to dirty the mod-union table).
5873 assert(_collectorState == Resizing, "Change of collector state to"
5874 " Resizing must be done under the freelistLocks (plural)");
5875
5876 // Now that sweeping has been completed, if the GCH's
5877 // incremental_collection_will_fail flag is set, clear it,
5878 // thus inviting a younger gen collection to promote into
5879 // this generation. If such a promotion may still fail,
5880 // the flag will be set again when a young collection is
5881 // attempted.
// I think the incremental_collection_will_fail flag's use
// is specific to a 2-generation collection policy, so I'll
// assert that that's the configuration we are operating within.
5885 // The use of the flag can and should be generalized appropriately
5886 // in the future to deal with a general n-generation system.
5887
5888 GenCollectedHeap* gch = GenCollectedHeap::heap();
5889 assert(gch->collector_policy()->is_two_generation_policy(),
5890 "Resetting of incremental_collection_will_fail flag"
5891 " may be incorrect otherwise");
5892 gch->clear_incremental_collection_will_fail();
5893 gch->update_full_collections_completed(_collection_count_start);
5894 }
5895
5896 // FIX ME!!! Looks like this belongs in CFLSpace, with
5897 // CMSGen merely delegating to it.
5898 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5899 double nearLargestPercent = 0.999;
5900 HeapWord* minAddr = _cmsSpace->bottom();
5901 HeapWord* largestAddr =
5902 (HeapWord*) _cmsSpace->dictionary()->findLargestDict();
5903 if (largestAddr == 0) {
5904 // The dictionary appears to be empty. In this case
5905 // try to coalesce at the end of the heap.
5906 largestAddr = _cmsSpace->end();
5907 }
5908 size_t largestOffset = pointer_delta(largestAddr, minAddr);
5909 size_t nearLargestOffset =
5910 (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
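// In other words, place the marker 99.9% of the way from the bottom of
// the space to the largest chunk in the dictionary, backed off by
// MinChunkSize; isNearLargestChunk() (below) then reports addresses at
// or beyond this point, which the sweep can use to favor coalescing
// toward the largest chunk.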
5911 _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5912 }
5913
5914 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5915 return addr >= _cmsSpace->nearLargestChunk();
5916 }
5917
5918 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5919 return _cmsSpace->find_chunk_at_end();
5920 }
5921
5922 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
5923 bool full) {
5924 // The next lower level has been collected. Gather any statistics
5925 // that are of interest at this point.
5926 if (!full && (current_level + 1) == level()) {
5927 // Gather statistics on the young generation collection.
5928 collector()->stats().record_gc0_end(used());
5929 }
5930 }
5931
5932 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
5933 GenCollectedHeap* gch = GenCollectedHeap::heap();
5934 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
5935 "Wrong type of heap");
5936 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
5937 gch->gen_policy()->size_policy();
5938 assert(sp->is_gc_cms_adaptive_size_policy(),
5939 "Wrong type of size policy");
5940 return sp;
5941 }
5942
5943 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
5944 if (PrintGCDetails && Verbose) {
5945 gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
5946 }
_debug_collection_type = (CollectionTypes)
((_debug_collection_type + 1) % Unknown_collection_type);
5950 if (PrintGCDetails && Verbose) {
5951 gclog_or_tty->print_cr("to %d ", _debug_collection_type);
5952 }
5953 }
5954
5955 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
5956 bool asynch) {
5957 // We iterate over the space(s) underlying this generation,
5958 // checking the mark bit map to see if the bits corresponding
5959 // to specific blocks are marked or not. Blocks that are
5960 // marked are live and are not swept up. All remaining blocks
5961 // are swept up, with coalescing on-the-fly as we sweep up
5962 // contiguous free and/or garbage blocks:
5963 // We need to ensure that the sweeper synchronizes with allocators
5964 // and stop-the-world collectors. In particular, the following
5965 // locks are used:
// . CMS token: if this is held, a stop-the-world collection cannot occur
// . freelistLock: if this is held no allocation can occur from this
//   generation by another thread
// . bitMapLock: if this is held, no other thread can access or update
//   the marking bit map
5971
5972 // Note that we need to hold the freelistLock if we use
5973 // block iterate below; else the iterator might go awry if
5974 // a mutator (or promotion) causes block contents to change
5975 // (for instance if the allocator divvies up a block).
5976 // If we hold the free list lock, for all practical purposes
5977 // young generation GC's can't occur (they'll usually need to
5978 // promote), so we might as well prevent all young generation
5979 // GC's while we do a sweeping step. For the same reason, we might
// as well take the bit map lock for the entire duration.
5981
5982 // check that we hold the requisite locks
5983 assert(have_cms_token(), "Should hold cms token");
5984 assert( (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
5985 || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
5986 "Should possess CMS token to sweep");
5987 assert_lock_strong(gen->freelistLock());
5988 assert_lock_strong(bitMapLock());
5989
5990 assert(!_sweep_timer.is_active(), "Was switched off in an outer context");
5991 gen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
5992 _sweep_estimate.padded_average());
5993 gen->setNearLargestChunk();
5994
5995 {
5996 SweepClosure sweepClosure(this, gen, &_markBitMap,
5997 CMSYield && asynch);
5998 gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5999 // We need to free-up/coalesce garbage/blocks from a
6000 // co-terminal free run. This is done in the SweepClosure
6001 // destructor; so, do not remove this scope, else the
6002 // end-of-sweep-census below will be off by a little bit.
6003 }
6004 gen->cmsSpace()->sweep_completed();
6005 gen->cmsSpace()->endSweepFLCensus(sweep_count());
6006 if (should_unload_classes()) { // unloaded classes this cycle,
6007 _concurrent_cycles_since_last_unload = 0; // ... reset count
6008 } else { // did not unload classes,
6009 _concurrent_cycles_since_last_unload++; // ... increment count
6010 }
6011 }
6012
6013 // Reset CMS data structures (for now just the marking bit map)
6014 // preparatory for the next cycle.
6015 void CMSCollector::reset(bool asynch) {
6016 GenCollectedHeap* gch = GenCollectedHeap::heap();
6017 CMSAdaptiveSizePolicy* sp = size_policy();
6018 AdaptiveSizePolicyOutput(sp, gch->total_collections());
6019 if (asynch) {
6020 CMSTokenSyncWithLocks ts(true, bitMapLock());
6021
// If the state is not "Resetting", the foreground collector
// has already done the collection and the resetting for us.
6024 if (_collectorState != Resetting) {
6025 assert(_collectorState == Idling, "The state should only change"
6026 " because the foreground collector has finished the collection");
6027 return;
6028 }
6029
6030 // Clear the mark bitmap (no grey objects to start with)
6031 // for the next cycle.
6032 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6033 CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
6034
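// Clear the map one CMSBitMapYieldQuantum-sized chunk at a time so
// that we can yield between chunks and thus bound how long we hold
// the bitMapLock while a foreground collection may be pending.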
6035 HeapWord* curAddr = _markBitMap.startWord();
6036 while (curAddr < _markBitMap.endWord()) {
6037 size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
6038 MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6039 _markBitMap.clear_large_range(chunk);
6040 if (ConcurrentMarkSweepThread::should_yield() &&
6041 !foregroundGCIsActive() &&
6042 CMSYield) {
6043 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6044 "CMS thread should hold CMS token");
6045 assert_lock_strong(bitMapLock());
6046 bitMapLock()->unlock();
6047 ConcurrentMarkSweepThread::desynchronize(true);
6048 ConcurrentMarkSweepThread::acknowledge_yield_request();
6049 stopTimer();
6050 if (PrintCMSStatistics != 0) {
6051 incrementYields();
6052 }
6053 icms_wait();
6054
6055 // See the comment in coordinator_yield()
6056 for (unsigned i = 0; i < CMSYieldSleepCount &&
6057 ConcurrentMarkSweepThread::should_yield() &&
6058 !CMSCollector::foregroundGCIsActive(); ++i) {
6059 os::sleep(Thread::current(), 1, false);
6060 ConcurrentMarkSweepThread::acknowledge_yield_request();
6061 }
6062
6063 ConcurrentMarkSweepThread::synchronize(true);
6064 bitMapLock()->lock_without_safepoint_check();
6065 startTimer();
6066 }
6067 curAddr = chunk.end();
6068 }
6069 _collectorState = Idling;
6070 } else {
6071 // already have the lock
6072 assert(_collectorState == Resetting, "just checking");
6073 assert_lock_strong(bitMapLock());
6074 _markBitMap.clear_all();
6075 _collectorState = Idling;
6076 }
6077
6078 // Stop incremental mode after a cycle completes, so that any future cycles
6079 // are triggered by allocation.
6080 stop_icms();
6081
6082 NOT_PRODUCT(
6083 if (RotateCMSCollectionTypes) {
6084 _cmsGen->rotate_debug_collection_type();
6085 }
6086 )
6087 }
6088
6089 void CMSCollector::do_CMS_operation(CMS_op_type op) {
6090 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6091 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6092 TraceTime t("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
6093 TraceCollectorStats tcs(counters());
6094
6095 switch (op) {
6096 case CMS_op_checkpointRootsInitial: {
6097 checkpointRootsInitial(true); // asynch
6098 if (PrintGC) {
6099 _cmsGen->printOccupancy("initial-mark");
6100 }
6101 break;
6102 }
6103 case CMS_op_checkpointRootsFinal: {
6104 checkpointRootsFinal(true, // asynch
6105 false, // !clear_all_soft_refs
6106 false); // !init_mark_was_synchronous
6107 if (PrintGC) {
6108 _cmsGen->printOccupancy("remark");
6109 }
6110 break;
6111 }
6112 default:
6113 fatal("No such CMS_op");
6114 }
6115 }
6116
6117 #ifndef PRODUCT
6118 size_t const CMSCollector::skip_header_HeapWords() {
6119 return FreeChunk::header_size();
6120 }
6121
// Try to collect here the conditions that should hold when the
// CMS thread is exiting. The idea is that the foreground GC
// thread should not be blocked if it wants to terminate
// the CMS thread and yet continue to run the VM for a while
// after that.
6127 void CMSCollector::verify_ok_to_terminate() const {
6128 assert(Thread::current()->is_ConcurrentGC_thread(),
6129 "should be called by CMS thread");
6130 assert(!_foregroundGCShouldWait, "should be false");
6131 // We could check here that all the various low-level locks
6132 // are not held by the CMS thread, but that is overkill; see
6133 // also CMSThread::verify_ok_to_terminate() where the CGC_lock
6134 // is checked.
6135 }
6136 #endif
6137
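// "Printezis marks": when an object's size cannot be read from its
// header (e.g. the klass pointer is not yet installed), the marker sets
// bits at addr+1 and at addr+size-1, in addition to the mark bit at
// addr. The block size is then recovered as
//   size = getNextMarkedWordAddress(addr + 2) + 1 - addr.
// Illustrative example: with addr == 100 and the end bit at word 104,
// size == 105 - 100 == 5 heap words (bits set at 100, 101 and 104).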
6138 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
6139 assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
6140 "missing Printezis mark?");
6141 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6142 size_t size = pointer_delta(nextOneAddr + 1, addr);
6143 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6144 "alignment problem");
6145 assert(size >= 3, "Necessary for Printezis marks to work");
6146 return size;
6147 }
6148
6149 // A variant of the above (block_size_using_printezis_bits()) except
6150 // that we return 0 if the P-bits are not yet set.
6151 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
6152 if (_markBitMap.isMarked(addr)) {
6153 assert(_markBitMap.isMarked(addr + 1), "Missing Printezis bit?");
6154 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6155 size_t size = pointer_delta(nextOneAddr + 1, addr);
6156 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6157 "alignment problem");
6158 assert(size >= 3, "Necessary for Printezis marks to work");
6159 return size;
6160 } else {
6161 assert(!_markBitMap.isMarked(addr + 1), "Bit map inconsistency?");
6162 return 0;
6163 }
6164 }
6165
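// Return the start of the card at or beyond the end of the block
// starting at addr: compute the block's size (from the object header
// if it is parsable, otherwise from its Printezis bits) and round the
// resulting end address up to a card boundary.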
6166 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
6167 size_t sz = 0;
6168 oop p = (oop)addr;
6169 if (p->klass() != NULL && p->is_parsable()) {
6170 sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
6171 } else {
6172 sz = block_size_using_printezis_bits(addr);
6173 }
6174 assert(sz > 0, "size must be nonzero");
6175 HeapWord* next_block = addr + sz;
6176 HeapWord* next_card = (HeapWord*)round_to((uintptr_t)next_block,
6177 CardTableModRefBS::card_size);
6178 assert(round_down((uintptr_t)addr, CardTableModRefBS::card_size) <
6179 round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
6180 "must be different cards");
6181 return next_card;
6182 }
6183
6184
6185 // CMS Bit Map Wrapper /////////////////////////////////////////
6186
// Construct a CMS bit map infrastructure, but don't create the
// bit vector itself. That is done by a separate call to
// CMSBitMap::allocate() further below.
6190 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
6191 _bm(NULL,0),
6192 _shifter(shifter),
6193 _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
6194 {
6195 _bmStartWord = 0;
6196 _bmWordSize = 0;
6197 }
6198
6199 bool CMSBitMap::allocate(MemRegion mr) {
6200 _bmStartWord = mr.start();
6201 _bmWordSize = mr.word_size();
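// Sizing note: each bit covers 2^_shifter heap words, so the map needs
// (_bmWordSize >> _shifter) bits, i.e. (_bmWordSize >> (_shifter +
// LogBitsPerByte)) bytes; the "+ 1" below rounds up any remainder.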
6202 ReservedSpace brs(ReservedSpace::allocation_align_size_up(
6203 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
6204 if (!brs.is_reserved()) {
6205 warning("CMS bit map allocation failure");
6206 return false;
6207 }
// For now we'll just commit all of the bit map up front.
// Later on we'll try to be more parsimonious with swap.
6210 if (!_virtual_space.initialize(brs, brs.size())) {
6211 warning("CMS bit map backing store failure");
6212 return false;
6213 }
6214 assert(_virtual_space.committed_size() == brs.size(),
6215 "didn't reserve backing store for all of CMS bit map?");
6216 _bm.set_map((uintptr_t*)_virtual_space.low());
6217 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
6218 _bmWordSize, "inconsistency in bit map sizing");
6219 _bm.set_size(_bmWordSize >> _shifter);
6220
6221 // bm.clear(); // can we rely on getting zero'd memory? verify below
6222 assert(isAllClear(),
6223 "Expected zero'd memory from ReservedSpace constructor");
6224 assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
6225 "consistency check");
6226 return true;
6227 }
6228
6229 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
6230 HeapWord *next_addr, *end_addr, *last_addr;
6231 assert_locked();
6232 assert(covers(mr), "out-of-range error");
6233 // XXX assert that start and end are appropriately aligned
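// Loop over [mr.start(), mr.end()), repeatedly extracting (and
// clearing) the next maximal run of set bits and handing that region
// to the closure; an empty region from getAndClearMarkedRegion()
// signals that no dirty bits remain before end_addr.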
6234 for (next_addr = mr.start(), end_addr = mr.end();
6235 next_addr < end_addr; next_addr = last_addr) {
6236 MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
6237 last_addr = dirty_region.end();
6238 if (!dirty_region.is_empty()) {
6239 cl->do_MemRegion(dirty_region);
6240 } else {
6241 assert(last_addr == end_addr, "program logic");
6242 return;
6243 }
6244 }
6245 }
6246
6247 #ifndef PRODUCT
6248 void CMSBitMap::assert_locked() const {
6249 CMSLockVerifier::assert_locked(lock());
6250 }
6251
6252 bool CMSBitMap::covers(MemRegion mr) const {
6253 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
6254 assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
6255 "size inconsistency");
6256 return (mr.start() >= _bmStartWord) &&
6257 (mr.end() <= endWord());
6258 }
6259
6260 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
6261 return (start >= _bmStartWord && (start + size) <= endWord());
6262 }
6263
6264 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
6265 // verify that there are no 1 bits in the interval [left, right)
6266 FalseBitMapClosure falseBitMapClosure;
6267 iterate(&falseBitMapClosure, left, right);
6268 }
6269
6270 void CMSBitMap::region_invariant(MemRegion mr)
6271 {
6272 assert_locked();
6273 // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
6274 assert(!mr.is_empty(), "unexpected empty region");
6275 assert(covers(mr), "mr should be covered by bit map");
6276 // convert address range into offset range
6277 size_t start_ofs = heapWordToOffset(mr.start());
6278 // Make sure that end() is appropriately aligned
6279 assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
6280 (1 << (_shifter+LogHeapWordSize))),
6281 "Misaligned mr.end()");
6282 size_t end_ofs = heapWordToOffset(mr.end());
6283 assert(end_ofs > start_ofs, "Should mark at least one bit");
6284 }
6285
6286 #endif
6287
6288 bool CMSMarkStack::allocate(size_t size) {
6289 // allocate a stack of the requisite depth
6290 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6291 size * sizeof(oop)));
6292 if (!rs.is_reserved()) {
6293 warning("CMSMarkStack allocation failure");
6294 return false;
6295 }
6296 if (!_virtual_space.initialize(rs, rs.size())) {
6297 warning("CMSMarkStack backing store failure");
6298 return false;
6299 }
6300 assert(_virtual_space.committed_size() == rs.size(),
6301 "didn't reserve backing store for all of CMS stack?");
6302 _base = (oop*)(_virtual_space.low());
6303 _index = 0;
6304 _capacity = size;
6305 NOT_PRODUCT(_max_depth = 0);
6306 return true;
6307 }
6308
// XXX FIX ME !!! In the MT case we come in here holding a
// leaf lock. For printing we need to take a further lock
// which has lower rank. We need to recalibrate the two
// lock-ranks involved in order to be able to print the
// messages below. (Or defer the printing to the caller.
// For now we take the expedient path of just disabling the
// messages for the problematic case.)
6316 void CMSMarkStack::expand() {
6317 assert(_capacity <= CMSMarkStackSizeMax, "stack bigger than permitted");
6318 if (_capacity == CMSMarkStackSizeMax) {
6319 if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6320 // We print a warning message only once per CMS cycle.
6321 gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6322 }
6323 return;
6324 }
6325 // Double capacity if possible
6326 size_t new_capacity = MIN2(_capacity*2, CMSMarkStackSizeMax);
6327 // Do not give up existing stack until we have managed to
6328 // get the double capacity that we desired.
6329 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6330 new_capacity * sizeof(oop)));
6331 if (rs.is_reserved()) {
6332 // Release the backing store associated with old stack
6333 _virtual_space.release();
6334 // Reinitialize virtual space for new stack
6335 if (!_virtual_space.initialize(rs, rs.size())) {
6336 fatal("Not enough swap for expanded marking stack");
6337 }
6338 _base = (oop*)(_virtual_space.low());
6339 _index = 0;
6340 _capacity = new_capacity;
6341 } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
// Failed to double the capacity; continue with the existing stack.
// We print a detail message only once per CMS cycle.
6344 gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
6345 SIZE_FORMAT"K",
6346 _capacity / K, new_capacity / K);
6347 }
6348 }
6349
6350
6351 // Closures
6352 // XXX: there seems to be a lot of code duplication here;
6353 // should refactor and consolidate common code.
6354
6355 // This closure is used to mark refs into the CMS generation in
6356 // the CMS bit map. Called at the first checkpoint. This closure
// assumes that we do not need to re-mark dirty cards; if the CMS
// generation on which this is used is not the oldest generation
// (modulo the perm gen), then this will lose younger_gen cards!
6360
6361 MarkRefsIntoClosure::MarkRefsIntoClosure(
6362 MemRegion span, CMSBitMap* bitMap, bool should_do_nmethods):
6363 _span(span),
6364 _bitMap(bitMap),
6365 _should_do_nmethods(should_do_nmethods)
6366 {
6367 assert(_ref_processor == NULL, "deliberately left NULL");
6368 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6369 }
6370
6371 void MarkRefsIntoClosure::do_oop(oop* p) {
// if p points into _span, then mark the corresponding bit in _bitMap
6373 oop thisOop = *p;
6374 if (thisOop != NULL) {
6375 assert(thisOop->is_oop(), "expected an oop");
6376 HeapWord* addr = (HeapWord*)thisOop;
6377 if (_span.contains(addr)) {
6378 // this should be made more efficient
6379 _bitMap->mark(addr);
6380 }
6381 }
6382 }
6383
6384 // A variant of the above, used for CMS marking verification.
6385 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6386 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6387 bool should_do_nmethods):
6388 _span(span),
6389 _verification_bm(verification_bm),
6390 _cms_bm(cms_bm),
6391 _should_do_nmethods(should_do_nmethods) {
6392 assert(_ref_processor == NULL, "deliberately left NULL");
6393 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6394 }
6395
6396 void MarkRefsIntoVerifyClosure::do_oop(oop* p) {
// if p points into _span, then mark the corresponding bit in _verification_bm
6398 oop this_oop = *p;
6399 if (this_oop != NULL) {
6400 assert(this_oop->is_oop(), "expected an oop");
6401 HeapWord* addr = (HeapWord*)this_oop;
6402 if (_span.contains(addr)) {
6403 _verification_bm->mark(addr);
6404 if (!_cms_bm->isMarked(addr)) {
6405 oop(addr)->print();
6406 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
6407 fatal("... aborting");
6408 }
6409 }
6410 }
6411 }
6412
6413 //////////////////////////////////////////////////
6414 // MarkRefsIntoAndScanClosure
6415 //////////////////////////////////////////////////
6416
6417 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6418 ReferenceProcessor* rp,
6419 CMSBitMap* bit_map,
6420 CMSBitMap* mod_union_table,
6421 CMSMarkStack* mark_stack,
6422 CMSMarkStack* revisit_stack,
6423 CMSCollector* collector,
6424 bool should_yield,
6425 bool concurrent_precleaning):
6426 _collector(collector),
6427 _span(span),
6428 _bit_map(bit_map),
6429 _mark_stack(mark_stack),
6430 _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6431 mark_stack, revisit_stack, concurrent_precleaning),
6432 _yield(should_yield),
6433 _concurrent_precleaning(concurrent_precleaning),
6434 _freelistLock(NULL)
6435 {
6436 _ref_processor = rp;
6437 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6438 }
6439
6440 // This closure is used to mark refs into the CMS generation at the
6441 // second (final) checkpoint, and to scan and transitively follow
6442 // the unmarked oops. It is also used during the concurrent precleaning
6443 // phase while scanning objects on dirty cards in the CMS generation.
6444 // The marks are made in the marking bit map and the marking stack is
6445 // used for keeping the (newly) grey objects during the scan.
6446 // The parallel version (Par_...) appears further below.
6447 void MarkRefsIntoAndScanClosure::do_oop(oop* p) {
6448 oop this_oop = *p;
6449 if (this_oop != NULL) {
6450 assert(this_oop->is_oop(), "expected an oop");
6451 HeapWord* addr = (HeapWord*)this_oop;
6452 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6453 assert(_collector->overflow_list_is_empty(), "should be empty");
6454 if (_span.contains(addr) &&
6455 !_bit_map->isMarked(addr)) {
6456 // mark bit map (object is now grey)
6457 _bit_map->mark(addr);
6458 // push on marking stack (stack should be empty), and drain the
6459 // stack by applying this closure to the oops in the oops popped
6460 // from the stack (i.e. blacken the grey objects)
6461 bool res = _mark_stack->push(this_oop);
6462 assert(res, "Should have space to push on empty stack");
6463 do {
6464 oop new_oop = _mark_stack->pop();
6465 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6466 assert(new_oop->is_parsable(), "Found unparsable oop");
6467 assert(_bit_map->isMarked((HeapWord*)new_oop),
6468 "only grey objects on this stack");
6469 // iterate over the oops in this oop, marking and pushing
6470 // the ones in CMS heap (i.e. in _span).
6471 new_oop->oop_iterate(&_pushAndMarkClosure);
6472 // check if it's time to yield
6473 do_yield_check();
6474 } while (!_mark_stack->isEmpty() ||
6475 (!_concurrent_precleaning && take_from_overflow_list()));
// (When the marking stack drains and we are not precleaning, the
// loop condition above also pulls work from the overflow list.)
6478 }
6479 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6480 assert(_collector->overflow_list_is_empty(),
6481 "overflow list was drained above");
6482 // We could restore evacuated mark words, if any, used for
6483 // overflow list links here because the overflow list is
6484 // provably empty here. That would reduce the maximum
6485 // size requirements for preserved_{oop,mark}_stack.
6486 // But we'll just postpone it until we are all done
6487 // so we can just stream through.
6488 if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
6489 _collector->restore_preserved_marks_if_any();
6490 assert(_collector->no_preserved_marks(), "No preserved marks");
6491 }
6492 assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6493 "All preserved marks should have been restored above");
6494 }
6495 }
6496
6497 void MarkRefsIntoAndScanClosure::do_yield_work() {
6498 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6499 "CMS thread should hold CMS token");
6500 assert_lock_strong(_freelistLock);
6501 assert_lock_strong(_bit_map->lock());
// relinquish the freelistLock and the bitMapLock()
6503 _bit_map->lock()->unlock();
6504 _freelistLock->unlock();
6505 ConcurrentMarkSweepThread::desynchronize(true);
6506 ConcurrentMarkSweepThread::acknowledge_yield_request();
6507 _collector->stopTimer();
6508 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6509 if (PrintCMSStatistics != 0) {
6510 _collector->incrementYields();
6511 }
6512 _collector->icms_wait();
6513
6514 // See the comment in coordinator_yield()
6515 for (unsigned i = 0; i < CMSYieldSleepCount &&
6516 ConcurrentMarkSweepThread::should_yield() &&
6517 !CMSCollector::foregroundGCIsActive(); ++i) {
6518 os::sleep(Thread::current(), 1, false);
6519 ConcurrentMarkSweepThread::acknowledge_yield_request();
6520 }
6521
6522 ConcurrentMarkSweepThread::synchronize(true);
6523 _freelistLock->lock_without_safepoint_check();
6524 _bit_map->lock()->lock_without_safepoint_check();
6525 _collector->startTimer();
6526 }
6527
6528 ///////////////////////////////////////////////////////////
6529 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6530 // MarkRefsIntoAndScanClosure
6531 ///////////////////////////////////////////////////////////
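// Unlike the serial closure above, newly grey objects go onto a
// per-thread work queue, which is trimmed down to a low-water mark
// (computed in the constructor below) rather than drained completely,
// leaving surplus entries available for stealing by other GC workers.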
6532 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6533 CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6534 CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack* revisit_stack):
6535 _span(span),
6536 _bit_map(bit_map),
6537 _work_queue(work_queue),
6538 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6539 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6540 _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue,
6541 revisit_stack)
6542 {
6543 _ref_processor = rp;
6544 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6545 }
6546
6547 // This closure is used to mark refs into the CMS generation at the
6548 // second (final) checkpoint, and to scan and transitively follow
6549 // the unmarked oops. The marks are made in the marking bit map and
6550 // the work_queue is used for keeping the (newly) grey objects during
6551 // the scan phase whence they are also available for stealing by parallel
6552 // threads. Since the marking bit map is shared, updates are
6553 // synchronized (via CAS).
6554 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) {
6555 oop this_oop = *p;
6556 if (this_oop != NULL) {
6557 // Ignore mark word because this could be an already marked oop
6558 // that may be chained at the end of the overflow list.
6559 assert(this_oop->is_oop(true /* ignore mark word */), "expected an oop");
6560 HeapWord* addr = (HeapWord*)this_oop;
6561 if (_span.contains(addr) &&
6562 !_bit_map->isMarked(addr)) {
6563 // mark bit map (object will become grey):
6564 // It is possible for several threads to be
6565 // trying to "claim" this object concurrently;
6566 // the unique thread that succeeds in marking the
6567 // object first will do the subsequent push on
6568 // to the work queue (or overflow list).
6569 if (_bit_map->par_mark(addr)) {
6570 // push on work_queue (which may not be empty), and trim the
6571 // queue to an appropriate length by applying this closure to
6572 // the oops in the oops popped from the stack (i.e. blacken the
6573 // grey objects)
6574 bool res = _work_queue->push(this_oop);
6575 assert(res, "Low water mark should be less than capacity?");
6576 trim_queue(_low_water_mark);
6577 } // Else, another thread claimed the object
6578 }
6579 }
6580 }
6581
6582 // This closure is used to rescan the marked objects on the dirty cards
6583 // in the mod union table and the card table proper.
6584 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6585 oop p, MemRegion mr) {
6586
6587 size_t size = 0;
6588 HeapWord* addr = (HeapWord*)p;
6589 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6590 assert(_span.contains(addr), "we are scanning the CMS generation");
6591 // check if it's time to yield
6592 if (do_yield_check()) {
6593 // We yielded for some foreground stop-world work,
6594 // and we have been asked to abort this ongoing preclean cycle.
6595 return 0;
6596 }
6597 if (_bitMap->isMarked(addr)) {
6598 // it's marked; is it potentially uninitialized?
6599 if (p->klass() != NULL) {
6600 if (CMSPermGenPrecleaningEnabled && !p->is_parsable()) {
6601 // Signal precleaning to redirty the card since
6602 // the klass pointer is already installed.
6603 assert(size == 0, "Initial value");
6604 } else {
6605 assert(p->is_parsable(), "must be parsable.");
6606 // an initialized object; ignore mark word in verification below
6607 // since we are running concurrent with mutators
6608 assert(p->is_oop(true), "should be an oop");
6609 if (p->is_objArray()) {
6610 // objArrays are precisely marked; restrict scanning
6611 // to dirty cards only.
6612 size = p->oop_iterate(_scanningClosure, mr);
6613 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6614 "adjustObjectSize should be the identity for array sizes, "
6615 "which are necessarily larger than minimum object size of "
6616 "two heap words");
6617 } else {
6618 // A non-array may have been imprecisely marked; we need
6619 // to scan object in its entirety.
6620 size = CompactibleFreeListSpace::adjustObjectSize(
6621 p->oop_iterate(_scanningClosure));
6622 }
6623 #ifdef DEBUG
6624 size_t direct_size =
6625 CompactibleFreeListSpace::adjustObjectSize(p->size());
6626 assert(size == direct_size, "Inconsistency in size");
6627 assert(size >= 3, "Necessary for Printezis marks to work");
6628 if (!_bitMap->isMarked(addr+1)) {
6629 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
6630 } else {
6631 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6632 assert(_bitMap->isMarked(addr+size-1),
6633 "inconsistent Printezis mark");
6634 }
6635 #endif // DEBUG
6636 }
6637 } else {
// an uninitialized object
6639 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6640 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6641 size = pointer_delta(nextOneAddr + 1, addr);
6642 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6643 "alignment problem");
6644 // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6645 // will dirty the card when the klass pointer is installed in the
6646 // object (signalling the completion of initialization).
6647 }
6648 } else {
6649 // Either a not yet marked object or an uninitialized object
6650 if (p->klass() == NULL || !p->is_parsable()) {
6651 // An uninitialized object, skip to the next card, since
6652 // we may not be able to read its P-bits yet.
6653 assert(size == 0, "Initial value");
6654 } else {
6655 // An object not (yet) reached by marking: we merely need to
6656 // compute its size so as to go look at the next block.
6657 assert(p->is_oop(true), "should be an oop");
6658 size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6659 }
6660 }
6661 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6662 return size;
6663 }
6664
6665 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6666 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6667 "CMS thread should hold CMS token");
6668 assert_lock_strong(_freelistLock);
6669 assert_lock_strong(_bitMap->lock());
// relinquish the freelistLock and the bitMapLock()
6671 _bitMap->lock()->unlock();
6672 _freelistLock->unlock();
6673 ConcurrentMarkSweepThread::desynchronize(true);
6674 ConcurrentMarkSweepThread::acknowledge_yield_request();
6675 _collector->stopTimer();
6676 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6677 if (PrintCMSStatistics != 0) {
6678 _collector->incrementYields();
6679 }
6680 _collector->icms_wait();
6681
6682 // See the comment in coordinator_yield()
6683 for (unsigned i = 0; i < CMSYieldSleepCount &&
6684 ConcurrentMarkSweepThread::should_yield() &&
6685 !CMSCollector::foregroundGCIsActive(); ++i) {
6686 os::sleep(Thread::current(), 1, false);
6687 ConcurrentMarkSweepThread::acknowledge_yield_request();
6688 }
6689
6690 ConcurrentMarkSweepThread::synchronize(true);
6691 _freelistLock->lock_without_safepoint_check();
6692 _bitMap->lock()->lock_without_safepoint_check();
6693 _collector->startTimer();
6694 }
6695
6696
6697 //////////////////////////////////////////////////////////////////
6698 // SurvivorSpacePrecleanClosure
6699 //////////////////////////////////////////////////////////////////
6700 // This (single-threaded) closure is used to preclean the oops in
6701 // the survivor spaces.
6702 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6703
6704 HeapWord* addr = (HeapWord*)p;
6705 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6706 assert(!_span.contains(addr), "we are scanning the survivor spaces");
assert(p->klass() != NULL, "object should be initialized");
6708 assert(p->is_parsable(), "must be parsable.");
6709 // an initialized object; ignore mark word in verification below
6710 // since we are running concurrent with mutators
6711 assert(p->is_oop(true), "should be an oop");
6712 // Note that we do not yield while we iterate over
6713 // the interior oops of p, pushing the relevant ones
6714 // on our marking stack.
6715 size_t size = p->oop_iterate(_scanning_closure);
6716 do_yield_check();
6717 // Observe that below, we do not abandon the preclean
6718 // phase as soon as we should; rather we empty the
6719 // marking stack before returning. This is to satisfy
6720 // some existing assertions. In general, it may be a
6721 // good idea to abort immediately and complete the marking
6722 // from the grey objects at a later time.
6723 while (!_mark_stack->isEmpty()) {
6724 oop new_oop = _mark_stack->pop();
6725 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6726 assert(new_oop->is_parsable(), "Found unparsable oop");
6727 assert(_bit_map->isMarked((HeapWord*)new_oop),
6728 "only grey objects on this stack");
6729 // iterate over the oops in this oop, marking and pushing
6730 // the ones in CMS heap (i.e. in _span).
6731 new_oop->oop_iterate(_scanning_closure);
6732 // check if it's time to yield
6733 do_yield_check();
6734 }
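// If a collection intervened while we were scanning (i.e. the heap's
// total collection count changed from _before_count), or the collector
// has asked us to cut precleaning short, return 0 so the caller
// abandons this pass.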
6735 unsigned int after_count =
6736 GenCollectedHeap::heap()->total_collections();
6737 bool abort = (_before_count != after_count) ||
6738 _collector->should_abort_preclean();
6739 return abort ? 0 : size;
6740 }
6741
6742 void SurvivorSpacePrecleanClosure::do_yield_work() {
6743 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6744 "CMS thread should hold CMS token");
6745 assert_lock_strong(_bit_map->lock());
6746 // Relinquish the bit map lock
6747 _bit_map->lock()->unlock();
6748 ConcurrentMarkSweepThread::desynchronize(true);
6749 ConcurrentMarkSweepThread::acknowledge_yield_request();
6750 _collector->stopTimer();
6751 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6752 if (PrintCMSStatistics != 0) {
6753 _collector->incrementYields();
6754 }
6755 _collector->icms_wait();
6756
6757 // See the comment in coordinator_yield()
6758 for (unsigned i = 0; i < CMSYieldSleepCount &&
6759 ConcurrentMarkSweepThread::should_yield() &&
6760 !CMSCollector::foregroundGCIsActive(); ++i) {
6761 os::sleep(Thread::current(), 1, false);
6762 ConcurrentMarkSweepThread::acknowledge_yield_request();
6763 }
6764
6765 ConcurrentMarkSweepThread::synchronize(true);
6766 _bit_map->lock()->lock_without_safepoint_check();
6767 _collector->startTimer();
6768 }
6769
6770 // This closure is used to rescan the marked objects on the dirty cards
6771 // in the mod union table and the card table proper. In the parallel
6772 // case, although the bitMap is shared, we do a single read so the
6773 // isMarked() query is "safe".
6774 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6775 // Ignore mark word because we are running concurrent with mutators
6776 assert(p->is_oop_or_null(true), "expected an oop or null");
6777 HeapWord* addr = (HeapWord*)p;
6778 assert(_span.contains(addr), "we are scanning the CMS generation");
6779 bool is_obj_array = false;
6780 #ifdef DEBUG
6781 if (!_parallel) {
6782 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6783 assert(_collector->overflow_list_is_empty(),
6784 "overflow list should be empty");
6786 }
6787 #endif // DEBUG
6788 if (_bit_map->isMarked(addr)) {
6789 // Obj arrays are precisely marked, non-arrays are not;
6790 // so we scan objArrays precisely and non-arrays in their
6791 // entirety.
6792 if (p->is_objArray()) {
6793 is_obj_array = true;
6794 if (_parallel) {
6795 p->oop_iterate(_par_scan_closure, mr);
6796 } else {
6797 p->oop_iterate(_scan_closure, mr);
6798 }
6799 } else {
6800 if (_parallel) {
6801 p->oop_iterate(_par_scan_closure);
6802 } else {
6803 p->oop_iterate(_scan_closure);
6804 }
6805 }
6806 }
6807 #ifdef DEBUG
6808 if (!_parallel) {
6809 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6810 assert(_collector->overflow_list_is_empty(),
6811 "overflow list should be empty");
6813 }
6814 #endif // DEBUG
6815 return is_obj_array;
6816 }
6817
6818 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6819 MemRegion span,
6820 CMSBitMap* bitMap, CMSMarkStack* markStack,
6821 CMSMarkStack* revisitStack,
6822 bool should_yield, bool verifying):
6823 _collector(collector),
6824 _span(span),
6825 _bitMap(bitMap),
6826 _mut(&collector->_modUnionTable),
6827 _markStack(markStack),
6828 _revisitStack(revisitStack),
6829 _yield(should_yield),
6830 _skipBits(0)
6831 {
6832 assert(_markStack->isEmpty(), "stack should be empty");
6833 _finger = _bitMap->startWord();
6834 _threshold = _finger;
6835 assert(_collector->_restart_addr == NULL, "Sanity check");
6836 assert(_span.contains(_finger), "Out of bounds _finger?");
6837 DEBUG_ONLY(_verifying = verifying;)
6838 }
6839
6840 void MarkFromRootsClosure::reset(HeapWord* addr) {
6841 assert(_markStack->isEmpty(), "would cause duplicates on stack");
6842 assert(_span.contains(addr), "Out of bounds _finger?");
6843 _finger = addr;
6844 _threshold = (HeapWord*)round_to(
6845 (intptr_t)_finger, CardTableModRefBS::card_size);
6846 }
6847
6848 // Should revisit to see if this should be restructured for
6849 // greater efficiency.
6850 void MarkFromRootsClosure::do_bit(size_t offset) {
6851 if (_skipBits > 0) {
6852 _skipBits--;
6853 return;
6854 }
6855 // convert offset into a HeapWord*
6856 HeapWord* addr = _bitMap->startWord() + offset;
6857 assert(_bitMap->endWord() && addr < _bitMap->endWord(),
6858 "address out of range");
6859 assert(_bitMap->isMarked(addr), "tautology");
6860 if (_bitMap->isMarked(addr+1)) {
6861 // this is an allocated but not yet initialized object
6862 assert(_skipBits == 0, "tautology");
6863 _skipBits = 2; // skip next two marked bits ("Printezis-marks")
6864 oop p = oop(addr);
6865 if (p->klass() == NULL || !p->is_parsable()) {
6866 DEBUG_ONLY(if (!_verifying) {)
6867 // We re-dirty the cards on which this object lies and increase
6868 // the _threshold so that we'll come back to scan this object
6869 // during the preclean or remark phase. (CMSCleanOnEnter)
6870 if (CMSCleanOnEnter) {
6871 size_t sz = _collector->block_size_using_printezis_bits(addr);
6872 HeapWord* start_card_addr = (HeapWord*)round_down(
6873 (intptr_t)addr, CardTableModRefBS::card_size);
6874 HeapWord* end_card_addr = (HeapWord*)round_to(
6875 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6876 MemRegion redirty_range = MemRegion(start_card_addr, end_card_addr);
6877 assert(!redirty_range.is_empty(), "Arithmetical tautology");
6878 // Bump _threshold to end_card_addr; note that
6879 // _threshold cannot possibly exceed end_card_addr, anyhow.
6880 // This prevents future clearing of the card as the scan proceeds
6881 // to the right.
6882 assert(_threshold <= end_card_addr,
6883 "Because we are just scanning into this object");
6884 if (_threshold < end_card_addr) {
6885 _threshold = end_card_addr;
6886 }
6887 if (p->klass() != NULL) {
6888 // Redirty the range of cards...
6889 _mut->mark_range(redirty_range);
6890 } // ...else the setting of klass will dirty the card anyway.
6891 }
6892 DEBUG_ONLY(})
6893 return;
6894 }
6895 }
6896 scanOopsInOop(addr);
6897 }
6898
6899 // We take a break if we've been at this for a while,
6900 // so as to avoid monopolizing the locks involved.
6901 void MarkFromRootsClosure::do_yield_work() {
6902 // First give up the locks, then yield, then re-lock
6903 // We should probably use a constructor/destructor idiom to
6904 // do this unlock/lock or modify the MutexUnlocker class to
6905 // serve our purpose. XXX
6906 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6907 "CMS thread should hold CMS token");
6908 assert_lock_strong(_bitMap->lock());
6909 _bitMap->lock()->unlock();
6910 ConcurrentMarkSweepThread::desynchronize(true);
6911 ConcurrentMarkSweepThread::acknowledge_yield_request();
6912 _collector->stopTimer();
6913 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6914 if (PrintCMSStatistics != 0) {
6915 _collector->incrementYields();
6916 }
6917 _collector->icms_wait();
6918
6919 // See the comment in coordinator_yield()
6920 for (unsigned i = 0; i < CMSYieldSleepCount &&
6921 ConcurrentMarkSweepThread::should_yield() &&
6922 !CMSCollector::foregroundGCIsActive(); ++i) {
6923 os::sleep(Thread::current(), 1, false);
6924 ConcurrentMarkSweepThread::acknowledge_yield_request();
6925 }
6926
6927 ConcurrentMarkSweepThread::synchronize(true);
6928 _bitMap->lock()->lock_without_safepoint_check();
6929 _collector->startTimer();
6930 }
6931
6932 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6933 assert(_bitMap->isMarked(ptr), "expected bit to be set");
6934 assert(_markStack->isEmpty(),
6935 "should drain stack to limit stack usage");
6936 // convert ptr to an oop preparatory to scanning
6937 oop this_oop = oop(ptr);
6938 // Ignore mark word in verification below, since we
6939 // may be running concurrent with mutators.
6940 assert(this_oop->is_oop(true), "should be an oop");
6941 assert(_finger <= ptr, "_finger runneth ahead");
6942 // advance the finger to right end of this object
6943 _finger = ptr + this_oop->size();
6944 assert(_finger > ptr, "we just incremented it above");
6945 // On large heaps, it may take us some time to get through
6946 // the marking phase (especially if running iCMS). During
6947 // this time it's possible that a lot of mutations have
6948 // accumulated in the card table and the mod union table --
6949 // these mutation records are redundant until we have
6950 // actually traced into the corresponding card.
6951 // Here, we check whether advancing the finger would make
6952 // us cross into a new card, and if so clear corresponding
6953 // cards in the MUT (preclean them in the card-table in the
6954 // future).
6955
6956 DEBUG_ONLY(if (!_verifying) {)
6957 // The clean-on-enter optimization is disabled by default,
6958 // until we fix 6178663.
6959 if (CMSCleanOnEnter && (_finger > _threshold)) {
6960 // [_threshold, _finger) represents the interval
6961 // of cards to be cleared in MUT (or precleaned in card table).
6962 // The set of cards to be cleared is all those that overlap
6963 // with the interval [_threshold, _finger); note that
6964 // _threshold is always kept card-aligned but _finger isn't
6965 // always card-aligned.
6966 HeapWord* old_threshold = _threshold;
6967 assert(old_threshold == (HeapWord*)round_to(
6968 (intptr_t)old_threshold, CardTableModRefBS::card_size),
6969 "_threshold should always be card-aligned");
6970 _threshold = (HeapWord*)round_to(
6971 (intptr_t)_finger, CardTableModRefBS::card_size);
6972 MemRegion mr(old_threshold, _threshold);
6973 assert(!mr.is_empty(), "Control point invariant");
6974 assert(_span.contains(mr), "Should clear within span");
6975 // XXX When _finger crosses from old gen into perm gen
6976 // we may be doing unnecessary cleaning; do better in the
6977 // future by detecting that condition and clearing fewer
6978 // MUT/CT entries.
6979 _mut->clear_range(mr);
6980 }
6981 DEBUG_ONLY(})
6982
6983 // Note: the finger doesn't advance while we drain
6984 // the stack below.
6985 PushOrMarkClosure pushOrMarkClosure(_collector,
6986 _span, _bitMap, _markStack,
6987 _revisitStack,
6988 _finger, this);
6989 bool res = _markStack->push(this_oop);
6990 assert(res, "Empty non-zero size stack should have space for single push");
6991 while (!_markStack->isEmpty()) {
6992 oop new_oop = _markStack->pop();
6993 // Skip verifying header mark word below because we are
6994 // running concurrent with mutators.
6995 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6996 // now scan this oop's oops
6997 new_oop->oop_iterate(&pushOrMarkClosure);
6998 do_yield_check();
6999 }
7000 assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
7001 }
7002
7003 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
7004 CMSCollector* collector, MemRegion span,
7005 CMSBitMap* bit_map,
7006 OopTaskQueue* work_queue,
7007 CMSMarkStack* overflow_stack,
7008 CMSMarkStack* revisit_stack,
7009 bool should_yield):
7010 _collector(collector),
7011 _whole_span(collector->_span),
7012 _span(span),
7013 _bit_map(bit_map),
7014 _mut(&collector->_modUnionTable),
7015 _work_queue(work_queue),
7016 _overflow_stack(overflow_stack),
7017 _revisit_stack(revisit_stack),
7018 _yield(should_yield),
7019 _skip_bits(0),
7020 _task(task)
7021 {
7022 assert(_work_queue->size() == 0, "work_queue should be empty");
7023 _finger = span.start();
7024 _threshold = _finger; // XXX Defer clear-on-enter optimization for now
7025 assert(_span.contains(_finger), "Out of bounds _finger?");
7026 }
7027
7028 // Should revisit to see if this should be restructured for
7029 // greater efficiency.
7030 void Par_MarkFromRootsClosure::do_bit(size_t offset) {
7031 if (_skip_bits > 0) {
7032 _skip_bits--;
7033 return;
7034 }
7035 // convert offset into a HeapWord*
7036 HeapWord* addr = _bit_map->startWord() + offset;
7037 assert(_bit_map->endWord() && addr < _bit_map->endWord(),
7038 "address out of range");
7039 assert(_bit_map->isMarked(addr), "tautology");
7040 if (_bit_map->isMarked(addr+1)) {
7041 // this is an allocated object that might not yet be initialized
7042 assert(_skip_bits == 0, "tautology");
7043 _skip_bits = 2; // skip next two marked bits ("Printezis-marks")
7044 oop p = oop(addr);
7045 if (p->klass() == NULL || !p->is_parsable()) {
7046 // in the case of Clean-on-Enter optimization, redirty card
7047 // and avoid clearing card by increasing the threshold.
7048 return;
7049 }
7050 }
7051 scan_oops_in_oop(addr);
7052 }
7053
7054 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7055 assert(_bit_map->isMarked(ptr), "expected bit to be set");
7056 // Should we assert that our work queue is empty or
7057 // below some drain limit?
7058 assert(_work_queue->size() == 0,
7059 "should drain stack to limit stack usage");
7060 // convert ptr to an oop preparatory to scanning
7061 oop this_oop = oop(ptr);
7062 // Ignore mark word in verification below, since we
7063 // may be running concurrent with mutators.
7064 assert(this_oop->is_oop(true), "should be an oop");
7065 assert(_finger <= ptr, "_finger runneth ahead");
7066 // advance the finger to right end of this object
7067 _finger = ptr + this_oop->size();
7068 assert(_finger > ptr, "we just incremented it above");
7069 // On large heaps, it may take us some time to get through
7070 // the marking phase (especially if running iCMS). During
7071 // this time it's possible that a lot of mutations have
7072 // accumulated in the card table and the mod union table --
7073 // these mutation records are redundant until we have
7074 // actually traced into the corresponding card.
7075 // Here, we check whether advancing the finger would make
7076 // us cross into a new card, and if so clear corresponding
7077 // cards in the MUT (preclean them in the card-table in the
7078 // future).
7079
7080 // The clean-on-enter optimization is disabled by default,
7081 // until we fix 6178663.
7082 if (CMSCleanOnEnter && (_finger > _threshold)) {
7083 // [_threshold, _finger) represents the interval
7084 // of cards to be cleared in MUT (or precleaned in card table).
7085 // The set of cards to be cleared is all those that overlap
7086 // with the interval [_threshold, _finger); note that
7087 // _threshold is always kept card-aligned but _finger isn't
7088 // always card-aligned.
7089 HeapWord* old_threshold = _threshold;
7090 assert(old_threshold == (HeapWord*)round_to(
7091 (intptr_t)old_threshold, CardTableModRefBS::card_size),
7092 "_threshold should always be card-aligned");
7093 _threshold = (HeapWord*)round_to(
7094 (intptr_t)_finger, CardTableModRefBS::card_size);
7095 MemRegion mr(old_threshold, _threshold);
7096 assert(!mr.is_empty(), "Control point invariant");
7097 assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
7098 // XXX When _finger crosses from old gen into perm gen
7099 // we may be doing unnecessary cleaning; do better in the
7100 // future by detecting that condition and clearing fewer
7101 // MUT/CT entries.
7102 _mut->clear_range(mr);
7103 }
7104
7105 // Note: the local finger doesn't advance while we drain
7106 // the stack below, but the global finger sure can and will.
7107 HeapWord** gfa = _task->global_finger_addr();
7108 Par_PushOrMarkClosure pushOrMarkClosure(_collector,
7109 _span, _bit_map,
7110 _work_queue,
7111 _overflow_stack,
7112 _revisit_stack,
7113 _finger,
7114 gfa, this);
7115 bool res = _work_queue->push(this_oop); // overflow could occur here
7116 assert(res, "Will hold once we use workqueues");
7117 while (true) {
7118 oop new_oop;
7119 if (!_work_queue->pop_local(new_oop)) {
7120 // We emptied our work_queue; check if there's stuff that can
7121 // be gotten from the overflow stack.
7122 if (CMSConcMarkingTask::get_work_from_overflow_stack(
7123 _overflow_stack, _work_queue)) {
7124 do_yield_check();
7125 continue;
7126 } else { // done
7127 break;
7128 }
7129 }
7130 // Skip verifying header mark word below because we are
7131 // running concurrent with mutators.
7132 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7133 // now scan this oop's oops
7134 new_oop->oop_iterate(&pushOrMarkClosure);
7135 do_yield_check();
7136 }
7137 assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
7138 }
7139
7140 // Yield in response to a request from VM Thread or
7141 // from mutators.
7142 void Par_MarkFromRootsClosure::do_yield_work() {
7143 assert(_task != NULL, "sanity");
7144 _task->yield();
7145 }
7146
7147 // A variant of the above used for verifying CMS marking work.
7148 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
7149 MemRegion span,
7150 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7151 CMSMarkStack* mark_stack):
7152 _collector(collector),
7153 _span(span),
7154 _verification_bm(verification_bm),
7155 _cms_bm(cms_bm),
7156 _mark_stack(mark_stack),
7157 _pam_verify_closure(collector, span, verification_bm, cms_bm,
7158 mark_stack)
7159 {
7160 assert(_mark_stack->isEmpty(), "stack should be empty");
7161 _finger = _verification_bm->startWord();
7162 assert(_collector->_restart_addr == NULL, "Sanity check");
7163 assert(_span.contains(_finger), "Out of bounds _finger?");
7164 }
7165
7166 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
7167 assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
7168 assert(_span.contains(addr), "Out of bounds _finger?");
7169 _finger = addr;
7170 }
7171
7172 // Should revisit to see if this should be restructured for
7173 // greater efficiency.
7174 void MarkFromRootsVerifyClosure::do_bit(size_t offset) {
7175 // convert offset into a HeapWord*
7176 HeapWord* addr = _verification_bm->startWord() + offset;
7177 assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
7178 "address out of range");
7179 assert(_verification_bm->isMarked(addr), "tautology");
7180 assert(_cms_bm->isMarked(addr), "tautology");
7181
7182 assert(_mark_stack->isEmpty(),
7183 "should drain stack to limit stack usage");
7184 // convert addr to an oop preparatory to scanning
7185 oop this_oop = oop(addr);
7186 assert(this_oop->is_oop(), "should be an oop");
7187 assert(_finger <= addr, "_finger runneth ahead");
7188 // advance the finger to right end of this object
7189 _finger = addr + this_oop->size();
7190 assert(_finger > addr, "we just incremented it above");
7191 // Note: the finger doesn't advance while we drain
7192 // the stack below.
7193 bool res = _mark_stack->push(this_oop);
7194 assert(res, "Empty non-zero size stack should have space for a single push");
7195 while (!_mark_stack->isEmpty()) {
7196 oop new_oop = _mark_stack->pop();
7197 assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7198 // now scan this oop's oops
7199 new_oop->oop_iterate(&_pam_verify_closure);
7200 }
7201 assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7202 }
7203
7204 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7205 CMSCollector* collector, MemRegion span,
7206 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7207 CMSMarkStack* mark_stack):
7208 OopClosure(collector->ref_processor()),
7209 _collector(collector),
7210 _span(span),
7211 _verification_bm(verification_bm),
7212 _cms_bm(cms_bm),
7213 _mark_stack(mark_stack)
7214 { }
7215
7216
7217 // Upon stack overflow, we discard (part of) the stack,
7218 // remembering the least address amongst those discarded
7219 // in CMSCollector's _restart_addr.
7220 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7221 // Remember the least grey address discarded
7222 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7223 _collector->lower_restart_addr(ra);
7224 _mark_stack->reset(); // discard stack contents
7225 _mark_stack->expand(); // expand the stack if possible
7226 }
7227
7228 void PushAndMarkVerifyClosure::do_oop(oop* p) {
7229 oop this_oop = *p;
7230 assert(this_oop->is_oop_or_null(), "expected an oop or NULL");
7231 HeapWord* addr = (HeapWord*)this_oop;
7232 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7233 // Oop lies in _span and isn't yet grey or black
7234 _verification_bm->mark(addr); // now grey
7235 if (!_cms_bm->isMarked(addr)) {
7236 oop(addr)->print();
7237 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
7238 fatal("... aborting");
7239 }
7240
7241 if (!_mark_stack->push(this_oop)) { // stack overflow
7242 if (PrintCMSStatistics != 0) {
7243 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7244 SIZE_FORMAT, _mark_stack->capacity());
7245 }
7246 assert(_mark_stack->isFull(), "Else push should have succeeded");
7247 handle_stack_overflow(addr);
7248 }
7249 // anything including and to the right of _finger
7250 // will be scanned as we iterate over the remainder of the
7251 // bit map
7252 }
7253 }
7254
7255 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7256 MemRegion span,
7257 CMSBitMap* bitMap, CMSMarkStack* markStack,
7258 CMSMarkStack* revisitStack,
7259 HeapWord* finger, MarkFromRootsClosure* parent) :
7260 OopClosure(collector->ref_processor()),
7261 _collector(collector),
7262 _span(span),
7263 _bitMap(bitMap),
7264 _markStack(markStack),
7265 _revisitStack(revisitStack),
7266 _finger(finger),
7267 _parent(parent),
7268 _should_remember_klasses(collector->should_unload_classes())
7269 { }
7270
7271 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7272 MemRegion span,
7273 CMSBitMap* bit_map,
7274 OopTaskQueue* work_queue,
7275 CMSMarkStack* overflow_stack,
7276 CMSMarkStack* revisit_stack,
7277 HeapWord* finger,
7278 HeapWord** global_finger_addr,
7279 Par_MarkFromRootsClosure* parent) :
7280 OopClosure(collector->ref_processor()),
7281 _collector(collector),
7282 _whole_span(collector->_span),
7283 _span(span),
7284 _bit_map(bit_map),
7285 _work_queue(work_queue),
7286 _overflow_stack(overflow_stack),
7287 _revisit_stack(revisit_stack),
7288 _finger(finger),
7289 _global_finger_addr(global_finger_addr),
7290 _parent(parent),
7291 _should_remember_klasses(collector->should_unload_classes())
7292 { }
7293
7294
7295 void CMSCollector::lower_restart_addr(HeapWord* low) {
7296 assert(_span.contains(low), "Out of bounds addr");
7297 if (_restart_addr == NULL) {
7298 _restart_addr = low;
7299 } else {
7300 _restart_addr = MIN2(_restart_addr, low);
7301 }
7302 }
7303
7304 // Upon stack overflow, we discard (part of) the stack,
7305 // remembering the least address amongst those discarded
7306 // in CMSCollector's _restart_addr.
7307 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7308 // Remember the least grey address discarded
7309 HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7310 _collector->lower_restart_addr(ra);
7311 _markStack->reset(); // discard stack contents
7312 _markStack->expand(); // expand the stack if possible
7313 }
7314
7315 // Upon stack overflow, we discard (part of) the stack,
7316 // remembering the least address amongst those discarded
7317 // in CMSCollector's _restart_addr.
7318 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7319 // We need to do this under a mutex to prevent other
7320 // workers from interfering with the expansion below.
7321 MutexLockerEx ml(_overflow_stack->par_lock(),
7322 Mutex::_no_safepoint_check_flag);
7323 // Remember the least grey address discarded
7324 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7325 _collector->lower_restart_addr(ra);
7326 _overflow_stack->reset(); // discard stack contents
7327 _overflow_stack->expand(); // expand the stack if possible
7328 }
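// (Sketch of the restart protocol: if, say, 0x7000, 0x5000 and 0x6400
// are among the addresses discarded on overflow, least_value() yields
// 0x5000, which lower_restart_addr() folds into _restart_addr via
// MIN2; marking is later restarted from _restart_addr, so no grey
// objects are lost.)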
7329
7330
7331 void PushOrMarkClosure::do_oop(oop* p) {
7332 oop thisOop = *p;
7333 // Ignore mark word because we are running concurrently with mutators.
7334 assert(thisOop->is_oop_or_null(true), "expected an oop or NULL");
7335 HeapWord* addr = (HeapWord*)thisOop;
7336 if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7337 // Oop lies in _span and isn't yet grey or black
7338 _bitMap->mark(addr); // now grey
7339 if (addr < _finger) {
7340 // the bit map iteration has already either passed, or
7341 // sampled, this bit in the bit map; we'll need to
7342 // use the marking stack to scan this oop's oops.
7343 bool simulate_overflow = false;
7344 NOT_PRODUCT(
7345 if (CMSMarkStackOverflowALot &&
7346 _collector->simulate_overflow()) {
7347 // simulate a stack overflow
7348 simulate_overflow = true;
7349 }
7350 )
7351 if (simulate_overflow || !_markStack->push(thisOop)) { // stack overflow
7352 if (PrintCMSStatistics != 0) {
7353 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7354 SIZE_FORMAT, _markStack->capacity());
7355 }
7356 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7357 handle_stack_overflow(addr);
7358 }
7359 }
7360 // anything including and to the right of _finger
7361 // will be scanned as we iterate over the remainder of the
7362 // bit map
7363 do_yield_check();
7364 }
7365 }
7366
7367 void Par_PushOrMarkClosure::do_oop(oop* p) {
7368 oop this_oop = *p;
7369 // Ignore mark word because we are running concurrently with mutators.
7370 assert(this_oop->is_oop_or_null(true), "expected an oop or NULL");
7371 HeapWord* addr = (HeapWord*)this_oop;
7372 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7373 // Oop lies in _span and isn't yet grey or black
7374 // We read the global_finger (volatile read) strictly after marking the oop.
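// (Marking first closes a race: had we read the finger before
// marking, the owning task's bit-map scan could sweep past this
// address in the interim without seeing our mark; by marking first,
// an address at or beyond the finger we read is guaranteed to be
// visited by a later scan.)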
7375 bool res = _bit_map->par_mark(addr); // now grey
7376 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7377 // Should we push this marked oop on our stack?
7378 // -- if someone else marked it, nothing to do
7379 // -- if target oop is above global finger nothing to do
7380 // -- if target oop is in chunk and above local finger
7381 // then nothing to do
7382 // -- else push on work queue
7383 if ( !res // someone else marked it, they will deal with it
7384 || (addr >= *gfa) // will be scanned in a later task
7385 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7386 return;
7387 }
7388 // the bit map iteration has already either passed, or
7389 // sampled, this bit in the bit map; we'll need to
7390 // use the marking stack to scan this oop's oops.
7391 bool simulate_overflow = false;
7392 NOT_PRODUCT(
7393 if (CMSMarkStackOverflowALot &&
7394 _collector->simulate_overflow()) {
7395 // simulate a stack overflow
7396 simulate_overflow = true;
7397 }
7398 )
7399 if (simulate_overflow ||
7400 !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
7401 // stack overflow
7402 if (PrintCMSStatistics != 0) {
7403 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7404 SIZE_FORMAT, _overflow_stack->capacity());
7405 }
7406 // We cannot assert that the overflow stack is full because
7407 // it may have been emptied since.
7408 assert(simulate_overflow ||
7409 _work_queue->size() == _work_queue->max_elems(),
7410 "Else push should have succeeded");
7411 handle_stack_overflow(addr);
7412 }
7413 do_yield_check();
7414 }
7415 }
7416
7417
7418 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7419 MemRegion span,
7420 ReferenceProcessor* rp,
7421 CMSBitMap* bit_map,
7422 CMSBitMap* mod_union_table,
7423 CMSMarkStack* mark_stack,
7424 CMSMarkStack* revisit_stack,
7425 bool concurrent_precleaning):
7426 OopClosure(rp),
7427 _collector(collector),
7428 _span(span),
7429 _bit_map(bit_map),
7430 _mod_union_table(mod_union_table),
7431 _mark_stack(mark_stack),
7432 _revisit_stack(revisit_stack),
7433 _concurrent_precleaning(concurrent_precleaning),
7434 _should_remember_klasses(collector->should_unload_classes())
7435 {
7436 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7437 }
7438
7439 // Grey object rescan during pre-cleaning and second checkpoint phases --
7440 // the non-parallel version (the parallel version appears further below.)
7441 void PushAndMarkClosure::do_oop(oop* p) {
7442 oop this_oop = *p;
7443 // Ignore mark word verification. During concurrent precleaning,
7444 // the object's monitor may be locked. During the checkpoint
7445 // phases, the object may already have been reached by a different
7446 // path and may be at the end of the global overflow list (so
7447 // the mark word may be NULL).
7448 assert(this_oop->is_oop_or_null(true/* ignore mark word */),
7449 "expected an oop or NULL");
7450 HeapWord* addr = (HeapWord*)this_oop;
7451 // Check if oop points into the CMS generation
7452 // and is not marked
7453 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7454 // a white object ...
7455 _bit_map->mark(addr); // ... now grey
7456 // push on the marking stack (grey set)
7457 bool simulate_overflow = false;
7458 NOT_PRODUCT(
7459 if (CMSMarkStackOverflowALot &&
7460 _collector->simulate_overflow()) {
7461 // simulate a stack overflow
7462 simulate_overflow = true;
7463 }
7464 )
7465 if (simulate_overflow || !_mark_stack->push(this_oop)) {
7466 if (_concurrent_precleaning) {
7467 // During precleaning we can just dirty the appropriate card
7468 // in the mod union table, thus ensuring that the object remains
7469 // in the grey set, and continue. Note that no one can be interfering
7470 // with us in this action of dirtying the mod union table, so
7471 // no locking is required.
7472 _mod_union_table->mark(addr);
7473 _collector->_ser_pmc_preclean_ovflw++;
7474 } else {
7475 // During the remark phase, we need to remember this oop
7476 // in the overflow list.
7477 _collector->push_on_overflow_list(this_oop);
7478 _collector->_ser_pmc_remark_ovflw++;
7479 }
7480 }
7481 }
7482 }
7483
7484 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7485 MemRegion span,
7486 ReferenceProcessor* rp,
7487 CMSBitMap* bit_map,
7488 OopTaskQueue* work_queue,
7489 CMSMarkStack* revisit_stack):
7490 OopClosure(rp),
7491 _collector(collector),
7492 _span(span),
7493 _bit_map(bit_map),
7494 _work_queue(work_queue),
7495 _revisit_stack(revisit_stack),
7496 _should_remember_klasses(collector->should_unload_classes())
7497 {
7498 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7499 }
7500
7501 // Grey object rescan during second checkpoint phase --
7502 // the parallel version.
7503 void Par_PushAndMarkClosure::do_oop(oop* p) {
7504 oop this_oop = *p;
7505 // In the assert below, we ignore the mark word because
7506 // this oop may point to an already visited object that is
7507 // on the overflow stack (in which case the mark word has
7508 // been hijacked for chaining into the overflow stack --
7509 // if this is the last object in the overflow stack then
7510 // its mark word will be NULL). Because this object may
7511 // have been subsequently popped off the global overflow
7512 // stack, and the mark word possibly restored to the prototypical
7513 // value, by the time we get to examine this failing assert in
7514 // the debugger, is_oop_or_null(false) may have started
7515 // to hold.
7516 assert(this_oop->is_oop_or_null(true),
7517 "expected an oop or NULL");
7518 HeapWord* addr = (HeapWord*)this_oop;
7519 // Check if oop points into the CMS generation
7520 // and is not marked
7521 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7522 // a white object ...
7523 // If we manage to "claim" the object, by being the
7524 // first thread to mark it, then we push it on our
7525 // marking stack
7526 if (_bit_map->par_mark(addr)) { // ... now grey
7527 // push on work queue (grey set)
7528 bool simulate_overflow = false;
7529 NOT_PRODUCT(
7530 if (CMSMarkStackOverflowALot &&
7531 _collector->par_simulate_overflow()) {
7532 // simulate a stack overflow
7533 simulate_overflow = true;
7534 }
7535 )
7536 if (simulate_overflow || !_work_queue->push(this_oop)) {
7537 _collector->par_push_on_overflow_list(this_oop);
7538 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
7539 }
7540 } // Else, some other thread got there first
7541 }
7542 }
7543
7544 void PushAndMarkClosure::remember_klass(Klass* k) {
7545 if (!_revisit_stack->push(oop(k))) {
7546 fatal("Revisit stack overflowed in PushAndMarkClosure");
7547 }
7548 }
7549
7550 void Par_PushAndMarkClosure::remember_klass(Klass* k) {
7551 if (!_revisit_stack->par_push(oop(k))) {
7552 fatal("Revisit stack overflowed in Par_PushAndMarkClosure");
7553 }
7554 }
7555
7556 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7557 Mutex* bml = _collector->bitMapLock();
7558 assert_lock_strong(bml);
7559 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7560 "CMS thread should hold CMS token");
7561
7562 bml->unlock();
7563 ConcurrentMarkSweepThread::desynchronize(true);
7564
7565 ConcurrentMarkSweepThread::acknowledge_yield_request();
7566
7567 _collector->stopTimer();
7568 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7569 if (PrintCMSStatistics != 0) {
7570 _collector->incrementYields();
7571 }
7572 _collector->icms_wait();
7573
7574 // See the comment in coordinator_yield()
7575 for (unsigned i = 0; i < CMSYieldSleepCount &&
7576 ConcurrentMarkSweepThread::should_yield() &&
7577 !CMSCollector::foregroundGCIsActive(); ++i) {
7578 os::sleep(Thread::current(), 1, false);
7579 ConcurrentMarkSweepThread::acknowledge_yield_request();
7580 }
7581
7582 ConcurrentMarkSweepThread::synchronize(true);
7583 bml->lock();
7584
7585 _collector->startTimer();
7586 }
7587
7588 bool CMSPrecleanRefsYieldClosure::should_return() {
7589 if (ConcurrentMarkSweepThread::should_yield()) {
7590 do_yield_work();
7591 }
7592 return _collector->foregroundGCIsActive();
7593 }
7594
7595 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7596 assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7597 "mr should be aligned to start at a card boundary");
7598 // We'd like to assert:
7599 // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7600 // "mr should be a range of cards");
7601 // However, that would be too strong in one case -- the last
7602 // partition ends at _unallocated_block which, in general, can be
7603 // an arbitrary boundary, not necessarily card aligned.
7604 if (PrintCMSStatistics != 0) {
7605 _num_dirty_cards +=
7606 mr.word_size()/CardTableModRefBS::card_size_in_words;
7607 }
7608 _space->object_iterate_mem(mr, &_scan_cl);
7609 }
7610
7611 SweepClosure::SweepClosure(CMSCollector* collector,
7612 ConcurrentMarkSweepGeneration* g,
7613 CMSBitMap* bitMap, bool should_yield) :
7614 _collector(collector),
7615 _g(g),
7616 _sp(g->cmsSpace()),
7617 _limit(_sp->sweep_limit()),
7618 _freelistLock(_sp->freelistLock()),
7619 _bitMap(bitMap),
7620 _yield(should_yield),
7621 _inFreeRange(false), // No free range at beginning of sweep
7622 _freeRangeInFreeLists(false), // No free range at beginning of sweep
7623 _lastFreeRangeCoalesced(false),
7624 _freeFinger(g->used_region().start())
7625 {
7626 NOT_PRODUCT(
7627 _numObjectsFreed = 0;
7628 _numWordsFreed = 0;
7629 _numObjectsLive = 0;
7630 _numWordsLive = 0;
7631 _numObjectsAlreadyFree = 0;
7632 _numWordsAlreadyFree = 0;
7633 _last_fc = NULL;
7634
7635 _sp->initializeIndexedFreeListArrayReturnedBytes();
7636 _sp->dictionary()->initializeDictReturnedBytes();
7637 )
7638 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7639 "sweep _limit out of bounds");
7640 if (CMSTraceSweeper) {
7641 gclog_or_tty->print("\n====================\nStarting new sweep\n");
7642 }
7643 }
7644
7645 // We need this destructor to reclaim any free space at the end
7646 // of the space, which do_blk below may not have added back to
7647 // the free lists. [basically dealing with the "fringe effect"]
7648 SweepClosure::~SweepClosure() {
7649 assert_lock_strong(_freelistLock);
7650 // this should be treated as the end of a free run if any
7651 // The current free range should be returned to the free lists
7652 // as one coalesced chunk.
7653 if (inFreeRange()) {
7654 flushCurFreeChunk(freeFinger(),
7655 pointer_delta(_limit, freeFinger()));
7656 assert(freeFinger() < _limit, "the finger pointeth off base");
7657 if (CMSTraceSweeper) {
7658 gclog_or_tty->print("destructor:");
7659 gclog_or_tty->print("Sweep:put_free_blk 0x%x ("SIZE_FORMAT") "
7660 "[coalesced:"SIZE_FORMAT"]\n",
7661 freeFinger(), pointer_delta(_limit, freeFinger()),
7662 lastFreeRangeCoalesced());
7663 }
7664 }
7665 NOT_PRODUCT(
7666 if (Verbose && PrintGC) {
7667 gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
7668 SIZE_FORMAT " bytes",
7669 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7670 gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, "
7671 SIZE_FORMAT" bytes "
7672 "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
7673 _numObjectsLive, _numWordsLive*sizeof(HeapWord),
7674 _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7675 size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) *
7676 sizeof(HeapWord);
7677 gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
7678
7679 if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
7680 size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7681 size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
7682 size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
7683 gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
7684 gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes",
7685 indexListReturnedBytes);
7686 gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes",
7687 dictReturnedBytes);
7688 }
7689 }
7690 )
7691 // Now, in debug mode, just null out the sweep_limit
7692 NOT_PRODUCT(_sp->clear_sweep_limit();)
7693 if (CMSTraceSweeper) {
7694 gclog_or_tty->print("end of sweep\n================\n");
7695 }
7696 }
7697
7698 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7699 bool freeRangeInFreeLists) {
7700 if (CMSTraceSweeper) {
7701 gclog_or_tty->print("---- Start free range 0x%x with free block [%d] (%d)\n",
7702 freeFinger, _sp->block_size(freeFinger),
7703 freeRangeInFreeLists);
7704 }
7705 assert(!inFreeRange(), "Trampling existing free range");
7706 set_inFreeRange(true);
7707 set_lastFreeRangeCoalesced(false);
7708
7709 set_freeFinger(freeFinger);
7710 set_freeRangeInFreeLists(freeRangeInFreeLists);
7711 if (CMSTestInFreeList) {
7712 if (freeRangeInFreeLists) {
7713 FreeChunk* fc = (FreeChunk*) freeFinger;
7714 assert(fc->isFree(), "A chunk on the free list should be free.");
7715 assert(fc->size() > 0, "Free range should have a size");
7716 assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists");
7717 }
7718 }
7719 }
7720
7721 // Note that the sweeper runs concurrently with mutators. Thus,
7722 // it is possible for direct allocation in this generation to happen
7723 // in the middle of the sweep. Note that the sweeper also coalesces
7724 // contiguous free blocks. Thus, unless the sweeper and the allocator
7725 // synchronize appropriately, freshly allocated blocks may get swept up.
7726 // Such synchronization is accomplished by the sweeper locking the free
7727 // lists while it is sweeping. Thus blocks that are determined to be
7728 // free are indeed free. There is, however, one additional complication:
7729 // blocks that have been allocated since the final checkpoint and
7730 // mark, will not have been marked and so would be treated as
7731 // unreachable and swept up. To prevent this, the allocator marks
7732 // the bit map when allocating during the sweep phase. This leads,
7733 // however, to a further complication -- objects may have been allocated
7734 // but not yet initialized -- in the sense that the header isn't yet
7735 // installed. The sweeper cannot then determine the size of the block
7736 // in order to skip over it. To deal with this case, we use a technique
7737 // (due to Printezis) to encode such uninitialized block sizes in the
7738 // bit map. Since the bit map uses a bit per every HeapWord, but the
7739 // CMS generation has a minimum object size of 3 HeapWords, it follows
7740 // that "normal marks" won't be adjacent in the bit map (there will
7741 // always be at least two 0 bits between successive 1 bits). We make use
7742 // of these "unused" bits to represent uninitialized blocks -- the bit
7743 // corresponding to the start of the uninitialized object and the next
7744 // bit are both set. Finally, a 1 bit marks the end of the object that
7745 // started with the two consecutive 1 bits to indicate its potentially
7746 // uninitialized state.
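// For example (illustrative only), an uninitialized block of 5 HeapWords
// starting at bit-map offset k is encoded as:
//
//     bit index:  k   k+1  k+2  k+3  k+4
//     bit value:  1    1    0    0    1
//
// The adjacent 1-bits at k and k+1 flag "uninitialized"; the 1-bit at
// k+4 marks the last word, giving size == (k+4) - k + 1 == 5 words.
// This is exactly the decoding performed in doLiveChunk() below.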
7747
7748 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7749 FreeChunk* fc = (FreeChunk*)addr;
7750 size_t res;
7751
7752 // check if we are done sweeping
7753 if (addr == _limit) { // we have swept up to the limit, do nothing more
7754 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7755 "sweep _limit out of bounds");
7756 // help the closure application finish
7757 return pointer_delta(_sp->end(), _limit);
7758 }
7759 assert(addr <= _limit, "sweep invariant");
7760
7761 // check if we should yield
7762 do_yield_check(addr);
7763 if (fc->isFree()) {
7764 // Chunk that is already free
7765 res = fc->size();
7766 doAlreadyFreeChunk(fc);
7767 debug_only(_sp->verifyFreeLists());
7768 assert(res == fc->size(), "Don't expect the size to change");
7769 NOT_PRODUCT(
7770 _numObjectsAlreadyFree++;
7771 _numWordsAlreadyFree += res;
7772 )
7773 NOT_PRODUCT(_last_fc = fc;)
7774 } else if (!_bitMap->isMarked(addr)) {
7775 // Chunk is fresh garbage
7776 res = doGarbageChunk(fc);
7777 debug_only(_sp->verifyFreeLists());
7778 NOT_PRODUCT(
7779 _numObjectsFreed++;
7780 _numWordsFreed += res;
7781 )
7782 } else {
7783 // Chunk that is alive.
7784 res = doLiveChunk(fc);
7785 debug_only(_sp->verifyFreeLists());
7786 NOT_PRODUCT(
7787 _numObjectsLive++;
7788 _numWordsLive += res;
7789 )
7790 }
7791 return res;
7792 }
7793
7794 // For the smart allocation scheme, record the following:
7795 // split deaths - a free chunk is removed from its free list because
7796 // it is being split into two or more chunks.
7797 // split birth - a free chunk is being added to its free list because
7798 // a larger free chunk has been split and resulted in this free chunk.
7799 // coal death - a free chunk is being removed from its free list because
7800 // it is being coalesced into a large free chunk.
7801 // coal birth - a free chunk is being added to its free list because
7802 // it was created when two or more free chunks were coalesced into
7803 // this free chunk.
7804 //
7805 // These statistics are used to determine the desired number of free
7806 // chunks of a given size. The desired number is chosen to be relative
7807 // to the end of a CMS sweep. The desired number at the end of a sweep
7808 // is the
7809 // count-at-end-of-previous-sweep (an amount that was enough)
7810 // - count-at-beginning-of-current-sweep (the excess)
7811 // + split-births (gains in this size during interval)
7812 // - split-deaths (demands on this size during interval)
7813 // where the interval is from the end of one sweep to the end of the
7814 // next.
7815 //
7816 // When sweeping the sweeper maintains an accumulated chunk which is
7817 // the chunk that is made up of chunks that have been coalesced. That
7818 // will be termed the left-hand chunk. A new chunk of garbage that
7819 // is being considered for coalescing will be referred to as the
7820 // right-hand chunk.
7821 //
7822 // When making a decision on whether to coalesce a right-hand chunk with
7823 // the current left-hand chunk, the current count vs. the desired count
7824 // of the left-hand chunk is considered. Also if the right-hand chunk
7825 // is near the large chunk at the end of the heap (see
7826 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7827 // left-hand chunk is coalesced.
7828 //
7829 // When making a decision about whether to split a chunk, the desired count
7830 // vs. the current count of the candidate to be split is also considered.
7831 // If the candidate is underpopulated (currently fewer chunks than desired)
7832 // a chunk of an overpopulated (currently more chunks than desired) size may
7833 // be chosen. The "hint" associated with a free list, if non-null, points
7834 // to a free list which may be overpopulated.
7835 //
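// (A hypothetical instance of the computation above: if a given size
// had 40 chunks at the end of the previous sweep, 25 at the start of
// this one, and saw 10 split-births and 5 split-deaths in between,
// the desired count is 40 - 25 + 10 - 5 = 20.)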
7836
7837 void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
7838 size_t size = fc->size();
7839 // Chunks that cannot be coalesced are not in the
7840 // free lists.
7841 if (CMSTestInFreeList && !fc->cantCoalesce()) {
7842 assert(_sp->verifyChunkInFreeLists(fc),
7843 "free chunk should be in free lists");
7844 }
7845 // a chunk that is already free, should not have been
7846 // marked in the bit map
7847 HeapWord* addr = (HeapWord*) fc;
7848 assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7849 // Verify that the bit map has no bits marked between
7850 // addr and purported end of this block.
7851 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7852
7853 // Some chunks cannot be coalesced under any circumstances.
7854 // See the definition of cantCoalesce().
7855 if (!fc->cantCoalesce()) {
7856 // This chunk can potentially be coalesced.
7857 if (_sp->adaptive_freelists()) {
7858 // All the work is done in
7859 doPostIsFreeOrGarbageChunk(fc, size);
7860 } else { // Not adaptive free lists
7861 // this is a free chunk that can potentially be coalesced by the sweeper;
7862 if (!inFreeRange()) {
7863 // if the next chunk is a free block that can't be coalesced,
7864 // it doesn't make sense to remove this chunk from the free lists
7865 FreeChunk* nextChunk = (FreeChunk*)(addr + size);
7866 assert((HeapWord*)nextChunk <= _limit, "sweep invariant");
7867 if ((HeapWord*)nextChunk < _limit && // there's a next chunk...
7868 nextChunk->isFree() && // which is free...
7869 nextChunk->cantCoalesce()) { // ... but can't be coalesced
7870 // nothing to do
7871 } else {
7872 // Potentially the start of a new free range:
7873 // Don't eagerly remove it from the free lists.
7874 // No need to remove it if it will just be put
7875 // back again. (Also, from a pragmatic point of view,
7876 // if it is a free block in a region that is beyond
7877 // any allocated blocks, an assertion will fail)
7878 // Remember the start of a free run.
7879 initialize_free_range(addr, true);
7880 // end - can coalesce with next chunk
7881 }
7882 } else {
7883 // in the midst of a free range, we are coalescing
7884 debug_only(record_free_block_coalesced(fc);)
7885 if (CMSTraceSweeper) {
7886 gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size);
7887 }
7888 // remove it from the free lists
7889 _sp->removeFreeChunkFromFreeLists(fc);
7890 set_lastFreeRangeCoalesced(true);
7891 // If the chunk is being coalesced and the current free range is
7892 // in the free lists, remove the current free range so that it
7893 // will be returned to the free lists in its entirety - all
7894 // the coalesced pieces included.
7895 if (freeRangeInFreeLists()) {
7896 FreeChunk* ffc = (FreeChunk*) freeFinger();
7897 assert(ffc->size() == pointer_delta(addr, freeFinger()),
7898 "Size of free range is inconsistent with chunk size.");
7899 if (CMSTestInFreeList) {
7900 assert(_sp->verifyChunkInFreeLists(ffc),
7901 "free range is not in free lists");
7902 }
7903 _sp->removeFreeChunkFromFreeLists(ffc);
7904 set_freeRangeInFreeLists(false);
7905 }
7906 }
7907 }
7908 } else {
7909 // Code path common to both original and adaptive free lists.
7910
7911 // can't coalesce with previous block; this should be treated
7912 // as the end of a free run if any
7913 if (inFreeRange()) {
7914 // we kicked some butt; time to pick up the garbage
7915 assert(freeFinger() < addr, "the finger pointeth off base");
7916 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
7917 }
7918 // else, nothing to do, just continue
7919 }
7920 }
7921
7922 size_t SweepClosure::doGarbageChunk(FreeChunk* fc) {
7923 // This is a chunk of garbage. It is not in any free list.
7924 // Add it to a free list or let it possibly be coalesced into
7925 // a larger chunk.
7926 HeapWord* addr = (HeapWord*) fc;
7927 size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7928
7929 if (_sp->adaptive_freelists()) {
7930 // Verify that the bit map has no bits marked between
7931 // addr and purported end of just dead object.
7932 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7933
7934 doPostIsFreeOrGarbageChunk(fc, size);
7935 } else {
7936 if (!inFreeRange()) {
7937 // start of a new free range
7938 assert(size > 0, "A free range should have a size");
7939 initialize_free_range(addr, false);
7940
7941 } else {
7942 // this will be swept up when we hit the end of the
7943 // free range
7944 if (CMSTraceSweeper) {
7945 gclog_or_tty->print(" -- pick up garbage 0x%x (%d) \n", fc, size);
7946 }
7947 // If the chunk is being coalesced and the current free range is
7948 // in the free lists, remove the current free range so that it
7949 // will be returned to the free lists in its entirety - all
7950 // the coalesced pieces included.
7951 if (freeRangeInFreeLists()) {
7952 FreeChunk* ffc = (FreeChunk*)freeFinger();
7953 assert(ffc->size() == pointer_delta(addr, freeFinger()),
7954 "Size of free range is inconsistent with chunk size.");
7955 if (CMSTestInFreeList) {
7956 assert(_sp->verifyChunkInFreeLists(ffc),
7957 "free range is not in free lists");
7958 }
7959 _sp->removeFreeChunkFromFreeLists(ffc);
7960 set_freeRangeInFreeLists(false);
7961 }
7962 set_lastFreeRangeCoalesced(true);
7963 }
7964 // this will be swept up when we hit the end of the free range
7965
7966 // Verify that the bit map has no bits marked between
7967 // addr and purported end of just dead object.
7968 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7969 }
7970 return size;
7971 }
7972
7973 size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
7974 HeapWord* addr = (HeapWord*) fc;
7975 // The sweeper has just found a live object. Return any accumulated
7976 // left hand chunk to the free lists.
7977 if (inFreeRange()) {
7978 if (_sp->adaptive_freelists()) {
7979 flushCurFreeChunk(freeFinger(),
7980 pointer_delta(addr, freeFinger()));
7981 } else { // not adaptive freelists
7982 set_inFreeRange(false);
7983 // Add the free range back to the free list if it is not already
7984 // there.
7985 if (!freeRangeInFreeLists()) {
7986 assert(freeFinger() < addr, "the finger pointeth off base");
7987 if (CMSTraceSweeper) {
7988 gclog_or_tty->print("Sweep:put_free_blk 0x%x (%d) "
7989 "[coalesced:%d]\n",
7990 freeFinger(), pointer_delta(addr, freeFinger()),
7991 lastFreeRangeCoalesced());
7992 }
7993 _sp->addChunkAndRepairOffsetTable(freeFinger(),
7994 pointer_delta(addr, freeFinger()), lastFreeRangeCoalesced());
7995 }
7996 }
7997 }
7998
7999 // Common code path for original and adaptive free lists.
8000
8001 // this object is live: we'd normally expect this to be
8002 // an oop, and like to assert the following:
8003 // assert(oop(addr)->is_oop(), "live block should be an oop");
8004 // However, as we commented above, this may be an object whose
8005 // header hasn't yet been initialized.
8006 size_t size;
8007 assert(_bitMap->isMarked(addr), "Tautology for this control point");
8008 if (_bitMap->isMarked(addr + 1)) {
8009 // Determine the size from the bit map, rather than trying to
8010 // compute it from the object header.
8011 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8012 size = pointer_delta(nextOneAddr + 1, addr);
8013 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8014 "alignment problem");
8015
8016 #ifdef DEBUG
8017 if (oop(addr)->klass() != NULL &&
8018 ( !_collector->should_unload_classes()
8019 || oop(addr)->is_parsable())) {
8020 // Ignore mark word because we are running concurrently with mutators
8021 assert(oop(addr)->is_oop(true), "live block should be an oop");
8022 assert(size ==
8023 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8024 "P-mark and computed size do not agree");
8025 }
8026 #endif
8027
8028 } else {
8029 // This should be an initialized object that's alive.
8030 assert(oop(addr)->klass() != NULL &&
8031 (!_collector->should_unload_classes()
8032 || oop(addr)->is_parsable()),
8033 "Should be an initialized object");
8034 // Ignore mark word because we are running concurrently with mutators
8035 assert(oop(addr)->is_oop(true), "live block should be an oop");
8036 // Verify that the bit map has no bits marked between
8037 // addr and purported end of this block.
8038 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8039 assert(size >= 3, "Necessary for Printezis marks to work");
8040 assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8041 DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8042 }
8043 return size;
8044 }
8045
8046 void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
8047 size_t chunkSize) {
8048 // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
8049 // scheme.
8050 bool fcInFreeLists = fc->isFree();
8051 assert(_sp->adaptive_freelists(), "Should only be used in this case.");
8052 assert((HeapWord*)fc <= _limit, "sweep invariant");
8053 if (CMSTestInFreeList && fcInFreeLists) {
8054 assert(_sp->verifyChunkInFreeLists(fc),
8055 "free chunk is not in free lists");
8056 }
8057
8058
8059 if (CMSTraceSweeper) {
8060 gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
8061 }
8062
8063 HeapWord* addr = (HeapWord*) fc;
8064
8065 bool coalesce;
8066 size_t left = pointer_delta(addr, freeFinger());
8067 size_t right = chunkSize;
8068 switch (FLSCoalescePolicy) {
8069 // numeric value forms a coalescing aggressiveness metric
8070 case 0: { // never coalesce
8071 coalesce = false;
8072 break;
8073 }
8074 case 1: { // coalesce if left & right chunks on overpopulated lists
8075 coalesce = _sp->coalOverPopulated(left) &&
8076 _sp->coalOverPopulated(right);
8077 break;
8078 }
8079 case 2: { // coalesce if left chunk on overpopulated list (default)
8080 coalesce = _sp->coalOverPopulated(left);
8081 break;
8082 }
8083 case 3: { // coalesce if left OR right chunk on overpopulated list
8084 coalesce = _sp->coalOverPopulated(left) ||
8085 _sp->coalOverPopulated(right);
8086 break;
8087 }
8088 case 4: { // always coalesce
8089 coalesce = true;
8090 break;
8091 }
8092 default:
8093 ShouldNotReachHere();
8094 }
8095
8096 // Should the current free range be coalesced?
8097 // If the chunk is in a free range and either we decided to coalesce above
8098 // or the chunk is near the large block at the end of the heap
8099 // (isNearLargestChunk() returns true), then coalesce this chunk.
8100 bool doCoalesce = inFreeRange() &&
8101 (coalesce || _g->isNearLargestChunk((HeapWord*)fc));
8102 if (doCoalesce) {
8103 // Coalesce the current free range on the left with the new
8104 // chunk on the right. If either is on a free list,
8105 // it must be removed from the list and stashed in the closure.
8106 if (freeRangeInFreeLists()) {
8107 FreeChunk* ffc = (FreeChunk*)freeFinger();
8108 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8109 "Size of free range is inconsistent with chunk size.");
8110 if (CMSTestInFreeList) {
8111 assert(_sp->verifyChunkInFreeLists(ffc),
8112 "Chunk is not in free lists");
8113 }
8114 _sp->coalDeath(ffc->size());
8115 _sp->removeFreeChunkFromFreeLists(ffc);
8116 set_freeRangeInFreeLists(false);
8117 }
8118 if (fcInFreeLists) {
8119 _sp->coalDeath(chunkSize);
8120 assert(fc->size() == chunkSize,
8121 "The chunk has the wrong size or is not in the free lists");
8122 _sp->removeFreeChunkFromFreeLists(fc);
8123 }
8124 set_lastFreeRangeCoalesced(true);
8125 } else { // not in a free range and/or should not coalesce
8126 // Return the current free range and start a new one.
8127 if (inFreeRange()) {
8128 // In a free range but cannot coalesce with the right hand chunk.
8129 // Put the current free range into the free lists.
8130 flushCurFreeChunk(freeFinger(),
8131 pointer_delta(addr, freeFinger()));
8132 }
8133 // Set up for new free range. Pass along whether the right hand
8134 // chunk is in the free lists.
8135 initialize_free_range((HeapWord*)fc, fcInFreeLists);
8136 }
8137 }
8138 void SweepClosure::flushCurFreeChunk(HeapWord* chunk, size_t size) {
8139 assert(inFreeRange(), "Should only be called if currently in a free range.");
8140 assert(size > 0,
8141 "A zero sized chunk cannot be added to the free lists.");
8142 if (!freeRangeInFreeLists()) {
8143 if (CMSTestInFreeList) {
8144 FreeChunk* fc = (FreeChunk*) chunk;
8145 fc->setSize(size);
8146 assert(!_sp->verifyChunkInFreeLists(fc),
8147 "chunk should not be in free lists yet");
8148 }
8149 if (CMSTraceSweeper) {
8150 gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
8151 chunk, size);
8152 }
8153 // A new free range is going to be starting. The current
8154 // free range has not been added to the free lists yet or
8155 // was removed so add it back.
8156 // If the current free range was coalesced, then the death
8157 // of the free range was recorded. Record a birth now.
8158 if (lastFreeRangeCoalesced()) {
8159 _sp->coalBirth(size);
8160 }
8161 _sp->addChunkAndRepairOffsetTable(chunk, size,
8162 lastFreeRangeCoalesced());
8163 }
8164 set_inFreeRange(false);
8165 set_freeRangeInFreeLists(false);
8166 }
8167
8168 // We take a break if we've been at this for a while,
8169 // so as to avoid monopolizing the locks involved.
8170 void SweepClosure::do_yield_work(HeapWord* addr) {
8171 // Return current free chunk being used for coalescing (if any)
8172 // to the appropriate freelist. After yielding, the next
8173 // free block encountered will start a coalescing range of
8174 // free blocks. If the next free block is adjacent to the
8175 // chunk just flushed, they will need to wait for the next
8176 // sweep to be coalesced.
8177 if (inFreeRange()) {
8178 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
8179 }
8180
8181 // First give up the locks, then yield, then re-lock.
8182 // We should probably use a constructor/destructor idiom to
8183 // do this unlock/lock or modify the MutexUnlocker class to
8184 // serve our purpose. XXX
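// (A hypothetical RAII helper, sketched only as a comment:
//    class ReverseBitMapLocker {       // unlock in ctor, relock in dtor
//      CMSBitMap* _bm;
//     public:
//      ReverseBitMapLocker(CMSBitMap* bm) : _bm(bm) { _bm->lock()->unlock(); }
//      ~ReverseBitMapLocker() { _bm->lock()->lock_without_safepoint_check(); }
//    };
//  would make the unlock/relock pairing below robust against early
//  returns; no such helper exists here.)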
8185 assert_lock_strong(_bitMap->lock());
8186 assert_lock_strong(_freelistLock);
8187 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8188 "CMS thread should hold CMS token");
8189 _bitMap->lock()->unlock();
8190 _freelistLock->unlock();
8191 ConcurrentMarkSweepThread::desynchronize(true);
8192 ConcurrentMarkSweepThread::acknowledge_yield_request();
8193 _collector->stopTimer();
8194 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8195 if (PrintCMSStatistics != 0) {
8196 _collector->incrementYields();
8197 }
8198 _collector->icms_wait();
8199
8200 // See the comment in coordinator_yield()
8201 for (unsigned i = 0; i < CMSYieldSleepCount &&
8202 ConcurrentMarkSweepThread::should_yield() &&
8203 !CMSCollector::foregroundGCIsActive(); ++i) {
8204 os::sleep(Thread::current(), 1, false);
8205 ConcurrentMarkSweepThread::acknowledge_yield_request();
8206 }
8207
8208 ConcurrentMarkSweepThread::synchronize(true);
8209 _freelistLock->lock();
8210 _bitMap->lock()->lock_without_safepoint_check();
8211 _collector->startTimer();
8212 }
8213
8214 #ifndef PRODUCT
8215 // This is actually very useful in a product build if it can
8216 // be called from the debugger. Compile it into the product
8217 // as needed.
8218 bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
8219 return debug_cms_space->verifyChunkInFreeLists(fc);
8220 }
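// (Illustrative debugger usage, assuming a gdb session on a build that
//  compiles this function in:
//    (gdb) call debug_verifyChunkInFreeLists((FreeChunk*)0x2aaaae3f0000)
//  returns whether the given chunk is on debug_cms_space's free lists.)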
8221
8222 void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const {
8223 if (CMSTraceSweeper) {
8224 gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size());
8225 }
8226 }
8227 #endif
8228
8229 // CMSIsAliveClosure
8230 bool CMSIsAliveClosure::do_object_b(oop obj) {
8231 HeapWord* addr = (HeapWord*)obj;
8232 return addr != NULL &&
8233 (!_span.contains(addr) || _bit_map->isMarked(addr));
8234 }
8235
8236 // CMSKeepAliveClosure: the serial version
8237 void CMSKeepAliveClosure::do_oop(oop* p) {
8238 oop this_oop = *p;
8239 HeapWord* addr = (HeapWord*)this_oop;
8240 if (_span.contains(addr) &&
8241 !_bit_map->isMarked(addr)) {
8242 _bit_map->mark(addr);
8243 bool simulate_overflow = false;
8244 NOT_PRODUCT(
8245 if (CMSMarkStackOverflowALot &&
8246 _collector->simulate_overflow()) {
8247 // simulate a stack overflow
8248 simulate_overflow = true;
8249 }
8250 )
8251 if (simulate_overflow || !_mark_stack->push(this_oop)) {
8252 _collector->push_on_overflow_list(this_oop);
8253 _collector->_ser_kac_ovflw++;
8254 }
8255 }
8256 }
8257
8258 // CMSParKeepAliveClosure: a parallel version of the above.
8259 // The work queues are private to each closure (thread),
8260 // but (may be) available for stealing by other threads.
8261 void CMSParKeepAliveClosure::do_oop(oop* p) {
8262 oop this_oop = *p;
8263 HeapWord* addr = (HeapWord*)this_oop;
8264 if (_span.contains(addr) &&
8265 !_bit_map->isMarked(addr)) {
8266 // In general, during recursive tracing, several threads
8267 // may be concurrently getting here; the first one to
8268 // "tag" it, claims it.
8269 if (_bit_map->par_mark(addr)) {
8270 bool res = _work_queue->push(this_oop);
8271 assert(res, "Low water mark should be much less than capacity");
8272 // Do a recursive trim in the hope that this will keep
8273 // stack usage lower, but leave some oops for potential stealers
8274 trim_queue(_low_water_mark);
8275 } // Else, another thread got there first
8276 }
8277 }
8278
8279 void CMSParKeepAliveClosure::trim_queue(uint max) {
8280 while (_work_queue->size() > max) {
8281 oop new_oop;
8282 if (_work_queue->pop_local(new_oop)) {
8283 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8284 assert(_bit_map->isMarked((HeapWord*)new_oop),
8285 "no white objects on this stack!");
8286 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8287 // iterate over the oops in this oop, marking and pushing
8288 // the ones in CMS heap (i.e. in _span).
8289 new_oop->oop_iterate(&_mark_and_push);
8290 }
8291 }
8292 }
8293
8294 void CMSInnerParMarkAndPushClosure::do_oop(oop* p) {
8295 oop this_oop = *p;
8296 HeapWord* addr = (HeapWord*)this_oop;
8297 if (_span.contains(addr) &&
8298 !_bit_map->isMarked(addr)) {
8299 if (_bit_map->par_mark(addr)) {
8300 bool simulate_overflow = false;
8301 NOT_PRODUCT(
8302 if (CMSMarkStackOverflowALot &&
8303 _collector->par_simulate_overflow()) {
8304 // simulate a stack overflow
8305 simulate_overflow = true;
8306 }
8307 )
8308 if (simulate_overflow || !_work_queue->push(this_oop)) {
8309 _collector->par_push_on_overflow_list(this_oop);
8310 _collector->_par_kac_ovflw++;
8311 }
8312 } // Else another thread got there already
8313 }
8314 }
8315
8316 //////////////////////////////////////////////////////////////////
8317 // CMSExpansionCause /////////////////////////////
8318 //////////////////////////////////////////////////////////////////
8319 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8320 switch (cause) {
8321 case _no_expansion:
8322 return "No expansion";
8323 case _satisfy_free_ratio:
8324 return "Free ratio";
8325 case _satisfy_promotion:
8326 return "Satisfy promotion";
8327 case _satisfy_allocation:
8328 return "allocation";
8329 case _allocate_par_lab:
8330 return "Par LAB";
8331 case _allocate_par_spooling_space:
8332 return "Par Spooling Space";
8333 case _adaptive_size_policy:
8334 return "Ergonomics";
8335 default:
8336 return "unknown";
8337 }
8338 }
8339
8340 void CMSDrainMarkingStackClosure::do_void() {
8341 // the max number to take from overflow list at a time
8342 const size_t num = _mark_stack->capacity()/4;
8343 while (!_mark_stack->isEmpty() ||
8344 // if stack is empty, check the overflow list
8345 _collector->take_from_overflow_list(num, _mark_stack)) {
8346 oop this_oop = _mark_stack->pop();
8347 HeapWord* addr = (HeapWord*)this_oop;
8348 assert(_span.contains(addr), "Should be within span");
8349 assert(_bit_map->isMarked(addr), "Should be marked");
8350 assert(this_oop->is_oop(), "Should be an oop");
8351 this_oop->oop_iterate(_keep_alive);
8352 }
8353 }
8354
8355 void CMSParDrainMarkingStackClosure::do_void() {
8356 // drain queue
8357 trim_queue(0);
8358 }
8359
8360 // Trim our work_queue so its length is below max at return
8361 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8362 while (_work_queue->size() > max) {
8363 oop new_oop;
8364 if (_work_queue->pop_local(new_oop)) {
8365 assert(new_oop->is_oop(), "Expected an oop");
8366 assert(_bit_map->isMarked((HeapWord*)new_oop),
8367 "no white objects on this stack!");
8368 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8369 // iterate over the oops in this oop, marking and pushing
8370 // the ones in CMS heap (i.e. in _span).
8371 new_oop->oop_iterate(&_mark_and_push);
8372 }
8373 }
8374 }
8375
8376 ////////////////////////////////////////////////////////////////////
8377 // Support for Marking Stack Overflow list handling and related code
8378 ////////////////////////////////////////////////////////////////////
8379 // Much of the following code is similar in shape and spirit to the
8380 // code used in ParNewGC. We should try and share that code
8381 // as much as possible in the future.
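// The overflow list is a singly-linked list threaded through the mark
// words of the overflowed objects themselves, e.g. (sketch):
//
//    _overflow_list --> A --> B --> C --> NULL
//
// where A->mark() == markOop(B), B->mark() == markOop(C), and
// C->mark() == NULL; the displaced marks are reset to
// markOopDesc::prototype() (or restored from the preserved-marks
// stacks) as objects are taken back off the list.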
8382
8383 #ifndef PRODUCT
8384 // Debugging support for CMSStackOverflowALot
8385
8386 // It's OK to call this multi-threaded; the worst thing
8387 // that can happen is that we'll get a bunch of closely
8388 // spaced simulated overflows, but that's OK, in fact
8389 // probably good as it would exercise the overflow code
8390 // under contention.
8391 bool CMSCollector::simulate_overflow() {
8392 if (_overflow_counter-- <= 0) { // just being defensive
8393 _overflow_counter = CMSMarkStackOverflowInterval;
8394 return true;
8395 } else {
8396 return false;
8397 }
8398 }
8399
8400 bool CMSCollector::par_simulate_overflow() {
8401 return simulate_overflow();
8402 }
8403 #endif
8404
8405 // Single-threaded
8406 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
8407 assert(stack->isEmpty(), "Expected precondition");
8408 assert(stack->capacity() > num, "Shouldn't bite more than can chew");
8409 size_t i = num;
8410 oop cur = _overflow_list;
8411 const markOop proto = markOopDesc::prototype();
8412 NOT_PRODUCT(size_t n = 0;)
8413 for (oop next; i > 0 && cur != NULL; cur = next, i--) {
8414 next = oop(cur->mark());
8415 cur->set_mark(proto); // until proven otherwise
8416 assert(cur->is_oop(), "Should be an oop");
8417 bool res = stack->push(cur);
8418 assert(res, "Bit off more than can chew?");
8419 NOT_PRODUCT(n++;)
8420 }
8421 _overflow_list = cur;
8422 #ifndef PRODUCT
8423 assert(_num_par_pushes >= n, "Too many pops?");
8424 _num_par_pushes -= n;
8425 #endif
8426 return !stack->isEmpty();
8427 }
8428
8429 // Multi-threaded; use CAS to break off a prefix
8430 bool CMSCollector::par_take_from_overflow_list(size_t num,
8431 OopTaskQueue* work_q) {
8432 assert(work_q->size() == 0, "That's the current policy");
8433 assert(num < work_q->max_elems(), "Can't bite more than we can chew");
8434 if (_overflow_list == NULL) {
8435 return false;
8436 }
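// (Sketch: given list A->B->C->D->E and num == 2, we xchg the whole
// list out, walk one link to cur == B, break off the suffix C->D->E
// by NULLing B's mark, CAS the suffix back onto _overflow_list --
// retrying if other threads prepended meanwhile -- and then push A
// and B onto work_q.)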
8437 // Grab the entire list; we'll put back a suffix
8438 oop prefix = (oop)Atomic::xchg_ptr(NULL, &_overflow_list);
8439 if (prefix == NULL) { // someone grabbed it before we did ...
8440 // ... we could spin for a short while, but for now we don't
8441 return false;
8442 }
8443 size_t i = num;
8444 oop cur = prefix;
8445 for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
8446 if (cur->mark() != NULL) {
8447 oop suffix_head = cur->mark(); // suffix will be put back on global list
8448 cur->set_mark(NULL); // break off suffix
8449 // Find tail of suffix so we can prepend suffix to global list
8450 for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
8451 oop suffix_tail = cur;
8452 assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
8453 "Tautology");
8454 oop observed_overflow_list = _overflow_list;
8455 do {
8456 cur = observed_overflow_list;
8457 suffix_tail->set_mark(markOop(cur));
8458 observed_overflow_list =
8459 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur);
8460 } while (cur != observed_overflow_list);
8461 }
8462
8463 // Push the prefix elements on work_q
8464 assert(prefix != NULL, "control point invariant");
8465 const markOop proto = markOopDesc::prototype();
8466 oop next;
8467 NOT_PRODUCT(size_t n = 0;)
8468 for (cur = prefix; cur != NULL; cur = next) {
8469 next = oop(cur->mark());
8470 cur->set_mark(proto); // until proven otherwise
8471 assert(cur->is_oop(), "Should be an oop");
8472 bool res = work_q->push(cur);
8473 assert(res, "Bit off more than we can chew?");
8474 NOT_PRODUCT(n++;)
8475 }
8476 #ifndef PRODUCT
8477 assert(_num_par_pushes >= n, "Too many pops?");
8478 Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
8479 #endif
8480 return true;
8481 }
8482
8483 // Single-threaded
8484 void CMSCollector::push_on_overflow_list(oop p) {
8485 NOT_PRODUCT(_num_par_pushes++;)
8486 assert(p->is_oop(), "Not an oop");
8487 preserve_mark_if_necessary(p);
8488 p->set_mark((markOop)_overflow_list);
8489 _overflow_list = p;
8490 }
8491
8492 // Multi-threaded; use CAS to prepend to overflow list
8493 void CMSCollector::par_push_on_overflow_list(oop p) {
8494 NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
8495 assert(p->is_oop(), "Not an oop");
8496 par_preserve_mark_if_necessary(p);
8497 oop observed_overflow_list = _overflow_list;
8498 oop cur_overflow_list;
8499 do {
8500 cur_overflow_list = observed_overflow_list;
8501 p->set_mark(markOop(cur_overflow_list));
8502 observed_overflow_list =
8503 (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
8504 } while (cur_overflow_list != observed_overflow_list);
8505 }
8506
8507 // Single threaded
8508 // General Note on GrowableArray: pushes may silently fail
8509 // because we are (temporarily) out of C-heap for expanding
8510 // the stack. The problem is quite ubiquitous and affects
8511 // a lot of code in the JVM. The prudent thing for GrowableArray
8512 // to do (for now) is to exit with an error. However, that may
8513 // be too draconian in some cases because the caller may be
8514 // able to recover without much harm. For such cases, we
8515 // should probably introduce a "soft_push" method which returns
8516 // an indication of success or failure with the assumption that
8517 // the caller may be able to recover from a failure; code in
8518 // the VM can then be changed, incrementally, to deal with such
8519 // failures where possible, thus, incrementally hardening the VM
8520 // in such low resource situations.
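// (A hypothetical soft_push, sketched only as a comment -- the names
//  _len, _max and try_grow() are illustrative, not GrowableArray API:
//    bool soft_push(const E& elem) {
//      if (_len == _max && !try_grow()) return false; // out of C-heap
//      push(elem);
//      return true;
//    }
//  callers could then recover from a failed push instead of exiting.)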
8521 void CMSCollector::preserve_mark_work(oop p, markOop m) {
8522 int PreserveMarkStackSize = 128;
8523
8524 if (_preserved_oop_stack == NULL) {
8525 assert(_preserved_mark_stack == NULL,
8526 "bijection with preserved_oop_stack");
8527 // Allocate the stacks
8528 _preserved_oop_stack = new (ResourceObj::C_HEAP)
8529 GrowableArray<oop>(PreserveMarkStackSize, true);
8530 _preserved_mark_stack = new (ResourceObj::C_HEAP)
8531 GrowableArray<markOop>(PreserveMarkStackSize, true);
8532 if (_preserved_oop_stack == NULL || _preserved_mark_stack == NULL) {
8533 vm_exit_out_of_memory(2 * PreserveMarkStackSize * sizeof(oop) /* punt */,
8534 "Preserved Mark/Oop Stack for CMS (C-heap)");
8535 }
8536 }
8537 _preserved_oop_stack->push(p);
8538 _preserved_mark_stack->push(m);
8539 assert(m == p->mark(), "Mark word changed");
8540 assert(_preserved_oop_stack->length() == _preserved_mark_stack->length(),
8541 "bijection");
8542 }
8543
8544 // Single threaded
8545 void CMSCollector::preserve_mark_if_necessary(oop p) {
8546 markOop m = p->mark();
8547 if (m->must_be_preserved(p)) {
8548 preserve_mark_work(p, m);
8549 }
8550 }
8551
8552 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8553 markOop m = p->mark();
8554 if (m->must_be_preserved(p)) {
8555 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8556 // Even though we read the mark word without holding
8557 // the lock, we are assured that it will not change
8558 // because we "own" this oop, so no other thread can
8559 // be trying to push it on the overflow list; see
8560 // the assertion in preserve_mark_work() that checks
8561 // that m == p->mark().
8562 preserve_mark_work(p, m);
8563 }
8564 }
8565
8566 // We should be able to do this multi-threaded,
8567 // a chunk of stack being a task (this is
8568 // correct because each oop only ever appears
8569 // once in the overflow list). However, it's
8570 // not very easy to completely overlap this with
8571 // other operations, so it will generally not be done
8572 // until all work's been completed. Because we
8573 // expect the preserved oop stack (set) to be small,
8574 // it's probably fine to do this single-threaded.
8575 // We can explore cleverer concurrent/overlapped/parallel
8576 // processing of preserved marks if we feel the
8577 // need for this in the future. Stack overflow should
8578 // be so rare in practice and, when it happens, its
8579 // effect on performance so great that this will
8580 // likely just be in the noise anyway.
void CMSCollector::restore_preserved_marks_if_any() {
  if (_preserved_oop_stack == NULL) {
    assert(_preserved_mark_stack == NULL,
           "bijection with preserved_oop_stack");
    return;
  }

  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  assert(Thread::current()->is_ConcurrentGC_thread() ||
         Thread::current()->is_VM_thread(),
         "should be single-threaded");

  int length = _preserved_oop_stack->length();
  assert(_preserved_mark_stack->length() == length, "bijection");
  for (int i = 0; i < length; i++) {
    oop p = _preserved_oop_stack->at(i);
    assert(p->is_oop(), "Should be an oop");
    assert(_span.contains(p), "oop should be in _span");
    assert(p->mark() == markOopDesc::prototype(),
           "Set when taken from overflow list");
    markOop m = _preserved_mark_stack->at(i);
    p->set_mark(m);
  }
  _preserved_mark_stack->clear();
  _preserved_oop_stack->clear();
  assert(_preserved_mark_stack->is_empty() &&
         _preserved_oop_stack->is_empty(),
         "stacks were cleared above");
}

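// An illustrative sketch (not existing VM code) of the chunked,
// multi-threaded restoration mentioned in the comment preceding
// restore_preserved_marks_if_any(). Workers claim disjoint
// [start, end) chunks of the preserved stacks via an atomic counter;
// this is correct because each oop appears at most once. The names
// restore_chunk, _restore_cursor and RestoreChunkSize are hypothetical
// assumptions, as is resetting _restore_cursor to 0 beforehand.
//
//   void CMSCollector::restore_chunk() {
//     const jint RestoreChunkSize = 64;
//     jint length = (jint) _preserved_oop_stack->length();
//     for (;;) {
//       // Claim the next chunk; Atomic::add() returns the updated value.
//       jint start = Atomic::add(RestoreChunkSize, &_restore_cursor)
//                    - RestoreChunkSize;
//       if (start >= length) return;  // all chunks have been claimed
//       jint end = MIN2(start + RestoreChunkSize, length);
//       for (jint i = start; i < end; i++) {
//         _preserved_oop_stack->at(i)->set_mark(_preserved_mark_stack->at(i));
//       }
//     }
//   }
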
#ifndef PRODUCT
bool CMSCollector::no_preserved_marks() const {
  return (  (_preserved_mark_stack == NULL
          && _preserved_oop_stack == NULL)
          || (_preserved_mark_stack->is_empty()
          && _preserved_oop_stack->is_empty()));
}
#endif

CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  CMSAdaptiveSizePolicy* size_policy =
    (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
  assert(size_policy->is_gc_cms_adaptive_size_policy(),
         "Wrong type for size policy");
  return size_policy;
}

void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
                                             size_t desired_promo_size) {
  if (cur_promo_size < desired_promo_size) {
    size_t expand_bytes = desired_promo_size - cur_promo_size;
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
        "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
        expand_bytes);
    }
    expand(expand_bytes,
           MinHeapDeltaBytes,
           CMSExpansionCause::_adaptive_size_policy);
  } else if (desired_promo_size < cur_promo_size) {
    size_t shrink_bytes = cur_promo_size - desired_promo_size;
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
        "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
        shrink_bytes);
    }
    shrink(shrink_bytes);
  }
}

CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  CMSGCAdaptivePolicyCounters* counters =
    (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
  assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
         "Wrong kind of counters");
  return counters;
}

void ASConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
    CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
    assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
           "Wrong gc statistics type");
    counters->update_counters(gc_stats_l);
  }
}

void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();

    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
    CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
    assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
           "Wrong gc statistics type");
    counters->update_counters(gc_stats_l);
  }
}

// The desired expansion delta is computed so that:
// . the free space in the generation is at or above the
//   desired free percentage
void ASConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing");

  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "Wrong type of heap");
  int prev_level = level() - 1;
  assert(prev_level >= 0, "The cms generation should not be the youngest");
  Generation* prev_gen = gch->get_gen(prev_level);
  assert(prev_gen->kind() == Generation::ASParNew,
         "Wrong type of young generation");
  ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen;
  size_t cur_eden = younger_gen->eden()->capacity();
  CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
  size_t cur_promo = free();
  size_policy->compute_tenured_generation_free_space(cur_promo,
                                                     max_available(),
                                                     cur_eden);
  resize(cur_promo, size_policy->promo_size());

  // Record the new size of the space in the cms generation
  // that is available for promotions. This is temporary.
  // It should be the desired promo size.
  size_policy->avg_cms_promo()->sample(free());
  size_policy->avg_old_live()->sample(used());

  if (UsePerfData) {
    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
    counters->update_cms_capacity_counter(capacity());
  }
}

void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
  assert_locked_or_safepoint(Heap_lock);
  assert_lock_strong(freelistLock());
  HeapWord* old_end = _cmsSpace->end();
  HeapWord* unallocated_start = _cmsSpace->unallocated_block();
  assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
  FreeChunk* chunk_at_end = find_chunk_at_end();
  if (chunk_at_end == NULL) {
    // No room to shrink
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("No room to shrink: old_end "
        PTR_FORMAT " unallocated_start " PTR_FORMAT
        " chunk_at_end " PTR_FORMAT,
        old_end, unallocated_start, chunk_at_end);
    }
    return;
  } else {
    // Find the chunk at the end of the space and determine
    // how much it can be shrunk.
    size_t shrinkable_size_in_bytes = chunk_at_end->size();
    size_t aligned_shrinkable_size_in_bytes =
      align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
    assert(unallocated_start <= chunk_at_end->end(),
           "Inconsistent chunk at end of space");
    size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
    size_t word_size_before = heap_word_size(_virtual_space.committed_size());

    // Shrink the underlying space
    _virtual_space.shrink_by(bytes);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("ASConcurrentMarkSweepGeneration::shrink_by:"
        " desired_bytes " SIZE_FORMAT
        " shrinkable_size_in_bytes " SIZE_FORMAT
        " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
        " bytes " SIZE_FORMAT,
        desired_bytes, shrinkable_size_in_bytes,
        aligned_shrinkable_size_in_bytes, bytes);
      gclog_or_tty->print_cr(" old_end " PTR_FORMAT
        " unallocated_start " PTR_FORMAT,
        old_end, unallocated_start);
    }

    // If the space did shrink (shrinking is not guaranteed),
    // shrink the chunk at the end by the appropriate amount.
    if (((HeapWord*)_virtual_space.high()) < old_end) {
      size_t new_word_size =
        heap_word_size(_virtual_space.committed_size());

      // The chunk must be removed from the dictionary before its size
      // changes, because its new size may place it elsewhere in the
      // dictionary: get the chunk at the end, shrink it, and put it back.
      _cmsSpace->removeChunkFromDictionary(chunk_at_end);
      size_t word_size_change = word_size_before - new_word_size;
      size_t chunk_at_end_old_size = chunk_at_end->size();
      assert(chunk_at_end_old_size >= word_size_change,
             "Shrink is too large");
      chunk_at_end->setSize(chunk_at_end_old_size -
                            word_size_change);
      _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
                       word_size_change);

      _cmsSpace->returnChunkToDictionary(chunk_at_end);

      MemRegion mr(_cmsSpace->bottom(), new_word_size);
      _bts->resize(new_word_size);  // resize the block offset shared array
      Universe::heap()->barrier_set()->resize_covered_region(mr);
      _cmsSpace->assert_locked();
      _cmsSpace->set_end((HeapWord*)_virtual_space.high());

      NOT_PRODUCT(_cmsSpace->dictionary()->verify());

      // update the space and generation capacity counters
      if (UsePerfData) {
        _space_counters->update_capacity();
        _gen_counters->update_all();
      }

      if (Verbose && PrintGCDetails) {
        size_t new_mem_size = _virtual_space.committed_size();
        size_t old_mem_size = new_mem_size + bytes;
        gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K "
          "by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
          name(), old_mem_size/K, bytes/K, new_mem_size/K);
      }
    }

    assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
           "Inconsistency at end of space");
    assert(chunk_at_end->end() == _cmsSpace->end(),
           "Shrinking is inconsistent");
    return;
  }
}

// Transfer some number of overflowed objects to the usual marking
// stack. Return true if some objects were transferred.
bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
  size_t num = MIN2((size_t)_mark_stack->capacity()/4,
                    (size_t)ParGCDesiredObjsFromOverflowList);

  bool res = _collector->take_from_overflow_list(num, _mark_stack);
  assert(_collector->overflow_list_is_empty() || res,
         "If list is not empty, we should have taken something");
  assert(!res || !_mark_stack->isEmpty(),
         "If we took something, it should now be on our stack");
  return res;
}

size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
  size_t res = _sp->block_size_no_stall(addr, _collector);
  assert(res != 0, "Should always be able to compute a size");
  if (_sp->block_is_obj(addr)) {
    if (_live_bit_map->isMarked(addr)) {
      // It can't have been dead in a previous cycle
      guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
    } else {
      _dead_bit_map->mark(addr);  // mark the dead object
    }
  }
  return res;
}