--- old/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp
+++ new/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp
1 1 /*
2 2 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 inline void CMSBitMap::clear_all() {
26 26 assert_locked();
27 27 // CMS bitmaps usually cover large memory regions
28 28 _bm.clear_large();
29 29 return;
30 30 }
31 31
32 32 inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
33 33 return (pointer_delta(addr, _bmStartWord)) >> _shifter;
34 34 }
35 35
36 36 inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
37 37 return _bmStartWord + (offset << _shifter);
38 38 }
39 39
40 40 inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
41 41 assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
42 42 return diff >> _shifter;
43 43 }
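A minimal sketch of the arithmetic above, assuming a hypothetical standalone bitmap whose first covered word is start and whose shifter is k (each bit spans 2^k heap words); the two conversions are inverses over the covered range:

    // Hypothetical helpers mirroring heapWordToOffset()/offsetToHeapWord().
    inline size_t word_to_bit(HeapWord* addr, HeapWord* start, int k) {
      return pointer_delta(addr, start) >> k;   // word distance, scaled down to a bit index
    }
    inline HeapWord* bit_to_word(size_t bit, HeapWord* start, int k) {
      return start + (bit << k);                // bit index scaled back up to a word address
    }
    // With k == 0 every heap word has its own bit, so an address 17 words past
    // start maps to bit 17 and back again.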
44 44
45 45 inline void CMSBitMap::mark(HeapWord* addr) {
46 46 assert_locked();
47 47 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
48 48 "outside underlying space?");
49 49 _bm.set_bit(heapWordToOffset(addr));
50 50 }
51 51
52 52 inline bool CMSBitMap::par_mark(HeapWord* addr) {
53 53 assert_locked();
54 54 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
55 55 "outside underlying space?");
56 56 return _bm.par_at_put(heapWordToOffset(addr), true);
57 57 }
58 58
59 59 inline void CMSBitMap::par_clear(HeapWord* addr) {
60 60 assert_locked();
61 61 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
62 62 "outside underlying space?");
63 63 _bm.par_at_put(heapWordToOffset(addr), false);
64 64 }
65 65
66 66 inline void CMSBitMap::mark_range(MemRegion mr) {
67 67 NOT_PRODUCT(region_invariant(mr));
68 68 // Range size is usually just 1 bit.
69 69 _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
70 70 BitMap::small_range);
71 71 }
72 72
73 73 inline void CMSBitMap::clear_range(MemRegion mr) {
74 74 NOT_PRODUCT(region_invariant(mr));
75 75 // Range size is usually just 1 bit.
76 76 _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
77 77 BitMap::small_range);
78 78 }
79 79
80 80 inline void CMSBitMap::par_mark_range(MemRegion mr) {
81 81 NOT_PRODUCT(region_invariant(mr));
82 82 // Range size is usually just 1 bit.
83 83 _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
84 84 BitMap::small_range);
85 85 }
86 86
87 87 inline void CMSBitMap::par_clear_range(MemRegion mr) {
88 88 NOT_PRODUCT(region_invariant(mr));
89 89 // Range size is usually just 1 bit.
90 90 _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
91 91 BitMap::small_range);
92 92 }
93 93
94 94 inline void CMSBitMap::mark_large_range(MemRegion mr) {
95 95 NOT_PRODUCT(region_invariant(mr));
96 96 // Range size must be greater than 32 bytes.
97 97 _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
98 98 BitMap::large_range);
99 99 }
100 100
101 101 inline void CMSBitMap::clear_large_range(MemRegion mr) {
102 102 NOT_PRODUCT(region_invariant(mr));
103 103 // Range size must be greater than 32 bytes.
104 104 _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
105 105 BitMap::large_range);
106 106 }
107 107
108 108 inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
109 109 NOT_PRODUCT(region_invariant(mr));
110 110 // Range size must be greater than 32 bytes.
111 111 _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
112 112 BitMap::large_range);
113 113 }
114 114
115 115 inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
116 116 NOT_PRODUCT(region_invariant(mr));
117 117 // Range size must be greater than 32 bytes.
118 118 _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
119 119 BitMap::large_range);
120 120 }
121 121
122 122 // Starting at "addr" (inclusive) return a memory region
123 123 // corresponding to the first maximally contiguous marked ("1") region.
124 124 inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
125 125 return getAndClearMarkedRegion(addr, endWord());
126 126 }
127 127
128 128 // Starting at "start_addr" (inclusive) return a memory region
129 129 // corresponding to the first maximal contiguous marked ("1") region
130 130 // strictly less than end_addr.
131 131 inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
132 132 HeapWord* end_addr) {
133 133 HeapWord *start, *end;
134 134 assert_locked();
135 135 start = getNextMarkedWordAddress (start_addr, end_addr);
136 136 end = getNextUnmarkedWordAddress(start, end_addr);
137 137 assert(start <= end, "Consistency check");
138 138 MemRegion mr(start, end);
139 139 if (!mr.is_empty()) {
140 140 clear_range(mr);
141 141 }
142 142 return mr;
143 143 }
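A hedged usage sketch of the two-argument form; bitmap, a, and the run boundaries below are hypothetical, with the first marked run at or after a assumed to cover the four words [a+3, a+7):

    // Finds the first marked run in [a, a+10), clears its bits, and returns it.
    MemRegion mr = bitmap->getAndClearMarkedRegion(a, a + 10);   // mr == [a+3, a+7)
    // If no bit in [a, a+10) is set, the returned region is empty and nothing
    // is cleared.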
144 144
145 145 inline bool CMSBitMap::isMarked(HeapWord* addr) const {
146 146 assert_locked();
147 147 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
148 148 "outside underlying space?");
149 149 return _bm.at(heapWordToOffset(addr));
150 150 }
151 151
152 152 // The same as isMarked() but without a lock check.
153 153 inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
154 154 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
155 155 "outside underlying space?");
156 156 return _bm.at(heapWordToOffset(addr));
157 157 }
158 158
159 159
160 160 inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
161 161 assert_locked();
162 162 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
163 163 "outside underlying space?");
164 164 return !_bm.at(heapWordToOffset(addr));
165 165 }
166 166
167 167 // Return the HeapWord address corresponding to the next "1" bit
168 168 // (inclusive).
169 169 inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
170 170 return getNextMarkedWordAddress(addr, endWord());
171 171 }
172 172
173 173 // Return the least HeapWord address corresponding to next "1" bit
174 174 // starting at start_addr (inclusive) but strictly less than end_addr.
175 175 inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
176 176 HeapWord* start_addr, HeapWord* end_addr) const {
177 177 assert_locked();
178 178 size_t nextOffset = _bm.get_next_one_offset(
179 179 heapWordToOffset(start_addr),
180 180 heapWordToOffset(end_addr));
181 181 HeapWord* nextAddr = offsetToHeapWord(nextOffset);
182 182 assert(nextAddr >= start_addr &&
183 183 nextAddr <= end_addr, "get_next_one postcondition");
184 184 assert((nextAddr == end_addr) ||
185 185 isMarked(nextAddr), "get_next_one postcondition");
186 186 return nextAddr;
187 187 }
188 188
189 189
190 190 // Return the HeapWord address corresponding to the next "0" bit
191 191 // (inclusive).
192 192 inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
193 193 return getNextUnmarkedWordAddress(addr, endWord());
194 194 }
195 195
196 196 // Return the least HeapWord address corresponding to the next "0" bit
197 197 // starting at start_addr (inclusive) but strictly less than end_addr.
198 198 inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
199 199 HeapWord* start_addr, HeapWord* end_addr) const {
200 200 assert_locked();
201 201 size_t nextOffset = _bm.get_next_zero_offset(
202 202 heapWordToOffset(start_addr),
203 203 heapWordToOffset(end_addr));
204 204 HeapWord* nextAddr = offsetToHeapWord(nextOffset);
205 205 assert(nextAddr >= start_addr &&
206 206 nextAddr <= end_addr, "get_next_zero postcondition");
207 207 assert((nextAddr == end_addr) ||
208 208 isUnmarked(nextAddr), "get_next_zero postcondition");
209 209 return nextAddr;
210 210 }
211 211
212 212 inline bool CMSBitMap::isAllClear() const {
213 213 assert_locked();
214 214 return getNextMarkedWordAddress(startWord()) >= endWord();
215 215 }
216 216
217 217 inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
218 218 HeapWord* right) {
219 219 assert_locked();
220 220 left = MAX2(_bmStartWord, left);
221 221 right = MIN2(_bmStartWord + _bmWordSize, right);
222 222 if (right > left) {
223 223 _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
224 224 }
225 225 }
226 226
227 227 inline void CMSCollector::start_icms() {
228 228 if (CMSIncrementalMode) {
229 229 ConcurrentMarkSweepThread::start_icms();
230 230 }
231 231 }
232 232
233 233 inline void CMSCollector::stop_icms() {
234 234 if (CMSIncrementalMode) {
235 235 ConcurrentMarkSweepThread::stop_icms();
236 236 }
237 237 }
238 238
239 239 inline void CMSCollector::disable_icms() {
240 240 if (CMSIncrementalMode) {
241 241 ConcurrentMarkSweepThread::disable_icms();
242 242 }
243 243 }
244 244
245 245 inline void CMSCollector::enable_icms() {
246 246 if (CMSIncrementalMode) {
247 247 ConcurrentMarkSweepThread::enable_icms();
248 248 }
249 249 }
250 250
251 251 inline void CMSCollector::icms_wait() {
252 252 if (CMSIncrementalMode) {
253 253 cmsThread()->icms_wait();
254 254 }
255 255 }
256 256
257 257 inline void CMSCollector::save_sweep_limits() {
258 258 _cmsGen->save_sweep_limit();
259 259 _permGen->save_sweep_limit();
260 260 }
261 261
262 262 inline bool CMSCollector::is_dead_obj(oop obj) const {
263 263 HeapWord* addr = (HeapWord*)obj;
264 264 assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
265 265 && _cmsGen->cmsSpace()->block_is_obj(addr))
266 266 ||
267 267 (_permGen->cmsSpace()->is_in_reserved(addr)
268 268 && _permGen->cmsSpace()->block_is_obj(addr)),
269 269 "must be object");
270 - return cms_should_unload_classes() &&
270 + return should_unload_classes() &&
271 271 _collectorState == Sweeping &&
272 272 !_markBitMap.isMarked(addr);
273 273 }
274 274
275 275 inline bool CMSCollector::should_abort_preclean() const {
276 276 // We are in the midst of an "abortable preclean" and either
277 277 // scavenge is done or foreground GC wants to take over collection
278 278 return _collectorState == AbortablePreclean &&
279 279 (_abort_preclean || _foregroundGCIsActive ||
280 280 GenCollectedHeap::heap()->incremental_collection_will_fail());
281 281 }
282 282
283 283 inline size_t CMSCollector::get_eden_used() const {
284 284 return _young_gen->as_DefNewGeneration()->eden()->used();
285 285 }
286 286
287 287 inline size_t CMSCollector::get_eden_capacity() const {
288 288 return _young_gen->as_DefNewGeneration()->eden()->capacity();
289 289 }
290 290
291 291 inline bool CMSStats::valid() const {
292 292 return _valid_bits == _ALL_VALID;
293 293 }
294 294
295 295 inline void CMSStats::record_gc0_begin() {
296 296 if (_gc0_begin_time.is_updated()) {
297 297 float last_gc0_period = _gc0_begin_time.seconds();
298 298 _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period,
299 299 last_gc0_period, _gc0_alpha);
300 300 _gc0_alpha = _saved_alpha;
301 301 _valid_bits |= _GC0_VALID;
302 302 }
303 303 _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();
304 304
305 305 _gc0_begin_time.update();
306 306 }
307 307
308 308 inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
309 309 float last_gc0_duration = _gc0_begin_time.seconds();
310 310 _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration,
311 311 last_gc0_duration, _gc0_alpha);
312 312
313 313 // Amount promoted.
314 314 _cms_used_at_gc0_end = cms_gen_bytes_used;
315 315
316 316 size_t promoted_bytes = 0;
317 317 if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
318 318 promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
319 319 }
320 320
321 321 // If the younger gen collections were skipped, then the
322 322 // number of promoted bytes will be 0 and adding it to the
323 323 // average will incorrectly lessen the average. It is, however,
324 324 // also possible that no promotion was needed.
325 325 //
326 326 // _gc0_promoted used to be calculated as
327 327 // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
328 328 // promoted_bytes, _gc0_alpha);
329 329 _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
330 330 _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();
331 331
332 332 // Amount directly allocated.
333 333 size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
334 334 _cms_gen->reset_direct_allocated_words();
335 335 _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated,
336 336 allocated_bytes, _gc0_alpha);
337 337 }
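The exp_avg() calls above fold each new sample into a running exponentially weighted average. A minimal sketch of that update, assuming the alpha argument is a percentage weight in [0, 100]:

    // Hypothetical stand-in for the weighted-average update used above:
    // 'weight' percent comes from the new sample, the rest from the old average.
    static float exp_avg_sketch(float avg, float sample, unsigned int weight) {
      return ((100.0f - weight) * avg + weight * sample) / 100.0f;
    }

A larger alpha therefore makes the period, duration, and promotion statistics react faster to recent collections.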
338 338
339 339 inline void CMSStats::record_cms_begin() {
340 340 _cms_timer.stop();
341 341
342 342 // This is just an approximate value, but is good enough.
343 343 _cms_used_at_cms_begin = _cms_used_at_gc0_end;
344 344
345 345 _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
346 346 (float) _cms_timer.seconds(), _cms_alpha);
347 347 _cms_begin_time.update();
348 348
349 349 _cms_timer.reset();
350 350 _cms_timer.start();
351 351 }
352 352
353 353 inline void CMSStats::record_cms_end() {
354 354 _cms_timer.stop();
355 355
356 356 float cur_duration = _cms_timer.seconds();
357 357 _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
358 358 cur_duration, _cms_alpha);
359 359
360 360 // Avoid division by 0.
361 361 const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
362 362 _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
363 363 cur_duration / cms_used_mb,
364 364 _cms_alpha);
365 365
366 366 _cms_end_time.update();
367 367 _cms_alpha = _saved_alpha;
368 368 _allow_duty_cycle_reduction = true;
369 369 _valid_bits |= _CMS_VALID;
370 370
371 371 _cms_timer.start();
372 372 }
373 373
374 374 inline double CMSStats::cms_time_since_begin() const {
375 375 return _cms_begin_time.seconds();
376 376 }
377 377
378 378 inline double CMSStats::cms_time_since_end() const {
379 379 return _cms_end_time.seconds();
380 380 }
381 381
382 382 inline double CMSStats::promotion_rate() const {
383 383 assert(valid(), "statistics not valid yet");
384 384 return gc0_promoted() / gc0_period();
385 385 }
386 386
387 387 inline double CMSStats::cms_allocation_rate() const {
388 388 assert(valid(), "statistics not valid yet");
389 389 return cms_allocated() / gc0_period();
390 390 }
391 391
392 392 inline double CMSStats::cms_consumption_rate() const {
393 393 assert(valid(), "statistics not valid yet");
394 394 return (gc0_promoted() + cms_allocated()) / gc0_period();
395 395 }
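A worked example of these three rates, using hypothetical averages: if gc0_period() is 4.0 seconds, gc0_promoted() is 8 MB, and cms_allocated() is 2 MB, then promotion_rate() is 2 MB/s, cms_allocation_rate() is 0.5 MB/s, and cms_consumption_rate() is (8 + 2) / 4 = 2.5 MB/s.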
396 396
397 397 inline unsigned int CMSStats::icms_update_duty_cycle() {
398 398 // Update the duty cycle only if pacing is enabled and the stats are valid
399 399 // (after at least one young gen gc and one cms cycle have completed).
400 400 if (CMSIncrementalPacing && valid()) {
401 401 return icms_update_duty_cycle_impl();
402 402 }
403 403 return _icms_duty_cycle;
404 404 }
405 405
406 406 inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
407 407 cmsSpace()->save_sweep_limit();
408 408 }
409 409
410 410 inline size_t ConcurrentMarkSweepGeneration::capacity() const {
411 411 return _cmsSpace->capacity();
412 412 }
413 413
414 414 inline size_t ConcurrentMarkSweepGeneration::used() const {
415 415 return _cmsSpace->used();
416 416 }
417 417
418 418 inline size_t ConcurrentMarkSweepGeneration::free() const {
419 419 return _cmsSpace->free();
420 420 }
421 421
422 422 inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
423 423 return _cmsSpace->used_region();
424 424 }
425 425
426 426 inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
427 427 return _cmsSpace->used_region_at_save_marks();
428 428 }
429 429
430 430 inline void MarkFromRootsClosure::do_yield_check() {
431 431 if (ConcurrentMarkSweepThread::should_yield() &&
432 432 !_collector->foregroundGCIsActive() &&
433 433 _yield) {
434 434 do_yield_work();
435 435 }
436 436 }
437 437
438 438 inline void Par_MarkFromRootsClosure::do_yield_check() {
439 439 if (ConcurrentMarkSweepThread::should_yield() &&
440 440 !_collector->foregroundGCIsActive() &&
441 441 _yield) {
442 442 do_yield_work();
443 443 }
444 444 }
445 445
446 446 // Return value of "true" indicates that the on-going preclean
447 447 // should be aborted.
448 448 inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
449 449 if (ConcurrentMarkSweepThread::should_yield() &&
450 450 !_collector->foregroundGCIsActive() &&
451 451 _yield) {
452 452 // Sample young gen size before and after yield
453 453 _collector->sample_eden();
454 454 do_yield_work();
455 455 _collector->sample_eden();
456 456 return _collector->should_abort_preclean();
457 457 }
458 458 return false;
459 459 }
460 460
461 461 inline void SurvivorSpacePrecleanClosure::do_yield_check() {
462 462 if (ConcurrentMarkSweepThread::should_yield() &&
463 463 !_collector->foregroundGCIsActive() &&
464 464 _yield) {
465 465 // Sample young gen size before and after yield
466 466 _collector->sample_eden();
467 467 do_yield_work();
468 468 _collector->sample_eden();
469 469 }
470 470 }
471 471
472 472 inline void SweepClosure::do_yield_check(HeapWord* addr) {
473 473 if (ConcurrentMarkSweepThread::should_yield() &&
474 474 !_collector->foregroundGCIsActive() &&
475 475 _yield) {
476 476 do_yield_work(addr);
477 477 }
478 478 }
479 479
480 480 inline void MarkRefsIntoAndScanClosure::do_yield_check() {
481 481 // The conditions are ordered so that the test is cheapest during the
482 482 // remark phase, when _yield is false and the check short-circuits.
483 483 if (_yield &&
484 484 !_collector->foregroundGCIsActive() &&
485 485 ConcurrentMarkSweepThread::should_yield()) {
486 486 do_yield_work();
487 487 }
488 488 }
489 489
490 490
491 491 inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
492 492 // Align the end of mr so it's at a card boundary.
493 493 // This is superfluous except at the end of the space;
494 494 // we should do better than this XXX
495 495 MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
496 496 CardTableModRefBS::card_size /* bytes */));
497 497 _t->mark_range(mr2);
498 498 }
499 499
500 500 inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
501 501 // Align the end of mr so it's at a card boundary.
502 502 // This is superfluous except at the end of the space;
503 503 // we should do better than this XXX
504 504 MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
505 505 CardTableModRefBS::card_size /* bytes */));
506 506 _t->par_mark_range(mr2);
507 507 }
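As a worked example of the card alignment in the two closures above, assuming the usual 512-byte card size: an mr.end() that falls 300 bytes past a card boundary is rounded up by 212 bytes so that mr2 ends exactly on the next card boundary, while an end address that is already card-aligned is left unchanged.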