401 // by the MemRegion parameter.
402 void reset(MemRegion mr);
403 // Return the total number of words in the indexed free lists.
404 size_t totalSizeInIndexedFreeLists() const;
405 
406 public:
407 // Constructor: the space covers mr; bs is the shared block offset array.
408 CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
409 bool use_adaptive_freelists,
410 FreeBlockDictionary::DictionaryChoice);
411 // Accessors for free-list state.
412 bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
413 FreeBlockDictionary* dictionary() const { return _dictionary; }
414 HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
415 void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }
416 
417 // Return the free chunk at the end of the space. If no such
418 // chunk exists, return NULL.
419 FreeChunk* find_chunk_at_end();
420 
421 bool adaptive_freelists() const { return _adaptive_freelists; }  // pure read accessor; marked const to match the other const accessors
422 
423 void set_collector(CMSCollector* collector) { _collector = collector; }
424 
425 // Support for parallelization of rescan and marking
426 size_t rescan_task_size() const { return _rescan_task_size; }    // top-level const on by-value return dropped (meaningless; -Wignored-qualifiers)
427 size_t marking_task_size() const { return _marking_task_size; }  // top-level const on by-value return dropped (meaningless; -Wignored-qualifiers)
428 SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; }
429 void initialize_sequential_subtasks_for_rescan(int n_threads);
430 void initialize_sequential_subtasks_for_marking(int n_threads,
431 HeapWord* low = NULL);
432 
433 #if CFLS_LAB_REFILL_STATS
434 void print_par_alloc_stats();
435 #endif
436 
437 // Space enquiries (sizes presumably in HeapWords — cf. max_alloc_in_words; verify against definitions).
438 size_t used() const;
439 size_t free() const;
440 size_t max_alloc_in_words() const;
441 // XXX: should have a less conservative used_region() than that of
549 // and that the space can do any related house-keeping functions.
550 void sweep_completed();
551 
552 // For an object in this space, the mark-word's two
553 // LSBs having the value [11] indicates that it has been
554 // promoted since the most recent call to save_marks() on
555 // this generation and has not subsequently been iterated
556 // over (using oop_since_save_marks_iterate() above).
557 bool obj_allocated_since_save_marks(const oop obj) const {
558 assert(is_in_reserved(obj), "Wrong space?");
559 return ((PromotedObject*)obj)->hasPromotedMark();
560 }
561 
562 // A worst-case estimate of the space required (in HeapWords) to expand the
563 // heap when promoting an obj of size obj_size.
564 size_t expansionSpaceRequired(size_t obj_size) const;
565 
566 FreeChunk* allocateScratch(size_t size);
567 
568 // Returns true if either the small or large linear allocation buffer is empty.
569 bool linearAllocationWouldFail();
570 
571 // Round size up to the minimum chunk size, then align it as an object size.
572 // This non-virtual version is used in most CompactibleFreeListSpace methods.
573 inline static size_t adjustObjectSize(size_t size) {
574 return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
575 }
576 // This is a virtual version of adjustObjectSize() that is called
577 // only occasionally when the compaction space changes and the type
578 // of the new compaction space is only known to be CompactibleSpace.
579 size_t adjust_object_size_v(size_t size) const {
580 return adjustObjectSize(size);
581 }
582 // Minimum size of a free block.
583 virtual size_t minimum_free_block_size() const { return MinChunkSize; }
584 void removeFreeChunkFromFreeLists(FreeChunk* chunk);
585 void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
586 bool coalesced);
587 
588 // Support for compaction
589 void prepare_for_compaction(CompactPoint* cp);
590 void adjust_pointers();
591 void compact();
592 // Reset the space to reflect the fact that a compaction of the
593 // space has been done.
594 virtual void reset_after_compaction();
595 
595
596 // Debugging support
597 void print() const;
598 void prepare_for_verify();
599 void verify(bool allow_dirty) const;
600 void verifyFreeLists() const PRODUCT_RETURN;
601 void verifyIndexedFreeLists() const;
602 void verifyIndexedFreeList(size_t size) const;
603 // verify that the given chunk is in the free lists.
604 bool verifyChunkInFreeLists(FreeChunk* fc) const;
605 // Do some basic checks on the the free lists.
606 void checkFreeListConsistency() const PRODUCT_RETURN;
607
623 
624 // Print the statistics for the free lists.
625 void printFLCensus(int sweepCt) const;
626 
627 // Statistics functions
628 // Initialize census for lists before the sweep.
629 void beginSweepFLCensus(float sweep_current,
630 float sweep_estimate);
631 // Set the surplus for each of the free lists.
632 void setFLSurplus();
633 // Set the hint for each of the free lists.
634 void setFLHints();
635 // Clear the census for each of the free lists.
636 void clearFLCensus();
637 // Finish the census bookkeeping after the end of the sweep.
638 void endSweepFLCensus(int sweepCt);
639 // Return true if the count of free chunks of the given size is greater
640 // than the desired number of free chunks.
641 bool coalOverPopulated(size_t size);
642 
643
644 // Record (for each size):
645 //
646 // split-births = #chunks added due to splits in (prev-sweep-end,
647 // this-sweep-start)
648 // split-deaths = #chunks removed for splits in (prev-sweep-end,
649 // this-sweep-start)
650 // num-curr = #chunks at start of this sweep
651 // num-prev = #chunks at end of previous sweep
652 //
653 // The above are quantities that are measured. Now define:
654 //
655 // num-desired := num-prev + split-births - split-deaths - num-curr
656 //
657 // Roughly, num-prev + split-births is the supply,
658 // split-deaths is demand due to other sizes
659 // and num-curr is what we have left.
660 //
661 // Thus, num-desired is roughly speaking the "legitimate demand"
662 // for blocks of this size and what we are striving to reach at the
663 // end of the current sweep.
|
401 // by the MemRegion parameter.
402 void reset(MemRegion mr);
403 // Return the total number of words in the indexed free lists.
404 size_t totalSizeInIndexedFreeLists() const;
405 
406 public:
407 // Constructor: the space covers mr; bs is the shared block offset array.
408 CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
409 bool use_adaptive_freelists,
410 FreeBlockDictionary::DictionaryChoice);
411 // Accessors for free-list state.
412 bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
413 FreeBlockDictionary* dictionary() const { return _dictionary; }
414 HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
415 void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }
416 
417 // Return the free chunk at the end of the space. If no such
418 // chunk exists, return NULL.
419 FreeChunk* find_chunk_at_end();
420 
421 bool adaptive_freelists() const { return _adaptive_freelists; }
422 
423 void set_collector(CMSCollector* collector) { _collector = collector; }
424 
425 // Support for parallelization of rescan and marking
426 size_t rescan_task_size() const { return _rescan_task_size; }    // top-level const on by-value return dropped (meaningless; -Wignored-qualifiers)
427 size_t marking_task_size() const { return _marking_task_size; }  // top-level const on by-value return dropped (meaningless; -Wignored-qualifiers)
428 SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; }
429 void initialize_sequential_subtasks_for_rescan(int n_threads);
430 void initialize_sequential_subtasks_for_marking(int n_threads,
431 HeapWord* low = NULL);
432 
433 #if CFLS_LAB_REFILL_STATS
434 void print_par_alloc_stats();
435 #endif
436 
437 // Space enquiries (sizes presumably in HeapWords — cf. max_alloc_in_words; verify against definitions).
438 size_t used() const;
439 size_t free() const;
440 size_t max_alloc_in_words() const;
441 // XXX: should have a less conservative used_region() than that of
549 // and that the space can do any related house-keeping functions.
550 void sweep_completed();
551 
552 // For an object in this space, the mark-word's two
553 // LSBs having the value [11] indicates that it has been
554 // promoted since the most recent call to save_marks() on
555 // this generation and has not subsequently been iterated
556 // over (using oop_since_save_marks_iterate() above).
557 bool obj_allocated_since_save_marks(const oop obj) const {
558 assert(is_in_reserved(obj), "Wrong space?");
559 return ((PromotedObject*)obj)->hasPromotedMark();
560 }
561 
562 // A worst-case estimate of the space required (in HeapWords) to expand the
563 // heap when promoting an obj of size obj_size.
564 size_t expansionSpaceRequired(size_t obj_size) const;
565 
566 FreeChunk* allocateScratch(size_t size);
567 
568 // Returns true if either the small or large linear allocation buffer is empty.
569 bool linearAllocationWouldFail() const;
570 
571 // Round size up to the minimum chunk size, then align it as an object size.
572 // This non-virtual version is used in most CompactibleFreeListSpace methods.
573 inline static size_t adjustObjectSize(size_t size) {
574 return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
575 }
576 // This is a virtual version of adjustObjectSize() that is called
577 // only occasionally when the compaction space changes and the type
578 // of the new compaction space is only known to be CompactibleSpace.
579 size_t adjust_object_size_v(size_t size) const {
580 return adjustObjectSize(size);
581 }
582 // Minimum size of a free block.
583 virtual size_t minimum_free_block_size() const { return MinChunkSize; }
584 void removeFreeChunkFromFreeLists(FreeChunk* chunk);
585 void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
586 bool coalesced);
587 
588 // Support for decisions regarding concurrent collection policy
589 bool should_concurrent_collect() const;
590 
591 // Support for compaction
592 void prepare_for_compaction(CompactPoint* cp);
593 void adjust_pointers();
594 void compact();
595 // Reset the space to reflect the fact that a compaction of the
596 // space has been done.
597 virtual void reset_after_compaction();
598 
599 // Debugging support
600 void print() const;
601 void prepare_for_verify();
602 void verify(bool allow_dirty) const;
603 void verifyFreeLists() const PRODUCT_RETURN;
604 void verifyIndexedFreeLists() const;
605 void verifyIndexedFreeList(size_t size) const;
606 // Verify that the given chunk is in the free lists.
607 bool verifyChunkInFreeLists(FreeChunk* fc) const;
608 // Do some basic checks on the free lists.
609 void checkFreeListConsistency() const PRODUCT_RETURN;
610 
626 
627 // Print the statistics for the free lists.
628 void printFLCensus(int sweepCt) const;
629 
630 // Statistics functions
631 // Initialize census for lists before the sweep.
632 void beginSweepFLCensus(float sweep_current,
633 float sweep_estimate);
634 // Set the surplus for each of the free lists.
635 void setFLSurplus();
636 // Set the hint for each of the free lists.
637 void setFLHints();
638 // Clear the census for each of the free lists.
639 void clearFLCensus();
640 // Finish the census bookkeeping after the end of the sweep.
641 void endSweepFLCensus(int sweepCt);
642 // Return true if the count of free chunks of the given size is greater
643 // than the desired number of free chunks.
644 bool coalOverPopulated(size_t size);
645 
646 // Record (for each size):
647 //
648 // split-births = #chunks added due to splits in (prev-sweep-end,
649 // this-sweep-start)
650 // split-deaths = #chunks removed for splits in (prev-sweep-end,
651 // this-sweep-start)
652 // num-curr = #chunks at start of this sweep
653 // num-prev = #chunks at end of previous sweep
654 //
655 // The above are quantities that are measured. Now define:
656 //
657 // num-desired := num-prev + split-births - split-deaths - num-curr
658 //
659 // Roughly, num-prev + split-births is the supply,
660 // split-deaths is demand due to other sizes
661 // and num-curr is what we have left.
662 //
663 // Thus, num-desired is roughly speaking the "legitimate demand"
664 // for blocks of this size and what we are striving to reach at the
665 // end of the current sweep.
|