*** 416,426 ****
  // Return the free chunk at the end of the space.  If no such
  // chunk exists, return NULL.
  FreeChunk* find_chunk_at_end();

! bool adaptive_freelists() { return _adaptive_freelists; }

  void set_collector(CMSCollector* collector) { _collector = collector; }

  // Support for parallelization of rescan and marking
  const size_t rescan_task_size() const { return _rescan_task_size; }
--- 416,426 ----
  // Return the free chunk at the end of the space.  If no such
  // chunk exists, return NULL.
  FreeChunk* find_chunk_at_end();

! bool adaptive_freelists() const { return _adaptive_freelists; }

  void set_collector(CMSCollector* collector) { _collector = collector; }

  // Support for parallelization of rescan and marking
  const size_t rescan_task_size() const { return _rescan_task_size; }
*** 564,574 ****
  size_t expansionSpaceRequired(size_t obj_size) const;

  FreeChunk* allocateScratch(size_t size);

  // returns true if either the small or large linear allocation buffer is empty.
! bool linearAllocationWouldFail();

  // Adjust the chunk for the minimum size.  This version is called in
  // most cases in CompactibleFreeListSpace methods.
  inline static size_t adjustObjectSize(size_t size) {
    return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
--- 564,574 ----
  size_t expansionSpaceRequired(size_t obj_size) const;

  FreeChunk* allocateScratch(size_t size);

  // returns true if either the small or large linear allocation buffer is empty.
! bool linearAllocationWouldFail() const;

  // Adjust the chunk for the minimum size.  This version is called in
  // most cases in CompactibleFreeListSpace methods.
  inline static size_t adjustObjectSize(size_t size) {
    return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
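Both of the hunks above only add a const qualifier to query methods that do not modify the space. A minimal sketch of why that matters, assuming the CompactibleFreeListSpace declaration is visible; the helper function below is hypothetical and not part of the changeset:

    // Hypothetical read-only helper, for illustration only.  It compiles
    // only because adaptive_freelists() and linearAllocationWouldFail()
    // are now declared const and can therefore be called through a
    // pointer-to-const.
    static bool space_allocation_is_strained(const CompactibleFreeListSpace* cfls) {
      return cfls->adaptive_freelists() && cfls->linearAllocationWouldFail();
    }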
*** 583,592 ****
--- 583,595 ----
    virtual size_t minimum_free_block_size() const { return MinChunkSize; }
    void removeFreeChunkFromFreeLists(FreeChunk* chunk);
    void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size, bool coalesced);

+   // Support for decisions regarding concurrent collection policy
+   bool should_concurrent_collect() const;
+
    // Support for compaction
    void prepare_for_compaction(CompactPoint* cp);
    void adjust_pointers();
    void compact();

    // reset the space to reflect the fact that a compaction of the
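The hunk above adds a new const query, should_concurrent_collect(), which lets the space itself weigh in on concurrent collection policy. A sketch of the kind of call site this enables; the function name and the extra heuristic flag are assumptions, not code from the changeset:

    // Illustrative only: a free function standing in for collector policy code.
    // Only the parameter type and the should_concurrent_collect() call come
    // from the changeset; everything else is assumed.
    static bool should_start_concurrent_cycle(const CompactibleFreeListSpace* cfls,
                                              bool other_heuristics_say_yes) {
      // The space can now request a concurrent collection on its own;
      // combine that with whatever other heuristics the caller uses.
      return other_heuristics_say_yes || cfls->should_concurrent_collect();
    }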
*** 638,648 ****
  void endSweepFLCensus(int sweepCt);
  // Return true if the count of free chunks is greater
  // than the desired number of free chunks.
  bool coalOverPopulated(size_t size);
-
  // Record (for each size):
  //
  //   split-births = #chunks added due to splits in (prev-sweep-end,
  //      this-sweep-start)
  //   split-deaths = #chunks removed for splits in (prev-sweep-end,
--- 641,650 ----
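The final hunk only drops a blank line inside the comment block describing the free list census. For orientation, a minimal sketch of the kind of per-size bookkeeping that comment describes; the struct and field names are illustrative assumptions, not the actual HotSpot census fields:

    // Hypothetical per-size census record, for illustration only.
    struct SplitCensus {
      size_t split_births;  // chunks of this size added by splitting larger chunks
                            // between the previous sweep end and this sweep start
      size_t split_deaths;  // chunks of this size removed because they were split,
                            // over the same interval
    };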