  assert(noPromotions(), "post-condition violation");                      \
  assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");\
  assert(_spoolHead == _spoolTail, "emptied spooling buffers");            \
  assert(_firstIndex == _nextIndex, "empty buffer");                       \
}

// This should have been ALL_SINCE_...() just like the others,
// but, because the body of the method above is somewhat longer,
// the MSVC compiler cannot cope; as a workaround, we split the
// macro into its 3 constituent parts below (see original macro
// definition in specializedOopClosures.hpp).
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(PROMOTED_OOPS_ITERATE_DEFN)
PROMOTED_OOPS_ITERATE_DEFN(OopsInGenClosure,_v)


void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
  // ugghh... how would one do this efficiently for a non-contiguous space?
  guarantee(false, "NYI");
}
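
// Note: the small linAB is "empty" when its word size is zero; in that
// state a linear allocation cannot be satisfied until the block is
// refilled (see refillLinearAllocBlock() below).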
bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
  return _smallLinearAllocBlock._word_size == 0;
}

void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
  // Fix up linear allocation blocks to look like free blocks
  repairLinearAllocBlock(&_smallLinearAllocBlock);
}

void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
  assert_locked();
  if (blk->_ptr != NULL) {
    assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
           "Minimum block size requirement");
    FreeChunk* fc = (FreeChunk*)(blk->_ptr);
    fc->setSize(blk->_word_size);
    fc->linkPrev(NULL); // mark as free
    fc->dontCoalesce();
    assert(fc->isFree(), "just marked it free");
    assert(fc->cantCoalesce(), "just marked it uncoalescable");
  }
}
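
// Refill an empty linear allocation block.  Small refill sizes are taken
// from the indexed free lists; anything else falls back to the dictionary.
// The first "if" arm below is intentionally empty: the call to
// getChunkFromIndexedFreeList() in the condition already assigned fc.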
void
CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
  assert_locked();
  assert(blk->_word_size == 0 && blk->_ptr == NULL,
         "linear allocation block should be empty");
  FreeChunk* fc;
  if (blk->_refillSize < SmallForDictionary &&
      (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
    // A linAB's strategy might be to use small sizes to reduce
    // fragmentation but still get the benefits of allocation from a
    // linAB.
  } else {
    fc = getChunkFromDictionary(blk->_refillSize);
  }
  if (fc != NULL) {
    blk->_ptr = (HeapWord*)fc;
    blk->_word_size = fc->size();
    fc->dontCoalesce(); // to prevent sweeper from sweeping us up
  }
}

// Support for concurrent collection policy decisions.
bool CompactibleFreeListSpace::should_concurrent_collect() const {
  // In the future we might want to add in fragmentation stats --
  // including erosion of the "mountain" into this decision as well.
  return !adaptive_freelists() && linearAllocationWouldFail();
}

// Support for compaction
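
// prepare_for_compaction() computes forwarding addresses for the live
// blocks in the space; SCAN_AND_FORWARD (see space.hpp) walks from the
// bottom of the space to "end", using block_is_obj and block_size to
// distinguish and step over free chunks.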
void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
  // prepare_for_compaction() uses the space between live objects
  // so that later phases can skip dead space quickly; as a result,
  // verification of the free lists doesn't work after this point.
}

#define obj_size(q) adjustObjectSize(oop(q)->size())
#define adjust_obj_size(s) adjustObjectSize(s)

void CompactibleFreeListSpace::adjust_pointers() {
  // In other versions of adjust_pointers(), a bail-out
  // based on the amount of live data in the generation
  // (i.e., if 0, bail out) may be used.
  // We cannot test used() == 0 here because the free lists have already
  // been mangled by the compaction.

  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
}

// Set each list's hint to the index of the nearest larger indexed list
// with a surplus, scanning from the largest size downwards.
void CompactibleFreeListSpace::setFLHints() {
  assert_locked();
  size_t h = IndexSetSize;
  for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
    FreeList *fl = &_indexedFreeList[i];
    fl->set_hint(h);
    if (fl->surplus() > 0) {
      h = i;
    }
  }
}
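
// Reset the census counters at the end of a sweep, first snapshotting the
// current population of each list into prevSweep so the next sweep can
// measure growth against it.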
void CompactibleFreeListSpace::clearFLCensus() {
  assert_locked();
  int i;
  for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    FreeList *fl = &_indexedFreeList[i];
    fl->set_prevSweep(fl->count());
    fl->set_coalBirths(0);
    fl->set_coalDeaths(0);
    fl->set_splitBirths(0);
    fl->set_splitDeaths(0);
  }
}

void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
  setFLSurplus();
  setFLHints();
  if (PrintGC && PrintFLSCensus > 0) {
    printFLCensus(sweep_count);
  }
  clearFLCensus();
  assert_locked();
  _dictionary->endSweepDictCensus(SplitSurplusPercent);
}
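
// A size class is "overpopulated" for coalescing purposes when its free
// list already holds more chunks than the census deems desirable (a
// negative coalDesired is treated as always overpopulated).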
bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
  if (size < SmallForDictionary) {
    FreeList *fl = &_indexedFreeList[size];
    return (fl->coalDesired() < 0) ||
           ((int)fl->count() > fl->coalDesired());
  } else {
    return dictionary()->coalDictOverPopulated(size);
  }
}
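
// Census bookkeeping: a "birth" adds a chunk to a size class and a "death"
// removes one, attributed either to coalescing or to splitting.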
void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
  assert(size < SmallForDictionary, "Size too large for indexed list");
  FreeList *fl = &_indexedFreeList[size];
  fl->increment_coalBirths();
}

// Verify the doubly-linked structure of the indexed free list for "size".
void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
  for (FreeChunk* fc = _indexedFreeList[size].head(); fc != NULL;
       fc = fc->next()) {
    guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
  }
}

#ifndef PRODUCT
void CompactibleFreeListSpace::checkFreeListConsistency() const {
  assert(_dictionary->minSize() <= IndexSetSize,
         "Some sizes can't be allocated without recourse to"
         " linear allocation buffers");
  assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
         "else MIN_TREE_CHUNK_SIZE is wrong");
  assert((IndexSetStride == 2 && IndexSetStart == 2) ||
         (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
  assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
         "Some for-loops may be incorrectly initialized");
  assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
         "For-loops that iterate over IndexSet with stride 2 may be wrong");
}
#endif
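
// Print the free list census: one row per indexed size class, a TOTAL row,
// and the dictionary census.  "growth" below is the net number of chunks
// created since the previous sweep, (coalBirths + splitBirths - coalDeaths
// - splitDeaths), as a fraction of the population at the previous sweep;
// "deficit" is (desired - count) / desired, how far the lists fall short
// of their desired populations.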
void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
  assert_lock_strong(&_freelistLock);
  FreeList total;
  gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
  FreeList::print_labels_on(gclog_or_tty, "size");
  size_t totalFree = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    const FreeList *fl = &_indexedFreeList[i];
    totalFree += fl->count() * fl->size();
    if (i % (40*IndexSetStride) == 0) {
      FreeList::print_labels_on(gclog_or_tty, "size");
    }
    fl->print_on(gclog_or_tty);
    total.set_bfrSurp(    total.bfrSurp()     + fl->bfrSurp()    );
    total.set_surplus(    total.surplus()     + fl->surplus()    );
    total.set_desired(    total.desired()     + fl->desired()    );
    total.set_prevSweep(  total.prevSweep()   + fl->prevSweep()  );
    total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
    total.set_count(      total.count()       + fl->count()      );
    total.set_coalBirths( total.coalBirths()  + fl->coalBirths() );
    total.set_coalDeaths( total.coalDeaths()  + fl->coalDeaths() );
    total.set_splitBirths(total.splitBirths() + fl->splitBirths());
    total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
  }
  total.print_on(gclog_or_tty, "TOTAL");
  gclog_or_tty->print_cr("Total free in indexed lists "
                         SIZE_FORMAT " words", totalFree);
  gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
    (double)(total.splitBirths() + total.coalBirths() -
             total.splitDeaths() - total.coalDeaths()) /
    (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
    (double)(total.desired() - total.count()) /
    (total.desired() != 0 ? (double)total.desired() : 1.0));
  _dictionary->printDictCensus();
}

// Return the next displaced header, incrementing the pointer and
// recycling spool area as necessary.  Displaced headers are consumed from
// the head block of the spool list starting at slot index 1; an exhausted
// head block is pushed onto the spare list for reuse.
markOop PromotionInfo::nextDisplacedHeader() {
  assert(_spoolHead != NULL, "promotionInfo inconsistency");
  assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
         "Empty spool space: no displaced header can be fetched");
  assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
  markOop hdr = _spoolHead->displacedHdr[_firstIndex];
  // Spool forward
  if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
    // forward to next block, recycling this block into spare spool buffer
    SpoolBlock* tmp = _spoolHead->nextSpoolBlock;
    assert(_spoolHead != _spoolTail, "Spooling storage mix-up");
    _spoolHead->nextSpoolBlock = _spareSpool;
    _spareSpool = _spoolHead;
    _spoolHead = tmp;
    _firstIndex = 1;