  assert(noPromotions(), "post-condition violation");                       \
  assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");\
  assert(_spoolHead == _spoolTail, "emptied spooling buffers");             \
  assert(_firstIndex == _nextIndex, "empty buffer");                        \
}

// This should have been ALL_SINCE_...() just like the others,
// but, because the body of the method above is somewhat longer,
// the MSVC compiler cannot cope; as a workaround, we split the
// macro into its 3 constituent parts below (see original macro
// definition in specializedOopClosures.hpp).
SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(PROMOTED_OOPS_ITERATE_DEFN)
PROMOTED_OOPS_ITERATE_DEFN(OopsInGenClosure,_v)


void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
  // ugghh... how would one do this efficiently for a non-contiguous space?
  guarantee(false, "NYI");
}

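// Returns true when the small linear allocation block is exhausted (its
// word size is zero), i.e., a linear allocation from it cannot succeed
// until the block is refilled.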
bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
  return _smallLinearAllocBlock._word_size == 0;
}

void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
  // Fix up linear allocation blocks to look like free blocks
  repairLinearAllocBlock(&_smallLinearAllocBlock);
}

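// Dress the remainder of a linear allocation block up as a well-formed
// FreeChunk so heap walkers parse it correctly; it is marked
// non-coalescable so the sweeper does not absorb it into an adjacent
// free chunk while it is still serving as a linAB.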
void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
  assert_locked();
  if (blk->_ptr != NULL) {
    assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
           "Minimum block size requirement");
    FreeChunk* fc = (FreeChunk*)(blk->_ptr);
    fc->setSize(blk->_word_size);
    fc->linkPrev(NULL);   // mark as free
    fc->dontCoalesce();
    assert(fc->isFree(), "just marked it free");
    assert(fc->cantCoalesce(), "just marked it uncoalescable");
  }
}
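// Refill an empty linear allocation block: prefer a chunk from the
// indexed (small-size) free lists when the refill size is below
// SmallForDictionary, and fall back to the dictionary of large chunks
// otherwise.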
void
CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
  assert_locked();
  assert(blk->_word_size == 0 && blk->_ptr == NULL,
         "linear allocation block should be empty");
  FreeChunk* fc;
  if (blk->_refillSize < SmallForDictionary &&
      (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
    // A linAB's strategy might be to use small sizes to reduce
    // fragmentation but still get the benefits of allocation from a
    // linAB.
  } else {
    fc = getChunkFromDictionary(blk->_refillSize);
  }
  if (fc != NULL) {
    blk->_ptr = (HeapWord*)fc;
    blk->_word_size = fc->size();
    fc->dontCoalesce();   // to prevent sweeper from sweeping us up
  }
}

// Support for concurrent collection policy decisions.
bool CompactibleFreeListSpace::should_concurrent_collect() const {
  // In the future we might want to add in fragmentation stats --
  // including erosion of the "mountain" -- into this decision as well.
  return !adaptive_freelists() && linearAllocationWouldFail();
}
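// A minimal sketch of how collection-policy code might consult this hint;
// the caller and start_concurrent_cycle() below are illustrative
// assumptions, not HotSpot API:
//
//   if (cms_space->should_concurrent_collect()) {
//     // The non-adaptive free lists can no longer refill the linAB, so
//     // start a concurrent cycle before allocation failure forces a
//     // stop-the-world collection.
//     start_concurrent_cycle();
//   }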

// Support for compaction

void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
  // prepare_for_compaction() uses the space between live objects
  // so that a later phase can skip dead space quickly. As a result,
  // verification of the free lists does not work after this point.
}

#define obj_size(q) adjustObjectSize(oop(q)->size())
#define adjust_obj_size(s) adjustObjectSize(s)
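// These adapters supply the per-space size hooks expected by the generic
// SCAN_AND_* compaction macros (see space.hpp): obj_size(q) yields the
// adjusted size of the object at q, and adjust_obj_size(s) adjusts an
// already-computed size s.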

void CompactibleFreeListSpace::adjust_pointers() {
  // In other versions of adjust_pointers(), a bail-out based on the
  // amount of live data in the generation (i.e., bail out if it is 0)
  // may be used.
  // Cannot test used() == 0 here because the free lists have already
  // been mangled by the compaction.

  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);