518 // The following array-pair keeps track of mark words
519 // displaced to accommodate the overflow list above.
520 // This code will likely be revisited under RFE#4922830.
521 GrowableArray<oop>* _preserved_oop_stack;
522 GrowableArray<markOop>* _preserved_mark_stack;
523
524 int* _hash_seed;
525
526 // In support of multi-threaded concurrent phases
527 YieldingFlexibleWorkGang* _conc_workers;
528
529 // Performance Counters
530 CollectorCounters* _gc_counters;
531
532 // Initialization Errors
533 bool _completed_initialization;
534
535 // In support of ExplicitGCInvokesConcurrent
536 static bool _full_gc_requested;
537 unsigned int _collection_count_start;
538 // Should we unload classes this concurrent cycle?
539 // Set in response to a concurrent full gc request.
540 bool _unload_classes;
541 bool _unloaded_classes_last_cycle;
542 // Did we (allow) unload classes in the previous concurrent cycle?
543 bool cms_unloaded_classes_last_cycle() const {
544 return _unloaded_classes_last_cycle || CMSClassUnloadingEnabled;
545 }
546
547 // Verification support
548 CMSBitMap _verification_mark_bm;
549 void verify_after_remark_work_1();
550 void verify_after_remark_work_2();
551
552 // true if any verification flag is on.
553 bool _verifying;
554 bool verifying() const { return _verifying; }
555 void set_verifying(bool v) { _verifying = v; }
556
557 // Collector policy
558 ConcurrentMarkSweepPolicy* _collector_policy;
559 ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
560
561 // Check whether the gc time limit has been
562 // exceeded and set the size policy flag
563 // appropriately.
564 void check_gc_time_limit();
634 bool _between_prologue_and_epilogue;
635
636 // Signalling/state related to coordination between foreground and background GC.
637 // Note: When the baton has been passed from background GC to foreground GC,
638 // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
639 static bool _foregroundGCIsActive; // true iff foreground collector is active or
640 // wants to go active
641 static bool _foregroundGCShouldWait; // true iff background GC is active and has not
642 // yet passed the baton to the foreground GC
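// An illustrative sketch of the handshake these two flags imply (not the
// actual code; the monitor guarding them is assumed here to be CGC_lock):
//
//   // Foreground collector, wanting the baton:
//   _foregroundGCIsActive = true;
//   while (_foregroundGCShouldWait) {
//     CGC_lock->wait();                 // background GC still has the baton
//   }
//   ... do the foreground collection ...
//   _foregroundGCIsActive = false;
//
//   // Background collector, at a safe yield point:
//   if (_foregroundGCIsActive) {
//     _foregroundGCShouldWait = false;  // pass the baton
//     CGC_lock->notify();
//   }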
643
644 // Support for CMSScheduleRemark (abortable preclean)
645 bool _abort_preclean;
646 bool _start_sampling;
647
648 int _numYields;
649 size_t _numDirtyCards;
650 uint _sweepCount;
651 // Number of full GCs since the last concurrent GC.
652 uint _full_gcs_since_conc_gc;
653
654 // If occupancy exceeds this threshold, start a new CMS cycle.
655 double _initiatingOccupancy;
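// For illustration only, the kind of trigger this threshold supports (the
// real test, with more conditions, lives in shouldConcurrentCollect()):
//
//   if (_cmsGen->occupancy() >= _initiatingOccupancy) {
//     // request that a concurrent collection cycle be started
//   }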
656 // occupancy used for bootstrapping stats
657 double _bootstrap_occupancy;
658
659 // timer
660 elapsedTimer _timer;
661
662 // Timing, allocation and promotion statistics, used for scheduling.
663 CMSStats _stats;
664
665 // Allocation limits installed in the young gen, used only in
666 // CMSIncrementalMode. When an allocation in the young gen would cross one of
667 // these limits, the cms generation is notified and the cms thread is started
668 // or stopped, respectively.
669 HeapWord* _icms_start_limit;
670 HeapWord* _icms_stop_limit;
671
672 enum CMS_op_type {
673 CMS_op_checkpointRootsInitial,
674 CMS_op_checkpointRootsFinal
675 };
808 // allocation limits in the young gen.
809 void icms_update_allocation_limits();
810
811 size_t block_size_using_printezis_bits(HeapWord* addr) const;
812 size_t block_size_if_printezis_bits(HeapWord* addr) const;
813 HeapWord* next_card_start_after_block(HeapWord* addr) const;
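// Background on block_size_using_printezis_bits() and
// block_size_if_printezis_bits(), summarized here rather than specified: the
// "Printezis bits" device marks a pair of bits in the mark bitmap that
// bracket an object whose header cannot safely be consulted during
// concurrent operation, so its block size can be recovered from the bitmap
// alone. As the names suggest, the "using" form expects the bits to be
// present, while the "if" form is expected to report when they are not
// (e.g., by returning 0).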
814
815 void setup_cms_unloading_and_verification_state();
816 public:
817 CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
818 ConcurrentMarkSweepGeneration* permGen,
819 CardTableRS* ct,
820 ConcurrentMarkSweepPolicy* cp);
821 ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
822
823 ReferenceProcessor* ref_processor() { return _ref_processor; }
824 void ref_processor_init();
825
826 Mutex* bitMapLock() const { return _markBitMap.lock(); }
827 static CollectorState abstract_state() { return _collectorState; }
828 double initiatingOccupancy() const { return _initiatingOccupancy; }
829
830 bool should_abort_preclean() const; // Whether preclean should be aborted.
831 size_t get_eden_used() const;
832 size_t get_eden_capacity() const;
833
834 ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
835
836 // locking checks
837 NOT_PRODUCT(static bool have_cms_token();)
838
839 // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
840 bool shouldConcurrentCollect();
841
842 void collect(bool full,
843 bool clear_all_soft_refs,
844 size_t size,
845 bool tlab);
846 void collect_in_background(bool clear_all_soft_refs);
847 void collect_in_foreground(bool clear_all_soft_refs);
848
849 // In support of ExplicitGCInvokesConcurrent
850 static void request_full_gc(unsigned int full_gc_count);
851 // Should we unload classes in a particular concurrent cycle?
852 bool cms_should_unload_classes() const {
853 assert(!_unload_classes || ExplicitGCInvokesConcurrentAndUnloadsClasses,
854 "Inconsistency; see CR 6541037");
855 return _unload_classes || CMSClassUnloadingEnabled;
856 }
857
858 void direct_allocated(HeapWord* start, size_t size);
859
860 // Object is dead if not marked and current phase is sweeping.
861 bool is_dead_obj(oop obj) const;
862
863 // After a promotion (of "start"), do any necessary marking.
864 // If "par", then it's being done by a parallel GC thread.
865 // The last two args indicate if we need precise marking
866 // and if so the size of the object so it can be dirtied
867 // in its entirety.
868 void promoted(bool par, HeapWord* start,
869 bool is_obj_array, size_t obj_size);
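// A hypothetical call site, for illustration only (the real callers sit in
// the promotion paths):
//
//   // After copying "obj" to address "start" in the CMS generation:
//   collector()->promoted(true /* par */, start,
//                         obj->is_objArray(), obj_size);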
870
871 HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
872 size_t word_size);
873
874 void getFreelistLocks() const;
875 void releaseFreelistLocks() const;
876 bool haveFreelistLocks() const;
1005 // Non-product stat counters
1006 NOT_PRODUCT(
1007 int _numObjectsPromoted;
1008 int _numWordsPromoted;
1009 int _numObjectsAllocated;
1010 int _numWordsAllocated;
1011 )
1012
1013 // Used for sizing decisions
1014 bool _incremental_collection_failed;
1015 bool incremental_collection_failed() {
1016 return _incremental_collection_failed;
1017 }
1018 void set_incremental_collection_failed() {
1019 _incremental_collection_failed = true;
1020 }
1021 void clear_incremental_collection_failed() {
1022 _incremental_collection_failed = false;
1023 }
1024
1025 private:
1026 // For parallel young-gen GC support.
1027 CMSParGCThreadState** _par_gc_thread_states;
1028
1029 // Reason generation was expanded
1030 CMSExpansionCause::Cause _expansion_cause;
1031
1032 // accessors
1033 void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
1034 CMSExpansionCause::Cause expansion_cause() { return _expansion_cause; }
1035
1036 // In support of MinChunkSize being larger than min object size
1037 const double _dilatation_factor;
1038
1039 enum CollectionTypes {
1040 Concurrent_collection_type = 0,
1041 MS_foreground_collection_type = 1,
1042 MSC_foreground_collection_type = 2,
1043 Unknown_collection_type = 3
1044 };
1045
1046 CollectionTypes _debug_collection_type;
1047
1048 protected:
1049 // Grow generation by specified size (returns false if unable to grow)
1050 bool grow_by(size_t bytes);
1051 // Grow generation to reserved size.
1052 bool grow_to_reserved();
1053 // Shrink generation by the specified size.
1054 virtual void shrink_by(size_t bytes);
1055
1056 // Update statistics for GC
1057 virtual void update_gc_stats(int level, bool full);
1058
1059 // Maximum available space in the generation, including
1060 // uncommitted space.
1061 size_t max_available() const;
1062
1063 public:
1064 ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
1065 int level, CardTableRS* ct,
1066 bool use_adaptive_freelists,
1067 FreeBlockDictionary::DictionaryChoice);
1068
1069 // Accessors
1070 CMSCollector* collector() const { return _collector; }
1071 static void set_collector(CMSCollector* collector) {
1072 assert(_collector == NULL, "already set");
1073 _collector = collector;
1074 }
1075 CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; }
1076
1077 Mutex* freelistLock() const;
1078
1079 virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
1080
1081 // Adaptive size policy
1082 CMSAdaptiveSizePolicy* size_policy();
1086 // Note: CMS does MT-discovery during the parallel-remark
1087 // phases. Use ReferenceProcessorMTMutator to make refs
1088 // discovery MT-safe during such phases or other parallel
1089 // discovery phases in the future. This may all go away
1090 // if/when we decide that refs discovery is sufficiently
1091 // rare that the cost of the CAS's involved is in the
1092 // noise. That's a measurement that should be done, and
1093 // the code simplified if that turns out to be the case.
1094 return false;
1095 }
1096
1097 // Override
1098 virtual void ref_processor_init();
1099
1100 void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }
1101
1102 // Space enquiries
1103 size_t capacity() const;
1104 size_t used() const;
1105 size_t free() const;
1106 double occupancy() { return ((double)used())/((double)capacity()); }
1107 size_t contiguous_available() const;
1108 size_t unsafe_max_alloc_nogc() const;
1109
1110 // Overrides
1111 MemRegion used_region() const;
1112 MemRegion used_region_at_save_marks() const;
1113
1114 // Does a "full" (forced) collection invoked on this generation collect
1115 // all younger generations as well? Note that the second conjunct is a
1116 // hack to allow the collection of the younger gen first if the flag is
1117 // set. This is better than using the policy's should_collect_gen0_first(),
1118 // since that causes an extra, unnecessary restart-&-stop-world pair.
1119 virtual bool full_collects_younger_generations() const {
1120 return UseCMSCompactAtFullCollection && !CollectGen0First;
1121 }
1122
1123 void space_iterate(SpaceClosure* blk, bool usedOnly = false);
1124
1125 // Support for compaction
1126 CompactibleSpace* first_compaction_space() const;
1141 size_t word_size);
1142
1143 // Used by CMSStats to track direct allocation. The value is sampled and
1144 // reset after each young gen collection.
1145 size_t direct_allocated_words() const { return _direct_allocated_words; }
1146 void reset_direct_allocated_words() { _direct_allocated_words = 0; }
1147
1148 // Overrides for parallel promotion.
1149 virtual oop par_promote(int thread_num,
1150 oop obj, markOop m, size_t word_sz);
1151 // This one should not be called for CMS.
1152 virtual void par_promote_alloc_undo(int thread_num,
1153 HeapWord* obj, size_t word_sz);
1154 virtual void par_promote_alloc_done(int thread_num);
1155 virtual void par_oop_since_save_marks_iterate_done(int thread_num);
1156
1157 virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
1158 bool younger_handles_promotion_failure) const;
1159
1160 bool should_collect(bool full, size_t size, bool tlab);
1161 // XXXPERM
1162 bool shouldConcurrentCollect(double initiatingOccupancy); // XXXPERM
1163 void collect(bool full,
1164 bool clear_all_soft_refs,
1165 size_t size,
1166 bool tlab);
1167
1168 HeapWord* expand_and_allocate(size_t word_size,
1169 bool tlab,
1170 bool parallel = false);
1171
1172 // GC prologue and epilogue
1173 void gc_prologue(bool full);
1174 void gc_prologue_work(bool full, bool registerClosure,
1175 ModUnionClosure* modUnionClosure);
1176 void gc_epilogue(bool full);
1177 void gc_epilogue_work(bool full);
1178
1179 // Time since last GC of this generation
1180 jlong time_of_last_gc(jlong now) {
1181 return collector()->time_of_last_gc(now);
1182 }
518 // The following array-pair keeps track of mark words
519 // displaced to accommodate the overflow list above.
520 // This code will likely be revisited under RFE#4922830.
521 GrowableArray<oop>* _preserved_oop_stack;
522 GrowableArray<markOop>* _preserved_mark_stack;
523
524 int* _hash_seed;
525
526 // In support of multi-threaded concurrent phases
527 YieldingFlexibleWorkGang* _conc_workers;
528
529 // Performance Counters
530 CollectorCounters* _gc_counters;
531
532 // Initialization Errors
533 bool _completed_initialization;
534
535 // In support of ExplicitGCInvokesConcurrent
536 static bool _full_gc_requested;
537 unsigned int _collection_count_start;
538
539 // Should we unload classes this concurrent cycle?
540 bool _should_unload_classes;
541 unsigned int _concurrent_cycles_since_last_unload;
542 unsigned int concurrent_cycles_since_last_unload() const {
543 return _concurrent_cycles_since_last_unload;
544 }
545 // Did we (allow) unload classes in the previous concurrent cycle?
546 bool unloaded_classes_last_cycle() const {
547 return concurrent_cycles_since_last_unload() == 0;
548 }
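// (Presumed bookkeeping: the counter is reset to zero by a cycle that
// unloads classes and incremented by one that does not, which is what makes
// the "== 0" test above mean "unloaded last cycle".)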
549
550 // Verification support
551 CMSBitMap _verification_mark_bm;
552 void verify_after_remark_work_1();
553 void verify_after_remark_work_2();
554
555 // true if any verification flag is on.
556 bool _verifying;
557 bool verifying() const { return _verifying; }
558 void set_verifying(bool v) { _verifying = v; }
559
560 // Collector policy
561 ConcurrentMarkSweepPolicy* _collector_policy;
562 ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
563
564 // Check whether the gc time limit has been
565 // exceeded and set the size policy flag
566 // appropriately.
567 void check_gc_time_limit();
637 bool _between_prologue_and_epilogue;
638
639 // Signalling/state related to coordination between foreground and background GC.
640 // Note: When the baton has been passed from background GC to foreground GC,
641 // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
642 static bool _foregroundGCIsActive; // true iff foreground collector is active or
643 // wants to go active
644 static bool _foregroundGCShouldWait; // true iff background GC is active and has not
645 // yet passed the baton to the foreground GC
646
647 // Support for CMSScheduleRemark (abortable preclean)
648 bool _abort_preclean;
649 bool _start_sampling;
650
651 int _numYields;
652 size_t _numDirtyCards;
653 uint _sweepCount;
654 // Number of full GCs since the last concurrent GC.
655 uint _full_gcs_since_conc_gc;
656
657 // occupancy used for bootstrapping stats
658 double _bootstrap_occupancy;
659
660 // timer
661 elapsedTimer _timer;
662
663 // Timing, allocation and promotion statistics, used for scheduling.
664 CMSStats _stats;
665
666 // Allocation limits installed in the young gen, used only in
667 // CMSIncrementalMode. When an allocation in the young gen would cross one of
668 // these limits, the cms generation is notified and the cms thread is started
669 // or stopped, respectively.
670 HeapWord* _icms_start_limit;
671 HeapWord* _icms_stop_limit;
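// A schematic of the young gen under CMSIncrementalMode (illustrative; the
// limits are recomputed by icms_update_allocation_limits() from the duty
// cycle):
//
//   eden:  |-------------------+--------------------+------------|
//        bottom       _icms_start_limit     _icms_stop_limit    end
//
// An allocation that crosses _icms_start_limit signals the cms thread to
// start; one that crosses _icms_stop_limit signals it to stop.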
672
673 enum CMS_op_type {
674 CMS_op_checkpointRootsInitial,
675 CMS_op_checkpointRootsFinal
676 };
809 // allocation limits in the young gen.
810 void icms_update_allocation_limits();
811
812 size_t block_size_using_printezis_bits(HeapWord* addr) const;
813 size_t block_size_if_printezis_bits(HeapWord* addr) const;
814 HeapWord* next_card_start_after_block(HeapWord* addr) const;
815
816 void setup_cms_unloading_and_verification_state();
817 public:
818 CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
819 ConcurrentMarkSweepGeneration* permGen,
820 CardTableRS* ct,
821 ConcurrentMarkSweepPolicy* cp);
822 ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
823
824 ReferenceProcessor* ref_processor() { return _ref_processor; }
825 void ref_processor_init();
826
827 Mutex* bitMapLock() const { return _markBitMap.lock(); }
828 static CollectorState abstract_state() { return _collectorState; }
829
830 bool should_abort_preclean() const; // Whether preclean should be aborted.
831 size_t get_eden_used() const;
832 size_t get_eden_capacity() const;
833
834 ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
835
836 // locking checks
837 NOT_PRODUCT(static bool have_cms_token();)
838
839 // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
840 bool shouldConcurrentCollect();
841
842 void collect(bool full,
843 bool clear_all_soft_refs,
844 size_t size,
845 bool tlab);
846 void collect_in_background(bool clear_all_soft_refs);
847 void collect_in_foreground(bool clear_all_soft_refs);
848
849 // In support of ExplicitGCInvokesConcurrent
850 static void request_full_gc(unsigned int full_gc_count);
851 // Should we unload classes in a particular concurrent cycle?
852 bool should_unload_classes() const {
853 return _should_unload_classes;
854 }
855 bool update_should_unload_classes();
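// A sketch of the decision update_should_unload_classes() is meant to make
// each cycle (an assumption-laden outline, not the definitive code):
//
//   _should_unload_classes = false;
//   if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
//     _should_unload_classes = true;   // explicit concurrent full gc request
//   } else if (CMSClassUnloadingEnabled) {
//     // e.g., unload if "too many" cycles have passed since the last unload:
//     _should_unload_classes =
//       (concurrent_cycles_since_last_unload() >= CMSClassUnloadingMaxInterval);
//   }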
856
857 void direct_allocated(HeapWord* start, size_t size);
858
859 // Object is dead if not marked and current phase is sweeping.
860 bool is_dead_obj(oop obj) const;
861
862 // After a promotion (of "start"), do any necessary marking.
863 // If "par", then it's being done by a parallel GC thread.
864 // The last two args indicate if we need precise marking
865 // and if so the size of the object so it can be dirtied
866 // in its entirety.
867 void promoted(bool par, HeapWord* start,
868 bool is_obj_array, size_t obj_size);
869
870 HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
871 size_t word_size);
872
873 void getFreelistLocks() const;
874 void releaseFreelistLocks() const;
875 bool haveFreelistLocks() const;
1004 // Non-product stat counters
1005 NOT_PRODUCT(
1006 int _numObjectsPromoted;
1007 int _numWordsPromoted;
1008 int _numObjectsAllocated;
1009 int _numWordsAllocated;
1010 )
1011
1012 // Used for sizing decisions
1013 bool _incremental_collection_failed;
1014 bool incremental_collection_failed() {
1015 return _incremental_collection_failed;
1016 }
1017 void set_incremental_collection_failed() {
1018 _incremental_collection_failed = true;
1019 }
1020 void clear_incremental_collection_failed() {
1021 _incremental_collection_failed = false;
1022 }
1023
1024 // accessors
1025 void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
1026 CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }
1027
1028 private:
1029 // For parallel young-gen GC support.
1030 CMSParGCThreadState** _par_gc_thread_states;
1031
1032 // Reason generation was expanded
1033 CMSExpansionCause::Cause _expansion_cause;
1034
1035 // In support of MinChunkSize being larger than min object size
1036 const double _dilatation_factor;
1037
1038 enum CollectionTypes {
1039 Concurrent_collection_type = 0,
1040 MS_foreground_collection_type = 1,
1041 MSC_foreground_collection_type = 2,
1042 Unknown_collection_type = 3
1043 };
1044
1045 CollectionTypes _debug_collection_type;
1046
1047 // Occupancy fraction at which to start a CMS collection that will
1048 // collect this generation (at least).
1049 double _initiating_occupancy;
1050
1051 protected:
1052 // Grow generation by specified size (returns false if unable to grow)
1053 bool grow_by(size_t bytes);
1054 // Grow generation to reserved size.
1055 bool grow_to_reserved();
1056 // Shrink generation by the specified size.
1057 virtual void shrink_by(size_t bytes);
1058
1059 // Update statistics for GC
1060 virtual void update_gc_stats(int level, bool full);
1061
1062 // Maximum available space in the generation, including
1063 // uncommitted space.
1064 size_t max_available() const;
1065
1066 // Getter and initializer for the _initiating_occupancy field.
1067 double initiating_occupancy() const { return _initiating_occupancy; }
1068 void init_initiating_occupancy(intx io, intx tr);
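// A sketch of what init_initiating_occupancy(io, tr) presumably computes,
// where "io" is an explicit occupancy percentage (negative meaning unset)
// and "tr" a trigger ratio, both expected to be at most 100:
//
//   if (io >= 0) {
//     _initiating_occupancy = (double)io / 100.0;
//   } else {
//     // derive a default from MinHeapFreeRatio, scaled by the trigger ratio
//     _initiating_occupancy =
//       ((100.0 - MinHeapFreeRatio) +
//        (double)tr * MinHeapFreeRatio / 100.0) / 100.0;
//   }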
1069
1070 public:
1071 ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
1072 int level, CardTableRS* ct,
1073 bool use_adaptive_freelists,
1074 FreeBlockDictionary::DictionaryChoice);
1075
1076 // Accessors
1077 CMSCollector* collector() const { return _collector; }
1078 static void set_collector(CMSCollector* collector) {
1079 assert(_collector == NULL, "already set");
1080 _collector = collector;
1081 }
1082 CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; }
1083
1084 Mutex* freelistLock() const;
1085
1086 virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
1087
1088 // Adaptive size policy
1089 CMSAdaptiveSizePolicy* size_policy();
1093 // Note: CMS does MT-discovery during the parallel-remark
1094 // phases. Use ReferenceProcessorMTMutator to make refs
1095 // discovery MT-safe during such phases or other parallel
1096 // discovery phases in the future. This may all go away
1097 // if/when we decide that refs discovery is sufficiently
1098 // rare that the cost of the CAS's involved is in the
1099 // noise. That's a measurement that should be done, and
1100 // the code simplified if that turns out to be the case.
1101 return false;
1102 }
1103
1104 // Override
1105 virtual void ref_processor_init();
1106
1107 void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }
1108
1109 // Space enquiries
1110 size_t capacity() const;
1111 size_t used() const;
1112 size_t free() const;
1113 double occupancy() const { return ((double)used())/((double)capacity()); }
1114 size_t contiguous_available() const;
1115 size_t unsafe_max_alloc_nogc() const;
1116
1117 // Overrides
1118 MemRegion used_region() const;
1119 MemRegion used_region_at_save_marks() const;
1120
1121 // Does a "full" (forced) collection invoked on this generation collect
1122 // all younger generations as well? Note that the second conjunct is a
1123 // hack to allow the collection of the younger gen first if the flag is
1124 // set. This is better than using the policy's should_collect_gen0_first(),
1125 // since that causes an extra, unnecessary restart-&-stop-world pair.
1126 virtual bool full_collects_younger_generations() const {
1127 return UseCMSCompactAtFullCollection && !CollectGen0First;
1128 }
1129
1130 void space_iterate(SpaceClosure* blk, bool usedOnly = false);
1131
1132 // Support for compaction
1133 CompactibleSpace* first_compaction_space() const;
1148 size_t word_size);
1149
1150 // Used by CMSStats to track direct allocation. The value is sampled and
1151 // reset after each young gen collection.
1152 size_t direct_allocated_words() const { return _direct_allocated_words; }
1153 void reset_direct_allocated_words() { _direct_allocated_words = 0; }
1154
1155 // Overrides for parallel promotion.
1156 virtual oop par_promote(int thread_num,
1157 oop obj, markOop m, size_t word_sz);
1158 // This one should not be called for CMS.
1159 virtual void par_promote_alloc_undo(int thread_num,
1160 HeapWord* obj, size_t word_sz);
1161 virtual void par_promote_alloc_done(int thread_num);
1162 virtual void par_oop_since_save_marks_iterate_done(int thread_num);
1163
1164 virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
1165 bool younger_handles_promotion_failure) const;
1166
1167 bool should_collect(bool full, size_t size, bool tlab);
1168 virtual bool should_concurrent_collect() const;
1169 virtual bool is_too_full() const;
1170 void collect(bool full,
1171 bool clear_all_soft_refs,
1172 size_t size,
1173 bool tlab);
1174
1175 HeapWord* expand_and_allocate(size_t word_size,
1176 bool tlab,
1177 bool parallel = false);
1178
1179 // GC prologue and epilogue
1180 void gc_prologue(bool full);
1181 void gc_prologue_work(bool full, bool registerClosure,
1182 ModUnionClosure* modUnionClosure);
1183 void gc_epilogue(bool full);
1184 void gc_epilogue_work(bool full);
1185
1186 // Time since last GC of this generation
1187 jlong time_of_last_gc(jlong now) {
1188 return collector()->time_of_last_gc(now);
1189 }