*** 533,549 ****
bool _completed_initialization;
// In support of ExplicitGCInvokesConcurrent
static bool _full_gc_requested;
unsigned int _collection_count_start;
// Should we unload classes this concurrent cycle?
! // Set in response to a concurrent full gc request.
! bool _unload_classes;
! bool _unloaded_classes_last_cycle;
// Did we (allow) unload classes in the previous concurrent cycle?
! bool cms_unloaded_classes_last_cycle() const {
! return _unloaded_classes_last_cycle || CMSClassUnloadingEnabled;
}
// Verification support
CMSBitMap _verification_mark_bm;
void verify_after_remark_work_1();
--- 533,552 ----
bool _completed_initialization;
// In support of ExplicitGCInvokesConcurrent
static bool _full_gc_requested;
unsigned int _collection_count_start;
+
// Should we unload classes this concurrent cycle?
! bool _should_unload_classes;
! unsigned int _concurrent_cycles_since_last_unload;
! unsigned int concurrent_cycles_since_last_unload() const {
! return _concurrent_cycles_since_last_unload;
! }
// Did we (allow) unload classes in the previous concurrent cycle?
! bool unloaded_classes_last_cycle() const {
! return concurrent_cycles_since_last_unload() == 0;
}
// Verification support
CMSBitMap _verification_mark_bm;
void verify_after_remark_work_1();
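
The counter above replaces the old boolean _unloaded_classes_last_cycle: the collector is considered to have unloaded classes in the previous cycle exactly when the counter is zero. A minimal sketch of how the counter might be maintained at the end of each concurrent cycle (the actual update site is in the .cpp, not shown in this hunk):

    // Sketch only: reset the counter when this cycle unloaded classes,
    // otherwise age it by one concurrent cycle.
    if (should_unload_classes()) {
      _concurrent_cycles_since_last_unload = 0;
    } else {
      _concurrent_cycles_since_last_unload++;
    }
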
*** 649,660 ****
size_t _numDirtyCards;
uint _sweepCount;
// number of full gc's since the last concurrent gc.
uint _full_gcs_since_conc_gc;
- // if occupancy exceeds this, start a new gc cycle
- double _initiatingOccupancy;
// occupancy used for bootstrapping stats
double _bootstrap_occupancy;
// timer
elapsedTimer _timer;
--- 652,661 ----
*** 823,833 ****
ReferenceProcessor* ref_processor() { return _ref_processor; }
void ref_processor_init();
Mutex* bitMapLock() const { return _markBitMap.lock(); }
static CollectorState abstract_state() { return _collectorState; }
- double initiatingOccupancy() const { return _initiatingOccupancy; }
bool should_abort_preclean() const; // Whether preclean should be aborted.
size_t get_eden_used() const;
size_t get_eden_capacity() const;
--- 824,833 ----
*** 847,861 ****
void collect_in_foreground(bool clear_all_soft_refs);
// In support of ExplicitGCInvokesConcurrent
static void request_full_gc(unsigned int full_gc_count);
// Should we unload classes in a particular concurrent cycle?
! bool cms_should_unload_classes() const {
! assert(!_unload_classes || ExplicitGCInvokesConcurrentAndUnloadsClasses,
! "Inconsistency; see CR 6541037");
! return _unload_classes || CMSClassUnloadingEnabled;
}
void direct_allocated(HeapWord* start, size_t size);
// Object is dead if not marked and current phase is sweeping.
bool is_dead_obj(oop obj) const;
--- 847,860 ----
void collect_in_foreground(bool clear_all_soft_refs);
// In support of ExplicitGCInvokesConcurrent
static void request_full_gc(unsigned int full_gc_count);
// Should we unload classes in a particular concurrent cycle?
! bool should_unload_classes() const {
! return _should_unload_classes;
}
+ bool update_should_unload_classes();
void direct_allocated(HeapWord* start, size_t size);
// Object is dead if not marked and current phase is sweeping.
bool is_dead_obj(oop obj) const;
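
With this change should_unload_classes() is a plain accessor; the policy decision is concentrated in update_should_unload_classes(), whose body is not part of this header diff. A hedged sketch of one plausible policy, combining the explicit-GC request with CMSClassUnloadingEnabled and the cycle counter above; the CMSClassUnloadingMaxInterval threshold used here is an illustrative assumption:

    // Sketch only: recompute _should_unload_classes at the start of a cycle.
    bool CMSCollector::update_should_unload_classes() {
      _should_unload_classes = false;
      if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
        // An explicit concurrent full gc asked for class unloading.
        _should_unload_classes = true;
      } else if (CMSClassUnloadingEnabled) {
        // Unload if we have gone "too long" without doing so.
        _should_unload_classes =
          (concurrent_cycles_since_last_unload() >= CMSClassUnloadingMaxInterval);
      }
      return _should_unload_classes;
    }
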
*** 1020,1040 ****
}
void clear_incremental_collection_failed() {
_incremental_collection_failed = false;
}
private:
// For parallel young-gen GC support.
CMSParGCThreadState** _par_gc_thread_states;
// Reason generation was expanded
CMSExpansionCause::Cause _expansion_cause;
- // accessors
- void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
- CMSExpansionCause::Cause expansion_cause() { return _expansion_cause; }
-
// In support of MinChunkSize being larger than min object size
const double _dilatation_factor;
enum CollectionTypes {
Concurrent_collection_type = 0,
--- 1019,1039 ----
}
void clear_incremental_collection_failed() {
_incremental_collection_failed = false;
}
+ // accessors
+ void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
+ CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }
+
private:
// For parallel young-gen GC support.
CMSParGCThreadState** _par_gc_thread_states;
// Reason generation was expanded
CMSExpansionCause::Cause _expansion_cause;
// In support of MinChunkSize being larger than min object size
const double _dilatation_factor;
enum CollectionTypes {
Concurrent_collection_type = 0,
*** 1043,1052 ****
--- 1042,1055 ----
Unknown_collection_type = 3
};
CollectionTypes _debug_collection_type;
+ // Fraction of current occupancy at which to start a CMS collection which
+ // will collect this generation (at least).
+ double _initiating_occupancy;
+
protected:
// Grow generation by specified size (returns false if unable to grow)
bool grow_by(size_t bytes);
// Grow generation to reserved size.
bool grow_to_reserved();
*** 1058,1067 ****
--- 1061,1074 ----
// Maximum available space in the generation (including uncommitted)
// space.
size_t max_available() const;
+ // getter and initializer for _initiating_occupancy field.
+ double initiating_occupancy() const { return _initiating_occupancy; }
+ void init_initiating_occupancy(intx io, intx tr);
+
public:
ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
int level, CardTableRS* ct,
bool use_adaptive_freelists,
FreeBlockDictionary::DictionaryChoice);
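
init_initiating_occupancy(intx io, intx tr) presumably derives _initiating_occupancy from an occupancy percentage (io) and a trigger ratio (tr); the implementation lives in the .cpp and is not shown here. A sketch under that assumption, falling back to a MinHeapFreeRatio-based value when no explicit fraction is given:

    // Sketch only: io is an occupancy percentage (negative means "unset"),
    // tr a trigger ratio in percent.
    void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
      assert(io <= 100 && tr <= 100, "Check the arguments");
      if (io >= 0) {
        _initiating_occupancy = (double)io / 100.0;
      } else {
        // No explicit fraction: start a cycle once the free space implied by
        // MinHeapFreeRatio has been eroded by tr percent.
        _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                                 (double)(tr * MinHeapFreeRatio) / 100.0) / 100.0;
      }
    }
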
*** 1101,1111 ****
// Space enquiries
size_t capacity() const;
size_t used() const;
size_t free() const;
! double occupancy() { return ((double)used())/((double)capacity()); }
size_t contiguous_available() const;
size_t unsafe_max_alloc_nogc() const;
// over-rides
MemRegion used_region() const;
--- 1108,1118 ----
// Space enquiries
size_t capacity() const;
size_t used() const;
size_t free() const;
! double occupancy() const { return ((double)used())/((double)capacity()); }
size_t contiguous_available() const;
size_t unsafe_max_alloc_nogc() const;
// over-rides
MemRegion used_region() const;
*** 1156,1167 ****
virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
bool younger_handles_promotion_failure) const;
bool should_collect(bool full, size_t size, bool tlab);
! // XXXPERM
! bool shouldConcurrentCollect(double initiatingOccupancy); // XXXPERM
void collect(bool full,
bool clear_all_soft_refs,
size_t size,
bool tlab);
--- 1163,1174 ----
virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
bool younger_handles_promotion_failure) const;
bool should_collect(bool full, size_t size, bool tlab);
! virtual bool should_concurrent_collect() const;
! virtual bool is_too_full() const;
void collect(bool full,
bool clear_all_soft_refs,
size_t size,
bool tlab);
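
should_concurrent_collect() no longer takes the threshold as a parameter; the generation compares its own occupancy() against initiating_occupancy(). A minimal sketch of the intended check (additional triggers, such as expansion to satisfy an allocation, are omitted), with is_too_full() read as a stricter near-capacity condition whose 0.98 cutoff is an illustrative assumption:

    // Sketch only: trigger a concurrent cycle once occupancy crosses the
    // per-generation initiating threshold.
    bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
      return occupancy() > initiating_occupancy();
    }

    // Sketch only: "too full" as a tighter bound than the initiating threshold.
    bool ConcurrentMarkSweepGeneration::is_too_full() const {
      return should_concurrent_collect() && occupancy() > 0.98;
    }
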