  21  * have any questions.
  22  *
  23  */
  24 
  25 class CompactibleFreeListSpace;
  26 
  27 // A class for maintaining a free list of FreeChunks.  The FreeList
  28 // maintains the structure of the list (head, tail, etc.) plus
  29 // statistics for allocations from the list.  The links between items
  30 // are not part of the FreeList.  The statistics are
  31 // used to make decisions about coalescing FreeChunks when they
  32 // are swept during collection.
  33 //
  34 // See the corresponding .cpp file for a description of the specifics
  35 // for that implementation.
  36 
  37 class Mutex;
  38 
  39 class FreeList VALUE_OBJ_CLASS_SPEC {
  40   friend class CompactibleFreeListSpace;

  41   FreeChunk*    _head;          // List of free chunks
  42   FreeChunk*    _tail;          // Tail of list of free chunks
  43   size_t        _size;          // Size in Heap words of each chunk
  44   ssize_t       _count;         // Number of entries in list
  45   size_t        _hint;          // next larger size list with a positive surplus
  46 
  47   AllocationStats _allocation_stats;            // statistics for smart allocation
  48 
  49 #ifdef ASSERT
  50   Mutex*        _protecting_lock;
  51 #endif
  52 
  53   // Fails an assertion if the protecting lock (if any) is not held.
  54   void assert_proper_lock_protection_work() const PRODUCT_RETURN;
  55   void assert_proper_lock_protection() const {
  56 #ifdef ASSERT
  57     if (_protecting_lock != NULL)
  58       assert_proper_lock_protection_work();
  59 #endif
  60   }
  61 
  62   // Initialize the allocation statistics.
  63  protected:
  64   void init_statistics();
  65   void set_count(ssize_t v) { _count = v;}
  66   void increment_count() { _count++; }
  67   void decrement_count() {
  68     _count--;
  69     assert(_count >= 0, "Count should not be negative"); }

  70 
  71  public:
  72   // Constructor
  73   // Construct a list without any entries.
  74   FreeList();
  75   // Construct a list with "fc" as the first (and lone) entry in the list.
  76   FreeList(FreeChunk* fc);
  77   // Construct a list which will have a FreeChunk at address "addr" and
  78   // of size "size" as the first (and lone) entry in the list.
  79   FreeList(HeapWord* addr, size_t size);
  80 
  81   // Reset the head, tail, hint, and count of a free list.
  82   void reset(size_t hint);
  83 
  84   // Declare the current free list to be protected by the given lock.
  85 #ifdef ASSERT
  86   void set_protecting_lock(Mutex* protecting_lock) {
  87     _protecting_lock = protecting_lock;
  88   }
  89 #endif
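A minimal caller-side sketch (illustrative only, not part of this file) of how the debug-only lock wiring above might be used by an owning space; the helper name and parameters are assumptions, not actual CompactibleFreeListSpace members:

#ifdef ASSERT
// Hypothetical setup helper: tell each size-class list which Mutex guards it,
// so later mutations can be checked via assert_proper_lock_protection().
void wire_protecting_locks(FreeList* lists, size_t n, Mutex* lock) {
  for (size_t i = 0; i < n; i++) {
    lists[i].set_protecting_lock(lock);
  }
}
#endif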


 142   ssize_t count() const {
 143     return _count;
 144   }
 145   size_t hint() const {
 146     return _hint;
 147   }
 148   void set_hint(size_t v) {
 149     assert_proper_lock_protection();
 150     assert(v == 0 || _size < v, "Bad hint"); _hint = v;
 151   }
 152 
 153   // Accessors for statistics
 154   AllocationStats* allocation_stats() {
 155     assert_proper_lock_protection();
 156     return &_allocation_stats;
 157   }
 158 
 159   ssize_t desired() const {
 160     return _allocation_stats.desired();
 161   }
 162   void compute_desired(float inter_sweep_current,
 163                        float inter_sweep_estimate) {
 164     assert_proper_lock_protection();
 165     _allocation_stats.compute_desired(_count,
 166                                       inter_sweep_current,
 167                                       inter_sweep_estimate);
 168   }
 169   ssize_t coalDesired() const {
 170     return _allocation_stats.coalDesired();
 171   }
 172   void set_coalDesired(ssize_t v) {
 173     assert_proper_lock_protection();
 174     _allocation_stats.set_coalDesired(v);
 175   }
 176 
 177   ssize_t surplus() const {
 178     return _allocation_stats.surplus();
 179   }
 180   void set_surplus(ssize_t v) {
 181     assert_proper_lock_protection();


 281 
  282   // Unlink this chunk from its free list.
 283   void removeChunk(FreeChunk* fc);
 284 
 285   // Add this chunk to this free list.
 286   void returnChunkAtHead(FreeChunk* fc);
 287   void returnChunkAtTail(FreeChunk* fc);
 288 
 289   // Similar to returnChunk* but also records some diagnostic
 290   // information.
 291   void returnChunkAtHead(FreeChunk* fc, bool record_return);
 292   void returnChunkAtTail(FreeChunk* fc, bool record_return);
 293 
 294   // Prepend "fl" (whose size is required to be the same as that of "this")
 295   // to the front of "this" list.
 296   void prepend(FreeList* fl);
 297 
  298   // Verify that the chunk "fc" is in this free list.
  299   // Returns false if "fc" is not found.
 300   bool verifyChunkInFreeLists(FreeChunk* fc) const;
 301 };
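A minimal usage sketch (illustrative only) of the list-manipulation interface declared above, assuming two FreeChunks of the same size class are already available:

void freelist_usage_sketch(FreeChunk* fc1, FreeChunk* fc2) {
  FreeList fl(fc1);                  // list whose size class is fc1's size
  fl.returnChunkAtTail(fc2);         // append a second chunk of that size
  assert(fl.count() == 2, "expected two chunks");
  assert(fl.verifyChunkInFreeLists(fc1), "fc1 should be linked");
  fl.removeChunk(fc2);               // unlink fc2 again
  assert(fl.count() == 1, "back to one chunk");
}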


  21  * have any questions.
  22  *
  23  */
  24 
  25 class CompactibleFreeListSpace;
  26 
  27 // A class for maintaining a free list of FreeChunks.  The FreeList
  28 // maintains the structure of the list (head, tail, etc.) plus
  29 // statistics for allocations from the list.  The links between items
  30 // are not part of the FreeList.  The statistics are
  31 // used to make decisions about coalescing FreeChunks when they
  32 // are swept during collection.
  33 //
  34 // See the corresponding .cpp file for a description of the specifics
  35 // for that implementation.
  36 
  37 class Mutex;
  38 
  39 class FreeList VALUE_OBJ_CLASS_SPEC {
  40   friend class CompactibleFreeListSpace;
  41   friend class printTreeCensusClosure;
  42   FreeChunk*    _head;          // List of free chunks
  43   FreeChunk*    _tail;          // Tail of list of free chunks
  44   size_t        _size;          // Size in Heap words of each chunk
  45   ssize_t       _count;         // Number of entries in list
  46   size_t        _hint;          // next larger size list with a positive surplus
  47 
  48   AllocationStats _allocation_stats;            // statistics for smart allocation
  49 
  50 #ifdef ASSERT
  51   Mutex*        _protecting_lock;
  52 #endif
  53 
  54   // Fails an assertion if the protecting lock (if any) is not held.
  55   void assert_proper_lock_protection_work() const PRODUCT_RETURN;
  56   void assert_proper_lock_protection() const {
  57 #ifdef ASSERT
  58     if (_protecting_lock != NULL)
  59       assert_proper_lock_protection_work();
  60 #endif
  61   }
  62 
  63   // Initialize the allocation statistics.
  64  protected:
  65   void init_statistics();
  66   void set_count(ssize_t v) { _count = v;}
  67   void increment_count()    { _count++; }
  68   void decrement_count() {
  69     _count--;
  70     assert(_count >= 0, "Count should not be negative");
  71   }
  72 
  73  public:
  74   // Constructors
  75   // Construct a list without any entries.
  76   FreeList();
  77   // Construct a list with "fc" as the first (and lone) entry in the list.
  78   FreeList(FreeChunk* fc);
  79   // Construct a list which will have a FreeChunk at address "addr" and
  80   // of size "size" as the first (and lone) entry in the list.
  81   FreeList(HeapWord* addr, size_t size);
  82 
  83   // Reset the head, tail, hint, and count of a free list.
  84   void reset(size_t hint);
  85 
  86   // Declare the current free list to be protected by the given lock.
  87 #ifdef ASSERT
  88   void set_protecting_lock(Mutex* protecting_lock) {
  89     _protecting_lock = protecting_lock;
  90   }
  91 #endif


 144   ssize_t count() const {
 145     return _count;
 146   }
 147   size_t hint() const {
 148     return _hint;
 149   }
 150   void set_hint(size_t v) {
 151     assert_proper_lock_protection();
 152     assert(v == 0 || _size < v, "Bad hint"); _hint = v;
 153   }
 154 
 155   // Accessors for statistics
 156   AllocationStats* allocation_stats() {
 157     assert_proper_lock_protection();
 158     return &_allocation_stats;
 159   }
 160 
 161   ssize_t desired() const {
 162     return _allocation_stats.desired();
 163   }
 164   void set_desired(ssize_t v) {
 165     assert_proper_lock_protection();
 166     _allocation_stats.set_desired(v);
 167   }
 168   void compute_desired(float inter_sweep_current,
 169                        float inter_sweep_estimate) {
 170     assert_proper_lock_protection();
 171     _allocation_stats.compute_desired(_count,
 172                                       inter_sweep_current,
 173                                       inter_sweep_estimate);
 174   }
 175   ssize_t coalDesired() const {
 176     return _allocation_stats.coalDesired();
 177   }
 178   void set_coalDesired(ssize_t v) {
 179     assert_proper_lock_protection();
 180     _allocation_stats.set_coalDesired(v);
 181   }
 182 
 183   ssize_t surplus() const {
 184     return _allocation_stats.surplus();
 185   }
 186   void set_surplus(ssize_t v) {
 187     assert_proper_lock_protection();


 287 
  288   // Unlink this chunk from its free list.
 289   void removeChunk(FreeChunk* fc);
 290 
 291   // Add this chunk to this free list.
 292   void returnChunkAtHead(FreeChunk* fc);
 293   void returnChunkAtTail(FreeChunk* fc);
 294 
 295   // Similar to returnChunk* but also records some diagnostic
 296   // information.
 297   void returnChunkAtHead(FreeChunk* fc, bool record_return);
 298   void returnChunkAtTail(FreeChunk* fc, bool record_return);
 299 
 300   // Prepend "fl" (whose size is required to be the same as that of "this")
 301   // to the front of "this" list.
 302   void prepend(FreeList* fl);
 303 
  304   // Verify that the chunk "fc" is in this free list.
  305   // Returns false if "fc" is not found.
 306   bool verifyChunkInFreeLists(FreeChunk* fc) const;
 307 
 308   // Printing support
 309   static void print_labels_on(outputStream* st, const char* c);
 310   void print_on(outputStream* st, const char* c = NULL) const;
 311 };
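A caller-side sketch (illustrative only) of the printing support declared in this version; the census loop and the parameter names are assumptions made for the example:

// Print one labelled line per non-empty size-class list.
void print_census_sketch(outputStream* st, FreeList* lists, size_t n) {
  FreeList::print_labels_on(st, "size");
  for (size_t i = 0; i < n; i++) {
    if (lists[i].count() > 0) {
      lists[i].print_on(st);
    }
  }
}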