/hg/release/icedtea6-1.11: Summary: Backport 20130618 sec fixes ...

chrisphi at icedtea.classpath.org
Wed Jun 26 13:01:24 PDT 2013


changeset ea7bce0f610a in /hg/release/icedtea6-1.11
details: http://icedtea.classpath.org/hg/release/icedtea6-1.11?cmd=changeset;node=ea7bce0f610a
author: chrisphi
date: Wed Jun 26 15:10:59 2013 -0400

	Summary: Backport 20130618 sec fixes 7158805 and 8001330

	 added patches/security/20130618/7158805-nested_subroutine_rewriting-it6.patch
	 added patches/security/20130618/8001330-checking_order_improvement-it6.patch
	 changed ChangeLog
	 changed Makefile.am
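
Both -it6 patches build on the same underlying change, visible in the hunks below: HotSpot's arena allocation entry points (Chunk::operator new, Arena::grow, Amalloc and friends) gain an AllocFailType parameter so an allocation failure can return NULL instead of unconditionally calling vm_exit_out_of_memory(). A minimal standalone C++ sketch of that pattern follows; it is not the actual HotSpot source, and vm_exit_out_of_memory() here is a stand-in for the real VM routine:

#include <cstdio>
#include <cstdlib>

// Sketch of the failure-mode machinery added by the patch: callers choose
// whether an allocation failure aborts the VM (EXIT_OOM, the old behaviour)
// or simply returns NULL (RETURN_NULL).
class AllocFailStrategy {
 public:
  enum AllocFailEnum { EXIT_OOM, RETURN_NULL };
};
typedef AllocFailStrategy::AllocFailEnum AllocFailType;

// Hypothetical stand-in for the VM's vm_exit_out_of_memory().
static void vm_exit_out_of_memory(size_t bytes, const char* whence) {
  std::fprintf(stderr, "out of memory: %zu bytes in %s\n", bytes, whence);
  std::exit(1);
}

static void* allocate(size_t bytes,
                      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
  void* p = std::malloc(bytes);
  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(bytes, "allocate");  // old behaviour: abort the VM
  }
  return p;  // in RETURN_NULL mode a NULL result propagates to the caller
}

int main() {
  void* a = allocate(64);  // default mode: aborts on failure, never NULL
  // A caller that can tolerate failure opts in and must check the result:
  void* b = allocate(((size_t)-1) / 2, AllocFailStrategy::RETURN_NULL);
  if (b == NULL) {
    std::puts("allocation failed; caller recovers instead of exiting the VM");
  }
  std::free(a);
  std::free(b);  // free(NULL) is a no-op
  return 0;
}

Defaulting the new parameter to EXIT_OOM keeps every existing call site's behaviour unchanged; only callers that explicitly pass RETURN_NULL need NULL checks, which is presumably what lets the 7158805 rewriting fix fail a single oversized request gracefully rather than taking down the whole VM.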


diffstat:

 ChangeLog                                                               |   23 +
 Makefile.am                                                             |   15 +-
 patches/security/20130618/7158805-nested_subroutine_rewriting-it6.patch |  743 ++++++++++
 patches/security/20130618/8001330-checking_order_improvement-it6.patch  |  500 ++++++
 4 files changed, 1268 insertions(+), 13 deletions(-)

diffs (truncated from 1306 to 500 lines):

diff -r a1cb163cb044 -r ea7bce0f610a ChangeLog
--- a/ChangeLog	Wed Jun 26 13:45:20 2013 -0400
+++ b/ChangeLog	Wed Jun 26 15:10:59 2013 -0400
@@ -1,3 +1,26 @@
+2013-06-26  Chris Phillips <chrisphi at redhat.com>
+	* Makefile.am (SECURITY_PATCHES): Drop
+        patches/security/20130618/7158805-nested_subroutine_rewriting.patch
+        patches/security/20130618/8001330-checking_order_improvement.patch
+        patches/openjdk/7036747-elfstringtable.patch
+        patches/openjdk/7017732-move_static_fields_to_class.patch
+        patches/openjdk/6990754-use_native_memory_for_symboltable.patch
+        patches/openjdk/6990754-handle_renames.patch
+        patches/openjdk/7008809-report_class_in_arraystoreexception.patch
+        patches/openjdk/7086585-flexible_field_injection.patch
+        patches/security/20130618/hs_merge-01.patch
+        patches/security/20130618/hs_merge-02.patch
+        patches/security/20130618/hs_merge-03.patch
+        patches/openjdk/7014851-unused_parallel_compaction_code.patch
+        patches/security/20130618/hs_merge-04.patch
+            Add:
+        patches/security/20130618/7158805-nested_subroutine_rewriting-it6.patch
+        patches/security/20130618/8001330-checking_order_improvement-it6.patch
+        * patches/security/20130618/7158805-nested_subroutine_rewriting-it6.patch:
+        Backported fix for 7158805.
+	* patches/security/20130618/8001330-checking_order_improvement-it6.patch:
+        Backported fix for hs portion of 8001330.
+
 2013-06-26  Omair Majid  <omajid at redhat.com>
             Severin Gehwolf  <sgehwolf at redhat.com>
 
diff -r a1cb163cb044 -r ea7bce0f610a Makefile.am
--- a/Makefile.am	Wed Jun 26 13:45:20 2013 -0400
+++ b/Makefile.am	Wed Jun 26 15:10:59 2013 -0400
@@ -377,19 +377,8 @@
 	patches/security/20130618/diamond_fix.patch \
 	patches/security/20130618/handle_npe.patch \
 	patches/security/20130618/javac_issue.patch \
-	patches/security/20130618/7158805-nested_subroutine_rewriting.patch \
-	patches/security/20130618/8001330-checking_order_improvement.patch \
-	patches/openjdk/7036747-elfstringtable.patch \
-	patches/openjdk/7017732-move_static_fields_to_class.patch \
-	patches/openjdk/6990754-use_native_memory_for_symboltable.patch \
-	patches/openjdk/6990754-handle_renames.patch \
-	patches/openjdk/7008809-report_class_in_arraystoreexception.patch \
-	patches/openjdk/7086585-flexible_field_injection.patch \
-	patches/security/20130618/hs_merge-01.patch \
-	patches/security/20130618/hs_merge-02.patch \
-	patches/security/20130618/hs_merge-03.patch \
-	patches/openjdk/7014851-unused_parallel_compaction_code.patch \
-	patches/security/20130618/hs_merge-04.patch \
+	patches/security/20130618/7158805-nested_subroutine_rewriting-it6.patch \
+	patches/security/20130618/8001330-checking_order_improvement-it6.patch \
 	patches/security/20130618/8000642-better_transportation_handling.patch \
 	patches/openjdk/6786028-wcag_bold_tags.patch \
 	patches/openjdk/6786682-wcag_lang.patch \
diff -r a1cb163cb044 -r ea7bce0f610a patches/security/20130618/7158805-nested_subroutine_rewriting-it6.patch
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/patches/security/20130618/7158805-nested_subroutine_rewriting-it6.patch	Wed Jun 26 15:10:59 2013 -0400
@@ -0,0 +1,743 @@
+*** openjdk/hotspot/src/share/vm/memory/allocation.cpp	2011-11-14 17:07:35.000000000 -0500
+--- openjdk/hotspot/src/share/vm/memory/allocation.cpp	2013-06-25 14:55:54.749915166 -0400
+***************
+*** 1,5 ****
+  /*
+!  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+   *
+   * This code is free software; you can redistribute it and/or modify it
+--- 1,5 ----
+  /*
+!  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+   *
+   * This code is free software; you can redistribute it and/or modify it
+***************
+*** 199,205 ****
+     ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }
+  
+    // Allocate a new chunk from the pool (might expand the pool)
+!   void* allocate(size_t bytes) {
+      assert(bytes == _size, "bad size");
+      void* p = NULL;
+      { ThreadCritical tc;
+--- 199,205 ----
+     ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }
+  
+    // Allocate a new chunk from the pool (might expand the pool)
+!   void* allocate(size_t bytes, AllocFailType alloc_failmode) {
+      assert(bytes == _size, "bad size");
+      void* p = NULL;
+      { ThreadCritical tc;
+***************
+*** 207,215 ****
+        p = get_first();
+        if (p == NULL) p = os::malloc(bytes);
+      }
+!     if (p == NULL)
+        vm_exit_out_of_memory(bytes, "ChunkPool::allocate");
+! 
+      return p;
+    }
+  
+--- 207,215 ----
+        p = get_first();
+        if (p == NULL) p = os::malloc(bytes);
+      }
+!     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
+        vm_exit_out_of_memory(bytes, "ChunkPool::allocate");
+!     }
+      return p;
+    }
+  
+***************
+*** 300,306 ****
+  //--------------------------------------------------------------------------------------
+  // Chunk implementation
+  
+! void* Chunk::operator new(size_t requested_size, size_t length) {
+    // requested_size is equal to sizeof(Chunk) but in order for the arena
+    // allocations to come out aligned as expected the size must be aligned
+    // to expected arean alignment.
+--- 300,306 ----
+  //--------------------------------------------------------------------------------------
+  // Chunk implementation
+  
+! void* Chunk::operator new(size_t requested_size, AllocFailType alloc_failmode, size_t length) {
+    // requested_size is equal to sizeof(Chunk) but in order for the arena
+    // allocations to come out aligned as expected the size must be aligned
+    // to expected arean alignment.
+***************
+*** 308,320 ****
+    assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
+    size_t bytes = ARENA_ALIGN(requested_size) + length;
+    switch (length) {
+!    case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes);
+!    case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
+!    case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
+     default: {
+!      void *p =  os::malloc(bytes);
+!      if (p == NULL)
+         vm_exit_out_of_memory(bytes, "Chunk::new");
+       return p;
+     }
+    }
+--- 308,321 ----
+    assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
+    size_t bytes = ARENA_ALIGN(requested_size) + length;
+    switch (length) {
+!    case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
+!    case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
+!    case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
+     default: {
+!      void* p = os::malloc(bytes);
+!      if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
+         vm_exit_out_of_memory(bytes, "Chunk::new");
++      }
+       return p;
+     }
+    }
+***************
+*** 367,380 ****
+  Arena::Arena(size_t init_size) {
+    size_t round_size = (sizeof (char *)) - 1;
+    init_size = (init_size+round_size) & ~round_size;
+!   _first = _chunk = new (init_size) Chunk(init_size);
+    _hwm = _chunk->bottom();      // Save the cached hwm, max
+    _max = _chunk->top();
+    set_size_in_bytes(init_size);
+  }
+  
+  Arena::Arena() {
+!   _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
+    _hwm = _chunk->bottom();      // Save the cached hwm, max
+    _max = _chunk->top();
+    set_size_in_bytes(Chunk::init_size);
+--- 368,381 ----
+  Arena::Arena(size_t init_size) {
+    size_t round_size = (sizeof (char *)) - 1;
+    init_size = (init_size+round_size) & ~round_size;
+!   _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
+    _hwm = _chunk->bottom();      // Save the cached hwm, max
+    _max = _chunk->top();
+    set_size_in_bytes(init_size);
+  }
+  
+  Arena::Arena() {
+!   _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
+    _hwm = _chunk->bottom();      // Save the cached hwm, max
+    _max = _chunk->top();
+    set_size_in_bytes(Chunk::init_size);
+***************
+*** 427,441 ****
+  }
+  
+  // Grow a new Chunk
+! void* Arena::grow( size_t x ) {
+    // Get minimal required size.  Either real big, or even bigger for giant objs
+    size_t len = MAX2(x, (size_t) Chunk::size);
+  
+    Chunk *k = _chunk;            // Get filled-up chunk address
+!   _chunk = new (len) Chunk(len);
+  
+    if (_chunk == NULL) {
+!     signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");
+    }
+  
+    if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
+--- 428,442 ----
+  }
+  
+  // Grow a new Chunk
+! void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
+    // Get minimal required size.  Either real big, or even bigger for giant objs
+    size_t len = MAX2(x, (size_t) Chunk::size);
+  
+    Chunk *k = _chunk;            // Get filled-up chunk address
+!   _chunk = new (alloc_failmode, len) Chunk(len);
+  
+    if (_chunk == NULL) {
+!     return NULL;
+    }
+  
+    if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
+***************
+*** 451,463 ****
+  
+  
+  // Reallocate storage in Arena.
+! void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size) {
+    assert(new_size >= 0, "bad size");
+    if (new_size == 0) return NULL;
+  #ifdef ASSERT
+    if (UseMallocOnly) {
+      // always allocate a new object  (otherwise we'll free this one twice)
+!     char* copy = (char*)Amalloc(new_size);
+      size_t n = MIN2(old_size, new_size);
+      if (n > 0) memcpy(copy, old_ptr, n);
+      Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
+--- 452,467 ----
+  
+  
+  // Reallocate storage in Arena.
+! void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
+    assert(new_size >= 0, "bad size");
+    if (new_size == 0) return NULL;
+  #ifdef ASSERT
+    if (UseMallocOnly) {
+      // always allocate a new object  (otherwise we'll free this one twice)
+!     char* copy = (char*)Amalloc(new_size, alloc_failmode);
+!     if (copy == NULL) {
+!       return NULL;
+!     }
+      size_t n = MIN2(old_size, new_size);
+      if (n > 0) memcpy(copy, old_ptr, n);
+      Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
+***************
+*** 483,489 ****
+    }
+  
+    // Oops, got to relocate guts
+!   void *new_ptr = Amalloc(new_size);
+    memcpy( new_ptr, c_old, old_size );
+    Afree(c_old,old_size);        // Mostly done to keep stats accurate
+    return new_ptr;
+--- 487,496 ----
+    }
+  
+    // Oops, got to relocate guts
+!   void *new_ptr = Amalloc(new_size, alloc_failmode);
+!   if (new_ptr == NULL) {
+!     return NULL;
+!   }
+    memcpy( new_ptr, c_old, old_size );
+    Afree(c_old,old_size);        // Mostly done to keep stats accurate
+    return new_ptr;
+*** openjdk/hotspot/src/share/vm/memory/allocation.hpp	2011-11-14 17:07:35.000000000 -0500
+--- openjdk/hotspot/src/share/vm/memory/allocation.hpp	2013-06-25 15:13:06.325141250 -0400
+***************
+*** 1,5 ****
+  /*
+!  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+   *
+   * This code is free software; you can redistribute it and/or modify it
+--- 1,5 ----
+  /*
+!  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+   *
+   * This code is free software; you can redistribute it and/or modify it
+***************
+*** 34,43 ****
+--- 34,51 ----
+  #include "opto/c2_globals.hpp"
+  #endif
+  
++ #include <new>
++ 
+  #define ARENA_ALIGN_M1 (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
+  #define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
+  #define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)
+  
++ class AllocFailStrategy {
++ public:
++   enum AllocFailEnum { EXIT_OOM, RETURN_NULL };
++ };
++ typedef AllocFailStrategy::AllocFailEnum AllocFailType;
++ 
+  // All classes in the virtual machine must be subclassed
+  // by one of the following allocation classes:
+  //
+***************
+*** 152,158 ****
+    Chunk*       _next;     // Next Chunk in list
+    const size_t _len;      // Size of this Chunk
+   public:
+!   void* operator new(size_t size, size_t length);
+    void  operator delete(void* p);
+    Chunk(size_t length);
+  
+--- 160,166 ----
+    Chunk*       _next;     // Next Chunk in list
+    const size_t _len;      // Size of this Chunk
+   public:
+!   void* operator new(size_t size, AllocFailType alloc_failmode, size_t length);
+    void  operator delete(void* p);
+    Chunk(size_t length);
+  
+***************
+*** 200,206 ****
+    Chunk *_first;                // First chunk
+    Chunk *_chunk;                // current chunk
+    char *_hwm, *_max;            // High water mark and max in current chunk
+!   void* grow(size_t x);         // Get a new Chunk of at least size x
+    NOT_PRODUCT(size_t _size_in_bytes;) // Size of arena (used for memory usage tracing)
+    NOT_PRODUCT(static size_t _bytes_allocated;) // total #bytes allocated since start
+    friend class AllocStats;
+--- 208,215 ----
+    Chunk *_first;                // First chunk
+    Chunk *_chunk;                // current chunk
+    char *_hwm, *_max;            // High water mark and max in current chunk
+!   // Get a new Chunk of at least size x
+!   void* grow(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
+    NOT_PRODUCT(size_t _size_in_bytes;) // Size of arena (used for memory usage tracing)
+    NOT_PRODUCT(static size_t _bytes_allocated;) // total #bytes allocated since start
+    friend class AllocStats;
+***************
+*** 209,218 ****
+  
+    void signal_out_of_memory(size_t request, const char* whence) const;
+  
+!   void check_for_overflow(size_t request, const char* whence) const {
+      if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
+        signal_out_of_memory(request, whence);
+      }
+    }
+  
+   public:
+--- 218,232 ----
+  
+    void signal_out_of_memory(size_t request, const char* whence) const;
+  
+!   bool check_for_overflow(size_t request, const char* whence,
+!       AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const {
+      if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
++       if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
++         return false;
++       }
+        signal_out_of_memory(request, whence);
+      }
++     return true;
+    }
+  
+   public:
+***************
+*** 224,237 ****
+    char* hwm() const             { return _hwm; }
+  
+    // Fast allocate in the arena.  Common case is: pointer test + increment.
+!   void* Amalloc(size_t x) {
+      assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2");
+      x = ARENA_ALIGN(x);
+      debug_only(if (UseMallocOnly) return malloc(x);)
+!     check_for_overflow(x, "Arena::Amalloc");
+      NOT_PRODUCT(_bytes_allocated += x);
+      if (_hwm + x > _max) {
+!       return grow(x);
+      } else {
+        char *old = _hwm;
+        _hwm += x;
+--- 238,252 ----
+    char* hwm() const             { return _hwm; }
+  
+    // Fast allocate in the arena.  Common case is: pointer test + increment.
+!   void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
+      assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2");
+      x = ARENA_ALIGN(x);
+      debug_only(if (UseMallocOnly) return malloc(x);)
+!     if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode))
+!       return NULL;
+      NOT_PRODUCT(_bytes_allocated += x);
+      if (_hwm + x > _max) {
+!       return grow(x, alloc_failmode);
+      } else {
+        char *old = _hwm;
+        _hwm += x;
+***************
+*** 239,251 ****
+      }
+    }
+    // Further assume size is padded out to words
+!   void *Amalloc_4(size_t x) {
+      assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
+      debug_only(if (UseMallocOnly) return malloc(x);)
+!     check_for_overflow(x, "Arena::Amalloc_4");
+      NOT_PRODUCT(_bytes_allocated += x);
+      if (_hwm + x > _max) {
+!       return grow(x);
+      } else {
+        char *old = _hwm;
+        _hwm += x;
+--- 254,267 ----
+      }
+    }
+    // Further assume size is padded out to words
+!   void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
+      assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
+      debug_only(if (UseMallocOnly) return malloc(x);)
+!     if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode))
+!       return NULL;
+      NOT_PRODUCT(_bytes_allocated += x);
+      if (_hwm + x > _max) {
+!       return grow(x, alloc_failmode);
+      } else {
+        char *old = _hwm;
+        _hwm += x;
+***************
+*** 255,261 ****
+  
+    // Allocate with 'double' alignment. It is 8 bytes on sparc.
+    // In other cases Amalloc_D() should be the same as Amalloc_4().
+!   void* Amalloc_D(size_t x) {
+      assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
+      debug_only(if (UseMallocOnly) return malloc(x);)
+  #if defined(SPARC) && !defined(_LP64)
+--- 271,277 ----
+  
+    // Allocate with 'double' alignment. It is 8 bytes on sparc.
+    // In other cases Amalloc_D() should be the same as Amalloc_4().
+!   void* Amalloc_D(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
+      assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
+      debug_only(if (UseMallocOnly) return malloc(x);)
+  #if defined(SPARC) && !defined(_LP64)
+***************
+*** 263,272 ****
+      size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
+      x += delta;
+  #endif
+!     check_for_overflow(x, "Arena::Amalloc_D");
+      NOT_PRODUCT(_bytes_allocated += x);
+      if (_hwm + x > _max) {
+!       return grow(x); // grow() returns a result aligned >= 8 bytes.
+      } else {
+        char *old = _hwm;
+        _hwm += x;
+--- 279,289 ----
+      size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
+      x += delta;
+  #endif
+!     if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode))
+!       return NULL;
+      NOT_PRODUCT(_bytes_allocated += x);
+      if (_hwm + x > _max) {
+!       return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes.
+      } else {
+        char *old = _hwm;
+        _hwm += x;
+***************
+*** 286,292 ****
+      if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
+    }
+  
+!   void *Arealloc( void *old_ptr, size_t old_size, size_t new_size );
+  
+    // Move contents of this arena into an empty arena
+    Arena *move_contents(Arena *empty_arena);
+--- 303,310 ----
+      if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
+    }
+  
+!   void *Arealloc( void *old_ptr, size_t old_size, size_t new_size,
+!      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
+  
+    // Move contents of this arena into an empty arena
+    Arena *move_contents(Arena *empty_arena);
+***************
+*** 328,336 ****
+  
+  
+  //%note allocation_1


