[aarch64-port-dev ] /hg/icedtea7-forest-aarch64/hotspot: allow for 0x400 aligned off...
Andrew Dinn
adinn at redhat.com
Fri Oct 31 16:24:54 UTC 2014
forwarding bounced check-in message
Date: Fri, 31 Oct 2014 16:16:53 +0000
Subject: /hg/icedtea7-forest-aarch64/hotspot: allow for 0x400 aligned off...
From: adinn at icedtea.classpath.org
To: aarch64-port-dev at openjdk.java.net
changeset 1759697c36a4 in /hg/icedtea7-forest-aarch64/hotspot
details:
http://icedtea.classpath.org/hg/icedtea7-forest-aarch64/hotspot?cmd=changeset;node=1759697c36a4
author: adinn
date: Fri Oct 31 12:08:03 2014 -0400
allow for 0x400 aligned offsets for byte_map_base
This reverts changeset 5664 (SHA1 id: eff2294b2b17), which fixed the
same problem as this fix, but only at the cost of padding up to 1.5
extra MB between heap regions. That stopped jtreg running some of its
tests with -Xms8Mb. That might, admittedly, be considered unimportant,
but we don't want to make it hard for anyone to run the standard tests.
diffstat:

 src/cpu/aarch64/vm/aarch64.ad                                            |  7 ++-
 src/cpu/aarch64/vm/assembler_aarch64.cpp                                 | 28 ++++++---
 src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp                               |  7 ++-
 src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp |  5 -

 4 files changed, 31 insertions(+), 16 deletions(-)
diffs (104 lines):
diff -r 99a0ed4de8a1 -r 1759697c36a4 src/cpu/aarch64/vm/aarch64.ad
--- a/src/cpu/aarch64/vm/aarch64.ad Fri Oct 31 14:50:26 2014 +0000
+++ b/src/cpu/aarch64/vm/aarch64.ad Fri Oct 31 12:08:03 2014 -0400
@@ -2589,7 +2589,12 @@
Register dst_reg = as_Register($dst$$reg);
unsigned long off;
__ adrp(dst_reg, ExternalAddress(page), off);
- assert(off == 0, "assumed offset == 0");
+ assert((off & 0x3ffL) == 0, "assumed offset aligned to 0x400");
+ // n.b. intra-page offset will never change even if this gets
+ // relocated so it is safe to omit the lea when off == 0
+ if (off != 0) {
+ __ lea(dst_reg, Address(dst_reg, off));
+ }
%}
enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
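To make the new adrp/lea pairing concrete, here is a minimal standalone
sketch (illustration only, not HotSpot code; the example address is
invented) of how a byte_map_base that is only 0x400 aligned splits into
the 4K page that adrp materialises and the residual intra-page offset
that the conditional lea adds back:

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint64_t byte_map_base = 0x7f3a12345c00ULL;   // invented example value
        uint64_t page = byte_map_base & ~0xfffULL;    // what adrp materialises
        uint64_t off  = byte_map_base &  0xfffULL;    // 0x400-aligned: 0x000/0x400/0x800/0xc00

        printf("page = %#llx  off = %#llx\n",
               (unsigned long long)page, (unsigned long long)off);
        // Generated code is "adrp Rx, page" plus, only when off != 0,
        // an extra "add Rx, Rx, #off" (the __ lea in the .ad encoding above).
        return 0;
    }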
diff -r 99a0ed4de8a1 -r 1759697c36a4 src/cpu/aarch64/vm/assembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/assembler_aarch64.cpp Fri Oct 31 14:50:26 2014 +0000
+++ b/src/cpu/aarch64/vm/assembler_aarch64.cpp Fri Oct 31 12:08:03 2014 -0400
@@ -1609,15 +1609,22 @@
// 2 - adrp Rx, target_page
// add Ry, Rx, #offset_in_page
// 3 - adrp Rx, target_page (page aligned reloc, offset == 0)
- // In the first 2 cases we must check that Rx is the same in the adrp and the
- // subsequent ldr/str or add instruction. Otherwise we could accidentally end
- // up treating a type 3 relocation as a type 1 or 2 just because it happened
- // to be followed by a random unrelated ldr/str or add instruction.
//
- // In the case of a type 3 relocation, we know that these are only generated
- // for the safepoint polling page, or for the card type byte map base so we
- // assert as much and of course that the offset is 0.
+ // In the first 2 cases we must check that Rx is the same in the
+ // adrp and the subsequent ldr/str or add instruction. Otherwise
+ // we could accidentally end up treating a type 3 relocation as
+ // a type 1 or 2 just because it happened to be followed by a
+ // random unrelated ldr/str or add instruction.
//
+ // In the case of a type 3 relocation, we know that these are
+ // only generated for the safepoint polling page, the crc table
+ // base or the card type byte map base so we assert as much
+ // and of course that the offset is 0.
+ //
+ // In jdk7 the card type byte map base is aligned on a 1K
+ // boundary which may fail to be 4K aligned. In that case the
+ // card table load will fall into category 2.
+
unsigned insn2 = ((unsigned*)branch)[1];
if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
Instruction_aarch64::extract(insn, 4, 0) ==
@@ -1631,6 +1638,9 @@
Instruction_aarch64::extract(insn, 4, 0) ==
Instruction_aarch64::extract(insn2, 4, 0)) {
// add (immediate)
+ assert (((jbyte *)target !=
+          ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base) ||
+         (offset_lo & 0x3FFl) == 0, "offset must be 0x400 aligned for crc_table");
Instruction_aarch64::patch(branch + sizeof (unsigned),
21, 10, offset_lo);
} else {
@@ -1638,8 +1648,8 @@
((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base ||
target == StubRoutines::crc_table_addr() ||
(address)target == os::get_polling_page(),
- "adrp must be polling page or byte map base");
- assert(offset_lo == 0, "offset must be 0 for polling page or byte map
base");
+ "adrp must be polling page, crc_table or byte map base");
+ assert(offset_lo == 0, "offset must be 0 for polling page, crc_table
or byte map base");
}
}
int offset_lo = offset & 3;
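For context on the patching logic above, the register check the comment
describes amounts to comparing bit field 4..0 (the adrp's Rd) against the
corresponding field of the following instruction; for the add (immediate)
case both fields are bits 4..0, as the extract calls show. A toy sketch,
with a hand-rolled helper standing in for Instruction_aarch64::extract
(same assumed bit-field semantics):

    #include <cstdint>

    // hypothetical stand-in for Instruction_aarch64::extract(insn, msb, lsb)
    static unsigned extract(uint32_t insn, int msb, int lsb) {
        return (insn >> lsb) & ((1u << (msb - lsb + 1)) - 1u);
    }

    // Treat the adrp as part of a type 2 (adrp + add) pair only when the
    // adrp's Rd (bits 4..0) equals the add's Rd (bits 4..0); otherwise it
    // must be a stand-alone type 3 reference and the offset assert applies.
    static bool adrp_add_pair(uint32_t adrp_insn, uint32_t add_insn) {
        return extract(adrp_insn, 4, 0) == extract(add_insn, 4, 0);
    }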
diff -r 99a0ed4de8a1 -r 1759697c36a4 src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp Fri Oct 31 14:50:26 2014 +0000
+++ b/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp Fri Oct 31 12:08:03 2014 -0400
@@ -1235,7 +1235,12 @@
__ lsr(card_addr, card_addr, CardTableModRefBS::card_shift);
unsigned long offset;
__ adrp(rscratch1, cardtable, offset);
- assert(offset == 0, "assumed offset == 0");
+ assert((offset & 0x3ffL) == 0, "assumed offset aligned to 0x400");
+ // n.b. intra-page offset will never change even if this gets
+ // relocated so it is safe to omit the lea when offset == 0
+ if (offset != 0) {
+ __ lea(rscratch1, Address(rscratch1, offset));
+ }
__ add(card_addr, card_addr, rscratch1);
__ ldrb(rscratch1, Address(card_addr, offset));
__ cmpw(rscratch1, (int)G1SATBCardTableModRefBS::g1_young_card_val());
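For background on the barrier stub change: the card entry for a heap
address is byte_map_base plus the address shifted right by card_shift,
and the adrp/lea pair now materialises byte_map_base correctly even when
it is only 0x400 aligned. A minimal sketch of that computation, assuming
the usual 512-byte cards (card_shift == 9):

    #include <cstdint>

    static const int card_shift = 9;   // assumed: 512-byte cards

    // card entry for a heap address: byte_map_base[heap_addr >> card_shift]
    static uint8_t* card_for(uint8_t* byte_map_base, uintptr_t heap_addr) {
        return byte_map_base + (heap_addr >> card_shift);
    }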
diff -r 99a0ed4de8a1 -r 1759697c36a4 src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Fri Oct 31 14:50:26 2014 +0000
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Fri Oct 31 12:08:03 2014 -0400
@@ -129,12 +129,7 @@
// The alignment used for eden and survivors within the young gen
// and for boundary between young gen and old gen.
-#ifdef TARGET_ARCH_aarch64
- // ensure byte_map_base can be loaded using adrp with no following ldr
- size_t intra_heap_alignment() const { return 256 * K * HeapWordSize; }
-#else
size_t intra_heap_alignment() const { return 64 * K * HeapWordSize; }
-#endif
size_t capacity() const;
size_t used() const;
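The hunk above is also where the "1.5 extra MB" figure in the commit
message comes from: with K = 1024 and HeapWordSize = 8 on a 64-bit build,
the reverted aarch64-specific alignment was 2 MB against the shared
512 KB. A quick standalone check of that arithmetic:

    #include <cstdio>

    int main() {
        const unsigned long K = 1024, HeapWordSize = 8;        // 64-bit build assumed
        unsigned long aarch64_align = 256 * K * HeapWordSize;  // reverted value: 2 MB
        unsigned long shared_align  =  64 * K * HeapWordSize;  // restored value: 512 KB
        printf("extra padding per boundary: %lu KB\n",
               (aarch64_align - shared_align) / 1024);         // prints 1536 KB = 1.5 MB
        return 0;
    }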