[8u] RFR: Initial backport of AArch32 platform support from OpenJDK 9 to OpenJDK 8u
Triple Yang
triple.yang at linaro.org
Sat Jan 16 03:26:23 UTC 2016
Hi,
I have made a patch that adjusts all of the misalignment I found, except for
some of it in entry(CodeBuffer *cb) in assembler_aarch32.cpp, about which I am
not sure.
Best wishes.
On 5 January 2016 at 21:51, Edward Nevill <edward.nevill at gmail.com> wrote:
> Hi,
>
> I used the 'expand' command which expands tabs to multiples of 8.
>
> The default indentation in OpenJDK is indeed 2, but this is separate
> from the tab character which usually tabs to multiples of 8, unless you
> set your editor/terminal to a different setting.
>
> Are you aware of specific instances where the source has been
> misformatted due to tab expansion?
>
> All the best,
> Ed.
>
> On Sun, 2016-01-03 at 22:38 +0800, Triple Yang wrote:
> > Hi, Ed,
> >
> >
> > It seems you replaced tabs with 8 spaces in this changeset, whereas 2
> > spaces are most often used for alignment.
> > Would you please confirm it? Thank you.
> >
> >
>
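
For context, here is a minimal, hypothetical sketch (not part of the attached
patch) of what an expand-to-8-column-stops pass does to a line whose trailing
comment was padded with tabs; the helper function and the sample line are made
up purely for illustration:

  #include <iostream>
  #include <string>

  // Expands tabs the way 'expand' does: each tab advances to the next
  // multiple of 8 columns, always moving at least one column forward.
  static std::string expand_tabs(const std::string& line, int tab_width = 8) {
    std::string out;
    for (char c : line) {
      if (c == '\t') {
        do {
          out.push_back(' ');
        } while (out.size() % tab_width != 0);
      } else {
        out.push_back(c);
      }
    }
    return out;
  }

  int main() {
    // A 2-space-indented line whose trailing comment was aligned with tabs:
    // after expansion the comment column is whatever 8-column stop follows
    // the end of the code, not a column chosen by the 2-space convention.
    std::string with_tabs = "  __ mov(r10, r3);\t\t// mov r10, r3";
    std::cout << expand_tabs(with_tabs) << "\n";
    return 0;
  }

That appears to be the kind of comment-column drift the attached patch adjusts
by hand.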
-------------- next part --------------
diff -r dc9b82a9fbc7 src/cpu/aarch32/vm/assembler_aarch32.cpp
--- a/src/cpu/aarch32/vm/assembler_aarch32.cpp Mon Jan 11 14:58:34 2016 +0000
+++ b/src/cpu/aarch32/vm/assembler_aarch32.cpp Fri Jan 15 23:23:56 2016 +0800
@@ -85,615 +85,615 @@
__ bind(back);
// ThreeRegSft
- __ add(r8, r2, r11, ::lsr(10)); // add r8, r2, r11, lsr #10
- __ adds(r1, r3, r7, ::asr(1), Assembler::EQ); // addEQs r1, r3, r7, asr #1
- __ eor(r0, r9, r4, ::lsl(5)); // eor r0, r9, r4, lsl #5
- __ eors(r9, r2, r6, ::rrx(), Assembler::GT); // eorGTs r9, r2, r6, rrx
- __ sub(r0, r12, lr, ::lsr(0), Assembler::GT); // subGT r0, r12, lr, lsr #0
- __ subs(r8, r2, r4, ::ror(6), Assembler::EQ); // subEQs r8, r2, r4, ror #6
- __ rsb(r8, r9, sp, ::lsl(3)); // rsb r8, r9, sp, lsl #3
- __ rsbs(r8, r0, r4, ::ror(16), Assembler::VS); // rsbVSs r8, r0, r4, ror #16
- __ add(r9, r5, r1, ::lsr(15), Assembler::LE); // addLE r9, r5, r1, lsr #15
- __ adds(r1, sp, r6, ::asr(5)); // adds r1, sp, r6, asr #5
- __ adc(r11, sp, r7, ::asr(1), Assembler::GT); // adcGT r11, sp, r7, asr #1
- __ adcs(r0, r8, r9, ::lsr(6)); // adcs r0, r8, r9, lsr #6
- __ sbc(r9, r3, r6, ::ror(5)); // sbc r9, r3, r6, ror #5
- __ sbcs(r1, sp, r5, ::asr(16), Assembler::HI); // sbcHIs r1, sp, r5, asr #16
- __ rsc(r8, r2, r6, ::lsl(9), Assembler::CC); // rscCC r8, r2, r6, lsl #9
- __ rscs(r10, r4, sp, ::ror(14)); // rscs r10, r4, sp, ror #14
- __ orr(r11, sp, r5, ::lsl(15), Assembler::NE); // orrNE r11, sp, r5, lsl #15
- __ orrs(r9, r10, r4, ::ror(14)); // orrs r9, r10, r4, ror #14
- __ bic(r9, sp, r5, ::ror(1)); // bic r9, sp, r5, ror #1
- __ bics(r0, r2, r7, ::asr(10)); // bics r0, r2, r7, asr #10
+ __ add(r8, r2, r11, ::lsr(10)); // add r8, r2, r11, lsr #10
+ __ adds(r1, r3, r7, ::asr(1), Assembler::EQ); // addEQs r1, r3, r7, asr #1
+ __ eor(r0, r9, r4, ::lsl(5)); // eor r0, r9, r4, lsl #5
+ __ eors(r9, r2, r6, ::rrx(), Assembler::GT); // eorGTs r9, r2, r6, rrx
+ __ sub(r0, r12, lr, ::lsr(0), Assembler::GT); // subGT r0, r12, lr, lsr #0
+ __ subs(r8, r2, r4, ::ror(6), Assembler::EQ); // subEQs r8, r2, r4, ror #6
+ __ rsb(r8, r9, sp, ::lsl(3)); // rsb r8, r9, sp, lsl #3
+ __ rsbs(r8, r0, r4, ::ror(16), Assembler::VS); // rsbVSs r8, r0, r4, ror #16
+ __ add(r9, r5, r1, ::lsr(15), Assembler::LE); // addLE r9, r5, r1, lsr #15
+ __ adds(r1, sp, r6, ::asr(5)); // adds r1, sp, r6, asr #5
+ __ adc(r11, sp, r7, ::asr(1), Assembler::GT); // adcGT r11, sp, r7, asr #1
+ __ adcs(r0, r8, r9, ::lsr(6)); // adcs r0, r8, r9, lsr #6
+ __ sbc(r9, r3, r6, ::ror(5)); // sbc r9, r3, r6, ror #5
+ __ sbcs(r1, sp, r5, ::asr(16), Assembler::HI); // sbcHIs r1, sp, r5, asr #16
+ __ rsc(r8, r2, r6, ::lsl(9), Assembler::CC); // rscCC r8, r2, r6, lsl #9
+ __ rscs(r10, r4, sp, ::ror(14)); // rscs r10, r4, sp, ror #14
+ __ orr(r11, sp, r5, ::lsl(15), Assembler::NE); // orrNE r11, sp, r5, lsl #15
+ __ orrs(r9, r10, r4, ::ror(14)); // orrs r9, r10, r4, ror #14
+ __ bic(r9, sp, r5, ::ror(1)); // bic r9, sp, r5, ror #1
+ __ bics(r0, r2, r7, ::asr(10)); // bics r0, r2, r7, asr #10
// ThreeRegRSR
- __ add(sp, r6, r7, ::ror(r7)); // add sp, r6, r7, ror r7
- __ adds(r4, r12, r6, ::ror(r7), Assembler::HI); // addHIs r4, r12, r6, ror r7
- __ eor(r5, r6, r7, ::asr(r12), Assembler::LS); // eorLS r5, r6, r7, asr r12
- __ eors(r8, r5, sp, ::lsl(r4), Assembler::AL); // eorALs r8, r5, sp, lsl r4
- __ sub(r2, r12, r5, ::asr(r0)); // sub r2, r12, r5, asr r0
- __ subs(r9, r3, r7, ::lsl(r12), Assembler::HS); // subHSs r9, r3, r7, lsl r12
- __ rsb(r9, r12, r4, ::lsl(r6), Assembler::GT); // rsbGT r9, r12, r4, lsl r6
- __ rsbs(r8, r2, r12, ::lsl(r1)); // rsbs r8, r2, r12, lsl r1
- __ add(r4, r12, sp, ::lsl(sp)); // add r4, r12, sp, lsl sp
- __ adds(r8, r11, r6, ::ror(sp)); // adds r8, r11, r6, ror sp
- __ adc(r0, r2, r5, ::lsl(r4), Assembler::NE); // adcNE r0, r2, r5, lsl r4
- __ adcs(r11, lr, r6, ::asr(r2)); // adcs r11, lr, r6, asr r2
- __ sbc(r8, r10, lr, ::asr(r3), Assembler::HI); // sbcHI r8, r10, lr, asr r3
- __ sbcs(r1, r12, r5, ::lsl(r6)); // sbcs r1, r12, r5, lsl r6
- __ rsc(r4, r5, lr, ::ror(r10), Assembler::VS); // rscVS r4, r5, lr, ror r10
- __ rscs(r1, r12, sp, ::lsl(r8)); // rscs r1, r12, sp, lsl r8
- __ orr(r8, r1, r6, ::ror(r0), Assembler::VS); // orrVS r8, r1, r6, ror r0
- __ orrs(r11, sp, r7, ::ror(r5)); // orrs r11, sp, r7, ror r5
- __ bic(r4, lr, r6, ::lsl(r2), Assembler::AL); // bicAL r4, lr, r6, lsl r2
- __ bics(r10, r11, sp, ::lsl(r3)); // bics r10, r11, sp, lsl r3
+ __ add(sp, r6, r7, ::ror(r7)); // add sp, r6, r7, ror r7
+ __ adds(r4, r12, r6, ::ror(r7), Assembler::HI); // addHIs r4, r12, r6, ror r7
+ __ eor(r5, r6, r7, ::asr(r12), Assembler::LS); // eorLS r5, r6, r7, asr r12
+ __ eors(r8, r5, sp, ::lsl(r4), Assembler::AL); // eorALs r8, r5, sp, lsl r4
+ __ sub(r2, r12, r5, ::asr(r0)); // sub r2, r12, r5, asr r0
+ __ subs(r9, r3, r7, ::lsl(r12), Assembler::HS); // subHSs r9, r3, r7, lsl r12
+ __ rsb(r9, r12, r4, ::lsl(r6), Assembler::GT); // rsbGT r9, r12, r4, lsl r6
+ __ rsbs(r8, r2, r12, ::lsl(r1)); // rsbs r8, r2, r12, lsl r1
+ __ add(r4, r12, sp, ::lsl(sp)); // add r4, r12, sp, lsl sp
+ __ adds(r8, r11, r6, ::ror(sp)); // adds r8, r11, r6, ror sp
+ __ adc(r0, r2, r5, ::lsl(r4), Assembler::NE); // adcNE r0, r2, r5, lsl r4
+ __ adcs(r11, lr, r6, ::asr(r2)); // adcs r11, lr, r6, asr r2
+ __ sbc(r8, r10, lr, ::asr(r3), Assembler::HI); // sbcHI r8, r10, lr, asr r3
+ __ sbcs(r1, r12, r5, ::lsl(r6)); // sbcs r1, r12, r5, lsl r6
+ __ rsc(r4, r5, lr, ::ror(r10), Assembler::VS); // rscVS r4, r5, lr, ror r10
+ __ rscs(r1, r12, sp, ::lsl(r8)); // rscs r1, r12, sp, lsl r8
+ __ orr(r8, r1, r6, ::ror(r0), Assembler::VS); // orrVS r8, r1, r6, ror r0
+ __ orrs(r11, sp, r7, ::ror(r5)); // orrs r11, sp, r7, ror r5
+ __ bic(r4, lr, r6, ::lsl(r2), Assembler::AL); // bicAL r4, lr, r6, lsl r2
+ __ bics(r10, r11, sp, ::lsl(r3)); // bics r10, r11, sp, lsl r3
// TwoRegImm
- __ add(r8, sp, (unsigned)268435462U, Assembler::HI); // addHI r8, sp, #268435462
- __ adds(sp, lr, (unsigned)162529280U); // adds sp, lr, #162529280
- __ eor(lr, r6, (unsigned)8192000U); // eor lr, r6, #8192000
- __ eors(r2, r3, (unsigned)292U); // eors r2, r3, #292
- __ sub(r4, sp, (unsigned)227540992U); // sub r4, sp, #227540992
- __ subs(r1, lr, (unsigned)33554432U, Assembler::LT); // subLTs r1, lr, #33554432
- __ rsb(r0, r5, (unsigned)2483027968U); // rsb r0, r5, #2483027968
- __ rsbs(r8, r4, (unsigned)3080192U, Assembler::LO); // rsbLOs r8, r4, #3080192
- __ add(r9, r4, (unsigned)2147483648U, Assembler::LT); // addLT r9, r4, #2147483648
- __ adds(r8, r4, (unsigned)32768U, Assembler::AL); // addALs r8, r4, #32768
- __ adc(r10, lr, (unsigned)10752U, Assembler::CS); // adcCS r10, lr, #10752
- __ adcs(r10, r6, (unsigned)774144U); // adcs r10, r6, #774144
- __ sbc(r2, r12, (unsigned)637534208U); // sbc r2, r12, #637534208
- __ sbcs(r8, r10, (unsigned)692060160U); // sbcs r8, r10, #692060160
- __ rsc(sp, r6, (unsigned)7405568U); // rsc sp, r6, #7405568
- __ rscs(r10, r11, (unsigned)244318208U, Assembler::NE); // rscNEs r10, r11, #244318208
- __ orr(r3, r7, (unsigned)66846720U, Assembler::VS); // orrVS r3, r7, #66846720
- __ orrs(r2, r5, (unsigned)1327104U, Assembler::EQ); // orrEQs r2, r5, #1327104
- __ bic(r8, r1, (unsigned)3744U, Assembler::VS); // bicVS r8, r1, #3744
- __ bics(r0, r2, (unsigned)2684354560U, Assembler::LO); // bicLOs r0, r2, #2684354560
+ __ add(r8, sp, (unsigned)268435462U, Assembler::HI); // addHI r8, sp, #268435462
+ __ adds(sp, lr, (unsigned)162529280U); // adds sp, lr, #162529280
+ __ eor(lr, r6, (unsigned)8192000U); // eor lr, r6, #8192000
+ __ eors(r2, r3, (unsigned)292U); // eors r2, r3, #292
+ __ sub(r4, sp, (unsigned)227540992U); // sub r4, sp, #227540992
+ __ subs(r1, lr, (unsigned)33554432U, Assembler::LT); // subLTs r1, lr, #33554432
+ __ rsb(r0, r5, (unsigned)2483027968U); // rsb r0, r5, #2483027968
+ __ rsbs(r8, r4, (unsigned)3080192U, Assembler::LO); // rsbLOs r8, r4, #3080192
+ __ add(r9, r4, (unsigned)2147483648U, Assembler::LT); // addLT r9, r4, #2147483648
+ __ adds(r8, r4, (unsigned)32768U, Assembler::AL); // addALs r8, r4, #32768
+ __ adc(r10, lr, (unsigned)10752U, Assembler::CS); // adcCS r10, lr, #10752
+ __ adcs(r10, r6, (unsigned)774144U); // adcs r10, r6, #774144
+ __ sbc(r2, r12, (unsigned)637534208U); // sbc r2, r12, #637534208
+ __ sbcs(r8, r10, (unsigned)692060160U); // sbcs r8, r10, #692060160
+ __ rsc(sp, r6, (unsigned)7405568U); // rsc sp, r6, #7405568
+ __ rscs(r10, r11, (unsigned)244318208U, Assembler::NE); // rscNEs r10, r11, #244318208
+ __ orr(r3, r7, (unsigned)66846720U, Assembler::VS); // orrVS r3, r7, #66846720
+ __ orrs(r2, r5, (unsigned)1327104U, Assembler::EQ); // orrEQs r2, r5, #1327104
+ __ bic(r8, r1, (unsigned)3744U, Assembler::VS); // bicVS r8, r1, #3744
+ __ bics(r0, r2, (unsigned)2684354560U, Assembler::LO); // bicLOs r0, r2, #2684354560
// TwoRegSft
- __ tst(r8, sp, ::lsl(5)); // tst r8, sp, lsl #5
- __ teq(r6, r7, ::lsr(3)); // teq r6, r7, lsr #3
- __ cmp(r12, r4, ::ror(2)); // cmp r12, r4, ror #2
- __ cmn(r5, r7, ::lsl(16), Assembler::LT); // cmnLT r5, r7, lsl #16
+ __ tst(r8, sp, ::lsl(5)); // tst r8, sp, lsl #5
+ __ teq(r6, r7, ::lsr(3)); // teq r6, r7, lsr #3
+ __ cmp(r12, r4, ::ror(2)); // cmp r12, r4, ror #2
+ __ cmn(r5, r7, ::lsl(16), Assembler::LT); // cmnLT r5, r7, lsl #16
// TwoRegRSR
- __ tst(r2, lr, ::lsr(r7)); // tst r2, lr, lsr r7
- __ teq(r0, r2, ::ror(r5), Assembler::CC); // teqCC r0, r2, ror r5
- __ cmp(lr, r7, ::lsr(r11), Assembler::LS); // cmpLS lr, r7, lsr r11
- __ cmn(r10, r7, ::lsl(r11), Assembler::VS); // cmnVS r10, r7, lsl r11
+ __ tst(r2, lr, ::lsr(r7)); // tst r2, lr, lsr r7
+ __ teq(r0, r2, ::ror(r5), Assembler::CC); // teqCC r0, r2, ror r5
+ __ cmp(lr, r7, ::lsr(r11), Assembler::LS); // cmpLS lr, r7, lsr r11
+ __ cmn(r10, r7, ::lsl(r11), Assembler::VS); // cmnVS r10, r7, lsl r11
// OneRegImm
- __ tst(r2, (unsigned)557842432U); // tst r2, #557842432
- __ teq(lr, (unsigned)7077888U, Assembler::MI); // teqMI lr, #7077888
- __ cmp(r5, (unsigned)939524096U); // cmp r5, #939524096
- __ cmn(r7, (unsigned)2147483650U, Assembler::LO); // cmnLO r7, #2147483650
+ __ tst(r2, (unsigned)557842432U); // tst r2, #557842432
+ __ teq(lr, (unsigned)7077888U, Assembler::MI); // teqMI lr, #7077888
+ __ cmp(r5, (unsigned)939524096U); // cmp r5, #939524096
+ __ cmn(r7, (unsigned)2147483650U, Assembler::LO); // cmnLO r7, #2147483650
// Shift op
- __ lsl(r0, r4, (unsigned)23U); // lsl r0, r4, #23
- __ lsls(r1, r4, (unsigned)9U); // lsls r1, r4, #9
- __ lsr(r0, r10, (unsigned)3U); // lsr r0, r10, #3
- __ lsrs(r0, r10, (unsigned)20U); // lsrs r0, r10, #20
- __ asr(r1, r9, (unsigned)11U); // asr r1, r9, #11
- __ asrs(r2, r11, (unsigned)10U, Assembler::VS); // asrVSs r2, r11, #10
+ __ lsl(r0, r4, (unsigned)23U); // lsl r0, r4, #23
+ __ lsls(r1, r4, (unsigned)9U); // lsls r1, r4, #9
+ __ lsr(r0, r10, (unsigned)3U); // lsr r0, r10, #3
+ __ lsrs(r0, r10, (unsigned)20U); // lsrs r0, r10, #20
+ __ asr(r1, r9, (unsigned)11U); // asr r1, r9, #11
+ __ asrs(r2, r11, (unsigned)10U, Assembler::VS); // asrVSs r2, r11, #10
// shift op
- __ ror(r8, r2, (unsigned)31U, Assembler::CC); // rorCC r8, r2, #31
- __ rors(r9, r12, (unsigned)8U); // rors r9, r12, #8
+ __ ror(r8, r2, (unsigned)31U, Assembler::CC); // rorCC r8, r2, #31
+ __ rors(r9, r12, (unsigned)8U); // rors r9, r12, #8
// ThreeRegNon
- __ ror(r8, lr, r7); // ror r8, lr, r7
- __ rors(r12, r3, r4); // rors r12, r3, r4
- __ lsl(r12, sp, lr, Assembler::GT); // lslGT r12, sp, lr
- __ lsls(r12, sp, r6, Assembler::AL); // lslALs r12, sp, r6
- __ lsr(r0, r1, r9, Assembler::GT); // lsrGT r0, r1, r9
- __ lsrs(r11, r3, r12, Assembler::GT); // lsrGTs r11, r3, r12
- __ asr(r2, r12, r6, Assembler::LE); // asrLE r2, r12, r6
- __ asrs(r1, r10, r6, Assembler::LT); // asrLTs r1, r10, r6
+ __ ror(r8, lr, r7); // ror r8, lr, r7
+ __ rors(r12, r3, r4); // rors r12, r3, r4
+ __ lsl(r12, sp, lr, Assembler::GT); // lslGT r12, sp, lr
+ __ lsls(r12, sp, r6, Assembler::AL); // lslALs r12, sp, r6
+ __ lsr(r0, r1, r9, Assembler::GT); // lsrGT r0, r1, r9
+ __ lsrs(r11, r3, r12, Assembler::GT); // lsrGTs r11, r3, r12
+ __ asr(r2, r12, r6, Assembler::LE); // asrLE r2, r12, r6
+ __ asrs(r1, r10, r6, Assembler::LT); // asrLTs r1, r10, r6
// TwoRegNon
- __ mov(r10, r3); // mov r10, r3
- __ movs(r0, r9); // movs r0, r9
+ __ mov(r10, r3); // mov r10, r3
+ __ movs(r0, r9); // movs r0, r9
// OneRegImm
- __ mov_i(r3, (unsigned)656U, Assembler::VC); // movVC r3, #656
- __ movs_i(r4, (unsigned)2064384U); // movs r4, #2064384
+ __ mov_i(r3, (unsigned)656U, Assembler::VC); // movVC r3, #656
+ __ movs_i(r4, (unsigned)2064384U); // movs r4, #2064384
// TwoRegSft
- __ mov(r12, r6, ::lsr(3)); // mov r12, r6, lsr #3
- __ movs(r5, sp, ::asr(10), Assembler::VC); // movVCs r5, sp, asr #10
+ __ mov(r12, r6, ::lsr(3)); // mov r12, r6, lsr #3
+ __ movs(r5, sp, ::asr(10), Assembler::VC); // movVCs r5, sp, asr #10
// TwoRegRSR
- __ mov(r1, lr, ::ror(r3)); // mov r1, lr, ror r3
- __ movs(r8, r12, ::ror(r9), Assembler::EQ); // movEQs r8, r12, ror r9
+ __ mov(r1, lr, ::ror(r3)); // mov r1, lr, ror r3
+ __ movs(r8, r12, ::ror(r9), Assembler::EQ); // movEQs r8, r12, ror r9
// OneRegImm16
- __ movw_i(r11, (unsigned)53041U, Assembler::LO); // movwLO r11, #53041
- __ movt_i(r9, (unsigned)11255U, Assembler::LO); // movtLO r9, #11255
+ __ movw_i(r11, (unsigned)53041U, Assembler::LO); // movwLO r11, #53041
+ __ movt_i(r9, (unsigned)11255U, Assembler::LO); // movtLO r9, #11255
// ThreeRegNon
- __ mul(r1, sp, r5, Assembler::LE); // mulLE r1, sp, r5
- __ muls(r0, r10, r11); // muls r0, r10, r11
+ __ mul(r1, sp, r5, Assembler::LE); // mulLE r1, sp, r5
+ __ muls(r0, r10, r11); // muls r0, r10, r11
// FourRegNon
- __ mla(r0, r3, r12, r7); // mla r0, r3, r12, r7
- __ mlas(r8, r11, r3, r6, Assembler::EQ); // mlaEQs r8, r11, r3, r6
- __ umull(lr, r4, r5, r6); // umull lr, r4, r5, r6
- __ umulls(r0, r4, r6, r7); // umulls r0, r4, r6, r7
- __ umlal(r8, r0, r11, lr); // umlal r8, r0, r11, lr
- __ umlals(r11, r4, lr, r7); // umlals r11, r4, lr, r7
- __ smull(r1, r5, r6, r7, Assembler::HS); // smullHS r1, r5, r6, r7
- __ smulls(r0, r11, r12, r5, Assembler::MI); // smullMIs r0, r11, r12, r5
+ __ mla(r0, r3, r12, r7); // mla r0, r3, r12, r7
+ __ mlas(r8, r11, r3, r6, Assembler::EQ); // mlaEQs r8, r11, r3, r6
+ __ umull(lr, r4, r5, r6); // umull lr, r4, r5, r6
+ __ umulls(r0, r4, r6, r7); // umulls r0, r4, r6, r7
+ __ umlal(r8, r0, r11, lr); // umlal r8, r0, r11, lr
+ __ umlals(r11, r4, lr, r7); // umlals r11, r4, lr, r7
+ __ smull(r1, r5, r6, r7, Assembler::HS); // smullHS r1, r5, r6, r7
+ __ smulls(r0, r11, r12, r5, Assembler::MI); // smullMIs r0, r11, r12, r5
// FourRegNon
- __ umaal(r8, r9, r2, r5); // umaal r8, r9, r2, r5
- __ mls(r0, r4, sp, lr, Assembler::EQ); // mlsEQ r0, r4, sp, lr
+ __ umaal(r8, r9, r2, r5); // umaal r8, r9, r2, r5
+ __ mls(r0, r4, sp, lr, Assembler::EQ); // mlsEQ r0, r4, sp, lr
// ThreeRegNon
- __ qadd(r9, r4, sp, Assembler::PL); // qaddPL r9, r4, sp
- __ qsub(r0, r12, r5, Assembler::MI); // qsubMI r0, r12, r5
- __ qdadd(r3, r5, r7); // qdadd r3, r5, r7
- __ qdsub(r9, r2, r4); // qdsub r9, r2, r4
+ __ qadd(r9, r4, sp, Assembler::PL); // qaddPL r9, r4, sp
+ __ qsub(r0, r12, r5, Assembler::MI); // qsubMI r0, r12, r5
+ __ qdadd(r3, r5, r7); // qdadd r3, r5, r7
+ __ qdsub(r9, r2, r4); // qdsub r9, r2, r4
// FourRegNon
- __ smlabb(r1, r12, r5, r6); // smlabb r1, r12, r5, r6
- __ smlabt(r0, r10, r12, r6); // smlabt r0, r10, r12, r6
- __ smlatb(r8, r1, r3, lr); // smlatb r8, r1, r3, lr
- __ smlatt(r1, sp, r6, r7); // smlatt r1, sp, r6, r7
- __ smlawb(r0, r3, r4, r6); // smlawb r0, r3, r4, r6
- __ smlawt(r11, r4, lr, r7); // smlawt r11, r4, lr, r7
- __ smlalbb(r0, r10, r6, r7); // smlalbb r0, r10, r6, r7
- __ smlalbt(r3, r11, r4, lr, Assembler::LS); // smlalbtLS r3, r11, r4, lr
- __ smlaltb(r8, r11, r3, r12); // smlaltb r8, r11, r3, r12
- __ smlaltt(r8, r1, r3, r5); // smlaltt r8, r1, r3, r5
+ __ smlabb(r1, r12, r5, r6); // smlabb r1, r12, r5, r6
+ __ smlabt(r0, r10, r12, r6); // smlabt r0, r10, r12, r6
+ __ smlatb(r8, r1, r3, lr); // smlatb r8, r1, r3, lr
+ __ smlatt(r1, sp, r6, r7); // smlatt r1, sp, r6, r7
+ __ smlawb(r0, r3, r4, r6); // smlawb r0, r3, r4, r6
+ __ smlawt(r11, r4, lr, r7); // smlawt r11, r4, lr, r7
+ __ smlalbb(r0, r10, r6, r7); // smlalbb r0, r10, r6, r7
+ __ smlalbt(r3, r11, r4, lr, Assembler::LS); // smlalbtLS r3, r11, r4, lr
+ __ smlaltb(r8, r11, r3, r12); // smlaltb r8, r11, r3, r12
+ __ smlaltt(r8, r1, r3, r5); // smlaltt r8, r1, r3, r5
// ThreeRegNon
- __ smulwb(r2, r12, sp, Assembler::HS); // smulwbHS r2, r12, sp
- __ smulwt(r8, r12, r6); // smulwt r8, r12, r6
- __ smulbb(r2, r6, lr, Assembler::GE); // smulbbGE r2, r6, lr
- __ smulbt(r8, r12, r7); // smulbt r8, r12, r7
- __ smultb(r10, r3, lr, Assembler::EQ); // smultbEQ r10, r3, lr
- __ smultt(r0, r3, sp); // smultt r0, r3, sp
+ __ smulwb(r2, r12, sp, Assembler::HS); // smulwbHS r2, r12, sp
+ __ smulwt(r8, r12, r6); // smulwt r8, r12, r6
+ __ smulbb(r2, r6, lr, Assembler::GE); // smulbbGE r2, r6, lr
+ __ smulbt(r8, r12, r7); // smulbt r8, r12, r7
+ __ smultb(r10, r3, lr, Assembler::EQ); // smultbEQ r10, r3, lr
+ __ smultt(r0, r3, sp); // smultt r0, r3, sp
// MemoryOp
- __ ldr(r10, Address(r7, r9, lsl(), Address::ADD, Address::post)); // ldr r10, [r7], r9
- __ ldrb(r0, Address(r9, 196)); // ldrb r0, [r9, #196]
+ __ ldr(r10, Address(r7, r9, lsl(), Address::ADD, Address::post)); // ldr r10, [r7], r9
+ __ ldrb(r0, Address(r9, 196)); // ldrb r0, [r9, #196]
__ ldrh(lr, Address(r4, r6, lsl(), Address::ADD, Address::pre)); // ldrh lr, [r4, r6]!
- __ ldrsb(r6, Address(__ pre(r9, 232))); // ldrsb r6, [r9, #232]!
- __ ldrsh(r2, Address(r1, r1, lsl(), Address::ADD, Address::post)); // ldrsh r2, [r1], r1
+ __ ldrsb(r6, Address(__ pre(r9, 232))); // ldrsb r6, [r9, #232]!
+ __ ldrsh(r2, Address(r1, r1, lsl(), Address::ADD, Address::post)); // ldrsh r2, [r1], r1
__ str(r0, Address(r9, r4, lsl(), Address::ADD, Address::post)); // str r0, [r9], r4
- __ strb(r3, Address(__ pre(r5, 92))); // strb r3, [r5, #92]!
- __ strh(r2, Address(r8, 160)); // strh r2, [r8, #160]
+ __ strb(r3, Address(__ pre(r5, 92))); // strb r3, [r5, #92]!
+ __ strh(r2, Address(r8, 160)); // strh r2, [r8, #160]
// MemoryOp
__ ldr(r8, Address(r12, r8, lsl(), Address::ADD, Address::off)); // ldr r8, [r12, r8]
- __ ldrb(r11, Address(__ post(r10, 16))); // ldrb r11, [r10], #16
- __ ldrh(r11, Address(r10, r6, lsl(), Address::ADD, Address::off)); // ldrh r11, [r10, r6]
- __ ldrsb(r5, Address(r11, r10, lsl(), Address::ADD, Address::pre)); // ldrsb r5, [r11, r10]!
- __ ldrsh(r6, Address(r3, r7, lsl(), Address::ADD, Address::off)); // ldrsh r6, [r3, r7]
- __ str(r7, Address(sp, r5, lsl(), Address::ADD, Address::pre)); // str r7, [sp, r5]!
- __ strb(r2, Address(r10)); // strb r2, [r10]
- __ strh(r6, Address(r4, r3, lsl(), Address::ADD, Address::post)); // strh r6, [r4], r3
+ __ ldrb(r11, Address(__ post(r10, 16))); // ldrb r11, [r10], #16
+ __ ldrh(r11, Address(r10, r6, lsl(), Address::ADD, Address::off)); // ldrh r11, [r10, r6]
+ __ ldrsb(r5, Address(r11, r10, lsl(), Address::ADD, Address::pre)); // ldrsb r5, [r11, r10]!
+ __ ldrsh(r6, Address(r3, r7, lsl(), Address::ADD, Address::off)); // ldrsh r6, [r3, r7]
+ __ str(r7, Address(sp, r5, lsl(), Address::ADD, Address::pre)); // str r7, [sp, r5]!
+ __ strb(r2, Address(r10)); // strb r2, [r10]
+ __ strh(r6, Address(r4, r3, lsl(), Address::ADD, Address::post)); // strh r6, [r4], r3
// MemoryOp
- __ ldr(r10, Address(r12)); // ldr r10, [r12]
- __ ldrb(r4, Address(__ post(r11, 132))); // ldrb r4, [r11], #132
- __ ldrh(r9, Address(r9, r12, lsl(), Address::ADD, Address::post)); // ldrh r9, [r9], r12
- __ ldrsb(r9, Address(__ post(r3, 148))); // ldrsb r9, [r3], #148
- __ ldrsh(r11, Address(__ pre(r2, 148))); // ldrsh r11, [r2, #148]!
- __ str(r11, Address(sp, r11, lsl(), Address::ADD, Address::off)); // str r11, [sp, r11]
- __ strb(r1, Address(sp, r10, lsl(), Address::ADD, Address::off)); // strb r1, [sp, r10]
- __ strh(r10, Address(lr, r9, lsl(), Address::ADD, Address::post)); // strh r10, [lr], r9
+ __ ldr(r10, Address(r12)); // ldr r10, [r12]
+ __ ldrb(r4, Address(__ post(r11, 132))); // ldrb r4, [r11], #132
+ __ ldrh(r9, Address(r9, r12, lsl(), Address::ADD, Address::post)); // ldrh r9, [r9], r12
+ __ ldrsb(r9, Address(__ post(r3, 148))); // ldrsb r9, [r3], #148
+ __ ldrsh(r11, Address(__ pre(r2, 148))); // ldrsh r11, [r2, #148]!
+ __ str(r11, Address(sp, r11, lsl(), Address::ADD, Address::off)); // str r11, [sp, r11]
+ __ strb(r1, Address(sp, r10, lsl(), Address::ADD, Address::off)); // strb r1, [sp, r10]
+ __ strh(r10, Address(lr, r9, lsl(), Address::ADD, Address::post)); // strh r10, [lr], r9
// MemoryOp
- __ ldr(r6, Address(r3, r4, lsl(), Address::ADD, Address::pre)); // ldr r6, [r3, r4]!
+ __ ldr(r6, Address(r3, r4, lsl(), Address::ADD, Address::pre)); // ldr r6, [r3, r4]!
__ ldrb(r4, Address(r6, sp, lsl(), Address::ADD, Address::pre)); // ldrb r4, [r6, sp]!
- __ ldrh(r6, Address(r7, r10, lsl(), Address::ADD, Address::post)); // ldrh r6, [r7], r10
- __ ldrsb(r0, Address(r6, r11, lsl(), Address::ADD, Address::pre)); // ldrsb r0, [r6, r11]!
- __ ldrsh(r10, Address(r6, sp, lsl(), Address::ADD, Address::post)); // ldrsh r10, [r6], sp
+ __ ldrh(r6, Address(r7, r10, lsl(), Address::ADD, Address::post)); // ldrh r6, [r7], r10
+ __ ldrsb(r0, Address(r6, r11, lsl(), Address::ADD, Address::pre)); // ldrsb r0, [r6, r11]!
+ __ ldrsh(r10, Address(r6, sp, lsl(), Address::ADD, Address::post)); // ldrsh r10, [r6], sp
__ str(r7, Address(r3, r12, lsl(), Address::ADD, Address::off)); // str r7, [r3, r12]
__ strb(r3, Address(r8, r1, lsl(), Address::ADD, Address::pre)); // strb r3, [r8, r1]!
- __ strh(r4, Address(r12, 64)); // strh r4, [r12, #64]
+ __ strh(r4, Address(r12, 64)); // strh r4, [r12, #64]
__ bind(near);
// LitMemoryOp
- __ ldr(r1, near); // ldr r1, near
- __ ldrb(r7, __ pc()); // ldrb r7, .
- __ ldrh(r2, near); // ldrh r2, near
- __ ldrsb(r10, __ pc()); // ldrsb r10, .
- __ ldrsh(lr, near_post); // ldrsh lr, near_post
+ __ ldr(r1, near); // ldr r1, near
+ __ ldrb(r7, __ pc()); // ldrb r7, .
+ __ ldrh(r2, near); // ldrh r2, near
+ __ ldrsb(r10, __ pc()); // ldrsb r10, .
+ __ ldrsh(lr, near_post); // ldrsh lr, near_post
// LitMemoryOp
- __ ldr(r2, __ pc()); // ldr r2, .
- __ ldrb(r3, __ pc()); // ldrb r3, .
- __ ldrh(r7, near_post); // ldrh r7, near_post
- __ ldrsb(sp, __ pc()); // ldrsb sp, .
- __ ldrsh(r10, near); // ldrsh r10, near
+ __ ldr(r2, __ pc()); // ldr r2, .
+ __ ldrb(r3, __ pc()); // ldrb r3, .
+ __ ldrh(r7, near_post); // ldrh r7, near_post
+ __ ldrsb(sp, __ pc()); // ldrsb sp, .
+ __ ldrsh(r10, near); // ldrsh r10, near
// LitMemoryOp
- __ ldr(r5, __ pc()); // ldr r5, .
- __ ldrb(lr, near_post); // ldrb lr, near_post
- __ ldrh(r5, near_post); // ldrh r5, near_post
- __ ldrsb(r6, near); // ldrsb r6, near
- __ ldrsh(r11, near); // ldrsh r11, near
+ __ ldr(r5, __ pc()); // ldr r5, .
+ __ ldrb(lr, near_post); // ldrb lr, near_post
+ __ ldrh(r5, near_post); // ldrh r5, near_post
+ __ ldrsb(r6, near); // ldrsb r6, near
+ __ ldrsh(r11, near); // ldrsh r11, near
// LitMemoryOp
- __ ldr(r7, near_post); // ldr r7, near_post
- __ ldrb(r5, near_post); // ldrb r5, near_post
- __ ldrh(r10, near); // ldrh r10, near
- __ ldrsb(r6, near_post); // ldrsb r6, near_post
- __ ldrsh(r9, __ pc()); // ldrsh r9, .
+ __ ldr(r7, near_post); // ldr r7, near_post
+ __ ldrb(r5, near_post); // ldrb r5, near_post
+ __ ldrh(r10, near); // ldrh r10, near
+ __ ldrsb(r6, near_post); // ldrsb r6, near_post
+ __ ldrsh(r9, __ pc()); // ldrsh r9, .
__ bind(near_post);
// MemoryRegRegSftOp
- __ ldr(r0, Address(r0, r10, ::ror(6), Address::ADD, Address::post)); // ldr r0, [r0], r10, ror #6
- __ ldrb(r3, Address(r8, lr, ::lsl(9), Address::ADD, Address::off)); // ldrb r3, [r8, lr, lsl #9]
- __ str(r5, Address(sp, r3, ::lsl(15), Address::ADD, Address::off)); // str r5, [sp, r3, lsl #15]
- __ strb(r9, Address(r9, r5, ::asr(2), Address::ADD, Address::post)); // strb r9, [r9], r5, asr #2
+ __ ldr(r0, Address(r0, r10, ::ror(6), Address::ADD, Address::post)); // ldr r0, [r0], r10, ror #6
+ __ ldrb(r3, Address(r8, lr, ::lsl(9), Address::ADD, Address::off)); // ldrb r3, [r8, lr, lsl #9]
+ __ str(r5, Address(sp, r3, ::lsl(15), Address::ADD, Address::off)); // str r5, [sp, r3, lsl #15]
+ __ strb(r9, Address(r9, r5, ::asr(2), Address::ADD, Address::post)); // strb r9, [r9], r5, asr #2
// MemoryRegRegSftOp
- __ ldr(r5, Address(r4, r0, ::ror(6), Address::ADD, Address::off)); // ldr r5, [r4, r0, ror #6]
- __ ldrb(lr, Address(r0, r4, ::lsr(9), Address::ADD, Address::off)); // ldrb lr, [r0, r4, lsr #9]
- __ str(r5, Address(r12, r12, ::asr(5), Address::ADD, Address::post)); // str r5, [r12], r12, asr #5
- __ strb(r3, Address(r1, r7, ::ror(12), Address::ADD, Address::pre)); // strb r3, [r1, r7, ror #12]!
+ __ ldr(r5, Address(r4, r0, ::ror(6), Address::ADD, Address::off)); // ldr r5, [r4, r0, ror #6]
+ __ ldrb(lr, Address(r0, r4, ::lsr(9), Address::ADD, Address::off)); // ldrb lr, [r0, r4, lsr #9]
+ __ str(r5, Address(r12, r12, ::asr(5), Address::ADD, Address::post)); // str r5, [r12], r12, asr #5
+ __ strb(r3, Address(r1, r7, ::ror(12), Address::ADD, Address::pre)); // strb r3, [r1, r7, ror #12]!
// MemoryRegRegSftOp
- __ ldr(r6, Address(r2, r3, ::rrx(), Address::ADD, Address::pre)); // ldr r6, [r2, r3, rrx]!
- __ ldrb(r8, Address(lr, r2, ::asr(16), Address::ADD, Address::pre)); // ldrb r8, [lr, r2, asr #16]!
- __ str(r6, Address(r3, r6, ::ror(7), Address::ADD, Address::pre)); // str r6, [r3, r6, ror #7]!
- __ strb(r3, Address(r8, r2, ::lsl(10), Address::ADD, Address::off)); // strb r3, [r8, r2, lsl #10]
+ __ ldr(r6, Address(r2, r3, ::rrx(), Address::ADD, Address::pre)); // ldr r6, [r2, r3, rrx]!
+ __ ldrb(r8, Address(lr, r2, ::asr(16), Address::ADD, Address::pre)); // ldrb r8, [lr, r2, asr #16]!
+ __ str(r6, Address(r3, r6, ::ror(7), Address::ADD, Address::pre)); // str r6, [r3, r6, ror #7]!
+ __ strb(r3, Address(r8, r2, ::lsl(10), Address::ADD, Address::off)); // strb r3, [r8, r2, lsl #10]
// MemoryRegRegSftOp
- __ ldr(r11, Address(sp, lr, ::lsl(8), Address::ADD, Address::off)); // ldr r11, [sp, lr, lsl #8]
- __ ldrb(r10, Address(sp, r12, ::lsl(4), Address::ADD, Address::pre)); // ldrb r10, [sp, r12, lsl #4]!
- __ str(sp, Address(r9, r2, ::asr(2), Address::ADD, Address::off)); // str sp, [r9, r2, asr #2]
- __ strb(r7, Address(r11, lr, ::asr(14), Address::ADD, Address::pre)); // strb r7, [r11, lr, asr #14]!
+ __ ldr(r11, Address(sp, lr, ::lsl(8), Address::ADD, Address::off)); // ldr r11, [sp, lr, lsl #8]
+ __ ldrb(r10, Address(sp, r12, ::lsl(4), Address::ADD, Address::pre)); // ldrb r10, [sp, r12, lsl #4]!
+ __ str(sp, Address(r9, r2, ::asr(2), Address::ADD, Address::off)); // str sp, [r9, r2, asr #2]
+ __ strb(r7, Address(r11, lr, ::asr(14), Address::ADD, Address::pre)); // strb r7, [r11, lr, asr #14]!
// LdStOne
- __ ldrex(r12, r11); // ldrex r12, [r11]
- __ ldrexb(r4, r12); // ldrexb r4, [r12]
- __ ldrexh(r11, r11); // ldrexh r11, [r11]
+ __ ldrex(r12, r11); // ldrex r12, [r11]
+ __ ldrexb(r4, r12); // ldrexb r4, [r12]
+ __ ldrexh(r11, r11); // ldrexh r11, [r11]
// LdStTwo
- __ strex(r1, r7, lr); // strex r1, r7, [lr]
- __ strexb(r12, r6, r4); // strexb r12, r6, [r4]
- __ strexh(r4, r6, r7, Assembler::HS); // strexhHS r4, r6, [r7]
+ __ strex(r1, r7, lr); // strex r1, r7, [lr]
+ __ strexb(r12, r6, r4); // strexb r12, r6, [r4]
+ __ strexh(r4, r6, r7, Assembler::HS); // strexhHS r4, r6, [r7]
// ThreeRegNon
- __ sadd16(r3, r4, r7); // sadd16 r3, r4, r7
- __ sasx(r9, r10, r3, Assembler::AL); // sasxAL r9, r10, r3
- __ ssax(r12, r5, r6); // ssax r12, r5, r6
- __ ssub16(r12, r5, lr); // ssub16 r12, r5, lr
- __ sadd8(r0, r10, r7); // sadd8 r0, r10, r7
- __ ssub8(r0, r8, r2, Assembler::VS); // ssub8VS r0, r8, r2
- __ qadd16(r11, r4, r5, Assembler::PL); // qadd16PL r11, r4, r5
- __ qasx(r11, r3, r12, Assembler::VS); // qasxVS r11, r3, r12
- __ qsax(r0, r3, r5); // qsax r0, r3, r5
- __ ssub16(r10, r12, r5, Assembler::AL); // ssub16AL r10, r12, r5
- __ qadd8(r10, r6, lr, Assembler::CC); // qadd8CC r10, r6, lr
- __ qsub8(r10, r11, r7); // qsub8 r10, r11, r7
- __ shadd16(r9, r4, lr, Assembler::PL); // shadd16PL r9, r4, lr
- __ shasx(r1, lr, r7); // shasx r1, lr, r7
- __ shsax(r9, r11, r5, Assembler::LO); // shsaxLO r9, r11, r5
- __ shsub16(r3, r1, r11, Assembler::GE); // shsub16GE r3, r1, r11
- __ shadd8(sp, r5, r7, Assembler::GT); // shadd8GT sp, r5, r7
- __ shsub8(r1, r5, r7); // shsub8 r1, r5, r7
+ __ sadd16(r3, r4, r7); // sadd16 r3, r4, r7
+ __ sasx(r9, r10, r3, Assembler::AL); // sasxAL r9, r10, r3
+ __ ssax(r12, r5, r6); // ssax r12, r5, r6
+ __ ssub16(r12, r5, lr); // ssub16 r12, r5, lr
+ __ sadd8(r0, r10, r7); // sadd8 r0, r10, r7
+ __ ssub8(r0, r8, r2, Assembler::VS); // ssub8VS r0, r8, r2
+ __ qadd16(r11, r4, r5, Assembler::PL); // qadd16PL r11, r4, r5
+ __ qasx(r11, r3, r12, Assembler::VS); // qasxVS r11, r3, r12
+ __ qsax(r0, r3, r5); // qsax r0, r3, r5
+ __ ssub16(r10, r12, r5, Assembler::AL); // ssub16AL r10, r12, r5
+ __ qadd8(r10, r6, lr, Assembler::CC); // qadd8CC r10, r6, lr
+ __ qsub8(r10, r11, r7); // qsub8 r10, r11, r7
+ __ shadd16(r9, r4, lr, Assembler::PL); // shadd16PL r9, r4, lr
+ __ shasx(r1, lr, r7); // shasx r1, lr, r7
+ __ shsax(r9, r11, r5, Assembler::LO); // shsaxLO r9, r11, r5
+ __ shsub16(r3, r1, r11, Assembler::GE); // shsub16GE r3, r1, r11
+ __ shadd8(sp, r5, r7, Assembler::GT); // shadd8GT sp, r5, r7
+ __ shsub8(r1, r5, r7); // shsub8 r1, r5, r7
// ThreeRegNon
- __ uadd16(r10, r4, r7); // uadd16 r10, r4, r7
- __ uasx(r1, r9, r7, Assembler::HS); // uasxHS r1, r9, r7
- __ usax(r11, sp, r7); // usax r11, sp, r7
- __ usub16(r11, r4, lr); // usub16 r11, r4, lr
- __ uadd8(r2, sp, r7, Assembler::LO); // uadd8LO r2, sp, r7
- __ usub8(r8, r10, lr, Assembler::GT); // usub8GT r8, r10, lr
- __ uqadd16(r3, r12, sp); // uqadd16 r3, r12, sp
- __ uqasx(r4, sp, r6); // uqasx r4, sp, r6
- __ uqsax(r1, r10, lr); // uqsax r1, r10, lr
- __ uqsub16(r2, sp, lr, Assembler::LE); // uqsub16LE r2, sp, lr
- __ uqadd8(r1, r12, r5); // uqadd8 r1, r12, r5
- __ uqsub8(r0, r4, sp, Assembler::GT); // uqsub8GT r0, r4, sp
- __ uhadd16(r0, r10, r5, Assembler::HI); // uhadd16HI r0, r10, r5
- __ uhasx(r11, r4, r7, Assembler::LE); // uhasxLE r11, r4, r7
- __ uhsax(r1, lr, r9, Assembler::GE); // uhsaxGE r1, lr, r9
- __ uhsub16(r2, r11, lr); // uhsub16 r2, r11, lr
- __ uhadd8(r9, r4, r5, Assembler::GE); // uhadd8GE r9, r4, r5
- __ uhsub8(r2, sp, lr, Assembler::HI); // uhsub8HI r2, sp, lr
+ __ uadd16(r10, r4, r7); // uadd16 r10, r4, r7
+ __ uasx(r1, r9, r7, Assembler::HS); // uasxHS r1, r9, r7
+ __ usax(r11, sp, r7); // usax r11, sp, r7
+ __ usub16(r11, r4, lr); // usub16 r11, r4, lr
+ __ uadd8(r2, sp, r7, Assembler::LO); // uadd8LO r2, sp, r7
+ __ usub8(r8, r10, lr, Assembler::GT); // usub8GT r8, r10, lr
+ __ uqadd16(r3, r12, sp); // uqadd16 r3, r12, sp
+ __ uqasx(r4, sp, r6); // uqasx r4, sp, r6
+ __ uqsax(r1, r10, lr); // uqsax r1, r10, lr
+ __ uqsub16(r2, sp, lr, Assembler::LE); // uqsub16LE r2, sp, lr
+ __ uqadd8(r1, r12, r5); // uqadd8 r1, r12, r5
+ __ uqsub8(r0, r4, sp, Assembler::GT); // uqsub8GT r0, r4, sp
+ __ uhadd16(r0, r10, r5, Assembler::HI); // uhadd16HI r0, r10, r5
+ __ uhasx(r11, r4, r7, Assembler::LE); // uhasxLE r11, r4, r7
+ __ uhsax(r1, lr, r9, Assembler::GE); // uhsaxGE r1, lr, r9
+ __ uhsub16(r2, r11, lr); // uhsub16 r2, r11, lr
+ __ uhadd8(r9, r4, r5, Assembler::GE); // uhadd8GE r9, r4, r5
+ __ uhsub8(r2, sp, lr, Assembler::HI); // uhsub8HI r2, sp, lr
// PKUPSATREV
- __ sxtab16(r10, r3, r7, ::ror(16)); // sxtab16 r10, r3, r7, ROR #16
- __ sxtab(r9, r5, r7, ::ror(24), Assembler::CS); // sxtabCS r9, r5, r7, ROR #24
- __ sxtah(r3, r5, r7, ::ror(8)); // sxtah r3, r5, r7, ROR #8
- __ uxtab16(r8, r4, r6, ::ror(8), Assembler::AL); // uxtab16AL r8, r4, r6, ROR #8
- __ uxtab(r0, r11, sp, ::rrx(), Assembler::EQ); // uxtabEQ r0, r11, sp, ROR #0
- __ uxtah(r9, r12, r5, ::rrx()); // uxtah r9, r12, r5, ROR #0
+ __ sxtab16(r10, r3, r7, ::ror(16)); // sxtab16 r10, r3, r7, ROR #16
+ __ sxtab(r9, r5, r7, ::ror(24), Assembler::CS); // sxtabCS r9, r5, r7, ROR #24
+ __ sxtah(r3, r5, r7, ::ror(8)); // sxtah r3, r5, r7, ROR #8
+ __ uxtab16(r8, r4, r6, ::ror(8), Assembler::AL); // uxtab16AL r8, r4, r6, ROR #8
+ __ uxtab(r0, r11, sp, ::rrx(), Assembler::EQ); // uxtabEQ r0, r11, sp, ROR #0
+ __ uxtah(r9, r12, r5, ::rrx()); // uxtah r9, r12, r5, ROR #0
// PKUPSATREV
- __ sxtb16(r3, r11, ::ror(16), Assembler::GE); // sxtb16GE r3, r11, ROR #16
- __ sxtb(r2, r6, ::rrx(), Assembler::HI); // sxtbHI r2, r6, ROR #0
- __ sxth(r3, sp, ::ror(24), Assembler::GT); // sxthGT r3, sp, ROR #24
- __ uxtb16(r12, r5, ::ror(16)); // uxtb16 r12, r5, ROR #16
- __ uxtb(r12, r5, ::ror(16)); // uxtb r12, r5, ROR #16
- __ uxth(r8, r5, ::ror(16)); // uxth r8, r5, ROR #16
+ __ sxtb16(r3, r11, ::ror(16), Assembler::GE); // sxtb16GE r3, r11, ROR #16
+ __ sxtb(r2, r6, ::rrx(), Assembler::HI); // sxtbHI r2, r6, ROR #0
+ __ sxth(r3, sp, ::ror(24), Assembler::GT); // sxthGT r3, sp, ROR #24
+ __ uxtb16(r12, r5, ::ror(16)); // uxtb16 r12, r5, ROR #16
+ __ uxtb(r12, r5, ::ror(16)); // uxtb r12, r5, ROR #16
+ __ uxth(r8, r5, ::ror(16)); // uxth r8, r5, ROR #16
// TwoRegNon
- __ rev(r10, r4, Assembler::EQ); // revEQ r10, r4
- __ rev16(r8, r12, Assembler::GE); // rev16GE r8, r12
- __ rbit(lr, r7); // rbit lr, r7
- __ revsh(sp, r7, Assembler::GT); // revshGT sp, r7
+ __ rev(r10, r4, Assembler::EQ); // revEQ r10, r4
+ __ rev16(r8, r12, Assembler::GE); // rev16GE r8, r12
+ __ rbit(lr, r7); // rbit lr, r7
+ __ revsh(sp, r7, Assembler::GT); // revshGT sp, r7
// ThreeRegNon
- __ sdiv(r9, sp, lr); // sdiv r9, sp, lr
- __ udiv(r2, r12, r6); // udiv r2, r12, r6
+ __ sdiv(r9, sp, lr); // sdiv r9, sp, lr
+ __ udiv(r2, r12, r6); // udiv r2, r12, r6
// TwoRegTwoImm
- __ sbfx(r0, r1, (unsigned)20U, (unsigned)3U, Assembler::MI); // sbfxMI r0, r1, #20, #3
- __ ubfx(r9, r2, (unsigned)16U, (unsigned)15U); // ubfx r9, r2, #16, #15
- __ bfi(r1, r11, (unsigned)27U, (unsigned)3U, Assembler::HI); // bfiHI r1, r11, #27, #3
+ __ sbfx(r0, r1, (unsigned)20U, (unsigned)3U, Assembler::MI); // sbfxMI r0, r1, #20, #3
+ __ ubfx(r9, r2, (unsigned)16U, (unsigned)15U); // ubfx r9, r2, #16, #15
+ __ bfi(r1, r11, (unsigned)27U, (unsigned)3U, Assembler::HI); // bfiHI r1, r11, #27, #3
// TwoRegTwoImm
- __ bfc(r3, (unsigned)7U, (unsigned)10U); // bfc r3, #7, #10
+ __ bfc(r3, (unsigned)7U, (unsigned)10U); // bfc r3, #7, #10
// MultipleMemOp
- __ stmda(r6, 3435U, false); // stmda r6, {r0, r1, r3, r5, r6, r8, r10, r11}
- __ stmed(r4, 14559U, false); // stmed r4, {r0, r1, r2, r3, r4, r6, r7, r11, r12, sp}
- __ ldmda(r0, 57812U, false); // ldmda r0, {r2, r4, r6, r7, r8, sp, lr, pc}
- __ ldmfa(r12, 39027U, true); // ldmfa r12!, {r0, r1, r4, r5, r6, r11, r12, pc}
- __ stmia(r9, 12733U, true); // stmia r9!, {r0, r2, r3, r4, r5, r7, r8, r12, sp}
- __ stmea(r11, 21955U, false); // stmea r11, {r0, r1, r6, r7, r8, r10, r12, lr}
- __ ldmia(r12, 48418U, true); // ldmia r12!, {r1, r5, r8, r10, r11, r12, sp, pc}
- __ ldmfd(sp, 41226U, true); // ldmfd sp!, {r1, r3, r8, sp, pc}
- __ stmdb(r11, 8729U, true); // stmdb r11!, {r0, r3, r4, r9, sp}
- __ stmfd(r9, 36309U, true); // stmfd r9!, {r0, r2, r4, r6, r7, r8, r10, r11, pc}
- __ ldmdb(r5, 24667U, true); // ldmdb r5!, {r0, r1, r3, r4, r6, sp, lr}
- __ ldmea(r1, 37287U, false); // ldmea r1, {r0, r1, r2, r5, r7, r8, r12, pc}
- __ stmib(r11, 28266U, true); // stmib r11!, {r1, r3, r5, r6, r9, r10, r11, sp, lr}
- __ stmfa(r11, 17671U, false); // stmfa r11, {r0, r1, r2, r8, r10, lr}
- __ ldmib(r0, 21452U, true); // ldmib r0!, {r2, r3, r6, r7, r8, r9, r12, lr}
- __ ldmed(r1, 11751U, false); // ldmed r1, {r0, r1, r2, r5, r6, r7, r8, r10, r11, sp}
+ __ stmda(r6, 3435U, false); // stmda r6, {r0, r1, r3, r5, r6, r8, r10, r11}
+ __ stmed(r4, 14559U, false); // stmed r4, {r0, r1, r2, r3, r4, r6, r7, r11, r12, sp}
+ __ ldmda(r0, 57812U, false); // ldmda r0, {r2, r4, r6, r7, r8, sp, lr, pc}
+ __ ldmfa(r12, 39027U, true); // ldmfa r12!, {r0, r1, r4, r5, r6, r11, r12, pc}
+ __ stmia(r9, 12733U, true); // stmia r9!, {r0, r2, r3, r4, r5, r7, r8, r12, sp}
+ __ stmea(r11, 21955U, false); // stmea r11, {r0, r1, r6, r7, r8, r10, r12, lr}
+ __ ldmia(r12, 48418U, true); // ldmia r12!, {r1, r5, r8, r10, r11, r12, sp, pc}
+ __ ldmfd(sp, 41226U, true); // ldmfd sp!, {r1, r3, r8, sp, pc}
+ __ stmdb(r11, 8729U, true); // stmdb r11!, {r0, r3, r4, r9, sp}
+ __ stmfd(r9, 36309U, true); // stmfd r9!, {r0, r2, r4, r6, r7, r8, r10, r11, pc}
+ __ ldmdb(r5, 24667U, true); // ldmdb r5!, {r0, r1, r3, r4, r6, sp, lr}
+ __ ldmea(r1, 37287U, false); // ldmea r1, {r0, r1, r2, r5, r7, r8, r12, pc}
+ __ stmib(r11, 28266U, true); // stmib r11!, {r1, r3, r5, r6, r9, r10, r11, sp, lr}
+ __ stmfa(r11, 17671U, false); // stmfa r11, {r0, r1, r2, r8, r10, lr}
+ __ ldmib(r0, 21452U, true); // ldmib r0!, {r2, r3, r6, r7, r8, r9, r12, lr}
+ __ ldmed(r1, 11751U, false); // ldmed r1, {r0, r1, r2, r5, r6, r7, r8, r10, r11, sp}
// BranchLabel
- __ b(forth, Assembler::CS); // bCS forth
- __ bl(__ pc(), Assembler::MI); // blMI .
+ __ b(forth, Assembler::CS); // bCS forth
+ __ bl(__ pc(), Assembler::MI); // blMI .
// OneRegNon
- __ b(r0, Assembler::VS); // bxVS r0
- __ bl(r3); // blx r3
+ __ b(r0, Assembler::VS); // bxVS r0
+ __ bl(r3); // blx r3
// BranchLabel
- __ b(__ pc(), Assembler::AL); // bAL .
- __ bl(__ pc()); // bl .
+ __ b(__ pc(), Assembler::AL); // bAL .
+ __ bl(__ pc()); // bl .
// OneRegNon
- __ b(r0, Assembler::VS); // bxVS r0
- __ bl(r5); // blx r5
+ __ b(r0, Assembler::VS); // bxVS r0
+ __ bl(r5); // blx r5
// BranchLabel
- __ b(forth, Assembler::LE); // bLE forth
- __ bl(__ pc(), Assembler::MI); // blMI .
+ __ b(forth, Assembler::LE); // bLE forth
+ __ bl(__ pc(), Assembler::MI); // blMI .
// OneRegNon
- __ b(r9, Assembler::NE); // bxNE r9
- __ bl(r12); // blx r12
+ __ b(r9, Assembler::NE); // bxNE r9
+ __ bl(r12); // blx r12
// BranchLabel
- __ b(back); // b back
- __ bl(__ pc(), Assembler::HI); // blHI .
+ __ b(back); // b back
+ __ bl(__ pc(), Assembler::HI); // blHI .
// OneRegNon
- __ b(r1, Assembler::VC); // bxVC r1
- __ bl(r7, Assembler::GT); // blxGT r7
+ __ b(r1, Assembler::VC); // bxVC r1
+ __ bl(r7, Assembler::GT); // blxGT r7
// BranchLabel
- __ b(back, Assembler::GE); // bGE back
- __ bl(__ pc(), Assembler::HI); // blHI .
+ __ b(back, Assembler::GE); // bGE back
+ __ bl(__ pc(), Assembler::HI); // blHI .
// OneRegNon
- __ b(r12); // bx r12
- __ bl(r7, Assembler::CC); // blxCC r7
+ __ b(r12); // bx r12
+ __ bl(r7, Assembler::CC); // blxCC r7
// BranchLabel
- __ b(__ pc()); // b .
- __ bl(back, Assembler::GT); // blGT back
+ __ b(__ pc()); // b .
+ __ bl(back, Assembler::GT); // blGT back
// OneRegNon
- __ b(r1, Assembler::GE); // bxGE r1
- __ bl(r0); // blx r0
+ __ b(r1, Assembler::GE); // bxGE r1
+ __ bl(r0); // blx r0
// BranchLabel
- __ b(__ pc()); // b .
- __ bl(forth); // bl forth
+ __ b(__ pc()); // b .
+ __ bl(forth); // bl forth
// OneRegNon
- __ b(lr, Assembler::GT); // bxGT lr
- __ bl(r11, Assembler::NE); // blxNE r11
+ __ b(lr, Assembler::GT); // bxGT lr
+ __ bl(r11, Assembler::NE); // blxNE r11
// BranchLabel
- __ b(__ pc(), Assembler::CS); // bCS .
- __ bl(__ pc()); // bl .
+ __ b(__ pc(), Assembler::CS); // bCS .
+ __ bl(__ pc()); // bl .
// OneRegNon
- __ b(r10, Assembler::HS); // bxHS r10
- __ bl(r4); // blx r4
+ __ b(r10, Assembler::HS); // bxHS r10
+ __ bl(r4); // blx r4
// BranchLabel
- __ b(back, Assembler::AL); // bAL back
- __ bl(__ pc()); // bl .
+ __ b(back, Assembler::AL); // bAL back
+ __ bl(__ pc()); // bl .
// OneRegNon
- __ b(r12, Assembler::LO); // bxLO r12
- __ bl(r8); // blx r8
+ __ b(r12, Assembler::LO); // bxLO r12
+ __ bl(r8); // blx r8
// BranchLabel
- __ b(forth); // b forth
- __ bl(__ pc()); // bl .
+ __ b(forth); // b forth
+ __ bl(__ pc()); // bl .
// OneRegNon
- __ b(r10); // bx r10
- __ bl(r1); // blx r1
+ __ b(r10); // bx r10
+ __ bl(r1); // blx r1
// ThreeFltNon
- __ vmla_f32(d2, d4, d6, Assembler::MI); // vmlaMI.f32 s4, s8, s12
- __ vmls_f32(d2, d5, d5); // vmls.f32 s4, s10, s10
- __ vnmla_f32(d1, d5, d6); // vnmla.f32 s2, s10, s12
- __ vnmls_f32(d4, d3, d4, Assembler::LT); // vnmlsLT.f32 s8, s6, s8
- __ vnmul_f32(d3, d6, d7, Assembler::MI); // vnmulMI.f32 s6, s12, s14
- __ vadd_f32(d0, d1, d0); // vadd.f32 s0, s2, s0
- __ vsub_f32(d1, d2, d5, Assembler::AL); // vsubAL.f32 s2, s4, s10
- __ vdiv_f32(d0, d1, d6, Assembler::CS); // vdivCS.f32 s0, s2, s12
+ __ vmla_f32(d2, d4, d6, Assembler::MI); // vmlaMI.f32 s4, s8, s12
+ __ vmls_f32(d2, d5, d5); // vmls.f32 s4, s10, s10
+ __ vnmla_f32(d1, d5, d6); // vnmla.f32 s2, s10, s12
+ __ vnmls_f32(d4, d3, d4, Assembler::LT); // vnmlsLT.f32 s8, s6, s8
+ __ vnmul_f32(d3, d6, d7, Assembler::MI); // vnmulMI.f32 s6, s12, s14
+ __ vadd_f32(d0, d1, d0); // vadd.f32 s0, s2, s0
+ __ vsub_f32(d1, d2, d5, Assembler::AL); // vsubAL.f32 s2, s4, s10
+ __ vdiv_f32(d0, d1, d6, Assembler::CS); // vdivCS.f32 s0, s2, s12
// ThreeFltNon
- __ vmla_f64(d0, d3, d6); // vmla.f64 d0, d3, d6
- __ vmls_f64(d0, d1, d5); // vmls.f64 d0, d1, d5
- __ vnmla_f64(d1, d4, d6); // vnmla.f64 d1, d4, d6
- __ vnmls_f64(d0, d1, d1, Assembler::NE); // vnmlsNE.f64 d0, d1, d1
- __ vnmul_f64(d3, d5, d5, Assembler::NE); // vnmulNE.f64 d3, d5, d5
- __ vadd_f64(d0, d2, d4, Assembler::LO); // vaddLO.f64 d0, d2, d4
- __ vsub_f64(d1, d2, d4); // vsub.f64 d1, d2, d4
- __ vdiv_f64(d0, d1, d5, Assembler::MI); // vdivMI.f64 d0, d1, d5
+ __ vmla_f64(d0, d3, d6); // vmla.f64 d0, d3, d6
+ __ vmls_f64(d0, d1, d5); // vmls.f64 d0, d1, d5
+ __ vnmla_f64(d1, d4, d6); // vnmla.f64 d1, d4, d6
+ __ vnmls_f64(d0, d1, d1, Assembler::NE); // vnmlsNE.f64 d0, d1, d1
+ __ vnmul_f64(d3, d5, d5, Assembler::NE); // vnmulNE.f64 d3, d5, d5
+ __ vadd_f64(d0, d2, d4, Assembler::LO); // vaddLO.f64 d0, d2, d4
+ __ vsub_f64(d1, d2, d4); // vsub.f64 d1, d2, d4
+ __ vdiv_f64(d0, d1, d5, Assembler::MI); // vdivMI.f64 d0, d1, d5
// TwoFltNon
- __ vabs_f32(d3, d3); // vabs.f32 s6, s6
- __ vneg_f32(d3, d4, Assembler::PL); // vnegPL.f32 s6, s8
- __ vsqrt_f32(d0, d4); // vsqrt.f32 s0, s8
+ __ vabs_f32(d3, d3); // vabs.f32 s6, s6
+ __ vneg_f32(d3, d4, Assembler::PL); // vnegPL.f32 s6, s8
+ __ vsqrt_f32(d0, d4); // vsqrt.f32 s0, s8
// TwoFltNon
- __ vabs_f64(d0, d4); // vabs.f64 d0, d4
- __ vneg_f64(d1, d4); // vneg.f64 d1, d4
- __ vsqrt_f64(d0, d1); // vsqrt.f64 d0, d1
+ __ vabs_f64(d0, d4); // vabs.f64 d0, d4
+ __ vneg_f64(d1, d4); // vneg.f64 d1, d4
+ __ vsqrt_f64(d0, d1); // vsqrt.f64 d0, d1
// vmov_f32
- __ vmov_f32(d0, lr, Assembler::PL); // vmovPL.f32 s0, lr
+ __ vmov_f32(d0, lr, Assembler::PL); // vmovPL.f32 s0, lr
// vmov_f32
- __ vmov_f32(r11, d4); // vmov.f32 r11, s8
+ __ vmov_f32(r11, d4); // vmov.f32 r11, s8
// vmov_f64
- __ vmov_f64(d1, r11, lr, Assembler::LT); // vmovLT.f64 d1, r11, lr
+ __ vmov_f64(d1, r11, lr, Assembler::LT); // vmovLT.f64 d1, r11, lr
// vmov_f64
- __ vmov_f64(r7, r5, d5); // vmov.f64 r7, r5, d5
+ __ vmov_f64(r7, r5, d5); // vmov.f64 r7, r5, d5
// vmov_f32
- __ vmov_f32(d4, d6); // vmov.f32 s8, s12
+ __ vmov_f32(d4, d6); // vmov.f32 s8, s12
// vmov_f64
- __ vmov_f64(d1, d2, Assembler::HI); // vmovHI.f64 d1, d2
+ __ vmov_f64(d1, d2, Assembler::HI); // vmovHI.f64 d1, d2
// vmov_f32
- __ vmov_f32(d2, 1.0f, Assembler::VS); // vmovVS.f32 s4, #1.0
+ __ vmov_f32(d2, 1.0f, Assembler::VS); // vmovVS.f32 s4, #1.0
// vmov_f64
- __ vmov_f64(d2, 1.0); // vmov.f64 d2, #1.0
+ __ vmov_f64(d2, 1.0); // vmov.f64 d2, #1.0
// vmov_f32
- __ vmov_f32(d3, 2.0f); // vmov.f32 s6, #2.0
+ __ vmov_f32(d3, 2.0f); // vmov.f32 s6, #2.0
// vmov_f64
- __ vmov_f64(d1, 2.0); // vmov.f64 d1, #2.0
+ __ vmov_f64(d1, 2.0); // vmov.f64 d1, #2.0
// vector memory
- __ vldr_f32(d2, Address(r5, 116)); // vldr.f32 s4, [r5, #116]
- __ vstr_f32(d1, Address(r1, 56), Assembler::CC); // vstrCC.f32 s2, [r1, #56]
+ __ vldr_f32(d2, Address(r5, 116)); // vldr.f32 s4, [r5, #116]
+ __ vstr_f32(d1, Address(r1, 56), Assembler::CC); // vstrCC.f32 s2, [r1, #56]
// vector memory
- __ vldr_f64(d7, Address(r5, 16), Assembler::NE); // vldrNE.f64 d7, [r5, #16]
- __ vstr_f64(d6, Address(r1, 228)); // vstr.f64 d6, [r1, #228]
+ __ vldr_f64(d7, Address(r5, 16), Assembler::NE); // vldrNE.f64 d7, [r5, #16]
+ __ vstr_f64(d6, Address(r1, 228)); // vstr.f64 d6, [r1, #228]
__ bind(near_flt);
// vector memory
- __ vldr_f32(d1, near_post_flt); // vldr.f32 s2, near_post_flt
- __ vstr_f32(d3, near_post_flt); // vstr.f32 s6, near_post_flt
+ __ vldr_f32(d1, near_post_flt); // vldr.f32 s2, near_post_flt
+ __ vstr_f32(d3, near_post_flt); // vstr.f32 s6, near_post_flt
// vector memory
- __ vldr_f64(d2, near_flt, Assembler::LT); // vldrLT.f64 d2, near_flt
- __ vstr_f64(d3, __ pc(), Assembler::GT); // vstrGT.f64 d3, .
+ __ vldr_f64(d2, near_flt, Assembler::LT); // vldrLT.f64 d2, near_flt
+ __ vstr_f64(d3, __ pc(), Assembler::GT); // vstrGT.f64 d3, .
// vector memory
- __ vldr_f32(d2, near_post_flt, Assembler::CC); // vldrCC.f32 s4, near_post_flt
- __ vstr_f32(d0, near_post_flt); // vstr.f32 s0, near_post_flt
+ __ vldr_f32(d2, near_post_flt, Assembler::CC); // vldrCC.f32 s4, near_post_flt
+ __ vstr_f32(d0, near_post_flt); // vstr.f32 s0, near_post_flt
// vector memory
- __ vldr_f64(d4, near_post_flt, Assembler::GT); // vldrGT.f64 d4, near_post_flt
- __ vstr_f64(d0, near_flt); // vstr.f64 d0, near_flt
+ __ vldr_f64(d4, near_post_flt, Assembler::GT); // vldrGT.f64 d4, near_post_flt
+ __ vstr_f64(d0, near_flt); // vstr.f64 d0, near_flt
// vector memory
- __ vldr_f32(d4, near_post_flt); // vldr.f32 s8, near_post_flt
- __ vstr_f32(d3, near_post_flt); // vstr.f32 s6, near_post_flt
+ __ vldr_f32(d4, near_post_flt); // vldr.f32 s8, near_post_flt
+ __ vstr_f32(d3, near_post_flt); // vstr.f32 s6, near_post_flt
// vector memory
- __ vldr_f64(d4, near_flt, Assembler::PL); // vldrPL.f64 d4, near_flt
- __ vstr_f64(d5, near_flt); // vstr.f64 d5, near_flt
+ __ vldr_f64(d4, near_flt, Assembler::PL); // vldrPL.f64 d4, near_flt
+ __ vstr_f64(d5, near_flt); // vstr.f64 d5, near_flt
// vector memory
- __ vldr_f32(d4, near_post_flt, Assembler::LS); // vldrLS.f32 s8, near_post_flt
- __ vstr_f32(d6, __ pc(), Assembler::CC); // vstrCC.f32 s12, .
+ __ vldr_f32(d4, near_post_flt, Assembler::LS); // vldrLS.f32 s8, near_post_flt
+ __ vstr_f32(d6, __ pc(), Assembler::CC); // vstrCC.f32 s12, .
// vector memory
- __ vldr_f64(d6, near_post_flt, Assembler::AL); // vldrAL.f64 d6, near_post_flt
- __ vstr_f64(d1, near_post_flt, Assembler::LT); // vstrLT.f64 d1, near_post_flt
+ __ vldr_f64(d6, near_post_flt, Assembler::AL); // vldrAL.f64 d6, near_post_flt
+ __ vstr_f64(d1, near_post_flt, Assembler::LT); // vstrLT.f64 d1, near_post_flt
__ bind(near_post_flt);
// FltMultMemOp
- __ vldmia_f32(r1, 4U, false); // vldmia.f32 r1, {s4}
- __ vstmia_f32(r6, 4U, true, Assembler::CS); // vstmiaCS.f32 r6!, {s4}
+ __ vldmia_f32(r1, 4U, false); // vldmia.f32 r1, {s4}
+ __ vstmia_f32(r6, 4U, true, Assembler::CS); // vstmiaCS.f32 r6!, {s4}
// DblMultMemOp
- __ vldmia_f64(r9, 30U, true); // vldmia.f64 r9!, {d1, d2, d3, d4}
- __ vstmia_f64(r3, 192U, true); // vstmia.f64 r3!, {d6, d7}
+ __ vldmia_f64(r9, 30U, true); // vldmia.f64 r9!, {d1, d2, d3, d4}
+ __ vstmia_f64(r3, 192U, true); // vstmia.f64 r3!, {d6, d7}
// FltMultMemOp
- __ vldmdb_f32(r2, 8U, Assembler::VS); // vldmdbVS.f32 r2!, {s6}
- __ vstmdb_f32(r6, 128U); // vstmdb.f32 r6!, {s14}
+ __ vldmdb_f32(r2, 8U, Assembler::VS); // vldmdbVS.f32 r2!, {s6}
+ __ vstmdb_f32(r6, 128U); // vstmdb.f32 r6!, {s14}
// DblMultMemOp
- __ vldmdb_f64(sp, 240U); // vldmdb.f64 sp!, {d4, d5, d6, d7}
- __ vstmdb_f64(r0, 224U); // vstmdb.f64 r0!, {d5, d6, d7}
+ __ vldmdb_f64(sp, 240U); // vldmdb.f64 sp!, {d4, d5, d6, d7}
+ __ vstmdb_f64(r0, 224U); // vstmdb.f64 r0!, {d5, d6, d7}
// vcmp_f32
- __ vcmp_f32(d1, d1); // vcmp.f32 s2, s2
+ __ vcmp_f32(d1, d1); // vcmp.f32 s2, s2
// vcmpe_f32
- __ vcmpe_f32(d4, d4, Assembler::VC); // vcmpeVC.f32 s8, s8
+ __ vcmpe_f32(d4, d4, Assembler::VC); // vcmpeVC.f32 s8, s8
// vcmp_f64
- __ vcmp_f64(d0, d6); // vcmp.f64 d0, d6
+ __ vcmp_f64(d0, d6); // vcmp.f64 d0, d6
// vcmpe_f64
- __ vcmpe_f64(d3, d7, Assembler::GE); // vcmpeGE.f64 d3, d7
+ __ vcmpe_f64(d3, d7, Assembler::GE); // vcmpeGE.f64 d3, d7
// vcmp_f32
- __ vcmp_f32(d1, 0.0f, Assembler::LT); // vcmpLT.f32 s2, #0.0
+ __ vcmp_f32(d1, 0.0f, Assembler::LT); // vcmpLT.f32 s2, #0.0
// vcmpe_f32
- __ vcmpe_f32(d7, 0.0f, Assembler::GT); // vcmpeGT.f32 s14, #0.0
+ __ vcmpe_f32(d7, 0.0f, Assembler::GT); // vcmpeGT.f32 s14, #0.0
// vcmp_f64
- __ vcmp_f64(d4, 0.0); // vcmp.f64 d4, #0.0
+ __ vcmp_f64(d4, 0.0); // vcmp.f64 d4, #0.0
// vcmpe_f64
- __ vcmpe_f64(d1, 0.0); // vcmpe.f64 d1, #0.0
+ __ vcmpe_f64(d1, 0.0); // vcmpe.f64 d1, #0.0
// vcvt
- __ vcvt_s32_f32(d1, d3, Assembler::VS); // vcvtVS.s32.f32 s2, s6
- __ vcvt_u32_f32(d3, d7, Assembler::GT); // vcvtGT.u32.f32 s6, s14
- __ vcvt_f32_s32(d0, d1, Assembler::CC); // vcvtCC.f32.s32 s0, s2
- __ vcvt_f32_u32(d1, d2, Assembler::CC); // vcvtCC.f32.u32 s2, s4
+ __ vcvt_s32_f32(d1, d3, Assembler::VS); // vcvtVS.s32.f32 s2, s6
+ __ vcvt_u32_f32(d3, d7, Assembler::GT); // vcvtGT.u32.f32 s6, s14
+ __ vcvt_f32_s32(d0, d1, Assembler::CC); // vcvtCC.f32.s32 s0, s2
+ __ vcvt_f32_u32(d1, d2, Assembler::CC); // vcvtCC.f32.u32 s2, s4
// vcvt
- __ vcvt_s32_f64(d2, d4, Assembler::HI); // vcvtHI.s32.f64 s4, d4
- __ vcvt_u32_f64(d3, d6, Assembler::HI); // vcvtHI.u32.f64 s6, d6
- __ vcvt_f32_f64(d3, d7, Assembler::LS); // vcvtLS.f32.f64 s6, d7
+ __ vcvt_s32_f64(d2, d4, Assembler::HI); // vcvtHI.s32.f64 s4, d4
+ __ vcvt_u32_f64(d3, d6, Assembler::HI); // vcvtHI.u32.f64 s6, d6
+ __ vcvt_f32_f64(d3, d7, Assembler::LS); // vcvtLS.f32.f64 s6, d7
// vcvt
- __ vcvt_f64_s32(d3, d4); // vcvt.f64.s32 d3, s8
- __ vcvt_f64_u32(d5, d7, Assembler::EQ); // vcvtEQ.f64.u32 d5, s14
- __ vcvt_f64_f32(d4, d5, Assembler::AL); // vcvtAL.f64.f32 d4, s10
+ __ vcvt_f64_s32(d3, d4); // vcvt.f64.s32 d3, s8
+ __ vcvt_f64_u32(d5, d7, Assembler::EQ); // vcvtEQ.f64.u32 d5, s14
+ __ vcvt_f64_f32(d4, d5, Assembler::AL); // vcvtAL.f64.f32 d4, s10
// BKPT
- __ bkpt((unsigned)26U); // bkpt #26
+ __ bkpt((unsigned)26U); // bkpt #26
__ bind(forth);
@@ -1603,8 +1603,8 @@
//This should really be in the macroassembler
void Assembler::mov_immediate32(Register dst, u_int32_t imm32, Condition cond, bool s)
{
- // Need to move a full 32 bit immediate, for example if we're loading an address that
- // might change later and therefore need to be updated.
+ // Need to move a full 32 bit immediate, for example if we're loading an address that
+ // might change later and therefore need to be updated.
if (VM_Version::features() & (FT_ARMV7 | FT_ARMV6T2)) {
//Use a movw and a movt
Assembler::movw_i(dst, (unsigned)(imm32 & 0xffff), cond);
@@ -1614,13 +1614,13 @@
Assembler::cmp(dst, 0);
}
} else {
- // Sadly we don't have movw, movt
- // instead emit a mov and three orr
- mov_i(dst, imm32 & (0xff ), cond);
- orr(dst, dst, imm32 & (0xff << 8 ), cond);
- orr(dst, dst, imm32 & (0xff << 16), cond);
- if(s) orrs(dst, dst, imm32 & (0xff << 24), cond);
- else orr (dst, dst, imm32 & (0xff << 24), cond);
+ // Sadly we don't have movw, movt
+ // instead emit a mov and three orr
+ mov_i(dst, imm32 & (0xff ), cond);
+ orr(dst, dst, imm32 & (0xff << 8 ), cond);
+ orr(dst, dst, imm32 & (0xff << 16), cond);
+ if(s) orrs(dst, dst, imm32 & (0xff << 24), cond);
+ else orr (dst, dst, imm32 & (0xff << 24), cond);
}
}
diff -r dc9b82a9fbc7 src/cpu/aarch32/vm/assembler_aarch32.hpp
--- a/src/cpu/aarch32/vm/assembler_aarch32.hpp Mon Jan 11 14:58:34 2016 +0000
+++ b/src/cpu/aarch32/vm/assembler_aarch32.hpp Fri Jan 15 23:23:56 2016 +0800
@@ -45,22 +45,22 @@
class Argument VALUE_OBJ_CLASS_SPEC {
public:
enum {
- n_int_register_parameters_c = 4, // r0, r1, ... r3 (c_rarg0, c_rarg1, ...)
- // These are number of double registers, each double register can instead be
- // used as two single precision registers.
- #ifdef __VFP_FP__
- n_float_register_parameters_c = 8, // d0, d1, ... d7 (c_farg0, c_farg1, ... )
- #define HARD_FLOAT_CC
- #elif defined(__SOFTFP__)
- n_float_register_parameters_c = 0, // Not realy used, in this case
- // pass_float -> pass_int & pass_double -> pass_long
- #else
- #error "Floating point ABI not supported";
- #endif
+ n_int_register_parameters_c = 4, // r0, r1, ... r3 (c_rarg0, c_rarg1, ...)
+ // These are number of double registers, each double register can instead be
+ // used as two single precision registers.
+#ifdef __VFP_FP__
+ n_float_register_parameters_c = 8, // d0, d1, ... d7 (c_farg0, c_farg1, ... )
+#define HARD_FLOAT_CC
+#elif defined(__SOFTFP__)
+ n_float_register_parameters_c = 0, // Not realy used, in this case
+ // pass_float -> pass_int & pass_double -> pass_long
+#else
+ #error "Floating point ABI not supported";
+#endif
// Not that these only make sense for compiled
- // value are irrelevant currently, if c1, c2, ... will go ahead these need to be
- // sorted out.
+ // value are irrelevant currently, if c1, c2, ... will go ahead these need to be
+ // sorted out.
n_int_register_parameters_j = 4, // r1, ... r7, r0 (rj_rarg0, j_rarg1, ...
n_float_register_parameters_j = 4 // d0, d1, ... d7 (j_farg0, j_farg1, ...
};
@@ -963,8 +963,8 @@
wrap_label(Rd, L, cond, &Assembler::Assembler::adr);
}
- private:
- friend void entry(CodeBuffer *cb);
+private:
+ friend void entry(CodeBuffer *cb);
#define INSN(NAME, decode, s_flg) \
inline void NAME(Register Rd, unsigned imm, Condition cond = C_DFLT) { \
bool status = imm_instr(decode, Rd, ZERO_ADDR_REG, imm, cond, s_flg); \
@@ -1599,24 +1599,24 @@
// MCR<c> <coproc>, <opc1>, <Rt>, <CRn>, <CRm>{, <opc2>}
void mcr(int cpc_dex, int opc1, Register Rt, int cpc_reg_dex1,
int cpc_reg_dex2, int opc2, Condition cond = C_DFLT) {
- starti;
- f(cond, 31, 28), f(0b1110, 27, 24), f(opc1, 23, 21), f(0, 20);
- f(cpc_reg_dex1, 19, 16), rf(Rt, 12), f(cpc_dex, 11, 8);
- f(opc2, 7, 5), f(1, 4), f(cpc_reg_dex2, 3, 0);
+ starti;
+ f(cond, 31, 28), f(0b1110, 27, 24), f(opc1, 23, 21), f(0, 20);
+ f(cpc_reg_dex1, 19, 16), rf(Rt, 12), f(cpc_dex, 11, 8);
+ f(opc2, 7, 5), f(1, 4), f(cpc_reg_dex2, 3, 0);
}
// These instructions do not read the value of the register passed,
// can be any. Chosen r0.
void cp15dmb(Condition cond = C_DFLT) {
- mcr(15, 0, r0, 7, 10, 5, cond);
+ mcr(15, 0, r0, 7, 10, 5, cond);
}
void cp15dsb(Condition cond = C_DFLT) {
- mcr(15, 0, r0, 7, 10, 4, cond);
+ mcr(15, 0, r0, 7, 10, 4, cond);
}
void cp15isb(Condition cond = C_DFLT) {
- mcr(15, 0, r0, 7, 5, 4, cond);
+ mcr(15, 0, r0, 7, 5, 4, cond);
}
enum Membar_mask_bits {
diff -r dc9b82a9fbc7 src/cpu/aarch32/vm/interp_masm_aarch32.cpp
--- a/src/cpu/aarch32/vm/interp_masm_aarch32.cpp Mon Jan 11 14:58:34 2016 +0000
+++ b/src/cpu/aarch32/vm/interp_masm_aarch32.cpp Fri Jan 15 23:23:56 2016 +0800
@@ -422,48 +422,48 @@
verify_oop(r0, state);
}
- /* Debugging code */
- bytecode_seen(rscratch1, r3);
+ /* Debugging code */
+ bytecode_seen(rscratch1, r3);
- /*{
- Label skip;
+ /*{
+ Label skip;
- mov(r3, (address)&MacroAssembler::bytecodes_executed);
- ldr(r2, r3);
- add(r2, r2, 1);
- str(r2, r3);
- // Print out every 16384 (needs to be a power of two).
- mov(r3, 16384 - 1);
- tst(r2, r3);
- b(skip, Assembler::NE);
- reg_printf_important("Executed %d bytecodes.\n", r2);
- bind(skip);
- }*/
+ mov(r3, (address)&MacroAssembler::bytecodes_executed);
+ ldr(r2, r3);
+ add(r2, r2, 1);
+ str(r2, r3);
+ // Print out every 16384 (needs to be a power of two).
+ mov(r3, 16384 - 1);
+ tst(r2, r3);
+ b(skip, Assembler::NE);
+ reg_printf_important("Executed %d bytecodes.\n", r2);
+ bind(skip);
+ }*/
- /*mov(r3, (address)&MacroAssembler::bytecodes_until_print);
- ldr(r2, Address(r3));
- cmp(r2, 0);
+ /*mov(r3, (address)&MacroAssembler::bytecodes_until_print);
+ ldr(r2, Address(r3));
+ cmp(r2, 0);
- sub(r2, r2, 1, Assembler::NE);
- str(r2, Address(r3), Assembler::NE);
+ sub(r2, r2, 1, Assembler::NE);
+ str(r2, Address(r3), Assembler::NE);
- mov(r2, 1, Assembler::EQ);
- mov(r3, (address)&MacroAssembler::enable_debug, Assembler::EQ);
- str(r2, Address(r3), Assembler::EQ);
+ mov(r2, 1, Assembler::EQ);
+ mov(r3, (address)&MacroAssembler::enable_debug, Assembler::EQ);
+ str(r2, Address(r3), Assembler::EQ);
- mov(r3, (address)&MacroAssembler::enable_method_debug, Assembler::EQ);
- str(r2, Address(r3), Assembler::EQ);*/
+ mov(r3, (address)&MacroAssembler::enable_method_debug, Assembler::EQ);
+ str(r2, Address(r3), Assembler::EQ);*/
- /*Label end;
- cmp(r2, 0);
- b(end, Assembler::NE);
- stop("got to end of bytecodes");
- bind(end);*/
+ /*Label end;
+ cmp(r2, 0);
+ b(end, Assembler::NE);
+ stop("got to end of bytecodes");
+ bind(end);*/
- get_bytecode(r14, rscratch1);
- reg_printf("Dispatching bytecode %s (%d) @ BCP = %p\n", r14, rscratch1, rbcp);
- /* End debugging code */
+ get_bytecode(r14, rscratch1);
+ reg_printf("Dispatching bytecode %s (%d) @ BCP = %p\n", r14, rscratch1, rbcp);
+ /* End debugging code */
if (table == Interpreter::dispatch_table(state)) {
@@ -580,7 +580,7 @@
bind(unlocked);
// r0: Might contain return value
- // FIXME r1 : Might contain the value too
+ // FIXME r1 : Might contain the value too
// Check that all monitors are unlocked
{
@@ -788,7 +788,7 @@
} else {
Label done;
- //create_breakpoint();
+ //create_breakpoint();
const Register swap_reg = c_rarg0;
const Register header_reg = c_rarg2; // Will contain the old oopMark
const Register obj_reg = c_rarg3; // Will contain the oop
diff -r dc9b82a9fbc7 src/cpu/aarch32/vm/interpreterRT_aarch32.cpp
--- a/src/cpu/aarch32/vm/interpreterRT_aarch32.cpp Mon Jan 11 14:58:34 2016 +0000
+++ b/src/cpu/aarch32/vm/interpreterRT_aarch32.cpp Fri Jan 15 23:23:56 2016 +0800
@@ -39,9 +39,9 @@
#define __ _masm->
/*#define print_copy(name, off) \
- __ mov(rscratch1, (address)name);\
- __ mov(rscratch2, off);\
- __ reg_printf("%s copied from offset %p + %d\n", rscratch1, from(), rscratch2);*/
+ __ mov(rscratch1, (address)name);\
+ __ mov(rscratch2, off);\
+ __ reg_printf("%s copied from offset %p + %d\n", rscratch1, from(), rscratch2);*/
#define print_copy(name, off)
@@ -51,7 +51,7 @@
Register InterpreterRuntime::SignatureHandlerGenerator::temp() { return rscratch1; }
void InterpreterRuntime::SignatureHandlerGenerator::pass_int() {
- print_copy(__FUNCTION__, Interpreter::local_offset_in_bytes(offset()));
+ print_copy(__FUNCTION__, Interpreter::local_offset_in_bytes(offset()));
const Address src(from(), Interpreter::local_offset_in_bytes(offset()));
switch (_num_int_args) {
@@ -77,7 +77,7 @@
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
- print_copy(__FUNCTION__, Interpreter::local_offset_in_bytes(offset() + 1));
+ print_copy(__FUNCTION__, Interpreter::local_offset_in_bytes(offset() + 1));
const Address src(from(), Interpreter::local_offset_in_bytes(offset() + 1));
// Needs to be aligned to even registers. Means also won't be split across
// registers and stack.
@@ -90,7 +90,7 @@
break;
default:
__ ldrd(r0, temp(), src);
- _stack_offset = (_stack_offset + 7) & ~7; // Align on 8-byte boundary
+ _stack_offset = (_stack_offset + 7) & ~7; // Align on 8-byte boundary
__ strd(r0, temp(), Address(to(), _stack_offset));
_stack_offset += 2 * wordSize;
_num_int_args += 2;
@@ -100,13 +100,13 @@
#ifdef HARD_FLOAT_CC
void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
- print_copy(__FUNCTION__, Interpreter::local_offset_in_bytes(offset()));
+ print_copy(__FUNCTION__, Interpreter::local_offset_in_bytes(offset()));
const Address src(from(), Interpreter::local_offset_in_bytes(offset()));
if (_fp_arg_mask) {
- unsigned index = __builtin_ctz(_fp_arg_mask);
- __ vldr_f32_real(index, src);
- _fp_arg_mask ^= 1 << index;
+ unsigned index = __builtin_ctz(_fp_arg_mask);
+ __ vldr_f32_real(index, src);
+ _fp_arg_mask ^= 1 << index;
_next_double_dex += index & 1;
} else {
__ ldr(r0, src);
@@ -116,33 +116,33 @@
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
- print_copy(__FUNCTION__, Interpreter::local_offset_in_bytes(offset() + 1));
+ print_copy(__FUNCTION__, Interpreter::local_offset_in_bytes(offset() + 1));
const Address src(from(), Interpreter::local_offset_in_bytes(offset() + 1));
if (_next_double_dex < Argument::n_float_register_parameters_c) {
- _fp_arg_mask ^= 3 << _next_double_dex * 2;
- __ vldr_f64(as_FloatRegister(_next_double_dex++), src);
+ _fp_arg_mask ^= 3 << _next_double_dex * 2;
+ __ vldr_f64(as_FloatRegister(_next_double_dex++), src);
} else {
- __ ldrd(r0, temp(), src);
- _stack_offset = (_stack_offset + 7) & ~7;
- __ strd(r0, temp(), Address(to(), _stack_offset));
- _stack_offset += 2 * wordSize;
- }
+ __ ldrd(r0, temp(), src);
+ _stack_offset = (_stack_offset + 7) & ~7;
+ __ strd(r0, temp(), Address(to(), _stack_offset));
+ _stack_offset += 2 * wordSize;
+ }
}
#else
// Just pass them in integer registers and on the stack as we would
// any other argument
void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
- pass_int();
+ pass_int();
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
- pass_long();
+ pass_long();
}
#endif //HARD_FLOAT_CC
void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
- print_copy(__FUNCTION__, Interpreter::local_offset_in_bytes(offset()));
+ print_copy(__FUNCTION__, Interpreter::local_offset_in_bytes(offset()));
switch (_num_int_args) {
case 0:
@@ -234,22 +234,22 @@
virtual void pass_long()
{
- intptr_t high_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
+ intptr_t high_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
intptr_t low_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
_from -= 2*Interpreter::stackElementSize;
if (_num_int_reg_args < Argument::n_int_register_parameters_c-2) {
- // Passing longs. As c_rarg0 is always reserved for jni_env we could only
- // possibly stash a long in r3:r2 due to alignment so we can only enter here
- // with either zero or one parameters.
- // Align to two
- _int_args += 1 - _num_int_reg_args; // 0 or 1
+ // Passing longs. As c_rarg0 is always reserved for jni_env we could only
+ // possibly stash a long in r3:r2 due to alignment so we can only enter here
+ // with either zero or one parameters.
+ // Align to two
+ _int_args += 1 - _num_int_reg_args; // 0 or 1
*_int_args++ = low_obj;
*_int_args++ = high_obj;
_num_int_reg_args = 3;
} else {
- _to = (intptr_t*)(((intptr_t)_to + 7) & ~7); // Align to eight bytes
- *_to++ = low_obj;
+ _to = (intptr_t*)(((intptr_t)_to + 7) & ~7); // Align to eight bytes
+ *_to++ = low_obj;
*_to++ = high_obj;
_num_int_reg_args = 3;
}
@@ -274,7 +274,7 @@
_from -= Interpreter::stackElementSize;
if (_fp_arg_mask) {
- unsigned index = __builtin_ctz(_fp_arg_mask);
+ unsigned index = __builtin_ctz(_fp_arg_mask);
_fp_args[index] = from_obj;
_fp_arg_mask ^= 1 << index;
_next_double_dex += index & 1;
@@ -285,7 +285,7 @@
virtual void pass_double()
{
- intptr_t high_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
+ intptr_t high_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
intptr_t low_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
_from -= 2*Interpreter::stackElementSize;
@@ -293,13 +293,13 @@
//We can allocate to a register.
int index = 2 * (_next_double_dex++);
_fp_arg_mask ^= 3 << index;
- _fp_args[index] = low_obj;
- _fp_args[index + 1] = high_obj;
+ _fp_args[index] = low_obj;
+ _fp_args[index + 1] = high_obj;
} else {
- _to = (intptr_t*)(((intptr_t)_to + 7) & ~7); // Align to eight bytes
- *_to++ = low_obj;
- *_to++ = high_obj;
- }
+ _to = (intptr_t*)(((intptr_t)_to + 7) & ~7); // Align to eight bytes
+ *_to++ = low_obj;
+ *_to++ = high_obj;
+ }
}
#else
virtual void pass_float() { pass_int(); }
@@ -312,7 +312,7 @@
{
_from = from;
_to = to;
- // See layout in interpreter_aarch32.cpp
+ // See layout in interpreter_aarch32.cpp
_int_args = to - (method->is_static() ? 19 : 20);
_fp_args = to - 16; //each slot is for a double
_fp_identifiers = to - 21;
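
As an aside on the spill path above: when a long or double cannot go in registers, the handler rounds _stack_offset up with (x + 7) & ~7 before the strd. A minimal standalone sketch of that align-up idiom (align_up_8 is an illustrative name, not HotSpot code):

#include <cassert>
#include <cstdint>

// Round an offset up to the next 8-byte boundary, as done before storing a
// 64-bit value (jlong/jdouble) into the outgoing argument area.
static inline uintptr_t align_up_8(uintptr_t offset) {
  return (offset + 7) & ~(uintptr_t)7;
}

int main() {
  assert(align_up_8(0)  == 0);
  assert(align_up_8(1)  == 8);
  assert(align_up_8(12) == 16);
  return 0;
}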
diff -r dc9b82a9fbc7 src/cpu/aarch32/vm/interpreter_aarch32.cpp
--- a/src/cpu/aarch32/vm/interpreter_aarch32.cpp Mon Jan 11 14:58:34 2016 +0000
+++ b/src/cpu/aarch32/vm/interpreter_aarch32.cpp Fri Jan 15 23:23:56 2016 +0800
@@ -58,8 +58,8 @@
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
address entry = __ pc();
- // The sp should be aligned on entry to the bottom of where the integer args
- // need to be copied to.
+ // The sp should be aligned on entry to the bottom of where the integer args
+ // need to be copied to.
// rmethod
// rlocals
@@ -128,7 +128,7 @@
// r13: sender sp
// esp: args
- //if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
+ //if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
// FIXME currently ignoring this flag and inlining anyway
// These don't need a safepoint check because they aren't virtually
diff -r dc9b82a9fbc7 src/cpu/aarch32/vm/jniFastGetField_aarch32.cpp
--- a/src/cpu/aarch32/vm/jniFastGetField_aarch32.cpp Mon Jan 11 14:58:34 2016 +0000
+++ b/src/cpu/aarch32/vm/jniFastGetField_aarch32.cpp Fri Jan 15 23:23:56 2016 +0800
@@ -49,8 +49,8 @@
Register result = c_rarg0;
Register robj = c_rarg1;
Register rcounter = c_rarg3;
- int args = RegSet::of(c_rarg0, c_rarg1, c_rarg2).bits();
- int nargs = 3;
+ int args = RegSet::of(c_rarg0, c_rarg1, c_rarg2).bits();
+ int nargs = 3;
const char *name;
switch (type) {
@@ -75,8 +75,8 @@
Label slow;
- __ lea(rcounter, SafepointSynchronize::safepoint_counter_addr());
- __ ldr(rcounter, rcounter);
+ __ lea(rcounter, SafepointSynchronize::safepoint_counter_addr());
+ __ ldr(rcounter, rcounter);
__ tst(rcounter, 1);
__ b(slow, Assembler::NE);
__ stmdb(sp, args);
@@ -95,16 +95,16 @@
case T_INT: __ ldr (result, Address(robj, c_rarg2, lsr(2))); break;
case T_BOOLEAN: __ ldrb(result, Address(robj, c_rarg2, lsr(2))); break;
default: {
- __ lsr(c_rarg2, c_rarg2, 2);
- switch(type) {
- case T_BYTE: __ ldrsb (result, Address(robj, c_rarg2)); break;
- case T_CHAR: __ ldrh (result, Address(robj, c_rarg2)); break;
- case T_SHORT: __ ldrsh (result, Address(robj, c_rarg2)); break;
- case T_DOUBLE:
- case T_LONG: __ ldrd (result, Address(robj, c_rarg2)); break;
- default: ShouldNotReachHere();
- }
- }
+ __ lsr(c_rarg2, c_rarg2, 2);
+ switch(type) {
+ case T_BYTE: __ ldrsb (result, Address(robj, c_rarg2)); break;
+ case T_CHAR: __ ldrh (result, Address(robj, c_rarg2)); break;
+ case T_SHORT: __ ldrsh (result, Address(robj, c_rarg2)); break;
+ case T_DOUBLE:
+ case T_LONG: __ ldrd (result, Address(robj, c_rarg2)); break;
+ default: ShouldNotReachHere();
+ }
+ }
}
__ lea(rscratch2, SafepointSynchronize::safepoint_counter_addr());
// rscratch2 is address dependent on result.
@@ -123,7 +123,7 @@
__ b(lr, Assembler::EQ);
// Restore args for slowcase call into the vm
- __ ldmia(sp, args);
+ __ ldmia(sp, args);
// Slowcase
slowcase_entry_pclist[count++] = __ pc();
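
The fast accessor being patched here follows a counter-validated read: load the safepoint counter, fall back to the slow path if the low bit is set, do the field load, then re-read the counter and only trust the result if it has not changed. A rough sketch of that protocol in plain C++ (safepoint_counter, slow_path and fast_get_int are illustrative names; the real stub is generated assembly):

#include <atomic>
#include <cstdint>
#include <cstdio>

static std::atomic<uint32_t> safepoint_counter{0};

// Stand-in for the VM call the generated stub falls back to.
static int32_t slow_path(const int32_t* field_addr) { return *field_addr; }

static int32_t fast_get_int(const int32_t* field_addr) {
  uint32_t before = safepoint_counter.load(std::memory_order_acquire);
  if (before & 1) return slow_path(field_addr);       // safepoint in progress
  int32_t value = *field_addr;                        // speculative load
  uint32_t after = safepoint_counter.load(std::memory_order_acquire);
  if (after != before) return slow_path(field_addr);  // counter moved, do not trust it
  return value;
}

int main() {
  int32_t field = 42;
  printf("%d\n", fast_get_int(&field));
  return 0;
}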
diff -r dc9b82a9fbc7 src/cpu/aarch32/vm/jniTypes_aarch32.hpp
--- a/src/cpu/aarch32/vm/jniTypes_aarch32.hpp Mon Jan 11 14:58:34 2016 +0000
+++ b/src/cpu/aarch32/vm/jniTypes_aarch32.hpp Fri Jan 15 23:23:56 2016 +0800
@@ -65,26 +65,26 @@
*(jlong*) (to + 1 + pos) = *from;
pos += 2;
}*/
- static inline void put_long(jlong from, intptr_t *to) {
- uint64_t val = from;
- uint64_t mask = (1LL << 32) - 1;
- val = (val >> 32) | ((val & mask) << 32);
+ static inline void put_long(jlong from, intptr_t *to) {
+ uint64_t val = from;
+ uint64_t mask = (1LL << 32) - 1;
+ val = (val >> 32) | ((val & mask) << 32);
*(jlong*)to = (jlong)val;
}
static inline void put_long(jlong from, intptr_t *to, int& pos) {
- uint64_t val = from;
- uint64_t mask = (1LL << 32) - 1;
- val = (val >> 32) | ((val & mask) << 32);
+ uint64_t val = from;
+ uint64_t mask = (1LL << 32) - 1;
+ val = (val >> 32) | ((val & mask) << 32);
*(jlong*) (to + pos) = (jlong)val;
pos += 2;
}
static inline void put_long(jlong *from, intptr_t *to, int& pos) {
- uint64_t val = *from;
- uint64_t mask = (1LL << 32) - 1;
- val = (val >> 32) | ((val & mask) << 32);
+ uint64_t val = *from;
+ uint64_t mask = (1LL << 32) - 1;
+ val = (val >> 32) | ((val & mask) << 32);
*(jlong*) (to + pos) = (jlong)val;
pos += 2;
@@ -121,24 +121,24 @@
}*/
static inline void put_double(jdouble from, intptr_t *to) {
- uint64_t val = *(uint64_t*)&from;
- uint64_t mask = (1LL << 32) - 1;
- val = (val >> 32) | ((val & mask) << 32);
+ uint64_t val = *(uint64_t*)&from;
+ uint64_t mask = (1LL << 32) - 1;
+ val = (val >> 32) | ((val & mask) << 32);
*(uint64_t*)to = val;
}
static inline void put_double(jdouble from, intptr_t *to, int& pos) {
- uint64_t val = *(uint64_t*)&from;
- uint64_t mask = (1LL << 32) - 1;
- val = (val >> 32) | ((val & mask) << 32);
+ uint64_t val = *(uint64_t*)&from;
+ uint64_t mask = (1LL << 32) - 1;
+ val = (val >> 32) | ((val & mask) << 32);
*(uint64_t*) (to + pos) = val;
pos += 2;
}
static inline void put_double(jdouble *from, intptr_t *to, int& pos) {
- uint64_t val = *(uint64_t*)from;
- uint64_t mask = (1LL << 32) - 1;
- val = (val >> 32) | ((val & mask) << 32);
+ uint64_t val = *(uint64_t*)from;
+ uint64_t mask = (1LL << 32) - 1;
+ val = (val >> 32) | ((val & mask) << 32);
*(uint64_t*) (to + pos) = val;
pos += 2;
}
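
For context, the put_long/put_double helpers above swap the two 32-bit halves of the value before storing it, evidently so the word order matches what the 32-bit interpreter expects for two-slot values. A standalone sketch of just that swap (swap_words is an illustrative name):

#include <cassert>
#include <cstdint>
#include <cstring>

// Swap the two 32-bit halves of a 64-bit value, as the put_long/put_double
// helpers do before storing into the argument area.
static inline uint64_t swap_words(uint64_t v) {
  const uint64_t mask = (1ULL << 32) - 1;
  return (v >> 32) | ((v & mask) << 32);
}

int main() {
  assert(swap_words(0x1122334455667788ULL) == 0x5566778811223344ULL);

  // Same trick applied to a double's bit pattern:
  double d = 3.5;
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  bits = swap_words(bits);
  (void)bits;
  return 0;
}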
diff -r dc9b82a9fbc7 src/cpu/aarch32/vm/macroAssembler_aarch32.cpp
--- a/src/cpu/aarch32/vm/macroAssembler_aarch32.cpp Mon Jan 11 14:58:34 2016 +0000
+++ b/src/cpu/aarch32/vm/macroAssembler_aarch32.cpp Fri Jan 15 23:23:56 2016 +0800
@@ -85,7 +85,7 @@
} else if (0b0011 == opc) {
// Movw, Movt or mov, orr, orr, orr
// patch up address load to registers (absolute address).
- instructions = patch_oop(branch, target) / NativeInstruction::instruction_size;
+ instructions = patch_oop(branch, target) / NativeInstruction::instruction_size;
} else if (0b010 == (opc >> 1)) {
// LDR, LDRB, STR, STRB
Instruction_aarch32::patch(branch, 11, 0, uabs(offset));
@@ -114,34 +114,34 @@
}
int MacroAssembler::patch_oop(address insn_addr, address o) {
- unsigned insn = *(unsigned*)insn_addr;
- int opc = Instruction_aarch32::extract(insn, 27, 21);
- if(0b0011000 == opc) {
- //32-bit pointers, formed of a mov and a movt
- assert(nativeInstruction_at(insn_addr+4)->is_movt(), "wrong insns in patch");
-
- uint32_t btm = (uint32_t)o & 0xffff;
- Instruction_aarch32::patch(insn_addr, 19, 16, btm >> 12);
- Instruction_aarch32::patch(insn_addr, 11, 0, btm & 0xfff);
- uint32_t top = (uint32_t)o >> 16;
- Instruction_aarch32::patch(insn_addr + 4, 19, 16, top >> 12);
- Instruction_aarch32::patch(insn_addr + 4, 11, 0, top & 0xfff);
- return 2 * NativeInstruction::instruction_size;
+ unsigned insn = *(unsigned*)insn_addr;
+ int opc = Instruction_aarch32::extract(insn, 27, 21);
+ if(0b0011000 == opc) {
+ //32-bit pointers, formed of a mov and a movt
+ assert(nativeInstruction_at(insn_addr+4)->is_movt(), "wrong insns in patch");
+
+ uint32_t btm = (uint32_t)o & 0xffff;
+ Instruction_aarch32::patch(insn_addr, 19, 16, btm >> 12);
+ Instruction_aarch32::patch(insn_addr, 11, 0, btm & 0xfff);
+ uint32_t top = (uint32_t)o >> 16;
+ Instruction_aarch32::patch(insn_addr + 4, 19, 16, top >> 12);
+ Instruction_aarch32::patch(insn_addr + 4, 11, 0, top & 0xfff);
+ return 2 * NativeInstruction::instruction_size;
} else if(0b0011101 == opc) {
- //Instead 32bit load sequence uses mov, orr, orr, orr
- assert(nativeInstruction_at(insn_addr+4 )->is_orr(), "wrong insns in patch");
- assert(nativeInstruction_at(insn_addr+8 )->is_orr(), "wrong insns in patch");
- assert(nativeInstruction_at(insn_addr+12)->is_orr(), "wrong insns in patch");
- // FIXME this could carry us outside valid memory
-
- uint32_t addr = (uint32_t)o;
- Instruction_aarch32::patch(insn_addr + 0, 11, 0, (0b0000 << 8) | ((addr >> 0) & 0xff));
- Instruction_aarch32::patch(insn_addr + 4, 11, 0, (0b1100 << 8) | ((addr >> 8) & 0xff));
- Instruction_aarch32::patch(insn_addr + 8, 11, 0, (0b1000 << 8) | ((addr >> 16) & 0xff));
- Instruction_aarch32::patch(insn_addr + 12, 11, 0, (0b0100 << 8) | ((addr >> 24) & 0xff));
- return 4 * NativeInstruction::instruction_size;
+ //Instead 32bit load sequence uses mov, orr, orr, orr
+ assert(nativeInstruction_at(insn_addr+4 )->is_orr(), "wrong insns in patch");
+ assert(nativeInstruction_at(insn_addr+8 )->is_orr(), "wrong insns in patch");
+ assert(nativeInstruction_at(insn_addr+12)->is_orr(), "wrong insns in patch");
+ // FIXME this could carry us outside valid memory
+
+ uint32_t addr = (uint32_t)o;
+ Instruction_aarch32::patch(insn_addr + 0, 11, 0, (0b0000 << 8) | ((addr >> 0) & 0xff));
+ Instruction_aarch32::patch(insn_addr + 4, 11, 0, (0b1100 << 8) | ((addr >> 8) & 0xff));
+ Instruction_aarch32::patch(insn_addr + 8, 11, 0, (0b1000 << 8) | ((addr >> 16) & 0xff));
+ Instruction_aarch32::patch(insn_addr + 12, 11, 0, (0b0100 << 8) | ((addr >> 24) & 0xff));
+ return 4 * NativeInstruction::instruction_size;
} else {
- ShouldNotReachHere();
+ ShouldNotReachHere();
}
return 0; //won't reach here
}
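
For the movw/movt case above: each of the two instructions carries a 16-bit immediate split into a 4-bit field at bits 19:16 and a 12-bit field at bits 11:0, with the low half of the pointer patched into the movw and the high half into the movt. A rough sketch of that field packing in plain C++ (patch_imm16 is an illustrative helper, not the Instruction_aarch32 API):

#include <cassert>
#include <cstdint>

// Pack a 16-bit immediate into the imm4:imm12 fields of an ARM MOVW/MOVT
// encoding (bits 19:16 and 11:0), leaving the rest of the word untouched.
static inline uint32_t patch_imm16(uint32_t insn, uint16_t imm16) {
  insn &= ~((0xFu << 16) | 0xFFFu);        // clear imm4 and imm12
  insn |= (uint32_t)(imm16 >> 12) << 16;   // imm4  = top 4 bits
  insn |= (uint32_t)(imm16 & 0xFFF);       // imm12 = low 12 bits
  return insn;
}

int main() {
  uint32_t addr = 0x12345678;
  uint32_t movw = patch_imm16(0, (uint16_t)(addr & 0xFFFF)); // low half
  uint32_t movt = patch_imm16(0, (uint16_t)(addr >> 16));    // high half
  assert(((movw >> 16) & 0xF) == 0x5 && (movw & 0xFFF) == 0x678);
  assert(((movt >> 16) & 0xF) == 0x1 && (movt & 0xFFF) == 0x234);
  return 0;
}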
@@ -157,28 +157,28 @@
unsigned *insn_buf = (unsigned*)insn_addr;
int opc2 = Instruction_aarch32::extract(insn, 23, 21);
if(0b000 == opc2) {
- // movw, movt (only on newer ARMs)
- assert(nativeInstruction_at(&insn_buf[1])->is_movt(), "wrong insns in patch");
- u_int32_t addr;
- addr = Instruction_aarch32::extract(insn_buf[1], 19, 16) << 28;
- addr |= Instruction_aarch32::extract(insn_buf[1], 11, 0) << 16;
- addr |= Instruction_aarch32::extract(insn_buf[0], 19, 16) << 12;
- addr |= Instruction_aarch32::extract(insn_buf[0], 11, 0);
- return address(addr);
- } else if(0b101 == opc2) {
- // mov, orr, orr, orr
- assert(nativeInstruction_at(&insn_buf[1])->is_orr(), "wrong insns in patch");
- assert(nativeInstruction_at(&insn_buf[2])->is_orr(), "wrong insns in patch");
- assert(nativeInstruction_at(&insn_buf[3])->is_orr(), "wrong insns in patch");
- u_int32_t addr;
- // TODO Check that the rotations are in the expected order.
- addr = Instruction_aarch32::extract(insn_buf[0], 7, 0) << 0;
- addr |= Instruction_aarch32::extract(insn_buf[1], 7, 0) << 8;
- addr |= Instruction_aarch32::extract(insn_buf[2], 7, 0) << 16;
- addr |= Instruction_aarch32::extract(insn_buf[3], 7, 0) << 24;
- return address(addr);
+ // movw, movt (only on newer ARMs)
+ assert(nativeInstruction_at(&insn_buf[1])->is_movt(), "wrong insns in patch");
+ u_int32_t addr;
+ addr = Instruction_aarch32::extract(insn_buf[1], 19, 16) << 28;
+ addr |= Instruction_aarch32::extract(insn_buf[1], 11, 0) << 16;
+ addr |= Instruction_aarch32::extract(insn_buf[0], 19, 16) << 12;
+ addr |= Instruction_aarch32::extract(insn_buf[0], 11, 0);
+ return address(addr);
+ } else if(0b101 == opc2) {
+ // mov, orr, orr, orr
+ assert(nativeInstruction_at(&insn_buf[1])->is_orr(), "wrong insns in patch");
+ assert(nativeInstruction_at(&insn_buf[2])->is_orr(), "wrong insns in patch");
+ assert(nativeInstruction_at(&insn_buf[3])->is_orr(), "wrong insns in patch");
+ u_int32_t addr;
+ // TODO Check that the rotations are in the expected order.
+ addr = Instruction_aarch32::extract(insn_buf[0], 7, 0) << 0;
+ addr |= Instruction_aarch32::extract(insn_buf[1], 7, 0) << 8;
+ addr |= Instruction_aarch32::extract(insn_buf[2], 7, 0) << 16;
+ addr |= Instruction_aarch32::extract(insn_buf[3], 7, 0) << 24;
+ return address(addr);
} else {
- ShouldNotReachHere();
+ ShouldNotReachHere();
}
} else if (0b010 == (opc >> 1)) {
// LDR, LDRB, STR, STRB
@@ -570,8 +570,8 @@
set_last_Java_frame(last_java_sp, rfp, l, rscratch2);
- // FIXME - Can save lr in more elegant way ?
- //str(lr, pre(sp, -wordSize));
+ // FIXME - Can save lr in more elegant way ?
+ //str(lr, pre(sp, -wordSize));
// do the call, remove parameters
MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);
@@ -1591,7 +1591,7 @@
void MacroAssembler::debug32(char* msg, int32_t pc, int32_t regs[])
{
- print_unseen_bytecodes();
+ print_unseen_bytecodes();
// In order to get locks to work, we need to fake a in_VM state
if (ShowMessageBoxOnError) {
JavaThread* thread = JavaThread::current();
@@ -2238,57 +2238,57 @@
}
void MacroAssembler::divide32(Register res, Register num, Register den, bool want_mod) {
- Register cnt = rscratch1;
- Register mod = rscratch2;
- Register sign = r14;
- assert_different_registers(num, den, rscratch1, rscratch2, r14);
-
- // FIXME This works by first converting any negative values to positive ones, however
- // it is not possible to express |INT_MIN|. Need to fix this
-
- //Convert to positive values
- mov(sign, 0);
-
- cmp(num, 0);
- mov(sign, 1, MI);
- rsb(num, num, 0, MI);
-
- cmp(den, 0);
- if(!want_mod) eor(sign, sign, 1, MI);
- rsb(den, den, 0, MI);
-
- // Algorithm from
- // http://www.chiark.greenend.org.uk/~theom/riscos/docs/ultimate/a252div.txt
- // Graeme Williams
- mov(cnt, 28);
- mov(mod, num, lsr(4));
- cmp(den, mod, lsr(12));
- sub(cnt, cnt, 16, Assembler::LE);
- mov(mod, mod, lsr(16), Assembler::LE);
- cmp(den, mod, lsr(4));
- sub(cnt, cnt, 8, Assembler::LE);
- mov(mod, mod, lsr(8), Assembler::LE);
- cmp(den, mod);
- sub(cnt, cnt, 4, Assembler::LE);
- mov(mod, mod, lsr(4), Assembler::LE);
- mov(num, num, lsl(cnt));
- rsb(den, den, 0);
-
- adds(num, num, num);
- //Now skip over cnt copies of the 3 instr. loop.
- add(cnt, cnt, cnt, lsl(1));
- add(r15_pc, r15_pc, cnt, lsl(2));
- mov(r0, r0);
-
- for(int i = 0; i < 32; i++) {
- adcs(mod, den, mod, lsl(1));
- sub(mod, mod, den, Assembler::LO);
- adcs(num, num, num);
- }
-
- cmp(sign, 0);
- rsb(res, want_mod? mod : num, 0, NE);
- mov(res, want_mod? mod : num, EQ);
+ Register cnt = rscratch1;
+ Register mod = rscratch2;
+ Register sign = r14;
+ assert_different_registers(num, den, rscratch1, rscratch2, r14);
+
+ // FIXME This works by first converting any negative values to positive ones, however
+ // it is not possible to express |INT_MIN|. Need to fix this
+
+ //Convert to positive values
+ mov(sign, 0);
+
+ cmp(num, 0);
+ mov(sign, 1, MI);
+ rsb(num, num, 0, MI);
+
+ cmp(den, 0);
+ if(!want_mod) eor(sign, sign, 1, MI);
+ rsb(den, den, 0, MI);
+
+ // Algorithm from
+ // http://www.chiark.greenend.org.uk/~theom/riscos/docs/ultimate/a252div.txt
+ // Graeme Williams
+ mov(cnt, 28);
+ mov(mod, num, lsr(4));
+ cmp(den, mod, lsr(12));
+ sub(cnt, cnt, 16, Assembler::LE);
+ mov(mod, mod, lsr(16), Assembler::LE);
+ cmp(den, mod, lsr(4));
+ sub(cnt, cnt, 8, Assembler::LE);
+ mov(mod, mod, lsr(8), Assembler::LE);
+ cmp(den, mod);
+ sub(cnt, cnt, 4, Assembler::LE);
+ mov(mod, mod, lsr(4), Assembler::LE);
+ mov(num, num, lsl(cnt));
+ rsb(den, den, 0);
+
+ adds(num, num, num);
+ //Now skip over cnt copies of the 3 instr. loop.
+ add(cnt, cnt, cnt, lsl(1));
+ add(r15_pc, r15_pc, cnt, lsl(2));
+ mov(r0, r0);
+
+ for(int i = 0; i < 32; i++) {
+ adcs(mod, den, mod, lsl(1));
+ sub(mod, mod, den, Assembler::LO);
+ adcs(num, num, num);
+ }
+
+ cmp(sign, 0);
+ rsb(res, want_mod? mod : num, 0, NE);
+ mov(res, want_mod? mod : num, EQ);
}
@@ -2297,7 +2297,7 @@
// <Rd> = <Rn> / <Rm>
// <Rd> = <Rn> % <Rm>
void MacroAssembler::divide(Register Rd, Register Rn, Register Rm, int width, bool want_remainder) {
- //Dispatch to best possible
+ //Dispatch to best possible
Register Rdh = (Register)(Rd->encoding_nocheck() + 1);
Register Rnh = (Register)(Rn->encoding_nocheck() + 1);
Register Rmh = (Register)(Rm->encoding_nocheck() + 1);
@@ -2306,75 +2306,75 @@
bool is64b = 64 == width;
if(is64b) {
- assert_different_registers(Rn, Rnh, Rm, Rmh, rscratch1, rscratch2);
+ assert_different_registers(Rn, Rnh, Rm, Rmh, rscratch1, rscratch2);
} else {
- assert_different_registers(Rn, Rm, rscratch1, rscratch2, r14); // r14 used by divide32
+ assert_different_registers(Rn, Rm, rscratch1, rscratch2, r14); // r14 used by divide32
}
if(!is64b && VM_Version::features() & FT_HW_DIVIDE) {
- // Emit a hw instruction sequnce.
- if(want_remainder) {
- sdiv(rscratch1, Rn, Rm);
- mls(Rd, rscratch1, Rm, Rn);
- } else {
- sdiv(Rd, Rn, Rm);
- }
+ // Emit a hw instruction sequnce.
+ if(want_remainder) {
+ sdiv(rscratch1, Rn, Rm);
+ mls(Rd, rscratch1, Rm, Rn);
+ } else {
+ sdiv(Rd, Rn, Rm);
+ }
} else if(!is64b) {
- // Fall back to assembly software routine
- divide32(Rd, Rn, Rm, want_remainder);
+ // Fall back to assembly software routine
+ divide32(Rd, Rn, Rm, want_remainder);
} else {
- // Fall back to C software routine for
- // 64 bit divide/mod
- if(Rn != r0) {
- mov(rscratch1, Rm);
- mov(rscratch2, Rmh);
-
- mov(r0, Rn);
- mov(r1, Rnh);
-
- mov(r2, rscratch1);
- mov(r3, rscratch2);
- } else if(Rm != r2) {
- mov(r2, Rm);
- mov(r3, Rmh);
- }
- address function;
- if(want_remainder) function = (address)internal_lmod;
- else function = (address)internal_ldiv;
-
- mov(rscratch1, function);
- bl(rscratch1);
- if(Rd != r0) {
- mov(Rd, r0);
- if(is64b) mov(Rdh, r1);
- }
+ // Fall back to C software routine for
+ // 64 bit divide/mod
+ if(Rn != r0) {
+ mov(rscratch1, Rm);
+ mov(rscratch2, Rmh);
+
+ mov(r0, Rn);
+ mov(r1, Rnh);
+
+ mov(r2, rscratch1);
+ mov(r3, rscratch2);
+ } else if(Rm != r2) {
+ mov(r2, Rm);
+ mov(r3, Rmh);
+ }
+ address function;
+ if(want_remainder) function = (address)internal_lmod;
+ else function = (address)internal_ldiv;
+
+ mov(rscratch1, function);
+ bl(rscratch1);
+ if(Rd != r0) {
+ mov(Rd, r0);
+ if(is64b) mov(Rdh, r1);
+ }
}
}
void MacroAssembler::extract_bits(Register dest, Register source, int lsb, int width) {
- assert(lsb >= 0 && lsb + width <= 32 && width != 0, "Invalid lsb/width");
- // Dispatch to the best sequence
- if(0 == (lsb & 7) && (width == 8 || width == 16 || width == 32)) {
- // Can use extend X
- switch(width){
- case 8: uxtb(dest, source, ror(lsb)); break;
- case 16: uxth(dest, source, ror(lsb)); break;
- default: break;
- }
- } else if(VM_Version::features() & (FT_ARMV7 | FT_ARMV6T2)) {
- ubfx(dest, source, lsb, width);
- } else {
- // Do two shifts
- lsl(dest, source, 32 - (width + lsb));
- lsr(dest, dest, 32 - width);
- }
+ assert(lsb >= 0 && lsb + width <= 32 && width != 0, "Invalid lsb/width");
+ // Dispatch to the best sequence
+ if(0 == (lsb & 7) && (width == 8 || width == 16 || width == 32)) {
+ // Can use extend X
+ switch(width){
+ case 8: uxtb(dest, source, ror(lsb)); break;
+ case 16: uxth(dest, source, ror(lsb)); break;
+ default: break;
+ }
+ } else if(VM_Version::features() & (FT_ARMV7 | FT_ARMV6T2)) {
+ ubfx(dest, source, lsb, width);
+ } else {
+ // Do two shifts
+ lsl(dest, source, 32 - (width + lsb));
+ lsr(dest, dest, 32 - width);
+ }
}
void MacroAssembler::atomic_ldrd(Register Rt, Register Rt2, Register Rbase) {
assert(Rt->encoding_nocheck() % 2 == 0, "Must be an even register");
assert((Register) (Rt + 1) == Rt2, "Must be contiguous");
- if (VM_Version::features() & FT_SINGLE_CORE) {
+ if(VM_Version::features() & FT_SINGLE_CORE) {
ldrd(Rt, Rbase);
} else if (VM_Version::features() & (FT_ARMV7 | FT_ARMV6K)) {
#ifdef ASSERT
@@ -2389,7 +2389,7 @@
} else {
// TODO: Find Java way of logging
static bool warning_printed = false;
- if (!warning_printed) {
+ if(!warning_printed) {
fprintf(stderr, "Unable to provide atomic doubleword load.\n");
warning_printed = true;
}
@@ -2403,7 +2403,7 @@
assert((Register) (Rt + 1) == Rt2, "Must be contiguous");
assert((Register) (temp + 1) == temp2, "Must be contiguous");
assert_different_registers(temp, Rt, Rbase, temp2);
- if (VM_Version::features() & FT_SINGLE_CORE) {
+ if(VM_Version::features() & FT_SINGLE_CORE) {
strd(Rt, Rbase);
} else if (VM_Version::features() & (FT_ARMV7 | FT_ARMV6K)) {
// First need to gain exclusive access
@@ -2423,7 +2423,7 @@
} else {
// TODO: Find Java way of logging
static bool warning_printed = false;
- if (!warning_printed) {
+ if(!warning_printed) {
fprintf(stderr, "Unable to provide atomic doubleword store.\n");
warning_printed = true;
}
@@ -2470,49 +2470,49 @@
int bytecodes_seen[256];
void MacroAssembler::init_unseen_bytecodes() {
- for(int i = 0; i < 256; i++ ) {
- bytecodes_seen[i] = 0;
- }
+ for(int i = 0; i < 256; i++ ) {
+ bytecodes_seen[i] = 0;
+ }
}
void MacroAssembler::bytecode_seen(Register bc_reg, Register scratch) {
- if(ENABLE_DEBUGGING) {
- mov(scratch, (address)bytecodes_seen);
- add(scratch, scratch, bc_reg, lsl(2));
- add(bc_reg, bc_reg, 1);
- str(bc_reg, Address(scratch));
- sub(bc_reg, bc_reg, 1);
- }
+ if(ENABLE_DEBUGGING) {
+ mov(scratch, (address)bytecodes_seen);
+ add(scratch, scratch, bc_reg, lsl(2));
+ add(bc_reg, bc_reg, 1);
+ str(bc_reg, Address(scratch));
+ sub(bc_reg, bc_reg, 1);
+ }
}
void MacroAssembler::print_unseen_bytecodes() {
- if(ENABLE_DEBUGGING) {
- printf("=== Unseen bytecodes ===\n");
- for(int i = 0; i < N_J_BYTECODES; i++) {
- if(0 == bytecodes_seen[i]) {
- printf("\t%s\n", j_bytecodes[i]);
- }
- }
- printf("=== End unseen ===\n");
- } else {
- printf("Not kept track, enable debugging to view info\n");
- }
- fflush(stdout);
+ if(ENABLE_DEBUGGING) {
+ printf("=== Unseen bytecodes ===\n");
+ for(int i = 0; i < N_J_BYTECODES; i++) {
+ if(0 == bytecodes_seen[i]) {
+ printf("\t%s\n", j_bytecodes[i]);
+ }
+ }
+ printf("=== End unseen ===\n");
+ } else {
+ printf("Not kept track, enable debugging to view info\n");
+ }
+ fflush(stdout);
}
int machine_state_regset = 0b0101111111111111;
int machine_state_float_regset = 0b11;
void MacroAssembler::save_machine_state() {
- stmdb(sp, machine_state_regset);
- vstmdb_f64(sp, machine_state_float_regset);
- enter();
+ stmdb(sp, machine_state_regset);
+ vstmdb_f64(sp, machine_state_float_regset);
+ enter();
}
void MacroAssembler::restore_machine_state() {
- leave();
- vldmia_f64(sp, machine_state_float_regset);
- ldmia(sp, machine_state_regset);
+ leave();
+ vldmia_f64(sp, machine_state_float_regset);
+ ldmia(sp, machine_state_regset);
}
void internal_internal_printf(const char *fmt, ...) {
@@ -2524,108 +2524,108 @@
}
void internal_printf(const char *format, uint32_t a, uint32_t b, uint32_t c) {
- char buf[2048];
- char fmt[2048];
- buf[0] = '\0';
- const char *thread_str = "THREAD 0x%08x : ";
- int id = pthread_self();
- strcpy(fmt, format);
-
- char *str = strtok(fmt, "\n");
- int nreplace = 0;
- while(str) {
- strcpy(buf, thread_str);
- strcat(buf, str);
- strcat(buf, "\n");
- internal_internal_printf((const char*)buf, id, a, b, c);
- str = strtok(NULL, "\n");
- }
-}
-
-void MacroAssembler::get_bytecode(Register dst, Register bc) {
- if(ENABLE_DEBUGGING) {
- int nbytecodes = N_J_BYTECODES;
- mov(dst, (address)j_bytecodes);
- cmp(bc, nbytecodes);
-
- ldr(dst, Address(dst, bc, lsl(2)), Assembler::LT);
- ldr(dst, Address(dst, wordSize * nbytecodes), Assembler::GE);
+ char buf[2048];
+ char fmt[2048];
+ buf[0] = '\0';
+ const char *thread_str = "THREAD 0x%08x : ";
+ int id = pthread_self();
+ strcpy(fmt, format);
+
+ char *str = strtok(fmt, "\n");
+ int nreplace = 0;
+ while(str) {
+ strcpy(buf, thread_str);
+ strcat(buf, str);
+ strcat(buf, "\n");
+ internal_internal_printf((const char*)buf, id, a, b, c);
+ str = strtok(NULL, "\n");
}
}
+void MacroAssembler::get_bytecode(Register dst, Register bc) {
+ if(ENABLE_DEBUGGING) {
+ int nbytecodes = N_J_BYTECODES;
+ mov(dst, (address)j_bytecodes);
+ cmp(bc, nbytecodes);
+
+ ldr(dst, Address(dst, bc, lsl(2)), Assembler::LT);
+ ldr(dst, Address(dst, wordSize * nbytecodes), Assembler::GE);
+ }
+}
+
int invocation_depth_count = -1; //TODO remove this with debugging info
#define MAX_FCALL_DEPTH 4096
struct thread_method_record{
- int thread_id;
- char names[MAX_FCALL_DEPTH][512];
- int invocation_depth_count;
+ int thread_id;
+ char names[MAX_FCALL_DEPTH][512];
+ int invocation_depth_count;
};
int ntmrs = 0;
#define MAX_TMRS 10
thread_method_record tmr_list[MAX_TMRS];
void push_tmr(Method *meth, int *thread_id, int *invocation_depth_count, char **name) {
- int id = pthread_self();
- *thread_id = id;
- for(int i = 0; i < ntmrs; i++) {
- thread_method_record *tmr = &tmr_list[i];
- if(id == tmr->thread_id) {
- // Add a new frame
- if(tmr->invocation_depth_count >= -1 &&
- tmr->invocation_depth_count < (MAX_FCALL_DEPTH - 1)) {
- *invocation_depth_count = ++(tmr->invocation_depth_count);
- *name = tmr->names[tmr->invocation_depth_count];
- meth->name_and_sig_as_C_string(tmr->names[tmr->invocation_depth_count], 512);
- return;
- } else {
- fprintf(stderr, "%s : Invalid fcall depth index, %d\n", __FUNCTION__, tmr->invocation_depth_count);
- exit(1);
- }
- }
- }
- // Add a new thread
- if(ntmrs >= MAX_TMRS) {
- fprintf(stderr, "Too many tmrs\n");
- exit(1);
- }
- //Create a new tmr
- tmr_list[ntmrs].thread_id = id;
- tmr_list[ntmrs].invocation_depth_count = 0;
- meth->name_and_sig_as_C_string(tmr_list[ntmrs].names[0], 512);
+ int id = pthread_self();
+ *thread_id = id;
+ for(int i = 0; i < ntmrs; i++) {
+ thread_method_record *tmr = &tmr_list[i];
+ if(id == tmr->thread_id) {
+ // Add a new frame
+ if(tmr->invocation_depth_count >= -1 &&
+ tmr->invocation_depth_count < (MAX_FCALL_DEPTH - 1)) {
+ *invocation_depth_count = ++(tmr->invocation_depth_count);
+ *name = tmr->names[tmr->invocation_depth_count];
+ meth->name_and_sig_as_C_string(tmr->names[tmr->invocation_depth_count], 512);
+ return;
+ } else {
+ fprintf(stderr, "%s : Invalid fcall depth index, %d\n", __FUNCTION__, tmr->invocation_depth_count);
+ exit(1);
+ }
+ }
+ }
+ // Add a new thread
+ if(ntmrs >= MAX_TMRS) {
+ fprintf(stderr, "Too many tmrs\n");
+ exit(1);
+ }
+ //Create a new tmr
+ tmr_list[ntmrs].thread_id = id;
+ tmr_list[ntmrs].invocation_depth_count = 0;
+ meth->name_and_sig_as_C_string(tmr_list[ntmrs].names[0], 512);
+ *invocation_depth_count = 0;
+ *name = tmr_list[ntmrs].names[0];
+ ntmrs++;
+}
+
+void pop_tmr(int *thread_id, int *invocation_depth_count, char **name) {
+ int id = pthread_self();
+ *thread_id = id;
+ for(int i = 0; i < ntmrs; i++) {
+ thread_method_record *tmr = &tmr_list[i];
+ if(id == tmr->thread_id) {
+ if(tmr->invocation_depth_count >= 0 &&
+ tmr->invocation_depth_count < MAX_FCALL_DEPTH) {
+ // Pop frame
+ *name = tmr->names[tmr->invocation_depth_count];
+ *invocation_depth_count = (tmr->invocation_depth_count)--;
+ return;
+ } else if ( -1 == tmr->invocation_depth_count) {
+ *name = (char*)"JVM-EXCEPTION-EXIT:(NOT-REALLY-A-FRAME)";
*invocation_depth_count = 0;
- *name = tmr_list[ntmrs].names[0];
- ntmrs++;
+ return;
+ } else {
+ fprintf(stderr, "%s : Invalid fcall depth index, %d\n", __FUNCTION__, tmr->invocation_depth_count);
+ exit(1);
+ }
+ }
+ }
+ fprintf(stderr, "Unable to find suitable tmr\n");
+ exit(1);
}
-void pop_tmr(int *thread_id, int *invocation_depth_count, char **name) {
- int id = pthread_self();
- *thread_id = id;
- for(int i = 0; i < ntmrs; i++) {
- thread_method_record *tmr = &tmr_list[i];
- if(id == tmr->thread_id) {
- if(tmr->invocation_depth_count >= 0 &&
- tmr->invocation_depth_count < MAX_FCALL_DEPTH) {
- // Pop frame
- *name = tmr->names[tmr->invocation_depth_count];
- *invocation_depth_count = (tmr->invocation_depth_count)--;
- return;
- } else if ( -1 == tmr->invocation_depth_count) {
- *name = (char*)"JVM-EXCEPTION-EXIT:(NOT-REALLY-A-FRAME)";
- *invocation_depth_count = 0;
- return;
- } else {
- fprintf(stderr, "%s : Invalid fcall depth index, %d\n", __FUNCTION__, tmr->invocation_depth_count);
- exit(1);
- }
- }
- }
- fprintf(stderr, "Unable to find suitable tmr\n");
- exit(1);
-}
-
void prepare_entry_exit_prefix(char *buf, int id, int invocation_depth_count) {
- sprintf(buf, "THREAD 0x%08x : ", id);
+ sprintf(buf, "THREAD 0x%08x : ", id);
for(int i = 0; i < invocation_depth_count; i++) {
strcat(buf, " ");
}
@@ -2633,132 +2633,132 @@
void print_entry(Method *meth, int native) {
- char *name;
- int invocation_depth_count, id;
- push_tmr(meth, &id, &invocation_depth_count, &name);
-
- if(MacroAssembler::enable_method_debug) {
- char buf[4096], buf_b[2048];
- prepare_entry_exit_prefix(buf, id, invocation_depth_count);
- if(native) {
- sprintf(buf_b, "CALL NATIVE : %s\n", name);
- } else {
- sprintf(buf_b, "CALL JAVA : %s\n", name);
- }
- strcat(buf, buf_b);
- printf("%s", buf);
- fflush(stdout);
- }
+ char *name;
+ int invocation_depth_count, id;
+ push_tmr(meth, &id, &invocation_depth_count, &name);
+
+ if(MacroAssembler::enable_method_debug) {
+ char buf[4096], buf_b[2048];
+ prepare_entry_exit_prefix(buf, id, invocation_depth_count);
+ if(native) {
+ sprintf(buf_b, "CALL NATIVE : %s\n", name);
+ } else {
+ sprintf(buf_b, "CALL JAVA : %s\n", name);
+ }
+ strcat(buf, buf_b);
+ printf("%s", buf);
+ fflush(stdout);
+ }
}
void print_exit(bool normal) {
- char *name;
- int invocation_depth_count, id;
- pop_tmr(&id, &invocation_depth_count, &name);
-
- if(MacroAssembler::enable_method_debug) {
- char buf[4096], buf_b[2048];
- prepare_entry_exit_prefix(buf, id, invocation_depth_count);
- sprintf(buf_b, normal ? "EXIT : %s\n" : "EXCPN EXIT : %s\n", name);
- strcat(buf, buf_b);
- printf("%s", buf);
- fflush(stdout);
- }
+ char *name;
+ int invocation_depth_count, id;
+ pop_tmr(&id, &invocation_depth_count, &name);
+
+ if(MacroAssembler::enable_method_debug) {
+ char buf[4096], buf_b[2048];
+ prepare_entry_exit_prefix(buf, id, invocation_depth_count);
+ sprintf(buf_b, normal ? "EXIT : %s\n" : "EXCPN EXIT : %s\n", name);
+ strcat(buf, buf_b);
+ printf("%s", buf);
+ fflush(stdout);
+ }
}
void MacroAssembler::print_method_entry(Register rmethod, bool native) {
- if(ENABLE_DEBUGGING) {
- save_machine_state();
-
- bic(sp, sp, 7); // 8-byte align stack
- mov(rscratch2, (address)print_entry);
- mov(r0, rmethod);
- mov(r1, native);
- bl(rscratch2);
-
- restore_machine_state();
- }
+ if(ENABLE_DEBUGGING) {
+ save_machine_state();
+
+ bic(sp, sp, 7); // 8-byte align stack
+ mov(rscratch2, (address)print_entry);
+ mov(r0, rmethod);
+ mov(r1, native);
+ bl(rscratch2);
+
+ restore_machine_state();
+ }
}
void MacroAssembler::print_method_exit(bool normal) {
- if(ENABLE_DEBUGGING) {
- save_machine_state();
-
- bic(sp, sp, 7); // 8-byte align stack
- mov(rscratch2, (address)print_exit);
- mov(r0, normal);
- bl(rscratch2);
-
- restore_machine_state();
- }
+ if(ENABLE_DEBUGGING) {
+ save_machine_state();
+
+ bic(sp, sp, 7); // 8-byte align stack
+ mov(rscratch2, (address)print_exit);
+ mov(r0, normal);
+ bl(rscratch2);
+
+ restore_machine_state();
+ }
}
void MacroAssembler::reg_printf_internal(bool important, const char *fmt, Register ra, Register rb, Register rc) {
- if(ENABLE_DEBUGGING) {
- Label skip;
- save_machine_state();
-
- str(ra, Address(pre(sp, -wordSize)));
- str(rb, Address(pre(sp, -wordSize)));
- str(rc, Address(pre(sp, -wordSize)));
-
- if(!important) {
- mov(r0, (address)&enable_debug);
- ldr(r0, Address(r0));
- cmp(r0, 0);
- b(skip, Assembler::EQ);
- }
-
- int sp_difference = wordSize * (count_bits(machine_state_regset) +
- 2 * count_bits(machine_state_float_regset) +
- 2 + 3); //Frame entry and saved
-
- mov(r0, (address)fmt);
- if(ra != sp) ldr(r1, Address(sp, 2 * wordSize));
- else add(r1, sp, sp_difference);
-
- if(rb != sp) ldr(r2, Address(sp, wordSize));
- else add(r2, sp, sp_difference);
-
- if(rc != sp) ldr(r3, Address(sp));
- else add(r3, sp, sp_difference);
-
- bic(sp, sp, 7); // 8-byte align stack
-
- mov(rscratch2, (address)internal_printf);
- bl(rscratch2);
-
- bind(skip);
- restore_machine_state();
- }
+ if(ENABLE_DEBUGGING) {
+ Label skip;
+ save_machine_state();
+
+ str(ra, Address(pre(sp, -wordSize)));
+ str(rb, Address(pre(sp, -wordSize)));
+ str(rc, Address(pre(sp, -wordSize)));
+
+ if(!important) {
+ mov(r0, (address)&enable_debug);
+ ldr(r0, Address(r0));
+ cmp(r0, 0);
+ b(skip, Assembler::EQ);
+ }
+
+ int sp_difference = wordSize * (count_bits(machine_state_regset) +
+ 2 * count_bits(machine_state_float_regset) +
+ 2 + 3); //Frame entry and saved
+
+ mov(r0, (address)fmt);
+ if(ra != sp) ldr(r1, Address(sp, 2 * wordSize));
+ else add(r1, sp, sp_difference);
+
+ if(rb != sp) ldr(r2, Address(sp, wordSize));
+ else add(r2, sp, sp_difference);
+
+ if(rc != sp) ldr(r3, Address(sp));
+ else add(r3, sp, sp_difference);
+
+ bic(sp, sp, 7); // 8-byte align stack
+
+ mov(rscratch2, (address)internal_printf);
+ bl(rscratch2);
+
+ bind(skip);
+ restore_machine_state();
+ }
}
void MacroAssembler::reg_printf(const char *fmt, Register ra, Register rb, Register rc) {
- reg_printf_internal(false, fmt, ra, rb, rc);
+ reg_printf_internal(false, fmt, ra, rb, rc);
}
void MacroAssembler::reg_printf_important(const char *fmt, Register ra, Register rb, Register rc) {
- reg_printf_internal(true, fmt, ra, rb, rc);
+ reg_printf_internal(true, fmt, ra, rb, rc);
}
// When debugging, set the break on bkpnt
void bkpnt() { return; }
void MacroAssembler::create_breakpoint() {
- if(ENABLE_DEBUGGING) {
- save_machine_state();
- bic(sp, sp, 7); // 8-byte align stack
-
- mov(rscratch2, (address) bkpnt);
- bl(rscratch2);
-
- restore_machine_state();
- }
+ if(ENABLE_DEBUGGING) {
+ save_machine_state();
+ bic(sp, sp, 7); // 8-byte align stack
+
+ mov(rscratch2, (address) bkpnt);
+ bl(rscratch2);
+
+ restore_machine_state();
+ }
}
void MacroAssembler::print_cpool(InstanceKlass *klass) {
- ttyLocker ttyl;
- klass->constants()->print_on(tty);
+ ttyLocker ttyl;
+ klass->constants()->print_on(tty);
}
diff -r dc9b82a9fbc7 src/cpu/aarch32/vm/macroAssembler_aarch32.hpp
--- a/src/cpu/aarch32/vm/macroAssembler_aarch32.hpp Mon Jan 11 14:58:34 2016 +0000
+++ b/src/cpu/aarch32/vm/macroAssembler_aarch32.hpp Fri Jan 15 23:23:56 2016 +0800
@@ -92,7 +92,7 @@
void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
public:
- void init_unseen_bytecodes();
+ void init_unseen_bytecodes();
MacroAssembler(CodeBuffer* code) : Assembler(code) { init_unseen_bytecodes();}
// Biased locking support
@@ -690,33 +690,33 @@
//address read_polling_page(Register r, relocInfo::relocType rtype);
- // Auto dispatch for barriers isb, dmb & dsb.
- void isb() {
- if(VM_Version::features() & FT_ARMV7) {
- Assembler::isb();
- } else {
- cp15isb();
- }
- }
+ // Auto dispatch for barriers isb, dmb & dsb.
+ void isb() {
+ if(VM_Version::features() & FT_ARMV7) {
+ Assembler::isb();
+ } else {
+ cp15isb();
+ }
+ }
- void dsb(enum barrier option) {
- if(VM_Version::features() & FT_ARMV7) {
- Assembler::dsb(option);
- } else {
- cp15dsb();
- }
+ void dsb(enum barrier option) {
+ if(VM_Version::features() & FT_ARMV7) {
+ Assembler::dsb(option);
+ } else {
+ cp15dsb();
+ }
}
void dmb(enum barrier option) {
- if(VM_Version::features() & FT_ARMV7) {
- Assembler::dmb(option);
- } else {
- cp15dmb();
- }
+ if(VM_Version::features() & FT_ARMV7) {
+ Assembler::dmb(option);
+ } else {
+ cp15dmb();
+ }
}
void membar(Membar_mask_bits order_constraint) {
- dmb(Assembler::barrier(order_constraint));
+ dmb(Assembler::barrier(order_constraint));
}
// ISB may be needed because of a safepoint
@@ -727,13 +727,13 @@
void mult_long(Register Rd, Register Rn, Register Rm);
private:
- void divide32(Register res, Register num, Register den, bool want_mod);
+ void divide32(Register res, Register num, Register den, bool want_mod);
public:
- // <Rd+1:Rd> = <Rn+1:Rn> / <Rm+1:Rm>
- // <Rd+1:Rd> = <Rn+1:Rn> % <Rm+1:Rm>
- // <Rd> = <Rn> / <Rm>
- // <Rd> = <Rn> % <Rm>
- void divide(Register Rd, Register Rn, Register Rm, int width, bool want_remainder);
+ // <Rd+1:Rd> = <Rn+1:Rn> / <Rm+1:Rm>
+ // <Rd+1:Rd> = <Rn+1:Rn> % <Rm+1:Rm>
+ // <Rd> = <Rn> / <Rm>
+ // <Rd> = <Rn> % <Rm>
+ void divide(Register Rd, Register Rn, Register Rm, int width, bool want_remainder);
void extract_bits(Register dest, Register source, int lsb, int width);
@@ -750,8 +750,8 @@
void save_machine_state();
void restore_machine_state();
- static uint32_t bytecodes_until_print;
- static uint32_t bytecodes_executed;
+ static uint32_t bytecodes_until_print;
+ static uint32_t bytecodes_executed;
static int enable_debug;
static int enable_method_debug;
static int enable_debugging_static;
diff -r dc9b82a9fbc7 src/cpu/aarch32/vm/sharedRuntime_aarch32.cpp
--- a/src/cpu/aarch32/vm/sharedRuntime_aarch32.cpp Mon Jan 11 14:58:34 2016 +0000
+++ b/src/cpu/aarch32/vm/sharedRuntime_aarch32.cpp Fri Jan 15 23:23:56 2016 +0800
@@ -231,7 +231,7 @@
int total_args_passed,
int is_outgoing) {
- // FIXME This looks like it needs to be fixed
+ // FIXME This looks like it needs to be fixed
// Create the mapping between argument positions and
// registers.
static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
diff -r dc9b82a9fbc7 src/cpu/aarch32/vm/stubGenerator_aarch32.cpp
--- a/src/cpu/aarch32/vm/stubGenerator_aarch32.cpp Mon Jan 11 14:58:34 2016 +0000
+++ b/src/cpu/aarch32/vm/stubGenerator_aarch32.cpp Fri Jan 15 23:23:56 2016 +0800
@@ -241,17 +241,17 @@
#endif
if(MacroAssembler::enable_debugging_static){
- // FIXME Remove this hacky debugging code
- Label L;
+ // FIXME Remove this hacky debugging code
+ Label L;
__ ldr(rscratch2, Address(rthread, Thread::pending_exception_offset()));
- __ cbnz(rscratch2, L);
- // If we're returning via an exception then we shouldn't report exit,
- // the exception handler will have already reported the exit and reporting
- // via our progress through the call stub will result in an extra method
- // being reported as exited.
- __ print_method_exit();
- __ bind(L);
- }
+ __ cbnz(rscratch2, L);
+ // If we're returning via an exception then we shouldn't report exit,
+ // the exception handler will have already reported the exit and reporting
+ // via our progress through the call stub will result in an extra method
+ // being reported as exited.
+ __ print_method_exit();
+ __ bind(L);
+ }
// NOTE Horrible tricks here
// We saved c_rarg0, c_rarg1, c_rarg2, however we don't need to restore
diff -r dc9b82a9fbc7 src/cpu/aarch32/vm/templateInterpreter_aarch32.cpp
--- a/src/cpu/aarch32/vm/templateInterpreter_aarch32.cpp Mon Jan 11 14:58:34 2016 +0000
+++ b/src/cpu/aarch32/vm/templateInterpreter_aarch32.cpp Fri Jan 15 23:23:56 2016 +0800
@@ -653,7 +653,7 @@
// point rfp to location of old pc
__ add(rfp, sp, 9 * wordSize);
- __ reg_printf("Three-quarters through\n");
+ __ reg_printf("Three-quarters through\n");
// set sender sp
// leave last_sp as null
__ mov(rscratch1, 0);
@@ -946,11 +946,11 @@
// However, large signatures cannot be cached and are generated
// each time here. The slow-path generator can do a GC on return,
// so we must reload it after the call.
- __ reg_printf("**BEFORE**\nrlocals = %p,locals_esp = %p, sp = %p\n", rlocals, locals_esp, sp);
- __ reg_printf("About to call the Method::signature_handler = %p\n", rscratch1);
+ __ reg_printf("**BEFORE**\nrlocals = %p,locals_esp = %p, sp = %p\n", rlocals, locals_esp, sp);
+ __ reg_printf("About to call the Method::signature_handler = %p\n", rscratch1);
__ bl(rscratch1);
__ reg_printf("**AFER**\nr0 : %p, r1 : %p, r2 : %p\n", r0, r1, r2);
- __ reg_printf("r3 : %p, sp : %p\n", r3, sp);
+ __ reg_printf("r3 : %p, sp : %p\n", r3, sp);
__ get_method(rmethod); // slow path can do a GC, reload rmethod
@@ -1025,8 +1025,8 @@
__ reg_printf("Calling native method, lr = %p & rmethod = %p\n", lr, rmethod);
// Call the native method.
/*__ reg_printf("**ONCALL**\nr0 : %p\nr1 : %p\nr2 : %p\n", r0, r1, r2);
- __ reg_printf("r3 : %p\n\nr4 : %p\nrloc : %p\n", r3, r4, rlocals);*/
- __ reg_printf("Stack Pointer on entry to native, sp = %p\n", sp);
+ __ reg_printf("r3 : %p\n\nr4 : %p\nrloc : %p\n", r3, r4, rlocals);*/
+ __ reg_printf("Stack Pointer on entry to native, sp = %p\n", sp);
__ bl(native_entry_point);
__ reg_printf("Returned from native, lr = %p, r1 = %p, r0 = %p\n", lr, r1, r0);
__ maybe_isb();
@@ -1813,7 +1813,7 @@
Interpreter::_remove_activation_entry = __ pc();
__ print_method_exit(false);
- __ reg_printf("remove_activation_entry\n");
+ __ reg_printf("remove_activation_entry\n");
// preserve exception over this code sequence
__ pop_ptr(r0);
diff -r dc9b82a9fbc7 src/cpu/aarch32/vm/templateTable_aarch32.cpp
--- a/src/cpu/aarch32/vm/templateTable_aarch32.cpp Mon Jan 11 14:58:34 2016 +0000
+++ b/src/cpu/aarch32/vm/templateTable_aarch32.cpp Fri Jan 15 23:23:56 2016 +0800
@@ -82,7 +82,7 @@
}
static inline Address iaddress(Register r) {
- // FIXME
+ // FIXME
return Address(rlocals, r, lsl(2));
}
@@ -341,11 +341,11 @@
float fval = value;
assert(value == 0 || value == 1 || value == 2, "invalid float const");
if(VM_Version::features() & FT_VFPV3) {
- __ vmov_f32(d0, fval);
- } else {
- __ mov(r0, *((uint32_t*)&fval));
- __ vmov_f32(d0, r0);
- }
+ __ vmov_f32(d0, fval);
+ } else {
+ __ mov(r0, *((uint32_t*)&fval));
+ __ vmov_f32(d0, r0);
+ }
}
void TemplateTable::dconst(int value)
@@ -354,13 +354,13 @@
double dval = value;
assert(value == 0 || value == 1 || value == 2, "invalid double const");
if(VM_Version::features() & FT_VFPV3) {
- __ vmov_f64(d0, dval);
- } else {
- uint32_t* ptr = (uint32_t*)&dval;
- __ mov(r0, *ptr);
- __ mov(r1, *(ptr + 1));
- __ vmov_f64(d0, r0, r1);
- }
+ __ vmov_f64(d0, dval);
+ } else {
+ uint32_t* ptr = (uint32_t*)&dval;
+ __ mov(r0, *ptr);
+ __ mov(r1, *(ptr + 1));
+ __ vmov_f64(d0, r0, r1);
+ }
}
void TemplateTable::bipush()
@@ -1354,14 +1354,14 @@
// shift count is in r0 - take shift from bottom six bits only
__ andr(r0, r0, 63);
__ pop_l(r2); //LSB in lowest reg
- int word_bytes = 8 * wordSize;
-
- __ sub(r1, r0, word_bytes);
- __ lsl(r3, r3, r0);
- __ orr(r3, r3, r2, lsl(r1));
- __ rsb(r1, r0, word_bytes);
- __ orr(r1, r3, r2, lsr(r1));
- __ lsl(r0, r2, r0);
+ int word_bytes = 8 * wordSize;
+
+ __ sub(r1, r0, word_bytes);
+ __ lsl(r3, r3, r0);
+ __ orr(r3, r3, r2, lsl(r1));
+ __ rsb(r1, r0, word_bytes);
+ __ orr(r1, r3, r2, lsr(r1));
+ __ lsl(r0, r2, r0);
}
void TemplateTable::lshr()
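
The lshl sequence above builds a 64-bit left shift out of the two 32-bit halves held in a register pair. A branching C++ equivalent (lshl64 is an illustrative name; the generated code is branch-free because ARM register-specified shifts of 32 or more simply produce zero):

#include <cassert>
#include <cstdint>

// 64-bit left shift composed from 32-bit halves.
static uint64_t lshl64(uint32_t lo, uint32_t hi, unsigned n) {
  n &= 63;                                   // Java long shifts use the low 6 bits
  uint32_t new_hi, new_lo;
  if (n == 0)      { new_hi = hi;                           new_lo = lo;      }
  else if (n < 32) { new_hi = (hi << n) | (lo >> (32 - n)); new_lo = lo << n; }
  else             { new_hi = lo << (n - 32);               new_lo = 0;       }
  return ((uint64_t)new_hi << 32) | new_lo;
}

int main() {
  assert(lshl64(0x89ABCDEFu, 0x01234567u, 8)  == (0x0123456789ABCDEFULL << 8));
  assert(lshl64(0x89ABCDEFu, 0x01234567u, 40) == (0x0123456789ABCDEFULL << 40));
  return 0;
}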
@@ -1420,10 +1420,10 @@
__ vcvt_f64_f32(d1, d0);
__ pop_f(d0);
__ vcvt_f64_f32(d0, d0);
- #ifndef HARD_FLOAT_CC
+#ifndef HARD_FLOAT_CC
__ vmov_f64(r0, r1, d0);
__ vmov_f64(r2, r3, d1);
- #endif
+#endif
__ mov(rscratch1, (address)fmod);
__ bl(rscratch1);
__ vcvt_f32_f64(d0, d0);
@@ -1457,10 +1457,10 @@
case rem:
__ vmov_f64(d1, d0);
__ pop_d(d0);
- #ifndef HARD_FLOAT_CC
+#ifndef HARD_FLOAT_CC
__ vmov_f64(r0, r1, d0);
__ vmov_f64(r2, r3, d1);
- #endif
+#endif
__ mov(rscratch1, (address)fmod);
__ bl(rscratch1);
break;
@@ -1579,7 +1579,7 @@
__ reg_printf("Convert i2l (after) 0x%08x%08x\n", r1, r0);
break;
case Bytecodes::_i2f:
- //__ bkpt(735);
+ //__ bkpt(735);
//__ scvtfws(d0, r0);
//__ reg_printf("VCVT Convert i2f, (before) 0x%08x\n", r0);
__ vmov_f32(d0, r0);
@@ -1635,10 +1635,10 @@
case Bytecodes::_f2l:
{
//float already in d0 long goes to <r1:r0>
- #ifndef HARD_FLOAT_CC
+#ifndef HARD_FLOAT_CC
//Need to move float in d0 to r0
__ vmov_f32(r0, d0);
- #endif
+#endif
__ call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 0);
}
break;
@@ -1665,10 +1665,10 @@
case Bytecodes::_d2l:
{
// d0 -> <r1:r0>
- #ifndef HARD_FLOAT_CC
+#ifndef HARD_FLOAT_CC
//Need to move float in d0 to r0
__ vmov_f64(r0, r1, d0);
- #endif
+#endif
__ call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 0);
}
break;
@@ -1713,7 +1713,7 @@
void TemplateTable::float_cmp(bool is_float, int unordered_result)
{
- //__ bkpt(400);
+ //__ bkpt(400);
if (is_float) {
__ pop_f(d1);
__ vcmp_f32(d1, d0);
@@ -2113,7 +2113,7 @@
transition(itos, vtos);
Label loop_entry, loop, found, continue_execution;
- __ reg_printf("Linearswitching to value %d\n", r0);
+ __ reg_printf("Linearswitching to value %d\n", r0);
// bswap r0 so we can avoid bswapping the table entries
__ rev(r0, r0);
@@ -2526,7 +2526,7 @@
Label Done, notByte, notInt, notShort, notChar,
notLong, notFloat, notObj, notDouble;
- //__ bkpt(324);
+ //__ bkpt(324);
// x86 uses a shift and mask or wings it with a shift plus assert
// the mask is not needed. aarch32 just uses bitfield extract
__ extract_bits(flags, flags, ConstantPoolCacheEntry::tos_state_shift, ConstantPoolCacheEntry::tos_state_bits);
@@ -2596,7 +2596,7 @@
__ cmp(flags, ltos);
__ b(notLong, Assembler::NE);
// ltos
- __ lea(rscratch1, field);
+ __ lea(rscratch1, field);
__ atomic_ldrd(r0, r1, rscratch1);
__ push(ltos);
// Rewrite bytecode to be faster
@@ -3006,7 +3006,7 @@
do_oop_store(_masm, field, r0, _bs->kind(), false);
break;
case Bytecodes::_fast_lputfield:
- __ lea(rscratch1, field);
+ __ lea(rscratch1, field);
__ atomic_strd(r0, r1, rscratch1, r2, r3);
break;
case Bytecodes::_fast_iputfield:
@@ -3025,8 +3025,8 @@
__ vstr_f32(d0, Address(rscratch1));
break;
case Bytecodes::_fast_dputfield:
- __ lea(rscratch1, field);
- __ vmov_f64(r0, r1, d0);
+ __ lea(rscratch1, field);
+ __ vmov_f64(r0, r1, d0);
__ atomic_strd(r0, r1, rscratch1, r2, r3);
break;
default:
@@ -3107,10 +3107,10 @@
__ lea(r0, field); // r0 <= field
__ vldr_f32(d0, Address(r0));
__ vmov_f32(rscratch1, d0);
- break;
+ break;
case Bytecodes::_fast_dgetfield:
__ lea(rscratch1, field); // r0 <= field
- __ atomic_ldrd(r0, r1, rscratch1);
+ __ atomic_ldrd(r0, r1, rscratch1);
__ vmov_f64(d0, r0, r1);
break;
default:
diff -r dc9b82a9fbc7 src/cpu/aarch32/vm/vm_version_aarch32.cpp
--- a/src/cpu/aarch32/vm/vm_version_aarch32.cpp Mon Jan 11 14:58:34 2016 +0000
+++ b/src/cpu/aarch32/vm/vm_version_aarch32.cpp Fri Jan 15 23:23:56 2016 +0800
@@ -97,19 +97,19 @@
bool VM_Version::identify_procline(const char *tag, char **line) {
- char *i = *line;
- const char EOT = '\t', EOT2 = ':'; // the longest has no tabs
- for(; '\0' != *i && EOT != *i && EOT2 != *i; i++);
- if(EOT == *i || EOT2 == *i) {
- if(!memcmp(*line, tag, i - *line)) {
- for(i++; (EOT == *i || EOT2 == *i || ' ' == *i) && '\0' != *i; i++);
- if('\0' != *i) {
- *line = i;
- return true;
- }
- }
- }
- return false;
+ char *i = *line;
+ const char EOT = '\t', EOT2 = ':'; // the longest has no tabs
+ for(; '\0' != *i && EOT != *i && EOT2 != *i; i++);
+ if(EOT == *i || EOT2 == *i) {
+ if(!memcmp(*line, tag, i - *line)) {
+ for(i++; (EOT == *i || EOT2 == *i || ' ' == *i) && '\0' != *i; i++);
+ if('\0' != *i) {
+ *line = i;
+ return true;
+ }
+ }
+ }
+ return false;
}
void VM_Version::get_processor_features() {
@@ -129,67 +129,67 @@
FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
int ncores = 0, cpu, variant, model, revision;
- enum ProcessorFeatures f = FT_NONE;
- char buf[2048], *i;
- if(FILE *fp = fopen("/proc/cpuinfo", "r")) {
- while((i = fgets(buf, 2048, fp))) {
- if(identify_procline("Features", &i)) {
- i = strtok(i, " \n");
- while(i) {
- if(!strcmp("idiva", i)) {
- f = (ProcessorFeatures)(f | FT_HW_DIVIDE);
- } else if(!strcmp("vfpv3", i) || !strcmp("vfpv4", i)) {
- // Assuming that vfpv4 implements all of vfpv3
- // and that they both implement all of v2.
- f = (ProcessorFeatures)(f | FT_VFPV3 | FT_VFPV2);
- } else if(!strcmp("vfp", i)) {
- // Assuming that VFPv2 is identified by plain vfp
- f = (ProcessorFeatures)(f | FT_VFPV2);
- }
- i = strtok(NULL, " \n");
- }
- } else if(identify_procline("Processor", &i)) {
- i = strtok(i, " \n");
- while(i) {
- // if the info is read correctly do
- if(!strcmp("ARMv7", i) || !strcmp("AArch64", i)) {
- f = (ProcessorFeatures)(f | FT_ARMV7);
- } else if(!strcmp("ARMv6-compatible", i)) {
- //TODO sort out the ARMv6 identification code
- }
- i = strtok(NULL, " \n");
- }
- } else if(identify_procline("model name", &i)) {
- i = strtok(i, " \n");
- while(i) {
- // if the info is read correctly do
- if(!strcmp("ARMv7", i)) {
- f = (ProcessorFeatures)(f | FT_ARMV7);
- } else if(!strcmp("ARMv6-compatible", i)) {
- //TODO sort out the ARMv6 identification code
- }
- i = strtok(NULL, " \n");
- }
- } else if(identify_procline("processor", &i)) {
- ncores++;
- } else if(identify_procline("CPU implementer", &i)) {
- cpu = strtol(i, NULL, 0);
- } else if(identify_procline("CPU variant", &i)) {
- variant = strtol(i, NULL, 0);
- } else if(identify_procline("CPU part", &i)) {
- model = strtol(i, NULL, 0);
- } else if(identify_procline("CPU revision", &i)) {
- revision = strtol(i, NULL, 0);
- }
- }
- fclose(fp);
+ enum ProcessorFeatures f = FT_NONE;
+ char buf[2048], *i;
+ if(FILE *fp = fopen("/proc/cpuinfo", "r")) {
+ while((i = fgets(buf, 2048, fp))) {
+ if(identify_procline("Features", &i)) {
+ i = strtok(i, " \n");
+ while(i) {
+ if(!strcmp("idiva", i)) {
+ f = (ProcessorFeatures)(f | FT_HW_DIVIDE);
+ } else if(!strcmp("vfpv3", i) || !strcmp("vfpv4", i)) {
+ // Assuming that vfpv4 implements all of vfpv3
+ // and that they both implement all of v2.
+ f = (ProcessorFeatures)(f | FT_VFPV3 | FT_VFPV2);
+ } else if(!strcmp("vfp", i)) {
+ // Assuming that VFPv2 is identified by plain vfp
+ f = (ProcessorFeatures)(f | FT_VFPV2);
+ }
+ i = strtok(NULL, " \n");
}
- if(1 == ncores) {
- f = (ProcessorFeatures)(f | FT_SINGLE_CORE);
+ } else if(identify_procline("Processor", &i)) {
+ i = strtok(i, " \n");
+ while(i) {
+ // if the info is read correctly do
+ if(!strcmp("ARMv7", i) || !strcmp("AArch64", i)) {
+ f = (ProcessorFeatures)(f | FT_ARMV7);
+ } else if(!strcmp("ARMv6-compatible", i)) {
+ //TODO sort out the ARMv6 identification code
+ }
+ i = strtok(NULL, " \n");
}
- _features = f;
- sprintf(buf, "0x%02x:0x%x:0x%03x:%d", cpu, variant, model, revision);
- _cpu_features = os::strdup(buf);
+ } else if(identify_procline("model name", &i)) {
+ i = strtok(i, " \n");
+ while(i) {
+ // if the info is read correctly do
+ if(!strcmp("ARMv7", i)) {
+ f = (ProcessorFeatures)(f | FT_ARMV7);
+ } else if(!strcmp("ARMv6-compatible", i)) {
+ //TODO sort out the ARMv6 identification code
+ }
+ i = strtok(NULL, " \n");
+ }
+ } else if(identify_procline("processor", &i)) {
+ ncores++;
+ } else if(identify_procline("CPU implementer", &i)) {
+ cpu = strtol(i, NULL, 0);
+ } else if(identify_procline("CPU variant", &i)) {
+ variant = strtol(i, NULL, 0);
+ } else if(identify_procline("CPU part", &i)) {
+ model = strtol(i, NULL, 0);
+ } else if(identify_procline("CPU revision", &i)) {
+ revision = strtol(i, NULL, 0);
+ }
+ }
+ fclose(fp);
+ }
+ if(1 == ncores) {
+ f = (ProcessorFeatures)(f | FT_SINGLE_CORE);
+ }
+ _features = f;
+ sprintf(buf, "0x%02x:0x%x:0x%03x:%d", cpu, variant, model, revision);
+ _cpu_features = os::strdup(buf);
#ifdef COMPILER2
if (UseMultiplyToLenIntrinsic) {
@@ -205,10 +205,10 @@
}*/
/*if(!(f & FT_ARMV7) && FLAG_IS_DEFAULT(UseMembar)) {
- UseMembar = false;
+ UseMembar = false;
} else if(UseMembar) {
- fprintf(stderr, "Unable to use memory barriers as not on ARMv7, disabling.\n");
- UseMembar = false;
+ fprintf(stderr, "Unable to use memory barriers as not on ARMv7, disabling.\n");
+ UseMembar = false;
}*/
}
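
get_processor_features above fills in the feature word by scanning /proc/cpuinfo for tokens such as idiva, vfpv3 and vfp. A simplified standalone sketch of that scan (scan_cpuinfo_features and the Features enum here are illustrative; the real code also parses the Processor/model name lines and counts cores):

#include <cstdio>
#include <cstring>

enum Features { F_NONE = 0, F_HW_DIVIDE = 1, F_VFPV2 = 2, F_VFPV3 = 4 };

// Scan the "Features" line of /proc/cpuinfo for the tokens the port cares about.
static int scan_cpuinfo_features() {
  int f = F_NONE;
  char buf[2048];
  FILE* fp = fopen("/proc/cpuinfo", "r");
  if (!fp) return f;
  while (fgets(buf, sizeof buf, fp)) {
    if (strncmp(buf, "Features", 8) != 0) continue;
    for (char* tok = strtok(buf, " \t:\n"); tok; tok = strtok(NULL, " \t:\n")) {
      if (!strcmp(tok, "idiva"))                               f |= F_HW_DIVIDE;
      else if (!strcmp(tok, "vfpv3") || !strcmp(tok, "vfpv4")) f |= F_VFPV3 | F_VFPV2;
      else if (!strcmp(tok, "vfp"))                            f |= F_VFPV2;
    }
  }
  fclose(fp);
  return f;
}

int main() {
  printf("features = 0x%x\n", scan_cpuinfo_features());
  return 0;
}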
diff -r dc9b82a9fbc7 src/cpu/aarch32/vm/vm_version_aarch32.hpp
--- a/src/cpu/aarch32/vm/vm_version_aarch32.hpp Mon Jan 11 14:58:34 2016 +0000
+++ b/src/cpu/aarch32/vm/vm_version_aarch32.hpp Fri Jan 15 23:23:56 2016 +0800
@@ -31,19 +31,19 @@
#include "runtime/vm_version.hpp"
enum ProcessorFeatures {
- FT_NONE = 0,
- FT_HW_DIVIDE = 1,
- FT_VFPV2 = 2,
- FT_VFPV3 = 4,
- FT_ARMV7 = 8,
- FT_ARMV6T2 = 16,
- FT_ARMV6K = 32,
- FT_SINGLE_CORE = 64
+ FT_NONE = 0,
+ FT_HW_DIVIDE = 1,
+ FT_VFPV2 = 2,
+ FT_VFPV3 = 4,
+ FT_ARMV7 = 8,
+ FT_ARMV6T2 = 16,
+ FT_ARMV6K = 32,
+ FT_SINGLE_CORE = 64
};
class VM_Version : public Abstract_VM_Version {
public:
- // Processor feature lookup.
+ // Processor feature lookup.
enum {
CPU_ARM = 'A',
@@ -63,17 +63,17 @@
static void initialize();
private:
- static enum ProcessorFeatures _features;
- static const char* _cpu_features;
+ static enum ProcessorFeatures _features;
+ static const char* _cpu_features;
static void get_processor_features();
- static bool identify_procline(const char *tag, char **line);
+ static bool identify_procline(const char *tag, char **line);
public:
- static enum ProcessorFeatures features() {
- return _features;
- }
- static const char* cpu_features() { return _cpu_features; }
+ static enum ProcessorFeatures features() {
+ return _features;
+ }
+ static const char* cpu_features() { return _cpu_features; }
};
#endif // CPU_AARCH32_VM_VM_VERSION_AARCH32_HPP