[aarch64-port-dev ] JDK8: Delete dead code

Andrew Haley aph at redhat.com
Mon Jul 28 09:13:40 UTC 2014


This is the first part of a cleanup to remove code that we thought
might be useful but hasn't turned out to be, along with obsolete
comments.
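
Apart from the include-guard rename in c1_LinearScan_aarch64.hpp, the
only non-deletion change below is LIR_Assembler::get_thread, which
stops calling Unimplemented() and instead copies the dedicated thread
register into the result, along these lines (this just restates the
hunk further down; rthread is the register this port reserves for the
current JavaThread):

  void LIR_Assembler::get_thread(LIR_Opr result_reg) {
    // the current JavaThread lives in a dedicated register (rthread),
    // so fetching it is a plain register-to-register move
    __ mov(result_reg->as_register(), rthread);
  }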

Andrew.



comparing with ssh://hg.openjdk.java.net/aarch64-port/jdk8/hotspot
searching for changes
changeset:   7202:4020f25a52c2
tag:         tip
user:        aph
date:        Fri Jul 25 08:17:44 2014 -0400
summary:     Delete dead code.

diff -r fdcc9aef9dbb -r 4020f25a52c2 src/cpu/aarch64/vm/aarch64.ad
--- a/src/cpu/aarch64/vm/aarch64.ad	Tue Jul 22 11:56:07 2014 -0400
+++ b/src/cpu/aarch64/vm/aarch64.ad	Fri Jul 25 08:17:44 2014 -0400
@@ -892,11 +892,6 @@

 //=============================================================================

-// Emit an interrupt that is caught by the debugger (for debugging compiler).
-void emit_break(CodeBuffer &cbuf) {
-  Unimplemented();
-}
-
 #ifndef PRODUCT
 void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
   st->print("BREAKPOINT");
diff -r fdcc9aef9dbb -r 4020f25a52c2 src/cpu/aarch64/vm/aarch64Test.cpp
--- a/src/cpu/aarch64/vm/aarch64Test.cpp	Tue Jul 22 11:56:07 2014 -0400
+++ b/src/cpu/aarch64/vm/aarch64Test.cpp	Fri Jul 25 08:17:44 2014 -0400
@@ -35,43 +35,4 @@
   CodeBuffer code(b);
   MacroAssembler _masm(&code);
   entry(&code);
-  // dive now before we hit all the Unimplemented() calls
-  // exit(0);
-
-#if 0
-  // old test code to compute sum of squares
-  enum { r0, r1, r2, r3, r4, LR = 30 };
-
-  address entry = __ pc();
-
-  __ _mov_imm(r0, 100);
-  address loop = __ pc();
-  __ _sub_imm(r0, r0, 1);
-  __ _cbnz(r0, loop);
-  // __ _br(LR);
-
-  char stack[4096];
-  unsigned long memory[100];
-
-  __ _mov_imm(r0, 1);
-  __ _mov_imm(r4, 100);
-  loop = __ pc();
-  __ _mov(r1, r0);
-  __ _mul(r2, r1, r1);
-  __ _str_post(r2, r3, 8);
-  __ _add_imm(r0, r0, 1);
-  __ _sub_imm(r4, r4, 1);
-  __ _cbnz(r4, loop);
-  __ _br(LR);
-
-  Disassembler::decode(entry, __ pc());
-
-  sim.init((u_int64_t)entry, (u_int64_t)stack + sizeof stack,
-	   (u_int64_t)stack);
-  sim.getCPUState().xreg((GReg)r3, 0) = (u_int64_t)memory;
-  sim.run();
-  printf("Table of squares:\n");
-  for (int i = 0; i < 100; i++)
-    printf("  %d\n", memory[i]);
-#endif
 }
diff -r fdcc9aef9dbb -r 4020f25a52c2 src/cpu/aarch64/vm/assembler_aarch64.hpp
--- a/src/cpu/aarch64/vm/assembler_aarch64.hpp	Tue Jul 22 11:56:07 2014 -0400
+++ b/src/cpu/aarch64/vm/assembler_aarch64.hpp	Fri Jul 25 08:17:44 2014 -0400
@@ -1244,12 +1244,6 @@
       f(size & 0b01, 31, 30), f(0b011, 29, 27), f(0b00, 25, 24);
       long offset = (adr.target() - pc()) >> 2;
       sf(offset, 23, 5);
-#if 0
-      Relocation* reloc = adr.rspec().reloc();
-      relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();
-      assert(rtype == relocInfo::internal_word_type,
-	     "only internal_word_type relocs make sense here");
-#endif
       code_section()->relocate(pc(), adr.rspec());
       return;
     }
diff -r fdcc9aef9dbb -r 4020f25a52c2 src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Tue Jul 22 11:56:07 2014 -0400
+++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Fri Jul 25 08:17:44 2014 -0400
@@ -2994,7 +2994,9 @@

 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }

-void LIR_Assembler::get_thread(LIR_Opr result_reg) { Unimplemented(); }
+void LIR_Assembler::get_thread(LIR_Opr result_reg) {
+  __ mov(result_reg->as_register(), rthread);
+}


 void LIR_Assembler::peephole(LIR_List *lir) {
diff -r fdcc9aef9dbb -r 4020f25a52c2 src/cpu/aarch64/vm/c1_LinearScan_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_LinearScan_aarch64.cpp	Tue Jul 22 11:56:07 2014 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1246 +0,0 @@
-/*
- * Copyright (c) 2013, Red Hat Inc.
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates.
- * All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "c1/c1_Instruction.hpp"
-#include "c1/c1_LinearScan.hpp"
-#include "utilities/bitMap.inline.hpp"
-
-
-//----------------------------------------------------------------------
-// Allocation of FPU stack slots (Intel x86 only)
-//----------------------------------------------------------------------
-
-void LinearScan::allocate_fpu_stack() {
-  // First compute which FPU registers are live at the start of each basic block
-  // (To minimize the amount of work we have to do if we have to merge FPU stacks)
-  if (ComputeExactFPURegisterUsage) {
-    Interval* intervals_in_register, *intervals_in_memory;
-    create_unhandled_lists(&intervals_in_register, &intervals_in_memory, is_in_fpu_register, NULL);
-
-    // ignore memory intervals by overwriting intervals_in_memory
-    // the dummy interval is needed to enforce the walker to walk until the given id:
-    // without it, the walker stops when the unhandled-list is empty -> live information
-    // beyond this point would be incorrect.
-    Interval* dummy_interval = new Interval(any_reg);
-    dummy_interval->add_range(max_jint - 2, max_jint - 1);
-    dummy_interval->set_next(Interval::end());
-    intervals_in_memory = dummy_interval;
-
-    IntervalWalker iw(this, intervals_in_register, intervals_in_memory);
-
-    const int num_blocks = block_count();
-    for (int i = 0; i < num_blocks; i++) {
-      BlockBegin* b = block_at(i);
-
-      // register usage is only needed for merging stacks -> compute only
-      // when more than one predecessor.
-      // the block must not have any spill moves at the beginning (checked by assertions)
-      // spill moves would use intervals that are marked as handled and so the usage bit
-      // would been set incorrectly
-
-      // NOTE: the check for number_of_preds > 1 is necessary. A block with only one
-      //       predecessor may have spill moves at the begin of the block.
-      //       If an interval ends at the current instruction id, it is not possible
-      //       to decide if the register is live or not at the block begin -> the
-      //       register information would be incorrect.
-      if (b->number_of_preds() > 1) {
-        int id = b->first_lir_instruction_id();
-        BitMap regs(FrameMap::nof_fpu_regs);
-        regs.clear();
-
-        iw.walk_to(id);   // walk after the first instruction (always a label) of the block
-        assert(iw.current_position() == id, "did not walk completely to id");
-
-        // Only consider FPU values in registers
-        Interval* interval = iw.active_first(fixedKind);
-        while (interval != Interval::end()) {
-          int reg = interval->assigned_reg();
-          assert(reg >= pd_first_fpu_reg && reg <= pd_last_fpu_reg, "no fpu register");
-          assert(interval->assigned_regHi() == -1, "must not have hi register (doubles stored in one register)");
-          assert(interval->from() <= id && id < interval->to(), "interval out of range");
-
-#ifndef PRODUCT
-          if (TraceFPURegisterUsage) {
-            tty->print("fpu reg %d is live because of ", reg - pd_first_fpu_reg); interval->print();
-          }
-#endif
-
-          regs.set_bit(reg - pd_first_fpu_reg);
-          interval = interval->next();
-        }
-
-        b->set_fpu_register_usage(regs);
-
-#ifndef PRODUCT
-        if (TraceFPURegisterUsage) {
-          tty->print("FPU regs for block %d, LIR instr %d): ", b->block_id(), id); regs.print_on(tty); tty->cr();
-        }
-#endif
-      }
-    }
-  }
-
-#ifndef TARGET_ARCH_aarch64
-  FpuStackAllocator alloc(ir()->compilation(), this);
-  _fpu_stack_allocator = &alloc;
-  alloc.allocate();
-  _fpu_stack_allocator = NULL;
-#endif
-}
-
-
-FpuStackAllocator::FpuStackAllocator(Compilation* compilation, LinearScan* allocator)
-  : _compilation(compilation)
-  , _lir(NULL)
-  , _pos(-1)
-  , _allocator(allocator)
-  , _sim(compilation)
-  , _temp_sim(compilation)
-{}
-
-void FpuStackAllocator::allocate() {
-  int num_blocks = allocator()->block_count();
-  for (int i = 0; i < num_blocks; i++) {
-    // Set up to process block
-    BlockBegin* block = allocator()->block_at(i);
-    intArray* fpu_stack_state = block->fpu_stack_state();
-
-#ifndef PRODUCT
-    if (TraceFPUStack) {
-      tty->cr();
-      tty->print_cr("------- Begin of new Block %d -------", block->block_id());
-    }
-#endif
-
-    assert(fpu_stack_state != NULL ||
-           block->end()->as_Base() != NULL ||
-           block->is_set(BlockBegin::exception_entry_flag),
-           "FPU stack state must be present due to linear-scan order for FPU stack allocation");
-    // note: exception handler entries always start with an empty fpu stack
-    //       because stack merging would be too complicated
-
-    if (fpu_stack_state != NULL) {
-      sim()->read_state(fpu_stack_state);
-    } else {
-      sim()->clear();
-    }
-
-#ifndef PRODUCT
-    if (TraceFPUStack) {
-      tty->print("Reading FPU state for block %d:", block->block_id());
-      sim()->print();
-      tty->cr();
-    }
-#endif
-
-    allocate_block(block);
-    CHECK_BAILOUT();
-  }
-}
-
-void FpuStackAllocator::allocate_block(BlockBegin* block) {
-  bool processed_merge = false;
-  LIR_OpList* insts = block->lir()->instructions_list();
-  set_lir(block->lir());
-  set_pos(0);
-
-
-  // Note: insts->length() may change during loop
-  while (pos() < insts->length()) {
-    LIR_Op* op = insts->at(pos());
-    _debug_information_computed = false;
-
-#ifndef PRODUCT
-    if (TraceFPUStack) {
-      op->print();
-    }
-    check_invalid_lir_op(op);
-#endif
-
-    LIR_OpBranch* branch = op->as_OpBranch();
-    LIR_Op1* op1 = op->as_Op1();
-    LIR_Op2* op2 = op->as_Op2();
-    LIR_OpCall* opCall = op->as_OpCall();
-
-    if (branch != NULL && branch->block() != NULL) {
-      if (!processed_merge) {
-        // propagate stack at first branch to a successor
-        processed_merge = true;
-        bool required_merge = merge_fpu_stack_with_successors(block);
-
-        assert(!required_merge || branch->cond() == lir_cond_always, "splitting of critical edges should prevent FPU stack mismatches at cond branches");
-      }
-
-    } else if (op1 != NULL) {
-      handle_op1(op1);
-    } else if (op2 != NULL) {
-      handle_op2(op2);
-    } else if (opCall != NULL) {
-      handle_opCall(opCall);
-    }
-
-    compute_debug_information(op);
-
-    set_pos(1 + pos());
-  }
-
-  // Propagate stack when block does not end with branch
-  if (!processed_merge) {
-    merge_fpu_stack_with_successors(block);
-  }
-}
-
-
-void FpuStackAllocator::compute_debug_information(LIR_Op* op) {
-  if (!_debug_information_computed && op->id() != -1 && allocator()->has_info(op->id())) {
-    visitor.visit(op);
-
-    // exception handling
-    if (allocator()->compilation()->has_exception_handlers()) {
-      XHandlers* xhandlers = visitor.all_xhandler();
-      int n = xhandlers->length();
-      for (int k = 0; k < n; k++) {
-        allocate_exception_handler(xhandlers->handler_at(k));
-      }
-    } else {
-      assert(visitor.all_xhandler()->length() == 0, "missed exception handler");
-    }
-
-    // compute debug information
-    int n = visitor.info_count();
-    assert(n > 0, "should not visit operation otherwise");
-
-    for (int j = 0; j < n; j++) {
-      CodeEmitInfo* info = visitor.info_at(j);
-      // Compute debug information
-      allocator()->compute_debug_info(info, op->id());
-    }
-  }
-  _debug_information_computed = true;
-}
-
-void FpuStackAllocator::allocate_exception_handler(XHandler* xhandler) {
-  if (!sim()->is_empty()) {
-    LIR_List* old_lir = lir();
-    int old_pos = pos();
-    intArray* old_state = sim()->write_state();
-
-#ifndef PRODUCT
-    if (TraceFPUStack) {
-      tty->cr();
-      tty->print_cr("------- begin of exception handler -------");
-    }
-#endif
-
-    if (xhandler->entry_code() == NULL) {
-      // need entry code to clear FPU stack
-      LIR_List* entry_code = new LIR_List(_compilation);
-      entry_code->jump(xhandler->entry_block());
-      xhandler->set_entry_code(entry_code);
-    }
-
-    LIR_OpList* insts = xhandler->entry_code()->instructions_list();
-    set_lir(xhandler->entry_code());
-    set_pos(0);
-
-    // Note: insts->length() may change during loop
-    while (pos() < insts->length()) {
-      LIR_Op* op = insts->at(pos());
-
-#ifndef PRODUCT
-      if (TraceFPUStack) {
-        op->print();
-      }
-      check_invalid_lir_op(op);
-#endif
-
-      switch (op->code()) {
-        case lir_move:
-          assert(op->as_Op1() != NULL, "must be LIR_Op1");
-          assert(pos() != insts->length() - 1, "must not be last operation");
-
-          handle_op1((LIR_Op1*)op);
-          break;
-
-        case lir_branch:
-          assert(op->as_OpBranch()->cond() == lir_cond_always, "must be unconditional branch");
-          assert(pos() == insts->length() - 1, "must be last operation");
-
-          // remove all remaining dead registers from FPU stack
-          clear_fpu_stack(LIR_OprFact::illegalOpr);
-          break;
-
-        default:
-          // other operations not allowed in exception entry code
-          ShouldNotReachHere();
-      }
-
-      set_pos(pos() + 1);
-    }
-
-#ifndef PRODUCT
-    if (TraceFPUStack) {
-      tty->cr();
-      tty->print_cr("------- end of exception handler -------");
-    }
-#endif
-
-    set_lir(old_lir);
-    set_pos(old_pos);
-    sim()->read_state(old_state);
-  }
-}
-
-
-int FpuStackAllocator::fpu_num(LIR_Opr opr) {
-  assert(opr->is_fpu_register() && !opr->is_xmm_register(), "shouldn't call this otherwise");
-  return opr->is_single_fpu() ? opr->fpu_regnr() : opr->fpu_regnrLo();
-}
-
-int FpuStackAllocator::tos_offset(LIR_Opr opr) {
-  return sim()->offset_from_tos(fpu_num(opr));
-}
-
-
-LIR_Opr FpuStackAllocator::to_fpu_stack(LIR_Opr opr) {
-  assert(opr->is_fpu_register() && !opr->is_xmm_register(), "shouldn't call this otherwise");
-
-  int stack_offset = tos_offset(opr);
-  if (opr->is_single_fpu()) {
-    return LIR_OprFact::single_fpu(stack_offset)->make_fpu_stack_offset();
-  } else {
-    assert(opr->is_double_fpu(), "shouldn't call this otherwise");
-    return LIR_OprFact::double_fpu(stack_offset)->make_fpu_stack_offset();
-  }
-}
-
-LIR_Opr FpuStackAllocator::to_fpu_stack_top(LIR_Opr opr, bool dont_check_offset) {
-  assert(opr->is_fpu_register() && !opr->is_xmm_register(), "shouldn't call this otherwise");
-  assert(dont_check_offset || tos_offset(opr) == 0, "operand is not on stack top");
-
-  int stack_offset = 0;
-  if (opr->is_single_fpu()) {
-    return LIR_OprFact::single_fpu(stack_offset)->make_fpu_stack_offset();
-  } else {
-    assert(opr->is_double_fpu(), "shouldn't call this otherwise");
-    return LIR_OprFact::double_fpu(stack_offset)->make_fpu_stack_offset();
-  }
-}
-
-
-
-void FpuStackAllocator::insert_op(LIR_Op* op) {
-  lir()->insert_before(pos(), op);
-  set_pos(1 + pos());
-}
-
-
-void FpuStackAllocator::insert_exchange(int offset) {
-  if (offset > 0) {
-    LIR_Op1* fxch_op = new LIR_Op1(lir_fxch, LIR_OprFact::intConst(offset), LIR_OprFact::illegalOpr);
-    insert_op(fxch_op);
-    sim()->swap(offset);
-
-#ifndef PRODUCT
-    if (TraceFPUStack) {
-      tty->print("Exchanged register: %d         New state: ", sim()->get_slot(0)); sim()->print(); tty->cr();
-    }
-#endif
-
-  }
-}
-
-void FpuStackAllocator::insert_exchange(LIR_Opr opr) {
-  insert_exchange(tos_offset(opr));
-}
-
-
-void FpuStackAllocator::insert_free(int offset) {
-  // move stack slot to the top of stack and then pop it
-  insert_exchange(offset);
-
-  LIR_Op* fpop = new LIR_Op0(lir_fpop_raw);
-  insert_op(fpop);
-  sim()->pop();
-
-#ifndef PRODUCT
-    if (TraceFPUStack) {
-      tty->print("Inserted pop                   New state: "); sim()->print(); tty->cr();
-    }
-#endif
-}
-
-
-void FpuStackAllocator::insert_free_if_dead(LIR_Opr opr) {
-  if (sim()->contains(fpu_num(opr))) {
-    int res_slot = tos_offset(opr);
-    insert_free(res_slot);
-  }
-}
-
-void FpuStackAllocator::insert_free_if_dead(LIR_Opr opr, LIR_Opr ignore) {
-  if (fpu_num(opr) != fpu_num(ignore) && sim()->contains(fpu_num(opr))) {
-    int res_slot = tos_offset(opr);
-    insert_free(res_slot);
-  }
-}
-
-void FpuStackAllocator::insert_copy(LIR_Opr from, LIR_Opr to) {
-  int offset = tos_offset(from);
-  LIR_Op1* fld = new LIR_Op1(lir_fld, LIR_OprFact::intConst(offset), LIR_OprFact::illegalOpr);
-  insert_op(fld);
-
-  sim()->push(fpu_num(to));
-
-#ifndef PRODUCT
-  if (TraceFPUStack) {
-    tty->print("Inserted copy (%d -> %d)         New state: ", fpu_num(from), fpu_num(to)); sim()->print(); tty->cr();
-  }
-#endif
-}
-
-void FpuStackAllocator::do_rename(LIR_Opr from, LIR_Opr to) {
-  sim()->rename(fpu_num(from), fpu_num(to));
-}
-
-void FpuStackAllocator::do_push(LIR_Opr opr) {
-  sim()->push(fpu_num(opr));
-}
-
-void FpuStackAllocator::pop_if_last_use(LIR_Op* op, LIR_Opr opr) {
-  assert(op->fpu_pop_count() == 0, "fpu_pop_count alredy set");
-  assert(tos_offset(opr) == 0, "can only pop stack top");
-
-  if (opr->is_last_use()) {
-    op->set_fpu_pop_count(1);
-    sim()->pop();
-  }
-}
-
-void FpuStackAllocator::pop_always(LIR_Op* op, LIR_Opr opr) {
-  assert(op->fpu_pop_count() == 0, "fpu_pop_count alredy set");
-  assert(tos_offset(opr) == 0, "can only pop stack top");
-
-  op->set_fpu_pop_count(1);
-  sim()->pop();
-}
-
-void FpuStackAllocator::clear_fpu_stack(LIR_Opr preserve) {
-  int result_stack_size = (preserve->is_fpu_register() && !preserve->is_xmm_register() ? 1 : 0);
-  while (sim()->stack_size() > result_stack_size) {
-    assert(!sim()->slot_is_empty(0), "not allowed");
-
-    if (result_stack_size == 0 || sim()->get_slot(0) != fpu_num(preserve)) {
-      insert_free(0);
-    } else {
-      // move "preserve" to bottom of stack so that all other stack slots can be popped
-      insert_exchange(sim()->stack_size() - 1);
-    }
-  }
-}
-
-
-void FpuStackAllocator::handle_op1(LIR_Op1* op1) {
-  LIR_Opr in  = op1->in_opr();
-  LIR_Opr res = op1->result_opr();
-
-  LIR_Opr new_in  = in;  // new operands relative to the actual fpu stack top
-  LIR_Opr new_res = res;
-
-  // Note: this switch is processed for all LIR_Op1, regardless if they have FPU-arguments,
-  //       so checks for is_float_kind() are necessary inside the cases
-  switch (op1->code()) {
-
-    case lir_return: {
-      // FPU-Stack must only contain the (optional) fpu return value.
-      // All remaining dead values are popped from the stack
-      // If the input operand is a fpu-register, it is exchanged to the bottom of the stack
-
-      clear_fpu_stack(in);
-      if (in->is_fpu_register() && !in->is_xmm_register()) {
-        new_in = to_fpu_stack_top(in);
-      }
-
-      break;
-    }
-
-    case lir_move: {
-      if (in->is_fpu_register() && !in->is_xmm_register()) {
-        if (res->is_xmm_register()) {
-          // move from fpu register to xmm register (necessary for operations that
-          // are not available in the SSE instruction set)
-          insert_exchange(in);
-          new_in = to_fpu_stack_top(in);
-          pop_always(op1, in);
-
-        } else if (res->is_fpu_register() && !res->is_xmm_register()) {
-          // move from fpu-register to fpu-register:
-          // * input and result register equal:
-          //   nothing to do
-          // * input register is last use:
-          //   rename the input register to result register -> input register
-          //   not present on fpu-stack afterwards
-          // * input register not last use:
-          //   duplicate input register to result register to preserve input
-          //
-          // Note: The LIR-Assembler does not produce any code for fpu register moves,
-          //       so input and result stack index must be equal
-
-          if (fpu_num(in) == fpu_num(res)) {
-            // nothing to do
-          } else if (in->is_last_use()) {
-            insert_free_if_dead(res);//, in);
-            do_rename(in, res);
-          } else {
-            insert_free_if_dead(res);
-            insert_copy(in, res);
-          }
-          new_in = to_fpu_stack(res);
-          new_res = new_in;
-
-        } else {
-          // move from fpu-register to memory
-          // input operand must be on top of stack
-
-          insert_exchange(in);
-
-          // create debug information here because afterwards the register may have been popped
-          compute_debug_information(op1);
-
-          new_in = to_fpu_stack_top(in);
-          pop_if_last_use(op1, in);
-        }
-
-      } else if (res->is_fpu_register() && !res->is_xmm_register()) {
-        // move from memory/constant to fpu register
-        // result is pushed on the stack
-
-        insert_free_if_dead(res);
-
-        // create debug information before register is pushed
-        compute_debug_information(op1);
-
-        do_push(res);
-        new_res = to_fpu_stack_top(res);
-      }
-      break;
-    }
-
-    case lir_neg: {
-      if (in->is_fpu_register() && !in->is_xmm_register()) {
-        assert(res->is_fpu_register() && !res->is_xmm_register(), "must be");
-        assert(in->is_last_use(), "old value gets destroyed");
-
-        insert_free_if_dead(res, in);
-        insert_exchange(in);
-        new_in = to_fpu_stack_top(in);
-
-        do_rename(in, res);
-        new_res = to_fpu_stack_top(res);
-      }
-      break;
-    }
-
-    case lir_convert: {
-      Bytecodes::Code bc = op1->as_OpConvert()->bytecode();
-      switch (bc) {
-        case Bytecodes::_d2f:
-        case Bytecodes::_f2d:
-          assert(res->is_fpu_register(), "must be");
-          assert(in->is_fpu_register(), "must be");
-
-          if (!in->is_xmm_register() && !res->is_xmm_register()) {
-            // this is quite the same as a move from fpu-register to fpu-register
-            // Note: input and result operands must have different types
-            if (fpu_num(in) == fpu_num(res)) {
-              // nothing to do
-              new_in = to_fpu_stack(in);
-            } else if (in->is_last_use()) {
-              insert_free_if_dead(res);//, in);
-              new_in = to_fpu_stack(in);
-              do_rename(in, res);
-            } else {
-              insert_free_if_dead(res);
-              insert_copy(in, res);
-              new_in = to_fpu_stack_top(in, true);
-            }
-            new_res = to_fpu_stack(res);
-          }
-
-          break;
-
-        case Bytecodes::_i2f:
-        case Bytecodes::_l2f:
-        case Bytecodes::_i2d:
-        case Bytecodes::_l2d:
-          assert(res->is_fpu_register(), "must be");
-          if (!res->is_xmm_register()) {
-            insert_free_if_dead(res);
-            do_push(res);
-            new_res = to_fpu_stack_top(res);
-          }
-          break;
-
-        case Bytecodes::_f2i:
-        case Bytecodes::_d2i:
-          assert(in->is_fpu_register(), "must be");
-          if (!in->is_xmm_register()) {
-            insert_exchange(in);
-            new_in = to_fpu_stack_top(in);
-
-            // TODO: update registes of stub
-          }
-          break;
-
-        case Bytecodes::_f2l:
-        case Bytecodes::_d2l:
-          assert(in->is_fpu_register(), "must be");
-          if (!in->is_xmm_register()) {
-            insert_exchange(in);
-            new_in = to_fpu_stack_top(in);
-            pop_always(op1, in);
-          }
-          break;
-
-        case Bytecodes::_i2l:
-        case Bytecodes::_l2i:
-        case Bytecodes::_i2b:
-        case Bytecodes::_i2c:
-        case Bytecodes::_i2s:
-          // no fpu operands
-          break;
-
-        default:
-          ShouldNotReachHere();
-      }
-      break;
-    }
-
-    case lir_roundfp: {
-      assert(in->is_fpu_register() && !in->is_xmm_register(), "input must be in register");
-      assert(res->is_stack(), "result must be on stack");
-
-      insert_exchange(in);
-      new_in = to_fpu_stack_top(in);
-      pop_if_last_use(op1, in);
-      break;
-    }
-
-    default: {
-      assert(!in->is_float_kind() && !res->is_float_kind(), "missed a fpu-operation");
-    }
-  }
-
-  op1->set_in_opr(new_in);
-  op1->set_result_opr(new_res);
-}
-
-void FpuStackAllocator::handle_op2(LIR_Op2* op2) {
-  LIR_Opr left  = op2->in_opr1();
-  if (!left->is_float_kind()) {
-    return;
-  }
-  if (left->is_xmm_register()) {
-    return;
-  }
-
-  LIR_Opr right = op2->in_opr2();
-  LIR_Opr res   = op2->result_opr();
-  LIR_Opr new_left  = left;  // new operands relative to the actual fpu stack top
-  LIR_Opr new_right = right;
-  LIR_Opr new_res   = res;
-
-  assert(!left->is_xmm_register() && !right->is_xmm_register() && !res->is_xmm_register(), "not for xmm registers");
-
-  switch (op2->code()) {
-    case lir_cmp:
-    case lir_cmp_fd2i:
-    case lir_ucmp_fd2i:
-    case lir_assert: {
-      assert(left->is_fpu_register(), "invalid LIR");
-      assert(right->is_fpu_register(), "invalid LIR");
-
-      // the left-hand side must be on top of stack.
-      // the right-hand side is never popped, even if is_last_use is set
-      insert_exchange(left);
-      new_left = to_fpu_stack_top(left);
-      new_right = to_fpu_stack(right);
-      pop_if_last_use(op2, left);
-      break;
-    }
-
-    case lir_mul_strictfp:
-    case lir_div_strictfp: {
-      assert(op2->tmp1_opr()->is_fpu_register(), "strict operations need temporary fpu stack slot");
-      insert_free_if_dead(op2->tmp1_opr());
-      assert(sim()->stack_size() <= 7, "at least one stack slot must be free");
-      // fall-through: continue with the normal handling of lir_mul and lir_div
-    }
-    case lir_add:
-    case lir_sub:
-    case lir_mul:
-    case lir_div: {
-      assert(left->is_fpu_register(), "must be");
-      assert(res->is_fpu_register(), "must be");
-      assert(left->is_equal(res), "must be");
-
-      // either the left-hand or the right-hand side must be on top of stack
-      // (if right is not a register, left must be on top)
-      if (!right->is_fpu_register()) {
-        insert_exchange(left);
-        new_left = to_fpu_stack_top(left);
-      } else {
-        // no exchange necessary if right is alredy on top of stack
-        if (tos_offset(right) == 0) {
-          new_left = to_fpu_stack(left);
-          new_right = to_fpu_stack_top(right);
-        } else {
-          insert_exchange(left);
-          new_left = to_fpu_stack_top(left);
-          new_right = to_fpu_stack(right);
-        }
-
-        if (right->is_last_use()) {
-          op2->set_fpu_pop_count(1);
-
-          if (tos_offset(right) == 0) {
-            sim()->pop();
-          } else {
-            // if left is on top of stack, the result is placed in the stack
-            // slot of right, so a renaming from right to res is necessary
-            assert(tos_offset(left) == 0, "must be");
-            sim()->pop();
-            do_rename(right, res);
-          }
-        }
-      }
-      new_res = to_fpu_stack(res);
-
-      break;
-    }
-
-    case lir_rem: {
-      assert(left->is_fpu_register(), "must be");
-      assert(right->is_fpu_register(), "must be");
-      assert(res->is_fpu_register(), "must be");
-      assert(left->is_equal(res), "must be");
-
-      // Must bring both operands to top of stack with following operand ordering:
-      // * fpu stack before rem: ... right left
-      // * fpu stack after rem:  ... left
-      if (tos_offset(right) != 1) {
-        insert_exchange(right);
-        insert_exchange(1);
-      }
-      insert_exchange(left);
-      assert(tos_offset(right) == 1, "check");
-      assert(tos_offset(left) == 0, "check");
-
-      new_left = to_fpu_stack_top(left);
-      new_right = to_fpu_stack(right);
-
-      op2->set_fpu_pop_count(1);
-      sim()->pop();
-      do_rename(right, res);
-
-      new_res = to_fpu_stack_top(res);
-      break;
-    }
-
-    case lir_abs:
-    case lir_sqrt: {
-      // Right argument appears to be unused
-      assert(right->is_illegal(), "must be");
-      assert(left->is_fpu_register(), "must be");
-      assert(res->is_fpu_register(), "must be");
-      assert(left->is_last_use(), "old value gets destroyed");
-
-      insert_free_if_dead(res, left);
-      insert_exchange(left);
-      do_rename(left, res);
-
-      new_left = to_fpu_stack_top(res);
-      new_res = new_left;
-
-      op2->set_fpu_stack_size(sim()->stack_size());
-      break;
-    }
-
-    case lir_log:
-    case lir_log10: {
-      // log and log10 need one temporary fpu stack slot, so
-      // there is one temporary registers stored in temp of the
-      // operation. the stack allocator must guarantee that the stack
-      // slots are really free, otherwise there might be a stack
-      // overflow.
-      assert(right->is_illegal(), "must be");
-      assert(left->is_fpu_register(), "must be");
-      assert(res->is_fpu_register(), "must be");
-      assert(op2->tmp1_opr()->is_fpu_register(), "must be");
-
-      insert_free_if_dead(op2->tmp1_opr());
-      insert_free_if_dead(res, left);
-      insert_exchange(left);
-      do_rename(left, res);
-
-      new_left = to_fpu_stack_top(res);
-      new_res = new_left;
-
-      op2->set_fpu_stack_size(sim()->stack_size());
-      assert(sim()->stack_size() <= 7, "at least one stack slot must be free");
-      break;
-    }
-
-
-    case lir_tan:
-    case lir_sin:
-    case lir_cos:
-    case lir_exp: {
-      // sin, cos and exp need two temporary fpu stack slots, so there are two temporary
-      // registers (stored in right and temp of the operation).
-      // the stack allocator must guarantee that the stack slots are really free,
-      // otherwise there might be a stack overflow.
-      assert(left->is_fpu_register(), "must be");
-      assert(res->is_fpu_register(), "must be");
-      // assert(left->is_last_use(), "old value gets destroyed");
-      assert(right->is_fpu_register(), "right is used as the first temporary register");
-      assert(op2->tmp1_opr()->is_fpu_register(), "temp is used as the second temporary register");
-      assert(fpu_num(left) != fpu_num(right) && fpu_num(right) != fpu_num(op2->tmp1_opr()) && fpu_num(op2->tmp1_opr()) != fpu_num(res), "need distinct temp registers");
-
-      insert_free_if_dead(right);
-      insert_free_if_dead(op2->tmp1_opr());
-
-      insert_free_if_dead(res, left);
-      insert_exchange(left);
-      do_rename(left, res);
-
-      new_left = to_fpu_stack_top(res);
-      new_res = new_left;
-
-      op2->set_fpu_stack_size(sim()->stack_size());
-      assert(sim()->stack_size() <= 6, "at least two stack slots must be free");
-      break;
-    }
-
-    case lir_pow: {
-      // pow needs two temporary fpu stack slots, so there are two temporary
-      // registers (stored in tmp1 and tmp2 of the operation).
-      // the stack allocator must guarantee that the stack slots are really free,
-      // otherwise there might be a stack overflow.
-      assert(left->is_fpu_register(), "must be");
-      assert(right->is_fpu_register(), "must be");
-      assert(res->is_fpu_register(), "must be");
-
-      assert(op2->tmp1_opr()->is_fpu_register(), "tmp1 is the first temporary register");
-      assert(op2->tmp2_opr()->is_fpu_register(), "tmp2 is the second temporary register");
-      assert(fpu_num(left) != fpu_num(right) && fpu_num(left) != fpu_num(op2->tmp1_opr()) && fpu_num(left) != fpu_num(op2->tmp2_opr()) && fpu_num(left) != fpu_num(res), "need distinct temp registers");
-      assert(fpu_num(right) != fpu_num(op2->tmp1_opr()) && fpu_num(right) != fpu_num(op2->tmp2_opr()) && fpu_num(right) != fpu_num(res), "need distinct temp registers");
-      assert(fpu_num(op2->tmp1_opr()) != fpu_num(op2->tmp2_opr()) && fpu_num(op2->tmp1_opr()) != fpu_num(res), "need distinct temp registers");
-      assert(fpu_num(op2->tmp2_opr()) != fpu_num(res), "need distinct temp registers");
-
-      insert_free_if_dead(op2->tmp1_opr());
-      insert_free_if_dead(op2->tmp2_opr());
-
-      // Must bring both operands to top of stack with following operand ordering:
-      // * fpu stack before pow: ... right left
-      // * fpu stack after pow:  ... left
-
-      insert_free_if_dead(res, right);
-
-      if (tos_offset(right) != 1) {
-        insert_exchange(right);
-        insert_exchange(1);
-      }
-      insert_exchange(left);
-      assert(tos_offset(right) == 1, "check");
-      assert(tos_offset(left) == 0, "check");
-
-      new_left = to_fpu_stack_top(left);
-      new_right = to_fpu_stack(right);
-
-      op2->set_fpu_stack_size(sim()->stack_size());
-      assert(sim()->stack_size() <= 6, "at least two stack slots must be free");
-
-      sim()->pop();
-
-      do_rename(right, res);
-
-      new_res = to_fpu_stack_top(res);
-      break;
-    }
-
-    default: {
-      assert(false, "missed a fpu-operation");
-    }
-  }
-
-  op2->set_in_opr1(new_left);
-  op2->set_in_opr2(new_right);
-  op2->set_result_opr(new_res);
-}
-
-void FpuStackAllocator::handle_opCall(LIR_OpCall* opCall) {
-  LIR_Opr res = opCall->result_opr();
-
-  // clear fpu-stack before call
-  // it may contain dead values that could not have been remved by previous operations
-  clear_fpu_stack(LIR_OprFact::illegalOpr);
-  assert(sim()->is_empty(), "fpu stack must be empty now");
-
-  // compute debug information before (possible) fpu result is pushed
-  compute_debug_information(opCall);
-
-  if (res->is_fpu_register() && !res->is_xmm_register()) {
-    do_push(res);
-    opCall->set_result_opr(to_fpu_stack_top(res));
-  }
-}
-
-#ifndef PRODUCT
-void FpuStackAllocator::check_invalid_lir_op(LIR_Op* op) {
-  switch (op->code()) {
-    case lir_24bit_FPU:
-    case lir_reset_FPU:
-    case lir_ffree:
-      assert(false, "operations not allowed in lir. If one of these operations is needed, check if they have fpu operands");
-      break;
-
-    case lir_fpop_raw:
-    case lir_fxch:
-    case lir_fld:
-      assert(false, "operations only inserted by FpuStackAllocator");
-      break;
-  }
-}
-#endif
-
-
-void FpuStackAllocator::merge_insert_add(LIR_List* instrs, FpuStackSim* cur_sim, int reg) {
-  LIR_Op1* move = new LIR_Op1(lir_move, LIR_OprFact::doubleConst(0), LIR_OprFact::double_fpu(reg)->make_fpu_stack_offset());
-
-  instrs->instructions_list()->push(move);
-
-  cur_sim->push(reg);
-  move->set_result_opr(to_fpu_stack(move->result_opr()));
-
-  #ifndef PRODUCT
-    if (TraceFPUStack) {
-      tty->print("Added new register: %d         New state: ", reg); cur_sim->print(); tty->cr();
-    }
-  #endif
-}
-
-void FpuStackAllocator::merge_insert_xchg(LIR_List* instrs, FpuStackSim* cur_sim, int slot) {
-  assert(slot > 0, "no exchange necessary");
-
-  LIR_Op1* fxch = new LIR_Op1(lir_fxch, LIR_OprFact::intConst(slot));
-  instrs->instructions_list()->push(fxch);
-  cur_sim->swap(slot);
-
-  #ifndef PRODUCT
-    if (TraceFPUStack) {
-      tty->print("Exchanged register: %d         New state: ", cur_sim->get_slot(slot)); cur_sim->print(); tty->cr();
-    }
-  #endif
-}
-
-void FpuStackAllocator::merge_insert_pop(LIR_List* instrs, FpuStackSim* cur_sim) {
-  int reg = cur_sim->get_slot(0);
-
-  LIR_Op* fpop = new LIR_Op0(lir_fpop_raw);
-  instrs->instructions_list()->push(fpop);
-  cur_sim->pop(reg);
-
-  #ifndef PRODUCT
-    if (TraceFPUStack) {
-      tty->print("Removed register: %d           New state: ", reg); cur_sim->print(); tty->cr();
-    }
-  #endif
-}
-
-bool FpuStackAllocator::merge_rename(FpuStackSim* cur_sim, FpuStackSim* sux_sim, int start_slot, int change_slot) {
-  int reg = cur_sim->get_slot(change_slot);
-
-  for (int slot = start_slot; slot >= 0; slot--) {
-    int new_reg = sux_sim->get_slot(slot);
-
-    if (!cur_sim->contains(new_reg)) {
-      cur_sim->set_slot(change_slot, new_reg);
-
-      #ifndef PRODUCT
-        if (TraceFPUStack) {
-          tty->print("Renamed register %d to %d       New state: ", reg, new_reg); cur_sim->print(); tty->cr();
-        }
-      #endif
-
-      return true;
-    }
-  }
-  return false;
-}
-
-
-void FpuStackAllocator::merge_fpu_stack(LIR_List* instrs, FpuStackSim* cur_sim, FpuStackSim* sux_sim) {
-#ifndef PRODUCT
-  if (TraceFPUStack) {
-    tty->cr();
-    tty->print("before merging: pred: "); cur_sim->print(); tty->cr();
-    tty->print("                 sux: "); sux_sim->print(); tty->cr();
-  }
-
-  int slot;
-  for (slot = 0; slot < cur_sim->stack_size(); slot++) {
-    assert(!cur_sim->slot_is_empty(slot), "not handled by algorithm");
-  }
-  for (slot = 0; slot < sux_sim->stack_size(); slot++) {
-    assert(!sux_sim->slot_is_empty(slot), "not handled by algorithm");
-  }
-#endif
-
-  // size difference between cur and sux that must be resolved by adding or removing values form the stack
-  int size_diff = cur_sim->stack_size() - sux_sim->stack_size();
-
-  if (!ComputeExactFPURegisterUsage) {
-    // add slots that are currently free, but used in successor
-    // When the exact FPU register usage is computed, the stack does
-    // not contain dead values at merging -> no values must be added
-
-    int sux_slot = sux_sim->stack_size() - 1;
-    while (size_diff < 0) {
-      assert(sux_slot >= 0, "slot out of bounds -> error in algorithm");
-
-      int reg = sux_sim->get_slot(sux_slot);
-      if (!cur_sim->contains(reg)) {
-        merge_insert_add(instrs, cur_sim, reg);
-        size_diff++;
-
-        if (sux_slot + size_diff != 0) {
-          merge_insert_xchg(instrs, cur_sim, sux_slot + size_diff);
-        }
-      }
-     sux_slot--;
-    }
-  }
-
-  assert(cur_sim->stack_size() >= sux_sim->stack_size(), "stack size must be equal or greater now");
-  assert(size_diff == cur_sim->stack_size() - sux_sim->stack_size(), "must be");
-
-  // stack merge algorithm:
-  // 1) as long as the current stack top is not in the right location (that meens
-  //    it should not be on the stack top), exchange it into the right location
-  // 2) if the stack top is right, but the remaining stack is not ordered correctly,
-  //    the stack top is exchanged away to get another value on top ->
-  //    now step 1) can be continued
-  // the stack can also contain unused items -> these items are removed from stack
-
-  int finished_slot = sux_sim->stack_size() - 1;
-  while (finished_slot >= 0 || size_diff > 0) {
-    while (size_diff > 0 || (cur_sim->stack_size() > 0 && cur_sim->get_slot(0) != sux_sim->get_slot(0))) {
-      int reg = cur_sim->get_slot(0);
-      if (sux_sim->contains(reg)) {
-        int sux_slot = sux_sim->offset_from_tos(reg);
-        merge_insert_xchg(instrs, cur_sim, sux_slot + size_diff);
-
-      } else if (!merge_rename(cur_sim, sux_sim, finished_slot, 0)) {
-        assert(size_diff > 0, "must be");
-
-        merge_insert_pop(instrs, cur_sim);
-        size_diff--;
-      }
-      assert(cur_sim->stack_size() == 0 || cur_sim->get_slot(0) != reg, "register must have been changed");
-    }
-
-    while (finished_slot >= 0 && cur_sim->get_slot(finished_slot) == sux_sim->get_slot(finished_slot)) {
-      finished_slot--;
-    }
-
-    if (finished_slot >= 0) {
-      int reg = cur_sim->get_slot(finished_slot);
-
-      if (sux_sim->contains(reg) || !merge_rename(cur_sim, sux_sim, finished_slot, finished_slot)) {
-        assert(sux_sim->contains(reg) || size_diff > 0, "must be");
-        merge_insert_xchg(instrs, cur_sim, finished_slot);
-      }
-      assert(cur_sim->get_slot(finished_slot) != reg, "register must have been changed");
-    }
-  }
-
-#ifndef PRODUCT
-  if (TraceFPUStack) {
-    tty->print("after merging:  pred: "); cur_sim->print(); tty->cr();
-    tty->print("                 sux: "); sux_sim->print(); tty->cr();
-    tty->cr();
-  }
-#endif
-  assert(cur_sim->stack_size() == sux_sim->stack_size(), "stack size must be equal now");
-}
-
-
-void FpuStackAllocator::merge_cleanup_fpu_stack(LIR_List* instrs, FpuStackSim* cur_sim, BitMap& live_fpu_regs) {
-#ifndef PRODUCT
-  if (TraceFPUStack) {
-    tty->cr();
-    tty->print("before cleanup: state: "); cur_sim->print(); tty->cr();
-    tty->print("                live:  "); live_fpu_regs.print_on(tty); tty->cr();
-  }
-#endif
-
-  int slot = 0;
-  while (slot < cur_sim->stack_size()) {
-    int reg = cur_sim->get_slot(slot);
-    if (!live_fpu_regs.at(reg)) {
-      if (slot != 0) {
-        merge_insert_xchg(instrs, cur_sim, slot);
-      }
-      merge_insert_pop(instrs, cur_sim);
-    } else {
-      slot++;
-    }
-  }
-
-#ifndef PRODUCT
-  if (TraceFPUStack) {
-    tty->print("after cleanup:  state: "); cur_sim->print(); tty->cr();
-    tty->print("                live:  "); live_fpu_regs.print_on(tty); tty->cr();
-    tty->cr();
-  }
-
-  // check if fpu stack only contains live registers
-  for (unsigned int i = 0; i < live_fpu_regs.size(); i++) {
-    if (live_fpu_regs.at(i) != cur_sim->contains(i)) {
-      tty->print_cr("mismatch between required and actual stack content");
-      break;
-    }
-  }
-#endif
-}
-
-
-bool FpuStackAllocator::merge_fpu_stack_with_successors(BlockBegin* block) {
-#ifndef PRODUCT
-  if (TraceFPUStack) {
-    tty->print_cr("Propagating FPU stack state for B%d at LIR_Op position %d to successors:",
-                  block->block_id(), pos());
-    sim()->print();
-    tty->cr();
-  }
-#endif
-
-  bool changed = false;
-  int number_of_sux = block->number_of_sux();
-
-  if (number_of_sux == 1 && block->sux_at(0)->number_of_preds() > 1) {
-    // The successor has at least two incoming edges, so a stack merge will be necessary
-    // If this block is the first predecessor, cleanup the current stack and propagate it
-    // If this block is not the first predecessor, a stack merge will be necessary
-
-    BlockBegin* sux = block->sux_at(0);
-    intArray* state = sux->fpu_stack_state();
-    LIR_List* instrs = new LIR_List(_compilation);
-
-    if (state != NULL) {
-      // Merge with a successors that already has a FPU stack state
-      // the block must only have one successor because critical edges must been split
-      FpuStackSim* cur_sim = sim();
-      FpuStackSim* sux_sim = temp_sim();
-      sux_sim->read_state(state);
-
-      merge_fpu_stack(instrs, cur_sim, sux_sim);
-
-    } else {
-      // propagate current FPU stack state to successor without state
-      // clean up stack first so that there are no dead values on the stack
-      if (ComputeExactFPURegisterUsage) {
-        FpuStackSim* cur_sim = sim();
-        BitMap live_fpu_regs = block->sux_at(0)->fpu_register_usage();
-        assert(live_fpu_regs.size() == FrameMap::nof_fpu_regs, "missing register usage");
-
-        merge_cleanup_fpu_stack(instrs, cur_sim, live_fpu_regs);
-      }
-
-      intArray* state = sim()->write_state();
-      if (TraceFPUStack) {
-        tty->print_cr("Setting FPU stack state of B%d (merge path)", sux->block_id());
-        sim()->print(); tty->cr();
-      }
-      sux->set_fpu_stack_state(state);
-    }
-
-    if (instrs->instructions_list()->length() > 0) {
-      lir()->insert_before(pos(), instrs);
-      set_pos(instrs->instructions_list()->length() + pos());
-      changed = true;
-    }
-
-  } else {
-    // Propagate unmodified Stack to successors where a stack merge is not necessary
-    intArray* state = sim()->write_state();
-    for (int i = 0; i < number_of_sux; i++) {
-      BlockBegin* sux = block->sux_at(i);
-
-#ifdef ASSERT
-      for (int j = 0; j < sux->number_of_preds(); j++) {
-        assert(block == sux->pred_at(j), "all critical edges must be broken");
-      }
-
-      // check if new state is same
-      if (sux->fpu_stack_state() != NULL) {
-        intArray* sux_state = sux->fpu_stack_state();
-        assert(state->length() == sux_state->length(), "overwriting existing stack state");
-        for (int j = 0; j < state->length(); j++) {
-          assert(state->at(j) == sux_state->at(j), "overwriting existing stack state");
-        }
-      }
-#endif
-#ifndef PRODUCT
-      if (TraceFPUStack) {
-        tty->print_cr("Setting FPU stack state of B%d", sux->block_id());
-        sim()->print(); tty->cr();
-      }
-#endif
-
-      sux->set_fpu_stack_state(state);
-    }
-  }
-
-#ifndef PRODUCT
-  // assertions that FPU stack state conforms to all successors' states
-  intArray* cur_state = sim()->write_state();
-  for (int i = 0; i < number_of_sux; i++) {
-    BlockBegin* sux = block->sux_at(i);
-    intArray* sux_state = sux->fpu_stack_state();
-
-    assert(sux_state != NULL, "no fpu state");
-    assert(cur_state->length() == sux_state->length(), "incorrect length");
-    for (int i = 0; i < cur_state->length(); i++) {
-      assert(cur_state->at(i) == sux_state->at(i), "element not equal");
-    }
-  }
-#endif
-
-  return changed;
-}
diff -r fdcc9aef9dbb -r 4020f25a52c2 src/cpu/aarch64/vm/c1_LinearScan_aarch64.hpp
--- a/src/cpu/aarch64/vm/c1_LinearScan_aarch64.hpp	Tue Jul 22 11:56:07 2014 -0400
+++ b/src/cpu/aarch64/vm/c1_LinearScan_aarch64.hpp	Fri Jul 25 08:17:44 2014 -0400
@@ -24,8 +24,8 @@
  *
  */

-#ifndef CPU_X86_VM_C1_LINEARSCAN_X86_HPP
-#define CPU_X86_VM_C1_LINEARSCAN_X86_HPP
+#ifndef CPU_AARCH64_VM_C1_LINEARSCAN_HPP
+#define CPU_AARCH64_VM_C1_LINEARSCAN_HPP

 inline bool LinearScan::is_processed_reg_num(int reg_num) {
   return reg_num <= FrameMap::last_cpu_reg() || reg_num >= pd_nof_cpu_regs_frame_map;
@@ -74,78 +74,4 @@
 }


-
-class FpuStackAllocator VALUE_OBJ_CLASS_SPEC {
- private:
-  Compilation* _compilation;
-  LinearScan* _allocator;
-
-  LIR_OpVisitState visitor;
-
-  LIR_List* _lir;
-  int _pos;
-  FpuStackSim _sim;
-  FpuStackSim _temp_sim;
-
-  bool _debug_information_computed;
-
-  LinearScan*   allocator()                      { return _allocator; }
-  Compilation*  compilation() const              { return _compilation; }
-
-  // unified bailout support
-  void          bailout(const char* msg) const   { compilation()->bailout(msg); }
-  bool          bailed_out() const               { return compilation()->bailed_out(); }
-
-  int pos() { return _pos; }
-  void set_pos(int pos) { _pos = pos; }
-  LIR_Op* cur_op() { Unimplemented(); return lir()->instructions_list()->at(pos()); }
-  LIR_List* lir() { return _lir; }
-  void set_lir(LIR_List* lir) { _lir = lir; }
-  FpuStackSim* sim() { return &_sim; }
-  FpuStackSim* temp_sim() { return &_temp_sim; }
-
-  int fpu_num(LIR_Opr opr);
-  int tos_offset(LIR_Opr opr);
-  LIR_Opr to_fpu_stack_top(LIR_Opr opr, bool dont_check_offset = false);
-
-  // Helper functions for handling operations
-  void insert_op(LIR_Op* op);
-  void insert_exchange(int offset);
-  void insert_exchange(LIR_Opr opr);
-  void insert_free(int offset);
-  void insert_free_if_dead(LIR_Opr opr);
-  void insert_free_if_dead(LIR_Opr opr, LIR_Opr ignore);
-  void insert_copy(LIR_Opr from, LIR_Opr to);
-  void do_rename(LIR_Opr from, LIR_Opr to);
-  void do_push(LIR_Opr opr);
-  void pop_if_last_use(LIR_Op* op, LIR_Opr opr);
-  void pop_always(LIR_Op* op, LIR_Opr opr);
-  void clear_fpu_stack(LIR_Opr preserve);
-  void handle_op1(LIR_Op1* op1);
-  void handle_op2(LIR_Op2* op2);
-  void handle_opCall(LIR_OpCall* opCall);
-  void compute_debug_information(LIR_Op* op);
-  void allocate_exception_handler(XHandler* xhandler);
-  void allocate_block(BlockBegin* block);
-
-#ifndef PRODUCT
-  void check_invalid_lir_op(LIR_Op* op);
-#endif
-
-  // Helper functions for merging of fpu stacks
-  void merge_insert_add(LIR_List* instrs, FpuStackSim* cur_sim, int reg);
-  void merge_insert_xchg(LIR_List* instrs, FpuStackSim* cur_sim, int slot);
-  void merge_insert_pop(LIR_List* instrs, FpuStackSim* cur_sim);
-  bool merge_rename(FpuStackSim* cur_sim, FpuStackSim* sux_sim, int start_slot, int change_slot);
-  void merge_fpu_stack(LIR_List* instrs, FpuStackSim* cur_sim, FpuStackSim* sux_sim);
-  void merge_cleanup_fpu_stack(LIR_List* instrs, FpuStackSim* cur_sim, BitMap& live_fpu_regs);
-  bool merge_fpu_stack_with_successors(BlockBegin* block);
-
- public:
-  LIR_Opr to_fpu_stack(LIR_Opr opr); // used by LinearScan for creation of debug information
-
-  FpuStackAllocator(Compilation* compilation, LinearScan* allocator);
-  void allocate();
-};
-
-#endif // CPU_X86_VM_C1_LINEARSCAN_X86_HPP
+#endif // CPU_AARCH64_VM_C1_LINEARSCAN_HPP
diff -r fdcc9aef9dbb -r 4020f25a52c2 src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.cpp	Tue Jul 22 11:56:07 2014 -0400
+++ b/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.cpp	Fri Jul 25 08:17:44 2014 -0400
@@ -432,9 +432,6 @@
 }


-void C1_MacroAssembler::unverified_entry(Register receiver, Register ic_klass) { Unimplemented(); }
-
-
 void C1_MacroAssembler::verified_entry() {
 }

diff -r fdcc9aef9dbb -r 4020f25a52c2 src/cpu/aarch64/vm/cppInterpreterGenerator_aarch64.hpp
--- a/src/cpu/aarch64/vm/cppInterpreterGenerator_aarch64.hpp	Tue Jul 22 11:56:07 2014 -0400
+++ b/src/cpu/aarch64/vm/cppInterpreterGenerator_aarch64.hpp	Fri Jul 25 08:17:44 2014 -0400
@@ -29,29 +29,7 @@

  protected:

-#if 0
-  address generate_asm_interpreter_entry(bool synchronized);
-  address generate_native_entry(bool synchronized);
-  address generate_abstract_entry(void);
-  address generate_math_entry(AbstractInterpreter::MethodKind kind);
-  address generate_empty_entry(void);
-  address generate_accessor_entry(void);
-  address generate_Reference_get_entry(void);
-  void lock_method(void);
-  void generate_stack_overflow_check(void);
-
-  void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
-  void generate_counter_overflow(Label* do_continue);
-#endif
-
   void generate_more_monitors();
   void generate_deopt_handling();
-#if 0
-  address generate_interpreter_frame_manager(bool synchronized); // C++ interpreter only
-  void generate_compute_interpreter_state(const Register state,
-                                          const Register prev_state,
-                                          const Register sender_sp,
-                                          bool native); // C++ interpreter only
-#endif

 #endif // CPU_AARCH64_VM_CPPINTERPRETERGENERATOR_AARCH64_HPP
diff -r fdcc9aef9dbb -r 4020f25a52c2 src/cpu/aarch64/vm/interp_masm_aarch64.cpp
--- a/src/cpu/aarch64/vm/interp_masm_aarch64.cpp	Tue Jul 22 11:56:07 2014 -0400
+++ b/src/cpu/aarch64/vm/interp_masm_aarch64.cpp	Fri Jul 25 08:17:44 2014 -0400
@@ -43,10 +43,6 @@

 // Implementation of InterpreterMacroAssembler

-#ifdef CC_INTERP
-void InterpreterMacroAssembler::get_method(Register reg) { Unimplemented(); }
-#endif // CC_INTERP
-
 #ifndef CC_INTERP

 void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
diff -r fdcc9aef9dbb -r 4020f25a52c2 src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Tue Jul 22 11:56:07 2014 -0400
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Fri Jul 25 08:17:44 2014 -0400
@@ -696,19 +696,6 @@
   void store_check_part_1(Register obj);
   void store_check_part_2(Register obj);

-  // currently unimplemented
-#if 0
-  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
-  void c2bool(Register x);
-
-  // C++ bool manipulation
-
-  void movbool(Register dst, Address src);
-  void movbool(Address dst, bool boolconst);
-  void movbool(Address dst, Register src);
-  void testbool(Register dst);
-#endif
-
   // oop manipulations
   void load_klass(Register dst, Register src);
   void store_klass(Register dst, Register src);
@@ -744,12 +731,6 @@
   void decode_heap_oop_not_null(Register dst, Register src);

   void set_narrow_oop(Register dst, jobject obj);
-  // currently unimplemented
-#if 0
-  void set_narrow_oop(Address dst, jobject obj);
-  void cmp_narrow_oop(Register dst, jobject obj);
-  void cmp_narrow_oop(Address dst, jobject obj);
-#endif

   void encode_klass_not_null(Register r);
   void decode_klass_not_null(Register r);
@@ -757,38 +738,18 @@
   void decode_klass_not_null(Register dst, Register src);

   void set_narrow_klass(Register dst, Klass* k);
-  // currently unimplemented
-#if 0
-  void set_narrow_klass(Address dst, Klass* k);
-  void cmp_narrow_klass(Register dst, Klass* k);
-  void cmp_narrow_klass(Address dst, Klass* k);
-#endif

   // if heap base register is used - reinit it with the correct value
   void reinit_heapbase();

   DEBUG_ONLY(void verify_heapbase(const char* msg);)

-  // currently unimplemented
-#if 0
-  void int3();
-#endif
-
   void push_CPU_state();
   void pop_CPU_state() ;

   // Round up to a power of two
   void round_to(Register reg, int modulus);

-  // unimplemented
-#if 0
-  // Callee saved registers handling
-  void push_callee_saved_registers();
-  void pop_callee_saved_registers();
-#endif
-
-  // unimplemented
-
   // allocation
   void eden_allocate(
     Register obj,                      // result: pointer to object after successful allocation
@@ -860,25 +821,9 @@
                            Register temp_reg,
                            Label& L_success);

-  // unimplemented
-#if 0
-  // method handles (JSR 292)
-  void check_method_handle_type(Register mtype_reg, Register mh_reg,
-                                Register temp_reg,
-                                Label& wrong_method_type);
-  void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
-                                  Register temp_reg);
-  void jump_to_method_handle_entry(Register mh_reg, Register temp_reg);
-#endif
   Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);


-  //----
-#if 0
-  // method handles (JSR 292)
-  void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0
-#endif
-
   // Debugging

   // only if +VerifyOops
@@ -903,22 +848,12 @@

   static void debug64(char* msg, int64_t pc, int64_t regs[]);

-  // unimplemented
-#if 0
-  void os_breakpoint();
-#endif
-
   void untested()                                { stop("untested"); }

   void unimplemented(const char* what = "")      { char* b = new char[1024];  jio_snprintf(b, 1024, "unimplemented: %s", what);  stop(b); }

   void should_not_reach_here()                   { stop("should not reach here"); }

-  // unimplemented
-#if 0
-  void print_CPU_state();
-#endif
-
   // Stack overflow checking
   void bang_stack_with_offset(int offset) {
     // stack grows down, caller passes positive offset
@@ -947,39 +882,7 @@
     str(rscratch1, Address(rscratch2));
   }

-  // unimplemented
-#if 0
-  void addptr(Address dst, Register src);
-#endif
-
-  void addptr(Register dst, Address src) { Unimplemented(); }
-  // unimplemented
-#if 0
-  void addptr(Register dst, int32_t src);
-  void addptr(Register dst, Register src);
-#endif
-  void addptr(Register dst, RegisterOrConstant src) { Unimplemented(); }
-
-  // unimplemented
-#if 0
-  void andptr(Register dst, int32_t src);
-#endif
-  void andptr(Register src1, Register src2) { Unimplemented(); }
-
-  // unimplemented
-#if 0
-  // renamed to drag out the casting of address to int32_t/intptr_t
-  void cmp32(Register src1, int32_t imm);
-
-  void cmp32(Register src1, Address src2);
-#endif
-
-  void cmpptr(Register src1, Register src2) { Unimplemented(); }
   void cmpptr(Register src1, Address src2);
-  // void cmpptr(Address src1, Register src2) { Unimplemented(); }
-
-  void cmpptr(Register src1, int32_t src2) { Unimplemented(); }
-  void cmpptr(Address src1, int32_t src2) { Unimplemented(); }

   void cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
 		  Label &suceed, Label *fail);
@@ -993,69 +896,6 @@
   void atomic_xchg(Register prev, Register newv, Register addr);
   void atomic_xchgw(Register prev, Register newv, Register addr);

-  void imulptr(Register dst, Register src) { Unimplemented(); }
-
-
-  void negptr(Register dst) { Unimplemented(); }
-
-  void notptr(Register dst) { Unimplemented(); }
-
-  // unimplemented
-#if 0
-  void shlptr(Register dst, int32_t shift);
-#endif
-  void shlptr(Register dst) { Unimplemented(); }
-
-  // unimplemented
-#if 0
-  void shrptr(Register dst, int32_t shift);
-#endif
-  void shrptr(Register dst) { Unimplemented(); }
-
-  void sarptr(Register dst) { Unimplemented(); }
-  void sarptr(Register dst, int32_t src) { Unimplemented(); }
-
-  void subptr(Address dst, int32_t src) { Unimplemented(); }
-
-  void subptr(Register dst, Address src) { Unimplemented(); }
-  // unimplemented
-#if 0
-  void subptr(Register dst, int32_t src);
-  // Force generation of a 4 byte immediate value even if it fits into 8bit
-  void subptr_imm32(Register dst, int32_t src);
-  void subptr(Register dst, Register src);
-#endif
-  void subptr(Register dst, RegisterOrConstant src) { Unimplemented(); }
-
-  void sbbptr(Address dst, int32_t src) { Unimplemented(); }
-  void sbbptr(Register dst, int32_t src) { Unimplemented(); }
-
-  void xchgptr(Register src1, Register src2) { Unimplemented(); }
-  void xchgptr(Register src1, Address src2) { Unimplemented(); }
-
-  void xaddptr(Address src1, Register src2) { Unimplemented(); }
-
-
-
-  // unimplemented
-#if 0
-
-  // Perhaps we should implement this one
-  void lea(Register dst, Address adr) { Unimplemented(); }
-
-  void leal32(Register dst, Address src) { Unimplemented(); }
-
-  void orptr(Register dst, Address src) { Unimplemented(); }
-  void orptr(Register dst, Register src) { Unimplemented(); }
-  void orptr(Register dst, int32_t src) { Unimplemented(); }
-
-  void testptr(Register src, int32_t imm32) {  Unimplemented(); }
-  void testptr(Register src1, Register src2);
-
-  void xorptr(Register dst, Register src) { Unimplemented(); }
-  void xorptr(Register dst, Address src) { Unimplemented(); }
-#endif
-
   void orptr(Address adr, RegisterOrConstant src) {
     ldr(rscratch2, adr);
     if (src.is_register())
@@ -1077,46 +917,6 @@
   // Emit the CompiledIC call idiom
   void ic_call(address entry);

-  // Jumps
-
-  // unimplemented
-#if 0
-  // NOTE: these jumps tranfer to the effective address of dst NOT
-  // the address contained by dst. This is because this is more natural
-  // for jumps/calls.
-  void jump(Address dst);
-  void jump_cc(Condition cc, Address dst);
-#endif
-
-  // Floating
-
-  void fadd_s(Address src)        { Unimplemented(); }
-
-  void fldcw(Address src) { Unimplemented(); }
-
-  void fld_s(int index)   { Unimplemented(); }
-  void fld_s(Address src) { Unimplemented(); }
-
-  void fld_d(Address src) { Unimplemented(); }
-
-  void fld_x(Address src) { Unimplemented(); }
-
-  void fmul_s(Address src)        { Unimplemented(); }
-
-  // unimplemented
-#if 0
-  // compute pow(x,y) and exp(x) with x86 instructions. Don't cover
-  // all corner cases and may result in NaN and require fallback to a
-  // runtime call.
-  void fast_pow();
-  void fast_exp();
-#endif
-
-  // computes exp(x). Fallback to runtime call included.
-  void exp_with_fallback(int num_fpu_regs_in_use) { Unimplemented(); }
-  // computes pow(x,y). Fallback to runtime call included.
-  void pow_with_fallback(int num_fpu_regs_in_use) { Unimplemented(); }
-
 public:

   // Data
@@ -1124,23 +924,9 @@
   void mov_metadata(Register dst, Metadata* obj);
   Address allocate_metadata_address(Metadata* obj);
   Address constant_oop_address(jobject obj);
-  // unimplemented
-#if 0
-  void pushoop(jobject obj);
-#endif

   void movoop(Register dst, jobject obj, bool immediate = false);

-  // sign extend as need a l to ptr sized element
-  void movl2ptr(Register dst, Address src) { Unimplemented(); }
-  void movl2ptr(Register dst, Register src) { Unimplemented(); }
-
-  // unimplemented
-#if 0
-  // C2 compiled method's prolog code.
-  void verified_entry(int framesize, bool stack_bang, bool fp_mode_24b);
-#endif
-
   // CRC32 code for java.util.zip.CRC32::updateBytes() instrinsic.
   void kernel_crc32(Register crc, Register buf, Register len,
         Register table0, Register table1, Register table2, Register table3,
diff -r fdcc9aef9dbb -r 4020f25a52c2 src/cpu/aarch64/vm/macroAssembler_aarch64.inline.hpp
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.inline.hpp	Tue Jul 22 11:56:07 2014 -0400
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.inline.hpp	Fri Jul 25 08:17:44 2014 -0400
@@ -31,8 +31,6 @@

 #ifndef PRODUCT

-inline void MacroAssembler::pd_print_patched_instruction(address branch) { Unimplemented(); }
-
 #endif // ndef PRODUCT

 #endif // CPU_AARCH64_VM_MACROASSEMBLER_AARCH64_INLINE_HPP
diff -r fdcc9aef9dbb -r 4020f25a52c2 src/cpu/aarch64/vm/methodHandles_aarch64.cpp
--- a/src/cpu/aarch64/vm/methodHandles_aarch64.cpp	Tue Jul 22 11:56:07 2014 -0400
+++ b/src/cpu/aarch64/vm/methodHandles_aarch64.cpp	Fri Jul 25 08:17:44 2014 -0400
@@ -41,13 +41,6 @@

 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

-// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
-#if 0
-static RegisterOrConstant constant(int value) {
-  return RegisterOrConstant(value);
-}
-#endif
-
 void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) {
   if (VerifyMethodHandles)
     verify_klass(_masm, klass_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class),
diff -r fdcc9aef9dbb -r 4020f25a52c2 src/cpu/aarch64/vm/nativeInst_aarch64.cpp
--- a/src/cpu/aarch64/vm/nativeInst_aarch64.cpp	Tue Jul 22 11:56:07 2014 -0400
+++ b/src/cpu/aarch64/vm/nativeInst_aarch64.cpp	Fri Jul 25 08:17:44 2014 -0400
@@ -48,8 +48,6 @@
   return instruction_address() + displacement();
 }

-void NativeCall::print() { Unimplemented(); }
-
 // Inserts a native call instruction at a given pc
 void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }

@@ -83,12 +81,8 @@

 //-------------------------------------------------------------------

-int NativeMovRegMem::instruction_start() const { Unimplemented(); return 0; }
-
 address NativeMovRegMem::instruction_address() const      { return addr_at(instruction_offset); }

-address NativeMovRegMem::next_instruction_address() const { Unimplemented(); return 0; }
-
 int NativeMovRegMem::offset() const  {
   address pc = instruction_address();
   unsigned insn = *(unsigned*)pc;
@@ -117,23 +111,11 @@
 #endif
 }

-
-void NativeMovRegMem::print() { Unimplemented(); }
-
-//-------------------------------------------------------------------
-
-void NativeLoadAddress::verify() { Unimplemented(); }
-
-
-void NativeLoadAddress::print() { Unimplemented(); }
-
 //--------------------------------------------------------------------------------

 void NativeJump::verify() { ; }


-void NativeJump::insert(address code_pos, address entry) { Unimplemented(); }
-
 void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
 }

@@ -222,11 +204,6 @@
 }


-void NativePopReg::insert(address code_pos, Register reg) { Unimplemented(); }
-
-
-void NativeIllegalInstruction::insert(address code_pos) { Unimplemented(); }
-
 void NativeGeneralJump::verify() {  }


diff -r fdcc9aef9dbb -r 4020f25a52c2 src/cpu/aarch64/vm/nativeInst_aarch64.hpp
--- a/src/cpu/aarch64/vm/nativeInst_aarch64.hpp	Tue Jul 22 11:56:07 2014 -0400
+++ b/src/cpu/aarch64/vm/nativeInst_aarch64.hpp	Fri Jul 25 08:17:44 2014 -0400
@@ -57,7 +57,6 @@
   enum { instruction_size = 4 };
   inline bool is_nop();
   bool is_dtrace_trap();
-  inline bool is_call();
   inline bool is_illegal();
   inline bool is_return();
   bool is_jump();
@@ -175,11 +174,6 @@
     return is_call_at(return_address - NativeCall::return_address_offset);
   }

-  static bool is_call_to(address instr, address target) {
-    return nativeInstruction_at(instr)->is_call() &&
-      nativeCall_at(instr)->destination() == target;
-  }
-
   // MT-safe patching of a call instruction.
   static void insert(address code_pos, address entry);

@@ -345,9 +339,6 @@

   // unit test stuff
   static void test() {}
-
- private:
-  friend NativeLoadAddress* nativeLoadAddress_at (address address) { Unimplemented(); return 0; }
 };

 class NativeJump: public NativeInstruction {
@@ -434,10 +425,6 @@
  public:
 };

-inline bool NativeInstruction::is_illegal()      { Unimplemented(); return false; }
-inline bool NativeInstruction::is_call()         { Unimplemented(); return false; }
-inline bool NativeInstruction::is_return()       { Unimplemented(); return false; }
-
 inline bool NativeInstruction::is_nop()         {
   uint32_t insn = *(uint32_t*)addr_at(0);
   return insn == 0xd503201f;
@@ -466,8 +453,4 @@
   return is_nop() || is_jump();
 }

-inline bool NativeInstruction::is_cond_jump()    { Unimplemented(); return false; }
-
-inline bool NativeInstruction::is_mov_literal64() { Unimplemented(); return false; }
-
 #endif // CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP
diff -r fdcc9aef9dbb -r 4020f25a52c2 src/cpu/aarch64/vm/register_aarch64.hpp
--- a/src/cpu/aarch64/vm/register_aarch64.hpp	Tue Jul 22 11:56:07 2014 -0400
+++ b/src/cpu/aarch64/vm/register_aarch64.hpp	Fri Jul 25 08:17:44 2014 -0400
@@ -174,44 +174,6 @@
 CONSTANT_REGISTER_DECLARATION(FloatRegister, v30    , (30));
 CONSTANT_REGISTER_DECLARATION(FloatRegister, v31    , (31));

-// #ifndef DONT_USE_REGISTER_DEFINES
-#if 0
-#define fnoreg ((FloatRegister)(fnoreg_FloatRegisterEnumValue))
-#define v0     ((FloatRegister)(    v0_FloatRegisterEnumValue))
-#define v1     ((FloatRegister)(    v1_FloatRegisterEnumValue))
-#define v2     ((FloatRegister)(    v2_FloatRegisterEnumValue))
-#define v3     ((FloatRegister)(    v3_FloatRegisterEnumValue))
-#define v4     ((FloatRegister)(    v4_FloatRegisterEnumValue))
-#define v5     ((FloatRegister)(    v5_FloatRegisterEnumValue))
-#define v6     ((FloatRegister)(    v6_FloatRegisterEnumValue))
-#define v7     ((FloatRegister)(    v7_FloatRegisterEnumValue))
-#define v8     ((FloatRegister)(    v8_FloatRegisterEnumValue))
-#define v9     ((FloatRegister)(    v9_FloatRegisterEnumValue))
-#define v10    ((FloatRegister)(   v10_FloatRegisterEnumValue))
-#define v11    ((FloatRegister)(   v11_FloatRegisterEnumValue))
-#define v12    ((FloatRegister)(   v12_FloatRegisterEnumValue))
-#define v13    ((FloatRegister)(   v13_FloatRegisterEnumValue))
-#define v14    ((FloatRegister)(   v14_FloatRegisterEnumValue))
-#define v15    ((FloatRegister)(   v15_FloatRegisterEnumValue))
-#define v16    ((FloatRegister)(   v16_FloatRegisterEnumValue))
-#define v17    ((FloatRegister)(   v17_FloatRegisterEnumValue))
-#define v18    ((FloatRegister)(   v18_FloatRegisterEnumValue))
-#define v19    ((FloatRegister)(   v19_FloatRegisterEnumValue))
-#define v20    ((FloatRegister)(   v20_FloatRegisterEnumValue))
-#define v21    ((FloatRegister)(   v21_FloatRegisterEnumValue))
-#define v22    ((FloatRegister)(   v22_FloatRegisterEnumValue))
-#define v23    ((FloatRegister)(   v23_FloatRegisterEnumValue))
-#define v24    ((FloatRegister)(   v24_FloatRegisterEnumValue))
-#define v25    ((FloatRegister)(   v25_FloatRegisterEnumValue))
-#define v26    ((FloatRegister)(   v26_FloatRegisterEnumValue))
-#define v27    ((FloatRegister)(   v27_FloatRegisterEnumValue))
-#define v28    ((FloatRegister)(   v28_FloatRegisterEnumValue))
-#define v29    ((FloatRegister)(   v29_FloatRegisterEnumValue))
-#define v30    ((FloatRegister)(   v30_FloatRegisterEnumValue))
-#define v31    ((FloatRegister)(   v31_FloatRegisterEnumValue))
-#endif // 0
-//#endif // DONT_USE_REGISTER_DEFINES
-
 // Need to know the total number of registers of all sorts for SharedInfo.
 // Define a class that exports it.
 class ConcreteRegisterImpl : public AbstractRegisterImpl {
diff -r fdcc9aef9dbb -r 4020f25a52c2 src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
--- a/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Tue Jul 22 11:56:07 2014 -0400
+++ b/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Fri Jul 25 08:17:44 2014 -0400
@@ -2580,11 +2580,6 @@
   }
 #endif

-  // TODO check various assumptions here
-  //
-  // call unimplemented to make sure we actually check this later
-  // __ call_Unimplemented();
-
   assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");

   address start = __ pc();
diff -r fdcc9aef9dbb -r 4020f25a52c2 src/cpu/aarch64/vm/stubGenerator_aarch64.cpp
--- a/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Tue Jul 22 11:56:07 2014 -0400
+++ b/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Fri Jul 25 08:17:44 2014 -0400
@@ -67,10 +67,6 @@

 // Stub Code definitions

-#if 0
-static address handle_unsafe_access() { Unimplemented(); return 0; }
-#endif
-
 class StubGenerator: public StubCodeGenerator {
  private:

@@ -592,159 +588,6 @@
     return start;
   }

-  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
-  //
-  // Arguments :
-  //    c_rarg0: exchange_value
-  //    c_rarg0: dest
-  //
-  // Result:
-  //    *dest <- ex, return (orig *dest)
-
-  // NOTE: not sure this is actually needed but if so it looks like it
-  // is called from os-specific code i.e. it needs an x86 prolog
-
-  address generate_atomic_xchg() { return 0; }
-
-  // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
-  //
-  // Arguments :
-  //    c_rarg0: exchange_value
-  //    c_rarg1: dest
-  //
-  // Result:
-  //    *dest <- ex, return (orig *dest)
-
-  // NOTE: not sure this is actually needed but if so it looks like it
-  // is called from os-specific code i.e. it needs an x86 prolog
-
-  address generate_atomic_xchg_ptr() { return 0; }
-
-  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
-  //                                         jint compare_value)
-  //
-  // Arguments :
-  //    c_rarg0: exchange_value
-  //    c_rarg1: dest
-  //    c_rarg2: compare_value
-  //
-  // Result:
-  //    if ( compare_value == *dest ) {
-  //       *dest = exchange_value
-  //       return compare_value;
-  //    else
-  //       return *dest;
-  address generate_atomic_cmpxchg() { return 0; }
-
-  // Support for jint atomic::atomic_cmpxchg_long(jlong exchange_value,
-  //                                             volatile jlong* dest,
-  //                                             jlong compare_value)
-  // Arguments :
-  //    c_rarg0: exchange_value
-  //    c_rarg1: dest
-  //    c_rarg2: compare_value
-  //
-  // Result:
-  //    if ( compare_value == *dest ) {
-  //       *dest = exchange_value
-  //       return compare_value;
-  //    else
-  //       return *dest;
-
-  // NOTE: not sure this is actually needed but if so it looks like it
-  // is called from os-specific code i.e. it needs an x86 prolog
-
-  address generate_atomic_cmpxchg_long() { return 0; }
-
-  // Support for jint atomic::add(jint add_value, volatile jint* dest)
-  //
-  // Arguments :
-  //    c_rarg0: add_value
-  //    c_rarg1: dest
-  //
-  // Result:
-  //    *dest += add_value
-  //    return *dest;
-
-  // NOTE: not sure this is actually needed but if so it looks like it
-  // is called from os-specific code i.e. it needs an x86 prolog
-
-  address generate_atomic_add() { return 0; }
-
-  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
-  //
-  // Arguments :
-  //    c_rarg0: add_value
-  //    c_rarg1: dest
-  //
-  // Result:
-  //    *dest += add_value
-  //    return *dest;
-
-  // NOTE: not sure this is actually needed but if so it looks like it
-  // is called from os-specific code i.e. it needs an x86 prolog
-
-  address generate_atomic_add_ptr() { return 0; }
-
-  // Support for intptr_t OrderAccess::fence()
-  //
-  // Arguments :
-  //
-  // Result:
-
-  // NOTE: this is called from C code so it needs an x86 prolog
-  // or else we need to fiddle it with inline asm for now
-
-  address generate_orderaccess_fence() { return 0; }
-
-  // Support for intptr_t get_previous_fp()
-  //
-  // This routine is used to find the previous frame pointer for the
-  // caller (current_frame_guess). This is used as part of debugging
-  // ps() is seemingly lost trying to find frames.
-  // This code assumes that caller current_frame_guess) has a frame.
-
-  // NOTE: this is called from C code in os_windows.cpp with AMD64. other
-  // builds use inline asm -- so we should be ok for aarch64
-
-  address generate_get_previous_fp() { return 0; }
-
-  // Support for intptr_t get_previous_sp()
-  //
-  // This routine is used to find the previous stack pointer for the
-  // caller.
-
-  // NOTE: this is called from C code in os_windows.cpp with AMD64. other
-  // builds use inline asm -- so we should be ok for aarch64
-
-  address generate_get_previous_sp() { return 0; }
-
-  // NOTE: these fixup routines appear only to be called from the
-  // opto code (they are mentioned in x86_64.ad) so we can do
-  // without them for now on aarch64
-
-  address generate_f2i_fixup() { Unimplemented(); return 0; }
-
-  address generate_f2l_fixup() { Unimplemented(); return 0; }
-
-  address generate_d2i_fixup() { Unimplemented(); return 0; }
-
-  address generate_d2l_fixup() { Unimplemented(); return 0; }
-
-  // The following routine generates a subroutine to throw an
-  // asynchronous UnknownError when an unsafe access gets a fault that
-  // could not be reasonably prevented by the programmer.  (Example:
-  // SIGBUS/OBJERR.)
-
-  // NOTE: this is used by the signal handler code as a return address
-  // to re-enter Java execution so it needs an x86 prolog which will
-  // reenter the simulator executing the generated handler code. so
-  // the prolog needs to adjust the sim's restart pc to enter the
-  // generated code at the start position then return from native to
-  // simulated execution.
-
-  address generate_handler_for_unsafe_access() { return 0; }
-
   // Non-destructive plausibility checks for oops
   //
   // Arguments:
@@ -817,29 +660,7 @@
     return start;
   }

-  //
-  // Verify that a register contains clean 32-bits positive value
-  // (high 32-bits are 0) so it could be used in 64-bits shifts.
-  //
-  //  Input:
-  //    Rint  -  32-bits value
-  //    Rtmp  -  scratch
-  //
-  void assert_clean_int(Register Rint, Register Rtmp) { Unimplemented(); }
-
-  //  Generate overlap test for array copy stubs
-  //
-  //  Input:
-  //     c_rarg0 - from
-  //     c_rarg1 - to
-  //     c_rarg2 - element count
-  //
-  //  Output:
-  //     r0   - &from[element count - 1]
-  //
-  void array_overlap_test(address no_overlap_target, int sf) { Unimplemented(); }
   void array_overlap_test(Label& L_no_overlap, Address::sxtw sf) { __ b(L_no_overlap); }
-  void array_overlap_test(address no_overlap_target, Label* NOLp, int sf) { Unimplemented(); }

   // Generate code for an array write pre barrier
   //
@@ -1730,23 +1551,6 @@
     return start;
   }

-  //
-  //  Generate 'unsafe' array copy stub
-  //  Though just as safe as the other stubs, it takes an unscaled
-  //  size_t argument instead of an element count.
-  //
-  //  Input:
-  //    c_rarg0   - source array address
-  //    c_rarg1   - destination array address
-  //    c_rarg2   - byte count, treated as ssize_t, can be zero
-  //
-  // Examines the alignment of the operands and dispatches
-  // to a long, int, short, or byte copy loop.
-  //
-  address generate_unsafe_copy(const char *name,
-                               address byte_copy_entry, address short_copy_entry,
-                               address int_copy_entry, address long_copy_entry) { Unimplemented(); return 0; }
-
   // Perform range checks on the proposed arraycopy.
   // Kills temp, but nothing else.
   // Also, clean the sign bits of src_pos and dst_pos.
@@ -1758,28 +1562,6 @@
                               Register temp,
                               Label& L_failed) { Unimplemented(); }

-  //
-  //  Generate generic array copy stubs
-  //
-  //  Input:
-  //    c_rarg0    -  src oop
-  //    c_rarg1    -  src_pos (32-bits)
-  //    c_rarg2    -  dst oop
-  //    c_rarg3    -  dst_pos (32-bits)
-  // not Win64
-  //    c_rarg4    -  element count (32-bits)
-  // Win64
-  //    rsp+40     -  element count (32-bits)
-  //
-  //  Output:
-  //    r0 ==  0  -  success
-  //    r0 == -1^K - failure, where K is partial transfer count
-  //
-  address generate_generic_copy(const char *name,
-                                address byte_copy_entry, address short_copy_entry,
-                                address int_copy_entry, address oop_copy_entry,
-                                address long_copy_entry, address checkcast_copy_entry) { Unimplemented(); return 0; }
-
   // These stubs get called from some dumb test routine.
   // I'll write them properly when they're called from
   // something that's actually doing something.
@@ -1876,8 +1658,6 @@
                                                                         /*dest_uninitialized*/true);
   }

-  void generate_math_stubs() { Unimplemented(); }
-
   // Arguments:
   //
   // Inputs:
@@ -2391,38 +2171,6 @@
   // otherwise assume that stack unwinding will be initiated, so
   // caller saved registers were assumed volatile in the compiler.

-  // NOTE: this needs carefully checking to see where the generated
-  // code gets called from for each generated error
-  //
-  // WrongMethodTypeException : jumped to directly from generated method
-  // handle code.
-  //
-  // StackOverflowError : jumped to directly from generated code in
-  // cpp and template interpreter. the generated code address also
-  // appears to be returned from the signal handler as the re-entry
-  // address forJava execution to continue from. This means it needs
-  // to be enterable from x86 code. Hmm, we may need to expose both an
-  // x86 prolog and the address of the generated ARM code and clients
-  // will have to be mdoified to pick the correct one.
-  //
-  // AbstractMethodError : never jumped to from generated code but the
-  // generated code address appears to be returned from the signal
-  // handler as the re-entry address for Java execution to continue
-  // from. This means it needs to be enterable from x86 code. So, we
-  // will need to provide this one with an x86 prolog as per
-  // StackOverflowError
-  //
-  // IncompatibleClassChangeError : only appears to be jumped to
-  // directly from vtableStubs code
-  //
-  // NullPointerException : never jumped to from generated code but
-  // the generated code address appears to be returned from the signal
-  // handler as the re-entry address for Java execution to continue
-  // from. This means it needs to be enterable from x86 code. So, we
-  // will need to provide this one with an x86 prolog as per
-  // StackOverflowError
-
-
   address generate_throw_exception(const char* name,
                                    address runtime_entry,
                                    Register arg1 = noreg,
@@ -2528,22 +2276,6 @@
     // is referenced by megamorphic call
     StubRoutines::_catch_exception_entry = generate_catch_exception();

-    // atomic calls
-    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
-    StubRoutines::_atomic_xchg_ptr_entry     = generate_atomic_xchg_ptr();
-    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
-    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
-    StubRoutines::_atomic_add_entry          = generate_atomic_add();
-    StubRoutines::_atomic_add_ptr_entry      = generate_atomic_add_ptr();
-    StubRoutines::_fence_entry               = generate_orderaccess_fence();
-
-    StubRoutines::_handler_for_unsafe_access_entry =
-      generate_handler_for_unsafe_access();
-
-    // platform dependent
-    StubRoutines::aarch64::_get_previous_fp_entry = generate_get_previous_fp();
-    StubRoutines::aarch64::_get_previous_sp_entry = generate_get_previous_sp();
-
     // Build this early so it's available for the interpreter.
     StubRoutines::_throw_StackOverflowError_entry =
       generate_throw_exception("StackOverflowError throw_exception",


