Timestamp: Feb 13, 2012, 10:07:12 PM
Author: dmik
Message: trunk: Merged in openjdk6 b24 from branches/vendor/oracle.
Location: trunk/openjdk
Files: 99 edited
  • trunk/openjdk

  • trunk/openjdk/hotspot/src/cpu/x86/vm/assembler_x86.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_assembler_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "assembler_x86.inline.hpp"
     27#include "gc_interface/collectedHeap.inline.hpp"
     28#include "interpreter/interpreter.hpp"
     29#include "memory/cardTableModRefBS.hpp"
     30#include "memory/resourceArea.hpp"
     31#include "prims/methodHandles.hpp"
     32#include "runtime/biasedLocking.hpp"
     33#include "runtime/interfaceSupport.hpp"
     34#include "runtime/objectMonitor.hpp"
     35#include "runtime/os.hpp"
     36#include "runtime/sharedRuntime.hpp"
     37#include "runtime/stubRoutines.hpp"
     38#ifndef SERIALGC
     39#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
     40#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
     41#include "gc_implementation/g1/heapRegion.hpp"
     42#endif
    2743
    2844// Implementation of AddressLiteral
     
    805821
    806822
    807 // Now the Assembler instruction (identical for 32/64 bits)
     823// Now the Assembler instructions (identical for 32/64 bits)
     824
     825void Assembler::adcl(Address dst, int32_t imm32) {
     826  InstructionMark im(this);
     827  prefix(dst);
     828  emit_arith_operand(0x81, rdx, dst, imm32);
     829}
     830
     831void Assembler::adcl(Address dst, Register src) {
     832  InstructionMark im(this);
     833  prefix(dst, src);
     834  emit_byte(0x11);
     835  emit_operand(src, dst);
     836}
    808837
    809838void Assembler::adcl(Register dst, int32_t imm32) {
     
    12761305}
    12771306
     1307void Assembler::divl(Register src) { // Unsigned
     1308  int encode = prefix_and_encode(src->encoding());
     1309  emit_byte(0xF7);
     1310  emit_byte(0xF0 | encode);
     1311}
     1312
    12781313void Assembler::imull(Register dst, Register src) {
    12791314  int encode = prefix_and_encode(dst->encoding(), src->encoding());
     
    12891324    emit_byte(0x6B);
    12901325    emit_byte(0xC0 | encode);
    1291     emit_byte(value);
     1326    emit_byte(value & 0xFF);
    12921327  } else {
    12931328    emit_byte(0x69);
     
    21742209  InstructionMark im(this);
    21752210  prefix(dst);
    2176   emit_byte(0x81);
    2177   emit_operand(rcx, dst, 4);
    2178   emit_long(imm32);
     2211  emit_arith_operand(0x81, rcx, dst, imm32);
    21792212}
    21802213
     
    21842217}
    21852218
    2186 
    21872219void Assembler::orl(Register dst, Address src) {
    21882220  InstructionMark im(this);
     
    21912223  emit_operand(dst, src);
    21922224}
    2193 
    21942225
    21952226void Assembler::orl(Register dst, Register src) {
     
    22762307
    22772308void Assembler::prefetchr(Address src) {
    2278   NOT_LP64(assert(VM_Version::supports_3dnow(), "must support"));
     2309  NOT_LP64(assert(VM_Version::supports_3dnow_prefetch(), "must support"));
    22792310  InstructionMark im(this);
    22802311  prefetch_prefix(src);
     
    23082339
    23092340void Assembler::prefetchw(Address src) {
    2310   NOT_LP64(assert(VM_Version::supports_3dnow(), "must support"));
     2341  NOT_LP64(assert(VM_Version::supports_3dnow_prefetch(), "must support"));
    23112342  InstructionMark im(this);
    23122343  prefetch_prefix(src);
     
    23172348void Assembler::prefix(Prefix p) {
    23182349  a_byte(p);
     2350}
     2351
     2352void Assembler::por(XMMRegister dst, XMMRegister src) {
     2353  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
     2354
     2355  emit_byte(0x66);
     2356  int  encode = prefix_and_encode(dst->encoding(), src->encoding());
     2357  emit_byte(0x0F);
     2358
     2359  emit_byte(0xEB);
     2360  emit_byte(0xC0 | encode);
    23192361}
    23202362
     
    26282670}
    26292671
     2672void Assembler::sqrtsd(XMMRegister dst, Address src) {
     2673  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
     2674  InstructionMark im(this);
     2675  emit_byte(0xF2);
     2676  prefix(src, dst);
     2677  emit_byte(0x0F);
     2678  emit_byte(0x51);
     2679  emit_operand(dst, src);
     2680}
     2681
     2682void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
     2683  // HMM Table D-1 says sse2
     2684  // NOT_LP64(assert(VM_Version::supports_sse(), ""));
     2685  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
     2686  emit_byte(0xF3);
     2687  int encode = prefix_and_encode(dst->encoding(), src->encoding());
     2688  emit_byte(0x0F);
     2689  emit_byte(0x51);
     2690  emit_byte(0xC0 | encode);
     2691}
     2692
     2693void Assembler::sqrtss(XMMRegister dst, Address src) {
     2694  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
     2695  InstructionMark im(this);
     2696  emit_byte(0xF3);
     2697  prefix(src, dst);
     2698  emit_byte(0x0F);
     2699  emit_byte(0x51);
     2700  emit_operand(dst, src);
     2701}
     2702
    26302703void Assembler::stmxcsr( Address dst) {
    26312704  NOT_LP64(assert(VM_Version::supports_sse(), ""));
     
    26402713  InstructionMark im(this);
    26412714  prefix(dst);
    2642   if (is8bit(imm32)) {
    2643     emit_byte(0x83);
    2644     emit_operand(rbp, dst, 1);
    2645     emit_byte(imm32 & 0xFF);
    2646   } else {
    2647     emit_byte(0x81);
    2648     emit_operand(rbp, dst, 4);
    2649     emit_long(imm32);
    2650   }
     2715  emit_arith_operand(0x81, rbp, dst, imm32);
     2716}
     2717
     2718void Assembler::subl(Address dst, Register src) {
     2719  InstructionMark im(this);
     2720  prefix(dst, src);
     2721  emit_byte(0x29);
     2722  emit_operand(src, dst);
    26512723}
    26522724
     
    26542726  prefix(dst);
    26552727  emit_arith(0x81, 0xE8, dst, imm32);
    2656 }
    2657 
    2658 void Assembler::subl(Address dst, Register src) {
    2659   InstructionMark im(this);
    2660   prefix(dst, src);
    2661   emit_byte(0x29);
    2662   emit_operand(src, dst);
    26632728}
    26642729
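Note on the hunk above: the open-coded immediate selection in subl(Address, int32_t) (orl and subq get the same treatment elsewhere in this file) is replaced by a single emit_arith_operand(0x81, ...) call. The removed lines show the decision being factored out: use the 0x83 sign-extended-imm8 form of the group-1 ALU instructions when the immediate fits in a signed byte, otherwise the 0x81 imm32 form. The standalone C++ sketch below illustrates just that choice; emit_arith_imm and modrm_stub are illustrative names, not HotSpot code, and the real helper also emits the ModRM/SIB/displacement bytes through emit_operand().

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static bool is8bit(int32_t x) { return -0x80 <= x && x < 0x80; }

    // 'modrm_stub' stands in for the ModRM/SIB/displacement bytes that the
    // real emit_operand() call would produce for the memory operand.
    static void emit_arith_imm(std::vector<uint8_t>& code, int32_t imm32, uint8_t modrm_stub) {
      if (is8bit(imm32)) {
        code.push_back(0x83);                 // group-1 ALU opcode, sign-extended imm8 form
        code.push_back(modrm_stub);
        code.push_back(uint8_t(imm32));       // single immediate byte
      } else {
        code.push_back(0x81);                 // group-1 ALU opcode, imm32 form
        code.push_back(modrm_stub);
        for (int i = 0; i < 4; i++)           // little-endian 32-bit immediate
          code.push_back(uint8_t(uint32_t(imm32) >> (8 * i)));
      }
    }

    int main() {
      std::vector<uint8_t> code;
      emit_arith_imm(code, 16, 0x00);         // fits in a byte -> 3 bytes emitted
      emit_arith_imm(code, 4096, 0x00);       // needs imm32    -> 6 bytes emitted
      printf("emitted %zu bytes\n", code.size());  // prints 9
      return 0;
    }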
     
    39043969    emit_byte(0x6B);
    39053970    emit_byte(0xC0 | encode);
    3906     emit_byte(value);
     3971    emit_byte(value & 0xFF);
    39073972  } else {
    39083973    emit_byte(0x69);
     
    42814346  emit_byte(0xF8 | encode);
    42824347}
     4348
    42834349void Assembler::sbbq(Address dst, int32_t imm32) {
    42844350  InstructionMark im(this);
     
    43374403}
    43384404
    4339 void Assembler::sqrtsd(XMMRegister dst, Address src) {
    4340   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
    4341   InstructionMark im(this);
    4342   emit_byte(0xF2);
    4343   prefix(src, dst);
    4344   emit_byte(0x0F);
    4345   emit_byte(0x51);
    4346   emit_operand(dst, src);
    4347 }
    4348 
    43494405void Assembler::subq(Address dst, int32_t imm32) {
    43504406  InstructionMark im(this);
    43514407  prefixq(dst);
    4352   if (is8bit(imm32)) {
    4353     emit_byte(0x83);
    4354     emit_operand(rbp, dst, 1);
    4355     emit_byte(imm32 & 0xFF);
    4356   } else {
    4357     emit_byte(0x81);
    4358     emit_operand(rbp, dst, 4);
    4359     emit_long(imm32);
    4360   }
     4408  emit_arith_operand(0x81, rbp, dst, imm32);
     4409}
     4410
     4411void Assembler::subq(Address dst, Register src) {
     4412  InstructionMark im(this);
     4413  prefixq(dst, src);
     4414  emit_byte(0x29);
     4415  emit_operand(src, dst);
    43614416}
    43624417
     
    43644419  (void) prefixq_and_encode(dst->encoding());
    43654420  emit_arith(0x81, 0xE8, dst, imm32);
    4366 }
    4367 
    4368 void Assembler::subq(Address dst, Register src) {
    4369   InstructionMark im(this);
    4370   prefixq(dst, src);
    4371   emit_byte(0x29);
    4372   emit_operand(src, dst);
    43734421}
    43744422
     
    49084956
    49094957
    4910 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
    4911   movsd(dst, as_Address(src));
    4912 }
    4913 
    49144958void MacroAssembler::pop_callee_saved_registers() {
    49154959  pop(rcx);
     
    49945038      tty->print_cr("eip = 0x%08x", eip);
    49955039#ifndef PRODUCT
    4996       tty->cr();
    4997       findpc(eip);
    4998       tty->cr();
     5040      if ((WizardMode || Verbose) && PrintMiscellaneous) {
     5041        tty->cr();
     5042        findpc(eip);
     5043        tty->cr();
     5044      }
    49995045#endif
    5000       tty->print_cr("rax, = 0x%08x", rax);
    5001       tty->print_cr("rbx, = 0x%08x", rbx);
     5046      tty->print_cr("rax = 0x%08x", rax);
     5047      tty->print_cr("rbx = 0x%08x", rbx);
    50025048      tty->print_cr("rcx = 0x%08x", rcx);
    50035049      tty->print_cr("rdx = 0x%08x", rdx);
    50045050      tty->print_cr("rdi = 0x%08x", rdi);
    50055051      tty->print_cr("rsi = 0x%08x", rsi);
    5006       tty->print_cr("rbp, = 0x%08x", rbp);
     5052      tty->print_cr("rbp = 0x%08x", rbp);
    50075053      tty->print_cr("rsp = 0x%08x", rsp);
    50085054      BREAKPOINT;
     5055      assert(false, "start up GDB");
    50095056    }
    50105057  } else {
     
    55145561
    55155562void MacroAssembler::warn(const char* msg) {
    5516   push(r12);
    5517   movq(r12, rsp);
     5563  push(rsp);
    55185564  andq(rsp, -16);     // align stack as required by push_CPU_state and call
    55195565
     
    55225568  call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
    55235569  pop_CPU_state();
    5524 
    5525   movq(rsp, r12);
    5526   pop(r12);
     5570  pop(rsp);
    55275571}
    55285572
     
    58365880  assert(number_of_arguments >= 0   , "cannot have negative number of arguments");
    58375881  LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
     5882#ifdef ASSERT
     5883  LP64_ONLY(if (UseCompressedOops) verify_heapbase("call_VM_base");)
     5884#endif // ASSERT
     5885
    58385886  assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
    58395887  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
     
    70947142
    70957143// Preserves rbx, and rdx.
    7096 void MacroAssembler::tlab_refill(Label& retry,
    7097                                  Label& try_eden,
    7098                                  Label& slow_case) {
     7144Register MacroAssembler::tlab_refill(Label& retry,
     7145                                     Label& try_eden,
     7146                                     Label& slow_case) {
    70997147  Register top = rax;
    71007148  Register t1  = rcx;
     
    71437191  // if tlab is currently allocated (top or end != null) then
    71447192  // fill [top, end + alignment_reserve) with array object
    7145   testptr (top, top);
     7193  testptr(top, top);
    71467194  jcc(Assembler::zero, do_refill);
    71477195
     
    71557203  // set klass to intArrayKlass
    71567204  // dubious reloc why not an oop reloc?
    7157   movptr(t1, ExternalAddress((address) Universe::intArrayKlassObj_addr()));
     7205  movptr(t1, ExternalAddress((address)Universe::intArrayKlassObj_addr()));
    71587206  // store klass last.  concurrent gcs assumes klass length is valid if
    71597207  // klass field is not null.
    71607208  store_klass(top, t1);
     7209
     7210  movptr(t1, top);
     7211  subptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
     7212  incr_allocated_bytes(thread_reg, t1, 0);
    71617213
    71627214  // refill the tlab with an eden allocation
     
    71647216  movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
    71657217  shlptr(t1, LogHeapWordSize);
    7166   // add object_size ??
     7218  // allocate new tlab, address returned in top
    71677219  eden_allocate(top, t1, 0, t2, slow_case);
    71687220
     
    71927244  verify_tlab();
    71937245  jmp(retry);
     7246
     7247  return thread_reg; // for use by caller
     7248}
     7249
     7250void MacroAssembler::incr_allocated_bytes(Register thread,
     7251                                          Register var_size_in_bytes,
     7252                                          int con_size_in_bytes,
     7253                                          Register t1) {
     7254#ifdef _LP64
     7255  if (var_size_in_bytes->is_valid()) {
     7256    addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
     7257  } else {
     7258    addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
     7259  }
     7260#else
     7261  if (!thread->is_valid()) {
     7262    assert(t1->is_valid(), "need temp reg");
     7263    thread = t1;
     7264    get_thread(thread);
     7265  }
     7266
     7267  if (var_size_in_bytes->is_valid()) {
     7268    addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
     7269  } else {
     7270    addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
     7271  }
     7272  adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0);
     7273#endif
    71947274}
    71957275
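Note on the hunk above: the adcl(Address, int32_t) and adcl(Address, Register) overloads added near the top of this file support the 32-bit branch of incr_allocated_bytes, which bumps the 64-bit allocated_bytes counter with addl on the low word followed by adcl of 0 into the word at offset +4 so the carry propagates. A minimal standalone illustration of that split-word add (plain C++, not HotSpot code):

    #include <cstdint>
    #include <cstdio>

    // A 64-bit counter kept as two 32-bit words, incremented the way the
    // 32-bit path above does it: add the low word (addl), then add the
    // carry into the high word (adcl ..., 0).
    static void add_to_split_counter(uint32_t words[2], uint32_t delta) {
      uint32_t old_lo = words[0];
      words[0] = old_lo + delta;                  // addl
      words[1] += (words[0] < old_lo) ? 1u : 0u;  // adcl with 0: carry propagation
    }

    int main() {
      uint32_t counter[2] = {0xFFFFFFF0u, 0u};    // low word about to wrap
      add_to_split_counter(counter, 0x20u);
      printf("high=%u low=0x%08X\n", counter[1], counter[0]);  // high=1 low=0x00000010
      return 0;
    }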
     
    76787758
    76797759#ifdef ASSERT
    7680   Label L;
    7681   testptr(tmp, tmp);
    7682   jccb(Assembler::notZero, L);
    7683   hlt();
    7684   bind(L);
     7760  { Label L;
     7761    testptr(tmp, tmp);
     7762    if (WizardMode) {
     7763      jcc(Assembler::notZero, L);
     7764      char* buf = new char[40];
     7765      sprintf(buf, "DelayedValue="INTPTR_FORMAT, delayed_value_addr[1]);
     7766      stop(buf);
     7767    } else {
     7768      jccb(Assembler::notZero, L);
     7769      hlt();
     7770    }
     7771    bind(L);
     7772  }
    76857773#endif
    76867774
     
    76997787                                              Register temp_reg,
    77007788                                              Label& wrong_method_type) {
    7701   if (UseCompressedOops)  unimplemented();  // field accesses must decode
     7789  Address type_addr(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg));
    77027790  // compare method type against that of the receiver
    7703   cmpptr(mtype_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)));
     7791  if (UseCompressedOops) {
     7792    load_heap_oop(temp_reg, type_addr);
     7793    cmpptr(mtype_reg, temp_reg);
     7794  } else {
     7795    cmpptr(mtype_reg, type_addr);
     7796  }
    77047797  jcc(Assembler::notEqual, wrong_method_type);
    77057798}
     
    77137806                                                Register temp_reg) {
    77147807  assert_different_registers(vmslots_reg, mh_reg, temp_reg);
    7715   if (UseCompressedOops)  unimplemented();  // field accesses must decode
    77167808  // load mh.type.form.vmslots
    77177809  if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
     
    77207812  } else {
    77217813    Register temp2_reg = vmslots_reg;
    7722     movptr(temp2_reg, Address(mh_reg,    delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)));
    7723     movptr(temp2_reg, Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg)));
     7814    load_heap_oop(temp2_reg, Address(mh_reg,    delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)));
     7815    load_heap_oop(temp2_reg, Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg)));
    77247816    movl(vmslots_reg, Address(temp2_reg, delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)));
    77257817  }
     
    77357827  assert_different_registers(mh_reg, temp_reg);
    77367828
    7737   if (UseCompressedOops)  unimplemented();  // field accesses must decode
    7738 
    77397829  // pick out the interpreted side of the handler
     7830  // NOTE: vmentry is not an oop!
    77407831  movptr(temp_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::vmentry_offset_in_bytes, temp_reg)));
    77417832
     
    82288319}
    82298320
     8321void MacroAssembler::load_heap_oop(Register dst, Address src) {
     8322#ifdef _LP64
     8323  if (UseCompressedOops) {
     8324    movl(dst, src);
     8325    decode_heap_oop(dst);
     8326  } else
     8327#endif
     8328    movptr(dst, src);
     8329}
     8330
     8331void MacroAssembler::store_heap_oop(Address dst, Register src) {
     8332#ifdef _LP64
     8333  if (UseCompressedOops) {
     8334    assert(!dst.uses(src), "not enough registers");
     8335    encode_heap_oop(src);
     8336    movl(dst, src);
     8337  } else
     8338#endif
     8339    movptr(dst, src);
     8340}
     8341
     8342// Used for storing NULLs.
     8343void MacroAssembler::store_heap_oop_null(Address dst) {
     8344#ifdef _LP64
     8345  if (UseCompressedOops) {
     8346    movl(dst, (int32_t)NULL_WORD);
     8347  } else {
     8348    movslq(dst, (int32_t)NULL_WORD);
     8349  }
     8350#else
     8351  movl(dst, (int32_t)NULL_WORD);
     8352#endif
     8353}
     8354
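Note on the hunk above: the new load_heap_oop/store_heap_oop/store_heap_oop_null bodies switch between a 32-bit move combined with decode_heap_oop/encode_heap_oop (64-bit VM with UseCompressedOops) and a plain pointer-sized move. This hunk does not show the encode/decode routines themselves; the sketch below assumes the commonly described narrow-oop scheme (heap base plus an 8-byte-aligned offset shifted right by 3), so the names and constants are illustrative only, not what this changeset implements.

    #include <cstdint>
    #include <cassert>

    struct Heap { uint64_t base; };  // hypothetical stand-in for the narrow-oop base

    static uint32_t encode_oop(const Heap& h, uint64_t oop) {
      if (oop == 0) return 0;                     // NULL stays NULL
      return uint32_t((oop - h.base) >> 3);       // 8-byte alignment frees 3 low bits
    }

    static uint64_t decode_oop(const Heap& h, uint32_t narrow) {
      if (narrow == 0) return 0;
      return h.base + (uint64_t(narrow) << 3);
    }

    int main() {
      Heap h = { 0x100000000ull };                // illustrative base address
      uint64_t p = h.base + 0x12345678ull;        // 8-byte aligned offset
      assert(decode_oop(h, encode_oop(h, p)) == p);
      return 0;
    }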
    82308355#ifdef _LP64
    82318356void MacroAssembler::store_klass_gap(Register dst, Register src) {
     
    82338358    // Store to klass gap in destination
    82348359    movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
    8235   }
    8236 }
    8237 
    8238 void MacroAssembler::load_heap_oop(Register dst, Address src) {
    8239   if (UseCompressedOops) {
    8240     movl(dst, src);
    8241     decode_heap_oop(dst);
    8242   } else {
    8243     movq(dst, src);
    8244   }
    8245 }
    8246 
    8247 void MacroAssembler::store_heap_oop(Address dst, Register src) {
    8248   if (UseCompressedOops) {
    8249     assert(!dst.uses(src), "not enough registers");
    8250     encode_heap_oop(src);
    8251     movl(dst, src);
    8252   } else {
    8253     movq(dst, src);
    8254   }
    8255 }
    8256 
    8257 // Used for storing NULLs.
    8258 void MacroAssembler::store_heap_oop_null(Address dst) {
    8259   if (UseCompressedOops) {
    8260     movl(dst, (int32_t)NULL_WORD);
    8261   } else {
    8262     movslq(dst, (int32_t)NULL_WORD);
    82638360  }
    82648361}
     
    85518648void MacroAssembler::string_compare(Register str1, Register str2,
    85528649                                    Register cnt1, Register cnt2, Register result,
    8553                                     XMMRegister vec1, XMMRegister vec2) {
     8650                                    XMMRegister vec1) {
    85548651  Label LENGTH_DIFF_LABEL, POP_LABEL, DONE_LABEL, WHILE_HEAD_LABEL;
    85558652
     
    85988695  }
    85998696
    8600   // Advance to next character
    8601   addptr(str1, 2);
    8602   addptr(str2, 2);
     8697  Address::ScaleFactor scale = Address::times_2;
     8698  int stride = 8;
     8699
     8700  // Advance to next element
     8701  addptr(str1, 16/stride);
     8702  addptr(str2, 16/stride);
    86038703
    86048704  if (UseSSE42Intrinsics) {
    8605     // With SSE4.2, use double quad vector compare
    8606     Label COMPARE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_TAIL;
     8705    Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_TAIL;
     8706    int pcmpmask = 0x19;
    86078707    // Setup to compare 16-byte vectors
    8608     movl(cnt1, cnt2);
    8609     andl(cnt2, 0xfffffff8); // cnt2 holds the vector count
    8610     andl(cnt1, 0x00000007); // cnt1 holds the tail count
    8611     testl(cnt2, cnt2);
     8708    movl(result, cnt2);
     8709    andl(cnt2, ~(stride - 1));   // cnt2 holds the vector count
    86128710    jccb(Assembler::zero, COMPARE_TAIL);
    86138711
    8614     lea(str2, Address(str2, cnt2, Address::times_2));
    8615     lea(str1, Address(str1, cnt2, Address::times_2));
    8616     negptr(cnt2);
    8617 
    8618     bind(COMPARE_VECTORS);
    8619     movdqu(vec1, Address(str1, cnt2, Address::times_2));
    8620     movdqu(vec2, Address(str2, cnt2, Address::times_2));
    8621     pxor(vec1, vec2);
    8622     ptest(vec1, vec1);
    8623     jccb(Assembler::notZero, VECTOR_NOT_EQUAL);
    8624     addptr(cnt2, 8);
    8625     jcc(Assembler::notZero, COMPARE_VECTORS);
    8626     jmpb(COMPARE_TAIL);
     8712    lea(str1, Address(str1, result, scale));
     8713    lea(str2, Address(str2, result, scale));
     8714    negptr(result);
     8715
     8716    // pcmpestri
     8717    //   inputs:
     8718    //     vec1- substring
     8719    //     rax - negative string length (elements count)
     8720    //     mem - scaned string
     8721    //     rdx - string length (elements count)
     8722    //     pcmpmask - cmp mode: 11000 (string compare with negated result)
     8723    //               + 00 (unsigned bytes) or  + 01 (unsigned shorts)
     8724    //   outputs:
     8725    //     rcx - first mismatched element index
     8726    assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");
     8727
     8728    bind(COMPARE_WIDE_VECTORS);
     8729    movdqu(vec1, Address(str1, result, scale));
     8730    pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
     8731    // After pcmpestri cnt1(rcx) contains mismatched element index
     8732
     8733    jccb(Assembler::below, VECTOR_NOT_EQUAL);  // CF==1
     8734    addptr(result, stride);
     8735    subptr(cnt2, stride);
     8736    jccb(Assembler::notZero, COMPARE_WIDE_VECTORS);
     8737
     8738    // compare wide vectors tail
     8739    testl(result, result);
     8740    jccb(Assembler::zero, LENGTH_DIFF_LABEL);
     8741
     8742    movl(cnt2, stride);
     8743    movl(result, stride);
     8744    negptr(result);
     8745    movdqu(vec1, Address(str1, result, scale));
     8746    pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
     8747    jccb(Assembler::aboveEqual, LENGTH_DIFF_LABEL);
    86278748
    86288749    // Mismatched characters in the vectors
    86298750    bind(VECTOR_NOT_EQUAL);
    8630     lea(str1, Address(str1, cnt2, Address::times_2));
    8631     lea(str2, Address(str2, cnt2, Address::times_2));
    8632     movl(cnt1, 8);
    8633 
    8634     // Compare tail (< 8 chars), or rescan last vectors to
    8635     // find 1st mismatched characters
    8636     bind(COMPARE_TAIL);
    8637     testl(cnt1, cnt1);
    8638     jccb(Assembler::zero, LENGTH_DIFF_LABEL);
    8639     movl(cnt2, cnt1);
     8751    addptr(result, cnt1);
     8752    movptr(cnt2, result);
     8753    load_unsigned_short(result, Address(str1, cnt2, scale));
     8754    load_unsigned_short(cnt1, Address(str2, cnt2, scale));
     8755    subl(result, cnt1);
     8756    jmpb(POP_LABEL);
     8757
     8758    bind(COMPARE_TAIL); // limit is zero
     8759    movl(cnt2, result);
    86408760    // Fallthru to tail compare
    86418761  }
    86428762
    86438763  // Shift str2 and str1 to the end of the arrays, negate min
    8644   lea(str1, Address(str1, cnt2, Address::times_2, 0));
    8645   lea(str2, Address(str2, cnt2, Address::times_2, 0));
     8764  lea(str1, Address(str1, cnt2, scale, 0));
     8765  lea(str2, Address(str2, cnt2, scale, 0));
    86468766  negptr(cnt2);
    86478767
    8648     // Compare the rest of the characters
     8768  // Compare the rest of the elements
    86498769  bind(WHILE_HEAD_LABEL);
    8650   load_unsigned_short(result, Address(str1, cnt2, Address::times_2, 0));
    8651   load_unsigned_short(cnt1, Address(str2, cnt2, Address::times_2, 0));
     8770  load_unsigned_short(result, Address(str1, cnt2, scale, 0));
     8771  load_unsigned_short(cnt1, Address(str2, cnt2, scale, 0));
    86528772  subl(result, cnt1);
    86538773  jccb(Assembler::notZero, POP_LABEL);
    86548774  increment(cnt2);
    8655   jcc(Assembler::notZero, WHILE_HEAD_LABEL);
     8775  jccb(Assembler::notZero, WHILE_HEAD_LABEL);
    86568776
    86578777  // Strings are equal up to min length.  Return the length difference.
     
    86628782  // Discard the stored length difference
    86638783  bind(POP_LABEL);
    8664   addptr(rsp, wordSize);
     8784  pop(cnt1);
    86658785
    86668786  // That's it
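Note on the hunks above: the rewritten string_compare feeds pcmpestri a mode byte of 0x19, which the inline comment decodes as unsigned 16-bit elements, equal-each aggregation, negated result, so rcx ends up holding the index of the first mismatching character (or the element count if the 8-character blocks match). The standalone sketch below uses the corresponding SSE4.2 compiler intrinsic to show the same comparison at user level; it is an illustrative equivalent, not part of the changeset (compile with SSE4.2 enabled, e.g. -msse4.2).

    #include <nmmintrin.h>   // SSE4.2 intrinsics (_mm_cmpestri)
    #include <cstdint>
    #include <cstdio>

    // Index of the first differing 16-bit element in two 8-element blocks,
    // or 8 if they are identical. The mode flags sum to 0x19, matching the
    // pcmpmask used in the assembly above.
    static int first_mismatch_in_8_chars(const uint16_t* s1, const uint16_t* s2) {
      __m128i v1 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(s1));
      __m128i v2 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(s2));
      return _mm_cmpestri(v1, 8, v2, 8,
                          _SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_NEGATIVE_POLARITY);
    }

    int main() {
      uint16_t a[8] = {'o','p','e','n','j','d','k','6'};
      uint16_t b[8] = {'o','p','e','n','j','d','k','7'};
      printf("first mismatch at element %d\n", first_mismatch_in_8_chars(a, b));  // prints 7
      return 0;
    }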
     
    87108830    // With SSE4.2, use double quad vector compare
    87118831    Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
     8832
    87128833    // Compare 16-byte vectors
    87138834    andl(result, 0x0000000e);  //   tail count (in bytes)
     
    87238844    movdqu(vec2, Address(ary2, limit, Address::times_1));
    87248845    pxor(vec1, vec2);
     8846
    87258847    ptest(vec1, vec1);
    87268848    jccb(Assembler::notZero, FALSE_LABEL);
    87278849    addptr(limit, 16);
    87288850    jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
     8851
     8852    testl(result, result);
     8853    jccb(Assembler::zero, TRUE_LABEL);
     8854
     8855    movdqu(vec1, Address(ary1, result, Address::times_1, -16));
     8856    movdqu(vec2, Address(ary2, result, Address::times_1, -16));
     8857    pxor(vec1, vec2);
     8858
     8859    ptest(vec1, vec1);
     8860    jccb(Assembler::notZero, FALSE_LABEL);
     8861    jmpb(TRUE_LABEL);
    87298862
    87308863    bind(COMPARE_TAIL); // limit is zero
  • trunk/openjdk/hotspot/src/cpu/x86/vm/assembler_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
     25#ifndef CPU_X86_VM_ASSEMBLER_X86_HPP
     26#define CPU_X86_VM_ASSEMBLER_X86_HPP
     27
    2528class BiasedLockingCounters;
    2629
     
    133136// will cause an assertion failure
    134137#define rscratch1 noreg
     138#define rscratch2 noreg
    135139
    136140#endif // _LP64
     
    671675
    672676#ifdef _LP64
    673  static bool is_simm(int64_t x, int nbits) { return -( CONST64(1) << (nbits-1) )  <= x   &&   x  <  ( CONST64(1) << (nbits-1) ); }
     677 static bool is_simm(int64_t x, int nbits) { return -(CONST64(1) << (nbits-1)) <= x &&
     678                                                    x < (CONST64(1) << (nbits-1)); }
    674679 static bool is_simm32(int64_t x) { return x == (int64_t)(int32_t)x; }
    675680#else
    676  static bool is_simm(int32_t x, int nbits) { return -( 1 << (nbits-1) )  <= x   &&   x  <  ( 1 << (nbits-1) ); }
     681 static bool is_simm(int32_t x, int nbits) { return -(1 << (nbits-1)) <= x &&
     682                                                    x < (1 << (nbits-1)); }
    677683 static bool is_simm32(int32_t x) { return true; }
    678 #endif // LP64
     684#endif // _LP64
    679685
    680686  // Generic instructions
     
    701707  void push(void* v);
    702708  void pop(void* v);
    703 
    704709
    705710  // These do register sized moves/scans
     
    713718  // Vanilla instructions in lexical order
    714719
     720  void adcl(Address dst, int32_t imm32);
     721  void adcl(Address dst, Register src);
    715722  void adcl(Register dst, int32_t imm32);
    716723  void adcl(Register dst, Address src);
     
    720727  void adcq(Register dst, Address src);
    721728  void adcq(Register dst, Register src);
    722 
    723729
    724730  void addl(Address dst, int32_t imm32);
     
    734740  void addq(Register dst, Register src);
    735741
    736 
    737742  void addr_nop_4();
    738743  void addr_nop_5();
     
    755760  void andq(Register dst, Address src);
    756761  void andq(Register dst, Register src);
    757 
    758762
    759763  // Bitwise Logical AND of Packed Double-Precision Floating-Point Values
     
    10121016
    10131017  void idivl(Register src);
     1018  void divl(Register src); // Unsigned division
    10141019
    10151020  void idivq(Register src);
     
    11471152  void movq(Register dst, Register src);
    11481153  void movq(Register dst, Address src);
    1149   void movq(Address dst, Register src);
     1154  void movq(Address  dst, Register src);
    11501155#endif
    11511156
     
    11731178
    11741179  // Move signed 32bit immediate to 64bit extending sign
    1175   void movslq(Address dst, int32_t imm64);
     1180  void movslq(Address  dst, int32_t imm64);
    11761181  void movslq(Register dst, int32_t imm64);
    11771182
     
    12731278  void prefetchw(Address src);
    12741279
     1280  // POR - Bitwise logical OR
     1281  void por(XMMRegister dst, XMMRegister src);
     1282
    12751283  // Shuffle Packed Doublewords
    12761284  void pshufd(XMMRegister dst, XMMRegister src, int mode);
     
    13481356  void sqrtsd(XMMRegister dst, Address src);
    13491357  void sqrtsd(XMMRegister dst, XMMRegister src);
     1358
     1359  // Compute Square Root of Scalar Single-Precision Floating-Point Value
     1360  void sqrtss(XMMRegister dst, Address src);
     1361  void sqrtss(XMMRegister dst, XMMRegister src);
    13501362
    13511363  void std() { emit_byte(0xfd); }
     
    16831695  void store_klass(Register dst, Register src);
    16841696
     1697  void load_heap_oop(Register dst, Address src);
     1698  void store_heap_oop(Address dst, Register src);
     1699
     1700  // Used for storing NULL. All other oop constants should be
     1701  // stored using routines that take a jobject.
     1702  void store_heap_oop_null(Address dst);
     1703
    16851704  void load_prototype_header(Register dst, Register src);
    16861705
    16871706#ifdef _LP64
    16881707  void store_klass_gap(Register dst, Register src);
    1689 
    1690   void load_heap_oop(Register dst, Address src);
    1691   void store_heap_oop(Address dst, Register src);
    16921708
    16931709  // This dummy is to prevent a call to store_heap_oop from
     
    16961712
    16971713  void store_heap_oop(Address dst, void* dummy);
    1698 
    1699   // Used for storing NULL. All other oop constants should be
    1700   // stored using routines that take a jobject.
    1701   void store_heap_oop_null(Address dst);
    17021714
    17031715  void encode_heap_oop(Register r);
     
    18491861    Label&   slow_case                 // continuation point if fast allocation fails
    18501862  );
    1851   void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
     1863  Register tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); // returns TLS address
     1864  void incr_allocated_bytes(Register thread,
     1865                            Register var_size_in_bytes, int con_size_in_bytes,
     1866                            Register t1 = noreg);
    18521867
    18531868  // interface method calling
     
    19281943  void untested()                                { stop("untested"); }
    19291944
    1930   void unimplemented(const char* what = "")      { char* b = new char[1024];  jio_snprintf(b, sizeof(b), "unimplemented: %s", what);  stop(b); }
     1945  void unimplemented(const char* what = "")      { char* b = new char[1024];  jio_snprintf(b, 1024, "unimplemented: %s", what);  stop(b); }
    19311946
    19321947  void should_not_reach_here()                   { stop("should not reach here"); }
     
    21212136  void comisd(XMMRegister dst, AddressLiteral src);
    21222137
     2138  void fadd_s(Address src)        { Assembler::fadd_s(src); }
     2139  void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }
     2140
    21232141  void fldcw(Address src) { Assembler::fldcw(src); }
    21242142  void fldcw(AddressLiteral src);
     
    21332151  void fld_x(Address src) { Assembler::fld_x(src); }
    21342152  void fld_x(AddressLiteral src);
     2153
     2154  void fmul_s(Address src)        { Assembler::fmul_s(src); }
     2155  void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
    21352156
    21362157  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
     
    21502171public:
    21512172
     2173  void addsd(XMMRegister dst, XMMRegister src)    { Assembler::addsd(dst, src); }
     2174  void addsd(XMMRegister dst, Address src)        { Assembler::addsd(dst, src); }
     2175  void addsd(XMMRegister dst, AddressLiteral src) { Assembler::addsd(dst, as_Address(src)); }
     2176
     2177  void addss(XMMRegister dst, XMMRegister src)    { Assembler::addss(dst, src); }
     2178  void addss(XMMRegister dst, Address src)        { Assembler::addss(dst, src); }
     2179  void addss(XMMRegister dst, AddressLiteral src) { Assembler::addss(dst, as_Address(src)); }
     2180
     2181  void divsd(XMMRegister dst, XMMRegister src)    { Assembler::divsd(dst, src); }
     2182  void divsd(XMMRegister dst, Address src)        { Assembler::divsd(dst, src); }
     2183  void divsd(XMMRegister dst, AddressLiteral src) { Assembler::divsd(dst, as_Address(src)); }
     2184
     2185  void divss(XMMRegister dst, XMMRegister src)    { Assembler::divss(dst, src); }
     2186  void divss(XMMRegister dst, Address src)        { Assembler::divss(dst, src); }
     2187  void divss(XMMRegister dst, AddressLiteral src) { Assembler::divss(dst, as_Address(src)); }
     2188
    21522189  void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
    21532190  void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
    21542191  void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
    2155   void movsd(XMMRegister dst, AddressLiteral src);
     2192  void movsd(XMMRegister dst, AddressLiteral src) { Assembler::movsd(dst, as_Address(src)); }
     2193
     2194  void mulsd(XMMRegister dst, XMMRegister src)    { Assembler::mulsd(dst, src); }
     2195  void mulsd(XMMRegister dst, Address src)        { Assembler::mulsd(dst, src); }
     2196  void mulsd(XMMRegister dst, AddressLiteral src) { Assembler::mulsd(dst, as_Address(src)); }
     2197
     2198  void mulss(XMMRegister dst, XMMRegister src)    { Assembler::mulss(dst, src); }
     2199  void mulss(XMMRegister dst, Address src)        { Assembler::mulss(dst, src); }
     2200  void mulss(XMMRegister dst, AddressLiteral src) { Assembler::mulss(dst, as_Address(src)); }
     2201
     2202  void sqrtsd(XMMRegister dst, XMMRegister src)    { Assembler::sqrtsd(dst, src); }
     2203  void sqrtsd(XMMRegister dst, Address src)        { Assembler::sqrtsd(dst, src); }
     2204  void sqrtsd(XMMRegister dst, AddressLiteral src) { Assembler::sqrtsd(dst, as_Address(src)); }
     2205
     2206  void sqrtss(XMMRegister dst, XMMRegister src)    { Assembler::sqrtss(dst, src); }
     2207  void sqrtss(XMMRegister dst, Address src)        { Assembler::sqrtss(dst, src); }
     2208  void sqrtss(XMMRegister dst, AddressLiteral src) { Assembler::sqrtss(dst, as_Address(src)); }
     2209
     2210  void subsd(XMMRegister dst, XMMRegister src)    { Assembler::subsd(dst, src); }
     2211  void subsd(XMMRegister dst, Address src)        { Assembler::subsd(dst, src); }
     2212  void subsd(XMMRegister dst, AddressLiteral src) { Assembler::subsd(dst, as_Address(src)); }
     2213
     2214  void subss(XMMRegister dst, XMMRegister src)    { Assembler::subss(dst, src); }
     2215  void subss(XMMRegister dst, Address src)        { Assembler::subss(dst, src); }
     2216  void subss(XMMRegister dst, AddressLiteral src) { Assembler::subss(dst, as_Address(src)); }
    21562217
    21572218  void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
     
    22362297  void string_compare(Register str1, Register str2,
    22372298                      Register cnt1, Register cnt2, Register result,
    2238                       XMMRegister vec1, XMMRegister vec2);
     2299                      XMMRegister vec1);
    22392300
    22402301  // Compare char[] arrays.
     
    22732334inline bool AbstractAssembler::pd_check_instruction_mark() { return true; }
    22742335#endif
     2336
     2337#endif // CPU_X86_VM_ASSEMBLER_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/assembler_x86.inline.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2222 *
    2323 */
     24
     25#ifndef CPU_X86_VM_ASSEMBLER_X86_INLINE_HPP
     26#define CPU_X86_VM_ASSEMBLER_X86_INLINE_HPP
     27
     28#include "asm/assembler.inline.hpp"
     29#include "asm/codeBuffer.hpp"
     30#include "code/codeCache.hpp"
     31#include "runtime/handles.inline.hpp"
    2432
    2533inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
     
    8694}
    8795#endif // _LP64
     96
     97#endif // CPU_X86_VM_ASSEMBLER_X86_INLINE_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/bytecodeInterpreter_x86.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_bytecodeInterpreter_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "asm/assembler.hpp"
     27#include "interpreter/bytecodeInterpreter.hpp"
     28#include "interpreter/bytecodeInterpreter.inline.hpp"
     29#include "interpreter/interpreter.hpp"
     30#include "interpreter/interpreterRuntime.hpp"
     31#include "oops/methodDataOop.hpp"
     32#include "oops/methodOop.hpp"
     33#include "oops/oop.inline.hpp"
     34#include "prims/jvmtiExport.hpp"
     35#include "prims/jvmtiThreadState.hpp"
     36#include "runtime/deoptimization.hpp"
     37#include "runtime/frame.inline.hpp"
     38#include "runtime/sharedRuntime.hpp"
     39#include "runtime/stubRoutines.hpp"
     40#include "runtime/synchronizer.hpp"
     41#include "runtime/vframeArray.hpp"
     42#include "utilities/debug.hpp"
     43#ifdef TARGET_ARCH_MODEL_x86_32
     44# include "interp_masm_x86_32.hpp"
     45#endif
     46#ifdef TARGET_ARCH_MODEL_x86_64
     47# include "interp_masm_x86_64.hpp"
     48#endif
    2749
    2850#ifdef CC_INTERP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/bytecodeInterpreter_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2222 *
    2323 */
     24
     25#ifndef CPU_X86_VM_BYTECODEINTERPRETER_X86_HPP
     26#define CPU_X86_VM_BYTECODEINTERPRETER_X86_HPP
    2427
    2528// Platform specific for C++ based Interpreter
     
    109112#define SET_LOCALS_LONG_FROM_ADDR(addr, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->l = \
    110113                                                ((VMJavaVal64*)(addr))->l)
     114
     115#endif // CPU_X86_VM_BYTECODEINTERPRETER_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/bytecodeInterpreter_x86.inline.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
     25#ifndef CPU_X86_VM_BYTECODEINTERPRETER_X86_INLINE_HPP
     26#define CPU_X86_VM_BYTECODEINTERPRETER_X86_INLINE_HPP
     27
    2528// Inline interpreter functions for IA32
    2629
     
    279282  return (jbyte) val;
    280283}
     284
     285#endif // CPU_X86_VM_BYTECODEINTERPRETER_X86_INLINE_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/bytecodes_x86.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 1998, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_bytecodes_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "interpreter/bytecodes.hpp"
    2727
    2828
  • trunk/openjdk/hotspot/src/cpu/x86/vm/bytecodes_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 1998, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
     25#ifndef CPU_X86_VM_BYTECODES_X86_HPP
     26#define CPU_X86_VM_BYTECODES_X86_HPP
     27
    2528// No i486 specific bytecodes
     29
     30#endif // CPU_X86_VM_BYTECODES_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/bytes_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2001, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2222 *
    2323 */
     24
     25#ifndef CPU_X86_VM_BYTES_X86_HPP
     26#define CPU_X86_VM_BYTES_X86_HPP
     27
     28#include "memory/allocation.hpp"
    2429
    2530class Bytes: AllStatic {
     
    6873
    6974// The following header contains the implementations of swap_u2, swap_u4, and swap_u8[_base]
    70 #include "incls/_bytes_pd.inline.hpp.incl"
     75#ifdef TARGET_OS_ARCH_linux_x86
     76# include "bytes_linux_x86.inline.hpp"
     77#endif
     78#ifdef TARGET_OS_ARCH_solaris_x86
     79# include "bytes_solaris_x86.inline.hpp"
     80#endif
     81#ifdef TARGET_OS_ARCH_windows_x86
     82# include "bytes_windows_x86.inline.hpp"
     83#endif
     84
     85
     86#endif // CPU_X86_VM_BYTES_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp

    r278 r309  
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_c1_CodeStubs_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "c1/c1_CodeStubs.hpp"
     27#include "c1/c1_FrameMap.hpp"
     28#include "c1/c1_LIRAssembler.hpp"
     29#include "c1/c1_MacroAssembler.hpp"
     30#include "c1/c1_Runtime1.hpp"
     31#include "nativeInst_x86.hpp"
     32#include "runtime/sharedRuntime.hpp"
     33#include "vmreg_x86.inline.hpp"
     34#ifndef SERIALGC
     35#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
     36#endif
    2737
    2838
     
    6979}
    7080
    71 #ifdef TIERED
    7281void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
    7382  __ bind(_entry);
     83  ce->store_parameter(_method->as_register(), 1);
    7484  ce->store_parameter(_bci, 0);
    7585  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
    7686  ce->add_call_info_here(_info);
    7787  ce->verify_oop_map(_info);
    78 
    79   __ jmp(_continuation);
    80 }
    81 #endif // TIERED
    82 
    83 
     88  __ jmp(_continuation);
     89}
    8490
    8591RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
     
    8894  , _index(index)
    8995{
    90   _info = info == NULL ? NULL : new CodeEmitInfo(info);
     96  assert(info != NULL, "must have info");
     97  _info = new CodeEmitInfo(info);
    9198}
    9299
     
    477484  Register pre_val_reg = pre_val()->as_register();
    478485
    479   ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false);
     486  ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
    480487
    481488  __ cmpptr(pre_val_reg, (int32_t) NULL_WORD);
     
    503510  __ cmpptr(new_val_reg, (int32_t) NULL_WORD);
    504511  __ jcc(Assembler::equal, _continuation);
    505   ce->store_parameter(addr()->as_register(), 0);
     512  ce->store_parameter(addr()->as_pointer_register(), 0);
    506513  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
    507514  __ jmp(_continuation);
  • trunk/openjdk/hotspot/src/cpu/x86/vm/c1_Defs_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2222 *
    2323 */
     24
     25#ifndef CPU_X86_VM_C1_DEFS_X86_HPP
     26#define CPU_X86_VM_C1_DEFS_X86_HPP
    2427
    2528// native word offsets from memory address (little endian)
     
    5962  pd_first_cpu_reg = 0,
    6063  pd_last_cpu_reg = NOT_LP64(5) LP64_ONLY(11),
    61   pd_first_byte_reg = 2,
    62   pd_last_byte_reg = 5,
     64  pd_first_byte_reg = NOT_LP64(2) LP64_ONLY(0),
     65  pd_last_byte_reg = NOT_LP64(5) LP64_ONLY(11),
    6366  pd_first_fpu_reg = pd_nof_cpu_regs_frame_map,
    6467  pd_last_fpu_reg =  pd_first_fpu_reg + 7,
     
    7275  pd_float_saved_as_double = true
    7376};
     77
     78#endif // CPU_X86_VM_C1_DEFS_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/c1_FpuStackSim_x86.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_c1_FpuStackSim_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "c1/c1_FpuStackSim.hpp"
     27#include "c1/c1_FrameMap.hpp"
     28#include "utilities/array.hpp"
     29#include "utilities/ostream.hpp"
    2730
    2831//--------------------------------------------------------
  • trunk/openjdk/hotspot/src/cpu/x86/vm/c1_FpuStackSim_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2222 *
    2323 */
     24
     25#ifndef CPU_X86_VM_C1_FPUSTACKSIM_X86_HPP
     26#define CPU_X86_VM_C1_FPUSTACKSIM_X86_HPP
    2427
    2528//  Simulates the FPU stack and maintains mapping [fpu-register -> stack offset]
     
    6669  void print() PRODUCT_RETURN;
    6770};
     71
     72#endif // CPU_X86_VM_C1_FPUSTACKSIM_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.cpp

    r278 r309  
    2323 */
    2424
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_c1_FrameMap_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "c1/c1_FrameMap.hpp"
     27#include "c1/c1_LIR.hpp"
     28#include "runtime/sharedRuntime.hpp"
     29#include "vmreg_x86.inline.hpp"
    2730
    2831const int FrameMap::pd_c_runtime_reserved_arg_size = 0;
     
    156159  map_register( 7, r9);    r9_opr = LIR_OprFact::single_cpu(7);
    157160  map_register( 8, r11);  r11_opr = LIR_OprFact::single_cpu(8);
    158   map_register( 9, r12);  r12_opr = LIR_OprFact::single_cpu(9);
    159   map_register(10, r13);  r13_opr = LIR_OprFact::single_cpu(10);
    160   map_register(11, r14);  r14_opr = LIR_OprFact::single_cpu(11);
     161  map_register( 9, r13);  r13_opr = LIR_OprFact::single_cpu(9);
     162  map_register(10, r14);  r14_opr = LIR_OprFact::single_cpu(10);
     163  // r12 is allocated conditionally. With compressed oops it holds
     164  // the heapbase value and is not visible to the allocator.
     165  map_register(11, r12);  r12_opr = LIR_OprFact::single_cpu(11);
    161166  // The unallocatable registers are at the end
    162167  map_register(12, r10);  r10_opr = LIR_OprFact::single_cpu(12);
     
    189194  _caller_save_cpu_regs[7]  = r9_opr;
    190195  _caller_save_cpu_regs[8]  = r11_opr;
    191   _caller_save_cpu_regs[9]  = r12_opr;
    192   _caller_save_cpu_regs[10] = r13_opr;
    193   _caller_save_cpu_regs[11] = r14_opr;
     196  _caller_save_cpu_regs[9]  = r13_opr;
     197  _caller_save_cpu_regs[10] = r14_opr;
     198  _caller_save_cpu_regs[11] = r12_opr;
    194199#endif // _LP64
    195200
  • trunk/openjdk/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.hpp

    r278 r309  
    2222 *
    2323 */
     24
     25#ifndef CPU_X86_VM_C1_FRAMEMAP_X86_HPP
     26#define CPU_X86_VM_C1_FRAMEMAP_X86_HPP
    2427
    2528//  On i486 the frame looks as follows:
     
    127130    return _caller_save_xmm_regs[i];
    128131  }
     132
     133  static int adjust_reg_range(int range) {
     134    // Reduce the number of available regs (to free r12) in case of compressed oops
     135    if (UseCompressedOops) return range - 1;
     136    return range;
     137  }
     138
     139  static int nof_caller_save_cpu_regs() { return adjust_reg_range(pd_nof_caller_save_cpu_regs_frame_map); }
     140  static int last_cpu_reg()             { return adjust_reg_range(pd_last_cpu_reg);  }
     141  static int last_byte_reg()            { return adjust_reg_range(pd_last_byte_reg); }
     142
     143#endif // CPU_X86_VM_C1_FRAMEMAP_X86_HPP
     144
  • trunk/openjdk/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_c1_LIRAssembler_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "c1/c1_Compilation.hpp"
     27#include "c1/c1_LIRAssembler.hpp"
     28#include "c1/c1_MacroAssembler.hpp"
     29#include "c1/c1_Runtime1.hpp"
     30#include "c1/c1_ValueStack.hpp"
     31#include "ci/ciArrayKlass.hpp"
     32#include "ci/ciInstance.hpp"
     33#include "gc_interface/collectedHeap.hpp"
     34#include "memory/barrierSet.hpp"
     35#include "memory/cardTableModRefBS.hpp"
     36#include "nativeInst_x86.hpp"
     37#include "oops/objArrayKlass.hpp"
     38#include "runtime/sharedRuntime.hpp"
    2739
    2840
     
    332344  Register ic_klass = IC_Klass;
    333345  const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
    334 
    335   if (!VerifyOops) {
     346  const bool do_post_padding = VerifyOops || UseCompressedOops;
     347  if (!do_post_padding) {
    336348    // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
    337349    while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
     
    341353  int offset = __ offset();
    342354  __ inline_cache_check(receiver, IC_Klass);
    343   assert(__ offset() % CodeEntryAlignment == 0 || VerifyOops, "alignment must be correct");
    344   if (VerifyOops) {
     355  assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
     356  if (do_post_padding) {
    345357    // force alignment after the cache check.
    346358    // It's been verified to be aligned if !VerifyOops
     
    548560
    549561  // Get addresses of first characters from both Strings
    550   __ movptr (rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
    551   __ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
    552   __ lea    (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
     562  __ load_heap_oop(rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
     563  __ movptr       (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
     564  __ lea          (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
    553565
    554566
    555567  // rbx, may be NULL
    556568  add_debug_info_for_null_check_here(info);
    557   __ movptr (rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
    558   __ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
    559   __ lea    (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
     569  __ load_heap_oop(rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
     570  __ movptr       (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
     571  __ lea          (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
    560572
    561573  // compute minimum length (in rax) and difference of lengths (on top of stack)
     
    685697
    686698  switch (c->type()) {
    687     case T_INT:
     699    case T_INT: {
     700      assert(patch_code == lir_patch_none, "no patching handled here");
     701      __ movl(dest->as_register(), c->as_jint());
     702      break;
     703    }
     704
    688705    case T_ADDRESS: {
    689706      assert(patch_code == lir_patch_none, "no patching handled here");
    690       __ movl(dest->as_register(), c->as_jint());
     707      __ movptr(dest->as_register(), c->as_jint());
    691708      break;
    692709    }
     
    769786    case T_INT:  // fall through
    770787    case T_FLOAT:
     788      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
     789      break;
     790
    771791    case T_ADDRESS:
    772       __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
     792      __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
    773793      break;
    774794
     
    795815}
    796816
    797 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info ) {
     817void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
    798818  assert(src->is_constant(), "should not call otherwise");
    799819  assert(dest->is_address(), "should not call otherwise");
     
    805825    case T_INT:    // fall through
    806826    case T_FLOAT:
     827      __ movl(as_Address(addr), c->as_jint_bits());
     828      break;
     829
    807830    case T_ADDRESS:
    808       __ movl(as_Address(addr), c->as_jint_bits());
     831      __ movptr(as_Address(addr), c->as_jint_bits());
    809832      break;
    810833
     
    812835    case T_ARRAY:
    813836      if (c->as_jobject() == NULL) {
    814         __ movptr(as_Address(addr), NULL_WORD);
     837        if (UseCompressedOops && !wide) {
     838          __ movl(as_Address(addr), (int32_t)NULL_WORD);
     839        } else {
     840          __ movptr(as_Address(addr), NULL_WORD);
     841        }
    815842      } else {
    816843        if (is_literal_address(addr)) {
     
    820847#ifdef _LP64
    821848          __ movoop(rscratch1, c->as_jobject());
    822           null_check_here = code_offset();
    823           __ movptr(as_Address_lo(addr), rscratch1);
     849          if (UseCompressedOops && !wide) {
     850            __ encode_heap_oop(rscratch1);
     851            null_check_here = code_offset();
     852            __ movl(as_Address_lo(addr), rscratch1);
     853          } else {
     854            null_check_here = code_offset();
     855            __ movptr(as_Address_lo(addr), rscratch1);
     856          }
    824857#else
    825858          __ movoop(as_Address(addr), c->as_jobject());
     
    9981031
    9991032
    1000 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool /* unaligned */) {
     1033void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
    10011034  LIR_Address* to_addr = dest->as_address_ptr();
    10021035  PatchingStub* patch = NULL;
     1036  Register compressed_src = rscratch1;
    10031037
    10041038  if (type == T_ARRAY || type == T_OBJECT) {
    10051039    __ verify_oop(src->as_register());
    1006   }
     1040#ifdef _LP64
     1041    if (UseCompressedOops && !wide) {
     1042      __ movptr(compressed_src, src->as_register());
     1043      __ encode_heap_oop(compressed_src);
     1044    }
     1045#endif
     1046  }
     1047
    10071048  if (patch_code != lir_patch_none) {
    10081049    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
     
    10101051    assert(toa.disp() != 0, "must have");
    10111052  }
    1012   if (info != NULL) {
    1013     add_debug_info_for_null_check_here(info);
    1014   }
    1015 
     1053
     1054  int null_check_here = code_offset();
    10161055  switch (type) {
    10171056    case T_FLOAT: {
     
    10391078    }
    10401079
    1041     case T_ADDRESS: // fall through
    10421080    case T_ARRAY:   // fall through
    10431081    case T_OBJECT:  // fall through
    1044 #ifdef _LP64
     1082      if (UseCompressedOops && !wide) {
     1083        __ movl(as_Address(to_addr), compressed_src);
     1084      } else {
     1085        __ movptr(as_Address(to_addr), src->as_register());
     1086      }
     1087      break;
     1088    case T_ADDRESS:
    10451089      __ movptr(as_Address(to_addr), src->as_register());
    10461090      break;
    1047 #endif // _LP64
    10481091    case T_INT:
    10491092      __ movl(as_Address(to_addr), src->as_register());
     
    11021145      ShouldNotReachHere();
    11031146  }
     1147  if (info != NULL) {
     1148    add_debug_info_for_null_check(null_check_here, info);
     1149  }
    11041150
    11051151  if (patch_code != lir_patch_none) {
     
    11851231
    11861232
    1187 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool /* unaligned */) {
     1233void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
    11881234  assert(src->is_address(), "should not call otherwise");
    11891235  assert(dest->is_register(), "should not call otherwise");
     
    12391285    }
    12401286
    1241     case T_ADDRESS: // fall through
    12421287    case T_OBJECT:  // fall through
    12431288    case T_ARRAY:   // fall through
    1244 #ifdef _LP64
     1289      if (UseCompressedOops && !wide) {
     1290        __ movl(dest->as_register(), from_addr);
     1291      } else {
     1292        __ movptr(dest->as_register(), from_addr);
     1293      }
     1294      break;
     1295
     1296    case T_ADDRESS:
    12451297      __ movptr(dest->as_register(), from_addr);
    12461298      break;
     1247 #endif // _LP64
    12481299    case T_INT:
    12491300      __ movl(dest->as_register(), from_addr);
     
    13401391
    13411392  if (type == T_ARRAY || type == T_OBJECT) {
     1393#ifdef _LP64
     1394    if (UseCompressedOops && !wide) {
     1395      __ decode_heap_oop(dest->as_register());
     1396    }
     1397#endif
    13421398    __ verify_oop(dest->as_register());
    13431399  }
     
    13601416        ShouldNotReachHere(); break;
    13611417    }
    1362   } else if (VM_Version::supports_3dnow()) {
     1418  } else if (VM_Version::supports_3dnow_prefetch()) {
    13631419    __ prefetchr(from_addr);
    13641420  }
     
    13831439        ShouldNotReachHere(); break;
    13841440    }
    1385   } else if (VM_Version::supports_3dnow()) {
     1441  } else if (VM_Version::supports_3dnow_prefetch()) {
    13861442    __ prefetchw(from_addr);
    13871443  }
     
    15861642
    15871643void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
     1644  Register len =  op->len()->as_register();
     1645  LP64_ONLY( __ movslq(len, len); )
     1646
    15881647  if (UseSlowPath ||
    15891648      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
     
    15911650    __ jmp(*op->stub()->entry());
    15921651  } else {
    1593     Register len =  op->len()->as_register();
    15941652    Register tmp1 = op->tmp1()->as_register();
    15951653    Register tmp2 = op->tmp2()->as_register();
     
    16161674}
    16171675
     1676void LIR_Assembler::type_profile_helper(Register mdo,
     1677                                        ciMethodData *md, ciProfileData *data,
     1678                                        Register recv, Label* update_done) {
     1679  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
     1680    Label next_test;
     1681    // See if the receiver is receiver[n].
     1682    __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
     1683    __ jccb(Assembler::notEqual, next_test);
     1684    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
     1685    __ addptr(data_addr, DataLayout::counter_increment);
     1686    __ jmp(*update_done);
     1687    __ bind(next_test);
     1688  }
     1689
     1690  // Didn't find receiver; find next empty slot and fill it in
     1691  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
     1692    Label next_test;
     1693    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
     1694    __ cmpptr(recv_addr, (intptr_t)NULL_WORD);
     1695    __ jccb(Assembler::notEqual, next_test);
     1696    __ movptr(recv_addr, recv);
     1697    __ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
     1698    __ jmp(*update_done);
     1699    __ bind(next_test);
     1700  }
     1701}
     1702
     1703void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
     1704  // we always need a stub for the failure case.
     1705  CodeStub* stub = op->stub();
     1706  Register obj = op->object()->as_register();
     1707  Register k_RInfo = op->tmp1()->as_register();
     1708  Register klass_RInfo = op->tmp2()->as_register();
     1709  Register dst = op->result_opr()->as_register();
     1710  ciKlass* k = op->klass();
     1711  Register Rtmp1 = noreg;
     1712
     1713  // check if it needs to be profiled
     1714  ciMethodData* md;
     1715  ciProfileData* data;
     1716
     1717  if (op->should_profile()) {
     1718    ciMethod* method = op->profiled_method();
     1719    assert(method != NULL, "Should have method");
     1720    int bci = op->profiled_bci();
     1721    md = method->method_data_or_null();
     1722    assert(md != NULL, "Sanity");
     1723    data = md->bci_to_data(bci);
     1724    assert(data != NULL,                "need data for type check");
     1725    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
     1726  }
     1727  Label profile_cast_success, profile_cast_failure;
     1728  Label *success_target = op->should_profile() ? &profile_cast_success : success;
     1729  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
     1730
     1731  if (obj == k_RInfo) {
     1732    k_RInfo = dst;
     1733  } else if (obj == klass_RInfo) {
     1734    klass_RInfo = dst;
     1735  }
     1736  if (k->is_loaded() && !UseCompressedOops) {
     1737    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
     1738  } else {
     1739    Rtmp1 = op->tmp3()->as_register();
     1740    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
     1741  }
     1742
     1743  assert_different_registers(obj, k_RInfo, klass_RInfo);
     1744  if (!k->is_loaded()) {
     1745    jobject2reg_with_patching(k_RInfo, op->info_for_patch());
     1746  } else {
     1747#ifdef _LP64
     1748    __ movoop(k_RInfo, k->constant_encoding());
     1749#endif // _LP64
     1750  }
     1751  assert(obj != k_RInfo, "must be different");
     1752
     1753  __ cmpptr(obj, (int32_t)NULL_WORD);
     1754  if (op->should_profile()) {
     1755    Label not_null;
     1756    __ jccb(Assembler::notEqual, not_null);
     1757    // Object is null; update MDO and exit
     1758    Register mdo  = klass_RInfo;
     1759    __ movoop(mdo, md->constant_encoding());
     1760    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
     1761    int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
     1762    __ orl(data_addr, header_bits);
     1763    __ jmp(*obj_is_null);
     1764    __ bind(not_null);
     1765  } else {
     1766    __ jcc(Assembler::equal, *obj_is_null);
     1767  }
     1768  __ verify_oop(obj);
     1769
     1770  if (op->fast_check()) {
     1771    // get object class
     1772    // not a safepoint as obj null check happens earlier
     1773#ifdef _LP64
     1774    if (UseCompressedOops) {
     1775      __ load_klass(Rtmp1, obj);
     1776      __ cmpptr(k_RInfo, Rtmp1);
     1777    } else {
     1778      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
     1779    }
     1780#else
     1781    if (k->is_loaded()) {
     1782      __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
     1783    } else {
     1784      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
     1785    }
     1786#endif
     1787    __ jcc(Assembler::notEqual, *failure_target);
     1788    // successful cast, fall through to profile or jump
     1789  } else {
     1790    // get object class
     1791    // not a safepoint as obj null check happens earlier
     1792    __ load_klass(klass_RInfo, obj);
     1793    if (k->is_loaded()) {
     1794      // See if we get an immediate positive hit
     1795#ifdef _LP64
     1796      __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
     1797#else
     1798      __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
     1799#endif // _LP64
     1800      if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) {
     1801        __ jcc(Assembler::notEqual, *failure_target);
     1802        // successful cast, fall through to profile or jump
     1803      } else {
     1804        // See if we get an immediate positive hit
     1805        __ jcc(Assembler::equal, *success_target);
     1806        // check for self
     1807#ifdef _LP64
     1808        __ cmpptr(klass_RInfo, k_RInfo);
     1809#else
     1810        __ cmpoop(klass_RInfo, k->constant_encoding());
     1811#endif // _LP64
     1812        __ jcc(Assembler::equal, *success_target);
     1813
     1814        __ push(klass_RInfo);
     1815#ifdef _LP64
     1816        __ push(k_RInfo);
     1817#else
     1818        __ pushoop(k->constant_encoding());
     1819#endif // _LP64
     1820        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
     1821        __ pop(klass_RInfo);
     1822        __ pop(klass_RInfo);
     1823        // result is a boolean
     1824        __ cmpl(klass_RInfo, 0);
     1825        __ jcc(Assembler::equal, *failure_target);
     1826        // successful cast, fall through to profile or jump
     1827      }
     1828    } else {
     1829      // perform the fast part of the checking logic
     1830      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
     1831      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
     1832      __ push(klass_RInfo);
     1833      __ push(k_RInfo);
     1834      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
     1835      __ pop(klass_RInfo);
     1836      __ pop(k_RInfo);
     1837      // result is a boolean
     1838      __ cmpl(k_RInfo, 0);
     1839      __ jcc(Assembler::equal, *failure_target);
     1840      // successful cast, fall through to profile or jump
     1841    }
     1842  }
     1843  if (op->should_profile()) {
     1844    Register mdo  = klass_RInfo, recv = k_RInfo;
     1845    __ bind(profile_cast_success);
     1846    __ movoop(mdo, md->constant_encoding());
     1847    __ load_klass(recv, obj);
     1848    Label update_done;
     1849    type_profile_helper(mdo, md, data, recv, success);
     1850    __ jmp(*success);
     1851
     1852    __ bind(profile_cast_failure);
     1853    __ movoop(mdo, md->constant_encoding());
     1854    Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
     1855    __ subptr(counter_addr, DataLayout::counter_increment);
     1856    __ jmp(*failure);
     1857  }
     1858  __ jmp(*success);
     1859}
    16181860
    16191861
     
    16281870
    16291871    CodeStub* stub = op->stub();
    1630     Label done;
     1872
     1873    // check if it needs to be profiled
     1874    ciMethodData* md;
     1875    ciProfileData* data;
     1876
     1877    if (op->should_profile()) {
     1878      ciMethod* method = op->profiled_method();
     1879      assert(method != NULL, "Should have method");
     1880      int bci = op->profiled_bci();
     1881      md = method->method_data_or_null();
     1882      assert(md != NULL, "Sanity");
     1883      data = md->bci_to_data(bci);
     1884      assert(data != NULL,                "need data for type check");
     1885      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
     1886    }
     1887    Label profile_cast_success, profile_cast_failure, done;
     1888    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
     1889    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
     1890
    16311891    __ cmpptr(value, (int32_t)NULL_WORD);
    1632     __ jcc(Assembler::equal, done);
     1892    if (op->should_profile()) {
     1893      Label not_null;
     1894      __ jccb(Assembler::notEqual, not_null);
     1895      // Object is null; update MDO and exit
     1896      Register mdo  = klass_RInfo;
     1897      __ movoop(mdo, md->constant_encoding());
     1898      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
     1899      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
     1900      __ orl(data_addr, header_bits);
     1901      __ jmp(done);
     1902      __ bind(not_null);
     1903    } else {
     1904      __ jcc(Assembler::equal, done);
     1905    }
     1906
    16331907    add_debug_info_for_null_check_here(op->info_for_exception());
    1634     __ movptr(k_RInfo, Address(array, oopDesc::klass_offset_in_bytes()));
    1635     __ movptr(klass_RInfo, Address(value, oopDesc::klass_offset_in_bytes()));
    1636 
    1637     // get instance klass
     1908    __ load_klass(k_RInfo, array);
     1909    __ load_klass(klass_RInfo, value);
     1910
     1911    // get instance klass (it's already uncompressed)
    16381912    __ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
    16391913    // perform the fast part of the checking logic
    1640     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, &done, stub->entry(), NULL);
     1914    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
    16411915    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    16421916    __ push(klass_RInfo);
     
    16471921    // result is a boolean
    16481922    __ cmpl(k_RInfo, 0);
    1649     __ jcc(Assembler::equal, *stub->entry());
     1923    __ jcc(Assembler::equal, *failure_target);
     1924    // fall through to the success case
     1925
     1926    if (op->should_profile()) {
     1927      Register mdo  = klass_RInfo, recv = k_RInfo;
     1928      __ bind(profile_cast_success);
     1929      __ movoop(mdo, md->constant_encoding());
     1930      __ load_klass(recv, value);
     1931      Label update_done;
     1932      type_profile_helper(mdo, md, data, recv, &done);
     1933      __ jmpb(done);
     1934
     1935      __ bind(profile_cast_failure);
     1936      __ movoop(mdo, md->constant_encoding());
     1937      Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
     1938      __ subptr(counter_addr, DataLayout::counter_increment);
     1939      __ jmp(*stub->entry());
     1940    }
     1941
    16501942    __ bind(done);
    1651   } else if (op->code() == lir_checkcast) {
    1652     // we always need a stub for the failure case.
    1653     CodeStub* stub = op->stub();
    1654     Register obj = op->object()->as_register();
    1655     Register k_RInfo = op->tmp1()->as_register();
    1656     Register klass_RInfo = op->tmp2()->as_register();
    1657     Register dst = op->result_opr()->as_register();
    1658     ciKlass* k = op->klass();
    1659     Register Rtmp1 = noreg;
    1660 
    1661     Label done;
    1662     if (obj == k_RInfo) {
    1663       k_RInfo = dst;
    1664     } else if (obj == klass_RInfo) {
    1665       klass_RInfo = dst;
    1666     }
    1667     if (k->is_loaded()) {
    1668       select_different_registers(obj, dst, k_RInfo, klass_RInfo);
    1669     } else {
    1670       Rtmp1 = op->tmp3()->as_register();
    1671       select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
    1672     }
    1673 
    1674     assert_different_registers(obj, k_RInfo, klass_RInfo);
    1675     if (!k->is_loaded()) {
    1676       jobject2reg_with_patching(k_RInfo, op->info_for_patch());
    1677     } else {
    1678 #ifdef _LP64
    1679       __ movoop(k_RInfo, k->constant_encoding());
    1680 #else
    1681       k_RInfo = noreg;
    1682 #endif // _LP64
    1683     }
    1684     assert(obj != k_RInfo, "must be different");
    1685     __ cmpptr(obj, (int32_t)NULL_WORD);
    1686     if (op->profiled_method() != NULL) {
    1687       ciMethod* method = op->profiled_method();
    1688       int bci          = op->profiled_bci();
    1689 
    1690       Label profile_done;
    1691       __ jcc(Assembler::notEqual, profile_done);
    1692       // Object is null; update methodDataOop
    1693       ciMethodData* md = method->method_data();
    1694       if (md == NULL) {
    1695         bailout("out of memory building methodDataOop");
    1696         return;
    1697       }
    1698       ciProfileData* data = md->bci_to_data(bci);
    1699       assert(data != NULL,       "need data for checkcast");
    1700       assert(data->is_BitData(), "need BitData for checkcast");
    1701       Register mdo  = klass_RInfo;
    1702       __ movoop(mdo, md->constant_encoding());
    1703       Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
    1704       int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
    1705       __ orl(data_addr, header_bits);
    1706       __ jmp(done);
    1707       __ bind(profile_done);
    1708     } else {
    1709       __ jcc(Assembler::equal, done);
    1710     }
    1711     __ verify_oop(obj);
    1712 
    1713     if (op->fast_check()) {
     1714       // get object class
    1715       // not a safepoint as obj null check happens earlier
    1716       if (k->is_loaded()) {
    1717 #ifdef _LP64
    1718         __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
    1719 #else
    1720         __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
    1721 #endif // _LP64
    1722       } else {
    1723         __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
    1724 
    1725       }
    1726       __ jcc(Assembler::notEqual, *stub->entry());
    1727       __ bind(done);
    1728     } else {
    1729       // get object class
    1730       // not a safepoint as obj null check happens earlier
    1731       __ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
    1732       if (k->is_loaded()) {
    1733         // See if we get an immediate positive hit
    1734 #ifdef _LP64
    1735         __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
    1736 #else
    1737         __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
    1738 #endif // _LP64
    1739         if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) {
    1740           __ jcc(Assembler::notEqual, *stub->entry());
    1741         } else {
    1742           // See if we get an immediate positive hit
    1743           __ jcc(Assembler::equal, done);
    1744           // check for self
    1745 #ifdef _LP64
    1746           __ cmpptr(klass_RInfo, k_RInfo);
    1747 #else
    1748           __ cmpoop(klass_RInfo, k->constant_encoding());
    1749 #endif // _LP64
    1750           __ jcc(Assembler::equal, done);
    1751 
    1752           __ push(klass_RInfo);
    1753 #ifdef _LP64
    1754           __ push(k_RInfo);
    1755 #else
    1756           __ pushoop(k->constant_encoding());
    1757 #endif // _LP64
    1758           __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
    1759           __ pop(klass_RInfo);
    1760           __ pop(klass_RInfo);
    1761           // result is a boolean
    1762           __ cmpl(klass_RInfo, 0);
    1763           __ jcc(Assembler::equal, *stub->entry());
    1764         }
     1943  } else
     1944    if (code == lir_checkcast) {
     1945      Register obj = op->object()->as_register();
     1946      Register dst = op->result_opr()->as_register();
     1947      Label success;
     1948      emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
     1949      __ bind(success);
     1950      if (dst != obj) {
     1951        __ mov(dst, obj);
     1952      }
     1953    } else
     1954      if (code == lir_instanceof) {
     1955        Register obj = op->object()->as_register();
     1956        Register dst = op->result_opr()->as_register();
     1957        Label success, failure, done;
     1958        emit_typecheck_helper(op, &success, &failure, &failure);
     1959        __ bind(failure);
     1960        __ xorptr(dst, dst);
     1961        __ jmpb(done);
     1962        __ bind(success);
     1963        __ movptr(dst, 1);
    17651964        __ bind(done);
    17661965      } else {
    1767         // perform the fast part of the checking logic
    1768         __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, &done, stub->entry(), NULL);
    1769         // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    1770         __ push(klass_RInfo);
    1771         __ push(k_RInfo);
    1772         __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
    1773         __ pop(klass_RInfo);
    1774         __ pop(k_RInfo);
    1775         // result is a boolean
    1776         __ cmpl(k_RInfo, 0);
    1777         __ jcc(Assembler::equal, *stub->entry());
    1778         __ bind(done);
    1779       }
    1780 
    1781     }
    1782     if (dst != obj) {
    1783       __ mov(dst, obj);
    1784     }
    1785   } else if (code == lir_instanceof) {
    1786     Register obj = op->object()->as_register();
    1787     Register k_RInfo = op->tmp1()->as_register();
    1788     Register klass_RInfo = op->tmp2()->as_register();
    1789     Register dst = op->result_opr()->as_register();
    1790     ciKlass* k = op->klass();
    1791 
    1792     Label done;
    1793     Label zero;
    1794     Label one;
    1795     if (obj == k_RInfo) {
    1796       k_RInfo = klass_RInfo;
    1797       klass_RInfo = obj;
    1798     }
    1799     // patching may screw with our temporaries on sparc,
    1800     // so let's do it before loading the class
    1801     if (!k->is_loaded()) {
    1802       jobject2reg_with_patching(k_RInfo, op->info_for_patch());
    1803     } else {
    1804       LP64_ONLY(__ movoop(k_RInfo, k->constant_encoding()));
    1805     }
    1806     assert(obj != k_RInfo, "must be different");
    1807 
    1808     __ verify_oop(obj);
    1809     if (op->fast_check()) {
    1810       __ cmpptr(obj, (int32_t)NULL_WORD);
    1811       __ jcc(Assembler::equal, zero);
    1812       // get object class
    1813       // not a safepoint as obj null check happens earlier
    1814       if (LP64_ONLY(false &&) k->is_loaded()) {
    1815         NOT_LP64(__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding()));
    1816         k_RInfo = noreg;
    1817       } else {
    1818         __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
    1819 
    1820       }
    1821       __ jcc(Assembler::equal, one);
    1822     } else {
    1823       // get object class
    1824       // not a safepoint as obj null check happens earlier
    1825       __ cmpptr(obj, (int32_t)NULL_WORD);
    1826       __ jcc(Assembler::equal, zero);
    1827       __ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
    1828 
    1829 #ifndef _LP64
    1830       if (k->is_loaded()) {
    1831         // See if we get an immediate positive hit
    1832         __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
    1833         __ jcc(Assembler::equal, one);
    1834         if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() == k->super_check_offset()) {
    1835           // check for self
    1836           __ cmpoop(klass_RInfo, k->constant_encoding());
    1837           __ jcc(Assembler::equal, one);
    1838           __ push(klass_RInfo);
    1839           __ pushoop(k->constant_encoding());
    1840           __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
    1841           __ pop(klass_RInfo);
    1842           __ pop(dst);
    1843           __ jmp(done);
    1844         }
    1845       }
    1846         else // next block is unconditional if LP64:
    1847 #endif // LP64
    1848       {
    1849         assert(dst != klass_RInfo && dst != k_RInfo, "need 3 registers");
    1850 
    1851         // perform the fast part of the checking logic
    1852         __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, dst, &one, &zero, NULL);
    1853         // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    1854         __ push(klass_RInfo);
    1855         __ push(k_RInfo);
    1856         __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
    1857         __ pop(klass_RInfo);
    1858         __ pop(dst);
    1859         __ jmp(done);
    1860       }
    1861     }
    1862     __ bind(zero);
    1863     __ xorptr(dst, dst);
    1864     __ jmp(done);
    1865     __ bind(one);
    1866     __ movptr(dst, 1);
    1867     __ bind(done);
    1868   } else {
    1869     ShouldNotReachHere();
    1870   }
     1966        ShouldNotReachHere();
     1967      }
    18711968
    18721969}
     
    18951992    assert(cmpval != addr, "cmp and addr must be in different registers");
    18961993    assert(newval != addr, "new value and addr must be in different registers");
    1897     if (os::is_MP()) {
    1898       __ lock();
    1899     }
     1994
    19001995    if ( op->code() == lir_cas_obj) {
    1901       __ cmpxchgptr(newval, Address(addr, 0));
    1902     } else if (op->code() == lir_cas_int) {
     1996#ifdef _LP64
     1997      if (UseCompressedOops) {
     1998        __ encode_heap_oop(cmpval);
     1999        __ mov(rscratch1, newval);
     2000        __ encode_heap_oop(rscratch1);
     2001        if (os::is_MP()) {
     2002          __ lock();
     2003        }
     2004        // cmpval (rax) is implicitly used by this instruction
     2005        __ cmpxchgl(rscratch1, Address(addr, 0));
     2006      } else
     2007#endif
     2008      {
     2009        if (os::is_MP()) {
     2010          __ lock();
     2011        }
     2012        __ cmpxchgptr(newval, Address(addr, 0));
     2013      }
     2014    } else {
     2015      assert(op->code() == lir_cas_int, "lir_cas_int expected");
     2016      if (os::is_MP()) {
     2017        __ lock();
     2018      }
    19032019      __ cmpxchgl(newval, Address(addr, 0));
    1904     } else {
    1905       LP64_ONLY(__ cmpxchgq(newval, Address(addr, 0)));
    19062020    }
    19072021#ifdef _LP64
     
    19252039}
    19262040
    1927 
    1928 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result) {
     2041void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
    19292042  Assembler::Condition acond, ncond;
    19302043  switch (condition) {
     
    20172130      switch (code) {
    20182131        case lir_add: {
    2019           __ increment(lreg, c);
     2132          __ incrementl(lreg, c);
    20202133          break;
    20212134        }
    20222135        case lir_sub: {
    2023           __ decrement(lreg, c);
     2136          __ decrementl(lreg, c);
    20242137          break;
    20252138        }
     
    31453258
    31463259  if (flags & LIR_OpArrayCopy::type_check) {
    3147     __ movptr(tmp, src_klass_addr);
    3148     __ cmpptr(tmp, dst_klass_addr);
     3260    if (UseCompressedOops) {
     3261      __ movl(tmp, src_klass_addr);
     3262      __ cmpl(tmp, dst_klass_addr);
     3263    } else {
     3264      __ movptr(tmp, src_klass_addr);
     3265      __ cmpptr(tmp, dst_klass_addr);
     3266    }
    31493267    __ jcc(Assembler::notEqual, *stub->entry());
    31503268  }
     
    31613279    Label known_ok, halt;
    31623280    __ movoop(tmp, default_type->constant_encoding());
     3281#ifdef _LP64
     3282    if (UseCompressedOops) {
     3283      __ encode_heap_oop(tmp);
     3284    }
     3285#endif
     3286
    31633287    if (basic_type != T_OBJECT) {
    3164       __ cmpptr(tmp, dst_klass_addr);
     3288
     3289      if (UseCompressedOops) __ cmpl(tmp, dst_klass_addr);
     3290      else                   __ cmpptr(tmp, dst_klass_addr);
    31653291      __ jcc(Assembler::notEqual, halt);
    3166       __ cmpptr(tmp, src_klass_addr);
     3292      if (UseCompressedOops) __ cmpl(tmp, src_klass_addr);
     3293      else                   __ cmpptr(tmp, src_klass_addr);
    31673294      __ jcc(Assembler::equal, known_ok);
    31683295    } else {
    3169       __ cmpptr(tmp, dst_klass_addr);
     3296      if (UseCompressedOops) __ cmpl(tmp, dst_klass_addr);
     3297      else                   __ cmpptr(tmp, dst_klass_addr);
    31703298      __ jcc(Assembler::equal, known_ok);
    31713299      __ cmpptr(src, dst);
     
    32413369
    32423370  // Update counter for all call types
    3243   ciMethodData* md = method->method_data();
    3244   if (md == NULL) {
    3245     bailout("out of memory building methodDataOop");
    3246     return;
    3247   }
     3371  ciMethodData* md = method->method_data_or_null();
     3372  assert(md != NULL, "Sanity");
    32483373  ciProfileData* data = md->bci_to_data(bci);
    32493374  assert(data->is_CounterData(), "need CounterData for calls");
     
    32563381  // invokeinterface bytecodes
    32573382  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
    3258       Tier1ProfileVirtualCalls) {
     3383      C1ProfileVirtualCalls) {
    32593384    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    32603385    Register recv = op->recv()->as_register();
     
    32623387    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    32633388    ciKlass* known_klass = op->known_holder();
    3264     if (Tier1OptimizeVirtualCallProfiling && known_klass != NULL) {
     3389    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
    32653390      // We know the type that will be seen at this call site; we can
    32663391      // statically update the methodDataOop rather than needing to do
     
    32753400        if (known_klass->equals(receiver)) {
    32763401          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
    3277           __ addl(data_addr, DataLayout::counter_increment);
     3402          __ addptr(data_addr, DataLayout::counter_increment);
    32783403          return;
    32793404        }
     
    32913416          __ movoop(recv_addr, known_klass->constant_encoding());
    32923417          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
    3293           __ addl(data_addr, DataLayout::counter_increment);
     3418          __ addptr(data_addr, DataLayout::counter_increment);
    32943419          return;
    32953420        }
    32963421      }
    32973422    } else {
    3298       __ movptr(recv, Address(recv, oopDesc::klass_offset_in_bytes()));
     3423      __ load_klass(recv, recv);
    32993424      Label update_done;
    3300       uint i;
    3301       for (i = 0; i < VirtualCallData::row_limit(); i++) {
    3302         Label next_test;
    3303         // See if the receiver is receiver[n].
    3304         __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))));
    3305         __ jcc(Assembler::notEqual, next_test);
    3306         Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
    3307         __ addl(data_addr, DataLayout::counter_increment);
    3308         __ jmp(update_done);
    3309         __ bind(next_test);
    3310       }
    3311 
    3312       // Didn't find receiver; find next empty slot and fill it in
    3313       for (i = 0; i < VirtualCallData::row_limit(); i++) {
    3314         Label next_test;
    3315         Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
    3316         __ cmpptr(recv_addr, (int32_t)NULL_WORD);
    3317         __ jcc(Assembler::notEqual, next_test);
    3318         __ movptr(recv_addr, recv);
    3319         __ movl(Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))), DataLayout::counter_increment);
    3320         __ jmp(update_done);
    3321         __ bind(next_test);
    3322       }
     3425      type_profile_helper(mdo, md, data, recv, &update_done);
    33233426      // Receiver did not match any saved receiver and there is no empty row for it.
    33243427      // Increment total counter to indicate polymorphic case.
    3325       __ addl(counter_addr, DataLayout::counter_increment);
     3428      __ addptr(counter_addr, DataLayout::counter_increment);
    33263429
    33273430      __ bind(update_done);
     
    33293432  } else {
    33303433    // Static call
    3331     __ addl(counter_addr, DataLayout::counter_increment);
    3332   }
    3333 }
    3334 
     3434    __ addptr(counter_addr, DataLayout::counter_increment);
     3435  }
     3436}
    33353437
    33363438void LIR_Assembler::emit_delay(LIR_OpDelay*) {
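Many of the hunks above make the generated loads, stores, and compare-and-swaps conditional on UseCompressedOops: a 32-bit movl combined with encode_heap_oop/decode_heap_oop when compressed oops are in use and the access is not wide, and the full-width movptr path otherwise. As a rough standalone sketch of the arithmetic those helpers perform (base-relative, shifted by the object alignment), the snippet below uses an assumed heap base and 8-byte alignment for illustration only; it is not HotSpot code.

// compressed_oop_sketch.cpp - illustrative model only, not HotSpot code.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Assumed parameters for the sketch: 8-byte object alignment (shift of 3)
// and an arbitrary narrow-oop base; the real values come from the VM.
static const unsigned kOopShift = 3;
static const uint64_t kHeapBase = 0x0000000100000000ULL;

// Compress a 64-bit object address into a 32-bit narrow oop (NULL stays NULL).
static uint32_t encode_heap_oop(uint64_t oop) {
  if (oop == 0) return 0;
  return (uint32_t)((oop - kHeapBase) >> kOopShift);
}

// Expand a 32-bit narrow oop back to the full 64-bit address.
static uint64_t decode_heap_oop(uint32_t narrow_oop) {
  if (narrow_oop == 0) return 0;
  return kHeapBase + ((uint64_t)narrow_oop << kOopShift);
}

int main() {
  uint64_t obj = kHeapBase + 0x12345678ULL * 8;    // some aligned address inside the heap
  uint32_t narrow = encode_heap_oop(obj);
  assert(decode_heap_oop(narrow) == obj);          // round-trips exactly
  assert(decode_heap_oop(encode_heap_oop(0)) == 0);
  printf("oop 0x%llx -> narrow 0x%x\n", (unsigned long long)obj, narrow);
  return 0;
}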
  • trunk/openjdk/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
     25#ifndef CPU_X86_VM_C1_LIRASSEMBLER_X86_HPP
     26#define CPU_X86_VM_C1_LIRASSEMBLER_X86_HPP
     27
    2528 private:
    2629
     
    4346  Address as_Address(LIR_Address* addr, Register tmp);
    4447
    45 
     48  // Record the type of the receiver in ReceiverTypeData
     49  void type_profile_helper(Register mdo,
     50                           ciMethodData *md, ciProfileData *data,
     51                           Register recv, Label* update_done);
    4652public:
    4753
     
    5460         deopt_handler_size = NOT_LP64(10) LP64_ONLY(17)
    5561       };
     62
     63#endif // CPU_X86_VM_C1_LIRASSEMBLER_X86_HPP
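The new type_profile_helper() declared here (and defined in the .cpp hunks above) walks the ReceiverTypeData rows: bump the count of a matching receiver row, otherwise claim the first empty row, otherwise fall through so the caller can bump the polymorphic counter. A plain C++ model of that row-update logic is sketched below; the row limit of 2 and the +1 counter increment are assumptions of the sketch, not values read from this changeset.

// receiver_profile_sketch.cpp - models the row-update logic only, not the MDO layout.
#include <cstdint>
#include <cstdio>

static const int kRowLimit = 2;   // assumed TypeProfileWidth for the sketch

struct ReceiverRow { const void* klass; uint64_t count; };

struct CallProfile {
  ReceiverRow rows[kRowLimit];
  uint64_t    polymorphic_count;  // bumped by the caller when no row can be used
};

// Mirrors type_profile_helper: match an existing row, else fill the first empty
// row; returns false when every row is taken by a different receiver.
static bool record_receiver(CallProfile* p, const void* receiver_klass) {
  for (int i = 0; i < kRowLimit; i++) {           // pass 1: look for a matching row
    if (p->rows[i].klass == receiver_klass) { p->rows[i].count++; return true; }
  }
  for (int i = 0; i < kRowLimit; i++) {           // pass 2: claim the first empty row
    if (p->rows[i].klass == NULL) {
      p->rows[i].klass = receiver_klass;
      p->rows[i].count = 1;                       // DataLayout::counter_increment modeled as 1
      return true;
    }
  }
  return false;                                   // caller increments polymorphic_count
}

int main() {
  CallProfile p = {};
  int a, b, c;                                    // stand-ins for three distinct klass pointers
  record_receiver(&p, &a);
  record_receiver(&p, &a);
  record_receiver(&p, &b);
  if (!record_receiver(&p, &c)) p.polymorphic_count++;
  printf("row0=%llu row1=%llu poly=%llu\n",
         (unsigned long long)p.rows[0].count,
         (unsigned long long)p.rows[1].count,
         (unsigned long long)p.polymorphic_count);
  return 0;
}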
  • trunk/openjdk/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_c1_LIRGenerator_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "c1/c1_Compilation.hpp"
     27#include "c1/c1_FrameMap.hpp"
     28#include "c1/c1_Instruction.hpp"
     29#include "c1/c1_LIRAssembler.hpp"
     30#include "c1/c1_LIRGenerator.hpp"
     31#include "c1/c1_Runtime1.hpp"
     32#include "c1/c1_ValueStack.hpp"
     33#include "ci/ciArray.hpp"
     34#include "ci/ciObjArrayKlass.hpp"
     35#include "ci/ciTypeArrayKlass.hpp"
     36#include "runtime/sharedRuntime.hpp"
     37#include "runtime/stubRoutines.hpp"
     38#include "vmreg_x86.inline.hpp"
    2739
    2840#ifdef ASSERT
     
    108120  }
    109121  Constant* c = v->as_Constant();
    110   if (c && c->state() == NULL) {
     122  if (c && c->state_before() == NULL) {
    111123    // constants of any type can be stored directly, except for
    112124    // unloaded object constants.
     
    183195
    184196
    185 void LIRGenerator::increment_counter(address counter, int step) {
     197LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
     198  LIR_Opr r;
     199  if (type == T_LONG) {
     200    r = LIR_OprFact::longConst(x);
     201  } else if (type == T_INT) {
     202    r = LIR_OprFact::intConst(x);
     203  } else {
     204    ShouldNotReachHere();
     205  }
     206  return r;
     207}
     208
     209void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
    186210  LIR_Opr pointer = new_pointer_register();
    187211  __ move(LIR_OprFact::intptrConst(counter), pointer);
    188   LIR_Address* addr = new LIR_Address(pointer, T_INT);
     212  LIR_Address* addr = new LIR_Address(pointer, type);
    189213  increment_counter(addr, step);
    190214}
     
    194218  __ add((LIR_Opr)addr, LIR_OprFact::intConst(step), (LIR_Opr)addr);
    195219}
    196 
    197220
    198221void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
     
    240263
    241264void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
    242   assert(x->is_root(),"");
     265  assert(x->is_pinned(),"");
    243266  bool needs_range_check = true;
    244267  bool use_length = x->length() != NULL;
     
    315338
    316339void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
    317   assert(x->is_root(),"");
     340  assert(x->is_pinned(),"");
    318341  LIRItem obj(x->obj(), this);
    319342  obj.load_item();
     
    331354  CodeEmitInfo* info_for_exception = NULL;
    332355  if (x->needs_null_check()) {
    333     info_for_exception = state_for(x, x->lock_stack_before());
     356    info_for_exception = state_for(x);
    334357  }
    335358  // this CodeEmitInfo must not have the xhandlers because here the
     
    342365
    343366void LIRGenerator::do_MonitorExit(MonitorExit* x) {
    344   assert(x->is_root(),"");
     367  assert(x->is_pinned(),"");
    345368
    346369  LIRItem obj(x->obj(), this);
     
    711734  // generate compare-and-swap; produces zero condition if swap occurs
    712735  int value_offset = sun_misc_AtomicLongCSImpl::value_offset();
    713   LIR_Opr addr = obj.result();
    714   __ add(addr, LIR_OprFact::intConst(value_offset), addr);
     736  LIR_Opr addr = new_pointer_register();
     737  __ leal(LIR_OprFact::address(new LIR_Address(obj.result(), value_offset, T_LONG)), addr);
    715738  LIR_Opr t1 = LIR_OprFact::illegalOpr;  // no temp needed
    716739  LIR_Opr t2 = LIR_OprFact::illegalOpr;  // no temp needed
     
    719742  // generate conditional move of boolean result
    720743  LIR_Opr result = rlock_result(x);
    721   __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result);
     744  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result, T_LONG);
    722745}
    723746
     
    788811  // generate conditional move of boolean result
    789812  LIR_Opr result = rlock_result(x);
    790   __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result);
     813  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
     814           result, as_BasicType(type));
    791815  if (type == objectType) {   // Write-barrier needed for Object fields.
    792816    // Seems to be precise
     
    852876void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
    853877  assert(x->number_of_arguments() == 5, "wrong type");
     878
     879  // Make all state_for calls early since they can emit code
     880  CodeEmitInfo* info = state_for(x, x->state());
     881
    854882  LIRItem src(x->argument_at(0), this);
    855883  LIRItem src_pos(x->argument_at(1), this);
     
    894922  arraycopy_helper(x, &flags, &expected_type);
    895923
    896   CodeEmitInfo* info = state_for(x, x->state()); // we may want to have stack (deoptimization?)
    897924  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
    898925}
     
    9741001
    9751002void LIRGenerator::do_NewInstance(NewInstance* x) {
     1003#ifndef PRODUCT
    9761004  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    977     tty->print_cr("   ###class not loaded at new bci %d", x->bci());
    978   }
     1005    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
     1006  }
     1007#endif
    9791008  CodeEmitInfo* info = state_for(x, x->state());
    9801009  LIR_Opr reg = result_register_for(x->type());
     
    11171146
    11181147  // info for exceptions
    1119   CodeEmitInfo* info_for_exception = state_for(x, x->state()->copy_locks());
     1148  CodeEmitInfo* info_for_exception = state_for(x);
    11201149
    11211150  CodeStub* stub;
     
    11271156  }
    11281157  LIR_Opr reg = rlock_result(x);
     1158  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
     1159  if (!x->klass()->is_loaded() || UseCompressedOops) {
     1160    tmp3 = new_register(objectType);
     1161  }
    11291162  __ checkcast(reg, obj.result(), x->klass(),
    1130                new_register(objectType), new_register(objectType),
    1131                !x->klass()->is_loaded() ? new_register(objectType) : LIR_OprFact::illegalOpr,
     1163               new_register(objectType), new_register(objectType), tmp3,
    11321164               x->direct_compare(), info_for_exception, patching_info, stub,
    11331165               x->profiled_method(), x->profiled_bci());
     
    11461178  }
    11471179  obj.load_item();
    1148   LIR_Opr tmp = new_register(objectType);
     1180  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
     1181  if (!x->klass()->is_loaded() || UseCompressedOops) {
     1182    tmp3 = new_register(objectType);
     1183  }
    11491184  __ instanceof(reg, obj.result(), x->klass(),
    1150                 tmp, new_register(objectType), LIR_OprFact::illegalOpr,
    1151                 x->direct_compare(), patching_info);
     1185                new_register(objectType), new_register(objectType), tmp3,
     1186                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
    11521187}
    11531188
     
    11891224  if (x->is_safepoint()) {
    11901225    // increment backedge counter if needed
    1191     increment_backedge_counter(state_for(x, x->state_before()));
    1192 
     1226    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    11931227    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
    11941228  }
     
    11981232  LIR_Opr right = yin->result();
    11991233  __ cmp(lir_cond(cond), left, right);
     1234  // Generate branch profiling. Profiling code doesn't kill flags.
    12001235  profile_branch(x, cond);
    12011236  move_to_phi(x->state());
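In the LIRGenerator hunks above, cmove() now takes the BasicType of its operands, and the AtomicLong/Unsafe intrinsics pass it explicitly; the emitted pattern is still "compare-and-swap, then conditionally move 1 or 0 into the result register". The standalone sketch below models only that result contract with std::atomic, not the LIR operations themselves.

// cas_result_sketch.cpp - models the "CAS, then cmove 1/0" result contract only.
#include <atomic>
#include <cstdio>

// Returns 1 if the swap happened (the field held 'expected'), else 0 --
// the same boolean the generated cmove(lir_cond_equal, 1, 0, result, type) yields.
static int cas_long(std::atomic<long long>* field, long long expected, long long new_value) {
  return field->compare_exchange_strong(expected, new_value) ? 1 : 0;
}

int main() {
  std::atomic<long long> value(42);
  printf("swap 42->7 : %d, value=%lld\n", cas_long(&value, 42, 7), (long long)value.load());
  printf("swap 42->9 : %d, value=%lld\n", cas_long(&value, 42, 9), (long long)value.load());
  return 0;
}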
  • trunk/openjdk/hotspot/src/cpu/x86/vm/c1_LinearScan_x86.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_c1_LinearScan_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "c1/c1_Instruction.hpp"
     27#include "c1/c1_LinearScan.hpp"
     28#include "utilities/bitMap.inline.hpp"
    2729
    2830
  • trunk/openjdk/hotspot/src/cpu/x86/vm/c1_LinearScan_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
     25#ifndef CPU_X86_VM_C1_LINEARSCAN_X86_HPP
     26#define CPU_X86_VM_C1_LINEARSCAN_X86_HPP
     27
    2528inline bool LinearScan::is_processed_reg_num(int reg_num) {
    2629#ifndef _LP64
     
    2932  assert(FrameMap::rbp_opr->cpu_regnr() == 7, "wrong assumption below");
    3033  assert(reg_num >= 0, "invalid reg_num");
    31 
    32   return reg_num < 6 || reg_num > 7;
    3334#else
     34   // rsp and rbp, r10, r15 (numbers 6 and 7) are ignored
     35  // rsp and rbp, r10, r15 (numbers [12,15]) are ignored
     36  // r12 (number 11) is conditional on compressed oops.
     37  assert(FrameMap::r12_opr->cpu_regnr() == 11, "wrong assumption below");
    3538  assert(FrameMap::r10_opr->cpu_regnr() == 12, "wrong assumption below");
    3639  assert(FrameMap::r15_opr->cpu_regnr() == 13, "wrong assumption below");
     
    3841  assert(FrameMap::rbp_opr->cpu_regnrLo() == 15, "wrong assumption below");
    3942  assert(reg_num >= 0, "invalid reg_num");
    40 
    41   return reg_num < 12 || reg_num > 15;
    4243#endif // _LP64
     44  return reg_num <= FrameMap::last_cpu_reg() || reg_num >= pd_nof_cpu_regs_frame_map;
    4345}
    4446
     
    102104    assert(cur->type() != T_FLOAT && cur->type() != T_DOUBLE, "cpu regs only");
    103105    _first_reg = pd_first_byte_reg;
    104     _last_reg = pd_last_byte_reg;
     106    _last_reg = FrameMap::last_byte_reg();
    105107    return true;
    106108  } else if ((UseSSE >= 1 && cur->type() == T_FLOAT) || (UseSSE >= 2 && cur->type() == T_DOUBLE)) {
     
    186188  void allocate();
    187189};
     190
     191#endif // CPU_X86_VM_C1_LINEARSCAN_X86_HPP
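The rewritten is_processed_reg_num() above replaces the hard-coded ranges with a single predicate: a register number is processed unless it lies between the last allocatable CPU register and the start of the non-CPU register numbers. The sketch below shows just that shape; the two bounds are placeholder values, not the real FrameMap constants.

// processed_reg_sketch.cpp - shape of the predicate only; constants are placeholders.
#include <cassert>

static const int kLastAllocatableCpuReg = 11;  // assumed stand-in for FrameMap::last_cpu_reg()
static const int kNofCpuRegsFrameMap    = 16;  // assumed stand-in for pd_nof_cpu_regs_frame_map

// Registers in the reserved range (rsp, rbp, r10, r15, conditionally r12) are skipped.
static bool is_processed_reg_num(int reg_num) {
  assert(reg_num >= 0);
  return reg_num <= kLastAllocatableCpuReg || reg_num >= kNofCpuRegsFrameMap;
}

int main() {
  assert(is_processed_reg_num(0));     // ordinary allocatable CPU register
  assert(!is_processed_reg_num(13));   // inside the reserved range
  assert(is_processed_reg_num(20));    // XMM/FPU numbers are processed
  return 0;
}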
  • trunk/openjdk/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_c1_MacroAssembler_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "c1/c1_MacroAssembler.hpp"
     27#include "c1/c1_Runtime1.hpp"
     28#include "classfile/systemDictionary.hpp"
     29#include "gc_interface/collectedHeap.hpp"
     30#include "interpreter/interpreter.hpp"
     31#include "oops/arrayOop.hpp"
     32#include "oops/markOop.hpp"
     33#include "runtime/basicLock.hpp"
     34#include "runtime/biasedLocking.hpp"
     35#include "runtime/os.hpp"
     36#include "runtime/stubRoutines.hpp"
    2737
    2838int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case) {
     
    132142  } else {
    133143    eden_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
     144    incr_allocated_bytes(noreg, var_size_in_bytes, con_size_in_bytes, t1);
    134145  }
    135146}
     
    146157    movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
    147158  }
    148 
    149   movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
     159#ifdef _LP64
     160  if (UseCompressedOops) { // Take care not to kill klass
     161    movptr(t1, klass);
     162    encode_heap_oop_not_null(t1);
     163    movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
     164  } else
     165#endif
     166  {
     167    movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
     168  }
     169
    150170  if (len->is_valid()) {
    151171    movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
    152172  }
     173#ifdef _LP64
     174  else if (UseCompressedOops) {
     175    xorptr(t1, t1);
     176    store_klass_gap(obj, t1);
     177  }
     178#endif
    153179}
    154180
     
    210236void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case) {
    211237  assert(obj == rax, "obj must be in rax, for cmpxchg");
    212   assert(obj != t1 && obj != t2 && t1 != t2, "registers must be different"); // XXX really?
     238  assert_different_registers(obj, t1, t2); // XXX really?
    213239  assert(header_size >= 0 && object_size >= header_size, "illegal sizes");
    214240
     
    221247  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
    222248         "con_size_in_bytes is not multiple of alignment");
    223   const int hdr_size_in_bytes = instanceOopDesc::base_offset_in_bytes();
     249  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;
    224250
    225251  initialize_header(obj, klass, noreg, t1, t2);
     
    308334  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
    309335  int start_offset = offset();
    310   cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
     336
     337  if (UseCompressedOops) {
     338    load_klass(rscratch1, receiver);
     339    cmpptr(rscratch1, iCache);
     340  } else {
     341    cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
     342  }
    311343  // if icache check fails, then jump to runtime routine
    312344  // Note: RECEIVER must still contain the receiver!
     
    314346          RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
    315347  const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
    316   assert(offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
     348  assert(UseCompressedOops || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
    317349}
    318350
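In the initialize_header() hunk above, a compressed-oops build stores the klass as an encoded 32-bit value (movl) and, for objects without a length field, zeroes the adjacent klass gap via store_klass_gap(). The sketch below is a simplified model of the header words that code writes; the 8/4/4-byte layout is stated as an assumption of the sketch rather than taken from oopDesc.

// object_header_sketch.cpp - simplified model of what initialize_header() writes.
#include <cstdint>
#include <cstdio>

// Assumed 64-bit layout with compressed oops: 8-byte mark word, 4-byte narrow
// klass, then either a 4-byte array length or a zeroed "klass gap".
struct HeaderWithCompressedKlass {
  uint64_t mark;            // prototype mark word
  uint32_t narrow_klass;    // encoded klass pointer (the movl in the assembler)
  uint32_t length_or_gap;   // array length, or 0 as written by store_klass_gap()
};

static void initialize_header(HeaderWithCompressedKlass* obj, uint64_t mark_prototype,
                              uint32_t narrow_klass, uint32_t length, bool is_array) {
  obj->mark = mark_prototype;
  obj->narrow_klass = narrow_klass;
  obj->length_or_gap = is_array ? length : 0;   // plain objects get a zeroed gap
}

int main() {
  HeaderWithCompressedKlass h;
  initialize_header(&h, 0x1ULL, 0xCAFEu, 16, /*is_array=*/true);
  printf("mark=0x%llx klass=0x%x len/gap=%u\n",
         (unsigned long long)h.mark, h.narrow_klass, h.length_or_gap);
  return 0;
}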
  • trunk/openjdk/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2222 *
    2323 */
     24
     25#ifndef CPU_X86_VM_C1_MACROASSEMBLER_X86_HPP
     26#define CPU_X86_VM_C1_MACROASSEMBLER_X86_HPP
    2427
    2528// C1_MacroAssembler contains high-level macros for C1
     
    114117
    115118  void invalidate_registers(bool inv_rax, bool inv_rbx, bool inv_rcx, bool inv_rdx, bool inv_rsi, bool inv_rdi) PRODUCT_RETURN;
     119
     120#endif // CPU_X86_VM_C1_MACROASSEMBLER_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_c1_Runtime1_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "c1/c1_Defs.hpp"
     27#include "c1/c1_MacroAssembler.hpp"
     28#include "c1/c1_Runtime1.hpp"
     29#include "interpreter/interpreter.hpp"
     30#include "nativeInst_x86.hpp"
     31#include "oops/compiledICHolderOop.hpp"
     32#include "oops/oop.inline.hpp"
     33#include "prims/jvmtiExport.hpp"
     34#include "register_x86.hpp"
     35#include "runtime/sharedRuntime.hpp"
     36#include "runtime/signature.hpp"
     37#include "runtime/vframeArray.hpp"
     38#include "vmreg_x86.inline.hpp"
    2739
    2840
     
    966978        __ verify_not_null_oop(exception_oop);
    967979
    968 
    969980        oop_maps = new OopMapSet();
    970981        OopMap* oop_map = generate_oop_map(sasm, 1);
     
    10261037          // refilling the TLAB or allocating directly from eden.
    10271038          Label retry_tlab, try_eden;
    1028           __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass)
     1039          const Register thread =
     1040            __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi
    10291041
    10301042          __ bind(retry_tlab);
     
    10321044          // get the instance size (size is postive so movl is fine for 64bit)
    10331045          __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
     1046
    10341047          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
     1048
    10351049          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
    10361050          __ verify_oop(obj);
     
    10421056          // get the instance size (size is postive so movl is fine for 64bit)
    10431057          __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
     1058
    10441059          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
     1060          __ incr_allocated_bytes(thread, obj_size, 0);
     1061
    10451062          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
    10461063          __ verify_oop(obj);
     
    10691086      break;
    10701087
    1071 #ifdef TIERED
    10721088    case counter_overflow_id:
    10731089      {
    1074         Register bci = rax;
     1090        Register bci = rax, method = rbx;
    10751091        __ enter();
    1076         OopMap* map = save_live_registers(sasm, 2);
     1092        OopMap* map = save_live_registers(sasm, 3);
    10771093        // Retrieve bci
    10781094        __ movl(bci, Address(rbp, 2*BytesPerWord));
    1079         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci);
     1095        // And a pointer to the methodOop
     1096        __ movptr(method, Address(rbp, 3*BytesPerWord));
     1097        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
    10801098        oop_maps = new OopMapSet();
    10811099        oop_maps->add_gc_map(call_offset, map);
     
    10851103      }
    10861104      break;
    1087 #endif // TIERED
    10881105
    10891106    case new_type_array_id:
     
    11321149          // refilling the TLAB or allocating directly from eden.
    11331150          Label retry_tlab, try_eden;
    1134           __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx, & rdx
     1151          const Register thread =
     1152            __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi
    11351153
    11361154          __ bind(retry_tlab);
    11371155
    11381156          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
    1139           // since size is postive movl does right thing on 64bit
     1157          // since size is positive movl does right thing on 64bit
    11401158          __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
    11411159          // since size is postive movl does right thing on 64bit
     
    11641182          __ bind(try_eden);
    11651183          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
    1166           // since size is postive movl does right thing on 64bit
     1184          // since size is positive movl does right thing on 64bit
    11671185          __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
    11681186          // since size is positive movl does right thing on 64bit
     
    11771195
    11781196          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
     1197          __ incr_allocated_bytes(thread, arr_size, 0);
    11791198
    11801199          __ initialize_header(obj, klass, length, t1, t2);
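
The "round_up(hdr + length << (layout_helper & 0x1F))" comment above packs the whole array-size computation into one line. A minimal C++ sketch of that arithmetic, assuming (per the comment) that the low five bits of the layout helper give log2 of the element size and that sizes round up to whole heap words; names and decoding here are illustrative, not HotSpot's real Klass::layout_helper layout:

#include <cstddef>
#include <cstdint>

inline size_t array_allocation_size(size_t hdr_bytes,       // header_size() * HeapWordSize
                                    size_t length,          // array length
                                    uint32_t layout_helper) // Klass::layout_helper value
{
  const size_t heap_word = sizeof(intptr_t);
  size_t elem_shift = layout_helper & 0x1F;                 // log2(element size), per the comment
  size_t size = hdr_bytes + (length << elem_shift);
  return (size + heap_word - 1) & ~(heap_word - 1);         // round_up to a heap word
}

For a 12-byte header and an int[10] (elem_shift 2, 8-byte heap words) this yields round_up(12 + 40) = 56 bytes.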
     
    12501269        Label register_finalizer;
    12511270        Register t = rsi;
    1252         __ movptr(t, Address(rax, oopDesc::klass_offset_in_bytes()));
     1271        __ load_klass(t, rax);
    12531272        __ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
    12541273        __ testl(t, JVM_ACC_HAS_FINALIZER);
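
Earlier in this file the allocation stubs now capture the thread register returned by tlab_refill (rdi) and call incr_allocated_bytes only on the eden path. A hedged sketch of that fast-path ordering, with hypothetical Thread and Eden structs standing in for the VM's real types and the eden CAS loop omitted:

#include <cstddef>
#include <cstdint>

struct Thread {
  char*    tlab_top;
  char*    tlab_end;
  uint64_t allocated_bytes;       // stand-in for what incr_allocated_bytes updates
};

struct Eden {
  char* top;
  char* end;
};

// Returns a pointer to obj_size bytes, or nullptr to signal the slow path.
inline void* fast_allocate(Thread* t, Eden* eden, size_t obj_size) {
  // 1. tlab_allocate: plain bump-pointer allocation, no per-thread accounting here.
  if (t->tlab_top + obj_size <= t->tlab_end) {
    void* obj = t->tlab_top;
    t->tlab_top += obj_size;
    return obj;
  }
  // 2. eden_allocate followed by incr_allocated_bytes(thread, obj_size, 0):
  //    the shared-eden case credits the bytes to the thread explicitly.
  if (eden->top + obj_size <= eden->end) {    // the real stub uses a CAS, omitted here
    void* obj = eden->top;
    eden->top += obj_size;
    t->allocated_bytes += obj_size;
    return obj;
  }
  return nullptr;                             // take the runtime slow path
}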
  • trunk/openjdk/hotspot/src/cpu/x86/vm/c1_globals_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
     25#ifndef CPU_X86_VM_C1_GLOBALS_X86_HPP
     26#define CPU_X86_VM_C1_GLOBALS_X86_HPP
     27
     28#include "utilities/globalDefinitions.hpp"
     29#include "utilities/macros.hpp"
     30
    2531// Sets the default values for platform dependent flags used by the client compiler.
    2632// (see c1_globals.hpp)
     
    3642define_pd_global(bool, TieredCompilation,            false);
    3743define_pd_global(intx, CompileThreshold,             1500 );
    38 define_pd_global(intx, Tier2CompileThreshold,        1500 );
    39 define_pd_global(intx, Tier3CompileThreshold,        2500 );
    40 define_pd_global(intx, Tier4CompileThreshold,        4500 );
    41 
    4244define_pd_global(intx, BackEdgeThreshold,            100000);
    43 define_pd_global(intx, Tier2BackEdgeThreshold,       100000);
    44 define_pd_global(intx, Tier3BackEdgeThreshold,       100000);
    45 define_pd_global(intx, Tier4BackEdgeThreshold,       100000);
    4645
    4746define_pd_global(intx, OnStackReplacePercentage,     933  );
     
    6867
    6968define_pd_global(intx, SafepointPollOffset,          256  );
     69
     70#endif // CPU_X86_VM_C1_GLOBALS_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
     25#ifndef CPU_X86_VM_C2_GLOBALS_X86_HPP
     26#define CPU_X86_VM_C2_GLOBALS_X86_HPP
     27
     28#include "utilities/globalDefinitions.hpp"
     29#include "utilities/macros.hpp"
     30
    2531// Sets the default values for platform dependent flags used by the server compiler.
    2632// (see c2_globals.hpp).  Alpha-sorted.
     
    4046#endif // CC_INTERP
    4147define_pd_global(bool, TieredCompilation,            false);
    42 #ifdef TIERED
    43 define_pd_global(intx, CompileThreshold,             1000);
    44 #else
    4548define_pd_global(intx, CompileThreshold,             10000);
    46 #endif // TIERED
    47 define_pd_global(intx, Tier2CompileThreshold,        10000);
    48 define_pd_global(intx, Tier3CompileThreshold,        20000);
    49 define_pd_global(intx, Tier4CompileThreshold,        40000);
    50 
    5149define_pd_global(intx, BackEdgeThreshold,            100000);
    52 define_pd_global(intx, Tier2BackEdgeThreshold,       100000);
    53 define_pd_global(intx, Tier3BackEdgeThreshold,       100000);
    54 define_pd_global(intx, Tier4BackEdgeThreshold,       100000);
    5550
    5651define_pd_global(intx, OnStackReplacePercentage,     140);
     
    9994// Ergonomics related flags
    10095define_pd_global(bool, NeverActAsServerClassMachine, false);
     96
     97#endif // CPU_X86_VM_C2_GLOBALS_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/c2_init_x86.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 2000, 2005, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_c2_init_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "opto/compile.hpp"
     27#include "opto/node.hpp"
    2728
    2829// processor dependent initialization for i486
  • trunk/openjdk/hotspot/src/cpu/x86/vm/codeBuffer_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 2002, 2005, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
     25#ifndef CPU_X86_VM_CODEBUFFER_X86_HPP
     26#define CPU_X86_VM_CODEBUFFER_X86_HPP
     27
    2528private:
    2629  void pd_initialize() {}
     
    2831public:
    2932  void flush_bundle(bool start_new_bundle) {}
     33
     34#endif // CPU_X86_VM_CODEBUFFER_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/copy_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 2003, 2004, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
     25#ifndef CPU_X86_VM_COPY_X86_HPP
     26#define CPU_X86_VM_COPY_X86_HPP
     27
    2528// Inline functions for memory copy and fill.
    2629
    2730// Contains inline asm implementations
    28 #include "incls/_copy_pd.inline.hpp.incl"
     31#ifdef TARGET_OS_ARCH_linux_x86
     32# include "copy_linux_x86.inline.hpp"
     33#endif
     34#ifdef TARGET_OS_ARCH_solaris_x86
     35# include "copy_solaris_x86.inline.hpp"
     36#endif
     37#ifdef TARGET_OS_ARCH_windows_x86
     38# include "copy_windows_x86.inline.hpp"
     39#endif
     40
    2941
    3042static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
     
    5971  (void)memset(to, 0, count);
    6072}
     73
     74#endif // CPU_X86_VM_COPY_X86_HPP
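
This copy_x86.hpp hunk shows the pattern used throughout the changeset: the generated incls/*.incl includes give way to explicit headers selected by TARGET_OS_ARCH_* macros, wrapped in a conventional include guard. A skeletal example of the same pattern; the my_component file names are hypothetical and used only for illustration:

#ifndef EXAMPLE_MY_COMPONENT_HPP
#define EXAMPLE_MY_COMPONENT_HPP

#ifdef TARGET_OS_ARCH_linux_x86
# include "my_component_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "my_component_solaris_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "my_component_windows_x86.inline.hpp"
#endif

#endif // EXAMPLE_MY_COMPONENT_HPP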
  • trunk/openjdk/hotspot/src/cpu/x86/vm/cppInterpreterGenerator_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2222 *
    2323 */
     24
     25#ifndef CPU_X86_VM_CPPINTERPRETERGENERATOR_X86_HPP
     26#define CPU_X86_VM_CPPINTERPRETERGENERATOR_X86_HPP
    2427
    2528 protected:
     
    4649                                          const Register sender_sp,
    4750                                          bool native); // C++ interpreter only
     51
     52#endif // CPU_X86_VM_CPPINTERPRETERGENERATOR_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 2007, 2009, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_cppInterpreter_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "asm/assembler.hpp"
     27#include "interpreter/bytecodeHistogram.hpp"
     28#include "interpreter/cppInterpreter.hpp"
     29#include "interpreter/interpreter.hpp"
     30#include "interpreter/interpreterGenerator.hpp"
     31#include "interpreter/interpreterRuntime.hpp"
     32#include "oops/arrayOop.hpp"
     33#include "oops/methodDataOop.hpp"
     34#include "oops/methodOop.hpp"
     35#include "oops/oop.inline.hpp"
     36#include "prims/jvmtiExport.hpp"
     37#include "prims/jvmtiThreadState.hpp"
     38#include "runtime/arguments.hpp"
     39#include "runtime/deoptimization.hpp"
     40#include "runtime/frame.inline.hpp"
     41#include "runtime/interfaceSupport.hpp"
     42#include "runtime/sharedRuntime.hpp"
     43#include "runtime/stubRoutines.hpp"
     44#include "runtime/synchronizer.hpp"
     45#include "runtime/timer.hpp"
     46#include "runtime/vframeArray.hpp"
     47#include "utilities/debug.hpp"
     48#ifdef SHARK
     49#include "shark/shark_globals.hpp"
     50#endif
    2751
    2852#ifdef CC_INTERP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/cppInterpreter_x86.hpp

    r278 r309  
    2323 */
    2424
     25#ifndef CPU_X86_VM_CPPINTERPRETER_X86_HPP
     26#define CPU_X86_VM_CPPINTERPRETER_X86_HPP
     27
    2528
    2629  protected:
     
    3235  // Max size with JVMTI
    3336  const static int InterpreterCodeSize = 168 * 1024;
     37
     38#endif // CPU_X86_VM_CPPINTERPRETER_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/debug_x86.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_debug_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "code/codeCache.hpp"
     27#include "code/nmethod.hpp"
     28#include "runtime/frame.hpp"
     29#include "runtime/init.hpp"
     30#include "runtime/os.hpp"
     31#include "utilities/debug.hpp"
     32#include "utilities/top.hpp"
    2733
    2834void pd_ps(frame f) {}
  • trunk/openjdk/hotspot/src/cpu/x86/vm/depChecker_x86.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_depChecker_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "compiler/disassembler.hpp"
     27#include "depChecker_x86.hpp"
    2728
    2829// Nothing to do on i486
  • trunk/openjdk/hotspot/src/cpu/x86/vm/depChecker_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
     25#ifndef CPU_X86_VM_DEPCHECKER_X86_HPP
     26#define CPU_X86_VM_DEPCHECKER_X86_HPP
     27
    2528// Nothing to do on i486
     29
     30#endif // CPU_X86_VM_DEPCHECKER_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/disassembler_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
     25#ifndef CPU_X86_VM_DISASSEMBLER_X86_HPP
     26#define CPU_X86_VM_DISASSEMBLER_X86_HPP
     27
    2528  static int pd_instruction_alignment() {
    2629    return 1;
     
    3033    return "";
    3134  }
     35
     36#endif // CPU_X86_VM_DISASSEMBLER_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/dump_x86_32.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 2004, 2008, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_dump_x86_32.cpp.incl"
     25#include "precompiled.hpp"
     26#include "assembler_x86.inline.hpp"
     27#include "memory/compactingPermGenGen.hpp"
     28#include "memory/generation.inline.hpp"
     29#include "memory/space.inline.hpp"
    2730
    2831
  • trunk/openjdk/hotspot/src/cpu/x86/vm/dump_x86_64.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 2004, 2008, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_dump_x86_64.cpp.incl"
     25#include "precompiled.hpp"
     26#include "assembler_x86.inline.hpp"
     27#include "memory/compactingPermGenGen.hpp"
     28#include "memory/generation.inline.hpp"
     29#include "memory/space.inline.hpp"
    2730
    2831
  • trunk/openjdk/hotspot/src/cpu/x86/vm/frame_x86.cpp

    r278 r309  
    2323 */
    2424
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_frame_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "interpreter/interpreter.hpp"
     27#include "memory/resourceArea.hpp"
     28#include "oops/markOop.hpp"
     29#include "oops/methodOop.hpp"
     30#include "oops/oop.inline.hpp"
     31#include "runtime/frame.inline.hpp"
     32#include "runtime/handles.inline.hpp"
     33#include "runtime/javaCalls.hpp"
     34#include "runtime/monitorChunk.hpp"
     35#include "runtime/signature.hpp"
     36#include "runtime/stubCodeGenerator.hpp"
     37#include "runtime/stubRoutines.hpp"
     38#include "vmreg_x86.inline.hpp"
     39#ifdef COMPILER1
     40#include "c1/c1_Runtime1.hpp"
     41#include "runtime/vframeArray.hpp"
     42#endif
    2743
    2844#ifdef ASSERT
     
    142158
    143159    // Could just be some random pointer within the codeBlob
    144 
    145     if (!sender_blob->instructions_contains(sender_pc)) return false;
     160    if (!sender_blob->code_contains(sender_pc)) {
     161      return false;
     162    }
    146163
    147164    // We should never be able to see an adapter if the current frame is something from code cache
    148 
    149     if ( sender_blob->is_adapter_blob()) {
     165    if (sender_blob->is_adapter_blob()) {
    150166      return false;
    151167    }
     
    341357
    342358  address original_pc = nm->get_original_pc(&fr);
    343   assert(nm->code_contains(original_pc), "original PC must be in nmethod");
     359  assert(nm->insts_contains(original_pc), "original PC must be in nmethod");
    344360  assert(nm->is_method_handle_return(original_pc) == is_method_handle_return, "must be");
    345361}
  • trunk/openjdk/hotspot/src/cpu/x86/vm/frame_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2222 *
    2323 */
     24
     25#ifndef CPU_X86_VM_FRAME_X86_HPP
     26#define CPU_X86_VM_FRAME_X86_HPP
     27
     28#include "runtime/synchronizer.hpp"
     29#include "utilities/top.hpp"
    2430
    2531// A frame represents a physical stack frame (an activation).  Frames can be
     
    200206  inline interpreterState get_interpreterState() const;
    201207#endif // CC_INTERP
     208
     209#endif // CPU_X86_VM_FRAME_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp

    r278 r309  
    2222 *
    2323 */
     24
     25#ifndef CPU_X86_VM_FRAME_X86_INLINE_HPP
     26#define CPU_X86_VM_FRAME_X86_INLINE_HPP
    2427
    2528// Inline functions for Intel frames:
     
    6467  if (original_pc != NULL) {
    6568    _pc = original_pc;
    66     assert(((nmethod*)_cb)->code_contains(_pc), "original PC must be in nmethod");
     69    assert(((nmethod*)_cb)->insts_contains(_pc), "original PC must be in nmethod");
    6770    _deopt_state = is_deoptimized;
    6871  } else {
     
    297300  *((oop*) map->location(rax->as_VMReg())) = obj;
    298301}
     302
     303#endif // CPU_X86_VM_FRAME_X86_INLINE_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/globalDefinitions_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 1999, 2004, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
     25#ifndef CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP
     26#define CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP
     27
    2528const int StackAlignmentInBytes  = 16;
     29
     30#endif // CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/globals_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2222 *
    2323 */
     24
     25#ifndef CPU_X86_VM_GLOBALS_X86_HPP
     26#define CPU_X86_VM_GLOBALS_X86_HPP
     27
     28#include "utilities/globalDefinitions.hpp"
     29#include "utilities/macros.hpp"
    2430
    2531// Sets the default values for platform dependent flags used by the runtime system.
     
    5763define_pd_global(intx, StackShadowPages, SOLARIS_ONLY(20) NOT_SOLARIS(6) DEBUG_ONLY(+2));
    5864#else
    59 define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
     65define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+5));
    6066#endif // AMD64
    6167
     
    6470define_pd_global(bool, RewriteBytecodes,     true);
    6571define_pd_global(bool, RewriteFrequentPairs, true);
     72
     73define_pd_global(bool, UseMembar,            false);
     74
     75#endif // CPU_X86_VM_GLOBALS_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/icBuffer_x86.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2006, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_icBuffer_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "asm/assembler.hpp"
     27#include "assembler_x86.inline.hpp"
     28#include "code/icBuffer.hpp"
     29#include "gc_interface/collectedHeap.inline.hpp"
     30#include "interpreter/bytecodes.hpp"
     31#include "memory/resourceArea.hpp"
     32#include "nativeInst_x86.hpp"
     33#include "oops/oop.inline.hpp"
     34#include "oops/oop.inline2.hpp"
    2735
    2836int InlineCacheBuffer::ic_stub_code_size() {
  • trunk/openjdk/hotspot/src/cpu/x86/vm/icache_x86.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_icache_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "assembler_x86.inline.hpp"
     27#include "runtime/icache.hpp"
    2728
    2829#define __ _masm->
  • trunk/openjdk/hotspot/src/cpu/x86/vm/icache_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2004, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2222 *
    2323 */
     24
     25#ifndef CPU_X86_VM_ICACHE_X86_HPP
     26#define CPU_X86_VM_ICACHE_X86_HPP
    2427
    2528// Interface for updating the instruction cache.  Whenever the VM modifies
     
    5457#endif // AMD64
    5558};
     59
     60#endif // CPU_X86_VM_ICACHE_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_interp_masm_x86_32.cpp.incl"
     25#include "precompiled.hpp"
     26#include "interp_masm_x86_32.hpp"
     27#include "interpreter/interpreter.hpp"
     28#include "interpreter/interpreterRuntime.hpp"
     29#include "oops/arrayOop.hpp"
     30#include "oops/markOop.hpp"
     31#include "oops/methodDataOop.hpp"
     32#include "oops/methodOop.hpp"
     33#include "prims/jvmtiExport.hpp"
     34#include "prims/jvmtiRedefineClassesTrace.hpp"
     35#include "prims/jvmtiThreadState.hpp"
     36#include "runtime/basicLock.hpp"
     37#include "runtime/biasedLocking.hpp"
     38#include "runtime/sharedRuntime.hpp"
     39#ifdef TARGET_OS_FAMILY_linux
     40# include "thread_linux.inline.hpp"
     41#endif
     42#ifdef TARGET_OS_FAMILY_solaris
     43# include "thread_solaris.inline.hpp"
     44#endif
     45#ifdef TARGET_OS_FAMILY_windows
     46# include "thread_windows.inline.hpp"
     47#endif
    2748
    2849
     
    799820void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
    800821  assert(ProfileInterpreter, "must be profiling interpreter");
    801   Label zero_continue;
     822  Label set_mdp;
    802823  push(rax);
    803824  push(rbx);
     
    807828  movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
    808829  testptr(rax, rax);
    809   jcc(Assembler::zero, zero_continue);
    810 
     830  jcc(Assembler::zero, set_mdp);
    811831  // rbx,: method
    812832  // rsi: bcp
    813833  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, rsi);
    814834  // rax,: mdi
    815 
     835  // mdo is guaranteed to be non-zero here, we checked for it before the call.
    816836  movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
    817   testptr(rbx, rbx);
    818   jcc(Assembler::zero, zero_continue);
    819837  addptr(rbx, in_bytes(methodDataOopDesc::data_offset()));
    820   addptr(rbx, rax);
    821   movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx);
    822 
    823   bind(zero_continue);
     838  addptr(rax, rbx);
     839  bind(set_mdp);
     840  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
    824841  pop(rbx);
    825842  pop(rax);
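
The rework above replaces the old zero_continue pattern with a single forward branch to set_mdp: once the methodDataOop has been loaded and found non-NULL, no second NULL check is needed after the bcp_to_di call, and both paths share one store into the frame's mdx slot. A simplified, hedged model of the value that store receives (plain integers stand in for the registers):

#include <cstdint>

// Not HotSpot code: models the new set_method_data_pointer_for_bcp flow.
inline intptr_t compute_mdp(intptr_t mdo,          // 0 if the method has no MDO yet
                            intptr_t mdi,          // InterpreterRuntime::bcp_to_di result
                            intptr_t data_offset)  // methodDataOopDesc::data_offset()
{
  intptr_t mdp = mdo;                  // rax after the first load; 0 branches straight to set_mdp
  if (mdp != 0) {
    mdp = mdo + data_offset + mdi;     // addptr(rbx, data_offset); addptr(rax, rbx)
  }
  return mdp;                          // stored at interpreter_frame_mdx_offset * wordSize
}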
     
    13981415  }
    13991416}
     1417
     1418// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
     1419void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
     1420                                                        int increment, int mask,
     1421                                                        Register scratch, bool preloaded,
     1422                                                        Condition cond, Label* where) {
     1423  if (!preloaded) {
     1424    movl(scratch, counter_addr);
     1425  }
     1426  incrementl(scratch, increment);
     1427  movl(counter_addr, scratch);
     1428  andl(scratch, mask);
     1429  jcc(cond, *where);
     1430}
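
increment_mask_and_jump is new in this change and is declared in both interpreter macro-assembler headers below. Its scalar effect, sketched here with the final jcc folded into a boolean result and Assembler::zero taken as the example condition (an assumption for illustration; the real helper takes the condition as a parameter):

#include <cstdint>

// Not HotSpot code: the preloaded == false case of increment_mask_and_jump.
inline bool increment_mask_and_test(int32_t* counter_addr, int32_t increment, int32_t mask) {
  int32_t scratch = *counter_addr;   // movl(scratch, counter_addr)
  scratch += increment;              // incrementl(scratch, increment)
  *counter_addr = scratch;           // movl(counter_addr, scratch)
  return (scratch & mask) == 0;      // andl(scratch, mask); jcc(cond, *where)
}

With increment = 8 and mask = 0x7F8, for instance, the test fires once every 256 calls, which is how such masked counters implement periodic overflow checks without a compare against a threshold.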
  • trunk/openjdk/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp

    r278 r309  
    2222 *
    2323 */
     24
     25#ifndef CPU_X86_VM_INTERP_MASM_X86_32_HPP
     26#define CPU_X86_VM_INTERP_MASM_X86_32_HPP
     27
     28#include "assembler_x86.inline.hpp"
     29#include "interpreter/invocationCounter.hpp"
    2430
    2531// This file specializes the assembler with interpreter-specific macros
     
    186192  void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
    187193                             bool decrement = false);
     194  void increment_mask_and_jump(Address counter_addr,
     195                               int increment, int mask,
     196                               Register scratch, bool preloaded,
     197                               Condition cond, Label* where);
    188198  void set_mdp_flag_at(Register mdp_in, int flag_constant);
    189199  void test_mdp_data_at(Register mdp_in, int offset, Register value,
     
    224234
    225235};
     236
     237#endif // CPU_X86_VM_INTERP_MASM_X86_32_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_interp_masm_x86_64.cpp.incl"
     25#include "precompiled.hpp"
     26#include "interp_masm_x86_64.hpp"
     27#include "interpreter/interpreter.hpp"
     28#include "interpreter/interpreterRuntime.hpp"
     29#include "oops/arrayOop.hpp"
     30#include "oops/markOop.hpp"
     31#include "oops/methodDataOop.hpp"
     32#include "oops/methodOop.hpp"
     33#include "prims/jvmtiExport.hpp"
     34#include "prims/jvmtiRedefineClassesTrace.hpp"
     35#include "prims/jvmtiThreadState.hpp"
     36#include "runtime/basicLock.hpp"
     37#include "runtime/biasedLocking.hpp"
     38#include "runtime/sharedRuntime.hpp"
     39#ifdef TARGET_OS_FAMILY_linux
     40# include "thread_linux.inline.hpp"
     41#endif
     42#ifdef TARGET_OS_FAMILY_solaris
     43# include "thread_solaris.inline.hpp"
     44#endif
     45#ifdef TARGET_OS_FAMILY_windows
     46# include "thread_windows.inline.hpp"
     47#endif
    2748
    2849
     
    429450    // compiled code in threads for which the event is enabled.  Check here for
    430451    // interp_only_mode if these events CAN be enabled.
    431     get_thread(temp);
    432452    // interp_only is an int, on little endian it is sufficient to test the byte only
    433     // Is a cmpl faster (ce
    434     cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
     453    // Is a cmpl faster?
     454    cmpb(Address(r15_thread, JavaThread::interp_only_mode_offset()), 0);
    435455    jcc(Assembler::zero, run_compiled_code);
    436456    jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
     
    836856void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
    837857  assert(ProfileInterpreter, "must be profiling interpreter");
    838   Label zero_continue;
     858  Label set_mdp;
    839859  push(rax);
    840860  push(rbx);
     
    844864  movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
    845865  testptr(rax, rax);
    846   jcc(Assembler::zero, zero_continue);
    847 
     866  jcc(Assembler::zero, set_mdp);
    848867  // rbx: method
    849868  // r13: bcp
    850869  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, r13);
    851870  // rax: mdi
    852 
     871  // mdo is guaranteed to be non-zero here, we checked for it before the call.
    853872  movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
    854   testptr(rbx, rbx);
    855   jcc(Assembler::zero, zero_continue);
    856873  addptr(rbx, in_bytes(methodDataOopDesc::data_offset()));
    857   addptr(rbx, rax);
    858   movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx);
    859 
    860   bind(zero_continue);
     874  addptr(rax, rbx);
     875  bind(set_mdp);
     876  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
    861877  pop(rbx);
    862878  pop(rax);
     
    14811497  }
    14821498}
     1499
     1500// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
     1501void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
     1502                                                        int increment, int mask,
     1503                                                        Register scratch, bool preloaded,
     1504                                                        Condition cond, Label* where) {
     1505  if (!preloaded) {
     1506    movl(scratch, counter_addr);
     1507  }
     1508  incrementl(scratch, increment);
     1509  movl(counter_addr, scratch);
     1510  andl(scratch, mask);
     1511  jcc(cond, *where);
     1512}
  • trunk/openjdk/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp

    r278 r309  
    2222 *
    2323 */
     24
     25#ifndef CPU_X86_VM_INTERP_MASM_X86_64_HPP
     26#define CPU_X86_VM_INTERP_MASM_X86_64_HPP
     27
     28#include "assembler_x86.inline.hpp"
     29#include "interpreter/invocationCounter.hpp"
    2430
    2531// This file specializes the assembler with interpreter-specific macros
     
    195201  void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
    196202                             bool decrement = false);
     203  void increment_mask_and_jump(Address counter_addr,
     204                               int increment, int mask,
     205                               Register scratch, bool preloaded,
     206                               Condition cond, Label* where);
    197207  void set_mdp_flag_at(Register mdp_in, int flag_constant);
    198208  void test_mdp_data_at(Register mdp_in, int offset, Register value,
     
    240250  void notify_method_exit(TosState state, NotifyMethodExitMode mode);
    241251};
     252
     253#endif // CPU_X86_VM_INTERP_MASM_X86_64_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/interpreterGenerator_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
     25#ifndef CPU_X86_VM_INTERPRETERGENERATOR_X86_HPP
     26#define CPU_X86_VM_INTERPRETERGENERATOR_X86_HPP
     27
    2528
    2629// Generation of Interpreter
     
    4245  void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
    4346  void generate_counter_overflow(Label* do_continue);
     47
     48#endif // CPU_X86_VM_INTERPRETERGENERATOR_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/interpreterRT_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2222 *
    2323 */
     24
     25#ifndef CPU_X86_VM_INTERPRETERRT_X86_HPP
     26#define CPU_X86_VM_INTERPRETERRT_X86_HPP
     27
     28#include "memory/allocation.hpp"
    2429
    2530// native method calls
     
    7378  static Register temp();
    7479};
     80
     81#endif // CPU_X86_VM_INTERPRETERRT_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/interpreterRT_x86_32.cpp

    r278 r309  
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_interpreterRT_x86_32.cpp.incl"
     25#include "precompiled.hpp"
     26#include "interpreter/interpreter.hpp"
     27#include "interpreter/interpreterRuntime.hpp"
     28#include "memory/allocation.inline.hpp"
     29#include "memory/universe.inline.hpp"
     30#include "oops/methodOop.hpp"
     31#include "oops/oop.inline.hpp"
     32#include "runtime/handles.inline.hpp"
     33#include "runtime/icache.hpp"
     34#include "runtime/interfaceSupport.hpp"
     35#include "runtime/signature.hpp"
    2736
    2837
  • trunk/openjdk/hotspot/src/cpu/x86/vm/interpreterRT_x86_64.cpp

    r278 r309  
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_interpreterRT_x86_64.cpp.incl"
     25#include "precompiled.hpp"
     26#include "interpreter/interpreter.hpp"
     27#include "interpreter/interpreterRuntime.hpp"
     28#include "memory/allocation.inline.hpp"
     29#include "memory/universe.inline.hpp"
     30#include "oops/methodOop.hpp"
     31#include "oops/oop.inline.hpp"
     32#include "runtime/handles.inline.hpp"
     33#include "runtime/icache.hpp"
     34#include "runtime/interfaceSupport.hpp"
     35#include "runtime/signature.hpp"
    2736
    2837#define __ _masm->
  • trunk/openjdk/hotspot/src/cpu/x86/vm/interpreter_x86.hpp

    r278 r309  
    2323 */
    2424
     25#ifndef CPU_X86_VM_INTERPRETER_X86_HPP
     26#define CPU_X86_VM_INTERPRETER_X86_HPP
     27
    2528 public:
    2629
     
    4548    return stackElementWords * i;
    4649  }
     50
     51#endif // CPU_X86_VM_INTERPRETER_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp

    r278 r309  
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_interpreter_x86_32.cpp.incl"
     25#include "precompiled.hpp"
     26#include "asm/assembler.hpp"
     27#include "interpreter/bytecodeHistogram.hpp"
     28#include "interpreter/interpreter.hpp"
     29#include "interpreter/interpreterGenerator.hpp"
     30#include "interpreter/interpreterRuntime.hpp"
     31#include "interpreter/templateTable.hpp"
     32#include "oops/arrayOop.hpp"
     33#include "oops/methodDataOop.hpp"
     34#include "oops/methodOop.hpp"
     35#include "oops/oop.inline.hpp"
     36#include "prims/jvmtiExport.hpp"
     37#include "prims/jvmtiThreadState.hpp"
     38#include "prims/methodHandles.hpp"
     39#include "runtime/arguments.hpp"
     40#include "runtime/deoptimization.hpp"
     41#include "runtime/frame.inline.hpp"
     42#include "runtime/sharedRuntime.hpp"
     43#include "runtime/stubRoutines.hpp"
     44#include "runtime/synchronizer.hpp"
     45#include "runtime/timer.hpp"
     46#include "runtime/vframeArray.hpp"
     47#include "utilities/debug.hpp"
     48#ifdef COMPILER1
     49#include "c1/c1_Runtime1.hpp"
     50#endif
    2751
    2852#define __ _masm->
  • trunk/openjdk/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_interpreter_x86_64.cpp.incl"
     25#include "precompiled.hpp"
     26#include "asm/assembler.hpp"
     27#include "interpreter/bytecodeHistogram.hpp"
     28#include "interpreter/interpreter.hpp"
     29#include "interpreter/interpreterGenerator.hpp"
     30#include "interpreter/interpreterRuntime.hpp"
     31#include "interpreter/templateTable.hpp"
     32#include "oops/arrayOop.hpp"
     33#include "oops/methodDataOop.hpp"
     34#include "oops/methodOop.hpp"
     35#include "oops/oop.inline.hpp"
     36#include "prims/jvmtiExport.hpp"
     37#include "prims/jvmtiThreadState.hpp"
     38#include "prims/methodHandles.hpp"
     39#include "runtime/arguments.hpp"
     40#include "runtime/deoptimization.hpp"
     41#include "runtime/frame.inline.hpp"
     42#include "runtime/sharedRuntime.hpp"
     43#include "runtime/stubRoutines.hpp"
     44#include "runtime/synchronizer.hpp"
     45#include "runtime/timer.hpp"
     46#include "runtime/vframeArray.hpp"
     47#include "utilities/debug.hpp"
     48#ifdef COMPILER1
     49#include "c1/c1_Runtime1.hpp"
     50#endif
    2751
    2852#define __ _masm->
  • trunk/openjdk/hotspot/src/cpu/x86/vm/javaFrameAnchor_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 2002, 2007, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2222 *
    2323 */
     24
     25#ifndef CPU_X86_VM_JAVAFRAMEANCHOR_X86_HPP
     26#define CPU_X86_VM_JAVAFRAMEANCHOR_X86_HPP
    2427
    2528private:
     
    8083  // Assert (last_Java_sp == NULL || fp == NULL)
    8184  void set_last_Java_fp(intptr_t* fp)                { _last_Java_fp = fp; }
     85
     86#endif // CPU_X86_VM_JAVAFRAMEANCHOR_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/jniFastGetField_x86_32.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 2004, 2008, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_jniFastGetField_x86_32.cpp.incl"
     25#include "precompiled.hpp"
     26#include "assembler_x86.inline.hpp"
     27#include "memory/resourceArea.hpp"
     28#include "prims/jniFastGetField.hpp"
     29#include "prims/jvm_misc.hpp"
     30#include "runtime/safepoint.hpp"
    2731
    2832#define __ masm->
     
    5559  }
    5660  ResourceMark rm;
    57   BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize);
    58   address fast_entry = b->instructions_begin();
    59   CodeBuffer cbuf(fast_entry, b->instructions_size());
     61  BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE*wordSize);
     62  CodeBuffer cbuf(blob);
    6063  MacroAssembler* masm = new MacroAssembler(&cbuf);
     64  address fast_entry = __ pc();
    6165
    6266  Label slow;
     
    136140#else
    137141  switch (type) {
    138     case T_BOOLEAN: jni_fast_GetBooleanField_fp = (GetBooleanField_t)fast_entry; break;
    139     case T_BYTE:    jni_fast_GetByteField_fp = (GetByteField_t)fast_entry; break;
    140     case T_CHAR:    jni_fast_GetCharField_fp = (GetCharField_t)fast_entry; break;
    141     case T_SHORT:   jni_fast_GetShortField_fp = (GetShortField_t)fast_entry; break;
    142     case T_INT:     jni_fast_GetIntField_fp = (GetIntField_t)fast_entry;
     142  case T_BOOLEAN: jni_fast_GetBooleanField_fp = (GetBooleanField_t) fast_entry; break;
     143  case T_BYTE:    jni_fast_GetByteField_fp    = (GetByteField_t)    fast_entry; break;
     144  case T_CHAR:    jni_fast_GetCharField_fp    = (GetCharField_t)    fast_entry; break;
     145  case T_SHORT:   jni_fast_GetShortField_fp   = (GetShortField_t)   fast_entry; break;
     146  case T_INT:     jni_fast_GetIntField_fp     = (GetIntField_t)     fast_entry; break;
    143147  }
    144148  return os::win32::fast_jni_accessor_wrapper(type);
     
    169173  const char *name = "jni_fast_GetLongField";
    170174  ResourceMark rm;
    171   BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize);
    172   address fast_entry = b->instructions_begin();
    173   CodeBuffer cbuf(fast_entry, b->instructions_size());
     175  BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE*wordSize);
     176  CodeBuffer cbuf(blob);
    174177  MacroAssembler* masm = new MacroAssembler(&cbuf);
     178  address fast_entry = __ pc();
    175179
    176180  Label slow;
     
    247251  return fast_entry;
    248252#else
    249   jni_fast_GetLongField_fp = (GetLongField_t)fast_entry;
     253  jni_fast_GetLongField_fp = (GetLongField_t) fast_entry;
    250254  return os::win32::fast_jni_accessor_wrapper(T_LONG);
    251255#endif
     
    260264  }
    261265  ResourceMark rm;
    262   BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize);
    263   address fast_entry = b->instructions_begin();
    264   CodeBuffer cbuf(fast_entry, b->instructions_size());
     266  BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE*wordSize);
     267  CodeBuffer cbuf(blob);
    265268  MacroAssembler* masm = new MacroAssembler(&cbuf);
     269  address fast_entry = __ pc();
    266270
    267271  Label slow_with_pop, slow;
     
    349353#else
    350354  switch (type) {
    351     case T_FLOAT:  jni_fast_GetFloatField_fp = (GetFloatField_t)fast_entry; break;
    352     case T_DOUBLE: jni_fast_GetDoubleField_fp = (GetDoubleField_t)fast_entry;
     355  case T_FLOAT:  jni_fast_GetFloatField_fp  = (GetFloatField_t)  fast_entry; break;
     356  case T_DOUBLE: jni_fast_GetDoubleField_fp = (GetDoubleField_t) fast_entry; break;
    353357  }
    354358  return os::win32::fast_jni_accessor_wrapper(type);
  • trunk/openjdk/hotspot/src/cpu/x86/vm/jniFastGetField_x86_64.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 2004, 2008, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_jniFastGetField_x86_64.cpp.incl"
     25#include "precompiled.hpp"
     26#include "assembler_x86.inline.hpp"
     27#include "memory/resourceArea.hpp"
     28#include "prims/jniFastGetField.hpp"
     29#include "prims/jvm_misc.hpp"
     30#include "runtime/safepoint.hpp"
    2731
    2832#define __ masm->
     
    5963  }
    6064  ResourceMark rm;
    61   BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE);
    62   address fast_entry = b->instructions_begin();
    63   CodeBuffer cbuf(fast_entry, b->instructions_size());
     65  BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE);
     66  CodeBuffer cbuf(blob);
    6467  MacroAssembler* masm = new MacroAssembler(&cbuf);
     68  address fast_entry = __ pc();
    6569
    6670  Label slow;
     
    157161  }
    158162  ResourceMark rm;
    159   BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE);
    160   address fast_entry = b->instructions_begin();
    161   CodeBuffer cbuf(fast_entry, b->instructions_size());
     163  BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE);
     164  CodeBuffer cbuf(blob);
    162165  MacroAssembler* masm = new MacroAssembler(&cbuf);
     166  address fast_entry = __ pc();
    163167
    164168  Label slow;
  • trunk/openjdk/hotspot/src/cpu/x86/vm/jniTypes_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 1998, 2003, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2222 *
    2323 */
     24
     25#ifndef CPU_X86_VM_JNITYPES_X86_HPP
     26#define CPU_X86_VM_JNITYPES_X86_HPP
     27
     28#include "memory/allocation.hpp"
     29#include "oops/oop.hpp"
     30#include "prims/jni.h"
    2431
    2532// This file holds platform-dependent routines used to write primitive jni
     
    123130#undef _JNI_SLOT_OFFSET
    124131};
     132
     133#endif // CPU_X86_VM_JNITYPES_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/jni_x86.h

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
  • trunk/openjdk/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_methodHandles_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "interpreter/interpreter.hpp"
     27#include "memory/allocation.inline.hpp"
     28#include "prims/methodHandles.hpp"
    2729
    2830#define __ _masm->
     31
     32#ifdef PRODUCT
     33#define BLOCK_COMMENT(str) /* nothing */
     34#else
     35#define BLOCK_COMMENT(str) __ block_comment(str)
     36#endif
     37
     38#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
    2939
    3040address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
     
    6575  // Verify that argslot lies within (rsp, rbp].
    6676  Label L_ok, L_bad;
     77  BLOCK_COMMENT("{ verify_argslot");
    6778  __ cmpptr(argslot_reg, rbp);
    6879  __ jccb(Assembler::above, L_bad);
     
    7283  __ stop(error_message);
    7384  __ bind(L_ok);
     85  BLOCK_COMMENT("} verify_argslot");
    7486}
    7587#endif
     
    8193  // rcx: receiver method handle (must load from sp[MethodTypeForm.vmslots])
    8294  // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
    83   // rdx: garbage temp, blown away
     95  // rdx, rdi: garbage temp, blown away
    8496
    8597  Register rbx_method = rbx;
     
    8799  Register rax_mtype  = rax;
    88100  Register rdx_temp   = rdx;
     101  Register rdi_temp   = rdi;
    89102
    90103  // emit WrongMethodType path first, to enable jccb back-branch from main path
    91104  Label wrong_method_type;
    92105  __ bind(wrong_method_type);
     106  Label invoke_generic_slow_path;
     107  assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");;
     108  __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeExact);
     109  __ jcc(Assembler::notEqual, invoke_generic_slow_path);
    93110  __ push(rax_mtype);       // required mtype
    94111  __ push(rcx_recv);        // bad mh (1st stacked argument)
     
    107124    }
    108125  }
    109   Register rbx_temp = rbx_method; // done with incoming methodOop
    110126
    111127  // given the MethodType, find out where the MH argument is buried
    112   __ movptr(rdx_temp, Address(rax_mtype,
    113                               __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rbx_temp)));
    114   __ movl(rdx_temp, Address(rdx_temp,
    115                             __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, rbx_temp)));
    116   __ movptr(rcx_recv, __ argument_address(rdx_temp));
    117 
    118   __ check_method_handle_type(rax_mtype, rcx_recv, rdx_temp, wrong_method_type);
    119   __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
     128  __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rdi_temp)));
     129  Register rdx_vmslots = rdx_temp;
     130  __ movl(rdx_vmslots, Address(rdx_temp, __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp)));
     131  __ movptr(rcx_recv, __ argument_address(rdx_vmslots));
     132
     133  trace_method_handle(_masm, "invokeExact");
     134
     135  __ check_method_handle_type(rax_mtype, rcx_recv, rdi_temp, wrong_method_type);
     136  __ jump_to_method_handle_entry(rcx_recv, rdi_temp);
     137
     138  // for invokeGeneric (only), apply argument and result conversions on the fly
     139  __ bind(invoke_generic_slow_path);
     140#ifdef ASSERT
     141  { Label L;
     142    __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeGeneric);
     143    __ jcc(Assembler::equal, L);
     144    __ stop("bad methodOop::intrinsic_id");
     145    __ bind(L);
     146  }
     147#endif //ASSERT
     148  Register rbx_temp = rbx_method;  // don't need it now
     149
     150  // make room on the stack for another pointer:
     151  Register rcx_argslot = rcx_recv;
     152  __ lea(rcx_argslot, __ argument_address(rdx_vmslots, 1));
     153  insert_arg_slots(_masm, 2 * stack_move_unit(), _INSERT_REF_MASK,
     154                   rcx_argslot, rbx_temp, rdx_temp);
     155
     156  // load up an adapter from the calling type (Java weaves this)
     157  __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rdi_temp)));
     158  Register rdx_adapter = rdx_temp;
     159  // __ load_heap_oop(rdx_adapter, Address(rdx_temp, java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes()));
     160  // deal with old JDK versions:
     161  __ lea(rdi_temp, Address(rdx_temp, __ delayed_value(java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp)));
     162  __ cmpptr(rdi_temp, rdx_temp);
     163  Label sorry_no_invoke_generic;
     164  __ jcc(Assembler::below, sorry_no_invoke_generic);
     165
     166  __ load_heap_oop(rdx_adapter, Address(rdi_temp, 0));
     167  __ testptr(rdx_adapter, rdx_adapter);
     168  __ jcc(Assembler::zero, sorry_no_invoke_generic);
     169  __ movptr(Address(rcx_argslot, 1 * Interpreter::stackElementSize), rdx_adapter);
     170  // As a trusted first argument, pass the type being called, so the adapter knows
     171  // the actual types of the arguments and return values.
     172  // (Generic invokers are shared among form-families of method-type.)
     173  __ movptr(Address(rcx_argslot, 0 * Interpreter::stackElementSize), rax_mtype);
     174  // FIXME: assert that rdx_adapter is of the right method-type.
     175  __ mov(rcx, rdx_adapter);
     176  trace_method_handle(_masm, "invokeGeneric");
     177  __ jump_to_method_handle_entry(rcx, rdi_temp);
     178
     179  __ bind(sorry_no_invoke_generic); // no invokeGeneric implementation available!
     180  __ movptr(rcx_recv, Address(rcx_argslot, -1 * Interpreter::stackElementSize));  // recover original MH
     181  __ push(rax_mtype);       // required mtype
     182  __ push(rcx_recv);        // bad mh (1st stacked argument)
     183  __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
    120184
    121185  return entry_point;
     
    165229  //     rdx[-size] = rdx[0]
    166230  //   argslot -= size;
     231  BLOCK_COMMENT("insert_arg_slots {");
    167232  __ mov(rdx_temp, rsp);                        // source pointer for copy
    168233  __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr));
    169234  {
    170235    Label loop;
    171     __ bind(loop);
     236    __ BIND(loop);
    172237    // pull one word down each time through the loop
    173238    __ movptr(rbx_temp, Address(rdx_temp, 0));
     
    180245  // Now move the argslot down, to point to the opened-up space.
    181246  __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
     247  BLOCK_COMMENT("} insert_arg_slots");
    182248}
    183249
     
    219285#endif
    220286
     287  BLOCK_COMMENT("remove_arg_slots {");
    221288  // Pull up everything shallower than rax_argslot.
    222289  // Then remove the excess space on the stack.
     
    230297  {
    231298    Label loop;
    232     __ bind(loop);
     299    __ BIND(loop);
    233300    // pull one word up each time through the loop
    234301    __ movptr(rbx_temp, Address(rdx_temp, 0));
     
    243310  // And adjust the argslot address to point at the deletion point.
    244311  __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
     312  BLOCK_COMMENT("} remove_arg_slots");
    245313}
    246314
     
    249317void trace_method_handle_stub(const char* adaptername,
    250318                              oop mh,
     319                              intptr_t* saved_regs,
    251320                              intptr_t* entry_sp,
    252321                              intptr_t* saved_sp,
     
    257326  printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
    258327         adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
    259   if (last_sp != saved_sp)
     328  if (last_sp != saved_sp && last_sp != NULL)
    260329    printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
    261   if (Verbose)  print_method_handle(mh);
     330  if (Verbose) {
     331    printf(" reg dump: ");
     332    int saved_regs_count = (entry_sp-1) - saved_regs;
     333    // 32 bit: rdi rsi rbp rsp; rbx rdx rcx (*) rax
     334    int i;
     335    for (i = 0; i <= saved_regs_count; i++) {
     336      if (i > 0 && i % 4 == 0 && i != saved_regs_count)
     337        printf("\n   + dump: ");
     338      printf(" %d: "INTPTR_FORMAT, i, saved_regs[i]);
     339    }
     340    printf("\n");
     341    int stack_dump_count = 16;
     342    if (stack_dump_count < (int)(saved_bp + 2 - saved_sp))
     343      stack_dump_count = (int)(saved_bp + 2 - saved_sp);
     344    if (stack_dump_count > 64)  stack_dump_count = 48;
     345    for (i = 0; i < stack_dump_count; i += 4) {
     346      printf(" dump at SP[%d] "INTPTR_FORMAT": "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT"\n",
     347             i, (intptr_t) &entry_sp[i+0], entry_sp[i+0], entry_sp[i+1], entry_sp[i+2], entry_sp[i+3]);
     348    }
     349    print_method_handle(mh);
     350  }
     351}
     352void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
     353  if (!TraceMethodHandles)  return;
     354  BLOCK_COMMENT("trace_method_handle {");
     355  __ push(rax);
     356  __ lea(rax, Address(rsp, wordSize*6)); // entry_sp
     357  __ pusha();
     358  // arguments:
     359  __ push(rbp);               // interpreter frame pointer
     360  __ push(rsi);               // saved_sp
     361  __ push(rax);               // entry_sp
     362  __ push(rcx);               // mh
     363  __ push(rcx);
     364  __ movptr(Address(rsp, 0), (intptr_t) adaptername);
     365  __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), 5);
     366  __ popa();
     367  __ pop(rax);
     368  BLOCK_COMMENT("} trace_method_handle");
    262369}
    263370#endif //PRODUCT
     
    279386}
    280387
     388//------------------------------------------------------------------------------
     389// MethodHandles::generate_method_handle_stub
     390//
    281391// Generate an "entry" field for a method handle.
    282392// This determines how the method handle will respond to calls.
     
    290400  // - rdx: garbage temp, can blow away
    291401
    292   Register rcx_recv    = rcx;
    293   Register rax_argslot = rax;
    294   Register rbx_temp    = rbx;
    295   Register rdx_temp    = rdx;
     402  const Register rcx_recv    = rcx;
     403  const Register rax_argslot = rax;
     404  const Register rbx_temp    = rbx;
     405  const Register rdx_temp    = rdx;
    296406
    297407  // This guy is set up by prepare_to_jump_from_interpreted (from interpreted calls)
    298408  // and gen_c2i_adapter (from compiled calls):
    299   Register saved_last_sp = LP64_ONLY(r13) NOT_LP64(rsi);
     409  const Register saved_last_sp = LP64_ONLY(r13) NOT_LP64(rsi);
     410
     411  // Argument registers for _raise_exception.
     412  // 32-bit: Pass first two oop/int args in registers ECX and EDX.
     413  const Register rarg0_code     = LP64_ONLY(j_rarg0) NOT_LP64(rcx);
     414  const Register rarg1_actual   = LP64_ONLY(j_rarg1) NOT_LP64(rdx);
     415  const Register rarg2_required = LP64_ONLY(j_rarg2) NOT_LP64(rdi);
     416  assert_different_registers(rarg0_code, rarg1_actual, rarg2_required, saved_last_sp);
    300417
    301418  guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
     
    323440
    324441  address interp_entry = __ pc();
    325   if (UseCompressedOops)  __ unimplemented("UseCompressedOops");
    326 
    327 #ifndef PRODUCT
    328   if (TraceMethodHandles) {
    329     __ push(rax); __ push(rbx); __ push(rcx); __ push(rdx); __ push(rsi); __ push(rdi);
    330     __ lea(rax, Address(rsp, wordSize*6)); // entry_sp
    331     // arguments:
    332     __ push(rbp);               // interpreter frame pointer
    333     __ push(rsi);               // saved_sp
    334     __ push(rax);               // entry_sp
    335     __ push(rcx);               // mh
    336     __ push(rcx);
    337     __ movptr(Address(rsp, 0), (intptr_t)entry_name(ek));
    338     __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), 5);
    339     __ pop(rdi); __ pop(rsi); __ pop(rdx); __ pop(rcx); __ pop(rbx); __ pop(rax);
    340   }
    341 #endif //PRODUCT
     442
     443  trace_method_handle(_masm, entry_name(ek));
     444
     445  BLOCK_COMMENT(entry_name(ek));
    342446
    343447  switch ((int) ek) {
    344448  case _raise_exception:
    345449    {
    346       // Not a real MH entry, but rather shared code for raising an exception.
    347       // Extra local arguments are pushed on stack, as required type at TOS+8,
    348       // failing object (or NULL) at TOS+4, failing bytecode type at TOS.
    349       // Beyond those local arguments are the PC, of course.
    350       Register rdx_code = rdx_temp;
    351       Register rcx_fail = rcx_recv;
    352       Register rax_want = rax_argslot;
    353       Register rdi_pc   = rdi;
    354       __ pop(rdx_code);  // TOS+0
    355       __ pop(rcx_fail);  // TOS+4
    356       __ pop(rax_want);  // TOS+8
    357       __ pop(rdi_pc);    // caller PC
    358 
    359       __ mov(rsp, rsi);   // cut the stack back to where the caller started
    360 
    361       // Repush the arguments as if coming from the interpreter.
    362       __ push(rdx_code);
    363       __ push(rcx_fail);
    364       __ push(rax_want);
     450      // Not a real MH entry, but rather shared code for raising an
     451      // exception.  Since we use a C2I adapter to set up the
     452      // interpreter state, arguments are expected in compiler
     453      // argument registers.
     454      assert(raise_exception_method(), "must be set");
     455      address c2i_entry = raise_exception_method()->get_c2i_entry();
     456      assert(c2i_entry, "method must be linked");
     457
     458      const Register rdi_pc = rax;
     459      __ pop(rdi_pc);  // caller PC
     460      __ mov(rsp, saved_last_sp);  // cut the stack back to where the caller started
    365461
    366462      Register rbx_method = rbx_temp;
    367       Label no_method;
     463      Label L_no_method;
    368464      // FIXME: fill in _raise_exception_method with a suitable sun.dyn method
    369465      __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method));
    370466      __ testptr(rbx_method, rbx_method);
    371       __ jccb(Assembler::zero, no_method);
    372       int jobject_oop_offset = 0;
     467      __ jccb(Assembler::zero, L_no_method);
     468
     469      const int jobject_oop_offset = 0;
    373470      __ movptr(rbx_method, Address(rbx_method, jobject_oop_offset));  // dereference the jobject
    374471      __ testptr(rbx_method, rbx_method);
    375       __ jccb(Assembler::zero, no_method);
     472      __ jccb(Assembler::zero, L_no_method);
    376473      __ verify_oop(rbx_method);
    377       __ push(rdi_pc);          // and restore caller PC
    378       __ jmp(rbx_method_fie);
     474
     475      // 32-bit: push remaining arguments as if coming from the compiler.
     476      NOT_LP64(__ push(rarg2_required));
     477
     478      __ push(rdi_pc);  // restore caller PC
     479      __ jump(ExternalAddress(c2i_entry));  // do C2I transition
    379480
    380481      // If we get here, the Java runtime did not do its job of creating the exception.
     381482      // Do something that at least causes a valid throw from the interpreter.
    382       __ bind(no_method);
    383       __ pop(rax_want);
    384       __ pop(rcx_fail);
    385       __ push(rax_want);
    386       __ push(rcx_fail);
     483      __ bind(L_no_method);
     484      __ push(rarg2_required);
     485      __ push(rarg1_actual);
    387486      __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
    388487    }
     
    393492    {
    394493      Register rbx_method = rbx_temp;
    395       __ movptr(rbx_method, rcx_mh_vmtarget); // target is a methodOop
     494      __ load_heap_oop(rbx_method, rcx_mh_vmtarget); // target is a methodOop
    396495      __ verify_oop(rbx_method);
    397496      // same as TemplateTable::invokestatic or invokespecial,
     
    450549      Register rdx_intf  = rdx_temp;
    451550      Register rbx_index = rbx_temp;
    452       __ movptr(rdx_intf, rcx_mh_vmtarget);
    453       __ movl(rbx_index,   rcx_dmh_vmindex);
     551      __ load_heap_oop(rdx_intf, rcx_mh_vmtarget);
     552      __ movl(rbx_index, rcx_dmh_vmindex);
    454553      __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
    455554      __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes());
     
    479578      // Throw an exception.
    480579      // For historical reasons, it will be IncompatibleClassChangeError.
    481       __ pushptr(Address(rdx_intf, java_mirror_offset));  // required interface
    482       __ push(rcx_recv);        // bad receiver
    483       __ push((int)Bytecodes::_invokeinterface);  // who is complaining?
     580      __ mov(rbx_temp, rcx_recv);  // rarg2_required might be RCX
     581      assert_different_registers(rarg2_required, rbx_temp);
     582      __ movptr(rarg2_required, Address(rdx_intf, java_mirror_offset));  // required interface
     583      __ mov(   rarg1_actual,   rbx_temp);                               // bad receiver
     584      __ movl(  rarg0_code,     (int) Bytecodes::_invokeinterface);      // who is complaining?
    484585      __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
    485586    }
     
    506607
    507608      // store bound argument into the new stack slot:
    508       __ movptr(rbx_temp, rcx_bmh_argument);
     609      __ load_heap_oop(rbx_temp, rcx_bmh_argument);
    509610      Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type));
    510611      if (arg_type == T_OBJECT) {
     
    524625      if (direct_to_method) {
    525626        Register rbx_method = rbx_temp;
    526         __ movptr(rbx_method, rcx_mh_vmtarget);
     627        __ load_heap_oop(rbx_method, rcx_mh_vmtarget);
    527628        __ verify_oop(rbx_method);
    528629        __ jmp(rbx_method_fie);
    529630      } else {
    530         __ movptr(rcx_recv, rcx_mh_vmtarget);
     631        __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
    531632        __ verify_oop(rcx_recv);
    532633        __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
     
    538639  case _adapter_retype_raw:
    539640    // immediately jump to the next MH layer:
    540     __ movptr(rcx_recv, rcx_mh_vmtarget);
     641    __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
    541642    __ verify_oop(rcx_recv);
    542643    __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
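
The recurring movptr → load_heap_oop replacements in these method-handle entries (together with dropping the __ unimplemented("UseCompressedOops") guard earlier in the file) are what make the stubs work when oops are stored compressed: the field then holds a 32-bit narrow oop that must be decoded, not read as a raw pointer. A minimal sketch of that decode step; the heap base, the 3-bit shift, and the helper name are assumptions for illustration, not HotSpot code.

    #include <cstdint>
    #include <cstdio>

    // Illustrative decode of a 32-bit narrow oop into a full 64-bit address.
    // Typical configuration assumed: some heap base plus a 3-bit shift that
    // accounts for 8-byte object alignment.
    static uint64_t decode_narrow_oop(uint32_t narrow, uint64_t heap_base, unsigned shift) {
      return narrow == 0 ? 0 : heap_base + (static_cast<uint64_t>(narrow) << shift);
    }

    int main() {
      const uint64_t heap_base = 0x700000000ULL;  // assumed
      const unsigned shift     = 3;               // assumed
      uint32_t narrow = 0x12345678;
      std::printf("narrow 0x%x decodes to 0x%llx\n", narrow,
                  (unsigned long long) decode_narrow_oop(narrow, heap_base, shift));
      return 0;
    }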
     
    555656
    556657      // What class are we casting to?
    557       __ movptr(rbx_klass, rcx_amh_argument); // this is a Class object!
    558       __ movptr(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));
     658      __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object!
     659      __ load_heap_oop(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));
    559660
    560661      Label done;
    561662      __ movptr(rdx_temp, vmarg);
    562663      __ testptr(rdx_temp, rdx_temp);
    563       __ jccb(Assembler::zero, done);         // no cast if null
     664      __ jcc(Assembler::zero, done);         // no cast if null
    564665      __ load_klass(rdx_temp, rdx_temp);
    565666
     
    576677      __ movptr(rdx_temp, vmarg);
    577678
    578       __ pushptr(rcx_amh_argument); // required class
    579       __ push(rdx_temp);            // bad object
    580       __ push((int)Bytecodes::_checkcast);  // who is complaining?
     679      assert_different_registers(rarg2_required, rdx_temp);
     680      __ load_heap_oop(rarg2_required, rcx_amh_argument);             // required class
     681      __ mov(          rarg1_actual,   rdx_temp);                     // bad object
     682      __ movl(         rarg0_code,     (int) Bytecodes::_checkcast);  // who is complaining?
    581683      __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
    582684
    583685      __ bind(done);
    584686      // get the new MH:
    585       __ movptr(rcx_recv, rcx_mh_vmtarget);
     687      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
    586688      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    587689    }
     
    645747
    646748      // get the new MH:
    647       __ movptr(rcx_recv, rcx_mh_vmtarget);
     749      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
    648750      // (now we are done with the old MH)
    649751
     
    720822      }
    721823
    722       __ movptr(rcx_recv, rcx_mh_vmtarget);
     824      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
    723825      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    724826    }
     
    762864      }
    763865
    764       __ movptr(rcx_recv, rcx_mh_vmtarget);
     866      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
    765867      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    766868    }
     
    873975      }
    874976
    875       __ movptr(rcx_recv, rcx_mh_vmtarget);
     977      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
    876978      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    877979    }
     
    9331035      __ pop(rdi);              // restore temp
    9341036
    935       __ movptr(rcx_recv, rcx_mh_vmtarget);
     1037      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
    9361038      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    9371039    }
     
    9561058      __ pop(rdi);              // restore temp
    9571059
    958       __ movptr(rcx_recv, rcx_mh_vmtarget);
     1060      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
    9591061      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    9601062    }
     
    10071109      // Check the array type.
    10081110      Register rbx_klass = rbx_temp;
    1009       __ movptr(rbx_klass, rcx_amh_argument); // this is a Class object!
    1010       __ movptr(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));
     1111      __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object!
     1112      __ load_heap_oop(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));
    10111113
    10121114      Label ok_array_klass, bad_array_klass, bad_array_length;
     
    10901192      // Arguments are spread.  Move to next method handle.
    10911193      UNPUSH_RSI_RDI;
    1092       __ movptr(rcx_recv, rcx_mh_vmtarget);
     1194      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
    10931195      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    10941196
    10951197      __ bind(bad_array_klass);
    10961198      UNPUSH_RSI_RDI;
    1097       __ pushptr(Address(rdx_array_klass, java_mirror_offset)); // required type
    1098       __ pushptr(vmarg);                // bad array
    1099       __ push((int)Bytecodes::_aaload); // who is complaining?
     1199      assert(!vmarg.uses(rarg2_required), "must be different registers");
     1200      __ movptr(rarg2_required, Address(rdx_array_klass, java_mirror_offset));  // required type
     1201      __ movptr(rarg1_actual,   vmarg);                                         // bad array
     1202      __ movl(  rarg0_code,     (int) Bytecodes::_aaload);                      // who is complaining?
    11001203      __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
    11011204
    11021205      __ bind(bad_array_length);
    11031206      UNPUSH_RSI_RDI;
    1104       __ push(rcx_recv);        // AMH requiring a certain length
    1105       __ pushptr(vmarg);        // bad array
    1106       __ push((int)Bytecodes::_arraylength); // who is complaining?
     1207      assert(!vmarg.uses(rarg2_required), "must be different registers");
     1208      __ mov   (rarg2_required, rcx_recv);                       // AMH requiring a certain length
     1209      __ movptr(rarg1_actual,   vmarg);                          // bad array
     1210      __ movl(  rarg0_code,     (int) Bytecodes::_arraylength);  // who is complaining?
    11071211      __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
    11081212
  • trunk/openjdk/hotspot/src/cpu/x86/vm/nativeInst_x86.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_nativeInst_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "assembler_x86.inline.hpp"
     27#include "memory/resourceArea.hpp"
     28#include "nativeInst_x86.hpp"
     29#include "oops/oop.inline.hpp"
     30#include "runtime/handles.hpp"
     31#include "runtime/sharedRuntime.hpp"
     32#include "runtime/stubRoutines.hpp"
     33#include "utilities/ostream.hpp"
     34#ifdef COMPILER1
     35#include "c1/c1_Runtime1.hpp"
     36#endif
    2737
    2838void NativeInstruction::wrote(int offset) {
  • trunk/openjdk/hotspot/src/cpu/x86/vm/nativeInst_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2222 *
    2323 */
     24
     25#ifndef CPU_X86_VM_NATIVEINST_X86_HPP
     26#define CPU_X86_VM_NATIVEINST_X86_HPP
     27
     28#include "asm/assembler.hpp"
     29#include "memory/allocation.hpp"
     30#include "runtime/icache.hpp"
     31#include "runtime/os.hpp"
     32#include "utilities/top.hpp"
    2433
    2534// We have interfaces for the following instructions:
     
    548557#endif // AMD64
    549558}
     559
     560#endif // CPU_X86_VM_NATIVEINST_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/registerMap_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 1998, 2007, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
     25#ifndef CPU_X86_VM_REGISTERMAP_X86_HPP
     26#define CPU_X86_VM_REGISTERMAP_X86_HPP
     27
     2528// machine-dependent implementation for register maps
    2629  friend class frame;
     
    3841  void pd_initialize() {}
    3942  void pd_initialize_from(const RegisterMap* map) {}
     43
     44#endif // CPU_X86_VM_REGISTERMAP_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/register_definitions_x86.cpp

    r278 r309  
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_register_definitions_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "asm/assembler.hpp"
     27#include "asm/register.hpp"
     28#include "register_x86.hpp"
     29#ifdef TARGET_ARCH_MODEL_x86_32
     30# include "interp_masm_x86_32.hpp"
     31#endif
     32#ifdef TARGET_ARCH_MODEL_x86_64
     33# include "interp_masm_x86_64.hpp"
     34#endif
    2735
    2836REGISTER_DEFINITION(Register, noreg);
  • trunk/openjdk/hotspot/src/cpu/x86/vm/register_x86.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_register_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "register_x86.hpp"
     27
    2728#ifndef AMD64
    2829const int ConcreteRegisterImpl::max_gpr = RegisterImpl::number_of_registers;
  • trunk/openjdk/hotspot/src/cpu/x86/vm/register_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2222 *
    2323 */
     24
     25#ifndef CPU_X86_VM_REGISTER_X86_HPP
     26#define CPU_X86_VM_REGISTER_X86_HPP
     27
     28#include "asm/register.hpp"
     29#include "vm_version_x86.hpp"
    2430
    2531class VMRegImpl;
     
    220226
    221227};
     228
     229#endif // CPU_X86_VM_REGISTER_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/relocInfo_x86.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 1998, 2008, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_relocInfo_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "asm/assembler.inline.hpp"
     27#include "assembler_x86.inline.hpp"
     28#include "code/relocInfo.hpp"
     29#include "nativeInst_x86.hpp"
     30#include "oops/oop.inline.hpp"
     31#include "runtime/safepoint.hpp"
    2732
    2833
  • trunk/openjdk/hotspot/src/cpu/x86/vm/relocInfo_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
     25#ifndef CPU_X86_VM_RELOCINFO_X86_HPP
     26#define CPU_X86_VM_RELOCINFO_X86_HPP
     27
    2528  // machine-dependent parts of class relocInfo
    2629 private:
     
    3740#endif
    3841  };
     42
     43#endif // CPU_X86_VM_RELOCINFO_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp

    r278 r309  
    2323 */
    2424
     25#include "precompiled.hpp"
     26#ifdef COMPILER2
     27#include "asm/assembler.hpp"
     28#include "assembler_x86.inline.hpp"
     29#include "classfile/systemDictionary.hpp"
     30#include "code/vmreg.hpp"
     31#include "interpreter/interpreter.hpp"
     32#include "nativeInst_x86.hpp"
     33#include "opto/runtime.hpp"
     34#include "runtime/interfaceSupport.hpp"
     35#include "runtime/sharedRuntime.hpp"
     36#include "runtime/stubRoutines.hpp"
     37#include "runtime/vframeArray.hpp"
     38#include "utilities/globalDefinitions.hpp"
     39#include "vmreg_x86.inline.hpp"
     40#endif
    2541
    26 #include "incls/_precompiled.incl"
    27 #include "incls/_runtime_x86_32.cpp.incl"
    2842
    2943#define __ masm->
  • trunk/openjdk/hotspot/src/cpu/x86/vm/runtime_x86_64.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
     25#include "precompiled.hpp"
     26#ifdef COMPILER2
     27#include "asm/assembler.hpp"
     28#include "assembler_x86.inline.hpp"
     29#include "classfile/systemDictionary.hpp"
     30#include "code/vmreg.hpp"
     31#include "interpreter/interpreter.hpp"
     32#include "nativeInst_x86.hpp"
     33#include "opto/runtime.hpp"
     34#include "runtime/interfaceSupport.hpp"
     35#include "runtime/sharedRuntime.hpp"
     36#include "runtime/stubRoutines.hpp"
     37#include "runtime/vframeArray.hpp"
     38#include "utilities/globalDefinitions.hpp"
     39#include "vmreg_x86.inline.hpp"
     40#endif
     41
    2642
    2743// This file should really contain the code for generating the OptoRuntime
  • trunk/openjdk/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp

    r278 r309  
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_sharedRuntime_x86_32.cpp.incl"
     25#include "precompiled.hpp"
     26#include "asm/assembler.hpp"
     27#include "assembler_x86.inline.hpp"
     28#include "code/debugInfoRec.hpp"
     29#include "code/icBuffer.hpp"
     30#include "code/vtableStubs.hpp"
     31#include "interpreter/interpreter.hpp"
     32#include "oops/compiledICHolderOop.hpp"
     33#include "prims/jvmtiRedefineClassesTrace.hpp"
     34#include "runtime/sharedRuntime.hpp"
     35#include "runtime/vframeArray.hpp"
     36#include "vmreg_x86.inline.hpp"
     37#ifdef COMPILER1
     38#include "c1/c1_Runtime1.hpp"
     39#endif
     40#ifdef COMPILER2
     41#include "opto/runtime.hpp"
     42#endif
    2743
    2844#define __ masm->
  • trunk/openjdk/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp

    r278 r309  
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_sharedRuntime_x86_64.cpp.incl"
     25#include "precompiled.hpp"
     26#include "asm/assembler.hpp"
     27#include "assembler_x86.inline.hpp"
     28#include "code/debugInfoRec.hpp"
     29#include "code/icBuffer.hpp"
     30#include "code/vtableStubs.hpp"
     31#include "interpreter/interpreter.hpp"
     32#include "oops/compiledICHolderOop.hpp"
     33#include "prims/jvmtiRedefineClassesTrace.hpp"
     34#include "runtime/sharedRuntime.hpp"
     35#include "runtime/vframeArray.hpp"
     36#include "vmreg_x86.inline.hpp"
     37#ifdef COMPILER1
     38#include "c1/c1_Runtime1.hpp"
     39#endif
     40#ifdef COMPILER2
     41#include "opto/runtime.hpp"
     42#endif
    2743
    2844DeoptimizationBlob *SharedRuntime::_deopt_blob;
  • trunk/openjdk/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp

    r278 r309  
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_stubGenerator_x86_32.cpp.incl"
     25#include "precompiled.hpp"
     26#include "asm/assembler.hpp"
     27#include "assembler_x86.inline.hpp"
     28#include "interpreter/interpreter.hpp"
     29#include "nativeInst_x86.hpp"
     30#include "oops/instanceOop.hpp"
     31#include "oops/methodOop.hpp"
     32#include "oops/objArrayKlass.hpp"
     33#include "oops/oop.inline.hpp"
     34#include "prims/methodHandles.hpp"
     35#include "runtime/frame.inline.hpp"
     36#include "runtime/handles.inline.hpp"
     37#include "runtime/sharedRuntime.hpp"
     38#include "runtime/stubCodeGenerator.hpp"
     39#include "runtime/stubRoutines.hpp"
     40#include "utilities/top.hpp"
     41#ifdef TARGET_OS_FAMILY_linux
     42# include "thread_linux.inline.hpp"
     43#endif
     44#ifdef TARGET_OS_FAMILY_solaris
     45# include "thread_solaris.inline.hpp"
     46#endif
     47#ifdef TARGET_OS_FAMILY_windows
     48# include "thread_windows.inline.hpp"
     49#endif
     50#ifdef COMPILER2
     51#include "opto/runtime.hpp"
     52#endif
    2753
    2854// Declaration and definition of StubGenerator (no .hpp file).
  • trunk/openjdk/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp

    r278 r309  
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_stubGenerator_x86_64.cpp.incl"
     25#include "precompiled.hpp"
     26#include "asm/assembler.hpp"
     27#include "assembler_x86.inline.hpp"
     28#include "interpreter/interpreter.hpp"
     29#include "nativeInst_x86.hpp"
     30#include "oops/instanceOop.hpp"
     31#include "oops/methodOop.hpp"
     32#include "oops/objArrayKlass.hpp"
     33#include "oops/oop.inline.hpp"
     34#include "prims/methodHandles.hpp"
     35#include "runtime/frame.inline.hpp"
     36#include "runtime/handles.inline.hpp"
     37#include "runtime/sharedRuntime.hpp"
     38#include "runtime/stubCodeGenerator.hpp"
     39#include "runtime/stubRoutines.hpp"
     40#include "utilities/top.hpp"
     41#ifdef TARGET_OS_FAMILY_linux
     42# include "thread_linux.inline.hpp"
     43#endif
     44#ifdef TARGET_OS_FAMILY_solaris
     45# include "thread_solaris.inline.hpp"
     46#endif
     47#ifdef TARGET_OS_FAMILY_windows
     48# include "thread_windows.inline.hpp"
     49#endif
     50#ifdef COMPILER2
     51#include "opto/runtime.hpp"
     52#endif
    2753
    2854// Declaration and definition of StubGenerator (no .hpp file).
     
    21722198    __ enter(); // required for proper stackwalking of RuntimeStub frame
    21732199
    2174     checkcast_copy_entry  = __ pc();
    2175     BLOCK_COMMENT("Entry:");
    2176 
    21772200#ifdef ASSERT
    21782201    // caller guarantees that the arrays really are different
     
    21852208#endif //ASSERT
    21862209
    2187     // allocate spill slots for r13, r14
    2188     enum {
    2189       saved_r13_offset,
    2190       saved_r14_offset,
    2191       saved_rbp_offset,
    2192       saved_rip_offset,
    2193       saved_rarg0_offset
    2194     };
    2195     __ subptr(rsp, saved_rbp_offset * wordSize);
    2196     __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
    2197     __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
    21982210    setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
    21992211                       // ckoff => rcx, ckval => r8
     
    22012213#ifdef _WIN64
    22022214    // last argument (#4) is on stack on Win64
    2203     const int ckval_offset = saved_rarg0_offset + 4;
    2204     __ movptr(ckval, Address(rsp, ckval_offset * wordSize));
     2215    __ movptr(ckval, Address(rsp, 6 * wordSize));
    22052216#endif
     2217
     2218    // Caller of this entry point must set up the argument registers.
     2219    checkcast_copy_entry  = __ pc();
     2220    BLOCK_COMMENT("Entry:");
     2221
     2222    // allocate spill slots for r13, r14
     2223    enum {
     2224      saved_r13_offset,
     2225      saved_r14_offset,
     2226      saved_rbp_offset
     2227    };
     2228    __ subptr(rsp, saved_rbp_offset * wordSize);
     2229    __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
     2230    __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
    22062231
    22072232    // check that int operands are properly extended to size_t
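
The spill-slot enum in the relocated block above relies on consecutive enumerators starting at zero: the last name doubles as the number of slots to reserve. A small sketch of the offsets it yields, assuming wordSize == 8 for this 64-bit stub.

    #include <cstdio>

    enum {               // mirrors the enum in the stub; values start at 0
      saved_r13_offset,  // 0
      saved_r14_offset,  // 1
      saved_rbp_offset   // 2 == number of spill slots to reserve
    };

    int main() {
      const int wordSize = 8;  // assumed 64-bit word
      std::printf("subptr(rsp, %d) reserves %d slots\n",
                  saved_rbp_offset * wordSize, saved_rbp_offset);
      std::printf("r13 spills to [rsp + %d], r14 to [rsp + %d]\n",
                  saved_r13_offset * wordSize, saved_r14_offset * wordSize);
      return 0;
    }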
     
    24182443    const Register dst        = c_rarg2;  // destination array oop
    24192444    const Register dst_pos    = c_rarg3;  // destination position
    2420     // elements count is on stack on Win64
    2421 #ifdef _WIN64
    2422 #define C_RARG4 Address(rsp, 6 * wordSize)
     2445#ifndef _WIN64
     2446    const Register length     = c_rarg4;
    24232447#else
    2424 #define C_RARG4 c_rarg4
     2448    const Address  length(rsp, 6 * wordSize);  // elements count is on stack on Win64
    24252449#endif
    24262450
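
Why the fifth argument sits at rsp + 6*wordSize on Win64 in both of the hunks above: the Win64 ABI reserves four register-home (shadow) slots on the caller's side, the call pushes a return address, and by this point the stub has already executed enter(), pushing rbp. A back-of-the-envelope check of that arithmetic; the slot counts are the assumption being illustrated.

    #include <cstdio>

    int main() {
      const int wordSize       = 8;  // 64-bit word
      const int win64_home     = 4;  // register-home (shadow) slots, Win64 ABI
      const int return_address = 1;  // pushed by the call
      const int saved_rbp      = 1;  // pushed by the stub's enter()
      const int slots = win64_home + return_address + saved_rbp;
      std::printf("argument #5 is at [rsp + %d] == rsp + %d*wordSize\n",
                  slots * wordSize, slots);
      return 0;
    }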
     
    24892513    const Register r11_length    = r11; // elements count to copy
    24902514    const Register r10_src_klass = r10; // array klass
    2491     const Register r9_dst_klass  = r9;  // dest array klass
    24922515
    24932516    //  if (length < 0) return -1;
    2494     __ movl(r11_length, C_RARG4);       // length (elements count, 32-bits value)
     2517    __ movl(r11_length, length);        // length (elements count, 32-bits value)
    24952518    __ testl(r11_length, r11_length);
    24962519    __ jccb(Assembler::negative, L_failed_0);
     
    24992522#ifdef ASSERT
    25002523    //  assert(src->klass() != NULL);
    2501     BLOCK_COMMENT("assert klasses not null");
    2502     { Label L1, L2;
     2524    {
     2525      BLOCK_COMMENT("assert klasses not null {");
     2526      Label L1, L2;
    25032527      __ testptr(r10_src_klass, r10_src_klass);
    25042528      __ jcc(Assembler::notZero, L2);   // it is broken if klass is NULL
     
    25062530      __ stop("broken null klass");
    25072531      __ bind(L2);
    2508       __ load_klass(r9_dst_klass, dst);
    2509       __ cmpq(r9_dst_klass, 0);
     2532      __ load_klass(rax, dst);
     2533      __ cmpq(rax, 0);
    25102534      __ jcc(Assembler::equal, L1);     // this would be broken also
    2511       BLOCK_COMMENT("assert done");
     2535      BLOCK_COMMENT("} assert klasses not null done");
    25122536    }
    25132537#endif
     
    25212545    //
    25222546
    2523     int lh_offset = klassOopDesc::header_size() * HeapWordSize +
    2524                     Klass::layout_helper_offset_in_bytes();
     2547    const int lh_offset = klassOopDesc::header_size() * HeapWordSize +
     2548                          Klass::layout_helper_offset_in_bytes();
     2549
     2550    // Handle objArrays completely differently...
     2551    const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
     2552    __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
     2553    __ jcc(Assembler::equal, L_objArray);
     2554
     2555    //  if (src->klass() != dst->klass()) return -1;
     2556    __ load_klass(rax, dst);
     2557    __ cmpq(r10_src_klass, rax);
     2558    __ jcc(Assembler::notEqual, L_failed);
    25252559
    25262560    const Register rax_lh = rax;  // layout helper
    2527 
    25282561    __ movl(rax_lh, Address(r10_src_klass, lh_offset));
    2529 
    2530     // Handle objArrays completely differently...
    2531     jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
    2532     __ cmpl(rax_lh, objArray_lh);
    2533     __ jcc(Assembler::equal, L_objArray);
    2534 
    2535     //  if (src->klass() != dst->klass()) return -1;
    2536     __ load_klass(r9_dst_klass, dst);
    2537     __ cmpq(r10_src_klass, r9_dst_klass);
    2538     __ jcc(Assembler::notEqual, L_failed);
    25392562
    25402563    //  if (!src->is_Array()) return -1;
     
    25442567    // At this point, it is known to be a typeArray (array_tag 0x3).
    25452568#ifdef ASSERT
    2546     { Label L;
     2569    {
     2570      BLOCK_COMMENT("assert primitive array {");
     2571      Label L;
    25472572      __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
    25482573      __ jcc(Assembler::greaterEqual, L);
    25492574      __ stop("must be a primitive array");
    25502575      __ bind(L);
     2576      BLOCK_COMMENT("} assert primitive array done");
    25512577    }
    25522578#endif
     
    26062632  __ BIND(L_copy_longs);
    26072633#ifdef ASSERT
    2608     { Label L;
     2634    {
     2635      BLOCK_COMMENT("assert long copy {");
     2636      Label L;
    26092637      __ cmpl(rax_elsize, LogBytesPerLong);
    26102638      __ jcc(Assembler::equal, L);
    26112639      __ stop("must be long copy, but elsize is wrong");
    26122640      __ bind(L);
     2641      BLOCK_COMMENT("} assert long copy done");
    26132642    }
    26142643#endif
     
    26202649    // objArrayKlass
    26212650  __ BIND(L_objArray);
    2622     // live at this point:  r10_src_klass, src[_pos], dst[_pos]
     2651    // live at this point:  r10_src_klass, r11_length, src[_pos], dst[_pos]
    26232652
    26242653    Label L_plain_copy, L_checkcast_copy;
    26252654    //  test array classes for subtyping
    2626     __ load_klass(r9_dst_klass, dst);
    2627     __ cmpq(r10_src_klass, r9_dst_klass); // usual case is exact equality
     2655    __ load_klass(rax, dst);
     2656    __ cmpq(r10_src_klass, rax); // usual case is exact equality
    26282657    __ jcc(Assembler::notEqual, L_checkcast_copy);
    26292658
     
    26412670
    26422671  __ BIND(L_checkcast_copy);
    2643     // live at this point:  r10_src_klass, !r11_length
     2672    // live at this point:  r10_src_klass, r11_length, rax (dst_klass)
    26442673    {
    2645       // assert(r11_length == C_RARG4); // will reload from here
    2646       Register r11_dst_klass = r11;
    2647       __ load_klass(r11_dst_klass, dst);
    2648 
    26492674      // Before looking at dst.length, make sure dst is also an objArray.
    2650       __ cmpl(Address(r11_dst_klass, lh_offset), objArray_lh);
     2675      __ cmpl(Address(rax, lh_offset), objArray_lh);
    26512676      __ jcc(Assembler::notEqual, L_failed);
    26522677
    26532678      // It is safe to examine both src.length and dst.length.
    2654 #ifndef _WIN64
    2655       arraycopy_range_checks(src, src_pos, dst, dst_pos, C_RARG4,
    2656                              rax, L_failed);
    2657 #else
    2658       __ movl(r11_length, C_RARG4);     // reload
    26592679      arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
    26602680                             rax, L_failed);
     2681
     2682      const Register r11_dst_klass = r11;
    26612683      __ load_klass(r11_dst_klass, dst); // reload
    2662 #endif
    26632684
    26642685      // Marshal the base address arguments now, freeing registers.
     
    26672688      __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
    26682689                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
    2669       __ movl(count, C_RARG4);          // length (reloaded)
     2690      __ movl(count, length);           // length (reloaded)
    26702691      Register sco_temp = c_rarg3;      // this register is free now
    26712692      assert_different_registers(from, to, count, sco_temp,
     
    26742695
    26752696      // Generate the type check.
    2676       int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
    2677                         Klass::super_check_offset_offset_in_bytes());
     2697      const int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
     2698                              Klass::super_check_offset_offset_in_bytes());
    26782699      __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
    26792700      assert_clean_int(sco_temp, rax);
     
    26842705                       objArrayKlass::element_klass_offset_in_bytes());
    26852706      __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
    2686       __ movl(sco_temp,      Address(r11_dst_klass, sco_offset));
     2707      __ movl(  sco_temp,      Address(r11_dst_klass, sco_offset));
    26872708      assert_clean_int(sco_temp, rax);
    26882709
    26892710      // the checkcast_copy loop needs two extra arguments:
    26902711      assert(c_rarg3 == sco_temp, "#3 already in place");
    2691       __ movptr(C_RARG4, r11_dst_klass);  // dst.klass.element_klass
     2712      // Set up arguments for checkcast_copy_entry.
     2713      setup_arg_regs(4);
     2714      __ movptr(r8, r11_dst_klass);  // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris
    26922715      __ jump(RuntimeAddress(checkcast_copy_entry));
    26932716    }
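
The comment "r8 is c_rarg4 on Linux/Solaris" is the crux of this hunk: the SysV AMD64 convention has six integer argument registers, so a fifth argument can stay in a register, while Win64 has only four and spills it to the stack (hence the Address form used earlier). A small sketch of that difference, using the standard register orders.

    #include <cstdio>

    int main() {
      const char* sysv[]  = {"rdi", "rsi", "rdx", "rcx", "r8", "r9"};
      const char* win64[] = {"rcx", "rdx", "r8", "r9"};
      std::printf("SysV c_rarg4 (5th integer arg): %s\n", sysv[4]);
      std::printf("Win64 passes only %zu integer args in registers; the 5th goes on the stack\n",
                  sizeof(win64) / sizeof(win64[0]));
      return 0;
    }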
     
    27012724    return start;
    27022725  }
    2703 
    2704 #undef length_arg
    27052726
    27062727  void generate_arraycopy_stubs() {
  • trunk/openjdk/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_stubRoutines_x86_32.cpp.incl"
     25#include "precompiled.hpp"
     26#include "runtime/deoptimization.hpp"
     27#include "runtime/frame.inline.hpp"
     28#include "runtime/stubRoutines.hpp"
     29#ifdef TARGET_OS_FAMILY_linux
     30# include "thread_linux.inline.hpp"
     31#endif
     32#ifdef TARGET_OS_FAMILY_solaris
     33# include "thread_solaris.inline.hpp"
     34#endif
     35#ifdef TARGET_OS_FAMILY_windows
     36# include "thread_windows.inline.hpp"
     37#endif
    2738
    2839// Implementation of the platform-specific part of StubRoutines - for
  • trunk/openjdk/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.hpp

    r278 r309  
    2323 */
    2424
     25#ifndef CPU_X86_VM_STUBROUTINES_X86_32_HPP
     26#define CPU_X86_VM_STUBROUTINES_X86_32_HPP
     27
    2528// This file holds the platform specific parts of the StubRoutines
    2629// definition. See stubRoutines.hpp for a description on how to
     
    3437// MethodHandles adapters
    3538enum method_handles_platform_dependent_constants {
    36   method_handles_adapters_code_size = 5000
     39  method_handles_adapters_code_size = 10000
    3740};
    3841
     
    6164  static bool    returns_to_call_stub(address return_pc)     { return (return_pc == _call_stub_return_address) ||
    6265                                                                       return_pc == x86::get_call_stub_compiled_return(); }
     66
     67#endif // CPU_X86_VM_STUBROUTINES_X86_32_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/stubRoutines_x86_64.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_stubRoutines_x86_64.cpp.incl"
     25#include "precompiled.hpp"
     26#include "runtime/deoptimization.hpp"
     27#include "runtime/frame.inline.hpp"
     28#include "runtime/stubRoutines.hpp"
     29#ifdef TARGET_OS_FAMILY_linux
     30# include "thread_linux.inline.hpp"
     31#endif
     32#ifdef TARGET_OS_FAMILY_solaris
     33# include "thread_solaris.inline.hpp"
     34#endif
     35#ifdef TARGET_OS_FAMILY_windows
     36# include "thread_windows.inline.hpp"
     37#endif
    2738
    2839// Implementation of the platform-specific part of StubRoutines - for
  • trunk/openjdk/hotspot/src/cpu/x86/vm/stubRoutines_x86_64.hpp

    r278 r309  
    2323 */
    2424
     25#ifndef CPU_X86_VM_STUBROUTINES_X86_64_HPP
     26#define CPU_X86_VM_STUBROUTINES_X86_64_HPP
     27
    2528// This file holds the platform specific parts of the StubRoutines
    2629// definition. See stubRoutines.hpp for a description on how to
     
    3639// MethodHandles adapters
    3740enum method_handles_platform_dependent_constants {
    38   method_handles_adapters_code_size = 13000
     41  method_handles_adapters_code_size = 40000
    3942};
    4043
     
    114117  }
    115118};
     119
     120#endif // CPU_X86_VM_STUBROUTINES_X86_64_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
     25#ifndef CPU_X86_VM_TEMPLATEINTERPRETERGENERATOR_X86_HPP
     26#define CPU_X86_VM_TEMPLATEINTERPRETERGENERATOR_X86_HPP
     27
    2528 protected:
    2629
     
    2831
    2932 // address generate_asm_interpreter_entry(bool synchronized);
     33
     34#endif // CPU_X86_VM_TEMPLATEINTERPRETERGENERATOR_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/templateInterpreter_x86.hpp

    r278 r309  
    2323 */
    2424
     25#ifndef CPU_X86_VM_TEMPLATEINTERPRETER_X86_HPP
     26#define CPU_X86_VM_TEMPLATEINTERPRETER_X86_HPP
     27
    2528
    2629  protected:
     
    3639  const static int InterpreterCodeSize = 168 * 1024;
    3740#endif // AMD64
     41
     42#endif // CPU_X86_VM_TEMPLATEINTERPRETER_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_templateInterpreter_x86_32.cpp.incl"
     25#include "precompiled.hpp"
     26#include "asm/assembler.hpp"
     27#include "interpreter/bytecodeHistogram.hpp"
     28#include "interpreter/interpreter.hpp"
     29#include "interpreter/interpreterGenerator.hpp"
     30#include "interpreter/interpreterRuntime.hpp"
     31#include "interpreter/templateTable.hpp"
     32#include "oops/arrayOop.hpp"
     33#include "oops/methodDataOop.hpp"
     34#include "oops/methodOop.hpp"
     35#include "oops/oop.inline.hpp"
     36#include "prims/jvmtiExport.hpp"
     37#include "prims/jvmtiThreadState.hpp"
     38#include "runtime/arguments.hpp"
     39#include "runtime/deoptimization.hpp"
     40#include "runtime/frame.inline.hpp"
     41#include "runtime/sharedRuntime.hpp"
     42#include "runtime/stubRoutines.hpp"
     43#include "runtime/synchronizer.hpp"
     44#include "runtime/timer.hpp"
     45#include "runtime/vframeArray.hpp"
     46#include "utilities/debug.hpp"
    2747
    2848#define __ _masm->
     
    360380//
    361381void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
    362 
    363   const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
    364   const Address backedge_counter  (rbx, methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset());
    365 
    366   if (ProfileInterpreter) { // %%% Merge this into methodDataOop
    367     __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset()));
    368   }
    369   // Update standard invocation counters
    370   __ movl(rax, backedge_counter);               // load backedge counter
    371 
    372   __ incrementl(rcx, InvocationCounter::count_increment);
    373   __ andl(rax, InvocationCounter::count_mask_value);  // mask out the status bits
    374 
    375   __ movl(invocation_counter, rcx);             // save invocation count
    376   __ addl(rcx, rax);                            // add both counters
    377 
    378   // profile_method is non-null only for interpreted method so
    379   // profile_method != NULL == !native_call
    380   // BytecodeInterpreter only calls for native so code is elided.
    381 
    382   if (ProfileInterpreter && profile_method != NULL) {
    383     // Test to see if we should create a method data oop
     382  const Address invocation_counter(rbx, in_bytes(methodOopDesc::invocation_counter_offset()) +
     383                                        in_bytes(InvocationCounter::counter_offset()));
      384  // Note: In tiered we increment either the counters in the methodOop or in the MDO, depending on whether we're profiling.
     385  if (TieredCompilation) {
     386    int increment = InvocationCounter::count_increment;
     387    int mask = ((1 << Tier0InvokeNotifyFreqLog)  - 1) << InvocationCounter::count_shift;
     388    Label no_mdo, done;
     389    if (ProfileInterpreter) {
     390      // Are we profiling?
     391      __ movptr(rax, Address(rbx, methodOopDesc::method_data_offset()));
     392      __ testptr(rax, rax);
     393      __ jccb(Assembler::zero, no_mdo);
     394      // Increment counter in the MDO
     395      const Address mdo_invocation_counter(rax, in_bytes(methodDataOopDesc::invocation_counter_offset()) +
     396                                                in_bytes(InvocationCounter::counter_offset()));
     397      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
     398      __ jmpb(done);
     399    }
     400    __ bind(no_mdo);
     401    // Increment counter in methodOop (we don't need to load it, it's in rcx).
     402    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow);
     403    __ bind(done);
     404  } else {
     405    const Address backedge_counter  (rbx, methodOopDesc::backedge_counter_offset() +
     406                                          InvocationCounter::counter_offset());
     407
     408    if (ProfileInterpreter) { // %%% Merge this into methodDataOop
     409      __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset()));
     410    }
     411    // Update standard invocation counters
     412    __ movl(rax, backedge_counter);               // load backedge counter
     413
     414    __ incrementl(rcx, InvocationCounter::count_increment);
     415    __ andl(rax, InvocationCounter::count_mask_value);  // mask out the status bits
     416
     417    __ movl(invocation_counter, rcx);             // save invocation count
     418    __ addl(rcx, rax);                            // add both counters
     419
      420    // profile_method is non-null only for interpreted methods, so
      421    // profile_method != NULL == !native_call
      422    // BytecodeInterpreter only calls this for native methods, so that code is elided.
     423
     424    if (ProfileInterpreter && profile_method != NULL) {
     425      // Test to see if we should create a method data oop
     426      __ cmp32(rcx,
     427               ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
     428      __ jcc(Assembler::less, *profile_method_continue);
     429
     430      // if no method data exists, go to profile_method
     431      __ test_method_data_pointer(rax, *profile_method);
     432    }
     433
    384434    __ cmp32(rcx,
    385              ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
    386     __ jcc(Assembler::less, *profile_method_continue);
    387 
    388     // if no method data exists, go to profile_method
    389     __ test_method_data_pointer(rax, *profile_method);
    390   }
    391 
    392   __ cmp32(rcx,
    393            ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
    394   __ jcc(Assembler::aboveEqual, *overflow);
    395 
     435             ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
     436    __ jcc(Assembler::aboveEqual, *overflow);
     437  }
    396438}
    397439
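
A note on the tiered counter arithmetic introduced above (the 64-bit and backedge variants further down follow the same pattern): increment_mask_and_jump bumps the packed counter and takes the overflow branch whenever the masked bits wrap to zero, so the runtime is notified once every 2^Tier0InvokeNotifyFreqLog invocations. A minimal model of that arithmetic; the 3-bit count shift and the frequency log of 8 are assumed values for illustration.

    #include <cstdio>

    int main() {
      const int count_shift     = 3;                 // assumed non-count (status) bits
      const int count_increment = 1 << count_shift;  // 8
      const int freq_log        = 8;                 // assumed Tier0InvokeNotifyFreqLog
      const int mask = ((1 << freq_log) - 1) << count_shift;

      unsigned counter = 0;
      int notifications = 0;
      for (int invocation = 1; invocation <= 1024; ++invocation) {
        counter += count_increment;     // what increment_mask_and_jump emits
        if ((counter & mask) == 0)      // condition == Assembler::zero -> overflow branch
          ++notifications;
      }
      std::printf("notified %d times in 1024 invocations (every %d)\n",
                  notifications, 1 << freq_log);
      return 0;
    }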
     
    13261368      // We have decided to profile this method in the interpreter
    13271369      __ bind(profile_method);
    1328 
    1329       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi, true);
    1330 
    1331       __ movptr(rbx, Address(rbp, method_offset));   // restore methodOop
    1332       __ movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
    1333       __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
    1334       __ test_method_data_pointer(rax, profile_method_continue);
    1335       __ addptr(rax, in_bytes(methodDataOopDesc::data_offset()));
    1336       __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
     1370      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
     1371      __ set_method_data_pointer_for_bcp();
     1372      __ get_method(rbx);
    13371373      __ jmp(profile_method_continue);
    13381374    }
  • trunk/openjdk/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_interpreter_x86_64.cpp.incl"
     25#include "precompiled.hpp"
     26#include "asm/assembler.hpp"
     27#include "interpreter/bytecodeHistogram.hpp"
     28#include "interpreter/interpreter.hpp"
     29#include "interpreter/interpreterGenerator.hpp"
     30#include "interpreter/interpreterRuntime.hpp"
     31#include "interpreter/templateTable.hpp"
     32#include "oops/arrayOop.hpp"
     33#include "oops/methodDataOop.hpp"
     34#include "oops/methodOop.hpp"
     35#include "oops/oop.inline.hpp"
     36#include "prims/jvmtiExport.hpp"
     37#include "prims/jvmtiThreadState.hpp"
     38#include "runtime/arguments.hpp"
     39#include "runtime/deoptimization.hpp"
     40#include "runtime/frame.inline.hpp"
     41#include "runtime/sharedRuntime.hpp"
     42#include "runtime/stubRoutines.hpp"
     43#include "runtime/synchronizer.hpp"
     44#include "runtime/timer.hpp"
     45#include "runtime/vframeArray.hpp"
     46#include "utilities/debug.hpp"
    2747
    2848#define __ _masm->
     
    311331        Label* profile_method,
    312332        Label* profile_method_continue) {
    313 
    314   const Address invocation_counter(rbx,
    315                                    methodOopDesc::invocation_counter_offset() +
     333  const Address invocation_counter(rbx, in_bytes(methodOopDesc::invocation_counter_offset()) +
     334                                        in_bytes(InvocationCounter::counter_offset()));
      335  // Note: In tiered we increment either the counters in the methodOop or in the MDO, depending on whether we're profiling.
     336  if (TieredCompilation) {
     337    int increment = InvocationCounter::count_increment;
     338    int mask = ((1 << Tier0InvokeNotifyFreqLog)  - 1) << InvocationCounter::count_shift;
     339    Label no_mdo, done;
     340    if (ProfileInterpreter) {
     341      // Are we profiling?
     342      __ movptr(rax, Address(rbx, methodOopDesc::method_data_offset()));
     343      __ testptr(rax, rax);
     344      __ jccb(Assembler::zero, no_mdo);
     345      // Increment counter in the MDO
     346      const Address mdo_invocation_counter(rax, in_bytes(methodDataOopDesc::invocation_counter_offset()) +
     347                                                in_bytes(InvocationCounter::counter_offset()));
     348      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
     349      __ jmpb(done);
     350    }
     351    __ bind(no_mdo);
      352    // Increment counter in methodOop (we don't need to load it, it's in rcx).
     353    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow);
     354    __ bind(done);
     355  } else {
     356    const Address backedge_counter(rbx,
     357                                   methodOopDesc::backedge_counter_offset() +
    316358                                   InvocationCounter::counter_offset());
    317   const Address backedge_counter(rbx,
    318                                  methodOopDesc::backedge_counter_offset() +
    319                                  InvocationCounter::counter_offset());
    320 
    321   if (ProfileInterpreter) { // %%% Merge this into methodDataOop
    322     __ incrementl(Address(rbx,
    323                     methodOopDesc::interpreter_invocation_counter_offset()));
    324   }
    325   // Update standard invocation counters
    326   __ movl(rax, backedge_counter); // load backedge counter
    327 
    328   __ incrementl(rcx, InvocationCounter::count_increment);
    329   __ andl(rax, InvocationCounter::count_mask_value); // mask out the
    330                                                      // status bits
    331 
    332   __ movl(invocation_counter, rcx); // save invocation count
    333   __ addl(rcx, rax); // add both counters
    334 
    335   // profile_method is non-null only for interpreted method so
    336   // profile_method != NULL == !native_call
    337 
    338   if (ProfileInterpreter && profile_method != NULL) {
    339     // Test to see if we should create a method data oop
    340     __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
    341     __ jcc(Assembler::less, *profile_method_continue);
    342 
    343     // if no method data exists, go to profile_method
    344     __ test_method_data_pointer(rax, *profile_method);
    345   }
    346 
    347   __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
    348   __ jcc(Assembler::aboveEqual, *overflow);
     359
     360    if (ProfileInterpreter) { // %%% Merge this into methodDataOop
     361      __ incrementl(Address(rbx,
     362                            methodOopDesc::interpreter_invocation_counter_offset()));
     363    }
     364    // Update standard invocation counters
     365    __ movl(rax, backedge_counter);   // load backedge counter
     366
     367    __ incrementl(rcx, InvocationCounter::count_increment);
     368    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits
     369
     370    __ movl(invocation_counter, rcx); // save invocation count
     371    __ addl(rcx, rax);                // add both counters
     372
      373    // profile_method is non-null only for interpreted methods, so
     374    // profile_method != NULL == !native_call
     375
     376    if (ProfileInterpreter && profile_method != NULL) {
     377      // Test to see if we should create a method data oop
     378      __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
     379      __ jcc(Assembler::less, *profile_method_continue);
     380
     381      // if no method data exists, go to profile_method
     382      __ test_method_data_pointer(rax, *profile_method);
     383    }
     384
     385    __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
     386    __ jcc(Assembler::aboveEqual, *overflow);
     387  }
    349388}
    350389
     
    10311070    //
    10321071    __ mov(c_rarg0, r15_thread);
    1033     __ mov(r12, rsp); // remember sp
     1072    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    10341073    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    10351074    __ andptr(rsp, -16); // align stack as required by ABI
     
    10781117
    10791118    __ pusha(); // XXX only save smashed registers
    1080     __ mov(r12, rsp); // remember sp
     1119    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    10811120    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    10821121    __ andptr(rsp, -16); // align stack as required by ABI
     
    13451384      // We have decided to profile this method in the interpreter
    13461385      __ bind(profile_method);
    1347 
    1348       __ call_VM(noreg,
    1349                  CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method),
    1350                  r13, true);
    1351 
    1352       __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop
    1353       __ movptr(rax, Address(rbx,
    1354                              in_bytes(methodOopDesc::method_data_offset())));
    1355       __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
    1356                 rax);
    1357       __ test_method_data_pointer(rax, profile_method_continue);
    1358       __ addptr(rax, in_bytes(methodDataOopDesc::data_offset()));
    1359       __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
    1360               rax);
     1386      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
     1387      __ set_method_data_pointer_for_bcp();
     1388      __ get_method(rbx);
    13611389      __ jmp(profile_method_continue);
    13621390    }
     
    18691897  assert(Interpreter::trace_code(t->tos_in()) != NULL,
    18701898         "entry must have been generated");
    1871   __ mov(r12, rsp); // remember sp
     1899  __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    18721900  __ andptr(rsp, -16); // align stack as required by ABI
    18731901  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  • trunk/openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_templateTable_x86_32.cpp.incl"
     25#include "precompiled.hpp"
     26#include "interpreter/interpreter.hpp"
     27#include "interpreter/interpreterRuntime.hpp"
     28#include "interpreter/templateTable.hpp"
     29#include "memory/universe.inline.hpp"
     30#include "oops/methodDataOop.hpp"
     31#include "oops/objArrayKlass.hpp"
     32#include "oops/oop.inline.hpp"
     33#include "prims/methodHandles.hpp"
     34#include "runtime/sharedRuntime.hpp"
     35#include "runtime/stubRoutines.hpp"
     36#include "runtime/synchronizer.hpp"
    2737
    2838#ifndef CC_INTERP
     
    400410    __ verify_oop(rax);
    401411  }
     412
     413  Label L_done, L_throw_exception;
     414  const Register con_klass_temp = rcx;  // same as Rcache
     415  __ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes()));
     416  __ cmpptr(con_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
     417  __ jcc(Assembler::notEqual, L_done);
     418  __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);
     419  __ jcc(Assembler::notEqual, L_throw_exception);
     420  __ xorptr(rax, rax);
     421  __ jmp(L_done);
     422
     423  // Load the exception from the system-array which wraps it:
     424  __ bind(L_throw_exception);
     425  __ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
     426  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
     427
     428  __ bind(L_done);
    402429}
    403430
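
A minimal C++ sketch of the control flow the added block implements once the resolved constant is back in rax (a sketch only, not HotSpot code; rethrow_via_interpreter is a hypothetical stand-in for the jump to Interpreter::throw_exception_entry() shown above). An object whose klass is Universe::systemObjArrayKlassObj() acts as a sentinel: an empty array stands for a null constant, while a non-empty one wraps an exception in element 0.

    // Sketch only; mirrors the cmp/jcc sequence emitted above.
    oop unwrap_ldc_result(oop result) {
      if (result->klass() != Universe::systemObjArrayKlassObj())
        return result;                                // ordinary constant, use as is
      objArrayOop wrapper = (objArrayOop) result;
      if (wrapper->length() != 0)
        rethrow_via_interpreter(wrapper->obj_at(0));  // hypothetical helper: rethrow wrapped exception
      return NULL;                                    // empty wrapper denotes a null constant
    }
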
     
    15591586    __ jcc(Assembler::positive, dispatch); // count only if backward branch
    15601587
    1561     // increment counter
    1562     __ movl(rax, Address(rcx, be_offset));        // load backedge counter
    1563     __ incrementl(rax, InvocationCounter::count_increment); // increment counter
    1564     __ movl(Address(rcx, be_offset), rax);        // store counter
    1565 
    1566     __ movl(rax, Address(rcx, inv_offset));    // load invocation counter
    1567     __ andl(rax, InvocationCounter::count_mask_value);     // and the status bits
    1568     __ addl(rax, Address(rcx, be_offset));        // add both counters
    1569 
    1570     if (ProfileInterpreter) {
    1571       // Test to see if we should create a method data oop
    1572       __ cmp32(rax,
    1573                ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
    1574       __ jcc(Assembler::less, dispatch);
    1575 
    1576       // if no method data exists, go to profile method
    1577       __ test_method_data_pointer(rax, profile_method);
    1578 
    1579       if (UseOnStackReplacement) {
    1580         // check for overflow against rbx, which is the MDO taken count
    1581         __ cmp32(rbx,
    1582                  ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
    1583         __ jcc(Assembler::below, dispatch);
    1584 
    1585         // When ProfileInterpreter is on, the backedge_count comes from the
    1586         // methodDataOop, which value does not get reset on the call to
    1587         // frequency_counter_overflow().  To avoid excessive calls to the overflow
    1588         // routine while the method is being compiled, add a second test to make
    1589         // sure the overflow function is called only once every overflow_frequency.
    1590         const int overflow_frequency = 1024;
    1591         __ andptr(rbx, overflow_frequency-1);
    1592         __ jcc(Assembler::zero, backedge_counter_overflow);
    1593 
     1588    if (TieredCompilation) {
     1589      Label no_mdo;
     1590      int increment = InvocationCounter::count_increment;
     1591      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
     1592      if (ProfileInterpreter) {
     1593        // Are we profiling?
     1594        __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
     1595        __ testptr(rbx, rbx);
     1596        __ jccb(Assembler::zero, no_mdo);
     1597        // Increment the MDO backedge counter
     1598        const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
     1599                                                in_bytes(InvocationCounter::counter_offset()));
     1600        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
     1601                                   rax, false, Assembler::zero, &backedge_counter_overflow);
     1602        __ jmp(dispatch);
    15941603      }
     1604      __ bind(no_mdo);
     1605      // Increment backedge counter in methodOop
     1606      __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
     1607                                 rax, false, Assembler::zero, &backedge_counter_overflow);
    15951608    } else {
    1596       if (UseOnStackReplacement) {
    1597         // check for overflow against rax, which is the sum of the counters
     1609      // increment counter
     1610      __ movl(rax, Address(rcx, be_offset));        // load backedge counter
     1611      __ incrementl(rax, InvocationCounter::count_increment); // increment counter
     1612      __ movl(Address(rcx, be_offset), rax);        // store counter
     1613
     1614      __ movl(rax, Address(rcx, inv_offset));    // load invocation counter
     1615      __ andl(rax, InvocationCounter::count_mask_value);     // and the status bits
     1616      __ addl(rax, Address(rcx, be_offset));        // add both counters
     1617
     1618      if (ProfileInterpreter) {
     1619        // Test to see if we should create a method data oop
    15981620        __ cmp32(rax,
    1599                  ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
    1600         __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
    1601 
     1621                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
     1622        __ jcc(Assembler::less, dispatch);
     1623
     1624        // if no method data exists, go to profile method
     1625        __ test_method_data_pointer(rax, profile_method);
     1626
     1627        if (UseOnStackReplacement) {
     1628          // check for overflow against rbx, which is the MDO taken count
     1629          __ cmp32(rbx,
     1630                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
     1631          __ jcc(Assembler::below, dispatch);
     1632
     1633          // When ProfileInterpreter is on, the backedge_count comes from the
     1634          // methodDataOop, which value does not get reset on the call to
     1635          // frequency_counter_overflow().  To avoid excessive calls to the overflow
     1636          // routine while the method is being compiled, add a second test to make
     1637          // sure the overflow function is called only once every overflow_frequency.
     1638          const int overflow_frequency = 1024;
     1639          __ andptr(rbx, overflow_frequency-1);
     1640          __ jcc(Assembler::zero, backedge_counter_overflow);
     1641        }
     1642      } else {
     1643        if (UseOnStackReplacement) {
     1644          // check for overflow against rax, which is the sum of the counters
     1645          __ cmp32(rax,
     1646                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
     1647          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
     1648
     1649        }
    16021650      }
    16031651    }
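
For readers of this hunk, a hedged sketch of what increment_mask_and_jump emits (the macro body is not part of this diff, so this is an approximation, not the MacroAssembler source): load the counter, add the increment, store it back, and branch to the overflow label when the masked bits are zero. With the mask built above, that happens once every 2^Tier0BackedgeNotifyFreqLog backedges.

    // Approximation of the emitted logic, expressed as plain C++.
    static bool bump_and_check_overflow(int32_t* counter_addr, int increment, int mask) {
      int32_t c = *counter_addr + increment;   // load and increment the counter word
      *counter_addr = c;                       // store it back
      return (c & mask) == 0;                  // corresponds to jcc(zero, &backedge_counter_overflow)
    }
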
     
    16181666      // Out-of-line code to allocate method data oop.
    16191667      __ bind(profile_method);
    1620       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi);
     1668      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
    16211669      __ load_unsigned_byte(rbx, Address(rsi, 0));  // restore target bytecode
    1622       __ movptr(rcx, Address(rbp, method_offset));
    1623       __ movptr(rcx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
    1624       __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
    1625       __ test_method_data_pointer(rcx, dispatch);
    1626       // offset non-null mdp by MDO::data_offset() + IR::profile_method()
    1627       __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset()));
    1628       __ addptr(rcx, rax);
    1629       __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
     1670      __ set_method_data_pointer_for_bcp();
    16301671      __ jmp(dispatch);
    16311672    }
     
    30913132  // rax: CallSite object (f1)
    30923133  // rbx: unused (f2)
     3134  // rcx: receiver address
    30933135  // rdx: flags (unused)
    30943136
     3137  Register rax_callsite      = rax;
     3138  Register rcx_method_handle = rcx;
     3139
    30953140  if (ProfileInterpreter) {
    3096     Label L;
    30973141    // %%% should make a type profile for any invokedynamic that takes a ref argument
    30983142    // profile this call
     
    31003144  }
    31013145
    3102   __ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
    3103   __ null_check(rcx);
     3146  __ movptr(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
     3147  __ null_check(rcx_method_handle);
    31043148  __ prepare_to_jump_from_interpreted();
    3105   __ jump_to_method_handle_entry(rcx, rdx);
     3149  __ jump_to_method_handle_entry(rcx_method_handle, rdx);
    31063150}
    31073151
     
    31533197    Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
    31543198
     3199  const Register thread = rcx;
     3200  if (UseTLAB || allow_shared_alloc) {
     3201    __ get_thread(thread);
     3202  }
     3203
    31553204  if (UseTLAB) {
    3156     const Register thread = rcx;
    3157 
    3158     __ get_thread(thread);
    31593205    __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
    31603206    __ lea(rbx, Address(rax, rdx, Address::times_1));
     
    31973243    // if someone beat us on the allocation, try again, otherwise continue
    31983244    __ jcc(Assembler::notEqual, retry);
     3245
     3246    __ incr_allocated_bytes(thread, rdx, 0);
    31993247  }
    32003248
     
    32063254    __ jcc(Assembler::zero, initialize_header);
    32073255
    3208   // Initialize topmost object field, divide rdx by 8, check if odd and
    3209   // test if zero.
     3256    // Initialize topmost object field, divide rdx by 8, check if odd and
     3257    // test if zero.
    32103258    __ xorl(rcx, rcx);    // use zero reg to clear memory (shorter code)
    32113259    __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
    32123260
    3213   // rdx must have been multiple of 8
     3261    // rdx must have been multiple of 8
    32143262#ifdef ASSERT
    32153263    // make sure rdx was multiple of 8
  • trunk/openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_32.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
     25#ifndef CPU_X86_VM_TEMPLATETABLE_X86_32_HPP
     26#define CPU_X86_VM_TEMPLATETABLE_X86_32_HPP
     27
    2528  static void prepare_invoke(Register method, Register index, int byte_no);
    2629  static void invokevirtual_helper(Register index, Register recv,
     
    3134  static void index_check(Register array, Register index);
    3235  static void index_check_without_pop(Register array, Register index);
     36
     37#endif // CPU_X86_VM_TEMPLATETABLE_X86_32_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_templateTable_x86_64.cpp.incl"
     25#include "precompiled.hpp"
     26#include "interpreter/interpreter.hpp"
     27#include "interpreter/interpreterRuntime.hpp"
     28#include "interpreter/templateTable.hpp"
     29#include "memory/universe.inline.hpp"
     30#include "oops/methodDataOop.hpp"
     31#include "oops/objArrayKlass.hpp"
     32#include "oops/oop.inline.hpp"
     33#include "prims/methodHandles.hpp"
     34#include "runtime/sharedRuntime.hpp"
     35#include "runtime/stubRoutines.hpp"
     36#include "runtime/synchronizer.hpp"
    2737
    2838#ifndef CC_INTERP
     
    414424    __ verify_oop(rax);
    415425  }
     426
     427  Label L_done, L_throw_exception;
     428  const Register con_klass_temp = rcx;  // same as cache
     429  const Register array_klass_temp = rdx;  // same as index
     430  __ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes()));
     431  __ lea(array_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
     432  __ cmpptr(con_klass_temp, Address(array_klass_temp, 0));
     433  __ jcc(Assembler::notEqual, L_done);
     434  __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);
     435  __ jcc(Assembler::notEqual, L_throw_exception);
     436  __ xorptr(rax, rax);
     437  __ jmp(L_done);
     438
     439  // Load the exception from the system-array which wraps it:
     440  __ bind(L_throw_exception);
     441  __ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
     442  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
     443
     444  __ bind(L_done);
    416445}
    417446
     
    15841613    __ testl(rdx, rdx);             // check if forward or backward branch
    15851614    __ jcc(Assembler::positive, dispatch); // count only if backward branch
    1586 
    1587     // increment counter
    1588     __ movl(rax, Address(rcx, be_offset));        // load backedge counter
    1589     __ incrementl(rax, InvocationCounter::count_increment); // increment
    1590                                                             // counter
    1591     __ movl(Address(rcx, be_offset), rax);        // store counter
    1592 
    1593     __ movl(rax, Address(rcx, inv_offset));    // load invocation counter
    1594     __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
    1595     __ addl(rax, Address(rcx, be_offset));        // add both counters
    1596 
    1597     if (ProfileInterpreter) {
    1598       // Test to see if we should create a method data oop
    1599       __ cmp32(rax,
    1600                ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
    1601       __ jcc(Assembler::less, dispatch);
    1602 
    1603       // if no method data exists, go to profile method
    1604       __ test_method_data_pointer(rax, profile_method);
    1605 
    1606       if (UseOnStackReplacement) {
    1607         // check for overflow against ebx which is the MDO taken count
    1608         __ cmp32(rbx,
    1609                  ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
    1610         __ jcc(Assembler::below, dispatch);
    1611 
    1612         // When ProfileInterpreter is on, the backedge_count comes
    1613         // from the methodDataOop, which value does not get reset on
    1614         // the call to frequency_counter_overflow().  To avoid
    1615         // excessive calls to the overflow routine while the method is
    1616         // being compiled, add a second test to make sure the overflow
    1617         // function is called only once every overflow_frequency.
    1618         const int overflow_frequency = 1024;
    1619         __ andl(rbx, overflow_frequency - 1);
    1620         __ jcc(Assembler::zero, backedge_counter_overflow);
    1621 
     1615    if (TieredCompilation) {
     1616      Label no_mdo;
     1617      int increment = InvocationCounter::count_increment;
     1618      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
     1619      if (ProfileInterpreter) {
     1620        // Are we profiling?
     1621        __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
     1622        __ testptr(rbx, rbx);
     1623        __ jccb(Assembler::zero, no_mdo);
     1624        // Increment the MDO backedge counter
     1625        const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
     1626                                           in_bytes(InvocationCounter::counter_offset()));
     1627        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
     1628                                   rax, false, Assembler::zero, &backedge_counter_overflow);
     1629        __ jmp(dispatch);
    16221630      }
     1631      __ bind(no_mdo);
     1632      // Increment backedge counter in methodOop
     1633      __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
     1634                                 rax, false, Assembler::zero, &backedge_counter_overflow);
    16231635    } else {
    1624       if (UseOnStackReplacement) {
    1625         // check for overflow against eax, which is the sum of the
    1626         // counters
     1636      // increment counter
     1637      __ movl(rax, Address(rcx, be_offset));        // load backedge counter
     1638      __ incrementl(rax, InvocationCounter::count_increment); // increment counter
     1639      __ movl(Address(rcx, be_offset), rax);        // store counter
     1640
     1641      __ movl(rax, Address(rcx, inv_offset));    // load invocation counter
     1642      __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
     1643      __ addl(rax, Address(rcx, be_offset));        // add both counters
     1644
     1645      if (ProfileInterpreter) {
     1646        // Test to see if we should create a method data oop
    16271647        __ cmp32(rax,
    1628                  ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
    1629         __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
    1630 
     1648                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
     1649        __ jcc(Assembler::less, dispatch);
     1650
     1651        // if no method data exists, go to profile method
     1652        __ test_method_data_pointer(rax, profile_method);
     1653
     1654        if (UseOnStackReplacement) {
     1655          // check for overflow against ebx which is the MDO taken count
     1656          __ cmp32(rbx,
     1657                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
     1658          __ jcc(Assembler::below, dispatch);
     1659
     1660          // When ProfileInterpreter is on, the backedge_count comes
     1661          // from the methodDataOop, which value does not get reset on
     1662          // the call to frequency_counter_overflow().  To avoid
     1663          // excessive calls to the overflow routine while the method is
     1664          // being compiled, add a second test to make sure the overflow
     1665          // function is called only once every overflow_frequency.
     1666          const int overflow_frequency = 1024;
     1667          __ andl(rbx, overflow_frequency - 1);
     1668          __ jcc(Assembler::zero, backedge_counter_overflow);
     1669
     1670        }
     1671      } else {
     1672        if (UseOnStackReplacement) {
     1673          // check for overflow against eax, which is the sum of the
     1674          // counters
     1675          __ cmp32(rax,
     1676                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
     1677          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
     1678
     1679        }
    16311680      }
    16321681    }
     
    16471696      // Out-of-line code to allocate method data oop.
    16481697      __ bind(profile_method);
    1649       __ call_VM(noreg,
    1650                  CAST_FROM_FN_PTR(address,
    1651                                   InterpreterRuntime::profile_method), r13);
     1698      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
    16521699      __ load_unsigned_byte(rbx, Address(r13, 0));  // restore target bytecode
    1653       __ movptr(rcx, Address(rbp, method_offset));
    1654       __ movptr(rcx, Address(rcx,
    1655                              in_bytes(methodOopDesc::method_data_offset())));
    1656       __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
    1657                 rcx);
    1658       __ test_method_data_pointer(rcx, dispatch);
    1659       // offset non-null mdp by MDO::data_offset() + IR::profile_method()
    1660       __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset()));
    1661       __ addptr(rcx, rax);
    1662       __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
    1663                 rcx);
     1700      __ set_method_data_pointer_for_bcp();
    16641701      __ jmp(dispatch);
    16651702    }
     
    27142751    __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1);
    27152752    __ verify_oop(rax);
    2716     __ mov(r12, rax);  // save object pointer before call_VM() clobbers it
     2753    __ push_ptr(rax);  // save object pointer before call_VM() clobbers it
    27172754    __ mov(c_rarg1, rax);
    27182755    // c_rarg1: object pointer copied above
     
    27222759                                InterpreterRuntime::post_field_access),
    27232760               c_rarg1, c_rarg2);
    2724     __ mov(rax, r12); // restore object pointer
    2725     __ reinit_heapbase();
     2761    __ pop_ptr(rax); // restore object pointer
    27262762    __ bind(L1);
    27272763  }
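
A note on the pattern above (an inference from the surrounding changes, not stated in the diff): on x86_64, r12 doubles as the compressed-oops heap base, so the object pointer is no longer parked in r12 across call_VM; it is spilled to the expression stack instead, which also removes the reinit_heapbase() call. The shape of the pattern, for reference:

    // Sketch of the save/restore now used around call_VM (same calls as in the hunk above).
    __ push_ptr(rax);     // spill the object pointer onto the Java expression stack
    __ mov(c_rarg1, rax);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
               c_rarg1, c_rarg2);
    __ pop_ptr(rax);      // reload it; r12 (heap base) is left untouched
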
     
    29132949                                         Register recv,
    29142950                                         Register flags) {
    2915   // Uses temporary registers rax, rdx  assert_different_registers(index, recv, rax, rdx);
     2951  // Uses temporary registers rax, rdx
     2952  assert_different_registers(index, recv, rax, rdx);
    29162953
    29172954  // Test for an invoke of a final method
     
    31003137  // rdx: flags (unused)
    31013138
     3139  Register rax_callsite      = rax;
     3140  Register rcx_method_handle = rcx;
     3141
    31023142  if (ProfileInterpreter) {
    3103     Label L;
    31043143    // %%% should make a type profile for any invokedynamic that takes a ref argument
    31053144    // profile this call
     
    31073146  }
    31083147
    3109   __ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
    3110   __ null_check(rcx);
     3148  __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
     3149  __ null_check(rcx_method_handle);
    31113150  __ prepare_to_jump_from_interpreted();
    3112   __ jump_to_method_handle_entry(rcx, rdx);
     3151  __ jump_to_method_handle_entry(rcx_method_handle, rdx);
    31133152}
    31143153
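
The change from movptr to load_heap_oop matters when compressed oops are in use. A simplified sketch of what the macro does on x86_64 (an approximation, not the exact MacroAssembler source):

    void load_heap_oop_sketch(Register dst, Address src) {
      if (UseCompressedOops) {
        movl(dst, src);          // the field holds a 32-bit narrow oop
        decode_heap_oop(dst);    // rebuild the full pointer (heap base lives in r12)
      } else {
        movptr(dst, src);        // uncompressed heap: plain pointer-sized load
      }
    }
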
     
    32163255    // if someone beat us on the allocation, try again, otherwise continue
    32173256    __ jcc(Assembler::notEqual, retry);
     3257
     3258    __ incr_allocated_bytes(r15_thread, rdx, 0);
    32183259  }
    32193260
     
    33143355  __ jcc(Assembler::equal, quicked);
    33153356  __ push(atos); // save receiver for result, and for GC
    3316   __ mov(r12, rcx); // save rcx XXX
    33173357  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
    3318   __ movq(rcx, r12); // restore rcx XXX
    3319   __ reinit_heapbase();
    33203358  __ pop_ptr(rdx); // restore receiver
    33213359  __ jmpb(resolved);
     
    33713409
    33723410  __ push(atos); // save receiver for result, and for GC
    3373   __ mov(r12, rcx); // save rcx
    33743411  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
    3375   __ movq(rcx, r12); // restore rcx
    3376   __ reinit_heapbase();
    33773412  __ pop_ptr(rdx); // restore receiver
     3413  __ verify_oop(rdx);
    33783414  __ load_klass(rdx, rdx);
    33793415  __ jmpb(resolved);
  • trunk/openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_64.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
     25#ifndef CPU_X86_VM_TEMPLATETABLE_X86_64_HPP
     26#define CPU_X86_VM_TEMPLATETABLE_X86_64_HPP
     27
    2528  static void prepare_invoke(Register method, Register index, int byte_no);
    2629  static void invokevirtual_helper(Register index, Register recv,
     
    3134  static void index_check(Register array, Register index);
    3235  static void index_check_without_pop(Register array, Register index);
     36
     37#endif // CPU_X86_VM_TEMPLATETABLE_X86_64_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/vmStructs_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 2001, 2007, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2222 *
    2323 */
     24
     25#ifndef CPU_X86_VM_VMSTRUCTS_X86_HPP
     26#define CPU_X86_VM_VMSTRUCTS_X86_HPP
    2427
    2528// These are the CPU-specific fields, types and integer
     
    6164  /* in vmStructs_<os>_<cpu>.hpp's VM_LONG_CONSTANTS_OS_CPU macro (and must  */
    6265  /* be present there)                                                       */
     66
     67#endif // CPU_X86_VM_VMSTRUCTS_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/vm_version_x86.cpp

    r278 r309  
    2323 */
    2424
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_vm_version_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "assembler_x86.inline.hpp"
     27#include "memory/resourceArea.hpp"
     28#include "runtime/java.hpp"
     29#include "runtime/stubCodeGenerator.hpp"
     30#include "vm_version_x86.hpp"
     31#ifdef TARGET_OS_FAMILY_linux
     32# include "os_linux.inline.hpp"
     33#endif
     34#ifdef TARGET_OS_FAMILY_solaris
     35# include "os_solaris.inline.hpp"
     36#endif
     37#ifdef TARGET_OS_FAMILY_windows
     38# include "os_windows.inline.hpp"
     39#endif
    2740
    2841
     
    336349
    337350  char buf[256];
    338   jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
     351  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
    339352               cores_per_cpu(), threads_per_core(),
    340353               cpu_family(), _model, _stepping,
     
    351364               (supports_popcnt() ? ", popcnt" : ""),
    352365               (supports_mmx_ext() ? ", mmxext" : ""),
    353                (supports_3dnow()   ? ", 3dnow"  : ""),
    354                (supports_3dnow2()  ? ", 3dnowext" : ""),
     366               (supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
    355367               (supports_lzcnt()   ? ", lzcnt": ""),
    356368               (supports_sse4a()   ? ", sse4a": ""),
     
    417429      }
    418430    }
     431    if( FLAG_IS_DEFAULT(UseSSE42Intrinsics) ) {
     432      if( supports_sse4_2() && UseSSE >= 4 ) {
     433        UseSSE42Intrinsics = true;
     434      }
     435    }
    419436
    420437    // Use count leading zeros count instruction if available.
     
    424441      }
    425442    }
     443
     444    // some defaults for AMD family 15h
     445    if ( cpu_family() == 0x15 ) {
     446      // On family 15h processors default is no sw prefetch
     447      if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
     448        AllocatePrefetchStyle = 0;
     449      }
     450      // Also, if some other prefetch style is specified, default instruction type is PREFETCHW
     451      if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
     452        AllocatePrefetchInstr = 3;
     453      }
     454      // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
     455      if( FLAG_IS_DEFAULT(UseXMMForArrayCopy) ) {
     456        UseXMMForArrayCopy = true;
     457      }
     458      if( FLAG_IS_DEFAULT(UseUnalignedLoadStores) && UseXMMForArrayCopy ) {
     459        UseUnalignedLoadStores = true;
     460      }
     461    }
     462
    426463  }
    427464
     
    498535  if( ReadPrefetchInstr < 0 ) ReadPrefetchInstr = 0;
    499536  if( ReadPrefetchInstr > 3 ) ReadPrefetchInstr = 3;
    500   if( ReadPrefetchInstr == 3 && !supports_3dnow() ) ReadPrefetchInstr = 0;
    501   if( !supports_sse() && supports_3dnow() ) ReadPrefetchInstr = 3;
     537  if( ReadPrefetchInstr == 3 && !supports_3dnow_prefetch() ) ReadPrefetchInstr = 0;
     538  if( !supports_sse() && supports_3dnow_prefetch() ) ReadPrefetchInstr = 3;
    502539
    503540  if( AllocatePrefetchInstr < 0 ) AllocatePrefetchInstr = 0;
    504541  if( AllocatePrefetchInstr > 3 ) AllocatePrefetchInstr = 3;
    505   if( AllocatePrefetchInstr == 3 && !supports_3dnow() ) AllocatePrefetchInstr=0;
    506   if( !supports_sse() && supports_3dnow() ) AllocatePrefetchInstr = 3;
     542  if( AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch() ) AllocatePrefetchInstr=0;
     543  if( !supports_sse() && supports_3dnow_prefetch() ) AllocatePrefetchInstr = 3;
    507544
    508545  // Allocation prefetch settings
     
    551588                  logical_processors_per_package());
    552589    tty->print_cr("UseSSE=%d",UseSSE);
    553     tty->print("Allocation: ");
    554     if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow()) {
    555       tty->print_cr("no prefetching");
     590    tty->print("Allocation");
     591    if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow_prefetch()) {
     592      tty->print_cr(": no prefetching");
    556593    } else {
    557       if (UseSSE == 0 && supports_3dnow()) {
     594      tty->print(" prefetching: ");
     595      if (UseSSE == 0 && supports_3dnow_prefetch()) {
    558596        tty->print("PREFETCHW");
    559597      } else if (UseSSE >= 1) {
     
    596634    vm_exit_during_initialization("Unable to allocate getPsrInfo_stub");
    597635  }
    598   CodeBuffer c(stub_blob->instructions_begin(),
    599                stub_blob->instructions_size());
     636  CodeBuffer c(stub_blob);
    600637  VM_Version_StubGenerator g(&c);
    601638  getPsrInfo_stub = CAST_TO_FN_PTR(getPsrInfo_stub_t,
  • trunk/openjdk/hotspot/src/cpu/x86/vm/vm_version_x86.hpp

    r278 r309  
    2323 */
    2424
     25#ifndef CPU_X86_VM_VM_VERSION_X86_HPP
     26#define CPU_X86_VM_VM_VERSION_X86_HPP
     27
     28#include "runtime/globals_extension.hpp"
     29#include "runtime/vm_version.hpp"
     30
    2531class VM_Version : public Abstract_VM_Version {
    2632public:
     
    183189     CPU_HT     = (1 << 3),
    184190     CPU_MMX    = (1 << 4),
    185      CPU_3DNOW  = (1 << 5), // 3DNow comes from cpuid 0x80000001 (EDX)
     191     CPU_3DNOW_PREFETCH  = (1 << 5), // Processor supports 3dnow prefetch and prefetchw instructions
     192                                     // may not necessarily support other 3dnow instructions
    186193     CPU_SSE    = (1 << 6),
    187194     CPU_SSE2   = (1 << 7),
     
    297304    if (_cpuid_info.std_cpuid1_edx.bits.cmov != 0)
    298305      result |= CPU_CMOV;
    299     if (_cpuid_info.std_cpuid1_edx.bits.fxsr != 0 || is_amd() &&
    300         _cpuid_info.ext_cpuid1_edx.bits.fxsr != 0)
     306    if (_cpuid_info.std_cpuid1_edx.bits.fxsr != 0 || (is_amd() &&
     307        _cpuid_info.ext_cpuid1_edx.bits.fxsr != 0))
    301308      result |= CPU_FXSR;
    302309    // HT flag is set for multi-core processors also.
    303310    if (threads_per_core() > 1)
    304311      result |= CPU_HT;
    305     if (_cpuid_info.std_cpuid1_edx.bits.mmx != 0 || is_amd() &&
    306         _cpuid_info.ext_cpuid1_edx.bits.mmx != 0)
     312    if (_cpuid_info.std_cpuid1_edx.bits.mmx != 0 || (is_amd() &&
     313        _cpuid_info.ext_cpuid1_edx.bits.mmx != 0))
    307314      result |= CPU_MMX;
    308315    if (_cpuid_info.std_cpuid1_edx.bits.sse != 0)
     
    323330    // AMD features.
    324331    if (is_amd()) {
    325       if (_cpuid_info.ext_cpuid1_edx.bits.tdnow != 0)
    326         result |= CPU_3DNOW;
     332      if ((_cpuid_info.ext_cpuid1_edx.bits.tdnow != 0) ||
     333          (_cpuid_info.ext_cpuid1_ecx.bits.prefetchw != 0))
     334        result |= CPU_3DNOW_PREFETCH;
    327335      if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt != 0)
    328336        result |= CPU_LZCNT;
     
    441449  // AMD features
    442450  //
    443   static bool supports_3dnow()    { return (_cpuFeatures & CPU_3DNOW) != 0; }
     451  static bool supports_3dnow_prefetch()    { return (_cpuFeatures & CPU_3DNOW_PREFETCH) != 0; }
    444452  static bool supports_mmx_ext()  { return is_amd() && _cpuid_info.ext_cpuid1_edx.bits.mmx_amd != 0; }
    445   static bool supports_3dnow2()   { return is_amd() && _cpuid_info.ext_cpuid1_edx.bits.tdnow2 != 0; }
    446453  static bool supports_lzcnt()    { return (_cpuFeatures & CPU_LZCNT) != 0; }
    447454  static bool supports_sse4a()    { return (_cpuFeatures & CPU_SSE4A) != 0; }
     455
     456  // Intel Core and newer cpus have fast IDIV instruction (excluding Atom).
     457  static bool has_fast_idiv()     { return is_intel() && cpu_family() == 6 &&
     458                                           supports_sse3() && _model != 0x1C; }
    448459
    449460  static bool supports_compare_and_exchange() { return true; }
     
    517528  }
    518529};
     530
     531#endif // CPU_X86_VM_VM_VERSION_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/vmreg_x86.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 2006, 2007, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_vmreg_x86.cpp.incl"
     25#include "precompiled.hpp"
     26#include "asm/assembler.hpp"
     27#include "code/vmreg.hpp"
    2728
    2829
  • trunk/openjdk/hotspot/src/cpu/x86/vm/vmreg_x86.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 2006, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
     25#ifndef CPU_X86_VM_VMREG_X86_HPP
     26#define CPU_X86_VM_VMREG_X86_HPP
     27
    2528  bool is_Register();
    2629  Register as_Register();
     
    3134  bool is_XMMRegister();
    3235  XMMRegister as_XMMRegister();
     36
     37#endif // CPU_X86_VM_VMREG_X86_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/vmreg_x86.inline.hpp

    r278 r309  
    11/*
    2  * Copyright (c) 2006, 2007, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2222 *
    2323 */
     24
     25#ifndef CPU_X86_VM_VMREG_X86_INLINE_HPP
     26#define CPU_X86_VM_VMREG_X86_INLINE_HPP
    2427
    2528inline VMReg RegisterImpl::as_VMReg() {
     
    8386  return is_even(value());
    8487}
     88
     89#endif // CPU_X86_VM_VMREG_X86_INLINE_HPP
  • trunk/openjdk/hotspot/src/cpu/x86/vm/vtableStubs_x86_32.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_vtableStubs_x86_32.cpp.incl"
     25#include "precompiled.hpp"
     26#include "asm/assembler.hpp"
     27#include "assembler_x86.inline.hpp"
     28#include "code/vtableStubs.hpp"
     29#include "interp_masm_x86_32.hpp"
     30#include "memory/resourceArea.hpp"
     31#include "oops/instanceKlass.hpp"
     32#include "oops/klassVtable.hpp"
     33#include "runtime/sharedRuntime.hpp"
     34#include "vmreg_x86.inline.hpp"
     35#ifdef COMPILER2
     36#include "opto/runtime.hpp"
     37#endif
    2738
    2839// machine-dependent part of VtableStubs: create VtableStub of correct size and
  • trunk/openjdk/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp

    r278 r309  
    11/*
    2  * Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved.
     2 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
    33 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44 *
     
    2323 */
    2424
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_vtableStubs_x86_64.cpp.incl"
     25#include "precompiled.hpp"
     26#include "asm/assembler.hpp"
     27#include "assembler_x86.inline.hpp"
     28#include "code/vtableStubs.hpp"
     29#include "interp_masm_x86_64.hpp"
     30#include "memory/resourceArea.hpp"
     31#include "oops/instanceKlass.hpp"
     32#include "oops/klassVtable.hpp"
     33#include "runtime/sharedRuntime.hpp"
     34#include "vmreg_x86.inline.hpp"
     35#ifdef COMPILER2
     36#include "opto/runtime.hpp"
     37#endif
    2738
    2839// machine-dependent part of VtableStubs: create VtableStub of correct size and
     
    210221  } else {
    211222    // Itable stub size
    212     return (DebugVtables ? 512 : 72) + (CountCompiledCalls ? 13 : 0) +
     223    return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
    213224           (UseCompressedOops ? 32 : 0);  // 2 leaqs
    214225  }
  • trunk/openjdk/hotspot/src/cpu/x86/vm/x86_32.ad

    r278 r309  
    351351void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) {
    352352  unsigned char c = (unsigned char)((f1 << 6) | (f2 << 3) | f3);
    353   *(cbuf.code_end()) = c;
    354   cbuf.set_code_end(cbuf.code_end() + 1);
     353  cbuf.insts()->emit_int8(c);
    355354}
    356355
     
    358357void emit_cc(CodeBuffer &cbuf, int f1, int f2) {
    359358  unsigned char c = (unsigned char)( f1 | f2 );
    360   *(cbuf.code_end()) = c;
    361   cbuf.set_code_end(cbuf.code_end() + 1);
     359  cbuf.insts()->emit_int8(c);
    362360}
    363361
    364362// EMIT_OPCODE()
    365363void emit_opcode(CodeBuffer &cbuf, int code) {
    366   *(cbuf.code_end()) = (unsigned char)code;
    367   cbuf.set_code_end(cbuf.code_end() + 1);
     364  cbuf.insts()->emit_int8((unsigned char) code);
    368365}
    369366
    370367// EMIT_OPCODE() w/ relocation information
    371368void emit_opcode(CodeBuffer &cbuf, int code, relocInfo::relocType reloc, int offset = 0) {
    372   cbuf.relocate(cbuf.inst_mark() + offset, reloc);
     369  cbuf.relocate(cbuf.insts_mark() + offset, reloc);
    373370  emit_opcode(cbuf, code);
    374371}
     
    376373// EMIT_D8()
    377374void emit_d8(CodeBuffer &cbuf, int d8) {
    378   *(cbuf.code_end()) = (unsigned char)d8;
    379   cbuf.set_code_end(cbuf.code_end() + 1);
     375  cbuf.insts()->emit_int8((unsigned char) d8);
    380376}
    381377
    382378// EMIT_D16()
    383379void emit_d16(CodeBuffer &cbuf, int d16) {
    384   *((short *)(cbuf.code_end())) = d16;
    385   cbuf.set_code_end(cbuf.code_end() + 2);
     380  cbuf.insts()->emit_int16(d16);
    386381}
    387382
    388383// EMIT_D32()
    389384void emit_d32(CodeBuffer &cbuf, int d32) {
    390   *((int *)(cbuf.code_end())) = d32;
    391   cbuf.set_code_end(cbuf.code_end() + 4);
     385  cbuf.insts()->emit_int32(d32);
    392386}
    393387
     
    395389void emit_d32_reloc(CodeBuffer &cbuf, int d32, relocInfo::relocType reloc,
    396390        int format) {
    397   cbuf.relocate(cbuf.inst_mark(), reloc, format);
    398 
    399   *((int *)(cbuf.code_end())) = d32;
    400   cbuf.set_code_end(cbuf.code_end() + 4);
     391  cbuf.relocate(cbuf.insts_mark(), reloc, format);
     392  cbuf.insts()->emit_int32(d32);
    401393}
    402394
     
    409401  }
    410402#endif
    411   cbuf.relocate(cbuf.inst_mark(), rspec, format);
    412 
    413   *((int *)(cbuf.code_end())) = d32;
    414   cbuf.set_code_end(cbuf.code_end() + 4);
     403  cbuf.relocate(cbuf.insts_mark(), rspec, format);
     404  cbuf.insts()->emit_int32(d32);
    415405}
    416406
     
    518508
    519509//=============================================================================
     510const bool Matcher::constant_table_absolute_addressing = true;
     511const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
     512
     513void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
     514  // Empty encoding
     515}
     516
     517uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
     518  return 0;
     519}
     520
     521#ifndef PRODUCT
     522void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
     523  st->print("# MachConstantBaseNode (empty encoding)");
     524}
     525#endif
     526
     527
     528//=============================================================================
    520529#ifndef PRODUCT
    521530void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
     
    614623    emit_d32(cbuf, framesize);
    615624  }
    616   C->set_frame_complete(cbuf.code_end() - cbuf.code_begin());
     625  C->set_frame_complete(cbuf.insts_size());
    617626
    618627#ifdef ASSERT
     
    696705
    697706  if( do_polling() && C->is_method_compilation() ) {
    698     cbuf.relocate(cbuf.code_end(), relocInfo::poll_return_type, 0);
     707    cbuf.relocate(cbuf.insts_end(), relocInfo::poll_return_type, 0);
    699708    emit_opcode(cbuf,0x85);
    700709    emit_rm(cbuf, 0x0, EAX_enc, 0x5); // EAX
     
    12121221  // jmp -1
    12131222
    1214   address mark = cbuf.inst_mark();  // get mark within main instrs section
    1215 
    1216   // Note that the code buffer's inst_mark is always relative to insts.
     1223  address mark = cbuf.insts_mark();  // get mark within main instrs section
     1224
     1225  // Note that the code buffer's insts_mark is always relative to insts.
    12171226  // That's why we must use the macroassembler to generate a stub.
    12181227  MacroAssembler _masm(&cbuf);
     
    12291238
    12301239  __ end_a_stub();
    1231   // Update current stubs pointer and restore code_end.
     1240  // Update current stubs pointer and restore insts_end.
    12321241}
    12331242// size of call stub, compiled java to interpretor
     12331242// size of call stub, compiled java to interpreter
     
    12551264  MacroAssembler masm(&cbuf);
    12561265#ifdef ASSERT
    1257   uint code_size = cbuf.code_size();
     1266  uint insts_size = cbuf.insts_size();
    12581267#endif
    12591268  masm.cmpptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));
     
    12671276  masm.nop(nops_cnt);
    12681277
    1269   assert(cbuf.code_size() - code_size == size(ra_), "checking code size of inline cache node");
     1278  assert(cbuf.insts_size() - insts_size == size(ra_), "checking code size of inline cache node");
    12701279}
    12711280
     
    12891298int emit_exception_handler(CodeBuffer& cbuf) {
    12901299
    1291   // Note that the code buffer's inst_mark is always relative to insts.
     1300  // Note that the code buffer's insts_mark is always relative to insts.
    12921301  // That's why we must use the macroassembler to generate a handler.
    12931302  MacroAssembler _masm(&cbuf);
     
    12961305  if (base == NULL)  return 0;  // CodeBuffer::expand failed
    12971306  int offset = __ offset();
    1298   __ jump(RuntimeAddress(OptoRuntime::exception_blob()->instructions_begin()));
     1307  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
    12991308  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
    13001309  __ end_a_stub();
     
    13141323int emit_deopt_handler(CodeBuffer& cbuf) {
    13151324
    1316   // Note that the code buffer's inst_mark is always relative to insts.
     1325  // Note that the code buffer's insts_mark is always relative to insts.
    13171326  // That's why we must use the macroassembler to generate a handler.
    13181327  MacroAssembler _masm(&cbuf);
     
    13311340
    13321341
    1333 static void emit_double_constant(CodeBuffer& cbuf, double x) {
    1334   int mark = cbuf.insts()->mark_off();
    1335   MacroAssembler _masm(&cbuf);
    1336   address double_address = __ double_constant(x);
    1337   cbuf.insts()->set_mark_off(mark);  // preserve mark across masm shift
    1338   emit_d32_reloc(cbuf,
    1339                  (int)double_address,
    1340                  internal_word_Relocation::spec(double_address),
    1341                  RELOC_DISP32);
    1342 }
    1343 
    1344 static void emit_float_constant(CodeBuffer& cbuf, float x) {
    1345   int mark = cbuf.insts()->mark_off();
    1346   MacroAssembler _masm(&cbuf);
    1347   address float_address = __ float_constant(x);
    1348   cbuf.insts()->set_mark_off(mark);  // preserve mark across masm shift
    1349   emit_d32_reloc(cbuf,
    1350                  (int)float_address,
    1351                  internal_word_Relocation::spec(float_address),
    1352                  RELOC_DISP32);
    1353 }
    1354 
    1355 
    13561342const bool Matcher::match_rule_supported(int opcode) {
    13571343  if (!has_match_rule(opcode))
     
    13631349int Matcher::regnum_to_fpu_offset(int regnum) {
    13641350  return regnum - 32; // The FP registers are in the second chunk
    1365 }
    1366 
    1367 bool is_positive_zero_float(jfloat f) {
    1368   return jint_cast(f) == jint_cast(0.0F);
    1369 }
    1370 
    1371 bool is_positive_one_float(jfloat f) {
    1372   return jint_cast(f) == jint_cast(1.0F);
    1373 }
    1374 
    1375 bool is_positive_zero_double(jdouble d) {
    1376   return jlong_cast(d) == jlong_cast(0.0);
    1377 }
    1378 
    1379 bool is_positive_one_double(jdouble d) {
    1380   return jlong_cast(d) == jlong_cast(1.0);
    13811351}
    13821352
     
    15191489}
    15201490
     1491bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
     1492  // Use hardware integer DIV instruction when
     1493  // it is faster than a code which use multiply.
     1494  // Only when constant divisor fits into 32 bit
     1495  // (min_jint is excluded to get only correct
     1496  // positive 32 bit values from negative).
     1497  return VM_Version::has_fast_idiv() &&
     1498         (divisor == (int)divisor && divisor != min_jint);
     1499}
     1500
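
Together with VM_Version::has_fast_idiv() introduced earlier in this changeset, the predicate above gates long division by a constant through the hardware 32-bit IDIV: the divisor must round-trip through a jint, and min_jint is excluded because its negation is not representable in 32 bits. An illustration of the same test against a few values (sketch only, reusing the existing HotSpot typedefs):

    // divisor = 7            : fits in 32 bits, accepted when has_fast_idiv()
    // divisor = 0x180000000L : does not fit in a jint, rejected
    // divisor = min_jint     : rejected explicitly; -min_jint overflows a jint
    static bool ldiv_by_con_candidate(jlong divisor) {
      bool fits = (divisor == (jlong)(jint) divisor);
      return VM_Version::has_fast_idiv() && fits && divisor != min_jint;
    }
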
    15211501// Register for DIVI projection of divmodI
    15221502RegMask Matcher::divI_proj_mask() {
     
    15561536      return true;
    15571537    }
     1538  }
     1539  if (opc == Op_ConL && (n->get_long() & 0xFFFFFFFF00000000LL) == 0LL) {
     1540    return true;
    15581541  }
    15591542  return false;
     
    17291712  enc_class Lbl (label labl) %{ // JMP, CALL
    17301713    Label *l = $labl$$label;
    1731     emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size()+4)) : 0);
     1714    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.insts_size()+4)) : 0);
    17321715  %}
    17331716
    17341717  enc_class LblShort (label labl) %{ // JMP, CALL
    17351718    Label *l = $labl$$label;
    1736     int disp = l ? (l->loc_pos() - (cbuf.code_size()+1)) : 0;
     1719    int disp = l ? (l->loc_pos() - (cbuf.insts_size()+1)) : 0;
    17371720    assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
    17381721    emit_d8(cbuf, disp);
     
    17651748    $$$emit8$primary;
    17661749    emit_cc(cbuf, $secondary, $cop$$cmpcode);
    1767     emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size()+4)) : 0);
     1750    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.insts_size()+4)) : 0);
    17681751  %}
    17691752
     
    17711754    Label *l = $labl$$label;
    17721755    emit_cc(cbuf, $primary, $cop$$cmpcode);
    1773     int disp = l ? (l->loc_pos() - (cbuf.code_size()+1)) : 0;
     1756    int disp = l ? (l->loc_pos() - (cbuf.insts_size()+1)) : 0;
    17741757    assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
    17751758    emit_d8(cbuf, disp);
     
    18391822  enc_class Java_To_Runtime (method meth) %{    // CALL Java_To_Runtime, Java_To_Runtime_Leaf
    18401823    // This is the instruction starting address for relocation info.
    1841     cbuf.set_inst_mark();
     1824    cbuf.set_insts_mark();
    18421825    $$$emit8$primary;
    18431826    // CALL directly to the runtime
    1844     emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
     1827    emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
    18451828                runtime_call_Relocation::spec(), RELOC_IMM32 );
    18461829
     
    18721855  enc_class pre_call_FPU %{
    18731856    // If method sets FPU control word restore it here
    1874     debug_only(int off0 = cbuf.code_size());
     1857    debug_only(int off0 = cbuf.insts_size());
    18751858    if( Compile::current()->in_24_bit_fp_mode() ) {
    18761859      MacroAssembler masm(&cbuf);
    18771860      masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
    18781861    }
    1879     debug_only(int off1 = cbuf.code_size());
     1862    debug_only(int off1 = cbuf.insts_size());
    18801863    assert(off1 - off0 == pre_call_FPU_size(), "correct size prediction");
    18811864  %}
     
    18901873
    18911874  enc_class preserve_SP %{
    1892     debug_only(int off0 = cbuf.code_size());
     1875    debug_only(int off0 = cbuf.insts_size());
    18931876    MacroAssembler _masm(&cbuf);
    18941877    // RBP is preserved across all calls, even compiled calls.
    18951878    // Use it to preserve RSP in places where the callee might change the SP.
    18961879    __ movptr(rbp_mh_SP_save, rsp);
    1897     debug_only(int off1 = cbuf.code_size());
     1880    debug_only(int off1 = cbuf.insts_size());
    18981881    assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
    18991882  %}
     
    19071890    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    19081891    // who we intended to call.
    1909     cbuf.set_inst_mark();
     1892    cbuf.set_insts_mark();
    19101893    $$$emit8$primary;
    19111894    if ( !_method ) {
    1912       emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
     1895      emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
    19131896                     runtime_call_Relocation::spec(), RELOC_IMM32 );
    19141897    } else if(_optimized_virtual) {
    1915       emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
     1898      emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
    19161899                     opt_virtual_call_Relocation::spec(), RELOC_IMM32 );
    19171900    } else {
    1918       emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
     1901      emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
    19191902                     static_call_Relocation::spec(), RELOC_IMM32 );
    19201903    }
     
    19281911    // Generate  "Mov EAX,0x00", placeholder instruction to load oop-info
    19291912    // emit_call_dynamic_prologue( cbuf );
    1930     cbuf.set_inst_mark();
     1913    cbuf.set_insts_mark();
    19311914    emit_opcode(cbuf, 0xB8 + EAX_enc);        // mov    EAX,-1
    19321915    emit_d32_reloc(cbuf, (int)Universe::non_oop_word(), oop_Relocation::spec_for_immediate(), RELOC_IMM32);
    1933     address  virtual_call_oop_addr = cbuf.inst_mark();
     1916    address  virtual_call_oop_addr = cbuf.insts_mark();
    19341917    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    19351918    // who we intended to call.
    1936     cbuf.set_inst_mark();
     1919    cbuf.set_insts_mark();
    19371920    $$$emit8$primary;
    1938     emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
     1921    emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
    19391922                virtual_call_Relocation::spec(virtual_call_oop_addr), RELOC_IMM32 );
    19401923  %}
     
    19451928
    19461929    // CALL *[EAX+in_bytes(methodOopDesc::from_compiled_code_entry_point_offset())]
    1947     cbuf.set_inst_mark();
     1930    cbuf.set_insts_mark();
    19481931    $$$emit8$primary;
    19491932    emit_rm(cbuf, 0x01, $secondary, EAX_enc );  // R/M byte
     
    19771960//
    19781961//     // CALL to interpreter.
    1979 //     cbuf.set_inst_mark();
     1962//     cbuf.set_insts_mark();
    19801963//     $$$emit8$primary;
    1981 //     emit_d32_reloc(cbuf, ($labl$$label - (int)(cbuf.code_end()) - 4),
     1964//     emit_d32_reloc(cbuf, ($labl$$label - (int)(cbuf.insts_end()) - 4),
    19821965//                 runtime_call_Relocation::spec(), RELOC_IMM32 );
    19831966//   %}
     
    20312014      emit_d32(cbuf, src_con);
    20322015    }
    2033   %}
    2034 
    2035 
    2036   enc_class LdImmD (immD src) %{    // Load Immediate
    2037     if( is_positive_zero_double($src$$constant)) {
    2038       // FLDZ
    2039       emit_opcode(cbuf,0xD9);
    2040       emit_opcode(cbuf,0xEE);
    2041     } else if( is_positive_one_double($src$$constant)) {
    2042       // FLD1
    2043       emit_opcode(cbuf,0xD9);
    2044       emit_opcode(cbuf,0xE8);
    2045     } else {
    2046       emit_opcode(cbuf,0xDD);
    2047       emit_rm(cbuf, 0x0, 0x0, 0x5);
    2048       emit_double_constant(cbuf, $src$$constant);
    2049     }
    2050   %}
    2051 
    2052 
    2053   enc_class LdImmF (immF src) %{    // Load Immediate
    2054     if( is_positive_zero_float($src$$constant)) {
    2055       emit_opcode(cbuf,0xD9);
    2056       emit_opcode(cbuf,0xEE);
    2057     } else if( is_positive_one_float($src$$constant)) {
    2058       emit_opcode(cbuf,0xD9);
    2059       emit_opcode(cbuf,0xE8);
    2060     } else {
    2061       $$$emit8$primary;
    2062       // Load immediate does not have a zero or sign extended version
    2063       // for 8-bit immediates
    2064       // First load to TOS, then move to dst
    2065       emit_rm(cbuf, 0x0, 0x0, 0x5);
    2066       emit_float_constant(cbuf, $src$$constant);
    2067     }
    2068   %}
    2069 
    2070   enc_class LdImmX (regX dst, immXF con) %{    // Load Immediate
    2071     emit_rm(cbuf, 0x0, $dst$$reg, 0x5);
    2072     emit_float_constant(cbuf, $con$$constant);
    2073   %}
    2074 
    2075   enc_class LdImmXD (regXD dst, immXD con) %{    // Load Immediate
    2076     emit_rm(cbuf, 0x0, $dst$$reg, 0x5);
    2077     emit_double_constant(cbuf, $con$$constant);
    2078   %}
    2079 
    2080   enc_class load_conXD (regXD dst, immXD con) %{ // Load double constant
    2081     // UseXmmLoadAndClearUpper ? movsd(dst, con) : movlpd(dst, con)
    2082     emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
    2083     emit_opcode(cbuf, 0x0F);
    2084     emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12);
    2085     emit_rm(cbuf, 0x0, $dst$$reg, 0x5);
    2086     emit_double_constant(cbuf, $con$$constant);
    2087   %}
    2088 
    2089   enc_class Opc_MemImm_F(immF src) %{
    2090     cbuf.set_inst_mark();
    2091     $$$emit8$primary;
    2092     emit_rm(cbuf, 0x0, $secondary, 0x5);
    2093     emit_float_constant(cbuf, $src$$constant);
    20942016  %}
    20952017
     
    22812203
    22822204  enc_class set_instruction_start( ) %{
    2283     cbuf.set_inst_mark();            // Mark start of opcode for reloc info in mem operand
     2205    cbuf.set_insts_mark();            // Mark start of opcode for reloc info in mem operand
    22842206  %}
    22852207
     
    23202242    emit_opcode( cbuf, 0x8B ); // Move
    23212243    emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW($dst$$reg));
    2322     emit_d8(cbuf,$primary);
    2323     emit_rm(cbuf, 0x3, $secondary, $dst$$reg);
    2324     emit_d8(cbuf,$cnt$$constant-32);
     2244    if( $cnt$$constant > 32 ) { // Shift, if not by zero
     2245      emit_d8(cbuf,$primary);
     2246      emit_rm(cbuf, 0x3, $secondary, $dst$$reg);
     2247      emit_d8(cbuf,$cnt$$constant-32);
     2248    }
    23252249    emit_d8(cbuf,$primary);
    23262250    emit_rm(cbuf, 0x3, $secondary, HIGH_FROM_LOW($dst$$reg));
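
The added guard skips the register shift when the residual count is zero: a 64-bit shift by exactly 32 needs only the move between register halves. Assuming this encoder is the sign-extending right-shift-by-[32..63] case, the decomposition it emits corresponds to the sketch below (relies on >> being an arithmetic shift for signed operands, as on mainstream compilers):

#include <cstdint>

inline int64_t sar64_by_big_const(int64_t x, unsigned cnt) {  // cnt assumed in [32, 63]
  int32_t hi = (int32_t)(x >> 32);
  int32_t lo = hi;                   // MOV dst.lo, dst.hi
  if (cnt > 32) lo >>= (cnt - 32);   // SAR dst.lo, cnt-32 (skipped when cnt == 32)
  hi >>= 31;                         // SAR dst.hi, 31: high word becomes the sign bits
  uint64_t res = ((uint64_t)(uint32_t)hi << 32) | (uint32_t)lo;
  return (int64_t)res;               // two's-complement recombination
}
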
     
    24302354      emit_d8( cbuf, 0xC0-1+$src$$reg );
    24312355    }
    2432     cbuf.set_inst_mark();       // Mark start of opcode for reloc info in mem operand
     2356    cbuf.set_insts_mark();       // Mark start of opcode for reloc info in mem operand
    24332357    emit_opcode(cbuf,$primary);
    24342358    encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_is_oop);
     
    24752399    emit_rm(cbuf, 0x3, tmpReg, tmpReg);
    24762400    // AND $tmp,$y
    2477     cbuf.set_inst_mark();       // Mark start of opcode for reloc info in mem operand
     2401    cbuf.set_insts_mark();       // Mark start of opcode for reloc info in mem operand
    24782402    emit_opcode(cbuf,0x23);
    24792403    int reg_encoding = tmpReg;
     
    31583082    emit_opcode(cbuf,               0x50+$src2$$reg  );
    31593083    // CALL directly to the runtime
    3160     cbuf.set_inst_mark();
     3084    cbuf.set_insts_mark();
    31613085    emit_opcode(cbuf,0xE8);       // Call into runtime
    3162     emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::ldiv) - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
     3086    emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::ldiv) - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
    31633087    // Restore stack
    31643088    emit_opcode(cbuf, 0x83); // add  SP, #framesize
     
    31773101    emit_opcode(cbuf,               0x50+$src2$$reg  );
    31783102    // CALL directly to the runtime
    3179     cbuf.set_inst_mark();
     3103    cbuf.set_insts_mark();
    31803104    emit_opcode(cbuf,0xE8);       // Call into runtime
    3181     emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::lrem ) - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
     3105    emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::lrem ) - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
    31823106    // Restore stack
    31833107    emit_opcode(cbuf, 0x83); // add  SP, #framesize
     
    34963420
    34973421         // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
    3498          if ((EmitSync & 2048) && VM_Version::supports_3dnow() && os::is_MP()) {
     3422         if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
    34993423            // prefetchw [eax + Offset(_owner)-2]
    35003424            masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2));
     
    35403464
    35413465         // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
    3542          if ((EmitSync & 2048) && VM_Version::supports_3dnow() && os::is_MP()) {
     3466         if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
    35433467            // prefetchw [eax + Offset(_owner)-2]
    35443468            masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2));
     
    36873611
    36883612      masm.get_thread (boxReg) ;
    3689       if ((EmitSync & 4096) && VM_Version::supports_3dnow() && os::is_MP()) {
     3613      if ((EmitSync & 4096) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
    36903614        // prefetchw [ebx + Offset(_owner)-2]
    36913615        masm.prefetchw(Address(rbx, ObjectMonitor::owner_offset_in_bytes()-2));
     
    38253749
    38263750  enc_class enc_rethrow() %{
    3827     cbuf.set_inst_mark();
     3751    cbuf.set_insts_mark();
    38283752    emit_opcode(cbuf, 0xE9);        // jmp    entry
    3829     emit_d32_reloc(cbuf, (int)OptoRuntime::rethrow_stub() - ((int)cbuf.code_end())-4,
     3753    emit_d32_reloc(cbuf, (int)OptoRuntime::rethrow_stub() - ((int)cbuf.insts_end())-4,
    38303754                   runtime_call_Relocation::spec(), RELOC_IMM32 );
    38313755  %}
     
    38743798    emit_d8    (cbuf,0xC0-1+$src$$reg );
    38753799    // CALL directly to the runtime
    3876     cbuf.set_inst_mark();
     3800    cbuf.set_insts_mark();
    38773801    emit_opcode(cbuf,0xE8);       // Call into runtime
    3878     emit_d32_reloc(cbuf, (StubRoutines::d2i_wrapper() - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
     3802    emit_d32_reloc(cbuf, (StubRoutines::d2i_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
    38793803    // Carry on here...
    38803804  %}
     
    39163840    emit_d8    (cbuf,0xC0-1+$src$$reg );
    39173841    // CALL directly to the runtime
    3918     cbuf.set_inst_mark();
     3842    cbuf.set_insts_mark();
    39193843    emit_opcode(cbuf,0xE8);       // Call into runtime
    3920     emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
     3844    emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
    39213845    // Carry on here...
    39223846  %}
     
    39893913
    39903914    // CALL directly to the runtime
    3991     cbuf.set_inst_mark();
     3915    cbuf.set_insts_mark();
    39923916    emit_opcode(cbuf,0xE8);       // Call into runtime
    3993     emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
     3917    emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
    39943918    // Carry on here...
    39953919  %}
     
    40633987
    40643988    // CALL directly to the runtime
    4065     cbuf.set_inst_mark();
     3989    cbuf.set_insts_mark();
    40663990    emit_opcode(cbuf,0xE8);      // Call into runtime
    4067     emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
     3991    emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
    40683992    // Carry on here...
    40693993  %}
     
    41234047
    41244048    // CALL directly to the runtime
    4125     cbuf.set_inst_mark();
     4049    cbuf.set_insts_mark();
    41264050    emit_opcode(cbuf,0xE8);       // Call into runtime
    4127     emit_d32_reloc(cbuf, (StubRoutines::d2i_wrapper() - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
     4051    emit_d32_reloc(cbuf, (StubRoutines::d2i_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
    41284052
    41294053    // Carry on here...
     
    43224246  enc_class enc_storeL_volatile( memory mem, stackSlotL src ) %{
    43234247    store_to_stackslot( cbuf, 0x0DF, 0x05, $src$$disp );
    4324     cbuf.set_inst_mark();            // Mark start of FIST in case $mem has an oop
     4248    cbuf.set_insts_mark();            // Mark start of FIST in case $mem has an oop
    43254249    emit_opcode(cbuf,0xDF);
    43264250    int rm_byte_opcode = 0x07;
     
    43464270      encode_RegMem(cbuf, $tmp$$reg, base, index, scale, displace, disp_is_oop);
    43474271    }
    4348     cbuf.set_inst_mark();            // Mark start of MOVSD in case $mem has an oop
     4272    cbuf.set_insts_mark();            // Mark start of MOVSD in case $mem has an oop
    43494273    { // MOVSD $mem,$tmp ! atomic long store
    43504274      emit_opcode(cbuf,0xF2);
     
    43794303      emit_rm(cbuf, 0x3, $tmp$$reg, $tmp2$$reg);
    43804304    }
    4381     cbuf.set_inst_mark();            // Mark start of MOVSD in case $mem has an oop
     4305    cbuf.set_insts_mark();            // Mark start of MOVSD in case $mem has an oop
    43824306    { // MOVSD $mem,$tmp ! atomic long store
    43834307      emit_opcode(cbuf,0xF2);
     
    44004324
    44014325  enc_class Safepoint_Poll() %{
    4402     cbuf.relocate(cbuf.inst_mark(), relocInfo::poll_type, 0);
     4326    cbuf.relocate(cbuf.insts_mark(), relocInfo::poll_type, 0);
    44034327    emit_opcode(cbuf,0x85);
    44044328    emit_rm (cbuf, 0x0, 0x7, 0x5);
     
    47974721%}
    47984722
    4799 // Double Immediate
     4723// Double Immediate one
    48004724operand immD1() %{
    48014725  predicate( UseSSE<=1 && n->getd() == 1.0 );
     
    48404764// Float Immediate zero
    48414765operand immF0() %{
    4842   predicate( UseSSE == 0 && n->getf() == 0.0 );
     4766  predicate(UseSSE == 0 && n->getf() == 0.0F);
     4767  match(ConF);
     4768
     4769  op_cost(5);
     4770  format %{ %}
     4771  interface(CONST_INTER);
     4772%}
     4773
     4774// Float Immediate one
     4775operand immF1() %{
     4776  predicate(UseSSE == 0 && n->getf() == 1.0F);
    48434777  match(ConF);
    48444778
     
    72117145
    72127146// The instruction usage is guarded by predicate in operand immF().
    7213 instruct loadConF(regF dst, immF src) %{
    7214   match(Set dst src);
     7147instruct loadConF(regF dst, immF con) %{
     7148  match(Set dst con);
    72157149  ins_cost(125);
    7216 
    7217   format %{ "FLD_S  ST,$src\n\t"
     7150  format %{ "FLD_S  ST,[$constantaddress]\t# load from constant table: float=$con\n\t"
    72187151            "FSTP   $dst" %}
    7219   opcode(0xD9, 0x00);       /* D9 /0 */
    7220   ins_encode(LdImmF(src), Pop_Reg_F(dst) );
    7221   ins_pipe( fpu_reg_con );
     7152  ins_encode %{
     7153    __ fld_s($constantaddress($con));
     7154    __ fstp_d($dst$$reg);
     7155  %}
     7156  ins_pipe(fpu_reg_con);
     7157%}
     7158
     7159// The instruction usage is guarded by predicate in operand immF0().
     7160instruct loadConF0(regF dst, immF0 con) %{
     7161  match(Set dst con);
     7162  ins_cost(125);
     7163  format %{ "FLDZ   ST\n\t"
     7164            "FSTP   $dst" %}
     7165  ins_encode %{
     7166    __ fldz();
     7167    __ fstp_d($dst$$reg);
     7168  %}
     7169  ins_pipe(fpu_reg_con);
     7170%}
     7171
     7172// The instruction usage is guarded by predicate in operand immF1().
     7173instruct loadConF1(regF dst, immF1 con) %{
     7174  match(Set dst con);
     7175  ins_cost(125);
     7176  format %{ "FLD1   ST\n\t"
     7177            "FSTP   $dst" %}
     7178  ins_encode %{
     7179    __ fld1();
     7180    __ fstp_d($dst$$reg);
     7181  %}
     7182  ins_pipe(fpu_reg_con);
    72227183%}
    72237184
     
    72267187  match(Set dst con);
    72277188  ins_cost(125);
    7228   format %{ "MOVSS  $dst,[$con]" %}
    7229   ins_encode( Opcode(0xF3), Opcode(0x0F), Opcode(0x10), LdImmX(dst, con));
    7230   ins_pipe( pipe_slow );
     7189  format %{ "MOVSS  $dst,[$constantaddress]\t# load from constant table: float=$con" %}
     7190  ins_encode %{
     7191    __ movflt($dst$$XMMRegister, $constantaddress($con));
     7192  %}
     7193  ins_pipe(pipe_slow);
    72317194%}
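
loadConX goes through the new per-method constant table because SSE has no float-immediate form: MOVSS only accepts a register or memory source, so the constant needs an address. An intrinsics-level illustration, where a static in .rodata merely plays the role of a constant-table slot:

#include <xmmintrin.h>

static const float k_example = 3.14159265f;   // stand-in for a constant-table entry

__m128 load_example_constant() {
  return _mm_load_ss(&k_example);             // MOVSS xmm, dword ptr [mem]
}
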
    72327195
     
    72367199  ins_cost(100);
    72377200  format %{ "XORPS  $dst,$dst\t# float 0.0" %}
    7238   ins_encode( Opcode(0x0F), Opcode(0x57), RegReg(dst,dst));
    7239   ins_pipe( pipe_slow );
     7201  ins_encode %{
     7202    __ xorps($dst$$XMMRegister, $dst$$XMMRegister);
     7203  %}
     7204  ins_pipe(pipe_slow);
    72407205%}
    72417206
    72427207// The instruction usage is guarded by predicate in operand immD().
    7243 instruct loadConD(regD dst, immD src) %{
    7244   match(Set dst src);
     7208instruct loadConD(regD dst, immD con) %{
     7209  match(Set dst con);
    72457210  ins_cost(125);
    72467211
    7247   format %{ "FLD_D  ST,$src\n\t"
     7212  format %{ "FLD_D  ST,[$constantaddress]\t# load from constant table: double=$con\n\t"
    72487213            "FSTP   $dst" %}
    7249   ins_encode(LdImmD(src), Pop_Reg_D(dst) );
    7250   ins_pipe( fpu_reg_con );
     7214  ins_encode %{
     7215    __ fld_d($constantaddress($con));
     7216    __ fstp_d($dst$$reg);
     7217  %}
     7218  ins_pipe(fpu_reg_con);
     7219%}
     7220
     7221// The instruction usage is guarded by predicate in operand immD0().
     7222instruct loadConD0(regD dst, immD0 con) %{
     7223  match(Set dst con);
     7224  ins_cost(125);
     7225
     7226  format %{ "FLDZ   ST\n\t"
     7227            "FSTP   $dst" %}
     7228  ins_encode %{
     7229    __ fldz();
     7230    __ fstp_d($dst$$reg);
     7231  %}
     7232  ins_pipe(fpu_reg_con);
     7233%}
     7234
     7235// The instruction usage is guarded by predicate in operand immD1().
     7236instruct loadConD1(regD dst, immD1 con) %{
     7237  match(Set dst con);
     7238  ins_cost(125);
     7239
     7240  format %{ "FLD1   ST\n\t"
     7241            "FSTP   $dst" %}
     7242  ins_encode %{
     7243    __ fld1();
     7244    __ fstp_d($dst$$reg);
     7245  %}
     7246  ins_pipe(fpu_reg_con);
    72517247%}
    72527248
     
    72557251  match(Set dst con);
    72567252  ins_cost(125);
    7257   format %{ "MOVSD  $dst,[$con]" %}
    7258   ins_encode(load_conXD(dst, con));
    7259   ins_pipe( pipe_slow );
     7253  format %{ "MOVSD  $dst,[$constantaddress]\t# load from constant table: double=$con" %}
     7254  ins_encode %{
     7255    __ movdbl($dst$$XMMRegister, $constantaddress($con));
     7256  %}
     7257  ins_pipe(pipe_slow);
    72607258%}
    72617259
     
    73327330
    73337331instruct prefetchr0( memory mem ) %{
    7334   predicate(UseSSE==0 && !VM_Version::supports_3dnow());
     7332  predicate(UseSSE==0 && !VM_Version::supports_3dnow_prefetch());
    73357333  match(PrefetchRead mem);
    73367334  ins_cost(0);
     
    73427340
    73437341instruct prefetchr( memory mem ) %{
    7344   predicate(UseSSE==0 && VM_Version::supports_3dnow() || ReadPrefetchInstr==3);
     7342  predicate(UseSSE==0 && VM_Version::supports_3dnow_prefetch() || ReadPrefetchInstr==3);
    73457343  match(PrefetchRead mem);
    73467344  ins_cost(100);
     
    73867384
    73877385instruct prefetchw0( memory mem ) %{
    7388   predicate(UseSSE==0 && !VM_Version::supports_3dnow());
     7386  predicate(UseSSE==0 && !VM_Version::supports_3dnow_prefetch());
    73897387  match(PrefetchWrite mem);
    73907388  ins_cost(0);
     
    73967394
    73977395instruct prefetchw( memory mem ) %{
    7398   predicate(UseSSE==0 && VM_Version::supports_3dnow() || AllocatePrefetchInstr==3);
     7396  predicate(UseSSE==0 && VM_Version::supports_3dnow_prefetch() || AllocatePrefetchInstr==3);
    73997397  match( PrefetchWrite mem );
    74007398  ins_cost(100);
     
    88538851%}
    88548852
     8853// Divide Register Long (no special case since divisor != -1)
     8854instruct divL_eReg_imm32( eADXRegL dst, immL32 imm, eRegI tmp, eRegI tmp2, eFlagsReg cr ) %{
     8855  match(Set dst (DivL dst imm));
     8856  effect( TEMP tmp, TEMP tmp2, KILL cr );
     8857  ins_cost(1000);
     8858  format %{ "MOV    $tmp,abs($imm) # ldiv EDX:EAX,$imm\n\t"
     8859            "XOR    $tmp2,$tmp2\n\t"
     8860            "CMP    $tmp,EDX\n\t"
     8861            "JA,s   fast\n\t"
     8862            "MOV    $tmp2,EAX\n\t"
     8863            "MOV    EAX,EDX\n\t"
     8864            "MOV    EDX,0\n\t"
     8865            "JLE,s  pos\n\t"
     8866            "LNEG   EAX : $tmp2\n\t"
     8867            "DIV    $tmp # unsigned division\n\t"
     8868            "XCHG   EAX,$tmp2\n\t"
     8869            "DIV    $tmp\n\t"
     8870            "LNEG   $tmp2 : EAX\n\t"
     8871            "JMP,s  done\n"
     8872    "pos:\n\t"
     8873            "DIV    $tmp\n\t"
     8874            "XCHG   EAX,$tmp2\n"
     8875    "fast:\n\t"
     8876            "DIV    $tmp\n"
     8877    "done:\n\t"
     8878            "MOV    EDX,$tmp2\n\t"
     8879            "NEG    EDX:EAX # if $imm < 0" %}
     8880  ins_encode %{
     8881    int con = (int)$imm$$constant;
     8882    assert(con != 0 && con != -1 && con != min_jint, "wrong divisor");
     8883    int pcon = (con > 0) ? con : -con;
     8884    Label Lfast, Lpos, Ldone;
     8885
     8886    __ movl($tmp$$Register, pcon);
     8887    __ xorl($tmp2$$Register,$tmp2$$Register);
     8888    __ cmpl($tmp$$Register, HIGH_FROM_LOW($dst$$Register));
      8889    __ jccb(Assembler::above, Lfast); // result fits into 32 bits
     8890
     8891    __ movl($tmp2$$Register, $dst$$Register); // save
     8892    __ movl($dst$$Register, HIGH_FROM_LOW($dst$$Register));
     8893    __ movl(HIGH_FROM_LOW($dst$$Register),0); // preserve flags
     8894    __ jccb(Assembler::lessEqual, Lpos); // result is positive
     8895
     8896    // Negative dividend.
     8897    // convert value to positive to use unsigned division
     8898    __ lneg($dst$$Register, $tmp2$$Register);
     8899    __ divl($tmp$$Register);
     8900    __ xchgl($dst$$Register, $tmp2$$Register);
     8901    __ divl($tmp$$Register);
     8902    // revert result back to negative
     8903    __ lneg($tmp2$$Register, $dst$$Register);
     8904    __ jmpb(Ldone);
     8905
     8906    __ bind(Lpos);
     8907    __ divl($tmp$$Register); // Use unsigned division
     8908    __ xchgl($dst$$Register, $tmp2$$Register);
     8909    // Fallthrow for final divide, tmp2 has 32 bit hi result
     8910
     8911    __ bind(Lfast);
     8912    // fast path: src is positive
     8913    __ divl($tmp$$Register); // Use unsigned division
     8914
     8915    __ bind(Ldone);
     8916    __ movl(HIGH_FROM_LOW($dst$$Register),$tmp2$$Register);
     8917    if (con < 0) {
     8918      __ lneg(HIGH_FROM_LOW($dst$$Register), $dst$$Register);
     8919    }
     8920  %}
     8921  ins_pipe( pipe_slow );
     8922%}
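
The encoder above divides a 64-bit value by a 32-bit constant with two unsigned 32-bit DIVs once the dividend has been made non-negative; the fast path fires when the high word is already smaller than the divisor. A host-level sketch of the same arithmetic (illustrative names, edge cases such as dividing LONG_MIN glossed over):

#include <cstdint>

// Schoolbook 64/32 unsigned division: divide the high word first, then the
// (remainder:low) pair; the second quotient fits in 32 bits because r < d.
static uint64_t udiv64_by_32(uint64_t x, uint32_t d) {
  uint32_t hi = (uint32_t)(x >> 32), lo = (uint32_t)x;
  uint32_t q_hi = hi / d;
  uint32_t r    = hi % d;
  uint32_t q_lo = (uint32_t)((((uint64_t)r << 32) | lo) / d);
  return ((uint64_t)q_hi << 32) | q_lo;
}

static int64_t sdiv64_by_const(int64_t x, int32_t con) {
  uint32_t p   = (uint32_t)(con > 0 ? con : -con);       // abs($imm), as in "MOV $tmp,abs($imm)"
  uint64_t mag = x < 0 ? 0 - (uint64_t)x : (uint64_t)x;  // LNEG path for a negative dividend
  int64_t  q   = (int64_t)udiv64_by_32(mag, p);
  return ((x < 0) != (con < 0)) ? -q : q;                // final NEG EDX:EAX if signs differ
}
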
     8923
      8924// Remainder Register Long (remainder fits into 32 bits)
     8925instruct modL_eReg_imm32( eADXRegL dst, immL32 imm, eRegI tmp, eRegI tmp2, eFlagsReg cr ) %{
     8926  match(Set dst (ModL dst imm));
     8927  effect( TEMP tmp, TEMP tmp2, KILL cr );
     8928  ins_cost(1000);
     8929  format %{ "MOV    $tmp,abs($imm) # lrem EDX:EAX,$imm\n\t"
     8930            "CMP    $tmp,EDX\n\t"
     8931            "JA,s   fast\n\t"
     8932            "MOV    $tmp2,EAX\n\t"
     8933            "MOV    EAX,EDX\n\t"
     8934            "MOV    EDX,0\n\t"
     8935            "JLE,s  pos\n\t"
     8936            "LNEG   EAX : $tmp2\n\t"
     8937            "DIV    $tmp # unsigned division\n\t"
     8938            "MOV    EAX,$tmp2\n\t"
     8939            "DIV    $tmp\n\t"
     8940            "NEG    EDX\n\t"
     8941            "JMP,s  done\n"
     8942    "pos:\n\t"
     8943            "DIV    $tmp\n\t"
     8944            "MOV    EAX,$tmp2\n"
     8945    "fast:\n\t"
     8946            "DIV    $tmp\n"
     8947    "done:\n\t"
     8948            "MOV    EAX,EDX\n\t"
     8949            "SAR    EDX,31\n\t" %}
     8950  ins_encode %{
     8951    int con = (int)$imm$$constant;
     8952    assert(con != 0 && con != -1 && con != min_jint, "wrong divisor");
     8953    int pcon = (con > 0) ? con : -con;
     8954    Label  Lfast, Lpos, Ldone;
     8955
     8956    __ movl($tmp$$Register, pcon);
     8957    __ cmpl($tmp$$Register, HIGH_FROM_LOW($dst$$Register));
      8958    __ jccb(Assembler::above, Lfast); // src is positive and result fits into 32 bits
     8959
     8960    __ movl($tmp2$$Register, $dst$$Register); // save
     8961    __ movl($dst$$Register, HIGH_FROM_LOW($dst$$Register));
     8962    __ movl(HIGH_FROM_LOW($dst$$Register),0); // preserve flags
     8963    __ jccb(Assembler::lessEqual, Lpos); // result is positive
     8964
     8965    // Negative dividend.
     8966    // convert value to positive to use unsigned division
     8967    __ lneg($dst$$Register, $tmp2$$Register);
     8968    __ divl($tmp$$Register);
     8969    __ movl($dst$$Register, $tmp2$$Register);
     8970    __ divl($tmp$$Register);
     8971    // revert remainder back to negative
     8972    __ negl(HIGH_FROM_LOW($dst$$Register));
     8973    __ jmpb(Ldone);
     8974
     8975    __ bind(Lpos);
     8976    __ divl($tmp$$Register);
     8977    __ movl($dst$$Register, $tmp2$$Register);
     8978
     8979    __ bind(Lfast);
     8980    // fast path: src is positive
     8981    __ divl($tmp$$Register);
     8982
     8983    __ bind(Ldone);
     8984    __ movl($dst$$Register, HIGH_FROM_LOW($dst$$Register));
     8985    __ sarl(HIGH_FROM_LOW($dst$$Register), 31); // result sign
     8986
     8987  %}
     8988  ins_pipe( pipe_slow );
     8989%}
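
For the remainder variant the result always fits in 32 bits (it is smaller in magnitude than the divisor), and in Java its sign follows the dividend, which is what the trailing "MOV EAX,EDX / SAR EDX,31" sign-extension produces. A matching host-level sketch, illustrative only:

#include <cstdint>

static int64_t srem64_by_const(int64_t x, int32_t con) {
  uint32_t p   = (uint32_t)(con > 0 ? con : -con);
  uint64_t mag = x < 0 ? 0 - (uint64_t)x : (uint64_t)x;
  uint32_t hi  = (uint32_t)(mag >> 32), lo = (uint32_t)mag;
  uint32_t r   = (uint32_t)(((((uint64_t)(hi % p)) << 32) | lo) % p);  // fits in 32 bits
  return x < 0 ? -(int64_t)r : (int64_t)r;                             // sign follows the dividend
}
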
     8990
    88558991// Integer Shift Instructions
    88568992// Shift Left by one
     
    1016110297%}
    1016210298
    10163 instruct addD_reg_imm1(regD dst, immD1 src) %{
     10299instruct addD_reg_imm1(regD dst, immD1 con) %{
    1016410300  predicate(UseSSE<=1);
    10165   match(Set dst (AddD dst src));
     10301  match(Set dst (AddD dst con));
    1016610302  ins_cost(125);
    1016710303  format %{ "FLD1\n\t"
    1016810304            "DADDp  $dst,ST" %}
    10169   opcode(0xDE, 0x00);
    10170   ins_encode( LdImmD(src),
    10171               OpcP, RegOpc(dst) );
    10172   ins_pipe( fpu_reg );
    10173 %}
    10174 
    10175 instruct addD_reg_imm(regD dst, immD src) %{
     10305  ins_encode %{
     10306    __ fld1();
     10307    __ faddp($dst$$reg);
     10308  %}
     10309  ins_pipe(fpu_reg);
     10310%}
     10311
     10312instruct addD_reg_imm(regD dst, immD con) %{
    1017610313  predicate(UseSSE<=1 && _kids[1]->_leaf->getd() != 0.0 && _kids[1]->_leaf->getd() != 1.0 );
    10177   match(Set dst (AddD dst src));
     10314  match(Set dst (AddD dst con));
    1017810315  ins_cost(200);
    10179   format %{ "FLD_D  [$src]\n\t"
     10316  format %{ "FLD_D  [$constantaddress]\t# load from constant table: double=$con\n\t"
    1018010317            "DADDp  $dst,ST" %}
    10181   opcode(0xDE, 0x00);       /* DE /0 */
    10182   ins_encode( LdImmD(src),
    10183               OpcP, RegOpc(dst));
    10184   ins_pipe( fpu_reg_mem );
     10318  ins_encode %{
     10319    __ fld_d($constantaddress($con));
     10320    __ faddp($dst$$reg);
     10321  %}
     10322  ins_pipe(fpu_reg_mem);
    1018510323%}
    1018610324
     
    1018910327  match(Set dst (RoundDouble (AddD src con)));
    1019010328  ins_cost(200);
    10191   format %{ "FLD_D  [$con]\n\t"
     10329  format %{ "FLD_D  [$constantaddress]\t# load from constant table: double=$con\n\t"
    1019210330            "DADD   ST,$src\n\t"
    1019310331            "FSTP_D $dst\t# D-round" %}
    10194   opcode(0xD8, 0x00);       /* D8 /0 */
    10195   ins_encode( LdImmD(con),
    10196               OpcP, RegOpc(src), Pop_Mem_D(dst));
    10197   ins_pipe( fpu_mem_reg_con );
     10332  ins_encode %{
     10333    __ fld_d($constantaddress($con));
     10334    __ fadd($src$$reg);
     10335    __ fstp_d(Address(rsp, $dst$$disp));
     10336  %}
     10337  ins_pipe(fpu_mem_reg_con);
    1019810338%}
    1019910339
     
    1021010350  predicate(UseSSE>=2);
    1021110351  match(Set dst (AddD dst con));
    10212   format %{ "ADDSD  $dst,[$con]" %}
    10213   ins_encode( Opcode(0xF2), Opcode(0x0F), Opcode(0x58), LdImmXD(dst, con) );
    10214   ins_pipe( pipe_slow );
     10352  format %{ "ADDSD  $dst,[$constantaddress]\t# load from constant table: double=$con" %}
     10353  ins_encode %{
     10354    __ addsd($dst$$XMMRegister, $constantaddress($con));
     10355  %}
     10356  ins_pipe(pipe_slow);
    1021510357%}
    1021610358
     
    1023510377  predicate(UseSSE>=2);
    1023610378  match(Set dst (SubD dst con));
    10237   format %{ "SUBSD  $dst,[$con]" %}
    10238   ins_encode( Opcode(0xF2), Opcode(0x0F), Opcode(0x5C), LdImmXD(dst, con) );
    10239   ins_pipe( pipe_slow );
     10379  format %{ "SUBSD  $dst,[$constantaddress]\t# load from constant table: double=$con" %}
     10380  ins_encode %{
     10381    __ subsd($dst$$XMMRegister, $constantaddress($con));
     10382  %}
     10383  ins_pipe(pipe_slow);
    1024010384%}
    1024110385
     
    1026010404  predicate(UseSSE>=2);
    1026110405  match(Set dst (MulD dst con));
    10262   format %{ "MULSD  $dst,[$con]" %}
    10263   ins_encode( Opcode(0xF2), Opcode(0x0F), Opcode(0x59), LdImmXD(dst, con) );
    10264   ins_pipe( pipe_slow );
     10406  format %{ "MULSD  $dst,[$constantaddress]\t# load from constant table: double=$con" %}
     10407  ins_encode %{
     10408    __ mulsd($dst$$XMMRegister, $constantaddress($con));
     10409  %}
     10410  ins_pipe(pipe_slow);
    1026510411%}
    1026610412
     
    1028610432  predicate(UseSSE>=2);
    1028710433  match(Set dst (DivD dst con));
    10288   format %{ "DIVSD  $dst,[$con]" %}
    10289   ins_encode( Opcode(0xF2), Opcode(0x0F), Opcode(0x5E), LdImmXD(dst, con));
    10290   ins_pipe( pipe_slow );
     10434  format %{ "DIVSD  $dst,[$constantaddress]\t# load from constant table: double=$con" %}
     10435  ins_encode %{
     10436    __ divsd($dst$$XMMRegister, $constantaddress($con));
     10437  %}
     10438  ins_pipe(pipe_slow);
    1029110439%}
    1029210440
     
    1033910487%}
    1034010488
    10341 instruct mulD_reg_imm(regD dst, immD src) %{
     10489instruct mulD_reg_imm(regD dst, immD con) %{
    1034210490  predicate( UseSSE<=1 && _kids[1]->_leaf->getd() != 0.0 && _kids[1]->_leaf->getd() != 1.0 );
    10343   match(Set dst (MulD dst src));
     10491  match(Set dst (MulD dst con));
    1034410492  ins_cost(200);
    10345   format %{ "FLD_D  [$src]\n\t"
     10493  format %{ "FLD_D  [$constantaddress]\t# load from constant table: double=$con\n\t"
    1034610494            "DMULp  $dst,ST" %}
    10347   opcode(0xDE, 0x1); /* DE /1 */
    10348   ins_encode( LdImmD(src),
    10349               OpcP, RegOpc(dst) );
    10350   ins_pipe( fpu_reg_mem );
     10495  ins_encode %{
     10496    __ fld_d($constantaddress($con));
     10497    __ fmulp($dst$$reg);
     10498  %}
     10499  ins_pipe(fpu_reg_mem);
    1035110500%}
    1035210501
     
    1108211231  predicate(UseSSE>=1);
    1108311232  match(Set dst (AddF dst con));
    11084   format %{ "ADDSS  $dst,[$con]" %}
    11085   ins_encode( Opcode(0xF3), Opcode(0x0F), Opcode(0x58), LdImmX(dst, con) );
    11086   ins_pipe( pipe_slow );
     11233  format %{ "ADDSS  $dst,[$constantaddress]\t# load from constant table: float=$con" %}
     11234  ins_encode %{
     11235    __ addss($dst$$XMMRegister, $constantaddress($con));
     11236  %}
     11237  ins_pipe(pipe_slow);
    1108711238%}
    1108811239
     
    1110711258  predicate(UseSSE>=1);
    1110811259  match(Set dst (SubF dst con));
    11109   format %{ "SUBSS  $dst,[$con]" %}
    11110   ins_encode( Opcode(0xF3), Opcode(0x0F), Opcode(0x5C), LdImmX(dst, con) );
    11111   ins_pipe( pipe_slow );
     11260  format %{ "SUBSS  $dst,[$constantaddress]\t# load from constant table: float=$con" %}
     11261  ins_encode %{
     11262    __ subss($dst$$XMMRegister, $constantaddress($con));
     11263  %}
     11264  ins_pipe(pipe_slow);
    1111211265%}
    1111311266
     
    1113211285  predicate(UseSSE>=1);
    1113311286  match(Set dst (MulF dst con));
    11134   format %{ "MULSS  $dst,[$con]" %}
    11135   ins_encode( Opcode(0xF3), Opcode(0x0F), Opcode(0x59), LdImmX(dst, con) );
    11136   ins_pipe( pipe_slow );
     11287  format %{ "MULSS  $dst,[$constantaddress]\t# load from constant table: float=$con" %}
     11288  ins_encode %{
     11289    __ mulss($dst$$XMMRegister, $constantaddress($con));
     11290  %}
     11291  ins_pipe(pipe_slow);
    1113711292%}
    1113811293
     
    1115711312  predicate(UseSSE>=1);
    1115811313  match(Set dst (DivF dst con));
    11159   format %{ "DIVSS  $dst,[$con]" %}
    11160   ins_encode( Opcode(0xF3), Opcode(0x0F), Opcode(0x5E), LdImmX(dst, con) );
    11161   ins_pipe( pipe_slow );
     11314  format %{ "DIVSS  $dst,[$constantaddress]\t# load from constant table: float=$con" %}
     11315  ins_encode %{
     11316    __ divss($dst$$XMMRegister, $constantaddress($con));
     11317  %}
     11318  ins_pipe(pipe_slow);
    1116211319%}
    1116311320
     
    1131411471
    1131511472// Spill to obtain 24-bit precision
    11316 instruct addF24_reg_imm(stackSlotF dst, regF src1, immF src2) %{
     11473instruct addF24_reg_imm(stackSlotF dst, regF src, immF con) %{
    1131711474  predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
    11318   match(Set dst (AddF src1 src2));
    11319   format %{ "FLD    $src1\n\t"
    11320             "FADD   $src2\n\t"
     11475  match(Set dst (AddF src con));
     11476  format %{ "FLD    $src\n\t"
     11477            "FADD_S [$constantaddress]\t# load from constant table: float=$con\n\t"
    1132111478            "FSTP_S $dst"  %}
    11322   opcode(0xD8, 0x00);       /* D8 /0 */
    11323   ins_encode( Push_Reg_F(src1),
    11324               Opc_MemImm_F(src2),
    11325               Pop_Mem_F(dst));
    11326   ins_pipe( fpu_mem_reg_con );
     11479  ins_encode %{
     11480    __ fld_s($src$$reg - 1);  // FLD ST(i-1)
     11481    __ fadd_s($constantaddress($con));
     11482    __ fstp_s(Address(rsp, $dst$$disp));
     11483  %}
     11484  ins_pipe(fpu_mem_reg_con);
    1132711485%}
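
The 24-bit variants spill through a 32-bit stack slot because x87 registers hold 80-bit extended values; storing with FSTP_S is what forces the result down to float precision before it is used again. A host-level sketch of that rounding step, illustrative only:

inline float round_to_float_precision(long double x87_value) {
  volatile float spilled = (float)x87_value;  // what the FSTP_S to the stack slot achieves
  return spilled;                             // later uses see only the 24-bit mantissa
}
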
    1132811486//
    1132911487// This instruction does not round to 24-bits
    11330 instruct addF_reg_imm(regF dst, regF src1, immF src2) %{
     11488instruct addF_reg_imm(regF dst, regF src, immF con) %{
    1133111489  predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
    11332   match(Set dst (AddF src1 src2));
    11333   format %{ "FLD    $src1\n\t"
    11334             "FADD   $src2\n\t"
    11335             "FSTP_S $dst"  %}
    11336   opcode(0xD8, 0x00);       /* D8 /0 */
    11337   ins_encode( Push_Reg_F(src1),
    11338               Opc_MemImm_F(src2),
    11339               Pop_Reg_F(dst));
    11340   ins_pipe( fpu_reg_reg_con );
     11490  match(Set dst (AddF src con));
     11491  format %{ "FLD    $src\n\t"
     11492            "FADD_S [$constantaddress]\t# load from constant table: float=$con\n\t"
     11493            "FSTP   $dst"  %}
     11494  ins_encode %{
     11495    __ fld_s($src$$reg - 1);  // FLD ST(i-1)
     11496    __ fadd_s($constantaddress($con));
     11497    __ fstp_d($dst$$reg);
     11498  %}
     11499  ins_pipe(fpu_reg_reg_con);
    1134111500%}
    1134211501
     
    1141711576
    1141811577// Spill to obtain 24-bit precision
    11419 instruct mulF24_reg_imm(stackSlotF dst, regF src1, immF src2) %{
     11578instruct mulF24_reg_imm(stackSlotF dst, regF src, immF con) %{
    1142011579  predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
    11421   match(Set dst (MulF src1 src2));
    11422 
    11423   format %{ "FMULc $dst,$src1,$src2" %}
    11424   opcode(0xD8, 0x1);  /* D8 /1*/
    11425   ins_encode( Push_Reg_F(src1),
    11426               Opc_MemImm_F(src2),
    11427               Pop_Mem_F(dst));
    11428   ins_pipe( fpu_mem_reg_con );
     11580  match(Set dst (MulF src con));
     11581
     11582  format %{ "FLD    $src\n\t"
     11583            "FMUL_S [$constantaddress]\t# load from constant table: float=$con\n\t"
     11584            "FSTP_S $dst"  %}
     11585  ins_encode %{
     11586    __ fld_s($src$$reg - 1);  // FLD ST(i-1)
     11587    __ fmul_s($constantaddress($con));
     11588    __ fstp_s(Address(rsp, $dst$$disp));
     11589  %}
     11590  ins_pipe(fpu_mem_reg_con);
    1142911591%}
    1143011592//
    1143111593// This instruction does not round to 24-bits
    11432 instruct mulF_reg_imm(regF dst, regF src1, immF src2) %{
     11594instruct mulF_reg_imm(regF dst, regF src, immF con) %{
    1143311595  predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
    11434   match(Set dst (MulF src1 src2));
    11435 
    11436   format %{ "FMULc $dst. $src1, $src2" %}
    11437   opcode(0xD8, 0x1);  /* D8 /1*/
    11438   ins_encode( Push_Reg_F(src1),
    11439               Opc_MemImm_F(src2),
    11440               Pop_Reg_F(dst));
    11441   ins_pipe( fpu_reg_reg_con );
     11596  match(Set dst (MulF src con));
     11597
     11598  format %{ "FLD    $src\n\t"
     11599            "FMUL_S [$constantaddress]\t# load from constant table: float=$con\n\t"
     11600            "FSTP   $dst"  %}
     11601  ins_encode %{
     11602    __ fld_s($src$$reg - 1);  // FLD ST(i-1)
     11603    __ fmul_s($constantaddress($con));
     11604    __ fstp_d($dst$$reg);
     11605  %}
     11606  ins_pipe(fpu_reg_reg_con);
    1144211607%}
    1144311608
     
    1246512630%}
    1246612631
    12467 instruct string_compare(eDIRegP str1, eCXRegI cnt1, eSIRegP str2, eBXRegI cnt2,
    12468                         eAXRegI result, regXD tmp1, regXD tmp2, eFlagsReg cr) %{
     12632instruct string_compare(eDIRegP str1, eCXRegI cnt1, eSIRegP str2, eDXRegI cnt2,
     12633                        eAXRegI result, regXD tmp1, eFlagsReg cr) %{
    1246912634  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
    12470   effect(TEMP tmp1, TEMP tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
    12471 
    12472   format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   // KILL $tmp1, $tmp2" %}
     12635  effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
     12636
     12637  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   // KILL $tmp1" %}
    1247312638  ins_encode %{
    1247412639    __ string_compare($str1$$Register, $str2$$Register,
    1247512640                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
    12476                       $tmp1$$XMMRegister, $tmp2$$XMMRegister);
     12641                      $tmp1$$XMMRegister);
    1247712642  %}
    1247812643  ins_pipe( pipe_slow );
     
    1279712962  match(Jump switch_val);
    1279812963  ins_cost(350);
    12799 
    12800   format %{  "JMP    [table_base](,$switch_val,1)\n\t" %}
    12801 
     12964  format %{  "JMP    [$constantaddress](,$switch_val,1)\n\t" %}
    1280212965  ins_encode %{
    12803     address table_base  = __ address_table_constant(_index2label);
    12804 
    1280512966    // Jump to Address(table_base + switch_reg)
    12806     InternalAddress table(table_base);
    1280712967    Address index(noreg, $switch_val$$Register, Address::times_1);
    12808     __ jump(ArrayAddress(table, index));
     12968    __ jump(ArrayAddress($constantaddress, index));
    1280912969  %}
    1281012970  ins_pc_relative(1);
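
jumpXtnd above is a tableswitch: an indirect jump through an entry of the jump table that now lives at $constantaddress, indexed by a switch value that already arrives as a byte offset (hence Address::times_1). A rough host-level analogue with invented types, not the generated code:

#include <cstdint>

using Target = void (*)();

inline void table_jump(const unsigned char* table_base, intptr_t switch_val_bytes) {
  Target t = *reinterpret_cast<const Target*>(table_base + switch_val_bytes);
  t();   // JMP [table_base + switch_val]
}
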
     
    1293313093    if ($cop$$cmpcode == Assembler::notEqual) {
    1293413094       // the two jumps 6 bytes apart so the jump distances are too
    12935        parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0;
     13095       parity_disp = l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0;
    1293613096    } else if ($cop$$cmpcode == Assembler::equal) {
    1293713097       parity_disp = 6;
     
    1294313103    $$$emit8$primary;
    1294413104    emit_cc(cbuf, $secondary, $cop$$cmpcode);
    12945     int disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0;
     13105    int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0;
    1294613106    emit_d32(cbuf, disp);
    1294713107  %}
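
Both branch encoders compute a PC-relative displacement, i.e. target position minus the end of the current instruction, which is why insts_size() gets +4 for a rel32 field and +1 for a rel8 field. The arithmetic, spelled out with assumed parameter names:

#include <cstdint>

// pos_of_target: label position in the buffer; disp_field_pos: offset where the
// displacement bytes begin (insts_size() at the moment they are emitted).
inline int32_t rel32_disp(int32_t pos_of_target, int32_t disp_field_pos) {
  return pos_of_target - (disp_field_pos + 4);   // relative to the next instruction
}

inline int8_t rel8_disp(int32_t pos_of_target, int32_t disp_field_pos) {
  int32_t d = pos_of_target - (disp_field_pos + 1);
  // the encoders assert -128 <= d <= 127 before emitting a short jump
  return (int8_t)d;
}
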
     
    1312913289    int parity_disp = -1;
    1313013290    if ($cop$$cmpcode == Assembler::notEqual) {
    13131       parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
     13291      parity_disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0;
    1313213292    } else if ($cop$$cmpcode == Assembler::equal) {
    1313313293      parity_disp = 2;
     
    1313713297    emit_d8(cbuf, parity_disp);
    1313813298    emit_cc(cbuf, $primary, $cop$$cmpcode);
    13139     int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
     13299    int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0;
    1314013300    emit_d8(cbuf, disp);
    1314113301    assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
  • trunk/openjdk/hotspot/src/cpu/x86/vm/x86_64.ad

    r278 r309  
    620620
    621621// EMIT_RM()
    622 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3)
    623 {
     622void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) {
    624623  unsigned char c = (unsigned char) ((f1 << 6) | (f2 << 3) | f3);
    625   *(cbuf.code_end()) = c;
    626   cbuf.set_code_end(cbuf.code_end() + 1);
     624  cbuf.insts()->emit_int8(c);
    627625}
    628626
    629627// EMIT_CC()
    630 void emit_cc(CodeBuffer &cbuf, int f1, int f2)
    631 {
     628void emit_cc(CodeBuffer &cbuf, int f1, int f2) {
    632629  unsigned char c = (unsigned char) (f1 | f2);
    633   *(cbuf.code_end()) = c;
    634   cbuf.set_code_end(cbuf.code_end() + 1);
     630  cbuf.insts()->emit_int8(c);
    635631}
    636632
    637633// EMIT_OPCODE()
    638 void emit_opcode(CodeBuffer &cbuf, int code)
    639 {
    640   *(cbuf.code_end()) = (unsigned char) code;
    641   cbuf.set_code_end(cbuf.code_end() + 1);
     634void emit_opcode(CodeBuffer &cbuf, int code) {
     635  cbuf.insts()->emit_int8((unsigned char) code);
    642636}
    643637
     
    646640                 int code, relocInfo::relocType reloc, int offset, int format)
    647641{
    648   cbuf.relocate(cbuf.inst_mark() + offset, reloc, format);
     642  cbuf.relocate(cbuf.insts_mark() + offset, reloc, format);
    649643  emit_opcode(cbuf, code);
    650644}
    651645
    652646// EMIT_D8()
    653 void emit_d8(CodeBuffer &cbuf, int d8)
    654 {
    655   *(cbuf.code_end()) = (unsigned char) d8;
    656   cbuf.set_code_end(cbuf.code_end() + 1);
     647void emit_d8(CodeBuffer &cbuf, int d8) {
     648  cbuf.insts()->emit_int8((unsigned char) d8);
    657649}
    658650
    659651// EMIT_D16()
    660 void emit_d16(CodeBuffer &cbuf, int d16)
    661 {
    662   *((short *)(cbuf.code_end())) = d16;
    663   cbuf.set_code_end(cbuf.code_end() + 2);
     652void emit_d16(CodeBuffer &cbuf, int d16) {
     653  cbuf.insts()->emit_int16(d16);
    664654}
    665655
    666656// EMIT_D32()
    667 void emit_d32(CodeBuffer &cbuf, int d32)
    668 {
    669   *((int *)(cbuf.code_end())) = d32;
    670   cbuf.set_code_end(cbuf.code_end() + 4);
     657void emit_d32(CodeBuffer &cbuf, int d32) {
     658  cbuf.insts()->emit_int32(d32);
    671659}
    672660
    673661// EMIT_D64()
    674 void emit_d64(CodeBuffer &cbuf, int64_t d64)
    675 {
    676   *((int64_t*) (cbuf.code_end())) = d64;
    677   cbuf.set_code_end(cbuf.code_end() + 8);
     662void emit_d64(CodeBuffer &cbuf, int64_t d64) {
     663  cbuf.insts()->emit_int64(d64);
    678664}
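
The rewritten helpers above funnel every byte through the CodeBuffer's insts section instead of writing through code_end() and bumping the pointer by hand. A toy stand-in (not HotSpot's CodeSection; names invented) showing the shape of such an emitter on a little-endian host:

#include <cstddef>
#include <cstdint>
#include <vector>

struct ToyCodeSection {
  std::vector<unsigned char> bytes;            // plays the role of the insts section
  void emit_int8 (uint8_t  v) { bytes.push_back(v); }
  void emit_int16(uint16_t v) { append(&v, sizeof v); }
  void emit_int32(uint32_t v) { append(&v, sizeof v); }
  void emit_int64(uint64_t v) { append(&v, sizeof v); }
 private:
  void append(const void* p, std::size_t n) {  // one place to grow and copy
    const unsigned char* b = static_cast<const unsigned char*>(p);
    bytes.insert(bytes.end(), b, b + n);       // bytes land little-endian on x86
  }
};
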
    679665
     
    685671{
    686672  assert(reloc != relocInfo::external_word_type, "use 2-arg emit_d32_reloc");
    687   cbuf.relocate(cbuf.inst_mark(), reloc, format);
    688 
    689   *((int*) (cbuf.code_end())) = d32;
    690   cbuf.set_code_end(cbuf.code_end() + 4);
     673  cbuf.relocate(cbuf.insts_mark(), reloc, format);
     674  cbuf.insts()->emit_int32(d32);
    691675}
    692676
    693677// emit 32 bit value and construct relocation entry from RelocationHolder
    694 void emit_d32_reloc(CodeBuffer& cbuf,
    695                     int d32,
    696                     RelocationHolder const& rspec,
    697                     int format)
    698 {
     678void emit_d32_reloc(CodeBuffer& cbuf, int d32, RelocationHolder const& rspec, int format) {
    699679#ifdef ASSERT
    700680  if (rspec.reloc()->type() == relocInfo::oop_type &&
     
    703683  }
    704684#endif
    705   cbuf.relocate(cbuf.inst_mark(), rspec, format);
    706 
    707   *((int* )(cbuf.code_end())) = d32;
    708   cbuf.set_code_end(cbuf.code_end() + 4);
     685  cbuf.relocate(cbuf.insts_mark(), rspec, format);
     686  cbuf.insts()->emit_int32(d32);
    709687}
    710688
    711689void emit_d32_reloc(CodeBuffer& cbuf, address addr) {
    712   address next_ip = cbuf.code_end() + 4;
     690  address next_ip = cbuf.insts_end() + 4;
    713691  emit_d32_reloc(cbuf, (int) (addr - next_ip),
    714692                 external_word_Relocation::spec(addr),
     
    718696
    719697// emit 64 bit value and construct relocation entry from relocInfo::relocType
    720 void emit_d64_reloc(CodeBuffer& cbuf,
    721                     int64_t d64,
    722                     relocInfo::relocType reloc,
    723                     int format)
    724 {
    725   cbuf.relocate(cbuf.inst_mark(), reloc, format);
    726 
    727   *((int64_t*) (cbuf.code_end())) = d64;
    728   cbuf.set_code_end(cbuf.code_end() + 8);
     698void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, relocInfo::relocType reloc, int format) {
     699  cbuf.relocate(cbuf.insts_mark(), reloc, format);
     700  cbuf.insts()->emit_int64(d64);
    729701}
    730702
    731703// emit 64 bit value and construct relocation entry from RelocationHolder
    732 void emit_d64_reloc(CodeBuffer& cbuf,
    733                     int64_t d64,
    734                     RelocationHolder const& rspec,
    735                     int format)
    736 {
     704void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, RelocationHolder const& rspec, int format) {
    737705#ifdef ASSERT
    738706  if (rspec.reloc()->type() == relocInfo::oop_type &&
     
    742710  }
    743711#endif
    744   cbuf.relocate(cbuf.inst_mark(), rspec, format);
    745 
    746   *((int64_t*) (cbuf.code_end())) = d64;
    747   cbuf.set_code_end(cbuf.code_end() + 8);
     712  cbuf.relocate(cbuf.insts_mark(), rspec, format);
     713  cbuf.insts()->emit_int64(d64);
    748714}
    749715
     
    868834
    869835//=============================================================================
     836const bool Matcher::constant_table_absolute_addressing = true;
     837const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
     838
     839void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
     840  // Empty encoding
     841}
     842
     843uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
     844  return 0;
     845}
     846
     847#ifndef PRODUCT
     848void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
     849  st->print("# MachConstantBaseNode (empty encoding)");
     850}
     851#endif
     852
     853
     854//=============================================================================
    870855#ifndef PRODUCT
    871856void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const
     
    967952  }
    968953
    969   C->set_frame_complete(cbuf.code_end() - cbuf.code_begin());
     954  C->set_frame_complete(cbuf.insts_size());
    970955
    971956#ifdef ASSERT
     
    10511036    // testl %rax, off(%rip) // Opcode + ModRM + Disp32 == 6 bytes
    10521037    // XXX reg_mem doesn't support RIP-relative addressing yet
    1053     cbuf.set_inst_mark();
    1054     cbuf.relocate(cbuf.inst_mark(), relocInfo::poll_return_type, 0); // XXX
     1038    cbuf.set_insts_mark();
     1039    cbuf.relocate(cbuf.insts_mark(), relocInfo::poll_return_type, 0); // XXX
    10551040    emit_opcode(cbuf, 0x85); // testl
    10561041    emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5
    1057     // cbuf.inst_mark() is beginning of instruction
     1042    // cbuf.insts_mark() is beginning of instruction
    10581043    emit_d32_reloc(cbuf, os::get_polling_page());
    10591044//                    relocInfo::poll_return_type,
     
    18151800  // jmp -5 # to self
    18161801
    1817   address mark = cbuf.inst_mark();  // get mark within main instrs section
    1818 
    1819   // Note that the code buffer's inst_mark is always relative to insts.
     1802  address mark = cbuf.insts_mark();  // get mark within main instrs section
     1803
     1804  // Note that the code buffer's insts_mark is always relative to insts.
    18201805  // That's why we must use the macroassembler to generate a stub.
    18211806  MacroAssembler _masm(&cbuf);
     
    18311816  __ jump(RuntimeAddress(__ pc()));
    18321817
    1833   // Update current stubs pointer and restore code_end.
     1818  // Update current stubs pointer and restore insts_end.
    18341819  __ end_a_stub();
    18351820}
     
    18691854{
    18701855  MacroAssembler masm(&cbuf);
    1871   uint code_size = cbuf.code_size();
     1856  uint insts_size = cbuf.insts_size();
    18721857  if (UseCompressedOops) {
    18731858    masm.load_klass(rscratch1, j_rarg0);
     
    18811866  /* WARNING these NOPs are critical so that verified entry point is properly
    18821867     4 bytes aligned for patching by NativeJump::patch_verified_entry() */
    1883   int nops_cnt = 4 - ((cbuf.code_size() - code_size) & 0x3);
     1868  int nops_cnt = 4 - ((cbuf.insts_size() - insts_size) & 0x3);
    18841869  if (OptoBreakpoint) {
    18851870    // Leave space for int3
     
    19111896{
    19121897
    1913   // Note that the code buffer's inst_mark is always relative to insts.
     1898  // Note that the code buffer's insts_mark is always relative to insts.
    19141899  // That's why we must use the macroassembler to generate a handler.
    19151900  MacroAssembler _masm(&cbuf);
     
    19181903  if (base == NULL)  return 0;  // CodeBuffer::expand failed
    19191904  int offset = __ offset();
    1920   __ jump(RuntimeAddress(OptoRuntime::exception_blob()->instructions_begin()));
     1905  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
    19211906  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
    19221907  __ end_a_stub();
     
    19341919{
    19351920
    1936   // Note that the code buffer's inst_mark is always relative to insts.
     1921  // Note that the code buffer's insts_mark is always relative to insts.
    19371922  // That's why we must use the macroassembler to generate a handler.
    19381923  MacroAssembler _masm(&cbuf);
     
    19551940  __ end_a_stub();
    19561941  return offset;
    1957 }
    1958 
    1959 static void emit_double_constant(CodeBuffer& cbuf, double x) {
    1960   int mark = cbuf.insts()->mark_off();
    1961   MacroAssembler _masm(&cbuf);
    1962   address double_address = __ double_constant(x);
    1963   cbuf.insts()->set_mark_off(mark);  // preserve mark across masm shift
    1964   emit_d32_reloc(cbuf,
    1965                  (int) (double_address - cbuf.code_end() - 4),
    1966                  internal_word_Relocation::spec(double_address),
    1967                  RELOC_DISP32);
    1968 }
    1969 
    1970 static void emit_float_constant(CodeBuffer& cbuf, float x) {
    1971   int mark = cbuf.insts()->mark_off();
    1972   MacroAssembler _masm(&cbuf);
    1973   address float_address = __ float_constant(x);
    1974   cbuf.insts()->set_mark_off(mark);  // preserve mark across masm shift
    1975   emit_d32_reloc(cbuf,
    1976                  (int) (float_address - cbuf.code_end() - 4),
    1977                  internal_word_Relocation::spec(float_address),
    1978                  RELOC_DISP32);
    19791942}
    19801943
     
    20982061{
    20992062  return can_be_java_arg(reg);
     2063}
     2064
     2065bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
      2066  // In 64-bit mode, code that uses a multiply when the
      2067  // divisor is constant is faster than the hardware
      2068  // DIV instruction (it uses MulHiL).
     2069  return false;
    21002070}
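
The new hook tells the matcher not to fall back to hand-written assembly for long division by a constant: on x86_64 the generic strength reduction (multiply by a precomputed reciprocal and keep the high half, i.e. MulHiL) beats DIV. A sketch of that transform for an assumed divisor of 10, using GCC/Clang __int128 for the wide multiply; the constants HotSpot actually emits come from the matcher, not from this code:

#include <cstdint>

int64_t div_by_10(int64_t x) {
  // ceil(2^66 / 10): shown only to illustrate the shape of the transform.
  __int128 m = (((__int128)1 << 66) + 9) / 10;
  int64_t q = (int64_t)((m * x) >> 66);   // multiply-high plus shift
  q += (x < 0);                           // correct toward zero for negative dividends
  return q;
}
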
    21012071
     
    24822452    // JMP, CALL
    24832453    Label* l = $labl$$label;
    2484     emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0);
     2454    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0);
    24852455  %}
    24862456
     
    24892459    // JMP, CALL
    24902460    Label* l = $labl$$label;
    2491     int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
     2461    int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0;
    24922462    assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
    24932463    emit_d8(cbuf, disp);
     
    25182488    $$$emit8$primary;
    25192489    emit_cc(cbuf, $secondary, $cop$$cmpcode);
    2520     emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0);
     2490    emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0);
    25212491  %}
    25222492
     
    25262496    Label *l = $labl$$label;
    25272497    emit_cc(cbuf, $primary, $cop$$cmpcode);
    2528     int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
     2498    int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0;
    25292499    assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
    25302500    emit_d8(cbuf, disp);
     
    26102580    // CALL Java_To_Interpreter
    26112581    // This is the instruction starting address for relocation info.
    2612     cbuf.set_inst_mark();
     2582    cbuf.set_insts_mark();
    26132583    $$$emit8$primary;
    26142584    // CALL directly to the runtime
    26152585    emit_d32_reloc(cbuf,
    2616                    (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
     2586                   (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
    26172587                   runtime_call_Relocation::spec(),
    26182588                   RELOC_DISP32);
     
    26202590
    26212591  enc_class preserve_SP %{
    2622     debug_only(int off0 = cbuf.code_size());
     2592    debug_only(int off0 = cbuf.insts_size());
    26232593    MacroAssembler _masm(&cbuf);
    26242594    // RBP is preserved across all calls, even compiled calls.
    26252595    // Use it to preserve RSP in places where the callee might change the SP.
    26262596    __ movptr(rbp_mh_SP_save, rsp);
    2627     debug_only(int off1 = cbuf.code_size());
     2597    debug_only(int off1 = cbuf.insts_size());
    26282598    assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
    26292599  %}
     
    26392609    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to
    26402610    // determine who we intended to call.
    2641     cbuf.set_inst_mark();
     2611    cbuf.set_insts_mark();
    26422612    $$$emit8$primary;
    26432613
    26442614    if (!_method) {
    26452615      emit_d32_reloc(cbuf,
    2646                      (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
     2616                     (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
    26472617                     runtime_call_Relocation::spec(),
    26482618                     RELOC_DISP32);
    26492619    } else if (_optimized_virtual) {
    26502620      emit_d32_reloc(cbuf,
    2651                      (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
     2621                     (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
    26522622                     opt_virtual_call_Relocation::spec(),
    26532623                     RELOC_DISP32);
    26542624    } else {
    26552625      emit_d32_reloc(cbuf,
    2656                      (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
     2626                     (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
    26572627                     static_call_Relocation::spec(),
    26582628                     RELOC_DISP32);
     
    26702640    // Generate  "movq rax, -1", placeholder instruction to load oop-info
    26712641    // emit_call_dynamic_prologue( cbuf );
    2672     cbuf.set_inst_mark();
     2642    cbuf.set_insts_mark();
    26732643
    26742644    // movq rax, -1
     
    26782648                   (int64_t) Universe::non_oop_word(),
    26792649                   oop_Relocation::spec_for_immediate(), RELOC_IMM64);
    2680     address virtual_call_oop_addr = cbuf.inst_mark();
     2650    address virtual_call_oop_addr = cbuf.insts_mark();
    26812651    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    26822652    // who we intended to call.
    2683     cbuf.set_inst_mark();
     2653    cbuf.set_insts_mark();
    26842654    $$$emit8$primary;
    26852655    emit_d32_reloc(cbuf,
    2686                    (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
     2656                   (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
    26872657                   virtual_call_Relocation::spec(virtual_call_oop_addr),
    26882658                   RELOC_DISP32);
     
    26982668
    26992669    // callq *disp(%rax)
    2700     cbuf.set_inst_mark();
     2670    cbuf.set_insts_mark();
    27012671    $$$emit8$primary;
    27022672    if (disp < 0x80) {
     
    28152785      emit_d64(cbuf, $src$$constant);
    28162786    }
    2817   %}
    2818 
    2819   enc_class load_immF(regF dst, immF con)
    2820   %{
    2821     // XXX reg_mem doesn't support RIP-relative addressing yet
    2822     emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
    2823     emit_float_constant(cbuf, $con$$constant);
    2824   %}
    2825 
    2826   enc_class load_immD(regD dst, immD con)
    2827   %{
    2828     // XXX reg_mem doesn't support RIP-relative addressing yet
    2829     emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
    2830     emit_double_constant(cbuf, $con$$constant);
    2831   %}
    2832 
    2833   enc_class load_conF (regF dst, immF con) %{    // Load float constant
    2834     emit_opcode(cbuf, 0xF3);
    2835     if ($dst$$reg >= 8) {
    2836       emit_opcode(cbuf, Assembler::REX_R);
    2837     }
    2838     emit_opcode(cbuf, 0x0F);
    2839     emit_opcode(cbuf, 0x10);
    2840     emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
    2841     emit_float_constant(cbuf, $con$$constant);
    2842   %}
    2843 
    2844   enc_class load_conD (regD dst, immD con) %{    // Load double constant
    2845     // UseXmmLoadAndClearUpper ? movsd(dst, con) : movlpd(dst, con)
    2846     emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
    2847     if ($dst$$reg >= 8) {
    2848       emit_opcode(cbuf, Assembler::REX_R);
    2849     }
    2850     emit_opcode(cbuf, 0x0F);
    2851     emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12);
    2852     emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
    2853     emit_double_constant(cbuf, $con$$constant);
    28542787  %}
    28552788
     
    29522885    emit_rm(cbuf, 0x00, $t1$$reg, 0x05 );
    29532886    emit_d32(cbuf, 0x00);
    2954   %}
    2955 
    2956   enc_class jump_enc(rRegL switch_val, rRegI dest) %{
    2957     MacroAssembler masm(&cbuf);
    2958 
    2959     Register switch_reg = as_Register($switch_val$$reg);
    2960     Register dest_reg   = as_Register($dest$$reg);
    2961     address table_base  = masm.address_table_constant(_index2label);
    2962 
    2963     // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
    2964     // to do that and the compiler is using that register as one it can allocate.
    2965     // So we build it all by hand.
    2966     // Address index(noreg, switch_reg, Address::times_1);
    2967     // ArrayAddress dispatch(table, index);
    2968 
    2969     Address dispatch(dest_reg, switch_reg, Address::times_1);
    2970 
    2971     masm.lea(dest_reg, InternalAddress(table_base));
    2972     masm.jmp(dispatch);
    2973   %}
    2974 
    2975   enc_class jump_enc_addr(rRegL switch_val, immI2 shift, immL32 offset, rRegI dest) %{
    2976     MacroAssembler masm(&cbuf);
    2977 
    2978     Register switch_reg = as_Register($switch_val$$reg);
    2979     Register dest_reg   = as_Register($dest$$reg);
    2980     address table_base  = masm.address_table_constant(_index2label);
    2981 
    2982     // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
    2983     // to do that and the compiler is using that register as one it can allocate.
    2984     // So we build it all by hand.
    2985     // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant, (int)$offset$$constant);
    2986     // ArrayAddress dispatch(table, index);
    2987 
    2988     Address dispatch(dest_reg, switch_reg, (Address::ScaleFactor)$shift$$constant, (int)$offset$$constant);
    2989 
    2990     masm.lea(dest_reg, InternalAddress(table_base));
    2991     masm.jmp(dispatch);
    2992   %}
    2993 
    2994   enc_class jump_enc_offset(rRegL switch_val, immI2 shift, rRegI dest) %{
    2995     MacroAssembler masm(&cbuf);
    2996 
    2997     Register switch_reg = as_Register($switch_val$$reg);
    2998     Register dest_reg   = as_Register($dest$$reg);
    2999     address table_base  = masm.address_table_constant(_index2label);
    3000 
    3001     // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
    3002     // to do that and the compiler is using that register as one it can allocate.
    3003     // So we build it all by hand.
    3004     // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant);
    3005     // ArrayAddress dispatch(table, index);
    3006 
    3007     Address dispatch(dest_reg, switch_reg, (Address::ScaleFactor)$shift$$constant);
    3008     masm.lea(dest_reg, InternalAddress(table_base));
    3009     masm.jmp(dispatch);
    3010 
    30112887  %}
    30122888
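The jump_enc* encoders above all follow the same shape: materialize the jump-table base with lea, then jump indirectly through base plus the (possibly scaled) switch value, because jump(ArrayAddress) would need r10 and the register allocator owns that register. A conceptual C++ model of that dispatch (indexing here is per element; the real encodings scale by $shift or treat the switch value as a byte offset, and `table` stands in for the ADLC label table, not a HotSpot name):

    // lea + indirect jmp through a table of code addresses.
    using handler_t = void (*)();

    static void dispatch(handler_t* table, long switch_val) {
      handler_t* base = table;     // leaq dest, [table_base]
      base[switch_val]();          // jmp  [dest + switch_val * sizeof(handler_t)]
    }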
     
    37303606  enc_class enc_rethrow()
    37313607  %{
    3732     cbuf.set_inst_mark();
     3608    cbuf.set_insts_mark();
    37333609    emit_opcode(cbuf, 0xE9); // jmp entry
    37343610    emit_d32_reloc(cbuf,
    3735                    (int) (OptoRuntime::rethrow_stub() - cbuf.code_end() - 4),
     3611                   (int) (OptoRuntime::rethrow_stub() - cbuf.insts_end() - 4),
    37363612                   runtime_call_Relocation::spec(),
    37373613                   RELOC_DISP32);
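The set_insts_mark/emit_d32_reloc pattern above (and in the fixup stub calls further down) computes a rel32 operand for E9/E8: the displacement is relative to the address of the next instruction, so with the opcode byte already emitted it is the target minus the current emit position minus the 4 bytes the displacement itself occupies. A small sketch of that arithmetic (names are illustrative, not the CodeBuffer API):

    #include <cstdint>

    // rel32 for jmp (E9) / call (E8): measured from the end of the instruction.
    // `code_pos` plays the role of cbuf.insts_end() right after the opcode byte.
    static int32_t rel32_disp(const uint8_t* target, const uint8_t* code_pos) {
      return static_cast<int32_t>(target - code_pos - 4);
    }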
     
    37433619    address signmask_address = (address) StubRoutines::x86::float_sign_mask();
    37443620
    3745     cbuf.set_inst_mark();
     3621    cbuf.set_insts_mark();
    37463622    if (dstenc >= 8) {
    37473623      emit_opcode(cbuf, Assembler::REX_R);
     
    37603636    address signmask_address = (address) StubRoutines::x86::double_sign_mask();
    37613637
    3762     cbuf.set_inst_mark();
     3638    cbuf.set_insts_mark();
    37633639    emit_opcode(cbuf, 0x66);
    37643640    if (dstenc >= 8) {
     
    37783654    address signflip_address = (address) StubRoutines::x86::float_sign_flip();
    37793655
    3780     cbuf.set_inst_mark();
     3656    cbuf.set_insts_mark();
    37813657    if (dstenc >= 8) {
    37823658      emit_opcode(cbuf, Assembler::REX_R);
     
    37953671    address signflip_address = (address) StubRoutines::x86::double_sign_flip();
    37963672
    3797     cbuf.set_inst_mark();
     3673    cbuf.set_insts_mark();
    37983674    emit_opcode(cbuf, 0x66);
    37993675    if (dstenc >= 8) {
     
    38473723
    38483724    // call f2i_fixup
    3849     cbuf.set_inst_mark();
     3725    cbuf.set_insts_mark();
    38503726    emit_opcode(cbuf, 0xE8);
    38513727    emit_d32_reloc(cbuf,
    38523728                   (int)
    3853                    (StubRoutines::x86::f2i_fixup() - cbuf.code_end() - 4),
     3729                   (StubRoutines::x86::f2i_fixup() - cbuf.insts_end() - 4),
    38543730                   runtime_call_Relocation::spec(),
    38553731                   RELOC_DISP32);
     
    38713747
    38723748    // cmpq $dst, [0x8000000000000000]
    3873     cbuf.set_inst_mark();
     3749    cbuf.set_insts_mark();
    38743750    emit_opcode(cbuf, dstenc < 8 ? Assembler::REX_W : Assembler::REX_WR);
    38753751    emit_opcode(cbuf, 0x39);
     
    39053781
    39063782    // call f2l_fixup
    3907     cbuf.set_inst_mark();
     3783    cbuf.set_insts_mark();
    39083784    emit_opcode(cbuf, 0xE8);
    39093785    emit_d32_reloc(cbuf,
    39103786                   (int)
    3911                    (StubRoutines::x86::f2l_fixup() - cbuf.code_end() - 4),
     3787                   (StubRoutines::x86::f2l_fixup() - cbuf.insts_end() - 4),
    39123788                   runtime_call_Relocation::spec(),
    39133789                   RELOC_DISP32);
     
    39613837
    39623838    // call d2i_fixup
    3963     cbuf.set_inst_mark();
     3839    cbuf.set_insts_mark();
    39643840    emit_opcode(cbuf, 0xE8);
    39653841    emit_d32_reloc(cbuf,
    39663842                   (int)
    3967                    (StubRoutines::x86::d2i_fixup() - cbuf.code_end() - 4),
     3843                   (StubRoutines::x86::d2i_fixup() - cbuf.insts_end() - 4),
    39683844                   runtime_call_Relocation::spec(),
    39693845                   RELOC_DISP32);
     
    39853861
    39863862    // cmpq $dst, [0x8000000000000000]
    3987     cbuf.set_inst_mark();
     3863    cbuf.set_insts_mark();
    39883864    emit_opcode(cbuf, dstenc < 8 ? Assembler::REX_W : Assembler::REX_WR);
    39893865    emit_opcode(cbuf, 0x39);
     
    40193895
    40203896    // call d2l_fixup
    4021     cbuf.set_inst_mark();
     3897    cbuf.set_insts_mark();
    40223898    emit_opcode(cbuf, 0xE8);
    40233899    emit_d32_reloc(cbuf,
    40243900                   (int)
    4025                    (StubRoutines::x86::d2l_fixup() - cbuf.code_end() - 4),
     3901                   (StubRoutines::x86::d2l_fixup() - cbuf.insts_end() - 4),
    40263902                   runtime_call_Relocation::spec(),
    40273903                   RELOC_DISP32);
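Context for the f2i/f2l/d2i/d2l fixup calls above: cvttss2si/cvttsd2si produce the "integer indefinite" value (0x80000000 or 0x8000000000000000) for NaN and out-of-range inputs, which is why the emitted code compares the raw result against that sentinel before calling the fixup stub that restores Java's saturating conversion semantics. A plain C++ sketch of the behavior being restored (a behavioral model, not the stub itself):

    #include <cmath>
    #include <cstdint>
    #include <limits>

    static int32_t java_f2i(float f) {
      if (std::isnan(f)) return 0;                                   // NaN -> 0
      if (f >= 2147483648.0f)                                        // >= 2^31
        return std::numeric_limits<int32_t>::max();
      if (f <= -2147483648.0f)                                       // <= -2^31
        return std::numeric_limits<int32_t>::min();
      return static_cast<int32_t>(f);                                // truncate toward zero
    }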
     
    40433919    // testl %rax, off(%rip) // Opcode + ModRM + Disp32 == 6 bytes
    40443920    // XXX reg_mem doesn't support RIP-relative addressing yet
    4045     cbuf.set_inst_mark();
    4046     cbuf.relocate(cbuf.inst_mark(), relocInfo::poll_type, 0); // XXX
     3921    cbuf.set_insts_mark();
     3922    cbuf.relocate(cbuf.insts_mark(), relocInfo::poll_type, 0); // XXX
    40473923    emit_opcode(cbuf, 0x85); // testl
    40483924    emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5
    4049     // cbuf.inst_mark() is beginning of instruction
     3925    // cbuf.insts_mark() is beginning of instruction
    40503926    emit_d32_reloc(cbuf, os::get_polling_page());
    40513927//                    relocInfo::poll_type,
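The polling encoder above notes why the sequence is exactly six bytes: one opcode, one ModRM byte selecting RIP-relative addressing, and a four-byte displacement, recorded with a poll_type relocation, that reaches the VM's polling page. A byte-level sketch (buffer type and names are placeholders, not the HotSpot CodeBuffer):

    #include <cstdint>
    #include <vector>

    // testl %eax, disp32(%rip) -- 85 05 xx xx xx xx (6 bytes total)
    static void emit_safepoint_poll(std::vector<uint8_t>& buf, int32_t page_disp32) {
      buf.push_back(0x85);                       // testl r/m32, r32
      buf.push_back(0x05);                       // ModRM 00 000 101: reg = eax, RIP-relative
      uint32_t d = uint32_t(page_disp32);
      for (int i = 0; i < 4; ++i)                // disp32, covered by the poll relocation
        buf.push_back(uint8_t(d >> (8 * i)));
    }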
     
    66696545%}
    66706546
    6671 instruct loadConP(rRegP dst, immP src)
    6672 %{
    6673   match(Set dst src);
    6674 
    6675   format %{ "movq    $dst, $src\t# ptr" %}
    6676   ins_encode(load_immP(dst, src));
     6547instruct loadConP(rRegP dst, immP con) %{
     6548  match(Set dst con);
     6549
     6550  format %{ "movq    $dst, $con\t# ptr" %}
     6551  ins_encode(load_immP(dst, con));
    66776552  ins_pipe(ialu_reg_fat); // XXX
    66786553%}
     
    67016576%}
    67026577
    6703 instruct loadConF(regF dst, immF src)
    6704 %{
    6705   match(Set dst src);
     6578instruct loadConF(regF dst, immF con) %{
     6579  match(Set dst con);
    67066580  ins_cost(125);
    6707 
    6708   format %{ "movss   $dst, [$src]" %}
    6709   ins_encode(load_conF(dst, src));
     6581  format %{ "movss   $dst, [$constantaddress]\t# load from constant table: float=$con" %}
     6582  ins_encode %{
     6583    __ movflt($dst$$XMMRegister, $constantaddress($con));
     6584  %}
    67106585  ins_pipe(pipe_slow);
    67116586%}
     
    67496624
    67506625// Use the same format since predicate() can not be used here.
    6751 instruct loadConD(regD dst, immD src)
    6752 %{
    6753   match(Set dst src);
     6626instruct loadConD(regD dst, immD con) %{
     6627  match(Set dst con);
    67546628  ins_cost(125);
    6755 
    6756   format %{ "movsd   $dst, [$src]" %}
    6757   ins_encode(load_conD(dst, src));
     6629  format %{ "movsd   $dst, [$constantaddress]\t# load from constant table: double=$con" %}
     6630  ins_encode %{
     6631    __ movdbl($dst$$XMMRegister, $constantaddress($con));
     6632  %}
    67586633  ins_pipe(pipe_slow);
    67596634%}
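The new loadConF/loadConD bodies above replace the hand-emitted, inline-constant encodings with MacroAssembler loads from the method's constant table ($constantaddress), reached RIP-relatively. The underlying reason is that SSE has no form that moves a float or double immediate directly into an XMM register, so the bit pattern has to live in memory. A conceptual C++ model of that arrangement (the pools and names below are illustrative, not HotSpot's constant-table machinery):

    #include <cstddef>

    // A per-method constant area; movss/movsd then load from it RIP-relatively.
    static const float  float_pool[]  = { 1.5f, 3.0f };
    static const double double_pool[] = { 2.5, 6.25 };

    static float  load_conF(std::size_t idx) { return float_pool[idx];  }  // ~ movss xmm, [rip+off]
    static double load_conD(std::size_t idx) { return double_pool[idx]; }  // ~ movsd xmm, [rip+off]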
     
    73837258  ins_pipe( ialu_reg );
    73847259%}
    7385 
    7386 instruct loadI_reversed(rRegI dst, memory src) %{
    7387   match(Set dst (ReverseBytesI (LoadI src)));
    7388 
    7389   format %{ "bswap_movl $dst, $src" %}
    7390   opcode(0x8B, 0x0F, 0xC8); /* Opcode 8B 0F C8 */
    7391   ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src), REX_reg(dst), OpcS, opc3_reg(dst));
    7392   ins_pipe( ialu_reg_mem );
    7393 %}
    7394 
    7395 instruct loadL_reversed(rRegL dst, memory src) %{
    7396   match(Set dst (ReverseBytesL (LoadL src)));
    7397 
    7398   format %{ "bswap_movq $dst, $src" %}
    7399   opcode(0x8B, 0x0F, 0xC8); /* Opcode 8B 0F C8 */
    7400   ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src), REX_reg_wide(dst), OpcS, opc3_reg(dst));
    7401   ins_pipe( ialu_reg_mem );
    7402 %}
    7403 
    7404 instruct storeI_reversed(memory dst, rRegI src) %{
    7405   match(Set dst (StoreI dst (ReverseBytesI  src)));
    7406 
    7407   format %{ "movl_bswap $dst, $src" %}
    7408   opcode(0x0F, 0xC8, 0x89); /* Opcode 0F C8 89 */
    7409   ins_encode( REX_reg(src), OpcP, opc2_reg(src), REX_reg_mem(src, dst), OpcT, reg_mem(src, dst) );
    7410   ins_pipe( ialu_mem_reg );
    7411 %}
    7412 
    7413 instruct storeL_reversed(memory dst, rRegL src) %{
    7414   match(Set dst (StoreL dst (ReverseBytesL  src)));
    7415 
    7416   format %{ "movq_bswap $dst, $src" %}
    7417   opcode(0x0F, 0xC8, 0x89); /* Opcode 0F C8 89 */
    7418   ins_encode( REX_reg_wide(src), OpcP, opc2_reg(src), REX_reg_mem_wide(src, dst), OpcT, reg_mem(src, dst) );
    7419   ins_pipe( ialu_mem_reg );
    7420 %}
    7421 
    74227260
    74237261//---------- Zeros Count Instructions ------------------------------------------
     
    77597597  effect(TEMP dest);
    77607598
    7761   format %{ "leaq    $dest, table_base\n\t"
     7599  format %{ "leaq    $dest, [$constantaddress]\n\t"
    77627600            "jmp     [$dest + $switch_val << $shift]\n\t" %}
    7763   ins_encode(jump_enc_offset(switch_val, shift, dest));
     7601  ins_encode %{
     7602    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
     7603    // to do that and the compiler is using that register as one it can allocate.
     7604    // So we build it all by hand.
     7605    // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant);
     7606    // ArrayAddress dispatch(table, index);
     7607    Address dispatch($dest$$Register, $switch_val$$Register, (Address::ScaleFactor) $shift$$constant);
     7608    __ lea($dest$$Register, $constantaddress);
     7609    __ jmp(dispatch);
     7610  %}
    77647611  ins_pipe(pipe_jmp);
    77657612  ins_pc_relative(1);
     
    77717618  effect(TEMP dest);
    77727619
    7773   format %{ "leaq    $dest, table_base\n\t"
     7620  format %{ "leaq    $dest, [$constantaddress]\n\t"
    77747621            "jmp     [$dest + $switch_val << $shift + $offset]\n\t" %}
    7775   ins_encode(jump_enc_addr(switch_val, shift, offset, dest));
     7622  ins_encode %{
     7623    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
     7624    // to do that and the compiler is using that register as one it can allocate.
     7625    // So we build it all by hand.
     7626    // Address index(noreg, switch_reg, (Address::ScaleFactor) $shift$$constant, (int) $offset$$constant);
     7627    // ArrayAddress dispatch(table, index);
     7628    Address dispatch($dest$$Register, $switch_val$$Register, (Address::ScaleFactor) $shift$$constant, (int) $offset$$constant);
     7629    __ lea($dest$$Register, $constantaddress);
     7630    __ jmp(dispatch);
     7631  %}
    77767632  ins_pipe(pipe_jmp);
    77777633  ins_pc_relative(1);
     
    77837639  effect(TEMP dest);
    77847640
    7785   format %{ "leaq    $dest, table_base\n\t"
     7641  format %{ "leaq    $dest, [$constantaddress]\n\t"
    77867642            "jmp     [$dest + $switch_val]\n\t" %}
    7787   ins_encode(jump_enc(switch_val, dest));
     7643  ins_encode %{
     7644    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
     7645    // to do that and the compiler is using that register as one it can allocate.
     7646    // So we build it all by hand.
     7647    // Address index(noreg, switch_reg, Address::times_1);
     7648    // ArrayAddress dispatch(table, index);
     7649    Address dispatch($dest$$Register, $switch_val$$Register, Address::times_1);
     7650    __ lea($dest$$Register, $constantaddress);
     7651    __ jmp(dispatch);
     7652  %}
    77887653  ins_pipe(pipe_jmp);
    77897654  ins_pc_relative(1);
     
    1044110306%}
    1044210307
    10443 instruct cmpF_cc_imm(rFlagsRegU cr, regF src1, immF src2)
    10444 %{
    10445   match(Set cr (CmpF src1 src2));
     10308instruct cmpF_cc_imm(rFlagsRegU cr, regF src, immF con) %{
     10309  match(Set cr (CmpF src con));
    1044610310
    1044710311  ins_cost(145);
    10448   format %{ "ucomiss $src1, $src2\n\t"
     10312  format %{ "ucomiss $src, [$constantaddress]\t# load from constant table: float=$con\n\t"
    1044910313            "jnp,s   exit\n\t"
    1045010314            "pushfq\t# saw NaN, set CF\n\t"
     
    1045210316            "popfq\n"
    1045310317    "exit:   nop\t# avoid branch to branch" %}
    10454   opcode(0x0F, 0x2E);
    10455   ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2),
    10456              cmpfp_fixup);
     10318  ins_encode %{
     10319    Label L_exit;
     10320    __ ucomiss($src$$XMMRegister, $constantaddress($con));
     10321    __ jcc(Assembler::noParity, L_exit);
     10322    __ pushf();
     10323    __ andq(rsp, 0xffffff2b);
     10324    __ popf();
     10325    __ bind(L_exit);
     10326    __ nop();
     10327  %}
    1045710328  ins_pipe(pipe_slow);
    1045810329%}
    1045910330
    10460 instruct cmpF_cc_immCF(rFlagsRegUCF cr, regF src1, immF src2) %{
    10461   match(Set cr (CmpF src1 src2));
    10462 
     10331instruct cmpF_cc_immCF(rFlagsRegUCF cr, regF src, immF con) %{
     10332  match(Set cr (CmpF src con));
    1046310333  ins_cost(100);
    10464   format %{ "ucomiss $src1, $src2" %}
    10465   opcode(0x0F, 0x2E);
    10466   ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2));
     10334  format %{ "ucomiss $src, [$constantaddress]\t# load from constant table: float=$con" %}
     10335  ins_encode %{
     10336    __ ucomiss($src$$XMMRegister, $constantaddress($con));
     10337  %}
    1046710338  ins_pipe(pipe_slow);
    1046810339%}
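In the rewritten cmpF_cc_imm/cmpF_cc_immCF bodies above (and the cmpD versions that follow), ucomiss/ucomisd set ZF, PF and CF all to 1 for an unordered (NaN) comparison; the fixup path therefore tests PF and, when a NaN was seen, clears ZF and PF in the saved flags so the result reads as "less than" while CF stays set. A conceptual sketch of that adjustment applied to a saved RFLAGS value (plain C++, not the emitted pushfq/popfq sequence):

    #include <cstdint>

    // RFLAGS bit positions: CF = bit 0, PF = bit 2, ZF = bit 6.
    static uint64_t fixup_unordered_flags(uint64_t saved_rflags) {
      // Low 32 bits of the mask the encoder uses: bits 2 (PF) and 6 (ZF) are
      // zero, bit 0 (CF) stays set, so NaN compares as "less than".
      return saved_rflags & 0xffffff2bULL;
    }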
     
    1052310394%}
    1052410395
    10525 instruct cmpD_cc_imm(rFlagsRegU cr, regD src1, immD src2)
    10526 %{
    10527   match(Set cr (CmpD src1 src2));
     10396instruct cmpD_cc_imm(rFlagsRegU cr, regD src, immD con) %{
     10397  match(Set cr (CmpD src con));
    1052810398
    1052910399  ins_cost(145);
    10530   format %{ "ucomisd $src1, [$src2]\n\t"
     10400  format %{ "ucomisd $src, [$constantaddress]\t# load from constant table: double=$con\n\t"
    1053110401            "jnp,s   exit\n\t"
    1053210402            "pushfq\t# saw NaN, set CF\n\t"
     
    1053410404            "popfq\n"
    1053510405    "exit:   nop\t# avoid branch to branch" %}
    10536   opcode(0x66, 0x0F, 0x2E);
    10537   ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, load_immD(src1, src2),
    10538              cmpfp_fixup);
     10406  ins_encode %{
     10407    Label L_exit;
     10408    __ ucomisd($src$$XMMRegister, $constantaddress($con));
     10409    __ jcc(Assembler::noParity, L_exit);
     10410    __ pushf();
     10411    __ andq(rsp, 0xffffff2b);
     10412    __ popf();
     10413    __ bind(L_exit);
     10414    __ nop();
     10415  %}
    1053910416  ins_pipe(pipe_slow);
    1054010417%}
    1054110418
    10542 instruct cmpD_cc_immCF(rFlagsRegUCF cr, regD src1, immD src2) %{
    10543   match(Set cr (CmpD src1 src2));
    10544 
     10419instruct cmpD_cc_immCF(rFlagsRegUCF cr, regD src, immD con) %{
     10420  match(Set cr (CmpD src con));
    1054510421  ins_cost(100);
    10546   format %{ "ucomisd $src1, [$src2]" %}
    10547   opcode(0x66, 0x0F, 0x2E);
    10548   ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, load_immD(src1, src2));
     10422  format %{ "ucomisd $src, [$constantaddress]\t# load from constant table: double=$con" %}
     10423  ins_encode %{
     10424    __ ucomisd($src$$XMMRegister, $constantaddress($con));
     10425  %}
    1054910426  ins_pipe(pipe_slow);
    1055010427%}
     
    1059310470
    1059410471// Compare into -1,0,1
    10595 instruct cmpF_imm(rRegI dst, regF src1, immF src2, rFlagsReg cr)
    10596 %{
    10597   match(Set dst (CmpF3 src1 src2));
     10472instruct cmpF_imm(rRegI dst, regF src, immF con, rFlagsReg cr) %{
     10473  match(Set dst (CmpF3 src con));
    1059810474  effect(KILL cr);
    1059910475
    1060010476  ins_cost(275);
    10601   format %{ "ucomiss $src1, [$src2]\n\t"
     10477  format %{ "ucomiss $src, [$constantaddress]\t# load from constant table: float=$con\n\t"
    1060210478            "movl    $dst, #-1\n\t"
    1060310479            "jp,s    done\n\t"
     
    1060610482            "movzbl  $dst, $dst\n"
    1060710483    "done:" %}
    10608 
    10609   opcode(0x0F, 0x2E);
    10610   ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2),
    10611              cmpfp3(dst));
     10484  ins_encode %{
     10485    Label L_done;
     10486    Register Rdst = $dst$$Register;
     10487    __ ucomiss($src$$XMMRegister, $constantaddress($con));
     10488    __ movl(Rdst, -1);
     10489    __ jcc(Assembler::parity, L_done);
     10490    __ jcc(Assembler::below, L_done);
     10491    __ setb(Assembler::notEqual, Rdst);
     10492    __ movzbl(Rdst, Rdst);
     10493    __ bind(L_done);
     10494  %}
    1061210495  ins_pipe(pipe_slow);
    1061310496%}
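The cmpF_imm body above (and cmpD_imm below) folds the unordered case into the -1/0/+1 result: the destination is preloaded with -1, the parity (NaN) and below branches both keep it, and otherwise setne/movzbl produce 0 or 1. The same mapping in plain C++, for reference (a behavioral sketch, not the emitted code):

    #include <cmath>

    static int cmp3(float a, float b) {
      if (std::isnan(a) || std::isnan(b)) return -1;  // jp taken: unordered
      if (a < b)  return -1;                          // jb taken (CF set)
      return (a != b) ? 1 : 0;                        // setne + movzbl
    }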
     
    1065610539
    1065710540// Compare into -1,0,1
    10658 instruct cmpD_imm(rRegI dst, regD src1, immD src2, rFlagsReg cr)
    10659 %{
    10660   match(Set dst (CmpD3 src1 src2));
     10541instruct cmpD_imm(rRegI dst, regD src, immD con, rFlagsReg cr) %{
     10542  match(Set dst (CmpD3 src con));
    1066110543  effect(KILL cr);
    1066210544
    1066310545  ins_cost(275);
    10664   format %{ "ucomisd $src1, [$src2]\n\t"
     10546  format %{ "ucomisd $src, [$constantaddress]\t# load from constant table: double=$con\n\t"
    1066510547            "movl    $dst, #-1\n\t"
    1066610548            "jp,s    done\n\t"
     
    1066910551            "movzbl  $dst, $dst\n"
    1067010552    "done:" %}
    10671 
    10672   opcode(0x66, 0x0F, 0x2E);
    10673   ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, load_immD(src1, src2),
    10674              cmpfp3(dst));
     10553  ins_encode %{
     10554    Register Rdst = $dst$$Register;
     10555    Label L_done;
     10556    __ ucomisd($src$$XMMRegister, $constantaddress($con));
     10557    __ movl(Rdst, -1);
     10558    __ jcc(Assembler::parity, L_done);
     10559    __ jcc(Assembler::below, L_done);
     10560    __ setb(Assembler::notEqual, Rdst);
     10561    __ movzbl(Rdst, Rdst);
     10562    __ bind(L_done);
     10563  %}
    1067510564  ins_pipe(pipe_slow);
    1067610565%}
     
    1069810587%}
    1069910588
    10700 instruct addF_imm(regF dst, immF src)
    10701 %{
    10702   match(Set dst (AddF dst src));
    10703 
    10704   format %{ "addss   $dst, [$src]" %}
     10589instruct addF_imm(regF dst, immF con) %{
     10590  match(Set dst (AddF dst con));
     10591  format %{ "addss   $dst, [$constantaddress]\t# load from constant table: float=$con" %}
    1070510592  ins_cost(150); // XXX
    10706   opcode(0xF3, 0x0F, 0x58);
    10707   ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
     10593  ins_encode %{
     10594    __ addss($dst$$XMMRegister, $constantaddress($con));
     10595  %}
    1070810596  ins_pipe(pipe_slow);
    1070910597%}
     
    1073110619%}
    1073210620
    10733 instruct addD_imm(regD dst, immD src)
    10734 %{
    10735   match(Set dst (AddD dst src));
    10736 
    10737   format %{ "addsd   $dst, [$src]" %}
     10621instruct addD_imm(regD dst, immD con) %{
     10622  match(Set dst (AddD dst con));
     10623  format %{ "addsd   $dst, [$constantaddress]\t# load from constant table: double=$con" %}
    1073810624  ins_cost(150); // XXX
    10739   opcode(0xF2, 0x0F, 0x58);
    10740   ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
     10625  ins_encode %{
     10626    __ addsd($dst$$XMMRegister, $constantaddress($con));
     10627  %}
    1074110628  ins_pipe(pipe_slow);
    1074210629%}
     
    1076410651%}
    1076510652
    10766 instruct subF_imm(regF dst, immF src)
    10767 %{
    10768   match(Set dst (SubF dst src));
    10769 
    10770   format %{ "subss   $dst, [$src]" %}
     10653instruct subF_imm(regF dst, immF con) %{
     10654  match(Set dst (SubF dst con));
     10655  format %{ "subss   $dst, [$constantaddress]\t# load from constant table: float=$con" %}
    1077110656  ins_cost(150); // XXX
    10772   opcode(0xF3, 0x0F, 0x5C);
    10773   ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
     10657  ins_encode %{
     10658    __ subss($dst$$XMMRegister, $constantaddress($con));
     10659  %}
    1077410660  ins_pipe(pipe_slow);
    1077510661%}
     
    1079710683%}
    1079810684
    10799 instruct subD_imm(regD dst, immD src)
    10800 %{
    10801   match(Set dst (SubD dst src));
    10802 
    10803   format %{ "subsd   $dst, [$src]" %}
     10685instruct subD_imm(regD dst, immD con) %{
     10686  match(Set dst (SubD dst con));
     10687  format %{ "subsd   $dst, [$constantaddress]\t# load from constant table: double=$con" %}
    1080410688  ins_cost(150); // XXX
    10805   opcode(0xF2, 0x0F, 0x5C);
    10806   ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
     10689  ins_encode %{
     10690    __ subsd($dst$$XMMRegister, $constantaddress($con));
     10691  %}
    1080710692  ins_pipe(pipe_slow);
    1080810693%}
     
    1083010715%}
    1083110716
    10832 instruct mulF_imm(regF dst, immF src)
    10833 %{
    10834   match(Set dst (MulF dst src));
    10835 
    10836   format %{ "mulss   $dst, [$src]" %}
     10717instruct mulF_imm(regF dst, immF con) %{
     10718  match(Set dst (MulF dst con));
     10719  format %{ "mulss   $dst, [$constantaddress]\t# load from constant table: float=$con" %}
    1083710720  ins_cost(150); // XXX
    10838   opcode(0xF3, 0x0F, 0x59);
    10839   ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
     10721  ins_encode %{
     10722    __ mulss($dst$$XMMRegister, $constantaddress($con));
     10723  %}
    1084010724  ins_pipe(pipe_slow);
    1084110725%}
     
    1086310747%}
    1086410748
    10865 instruct mulD_imm(regD dst, immD src)
    10866 %{
    10867   match(Set dst (MulD dst src));
    10868 
    10869   format %{ "mulsd   $dst, [$src]" %}
     10749instruct mulD_imm(regD dst, immD con) %{
     10750  match(Set dst (MulD dst con));
     10751  format %{ "mulsd   $dst, [$constantaddress]\t# load from constant table: double=$con" %}
    1087010752  ins_cost(150); // XXX
    10871   opcode(0xF2, 0x0F, 0x59);
    10872   ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
     10753  ins_encode %{
     10754    __ mulsd($dst$$XMMRegister, $constantaddress($con));
     10755  %}
    1087310756  ins_pipe(pipe_slow);
    1087410757%}
     
    1089610779%}
    1089710780
    10898 instruct divF_imm(regF dst, immF src)
    10899 %{
    10900   match(Set dst (DivF dst src));
    10901 
    10902   format %{ "divss   $dst, [$src]" %}
     10781instruct divF_imm(regF dst, immF con) %{
     10782  match(Set dst (DivF dst con));
     10783  format %{ "divss   $dst, [$constantaddress]\t# load from constant table: float=$con" %}
    1090310784  ins_cost(150); // XXX
    10904   opcode(0xF3, 0x0F, 0x5E);
    10905   ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
     10785  ins_encode %{
     10786    __ divss($dst$$XMMRegister, $constantaddress($con));
     10787  %}
    1090610788  ins_pipe(pipe_slow);
    1090710789%}
     
    1092910811%}
    1093010812
    10931 instruct divD_imm(regD dst, immD src)
    10932 %{
    10933   match(Set dst (DivD dst src));
    10934 
    10935   format %{ "divsd   $dst, [$src]" %}
     10813instruct divD_imm(regD dst, immD con) %{
     10814  match(Set dst (DivD dst con));
     10815  format %{ "divsd   $dst, [$constantaddress]\t# load from constant table: double=$con" %}
    1093610816  ins_cost(150); // XXX
    10937   opcode(0xF2, 0x0F, 0x5E);
    10938   ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
     10817  ins_encode %{
     10818    __ divsd($dst$$XMMRegister, $constantaddress($con));
     10819  %}
    1093910820  ins_pipe(pipe_slow);
    1094010821%}
     
    1096210843%}
    1096310844
    10964 instruct sqrtF_imm(regF dst, immF src)
    10965 %{
    10966   match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
    10967 
    10968   format %{ "sqrtss  $dst, [$src]" %}
     10845instruct sqrtF_imm(regF dst, immF con) %{
     10846  match(Set dst (ConvD2F (SqrtD (ConvF2D con))));
     10847  format %{ "sqrtss  $dst, [$constantaddress]\t# load from constant table: float=$con" %}
    1096910848  ins_cost(150); // XXX
    10970   opcode(0xF3, 0x0F, 0x51);
    10971   ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
     10849  ins_encode %{
     10850    __ sqrtss($dst$$XMMRegister, $constantaddress($con));
     10851  %}
    1097210852  ins_pipe(pipe_slow);
    1097310853%}
     
    1099510875%}
    1099610876
    10997 instruct sqrtD_imm(regD dst, immD src)
    10998 %{
    10999   match(Set dst (SqrtD src));
    11000 
    11001   format %{ "sqrtsd  $dst, [$src]" %}
     10877instruct sqrtD_imm(regD dst, immD con) %{
     10878  match(Set dst (SqrtD con));
     10879  format %{ "sqrtsd  $dst, [$constantaddress]\t# load from constant table: double=$con" %}
    1100210880  ins_cost(150); // XXX
    11003   opcode(0xF2, 0x0F, 0x51);
    11004   ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
     10881  ins_encode %{
     10882    __ sqrtsd($dst$$XMMRegister, $constantaddress($con));
     10883  %}
    1100510884  ins_pipe(pipe_slow);
    1100610885%}
     
    1170511584%}
    1170611585
    11707 instruct string_compare(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rbx_RegI cnt2,
    11708                         rax_RegI result, regD tmp1, regD tmp2, rFlagsReg cr)
     11586instruct string_compare(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2,
     11587                        rax_RegI result, regD tmp1, rFlagsReg cr)
    1170911588%{
    1171011589  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
    11711   effect(TEMP tmp1, TEMP tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
    11712 
    11713   format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   // KILL $tmp1, $tmp2" %}
     11590  effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
     11591
     11592  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   // KILL $tmp1" %}
    1171411593  ins_encode %{
    1171511594    __ string_compare($str1$$Register, $str2$$Register,
    1171611595                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
    11717                       $tmp1$$XMMRegister, $tmp2$$XMMRegister);
     11596                      $tmp1$$XMMRegister);
    1171811597  %}
    1171911598  ins_pipe( pipe_slow );
     
    1230512184    if ($cop$$cmpcode == Assembler::notEqual) {
    1230612185       // the two jumps 6 bytes apart so the jump distances are too
    12307        parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0;
     12186       parity_disp = l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0;
    1230812187    } else if ($cop$$cmpcode == Assembler::equal) {
    1230912188       parity_disp = 6;
     
    1231412193    $$$emit8$primary;
    1231512194    emit_cc(cbuf, $secondary, $cop$$cmpcode);
    12316     int disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0;
     12195    int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0;
    1231712196    emit_d32(cbuf, disp);
    1231812197  %}
     
    1250912388    int parity_disp = -1;
    1251012389    if ($cop$$cmpcode == Assembler::notEqual) {
    12511       parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
     12390      parity_disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0;
    1251212391    } else if ($cop$$cmpcode == Assembler::equal) {
    1251312392      parity_disp = 2;
     
    1251712396    emit_d8(cbuf, parity_disp);
    1251812397    emit_cc(cbuf, $primary, $cop$$cmpcode);
    12519     int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
     12398    int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0;
    1252012399    emit_d8(cbuf, disp);
    1252112400    assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
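The jcc encoders above compute label displacements the same way for the long and short forms: the displacement is measured from the end of the branch instruction, so it is the label position minus the current code size minus the width of the displacement field (4 bytes for the 32-bit form, 1 byte for the 8-bit form, which must also fit in -128..127). A small sketch of that arithmetic (illustrative names, not the Label/CodeBuffer API):

    #include <cassert>

    static int branch_disp(int label_pos, int code_size, int disp_bytes) {
      int disp = label_pos - (code_size + disp_bytes);
      if (disp_bytes == 1)
        assert(-128 <= disp && disp <= 127 && "Displacement too large for short jmp");
      return disp;
    }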