Changeset 309 for trunk/openjdk/hotspot/src/cpu/x86
- Timestamp:
- Feb 13, 2012, 10:07:12 PM
- Location:
- trunk/openjdk
- Files:
- 99 edited
-
trunk/openjdk
- Property svn:mergeinfo changed
/branches/vendor/oracle/openjdk6/b24 (added) merged: 308
/branches/vendor/oracle/openjdk6/current merged: 307
-
trunk/openjdk/hotspot/src/cpu/x86/vm/assembler_x86.cpp
r278 r309 1 1 /* 2 * Copyright (c) 1997, 201 0, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_assembler_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "assembler_x86.inline.hpp" 27 #include "gc_interface/collectedHeap.inline.hpp" 28 #include "interpreter/interpreter.hpp" 29 #include "memory/cardTableModRefBS.hpp" 30 #include "memory/resourceArea.hpp" 31 #include "prims/methodHandles.hpp" 32 #include "runtime/biasedLocking.hpp" 33 #include "runtime/interfaceSupport.hpp" 34 #include "runtime/objectMonitor.hpp" 35 #include "runtime/os.hpp" 36 #include "runtime/sharedRuntime.hpp" 37 #include "runtime/stubRoutines.hpp" 38 #ifndef SERIALGC 39 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" 40 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" 41 #include "gc_implementation/g1/heapRegion.hpp" 42 #endif 27 43 28 44 // Implementation of AddressLiteral … … 805 821 806 822 807 // Now the Assembler instruction (identical for 32/64 bits) 823 // Now the Assembler instructions (identical for 32/64 bits) 824 825 void Assembler::adcl(Address dst, int32_t imm32) { 826 InstructionMark im(this); 827 prefix(dst); 828 emit_arith_operand(0x81, rdx, dst, imm32); 829 } 830 831 void Assembler::adcl(Address dst, Register src) { 832 InstructionMark im(this); 833 prefix(dst, src); 834 emit_byte(0x11); 835 emit_operand(src, dst); 836 } 808 837 809 838 void Assembler::adcl(Register dst, int32_t imm32) { … … 1276 1305 } 1277 1306 1307 void Assembler::divl(Register src) { // Unsigned 1308 int encode = prefix_and_encode(src->encoding()); 1309 emit_byte(0xF7); 1310 emit_byte(0xF0 | encode); 1311 } 1312 1278 1313 void Assembler::imull(Register dst, Register src) { 1279 1314 int encode = prefix_and_encode(dst->encoding(), src->encoding()); … … 1289 1324 emit_byte(0x6B); 1290 1325 emit_byte(0xC0 | encode); 1291 emit_byte(value );1326 emit_byte(value & 0xFF); 1292 1327 } else { 1293 1328 emit_byte(0x69); … … 2174 2209 InstructionMark im(this); 2175 2210 prefix(dst); 2176 emit_byte(0x81); 2177 emit_operand(rcx, dst, 4); 2178 emit_long(imm32); 2211 emit_arith_operand(0x81, rcx, dst, imm32); 2179 2212 } 2180 2213 … … 2184 2217 } 2185 2218 2186 2187 2219 void Assembler::orl(Register dst, Address src) { 2188 2220 InstructionMark im(this); … … 2191 2223 emit_operand(dst, src); 2192 2224 } 2193 2194 2225 2195 2226 void Assembler::orl(Register dst, Register src) { … … 2276 2307 2277 2308 void Assembler::prefetchr(Address src) { 2278 NOT_LP64(assert(VM_Version::supports_3dnow (), "must support"));2309 NOT_LP64(assert(VM_Version::supports_3dnow_prefetch(), "must support")); 2279 2310 InstructionMark im(this); 2280 2311 prefetch_prefix(src); … … 2308 2339 2309 2340 void Assembler::prefetchw(Address src) { 2310 NOT_LP64(assert(VM_Version::supports_3dnow (), "must support"));2341 NOT_LP64(assert(VM_Version::supports_3dnow_prefetch(), "must support")); 2311 2342 InstructionMark im(this); 2312 2343 prefetch_prefix(src); … … 2317 2348 void Assembler::prefix(Prefix p) { 2318 2349 a_byte(p); 2350 } 2351 2352 void Assembler::por(XMMRegister dst, XMMRegister src) { 2353 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2354 2355 emit_byte(0x66); 2356 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2357 emit_byte(0x0F); 2358 2359 emit_byte(0xEB); 2360 emit_byte(0xC0 | 
encode); 2319 2361 } 2320 2362 … … 2628 2670 } 2629 2671 2672 void Assembler::sqrtsd(XMMRegister dst, Address src) { 2673 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2674 InstructionMark im(this); 2675 emit_byte(0xF2); 2676 prefix(src, dst); 2677 emit_byte(0x0F); 2678 emit_byte(0x51); 2679 emit_operand(dst, src); 2680 } 2681 2682 void Assembler::sqrtss(XMMRegister dst, XMMRegister src) { 2683 // HMM Table D-1 says sse2 2684 // NOT_LP64(assert(VM_Version::supports_sse(), "")); 2685 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2686 emit_byte(0xF3); 2687 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 2688 emit_byte(0x0F); 2689 emit_byte(0x51); 2690 emit_byte(0xC0 | encode); 2691 } 2692 2693 void Assembler::sqrtss(XMMRegister dst, Address src) { 2694 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2695 InstructionMark im(this); 2696 emit_byte(0xF3); 2697 prefix(src, dst); 2698 emit_byte(0x0F); 2699 emit_byte(0x51); 2700 emit_operand(dst, src); 2701 } 2702 2630 2703 void Assembler::stmxcsr( Address dst) { 2631 2704 NOT_LP64(assert(VM_Version::supports_sse(), "")); … … 2640 2713 InstructionMark im(this); 2641 2714 prefix(dst); 2642 if (is8bit(imm32)) { 2643 emit_byte(0x83); 2644 emit_operand(rbp, dst, 1); 2645 emit_byte(imm32 & 0xFF); 2646 } else { 2647 emit_byte(0x81); 2648 emit_operand(rbp, dst, 4); 2649 emit_long(imm32); 2650 } 2715 emit_arith_operand(0x81, rbp, dst, imm32); 2716 } 2717 2718 void Assembler::subl(Address dst, Register src) { 2719 InstructionMark im(this); 2720 prefix(dst, src); 2721 emit_byte(0x29); 2722 emit_operand(src, dst); 2651 2723 } 2652 2724 … … 2654 2726 prefix(dst); 2655 2727 emit_arith(0x81, 0xE8, dst, imm32); 2656 }2657 2658 void Assembler::subl(Address dst, Register src) {2659 InstructionMark im(this);2660 prefix(dst, src);2661 emit_byte(0x29);2662 emit_operand(src, dst);2663 2728 } 2664 2729 … … 3904 3969 emit_byte(0x6B); 3905 3970 emit_byte(0xC0 | encode); 3906 emit_byte(value );3971 emit_byte(value & 0xFF); 3907 3972 } else { 3908 3973 emit_byte(0x69); … … 4281 4346 emit_byte(0xF8 | encode); 4282 4347 } 4348 4283 4349 void Assembler::sbbq(Address dst, int32_t imm32) { 4284 4350 InstructionMark im(this); … … 4337 4403 } 4338 4404 4339 void Assembler::sqrtsd(XMMRegister dst, Address src) {4340 NOT_LP64(assert(VM_Version::supports_sse2(), ""));4341 InstructionMark im(this);4342 emit_byte(0xF2);4343 prefix(src, dst);4344 emit_byte(0x0F);4345 emit_byte(0x51);4346 emit_operand(dst, src);4347 }4348 4349 4405 void Assembler::subq(Address dst, int32_t imm32) { 4350 4406 InstructionMark im(this); 4351 4407 prefixq(dst); 4352 if (is8bit(imm32)) { 4353 emit_byte(0x83); 4354 emit_operand(rbp, dst, 1); 4355 emit_byte(imm32 & 0xFF); 4356 } else { 4357 emit_byte(0x81); 4358 emit_operand(rbp, dst, 4); 4359 emit_long(imm32); 4360 } 4408 emit_arith_operand(0x81, rbp, dst, imm32); 4409 } 4410 4411 void Assembler::subq(Address dst, Register src) { 4412 InstructionMark im(this); 4413 prefixq(dst, src); 4414 emit_byte(0x29); 4415 emit_operand(src, dst); 4361 4416 } 4362 4417 … … 4364 4419 (void) prefixq_and_encode(dst->encoding()); 4365 4420 emit_arith(0x81, 0xE8, dst, imm32); 4366 }4367 4368 void Assembler::subq(Address dst, Register src) {4369 InstructionMark im(this);4370 prefixq(dst, src);4371 emit_byte(0x29);4372 emit_operand(src, dst);4373 4421 } 4374 4422 … … 4908 4956 4909 4957 4910 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {4911 movsd(dst, as_Address(src));4912 }4913 4914 4958 void 
MacroAssembler::pop_callee_saved_registers() { 4915 4959 pop(rcx); … … 4994 5038 tty->print_cr("eip = 0x%08x", eip); 4995 5039 #ifndef PRODUCT 4996 tty->cr(); 4997 findpc(eip); 4998 tty->cr(); 5040 if ((WizardMode || Verbose) && PrintMiscellaneous) { 5041 tty->cr(); 5042 findpc(eip); 5043 tty->cr(); 5044 } 4999 5045 #endif 5000 tty->print_cr("rax ,= 0x%08x", rax);5001 tty->print_cr("rbx ,= 0x%08x", rbx);5046 tty->print_cr("rax = 0x%08x", rax); 5047 tty->print_cr("rbx = 0x%08x", rbx); 5002 5048 tty->print_cr("rcx = 0x%08x", rcx); 5003 5049 tty->print_cr("rdx = 0x%08x", rdx); 5004 5050 tty->print_cr("rdi = 0x%08x", rdi); 5005 5051 tty->print_cr("rsi = 0x%08x", rsi); 5006 tty->print_cr("rbp ,= 0x%08x", rbp);5052 tty->print_cr("rbp = 0x%08x", rbp); 5007 5053 tty->print_cr("rsp = 0x%08x", rsp); 5008 5054 BREAKPOINT; 5055 assert(false, "start up GDB"); 5009 5056 } 5010 5057 } else { … … 5514 5561 5515 5562 void MacroAssembler::warn(const char* msg) { 5516 push(r12); 5517 movq(r12, rsp); 5563 push(rsp); 5518 5564 andq(rsp, -16); // align stack as required by push_CPU_state and call 5519 5565 … … 5522 5568 call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0); 5523 5569 pop_CPU_state(); 5524 5525 movq(rsp, r12); 5526 pop(r12); 5570 pop(rsp); 5527 5571 } 5528 5572 … … 5836 5880 assert(number_of_arguments >= 0 , "cannot have negative number of arguments"); 5837 5881 LP64_ONLY(assert(java_thread == r15_thread, "unexpected register")); 5882 #ifdef ASSERT 5883 LP64_ONLY(if (UseCompressedOops) verify_heapbase("call_VM_base");) 5884 #endif // ASSERT 5885 5838 5886 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result"); 5839 5887 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp"); … … 7094 7142 7095 7143 // Preserves rbx, and rdx. 7096 voidMacroAssembler::tlab_refill(Label& retry,7097 Label& try_eden,7098 Label& slow_case) {7144 Register MacroAssembler::tlab_refill(Label& retry, 7145 Label& try_eden, 7146 Label& slow_case) { 7099 7147 Register top = rax; 7100 7148 Register t1 = rcx; … … 7143 7191 // if tlab is currently allocated (top or end != null) then 7144 7192 // fill [top, end + alignment_reserve) with array object 7145 testptr 7193 testptr(top, top); 7146 7194 jcc(Assembler::zero, do_refill); 7147 7195 … … 7155 7203 // set klass to intArrayKlass 7156 7204 // dubious reloc why not an oop reloc? 7157 movptr(t1, ExternalAddress((address) 7205 movptr(t1, ExternalAddress((address)Universe::intArrayKlassObj_addr())); 7158 7206 // store klass last. concurrent gcs assumes klass length is valid if 7159 7207 // klass field is not null. 
7160 7208 store_klass(top, t1); 7209 7210 movptr(t1, top); 7211 subptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset()))); 7212 incr_allocated_bytes(thread_reg, t1, 0); 7161 7213 7162 7214 // refill the tlab with an eden allocation … … 7164 7216 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset()))); 7165 7217 shlptr(t1, LogHeapWordSize); 7166 // a dd object_size ??7218 // allocate new tlab, address returned in top 7167 7219 eden_allocate(top, t1, 0, t2, slow_case); 7168 7220 … … 7192 7244 verify_tlab(); 7193 7245 jmp(retry); 7246 7247 return thread_reg; // for use by caller 7248 } 7249 7250 void MacroAssembler::incr_allocated_bytes(Register thread, 7251 Register var_size_in_bytes, 7252 int con_size_in_bytes, 7253 Register t1) { 7254 #ifdef _LP64 7255 if (var_size_in_bytes->is_valid()) { 7256 addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes); 7257 } else { 7258 addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes); 7259 } 7260 #else 7261 if (!thread->is_valid()) { 7262 assert(t1->is_valid(), "need temp reg"); 7263 thread = t1; 7264 get_thread(thread); 7265 } 7266 7267 if (var_size_in_bytes->is_valid()) { 7268 addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes); 7269 } else { 7270 addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes); 7271 } 7272 adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0); 7273 #endif 7194 7274 } 7195 7275 … … 7678 7758 7679 7759 #ifdef ASSERT 7680 Label L; 7681 testptr(tmp, tmp); 7682 jccb(Assembler::notZero, L); 7683 hlt(); 7684 bind(L); 7760 { Label L; 7761 testptr(tmp, tmp); 7762 if (WizardMode) { 7763 jcc(Assembler::notZero, L); 7764 char* buf = new char[40]; 7765 sprintf(buf, "DelayedValue="INTPTR_FORMAT, delayed_value_addr[1]); 7766 stop(buf); 7767 } else { 7768 jccb(Assembler::notZero, L); 7769 hlt(); 7770 } 7771 bind(L); 7772 } 7685 7773 #endif 7686 7774 … … 7699 7787 Register temp_reg, 7700 7788 Label& wrong_method_type) { 7701 if (UseCompressedOops) unimplemented(); // field accesses must decode7789 Address type_addr(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)); 7702 7790 // compare method type against that of the receiver 7703 cmpptr(mtype_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg))); 7791 if (UseCompressedOops) { 7792 load_heap_oop(temp_reg, type_addr); 7793 cmpptr(mtype_reg, temp_reg); 7794 } else { 7795 cmpptr(mtype_reg, type_addr); 7796 } 7704 7797 jcc(Assembler::notEqual, wrong_method_type); 7705 7798 } … … 7713 7806 Register temp_reg) { 7714 7807 assert_different_registers(vmslots_reg, mh_reg, temp_reg); 7715 if (UseCompressedOops) unimplemented(); // field accesses must decode7716 7808 // load mh.type.form.vmslots 7717 7809 if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) { … … 7720 7812 } else { 7721 7813 Register temp2_reg = vmslots_reg; 7722 movptr(temp2_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)));7723 movptr(temp2_reg, Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg)));7814 load_heap_oop(temp2_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg))); 7815 load_heap_oop(temp2_reg, Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg))); 7724 7816 movl(vmslots_reg, Address(temp2_reg, 
delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, temp_reg))); 7725 7817 } … … 7735 7827 assert_different_registers(mh_reg, temp_reg); 7736 7828 7737 if (UseCompressedOops) unimplemented(); // field accesses must decode7738 7739 7829 // pick out the interpreted side of the handler 7830 // NOTE: vmentry is not an oop! 7740 7831 movptr(temp_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::vmentry_offset_in_bytes, temp_reg))); 7741 7832 … … 8228 8319 } 8229 8320 8321 void MacroAssembler::load_heap_oop(Register dst, Address src) { 8322 #ifdef _LP64 8323 if (UseCompressedOops) { 8324 movl(dst, src); 8325 decode_heap_oop(dst); 8326 } else 8327 #endif 8328 movptr(dst, src); 8329 } 8330 8331 void MacroAssembler::store_heap_oop(Address dst, Register src) { 8332 #ifdef _LP64 8333 if (UseCompressedOops) { 8334 assert(!dst.uses(src), "not enough registers"); 8335 encode_heap_oop(src); 8336 movl(dst, src); 8337 } else 8338 #endif 8339 movptr(dst, src); 8340 } 8341 8342 // Used for storing NULLs. 8343 void MacroAssembler::store_heap_oop_null(Address dst) { 8344 #ifdef _LP64 8345 if (UseCompressedOops) { 8346 movl(dst, (int32_t)NULL_WORD); 8347 } else { 8348 movslq(dst, (int32_t)NULL_WORD); 8349 } 8350 #else 8351 movl(dst, (int32_t)NULL_WORD); 8352 #endif 8353 } 8354 8230 8355 #ifdef _LP64 8231 8356 void MacroAssembler::store_klass_gap(Register dst, Register src) { … … 8233 8358 // Store to klass gap in destination 8234 8359 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src); 8235 }8236 }8237 8238 void MacroAssembler::load_heap_oop(Register dst, Address src) {8239 if (UseCompressedOops) {8240 movl(dst, src);8241 decode_heap_oop(dst);8242 } else {8243 movq(dst, src);8244 }8245 }8246 8247 void MacroAssembler::store_heap_oop(Address dst, Register src) {8248 if (UseCompressedOops) {8249 assert(!dst.uses(src), "not enough registers");8250 encode_heap_oop(src);8251 movl(dst, src);8252 } else {8253 movq(dst, src);8254 }8255 }8256 8257 // Used for storing NULLs.8258 void MacroAssembler::store_heap_oop_null(Address dst) {8259 if (UseCompressedOops) {8260 movl(dst, (int32_t)NULL_WORD);8261 } else {8262 movslq(dst, (int32_t)NULL_WORD);8263 8360 } 8264 8361 } … … 8551 8648 void MacroAssembler::string_compare(Register str1, Register str2, 8552 8649 Register cnt1, Register cnt2, Register result, 8553 XMMRegister vec1 , XMMRegister vec2) {8650 XMMRegister vec1) { 8554 8651 Label LENGTH_DIFF_LABEL, POP_LABEL, DONE_LABEL, WHILE_HEAD_LABEL; 8555 8652 … … 8598 8695 } 8599 8696 8600 // Advance to next character 8601 addptr(str1, 2); 8602 addptr(str2, 2); 8697 Address::ScaleFactor scale = Address::times_2; 8698 int stride = 8; 8699 8700 // Advance to next element 8701 addptr(str1, 16/stride); 8702 addptr(str2, 16/stride); 8603 8703 8604 8704 if (UseSSE42Intrinsics) { 8605 // With SSE4.2, use double quad vector compare8606 Label COMPARE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_TAIL;8705 Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_TAIL; 8706 int pcmpmask = 0x19; 8607 8707 // Setup to compare 16-byte vectors 8608 movl(cnt1, cnt2); 8609 andl(cnt2, 0xfffffff8); // cnt2 holds the vector count 8610 andl(cnt1, 0x00000007); // cnt1 holds the tail count 8611 testl(cnt2, cnt2); 8708 movl(result, cnt2); 8709 andl(cnt2, ~(stride - 1)); // cnt2 holds the vector count 8612 8710 jccb(Assembler::zero, COMPARE_TAIL); 8613 8711 8614 lea(str2, Address(str2, cnt2, Address::times_2)); 8615 lea(str1, Address(str1, cnt2, Address::times_2)); 8616 negptr(cnt2); 8617 8618 bind(COMPARE_VECTORS); 8619 movdqu(vec1, 
Address(str1, cnt2, Address::times_2)); 8620 movdqu(vec2, Address(str2, cnt2, Address::times_2)); 8621 pxor(vec1, vec2); 8622 ptest(vec1, vec1); 8623 jccb(Assembler::notZero, VECTOR_NOT_EQUAL); 8624 addptr(cnt2, 8); 8625 jcc(Assembler::notZero, COMPARE_VECTORS); 8626 jmpb(COMPARE_TAIL); 8712 lea(str1, Address(str1, result, scale)); 8713 lea(str2, Address(str2, result, scale)); 8714 negptr(result); 8715 8716 // pcmpestri 8717 // inputs: 8718 // vec1- substring 8719 // rax - negative string length (elements count) 8720 // mem - scaned string 8721 // rdx - string length (elements count) 8722 // pcmpmask - cmp mode: 11000 (string compare with negated result) 8723 // + 00 (unsigned bytes) or + 01 (unsigned shorts) 8724 // outputs: 8725 // rcx - first mismatched element index 8726 assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri"); 8727 8728 bind(COMPARE_WIDE_VECTORS); 8729 movdqu(vec1, Address(str1, result, scale)); 8730 pcmpestri(vec1, Address(str2, result, scale), pcmpmask); 8731 // After pcmpestri cnt1(rcx) contains mismatched element index 8732 8733 jccb(Assembler::below, VECTOR_NOT_EQUAL); // CF==1 8734 addptr(result, stride); 8735 subptr(cnt2, stride); 8736 jccb(Assembler::notZero, COMPARE_WIDE_VECTORS); 8737 8738 // compare wide vectors tail 8739 testl(result, result); 8740 jccb(Assembler::zero, LENGTH_DIFF_LABEL); 8741 8742 movl(cnt2, stride); 8743 movl(result, stride); 8744 negptr(result); 8745 movdqu(vec1, Address(str1, result, scale)); 8746 pcmpestri(vec1, Address(str2, result, scale), pcmpmask); 8747 jccb(Assembler::aboveEqual, LENGTH_DIFF_LABEL); 8627 8748 8628 8749 // Mismatched characters in the vectors 8629 8750 bind(VECTOR_NOT_EQUAL); 8630 lea(str1, Address(str1, cnt2, Address::times_2)); 8631 lea(str2, Address(str2, cnt2, Address::times_2)); 8632 movl(cnt1, 8); 8633 8634 // Compare tail (< 8 chars), or rescan last vectors to 8635 // find 1st mismatched characters 8636 bind(COMPARE_TAIL); 8637 testl(cnt1, cnt1); 8638 jccb(Assembler::zero, LENGTH_DIFF_LABEL); 8639 movl(cnt2, cnt1); 8751 addptr(result, cnt1); 8752 movptr(cnt2, result); 8753 load_unsigned_short(result, Address(str1, cnt2, scale)); 8754 load_unsigned_short(cnt1, Address(str2, cnt2, scale)); 8755 subl(result, cnt1); 8756 jmpb(POP_LABEL); 8757 8758 bind(COMPARE_TAIL); // limit is zero 8759 movl(cnt2, result); 8640 8760 // Fallthru to tail compare 8641 8761 } 8642 8762 8643 8763 // Shift str2 and str1 to the end of the arrays, negate min 8644 lea(str1, Address(str1, cnt2, Address::times_2, 0));8645 lea(str2, Address(str2, cnt2, Address::times_2, 0));8764 lea(str1, Address(str1, cnt2, scale, 0)); 8765 lea(str2, Address(str2, cnt2, scale, 0)); 8646 8766 negptr(cnt2); 8647 8767 8648 // Compare the rest of the characters8768 // Compare the rest of the elements 8649 8769 bind(WHILE_HEAD_LABEL); 8650 load_unsigned_short(result, Address(str1, cnt2, Address::times_2, 0));8651 load_unsigned_short(cnt1, Address(str2, cnt2, Address::times_2, 0));8770 load_unsigned_short(result, Address(str1, cnt2, scale, 0)); 8771 load_unsigned_short(cnt1, Address(str2, cnt2, scale, 0)); 8652 8772 subl(result, cnt1); 8653 8773 jccb(Assembler::notZero, POP_LABEL); 8654 8774 increment(cnt2); 8655 jcc (Assembler::notZero, WHILE_HEAD_LABEL);8775 jccb(Assembler::notZero, WHILE_HEAD_LABEL); 8656 8776 8657 8777 // Strings are equal up to min length. Return the length difference. 
… … 8662 8782 // Discard the stored length difference 8663 8783 bind(POP_LABEL); 8664 addptr(rsp, wordSize);8784 pop(cnt1); 8665 8785 8666 8786 // That's it … … 8710 8830 // With SSE4.2, use double quad vector compare 8711 8831 Label COMPARE_WIDE_VECTORS, COMPARE_TAIL; 8832 8712 8833 // Compare 16-byte vectors 8713 8834 andl(result, 0x0000000e); // tail count (in bytes) … … 8723 8844 movdqu(vec2, Address(ary2, limit, Address::times_1)); 8724 8845 pxor(vec1, vec2); 8846 8725 8847 ptest(vec1, vec1); 8726 8848 jccb(Assembler::notZero, FALSE_LABEL); 8727 8849 addptr(limit, 16); 8728 8850 jcc(Assembler::notZero, COMPARE_WIDE_VECTORS); 8851 8852 testl(result, result); 8853 jccb(Assembler::zero, TRUE_LABEL); 8854 8855 movdqu(vec1, Address(ary1, result, Address::times_1, -16)); 8856 movdqu(vec2, Address(ary2, result, Address::times_1, -16)); 8857 pxor(vec1, vec2); 8858 8859 ptest(vec1, vec1); 8860 jccb(Assembler::notZero, FALSE_LABEL); 8861 jmpb(TRUE_LABEL); 8729 8862 8730 8863 bind(COMPARE_TAIL); // limit is zero -
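Note on the hunks above: adcl, orl, subl, and subq all drop their open-coded is8bit checks in favor of a shared emit_arith_operand helper. Below is a minimal standalone sketch of the encoding decision that helper centralizes, simplified to a register-indirect memory operand and with hypothetical names; the real assembler additionally handles SIB bytes, displacements, and REX prefixes.

```cpp
#include <cstdint>
#include <vector>

// Sketch of the imm8-vs-imm32 choice that emit_arith_operand centralizes.
// 'ext' is the ModRM reg-field opcode extension selecting the operation
// within the group-1 family (0=ADD, 1=OR, 2=ADC, 5=SUB, ...).
// Simplified to a [base] register-indirect operand; rm values 4 and 5
// (SIB byte, disp32) and REX prefixes are ignored here.
static bool is8bit(int32_t imm) { return -128 <= imm && imm < 128; }

void emit_arith_operand(std::vector<uint8_t>& code,
                        uint8_t op32,   // 0x81, the imm32 opcode
                        int ext,        // /digit opcode extension
                        int base_reg,   // register number of [base_reg]
                        int32_t imm32) {
  uint8_t modrm = (uint8_t)((ext << 3) | base_reg);  // mod = 00
  if (is8bit(imm32)) {
    code.push_back((uint8_t)(op32 | 0x02));  // 0x83: sign-extended imm8 form
    code.push_back(modrm);
    code.push_back((uint8_t)(imm32 & 0xFF));
  } else {
    code.push_back(op32);                    // 0x81: full imm32 form
    code.push_back(modrm);
    for (int i = 0; i < 4; i++)              // immediate is little-endian
      code.push_back((uint8_t)((imm32 >> (8 * i)) & 0xFF));
  }
}
```

The rdx/rcx/rbp arguments in the calls above are not real operands: their register encodings (2, 1, 5) supply the ModRM /digit that selects ADC, OR, or SUB within the 0x81/0x83 group.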
trunk/openjdk/hotspot/src/cpu/x86/vm/assembler_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 1997, 201 0, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #ifndef CPU_X86_VM_ASSEMBLER_X86_HPP 26 #define CPU_X86_VM_ASSEMBLER_X86_HPP 27 25 28 class BiasedLockingCounters; 26 29 … … 133 136 // will cause an assertion failure 134 137 #define rscratch1 noreg 138 #define rscratch2 noreg 135 139 136 140 #endif // _LP64 … … 671 675 672 676 #ifdef _LP64 673 static bool is_simm(int64_t x, int nbits) { return -( CONST64(1) << (nbits-1) ) <= x && x < ( CONST64(1) << (nbits-1) ); } 677 static bool is_simm(int64_t x, int nbits) { return -(CONST64(1) << (nbits-1)) <= x && 678 x < (CONST64(1) << (nbits-1)); } 674 679 static bool is_simm32(int64_t x) { return x == (int64_t)(int32_t)x; } 675 680 #else 676 static bool is_simm(int32_t x, int nbits) { return -( 1 << (nbits-1) ) <= x && x < ( 1 << (nbits-1) ); } 681 static bool is_simm(int32_t x, int nbits) { return -(1 << (nbits-1)) <= x && 682 x < (1 << (nbits-1)); } 677 683 static bool is_simm32(int32_t x) { return true; } 678 #endif // LP64684 #endif // _LP64 679 685 680 686 // Generic instructions … … 701 707 void push(void* v); 702 708 void pop(void* v); 703 704 709 705 710 // These do register sized moves/scans … … 713 718 // Vanilla instructions in lexical order 714 719 720 void adcl(Address dst, int32_t imm32); 721 void adcl(Address dst, Register src); 715 722 void adcl(Register dst, int32_t imm32); 716 723 void adcl(Register dst, Address src); … … 720 727 void adcq(Register dst, Address src); 721 728 void adcq(Register dst, Register src); 722 723 729 724 730 void addl(Address dst, int32_t imm32); … … 734 740 void addq(Register dst, Register src); 735 741 736 737 742 void addr_nop_4(); 738 743 void addr_nop_5(); … … 755 760 void andq(Register dst, Address src); 756 761 void andq(Register dst, Register src); 757 758 762 759 763 // Bitwise Logical AND of Packed Double-Precision Floating-Point Values … … 1012 1016 1013 1017 void idivl(Register src); 1018 void divl(Register src); // Unsigned division 1014 1019 1015 1020 void idivq(Register src); … … 1147 1152 void movq(Register dst, Register src); 1148 1153 void movq(Register dst, Address src); 1149 void movq(Address dst, Register src);1154 void movq(Address dst, Register src); 1150 1155 #endif 1151 1156 … … 1173 1178 1174 1179 // Move signed 32bit immediate to 64bit extending sign 1175 void movslq(Address dst, int32_t imm64);1180 void movslq(Address dst, int32_t imm64); 1176 1181 void movslq(Register dst, int32_t imm64); 1177 1182 … … 1273 1278 void prefetchw(Address src); 1274 1279 1280 // POR - Bitwise logical OR 1281 void por(XMMRegister dst, XMMRegister src); 1282 1275 1283 // Shuffle Packed Doublewords 1276 1284 void pshufd(XMMRegister dst, XMMRegister src, int mode); … … 1348 1356 void sqrtsd(XMMRegister dst, Address src); 1349 1357 void sqrtsd(XMMRegister dst, XMMRegister src); 1358 1359 // Compute Square Root of Scalar Single-Precision Floating-Point Value 1360 void sqrtss(XMMRegister dst, Address src); 1361 void sqrtss(XMMRegister dst, XMMRegister src); 1350 1362 1351 1363 void std() { emit_byte(0xfd); } … … 1683 1695 void store_klass(Register dst, Register src); 1684 1696 1697 void load_heap_oop(Register dst, Address src); 1698 void store_heap_oop(Address dst, Register src); 1699 1700 // Used for storing NULL. 
All other oop constants should be 1701 // stored using routines that take a jobject. 1702 void store_heap_oop_null(Address dst); 1703 1685 1704 void load_prototype_header(Register dst, Register src); 1686 1705 1687 1706 #ifdef _LP64 1688 1707 void store_klass_gap(Register dst, Register src); 1689 1690 void load_heap_oop(Register dst, Address src);1691 void store_heap_oop(Address dst, Register src);1692 1708 1693 1709 // This dummy is to prevent a call to store_heap_oop from … … 1696 1712 1697 1713 void store_heap_oop(Address dst, void* dummy); 1698 1699 // Used for storing NULL. All other oop constants should be1700 // stored using routines that take a jobject.1701 void store_heap_oop_null(Address dst);1702 1714 1703 1715 void encode_heap_oop(Register r); … … 1849 1861 Label& slow_case // continuation point if fast allocation fails 1850 1862 ); 1851 void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); 1863 Register tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); // returns TLS address 1864 void incr_allocated_bytes(Register thread, 1865 Register var_size_in_bytes, int con_size_in_bytes, 1866 Register t1 = noreg); 1852 1867 1853 1868 // interface method calling … … 1928 1943 void untested() { stop("untested"); } 1929 1944 1930 void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, sizeof(b), "unimplemented: %s", what); stop(b); }1945 void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); } 1931 1946 1932 1947 void should_not_reach_here() { stop("should not reach here"); } … … 2121 2136 void comisd(XMMRegister dst, AddressLiteral src); 2122 2137 2138 void fadd_s(Address src) { Assembler::fadd_s(src); } 2139 void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); } 2140 2123 2141 void fldcw(Address src) { Assembler::fldcw(src); } 2124 2142 void fldcw(AddressLiteral src); … … 2133 2151 void fld_x(Address src) { Assembler::fld_x(src); } 2134 2152 void fld_x(AddressLiteral src); 2153 2154 void fmul_s(Address src) { Assembler::fmul_s(src); } 2155 void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); } 2135 2156 2136 2157 void ldmxcsr(Address src) { Assembler::ldmxcsr(src); } … … 2150 2171 public: 2151 2172 2173 void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); } 2174 void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); } 2175 void addsd(XMMRegister dst, AddressLiteral src) { Assembler::addsd(dst, as_Address(src)); } 2176 2177 void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); } 2178 void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); } 2179 void addss(XMMRegister dst, AddressLiteral src) { Assembler::addss(dst, as_Address(src)); } 2180 2181 void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); } 2182 void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); } 2183 void divsd(XMMRegister dst, AddressLiteral src) { Assembler::divsd(dst, as_Address(src)); } 2184 2185 void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); } 2186 void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); } 2187 void divss(XMMRegister dst, AddressLiteral src) { Assembler::divss(dst, as_Address(src)); } 2188 2152 2189 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); } 2153 2190 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); } 2154 2191 void 
movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); } 2155 void movsd(XMMRegister dst, AddressLiteral src); 2192 void movsd(XMMRegister dst, AddressLiteral src) { Assembler::movsd(dst, as_Address(src)); } 2193 2194 void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); } 2195 void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); } 2196 void mulsd(XMMRegister dst, AddressLiteral src) { Assembler::mulsd(dst, as_Address(src)); } 2197 2198 void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); } 2199 void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); } 2200 void mulss(XMMRegister dst, AddressLiteral src) { Assembler::mulss(dst, as_Address(src)); } 2201 2202 void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); } 2203 void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); } 2204 void sqrtsd(XMMRegister dst, AddressLiteral src) { Assembler::sqrtsd(dst, as_Address(src)); } 2205 2206 void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); } 2207 void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); } 2208 void sqrtss(XMMRegister dst, AddressLiteral src) { Assembler::sqrtss(dst, as_Address(src)); } 2209 2210 void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); } 2211 void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); } 2212 void subsd(XMMRegister dst, AddressLiteral src) { Assembler::subsd(dst, as_Address(src)); } 2213 2214 void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); } 2215 void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); } 2216 void subss(XMMRegister dst, AddressLiteral src) { Assembler::subss(dst, as_Address(src)); } 2156 2217 2157 2218 void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); } … … 2236 2297 void string_compare(Register str1, Register str2, 2237 2298 Register cnt1, Register cnt2, Register result, 2238 XMMRegister vec1 , XMMRegister vec2);2299 XMMRegister vec1); 2239 2300 2240 2301 // Compare char[] arrays. … … 2273 2334 inline bool AbstractAssembler::pd_check_instruction_mark() { return true; } 2274 2335 #endif 2336 2337 #endif // CPU_X86_VM_ASSEMBLER_X86_HPP -
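One small fix in this header is easy to miss: unimplemented() previously passed sizeof(b) for a char* b, which yields the pointer size rather than the 1024-byte allocation, silently truncating the message. A minimal illustration of the pitfall, using plain snprintf on the assumption that jio_snprintf behaves the same way:

```cpp
#include <cstdio>

// sizeof on an array gives the allocation size; sizeof on a pointer gives
// the pointer size (4 or 8), so the old code truncated its messages to a
// handful of characters.
int main() {
  char on_stack[1024];
  char* on_heap = new char[1024];

  std::snprintf(on_stack, sizeof(on_stack), "ok: %zu bytes", sizeof(on_stack));
  std::snprintf(on_heap, sizeof(on_heap), "truncated");  // pointer size, not 1024!
  std::printf("%s\n%s\n", on_stack, on_heap);            // second line is cut short

  std::snprintf(on_heap, 1024, "fixed: pass the size explicitly");
  std::printf("%s\n", on_heap);
  delete[] on_heap;
  return 0;
}
```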
trunk/openjdk/hotspot/src/cpu/x86/vm/assembler_x86.inline.hpp
r278 r309 1 1 /* 2 * Copyright (c) 1997, 20 08, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 22 22 * 23 23 */ 24 25 #ifndef CPU_X86_VM_ASSEMBLER_X86_INLINE_HPP 26 #define CPU_X86_VM_ASSEMBLER_X86_INLINE_HPP 27 28 #include "asm/assembler.inline.hpp" 29 #include "asm/codeBuffer.hpp" 30 #include "code/codeCache.hpp" 31 #include "runtime/handles.inline.hpp" 24 32 25 33 inline void MacroAssembler::pd_patch_instruction(address branch, address target) { … … 86 94 } 87 95 #endif // _LP64 96 97 #endif // CPU_X86_VM_ASSEMBLER_X86_INLINE_HPP -
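As throughout this changeset, the header gains an include guard named after its path and swaps the monolithic incls/_precompiled.incl for the specific headers it depends on. The shape of the pattern, shown for a hypothetical file:

```cpp
// Pattern applied to the headers in this changeset: a guard named after
// the file's path, and explicit includes in place of the old
// includeDB-generated incls/_precompiled.incl. File name is hypothetical.
#ifndef CPU_X86_VM_EXAMPLE_X86_HPP
#define CPU_X86_VM_EXAMPLE_X86_HPP

#include "asm/assembler.inline.hpp"    // each dependency is now spelled out
#include "runtime/handles.inline.hpp"

// ... declarations ...

#endif // CPU_X86_VM_EXAMPLE_X86_HPP
```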
trunk/openjdk/hotspot/src/cpu/x86/vm/bytecodeInterpreter_x86.cpp
r278 r309 1 1 /* 2 * Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_bytecodeInterpreter_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "interpreter/bytecodeInterpreter.hpp" 28 #include "interpreter/bytecodeInterpreter.inline.hpp" 29 #include "interpreter/interpreter.hpp" 30 #include "interpreter/interpreterRuntime.hpp" 31 #include "oops/methodDataOop.hpp" 32 #include "oops/methodOop.hpp" 33 #include "oops/oop.inline.hpp" 34 #include "prims/jvmtiExport.hpp" 35 #include "prims/jvmtiThreadState.hpp" 36 #include "runtime/deoptimization.hpp" 37 #include "runtime/frame.inline.hpp" 38 #include "runtime/sharedRuntime.hpp" 39 #include "runtime/stubRoutines.hpp" 40 #include "runtime/synchronizer.hpp" 41 #include "runtime/vframeArray.hpp" 42 #include "utilities/debug.hpp" 43 #ifdef TARGET_ARCH_MODEL_x86_32 44 # include "interp_masm_x86_32.hpp" 45 #endif 46 #ifdef TARGET_ARCH_MODEL_x86_64 47 # include "interp_masm_x86_64.hpp" 48 #endif 27 49 28 50 #ifdef CC_INTERP -
trunk/openjdk/hotspot/src/cpu/x86/vm/bytecodeInterpreter_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 22 22 * 23 23 */ 24 25 #ifndef CPU_X86_VM_BYTECODEINTERPRETER_X86_HPP 26 #define CPU_X86_VM_BYTECODEINTERPRETER_X86_HPP 24 27 25 28 // Platform specific for C++ based Interpreter … … 109 112 #define SET_LOCALS_LONG_FROM_ADDR(addr, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->l = \ 110 113 ((VMJavaVal64*)(addr))->l) 114 115 #endif // CPU_X86_VM_BYTECODEINTERPRETER_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/bytecodeInterpreter_x86.inline.hpp
r278 r309 1 1 /* 2 * Copyright (c) 2002, 20 09, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #ifndef CPU_X86_VM_BYTECODEINTERPRETER_X86_INLINE_HPP 26 #define CPU_X86_VM_BYTECODEINTERPRETER_X86_INLINE_HPP 27 25 28 // Inline interpreter functions for IA32 26 29 … … 279 282 return (jbyte) val; 280 283 } 284 285 #endif // CPU_X86_VM_BYTECODEINTERPRETER_X86_INLINE_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/bytecodes_x86.cpp
r278 r309 1 1 /* 2 * Copyright (c) 1998, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include " incls/_precompiled.incl"26 #include "in cls/_bytecodes_x86.cpp.incl"25 #include "precompiled.hpp" 26 #include "interpreter/bytecodes.hpp" 27 27 28 28 -
trunk/openjdk/hotspot/src/cpu/x86/vm/bytecodes_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 1998, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #ifndef CPU_X86_VM_BYTECODES_X86_HPP 26 #define CPU_X86_VM_BYTECODES_X86_HPP 27 25 28 // No i486 specific bytecodes 29 30 #endif // CPU_X86_VM_BYTECODES_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/bytes_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 1997, 20 01, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 22 22 * 23 23 */ 24 25 #ifndef CPU_X86_VM_BYTES_X86_HPP 26 #define CPU_X86_VM_BYTES_X86_HPP 27 28 #include "memory/allocation.hpp" 24 29 25 30 class Bytes: AllStatic { … … 68 73 69 74 // The following header contains the implementations of swap_u2, swap_u4, and swap_u8[_base] 70 #include "incls/_bytes_pd.inline.hpp.incl" 75 #ifdef TARGET_OS_ARCH_linux_x86 76 # include "bytes_linux_x86.inline.hpp" 77 #endif 78 #ifdef TARGET_OS_ARCH_solaris_x86 79 # include "bytes_solaris_x86.inline.hpp" 80 #endif 81 #ifdef TARGET_OS_ARCH_windows_x86 82 # include "bytes_windows_x86.inline.hpp" 83 #endif 84 85 86 #endif // CPU_X86_VM_BYTES_X86_HPP -
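The conditional includes added above pull in per-OS implementations of swap_u2, swap_u4, and swap_u8. A portable shift-and-mask sketch of what those routines compute (the real bytes_*_x86.inline.hpp versions typically use the bswap instruction via inline assembly or compiler intrinsics):

```cpp
#include <cstdint>

// Reverse the byte order of 16-, 32-, and 64-bit values.
static inline uint16_t swap_u2(uint16_t x) {
  return (uint16_t)((x << 8) | (x >> 8));
}

static inline uint32_t swap_u4(uint32_t x) {
  return ((x & 0x000000FFu) << 24) | ((x & 0x0000FF00u) << 8) |
         ((x & 0x00FF0000u) >> 8)  | ((x & 0xFF000000u) >> 24);
}

static inline uint64_t swap_u8(uint64_t x) {
  // Swap each 32-bit half, then exchange the halves.
  return ((uint64_t)swap_u4((uint32_t)x) << 32) |
         (uint64_t)swap_u4((uint32_t)(x >> 32));
}
```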
trunk/openjdk/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
r278 r309 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_c1_CodeStubs_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "c1/c1_CodeStubs.hpp" 27 #include "c1/c1_FrameMap.hpp" 28 #include "c1/c1_LIRAssembler.hpp" 29 #include "c1/c1_MacroAssembler.hpp" 30 #include "c1/c1_Runtime1.hpp" 31 #include "nativeInst_x86.hpp" 32 #include "runtime/sharedRuntime.hpp" 33 #include "vmreg_x86.inline.hpp" 34 #ifndef SERIALGC 35 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" 36 #endif 27 37 28 38 … … 69 79 } 70 80 71 #ifdef TIERED72 81 void CounterOverflowStub::emit_code(LIR_Assembler* ce) { 73 82 __ bind(_entry); 83 ce->store_parameter(_method->as_register(), 1); 74 84 ce->store_parameter(_bci, 0); 75 85 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id))); 76 86 ce->add_call_info_here(_info); 77 87 ce->verify_oop_map(_info); 78 79 __ jmp(_continuation); 80 } 81 #endif // TIERED 82 83 88 __ jmp(_continuation); 89 } 84 90 85 91 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, … … 88 94 , _index(index) 89 95 { 90 _info = info == NULL ? NULL : new CodeEmitInfo(info); 96 assert(info != NULL, "must have info"); 97 _info = new CodeEmitInfo(info); 91 98 } 92 99 … … 477 484 Register pre_val_reg = pre_val()->as_register(); 478 485 479 ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false );486 ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/); 480 487 481 488 __ cmpptr(pre_val_reg, (int32_t) NULL_WORD); … … 503 510 __ cmpptr(new_val_reg, (int32_t) NULL_WORD); 504 511 __ jcc(Assembler::equal, _continuation); 505 ce->store_parameter(addr()->as_ register(), 0);512 ce->store_parameter(addr()->as_pointer_register(), 0); 506 513 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id))); 507 514 __ jmp(_continuation); -
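The G1 stub changes above are the slow paths of a write barrier. For orientation, here is a sketch of the simpler card-table post-barrier idea behind the cardTableModRefBS include; the card size (512 bytes) and dirty value (0) match HotSpot's usual constants but are treated as assumptions here, and G1's SATB barrier layers the pre/post queues, whose slow paths these stubs implement, on top of this scheme.

```cpp
#include <cstdint>

static const int kCardShift = 9;   // log2 of the 512-byte card size
static uint8_t* card_table;        // assumed biased so (addr >> 9) indexes it

// After an oop store, dirty the card covering the written field so the
// GC rescans that region for old->young pointers.
inline void post_write_barrier(void* field_addr) {
  card_table[(uintptr_t)field_addr >> kCardShift] = 0;  // 0 == dirty
}
```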
trunk/openjdk/hotspot/src/cpu/x86/vm/c1_Defs_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 2000, 20 08, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 22 22 * 23 23 */ 24 25 #ifndef CPU_X86_VM_C1_DEFS_X86_HPP 26 #define CPU_X86_VM_C1_DEFS_X86_HPP 24 27 25 28 // native word offsets from memory address (little endian) … … 59 62 pd_first_cpu_reg = 0, 60 63 pd_last_cpu_reg = NOT_LP64(5) LP64_ONLY(11), 61 pd_first_byte_reg = 2,62 pd_last_byte_reg = 5,64 pd_first_byte_reg = NOT_LP64(2) LP64_ONLY(0), 65 pd_last_byte_reg = NOT_LP64(5) LP64_ONLY(11), 63 66 pd_first_fpu_reg = pd_nof_cpu_regs_frame_map, 64 67 pd_last_fpu_reg = pd_first_fpu_reg + 7, … … 72 75 pd_float_saved_as_double = true 73 76 }; 77 78 #endif // CPU_X86_VM_C1_DEFS_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/c1_FpuStackSim_x86.cpp
r278 r309 1 1 /* 2 * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 # include "incls/_precompiled.incl" 26 # include "incls/_c1_FpuStackSim_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "c1/c1_FpuStackSim.hpp" 27 #include "c1/c1_FrameMap.hpp" 28 #include "utilities/array.hpp" 29 #include "utilities/ostream.hpp" 27 30 28 31 //-------------------------------------------------------- -
trunk/openjdk/hotspot/src/cpu/x86/vm/c1_FpuStackSim_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 22 22 * 23 23 */ 24 25 #ifndef CPU_X86_VM_C1_FPUSTACKSIM_X86_HPP 26 #define CPU_X86_VM_C1_FPUSTACKSIM_X86_HPP 24 27 25 28 // Simulates the FPU stack and maintains mapping [fpu-register -> stack offset] … … 66 69 void print() PRODUCT_RETURN; 67 70 }; 71 72 #endif // CPU_X86_VM_C1_FPUSTACKSIM_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.cpp
r278 r309 23 23 */ 24 24 25 # include "incls/_precompiled.incl" 26 # include "incls/_c1_FrameMap_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "c1/c1_FrameMap.hpp" 27 #include "c1/c1_LIR.hpp" 28 #include "runtime/sharedRuntime.hpp" 29 #include "vmreg_x86.inline.hpp" 27 30 28 31 const int FrameMap::pd_c_runtime_reserved_arg_size = 0; … … 156 159 map_register( 7, r9); r9_opr = LIR_OprFact::single_cpu(7); 157 160 map_register( 8, r11); r11_opr = LIR_OprFact::single_cpu(8); 158 map_register( 9, r12); r12_opr = LIR_OprFact::single_cpu(9); 159 map_register(10, r13); r13_opr = LIR_OprFact::single_cpu(10); 160 map_register(11, r14); r14_opr = LIR_OprFact::single_cpu(11); 161 map_register( 9, r13); r13_opr = LIR_OprFact::single_cpu(9); 162 map_register(10, r14); r14_opr = LIR_OprFact::single_cpu(10); 163 // r12 is allocated conditionally. With compressed oops it holds 164 // the heapbase value and is not visible to the allocator. 165 map_register(11, r12); r12_opr = LIR_OprFact::single_cpu(11); 161 166 // The unallocatable registers are at the end 162 167 map_register(12, r10); r10_opr = LIR_OprFact::single_cpu(12); … … 189 194 _caller_save_cpu_regs[7] = r9_opr; 190 195 _caller_save_cpu_regs[8] = r11_opr; 191 _caller_save_cpu_regs[9] = r1 2_opr;192 _caller_save_cpu_regs[10] = r1 3_opr;193 _caller_save_cpu_regs[11] = r1 4_opr;196 _caller_save_cpu_regs[9] = r13_opr; 197 _caller_save_cpu_regs[10] = r14_opr; 198 _caller_save_cpu_regs[11] = r12_opr; 194 199 #endif // _LP64 195 200 -
trunk/openjdk/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.hpp
r278 r309 22 22 * 23 23 */ 24 25 #ifndef CPU_X86_VM_C1_FRAMEMAP_X86_HPP 26 #define CPU_X86_VM_C1_FRAMEMAP_X86_HPP 24 27 25 28 // On i486 the frame looks as follows: … … 127 130 return _caller_save_xmm_regs[i]; 128 131 } 132 133 static int adjust_reg_range(int range) { 134 // Reduce the number of available regs (to free r12) in case of compressed oops 135 if (UseCompressedOops) return range - 1; 136 return range; 137 } 138 139 static int nof_caller_save_cpu_regs() { return adjust_reg_range(pd_nof_caller_save_cpu_regs_frame_map); } 140 static int last_cpu_reg() { return adjust_reg_range(pd_last_cpu_reg); } 141 static int last_byte_reg() { return adjust_reg_range(pd_last_byte_reg); } 142 143 #endif // CPU_X86_VM_C1_FRAMEMAP_X86_HPP 144 -
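The r12 remapping in c1_FrameMap_x86.cpp and the adjust_reg_range helper above work together: r12 moves from slot 9 to slot 11, the end of the allocatable range, so that when UseCompressedOops pins it as the heap-base register, shrinking the range by one removes exactly that register. A condensed sketch of the pattern (names follow the diff; the count of 12 is inferred from the caller-save slots above):

```cpp
// With compressed oops, r12 holds the heap base and must stay invisible
// to the register allocator; placing it last makes the reservation a
// simple range adjustment instead of a hole in the middle.
static bool UseCompressedOops = true;  // VM flag, assumed set here

static const int pd_nof_caller_save_cpu_regs = 12;

static int adjust_reg_range(int range) {
  return UseCompressedOops ? range - 1 : range;
}

static int nof_caller_save_cpu_regs() {
  return adjust_reg_range(pd_nof_caller_save_cpu_regs);
}
```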
trunk/openjdk/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
r278 r309 1 1 /* 2 * Copyright (c) 2000, 201 0, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 # include "incls/_precompiled.incl" 26 # include "incls/_c1_LIRAssembler_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "c1/c1_Compilation.hpp" 27 #include "c1/c1_LIRAssembler.hpp" 28 #include "c1/c1_MacroAssembler.hpp" 29 #include "c1/c1_Runtime1.hpp" 30 #include "c1/c1_ValueStack.hpp" 31 #include "ci/ciArrayKlass.hpp" 32 #include "ci/ciInstance.hpp" 33 #include "gc_interface/collectedHeap.hpp" 34 #include "memory/barrierSet.hpp" 35 #include "memory/cardTableModRefBS.hpp" 36 #include "nativeInst_x86.hpp" 37 #include "oops/objArrayKlass.hpp" 38 #include "runtime/sharedRuntime.hpp" 27 39 28 40 … … 332 344 Register ic_klass = IC_Klass; 333 345 const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9); 334 335 if (! VerifyOops) {346 const bool do_post_padding = VerifyOops || UseCompressedOops; 347 if (!do_post_padding) { 336 348 // insert some nops so that the verified entry point is aligned on CodeEntryAlignment 337 349 while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) { … … 341 353 int offset = __ offset(); 342 354 __ inline_cache_check(receiver, IC_Klass); 343 assert(__ offset() % CodeEntryAlignment == 0 || VerifyOops, "alignment must be correct");344 if ( VerifyOops) {355 assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct"); 356 if (do_post_padding) { 345 357 // force alignment after the cache check. 346 358 // It's been verified to be aligned if !VerifyOops … … 548 560 549 561 // Get addresses of first characters from both Strings 550 __ movptr(rsi, Address(rax, java_lang_String::value_offset_in_bytes()));551 __ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));552 __ lea (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));562 __ load_heap_oop(rsi, Address(rax, java_lang_String::value_offset_in_bytes())); 563 __ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes())); 564 __ lea (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); 553 565 554 566 555 567 // rbx, may be NULL 556 568 add_debug_info_for_null_check_here(info); 557 __ movptr(rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));558 __ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));559 __ lea (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));569 __ load_heap_oop(rdi, Address(rbx, java_lang_String::value_offset_in_bytes())); 570 __ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes())); 571 __ lea (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); 560 572 561 573 // compute minimum length (in rax) and difference of lengths (on top of stack) … … 685 697 686 698 switch (c->type()) { 687 case T_INT: 699 case T_INT: { 700 assert(patch_code == lir_patch_none, "no patching handled here"); 701 __ movl(dest->as_register(), c->as_jint()); 702 break; 703 } 704 688 705 case T_ADDRESS: { 689 706 assert(patch_code == lir_patch_none, "no patching handled here"); 690 __ mov l(dest->as_register(), c->as_jint());707 __ movptr(dest->as_register(), c->as_jint()); 691 708 break; 692 709 } … … 769 786 case T_INT: // fall through 770 787 case T_FLOAT: 788 __ 
movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits()); 789 break; 790 771 791 case T_ADDRESS: 772 __ mov l(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());792 __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits()); 773 793 break; 774 794 … … 795 815 } 796 816 797 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info 817 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) { 798 818 assert(src->is_constant(), "should not call otherwise"); 799 819 assert(dest->is_address(), "should not call otherwise"); … … 805 825 case T_INT: // fall through 806 826 case T_FLOAT: 827 __ movl(as_Address(addr), c->as_jint_bits()); 828 break; 829 807 830 case T_ADDRESS: 808 __ mov l(as_Address(addr), c->as_jint_bits());831 __ movptr(as_Address(addr), c->as_jint_bits()); 809 832 break; 810 833 … … 812 835 case T_ARRAY: 813 836 if (c->as_jobject() == NULL) { 814 __ movptr(as_Address(addr), NULL_WORD); 837 if (UseCompressedOops && !wide) { 838 __ movl(as_Address(addr), (int32_t)NULL_WORD); 839 } else { 840 __ movptr(as_Address(addr), NULL_WORD); 841 } 815 842 } else { 816 843 if (is_literal_address(addr)) { … … 820 847 #ifdef _LP64 821 848 __ movoop(rscratch1, c->as_jobject()); 822 null_check_here = code_offset(); 823 __ movptr(as_Address_lo(addr), rscratch1); 849 if (UseCompressedOops && !wide) { 850 __ encode_heap_oop(rscratch1); 851 null_check_here = code_offset(); 852 __ movl(as_Address_lo(addr), rscratch1); 853 } else { 854 null_check_here = code_offset(); 855 __ movptr(as_Address_lo(addr), rscratch1); 856 } 824 857 #else 825 858 __ movoop(as_Address(addr), c->as_jobject()); … … 998 1031 999 1032 1000 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool /* unaligned */) {1033 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) { 1001 1034 LIR_Address* to_addr = dest->as_address_ptr(); 1002 1035 PatchingStub* patch = NULL; 1036 Register compressed_src = rscratch1; 1003 1037 1004 1038 if (type == T_ARRAY || type == T_OBJECT) { 1005 1039 __ verify_oop(src->as_register()); 1006 } 1040 #ifdef _LP64 1041 if (UseCompressedOops && !wide) { 1042 __ movptr(compressed_src, src->as_register()); 1043 __ encode_heap_oop(compressed_src); 1044 } 1045 #endif 1046 } 1047 1007 1048 if (patch_code != lir_patch_none) { 1008 1049 patch = new PatchingStub(_masm, PatchingStub::access_field_id); … … 1010 1051 assert(toa.disp() != 0, "must have"); 1011 1052 } 1012 if (info != NULL) { 1013 add_debug_info_for_null_check_here(info); 1014 } 1015 1053 1054 int null_check_here = code_offset(); 1016 1055 switch (type) { 1017 1056 case T_FLOAT: { … … 1039 1078 } 1040 1079 1041 case T_ADDRESS: // fall through1042 1080 case T_ARRAY: // fall through 1043 1081 case T_OBJECT: // fall through 1044 #ifdef _LP64 1082 if (UseCompressedOops && !wide) { 1083 __ movl(as_Address(to_addr), compressed_src); 1084 } else { 1085 __ movptr(as_Address(to_addr), src->as_register()); 1086 } 1087 break; 1088 case T_ADDRESS: 1045 1089 __ movptr(as_Address(to_addr), src->as_register()); 1046 1090 break; 1047 #endif // _LP641048 1091 case T_INT: 1049 1092 __ movl(as_Address(to_addr), src->as_register()); … … 1102 1145 ShouldNotReachHere(); 1103 1146 } 1147 if (info != NULL) { 1148 
add_debug_info_for_null_check(null_check_here, info); 1149 } 1104 1150 1105 1151 if (patch_code != lir_patch_none) { … … 1185 1231 1186 1232 1187 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool /* unaligned */) {1233 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) { 1188 1234 assert(src->is_address(), "should not call otherwise"); 1189 1235 assert(dest->is_register(), "should not call otherwise"); … … 1239 1285 } 1240 1286 1241 case T_ADDRESS: // fall through1242 1287 case T_OBJECT: // fall through 1243 1288 case T_ARRAY: // fall through 1244 #ifdef _LP64 1289 if (UseCompressedOops && !wide) { 1290 __ movl(dest->as_register(), from_addr); 1291 } else { 1292 __ movptr(dest->as_register(), from_addr); 1293 } 1294 break; 1295 1296 case T_ADDRESS: 1245 1297 __ movptr(dest->as_register(), from_addr); 1246 1298 break; 1247 #endif // _L641248 1299 case T_INT: 1249 1300 __ movl(dest->as_register(), from_addr); … … 1340 1391 1341 1392 if (type == T_ARRAY || type == T_OBJECT) { 1393 #ifdef _LP64 1394 if (UseCompressedOops && !wide) { 1395 __ decode_heap_oop(dest->as_register()); 1396 } 1397 #endif 1342 1398 __ verify_oop(dest->as_register()); 1343 1399 } … … 1360 1416 ShouldNotReachHere(); break; 1361 1417 } 1362 } else if (VM_Version::supports_3dnow ()) {1418 } else if (VM_Version::supports_3dnow_prefetch()) { 1363 1419 __ prefetchr(from_addr); 1364 1420 } … … 1383 1439 ShouldNotReachHere(); break; 1384 1440 } 1385 } else if (VM_Version::supports_3dnow ()) {1441 } else if (VM_Version::supports_3dnow_prefetch()) { 1386 1442 __ prefetchw(from_addr); 1387 1443 } … … 1586 1642 1587 1643 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { 1644 Register len = op->len()->as_register(); 1645 LP64_ONLY( __ movslq(len, len); ) 1646 1588 1647 if (UseSlowPath || 1589 1648 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) || … … 1591 1650 __ jmp(*op->stub()->entry()); 1592 1651 } else { 1593 Register len = op->len()->as_register();1594 1652 Register tmp1 = op->tmp1()->as_register(); 1595 1653 Register tmp2 = op->tmp2()->as_register(); … … 1616 1674 } 1617 1675 1676 void LIR_Assembler::type_profile_helper(Register mdo, 1677 ciMethodData *md, ciProfileData *data, 1678 Register recv, Label* update_done) { 1679 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) { 1680 Label next_test; 1681 // See if the receiver is receiver[n]. 
1682 __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)))); 1683 __ jccb(Assembler::notEqual, next_test); 1684 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))); 1685 __ addptr(data_addr, DataLayout::counter_increment); 1686 __ jmp(*update_done); 1687 __ bind(next_test); 1688 } 1689 1690 // Didn't find receiver; find next empty slot and fill it in 1691 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) { 1692 Label next_test; 1693 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))); 1694 __ cmpptr(recv_addr, (intptr_t)NULL_WORD); 1695 __ jccb(Assembler::notEqual, next_test); 1696 __ movptr(recv_addr, recv); 1697 __ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment); 1698 __ jmp(*update_done); 1699 __ bind(next_test); 1700 } 1701 } 1702 1703 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) { 1704 // we always need a stub for the failure case. 1705 CodeStub* stub = op->stub(); 1706 Register obj = op->object()->as_register(); 1707 Register k_RInfo = op->tmp1()->as_register(); 1708 Register klass_RInfo = op->tmp2()->as_register(); 1709 Register dst = op->result_opr()->as_register(); 1710 ciKlass* k = op->klass(); 1711 Register Rtmp1 = noreg; 1712 1713 // check if it needs to be profiled 1714 ciMethodData* md; 1715 ciProfileData* data; 1716 1717 if (op->should_profile()) { 1718 ciMethod* method = op->profiled_method(); 1719 assert(method != NULL, "Should have method"); 1720 int bci = op->profiled_bci(); 1721 md = method->method_data_or_null(); 1722 assert(md != NULL, "Sanity"); 1723 data = md->bci_to_data(bci); 1724 assert(data != NULL, "need data for type check"); 1725 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 1726 } 1727 Label profile_cast_success, profile_cast_failure; 1728 Label *success_target = op->should_profile() ? &profile_cast_success : success; 1729 Label *failure_target = op->should_profile() ? 
&profile_cast_failure : failure; 1730 1731 if (obj == k_RInfo) { 1732 k_RInfo = dst; 1733 } else if (obj == klass_RInfo) { 1734 klass_RInfo = dst; 1735 } 1736 if (k->is_loaded() && !UseCompressedOops) { 1737 select_different_registers(obj, dst, k_RInfo, klass_RInfo); 1738 } else { 1739 Rtmp1 = op->tmp3()->as_register(); 1740 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1); 1741 } 1742 1743 assert_different_registers(obj, k_RInfo, klass_RInfo); 1744 if (!k->is_loaded()) { 1745 jobject2reg_with_patching(k_RInfo, op->info_for_patch()); 1746 } else { 1747 #ifdef _LP64 1748 __ movoop(k_RInfo, k->constant_encoding()); 1749 #endif // _LP64 1750 } 1751 assert(obj != k_RInfo, "must be different"); 1752 1753 __ cmpptr(obj, (int32_t)NULL_WORD); 1754 if (op->should_profile()) { 1755 Label not_null; 1756 __ jccb(Assembler::notEqual, not_null); 1757 // Object is null; update MDO and exit 1758 Register mdo = klass_RInfo; 1759 __ movoop(mdo, md->constant_encoding()); 1760 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset())); 1761 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant()); 1762 __ orl(data_addr, header_bits); 1763 __ jmp(*obj_is_null); 1764 __ bind(not_null); 1765 } else { 1766 __ jcc(Assembler::equal, *obj_is_null); 1767 } 1768 __ verify_oop(obj); 1769 1770 if (op->fast_check()) { 1771 // get object class 1772 // not a safepoint as obj null check happens earlier 1773 #ifdef _LP64 1774 if (UseCompressedOops) { 1775 __ load_klass(Rtmp1, obj); 1776 __ cmpptr(k_RInfo, Rtmp1); 1777 } else { 1778 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 1779 } 1780 #else 1781 if (k->is_loaded()) { 1782 __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding()); 1783 } else { 1784 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 1785 } 1786 #endif 1787 __ jcc(Assembler::notEqual, *failure_target); 1788 // successful cast, fall through to profile or jump 1789 } else { 1790 // get object class 1791 // not a safepoint as obj null check happens earlier 1792 __ load_klass(klass_RInfo, obj); 1793 if (k->is_loaded()) { 1794 // See if we get an immediate positive hit 1795 #ifdef _LP64 1796 __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset())); 1797 #else 1798 __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding()); 1799 #endif // _LP64 1800 if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) { 1801 __ jcc(Assembler::notEqual, *failure_target); 1802 // successful cast, fall through to profile or jump 1803 } else { 1804 // See if we get an immediate positive hit 1805 __ jcc(Assembler::equal, *success_target); 1806 // check for self 1807 #ifdef _LP64 1808 __ cmpptr(klass_RInfo, k_RInfo); 1809 #else 1810 __ cmpoop(klass_RInfo, k->constant_encoding()); 1811 #endif // _LP64 1812 __ jcc(Assembler::equal, *success_target); 1813 1814 __ push(klass_RInfo); 1815 #ifdef _LP64 1816 __ push(k_RInfo); 1817 #else 1818 __ pushoop(k->constant_encoding()); 1819 #endif // _LP64 1820 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 1821 __ pop(klass_RInfo); 1822 __ pop(klass_RInfo); 1823 // result is a boolean 1824 __ cmpl(klass_RInfo, 0); 1825 __ jcc(Assembler::equal, *failure_target); 1826 // successful cast, fall through to profile or jump 1827 } 1828 } else { 1829 // perform the fast part of the checking logic 1830 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, 
success_target, failure_target, NULL); 1831 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 1832 __ push(klass_RInfo); 1833 __ push(k_RInfo); 1834 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 1835 __ pop(klass_RInfo); 1836 __ pop(k_RInfo); 1837 // result is a boolean 1838 __ cmpl(k_RInfo, 0); 1839 __ jcc(Assembler::equal, *failure_target); 1840 // successful cast, fall through to profile or jump 1841 } 1842 } 1843 if (op->should_profile()) { 1844 Register mdo = klass_RInfo, recv = k_RInfo; 1845 __ bind(profile_cast_success); 1846 __ movoop(mdo, md->constant_encoding()); 1847 __ load_klass(recv, obj); 1848 Label update_done; 1849 type_profile_helper(mdo, md, data, recv, success); 1850 __ jmp(*success); 1851 1852 __ bind(profile_cast_failure); 1853 __ movoop(mdo, md->constant_encoding()); 1854 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 1855 __ subptr(counter_addr, DataLayout::counter_increment); 1856 __ jmp(*failure); 1857 } 1858 __ jmp(*success); 1859 } 1618 1860 1619 1861 … … 1628 1870 1629 1871 CodeStub* stub = op->stub(); 1630 Label done; 1872 1873 // check if it needs to be profiled 1874 ciMethodData* md; 1875 ciProfileData* data; 1876 1877 if (op->should_profile()) { 1878 ciMethod* method = op->profiled_method(); 1879 assert(method != NULL, "Should have method"); 1880 int bci = op->profiled_bci(); 1881 md = method->method_data_or_null(); 1882 assert(md != NULL, "Sanity"); 1883 data = md->bci_to_data(bci); 1884 assert(data != NULL, "need data for type check"); 1885 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 1886 } 1887 Label profile_cast_success, profile_cast_failure, done; 1888 Label *success_target = op->should_profile() ? &profile_cast_success : &done; 1889 Label *failure_target = op->should_profile() ? 
&profile_cast_failure : stub->entry(); 1890 1631 1891 __ cmpptr(value, (int32_t)NULL_WORD); 1632 __ jcc(Assembler::equal, done); 1892 if (op->should_profile()) { 1893 Label not_null; 1894 __ jccb(Assembler::notEqual, not_null); 1895 // Object is null; update MDO and exit 1896 Register mdo = klass_RInfo; 1897 __ movoop(mdo, md->constant_encoding()); 1898 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset())); 1899 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant()); 1900 __ orl(data_addr, header_bits); 1901 __ jmp(done); 1902 __ bind(not_null); 1903 } else { 1904 __ jcc(Assembler::equal, done); 1905 } 1906 1633 1907 add_debug_info_for_null_check_here(op->info_for_exception()); 1634 __ movptr(k_RInfo, Address(array, oopDesc::klass_offset_in_bytes()));1635 __ movptr(klass_RInfo, Address(value, oopDesc::klass_offset_in_bytes()));1636 1637 // get instance klass 1908 __ load_klass(k_RInfo, array); 1909 __ load_klass(klass_RInfo, value); 1910 1911 // get instance klass (it's already uncompressed) 1638 1912 __ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc))); 1639 1913 // perform the fast part of the checking logic 1640 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, &done, stub->entry(), NULL);1914 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); 1641 1915 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 1642 1916 __ push(klass_RInfo); … … 1647 1921 // result is a boolean 1648 1922 __ cmpl(k_RInfo, 0); 1649 __ jcc(Assembler::equal, *stub->entry()); 1923 __ jcc(Assembler::equal, *failure_target); 1924 // fall through to the success case 1925 1926 if (op->should_profile()) { 1927 Register mdo = klass_RInfo, recv = k_RInfo; 1928 __ bind(profile_cast_success); 1929 __ movoop(mdo, md->constant_encoding()); 1930 __ load_klass(recv, value); 1931 Label update_done; 1932 type_profile_helper(mdo, md, data, recv, &done); 1933 __ jmpb(done); 1934 1935 __ bind(profile_cast_failure); 1936 __ movoop(mdo, md->constant_encoding()); 1937 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 1938 __ subptr(counter_addr, DataLayout::counter_increment); 1939 __ jmp(*stub->entry()); 1940 } 1941 1650 1942 __ bind(done); 1651 } else if (op->code() == lir_checkcast) { 1652 // we always need a stub for the failure case. 
1653 CodeStub* stub = op->stub(); 1654 Register obj = op->object()->as_register(); 1655 Register k_RInfo = op->tmp1()->as_register(); 1656 Register klass_RInfo = op->tmp2()->as_register(); 1657 Register dst = op->result_opr()->as_register(); 1658 ciKlass* k = op->klass(); 1659 Register Rtmp1 = noreg; 1660 1661 Label done; 1662 if (obj == k_RInfo) { 1663 k_RInfo = dst; 1664 } else if (obj == klass_RInfo) { 1665 klass_RInfo = dst; 1666 } 1667 if (k->is_loaded()) { 1668 select_different_registers(obj, dst, k_RInfo, klass_RInfo); 1669 } else { 1670 Rtmp1 = op->tmp3()->as_register(); 1671 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1); 1672 } 1673 1674 assert_different_registers(obj, k_RInfo, klass_RInfo); 1675 if (!k->is_loaded()) { 1676 jobject2reg_with_patching(k_RInfo, op->info_for_patch()); 1677 } else { 1678 #ifdef _LP64 1679 __ movoop(k_RInfo, k->constant_encoding()); 1680 #else 1681 k_RInfo = noreg; 1682 #endif // _LP64 1683 } 1684 assert(obj != k_RInfo, "must be different"); 1685 __ cmpptr(obj, (int32_t)NULL_WORD); 1686 if (op->profiled_method() != NULL) { 1687 ciMethod* method = op->profiled_method(); 1688 int bci = op->profiled_bci(); 1689 1690 Label profile_done; 1691 __ jcc(Assembler::notEqual, profile_done); 1692 // Object is null; update methodDataOop 1693 ciMethodData* md = method->method_data(); 1694 if (md == NULL) { 1695 bailout("out of memory building methodDataOop"); 1696 return; 1697 } 1698 ciProfileData* data = md->bci_to_data(bci); 1699 assert(data != NULL, "need data for checkcast"); 1700 assert(data->is_BitData(), "need BitData for checkcast"); 1701 Register mdo = klass_RInfo; 1702 __ movoop(mdo, md->constant_encoding()); 1703 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset())); 1704 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant()); 1705 __ orl(data_addr, header_bits); 1706 __ jmp(done); 1707 __ bind(profile_done); 1708 } else { 1709 __ jcc(Assembler::equal, done); 1710 } 1711 __ verify_oop(obj); 1712 1713 if (op->fast_check()) { 1714 // get object classo 1715 // not a safepoint as obj null check happens earlier 1716 if (k->is_loaded()) { 1717 #ifdef _LP64 1718 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 1719 #else 1720 __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding()); 1721 #endif // _LP64 1722 } else { 1723 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 1724 1725 } 1726 __ jcc(Assembler::notEqual, *stub->entry()); 1727 __ bind(done); 1728 } else { 1729 // get object class 1730 // not a safepoint as obj null check happens earlier 1731 __ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 1732 if (k->is_loaded()) { 1733 // See if we get an immediate positive hit 1734 #ifdef _LP64 1735 __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset())); 1736 #else 1737 __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding()); 1738 #endif // _LP64 1739 if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) { 1740 __ jcc(Assembler::notEqual, *stub->entry()); 1741 } else { 1742 // See if we get an immediate positive hit 1743 __ jcc(Assembler::equal, done); 1744 // check for self 1745 #ifdef _LP64 1746 __ cmpptr(klass_RInfo, k_RInfo); 1747 #else 1748 __ cmpoop(klass_RInfo, k->constant_encoding()); 1749 #endif // _LP64 1750 __ jcc(Assembler::equal, done); 1751 1752 __ push(klass_RInfo); 1753 #ifdef _LP64 1754 __ push(k_RInfo); 
1755 #else 1756 __ pushoop(k->constant_encoding()); 1757 #endif // _LP64 1758 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 1759 __ pop(klass_RInfo); 1760 __ pop(klass_RInfo); 1761 // result is a boolean 1762 __ cmpl(klass_RInfo, 0); 1763 __ jcc(Assembler::equal, *stub->entry()); 1764 } 1943 } else 1944 if (code == lir_checkcast) { 1945 Register obj = op->object()->as_register(); 1946 Register dst = op->result_opr()->as_register(); 1947 Label success; 1948 emit_typecheck_helper(op, &success, op->stub()->entry(), &success); 1949 __ bind(success); 1950 if (dst != obj) { 1951 __ mov(dst, obj); 1952 } 1953 } else 1954 if (code == lir_instanceof) { 1955 Register obj = op->object()->as_register(); 1956 Register dst = op->result_opr()->as_register(); 1957 Label success, failure, done; 1958 emit_typecheck_helper(op, &success, &failure, &failure); 1959 __ bind(failure); 1960 __ xorptr(dst, dst); 1961 __ jmpb(done); 1962 __ bind(success); 1963 __ movptr(dst, 1); 1765 1964 __ bind(done); 1766 1965 } else { 1767 // perform the fast part of the checking logic 1768 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, &done, stub->entry(), NULL); 1769 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 1770 __ push(klass_RInfo); 1771 __ push(k_RInfo); 1772 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 1773 __ pop(klass_RInfo); 1774 __ pop(k_RInfo); 1775 // result is a boolean 1776 __ cmpl(k_RInfo, 0); 1777 __ jcc(Assembler::equal, *stub->entry()); 1778 __ bind(done); 1779 } 1780 1781 } 1782 if (dst != obj) { 1783 __ mov(dst, obj); 1784 } 1785 } else if (code == lir_instanceof) { 1786 Register obj = op->object()->as_register(); 1787 Register k_RInfo = op->tmp1()->as_register(); 1788 Register klass_RInfo = op->tmp2()->as_register(); 1789 Register dst = op->result_opr()->as_register(); 1790 ciKlass* k = op->klass(); 1791 1792 Label done; 1793 Label zero; 1794 Label one; 1795 if (obj == k_RInfo) { 1796 k_RInfo = klass_RInfo; 1797 klass_RInfo = obj; 1798 } 1799 // patching may screw with our temporaries on sparc, 1800 // so let's do it before loading the class 1801 if (!k->is_loaded()) { 1802 jobject2reg_with_patching(k_RInfo, op->info_for_patch()); 1803 } else { 1804 LP64_ONLY(__ movoop(k_RInfo, k->constant_encoding())); 1805 } 1806 assert(obj != k_RInfo, "must be different"); 1807 1808 __ verify_oop(obj); 1809 if (op->fast_check()) { 1810 __ cmpptr(obj, (int32_t)NULL_WORD); 1811 __ jcc(Assembler::equal, zero); 1812 // get object class 1813 // not a safepoint as obj null check happens earlier 1814 if (LP64_ONLY(false &&) k->is_loaded()) { 1815 NOT_LP64(__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding())); 1816 k_RInfo = noreg; 1817 } else { 1818 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 1819 1820 } 1821 __ jcc(Assembler::equal, one); 1822 } else { 1823 // get object class 1824 // not a safepoint as obj null check happens earlier 1825 __ cmpptr(obj, (int32_t)NULL_WORD); 1826 __ jcc(Assembler::equal, zero); 1827 __ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 1828 1829 #ifndef _LP64 1830 if (k->is_loaded()) { 1831 // See if we get an immediate positive hit 1832 __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding()); 1833 __ jcc(Assembler::equal, one); 1834 if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() == k->super_check_offset()) { 1835 // check for self 1836 __ cmpoop(klass_RInfo, 
k->constant_encoding()); 1837 __ jcc(Assembler::equal, one); 1838 __ push(klass_RInfo); 1839 __ pushoop(k->constant_encoding()); 1840 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 1841 __ pop(klass_RInfo); 1842 __ pop(dst); 1843 __ jmp(done); 1844 } 1845 } 1846 else // next block is unconditional if LP64: 1847 #endif // LP64 1848 { 1849 assert(dst != klass_RInfo && dst != k_RInfo, "need 3 registers"); 1850 1851 // perform the fast part of the checking logic 1852 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, dst, &one, &zero, NULL); 1853 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 1854 __ push(klass_RInfo); 1855 __ push(k_RInfo); 1856 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 1857 __ pop(klass_RInfo); 1858 __ pop(dst); 1859 __ jmp(done); 1860 } 1861 } 1862 __ bind(zero); 1863 __ xorptr(dst, dst); 1864 __ jmp(done); 1865 __ bind(one); 1866 __ movptr(dst, 1); 1867 __ bind(done); 1868 } else { 1869 ShouldNotReachHere(); 1870 } 1966 ShouldNotReachHere(); 1967 } 1871 1968 1872 1969 } … … 1895 1992 assert(cmpval != addr, "cmp and addr must be in different registers"); 1896 1993 assert(newval != addr, "new value and addr must be in different registers"); 1897 if (os::is_MP()) { 1898 __ lock(); 1899 } 1994 1900 1995 if ( op->code() == lir_cas_obj) { 1901 __ cmpxchgptr(newval, Address(addr, 0)); 1902 } else if (op->code() == lir_cas_int) { 1996 #ifdef _LP64 1997 if (UseCompressedOops) { 1998 __ encode_heap_oop(cmpval); 1999 __ mov(rscratch1, newval); 2000 __ encode_heap_oop(rscratch1); 2001 if (os::is_MP()) { 2002 __ lock(); 2003 } 2004 // cmpval (rax) is implicitly used by this instruction 2005 __ cmpxchgl(rscratch1, Address(addr, 0)); 2006 } else 2007 #endif 2008 { 2009 if (os::is_MP()) { 2010 __ lock(); 2011 } 2012 __ cmpxchgptr(newval, Address(addr, 0)); 2013 } 2014 } else { 2015 assert(op->code() == lir_cas_int, "lir_cas_int expected"); 2016 if (os::is_MP()) { 2017 __ lock(); 2018 } 1903 2019 __ cmpxchgl(newval, Address(addr, 0)); 1904 } else {1905 LP64_ONLY(__ cmpxchgq(newval, Address(addr, 0)));1906 2020 } 1907 2021 #ifdef _LP64 … … 1925 2039 } 1926 2040 1927 1928 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result) { 2041 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) { 1929 2042 Assembler::Condition acond, ncond; 1930 2043 switch (condition) { … … 2017 2130 switch (code) { 2018 2131 case lir_add: { 2019 __ increment (lreg, c);2132 __ incrementl(lreg, c); 2020 2133 break; 2021 2134 } 2022 2135 case lir_sub: { 2023 __ decrement (lreg, c);2136 __ decrementl(lreg, c); 2024 2137 break; 2025 2138 } … … 3145 3258 3146 3259 if (flags & LIR_OpArrayCopy::type_check) { 3147 __ movptr(tmp, src_klass_addr); 3148 __ cmpptr(tmp, dst_klass_addr); 3260 if (UseCompressedOops) { 3261 __ movl(tmp, src_klass_addr); 3262 __ cmpl(tmp, dst_klass_addr); 3263 } else { 3264 __ movptr(tmp, src_klass_addr); 3265 __ cmpptr(tmp, dst_klass_addr); 3266 } 3149 3267 __ jcc(Assembler::notEqual, *stub->entry()); 3150 3268 } … … 3161 3279 Label known_ok, halt; 3162 3280 __ movoop(tmp, default_type->constant_encoding()); 3281 #ifdef _LP64 3282 if (UseCompressedOops) { 3283 __ encode_heap_oop(tmp); 3284 } 3285 #endif 3286 3163 3287 if (basic_type != T_OBJECT) { 3164 __ cmpptr(tmp, dst_klass_addr); 3288 3289 if (UseCompressedOops) __ cmpl(tmp, dst_klass_addr); 3290 else __ cmpptr(tmp, dst_klass_addr); 3165 3291 __ 
jcc(Assembler::notEqual, halt); 3166 __ cmpptr(tmp, src_klass_addr); 3292 if (UseCompressedOops) __ cmpl(tmp, src_klass_addr); 3293 else __ cmpptr(tmp, src_klass_addr); 3167 3294 __ jcc(Assembler::equal, known_ok); 3168 3295 } else { 3169 __ cmpptr(tmp, dst_klass_addr); 3296 if (UseCompressedOops) __ cmpl(tmp, dst_klass_addr); 3297 else __ cmpptr(tmp, dst_klass_addr); 3170 3298 __ jcc(Assembler::equal, known_ok); 3171 3299 __ cmpptr(src, dst); … … 3241 3369 3242 3370 // Update counter for all call types 3243 ciMethodData* md = method->method_data(); 3244 if (md == NULL) { 3245 bailout("out of memory building methodDataOop"); 3246 return; 3247 } 3371 ciMethodData* md = method->method_data_or_null(); 3372 assert(md != NULL, "Sanity"); 3248 3373 ciProfileData* data = md->bci_to_data(bci); 3249 3374 assert(data->is_CounterData(), "need CounterData for calls"); … … 3256 3381 // invokeinterface bytecodes 3257 3382 if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) && 3258 Tier1ProfileVirtualCalls) {3383 C1ProfileVirtualCalls) { 3259 3384 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 3260 3385 Register recv = op->recv()->as_register(); … … 3262 3387 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 3263 3388 ciKlass* known_klass = op->known_holder(); 3264 if ( Tier1OptimizeVirtualCallProfiling && known_klass != NULL) {3389 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) { 3265 3390 // We know the type that will be seen at this call site; we can 3266 3391 // statically update the methodDataOop rather than needing to do … … 3275 3400 if (known_klass->equals(receiver)) { 3276 3401 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 3277 __ add l(data_addr, DataLayout::counter_increment);3402 __ addptr(data_addr, DataLayout::counter_increment); 3278 3403 return; 3279 3404 } … … 3291 3416 __ movoop(recv_addr, known_klass->constant_encoding()); 3292 3417 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 3293 __ add l(data_addr, DataLayout::counter_increment);3418 __ addptr(data_addr, DataLayout::counter_increment); 3294 3419 return; 3295 3420 } 3296 3421 } 3297 3422 } else { 3298 __ movptr(recv, Address(recv, oopDesc::klass_offset_in_bytes()));3423 __ load_klass(recv, recv); 3299 3424 Label update_done; 3300 uint i; 3301 for (i = 0; i < VirtualCallData::row_limit(); i++) { 3302 Label next_test; 3303 // See if the receiver is receiver[n]. 
3304 __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)))); 3305 __ jcc(Assembler::notEqual, next_test); 3306 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 3307 __ addl(data_addr, DataLayout::counter_increment); 3308 __ jmp(update_done); 3309 __ bind(next_test); 3310 } 3311 3312 // Didn't find receiver; find next empty slot and fill it in 3313 for (i = 0; i < VirtualCallData::row_limit(); i++) { 3314 Label next_test; 3315 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))); 3316 __ cmpptr(recv_addr, (int32_t)NULL_WORD); 3317 __ jcc(Assembler::notEqual, next_test); 3318 __ movptr(recv_addr, recv); 3319 __ movl(Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))), DataLayout::counter_increment); 3320 __ jmp(update_done); 3321 __ bind(next_test); 3322 } 3425 type_profile_helper(mdo, md, data, recv, &update_done); 3323 3426 // Receiver did not match any saved receiver and there is no empty row for it. 3324 3427 // Increment total counter to indicate polymorphic case. 3325 __ add l(counter_addr, DataLayout::counter_increment);3428 __ addptr(counter_addr, DataLayout::counter_increment); 3326 3429 3327 3430 __ bind(update_done); … … 3329 3432 } else { 3330 3433 // Static call 3331 __ addl(counter_addr, DataLayout::counter_increment); 3332 } 3333 } 3334 3434 __ addptr(counter_addr, DataLayout::counter_increment); 3435 } 3436 } 3335 3437 3336 3438 void LIR_Assembler::emit_delay(LIR_OpDelay*) { -
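A note on the profiling change above: the new type_profile_helper() factors out the receiver-row update that emit_profile_call used to open-code, and the new typecheck paths share it. What the emitted assembly does is a two-pass search over the ReceiverTypeData rows. A minimal C++ model of that logic, under simplified assumptions (ReceiverRow, kRowLimit and kIncrement are illustrative stand-ins for the methodDataOop layout, ReceiverTypeData::row_limit() and DataLayout::counter_increment):

    #include <cstddef>
    #include <cstdint>

    struct ReceiverRow { void* receiver; intptr_t count; };
    const int kRowLimit = 2;            // stand-in for ReceiverTypeData::row_limit()
    const intptr_t kIncrement = 1;      // stand-in for DataLayout::counter_increment

    // Returns false only when every row already holds some other receiver
    // type; the caller then bumps the polymorphic-case counter instead.
    bool record_receiver(ReceiverRow* rows, void* recv) {
      for (int i = 0; i < kRowLimit; i++) {      // pass 1: receiver already seen?
        if (rows[i].receiver == recv) {
          rows[i].count += kIncrement;
          return true;
        }
      }
      for (int i = 0; i < kRowLimit; i++) {      // pass 2: claim the first empty row
        if (rows[i].receiver == NULL) {
          rows[i].receiver = recv;
          rows[i].count = kIncrement;
          return true;
        }
      }
      return false;
    }

In the generated code both passes jump to the same update_done label, which is why type_profile_helper takes that label as a parameter.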
trunk/openjdk/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 2000, 20 08, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #ifndef CPU_X86_VM_C1_LIRASSEMBLER_X86_HPP 26 #define CPU_X86_VM_C1_LIRASSEMBLER_X86_HPP 27 25 28 private: 26 29 … … 43 46 Address as_Address(LIR_Address* addr, Register tmp); 44 47 45 48 // Record the type of the receiver in ReceiverTypeData 49 void type_profile_helper(Register mdo, 50 ciMethodData *md, ciProfileData *data, 51 Register recv, Label* update_done); 46 52 public: 47 53 … … 54 60 deopt_handler_size = NOT_LP64(10) LP64_ONLY(17) 55 61 }; 62 63 #endif // CPU_X86_VM_C1_LIRASSEMBLER_X86_HPP -
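Alongside the new type_profile_helper() declaration, this header shows the mechanical change that repeats through the rest of the changeset: the incls/_*.incl precompiled-header scheme is gone, so every header gains an include guard and every .cpp file lists its real dependencies explicitly. The guard shape, with a placeholder name standing in for the per-file macro:

    #ifndef CPU_X86_VM_SOMEFILE_X86_HPP     // macro name is derived from the file path
    #define CPU_X86_VM_SOMEFILE_X86_HPP

    // ... declarations ...

    #endif // CPU_X86_VM_SOMEFILE_X86_HPP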
trunk/openjdk/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
r278 r309 1 1 /* 2 * Copyright (c) 2005, 20 09, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 # include "incls/_precompiled.incl" 26 # include "incls/_c1_LIRGenerator_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "c1/c1_Compilation.hpp" 27 #include "c1/c1_FrameMap.hpp" 28 #include "c1/c1_Instruction.hpp" 29 #include "c1/c1_LIRAssembler.hpp" 30 #include "c1/c1_LIRGenerator.hpp" 31 #include "c1/c1_Runtime1.hpp" 32 #include "c1/c1_ValueStack.hpp" 33 #include "ci/ciArray.hpp" 34 #include "ci/ciObjArrayKlass.hpp" 35 #include "ci/ciTypeArrayKlass.hpp" 36 #include "runtime/sharedRuntime.hpp" 37 #include "runtime/stubRoutines.hpp" 38 #include "vmreg_x86.inline.hpp" 27 39 28 40 #ifdef ASSERT … … 108 120 } 109 121 Constant* c = v->as_Constant(); 110 if (c && c->state () == NULL) {122 if (c && c->state_before() == NULL) { 111 123 // constants of any type can be stored directly, except for 112 124 // unloaded object constants. … … 183 195 184 196 185 void LIRGenerator::increment_counter(address counter, int step) { 197 LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) { 198 LIR_Opr r; 199 if (type == T_LONG) { 200 r = LIR_OprFact::longConst(x); 201 } else if (type == T_INT) { 202 r = LIR_OprFact::intConst(x); 203 } else { 204 ShouldNotReachHere(); 205 } 206 return r; 207 } 208 209 void LIRGenerator::increment_counter(address counter, BasicType type, int step) { 186 210 LIR_Opr pointer = new_pointer_register(); 187 211 __ move(LIR_OprFact::intptrConst(counter), pointer); 188 LIR_Address* addr = new LIR_Address(pointer, T_INT);212 LIR_Address* addr = new LIR_Address(pointer, type); 189 213 increment_counter(addr, step); 190 214 } … … 194 218 __ add((LIR_Opr)addr, LIR_OprFact::intConst(step), (LIR_Opr)addr); 195 219 } 196 197 220 198 221 void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) { … … 240 263 241 264 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) { 242 assert(x->is_ root(),"");265 assert(x->is_pinned(),""); 243 266 bool needs_range_check = true; 244 267 bool use_length = x->length() != NULL; … … 315 338 316 339 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) { 317 assert(x->is_ root(),"");340 assert(x->is_pinned(),""); 318 341 LIRItem obj(x->obj(), this); 319 342 obj.load_item(); … … 331 354 CodeEmitInfo* info_for_exception = NULL; 332 355 if (x->needs_null_check()) { 333 info_for_exception = state_for(x , x->lock_stack_before());356 info_for_exception = state_for(x); 334 357 } 335 358 // this CodeEmitInfo must not have the xhandlers because here the … … 342 365 343 366 void LIRGenerator::do_MonitorExit(MonitorExit* x) { 344 assert(x->is_ root(),"");367 assert(x->is_pinned(),""); 345 368 346 369 LIRItem obj(x->obj(), this); … … 711 734 // generate compare-and-swap; produces zero condition if swap occurs 712 735 int value_offset = sun_misc_AtomicLongCSImpl::value_offset(); 713 LIR_Opr addr = obj.result();714 __ add(addr, LIR_OprFact::intConst(value_offset), addr);736 LIR_Opr addr = new_pointer_register(); 737 __ leal(LIR_OprFact::address(new LIR_Address(obj.result(), value_offset, T_LONG)), addr); 715 738 LIR_Opr t1 = LIR_OprFact::illegalOpr; // no temp needed 716 739 LIR_Opr t2 = LIR_OprFact::illegalOpr; // no temp needed … … 719 742 // generate conditional move of boolean result 720 743 LIR_Opr result = rlock_result(x); 721 __ 
cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result );744 __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result, T_LONG); 722 745 } 723 746 … … 788 811 // generate conditional move of boolean result 789 812 LIR_Opr result = rlock_result(x); 790 __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result); 813 __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), 814 result, as_BasicType(type)); 791 815 if (type == objectType) { // Write-barrier needed for Object fields. 792 816 // Seems to be precise … … 852 876 void LIRGenerator::do_ArrayCopy(Intrinsic* x) { 853 877 assert(x->number_of_arguments() == 5, "wrong type"); 878 879 // Make all state_for calls early since they can emit code 880 CodeEmitInfo* info = state_for(x, x->state()); 881 854 882 LIRItem src(x->argument_at(0), this); 855 883 LIRItem src_pos(x->argument_at(1), this); … … 894 922 arraycopy_helper(x, &flags, &expected_type); 895 923 896 CodeEmitInfo* info = state_for(x, x->state()); // we may want to have stack (deoptimization?)897 924 __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint 898 925 } … … 974 1001 975 1002 void LIRGenerator::do_NewInstance(NewInstance* x) { 1003 #ifndef PRODUCT 976 1004 if (PrintNotLoaded && !x->klass()->is_loaded()) { 977 tty->print_cr(" ###class not loaded at new bci %d", x->bci()); 978 } 1005 tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci()); 1006 } 1007 #endif 979 1008 CodeEmitInfo* info = state_for(x, x->state()); 980 1009 LIR_Opr reg = result_register_for(x->type()); … … 1117 1146 1118 1147 // info for exceptions 1119 CodeEmitInfo* info_for_exception = state_for(x , x->state()->copy_locks());1148 CodeEmitInfo* info_for_exception = state_for(x); 1120 1149 1121 1150 CodeStub* stub; … … 1127 1156 } 1128 1157 LIR_Opr reg = rlock_result(x); 1158 LIR_Opr tmp3 = LIR_OprFact::illegalOpr; 1159 if (!x->klass()->is_loaded() || UseCompressedOops) { 1160 tmp3 = new_register(objectType); 1161 } 1129 1162 __ checkcast(reg, obj.result(), x->klass(), 1130 new_register(objectType), new_register(objectType), 1131 !x->klass()->is_loaded() ? new_register(objectType) : LIR_OprFact::illegalOpr, 1163 new_register(objectType), new_register(objectType), tmp3, 1132 1164 x->direct_compare(), info_for_exception, patching_info, stub, 1133 1165 x->profiled_method(), x->profiled_bci()); … … 1146 1178 } 1147 1179 obj.load_item(); 1148 LIR_Opr tmp = new_register(objectType); 1180 LIR_Opr tmp3 = LIR_OprFact::illegalOpr; 1181 if (!x->klass()->is_loaded() || UseCompressedOops) { 1182 tmp3 = new_register(objectType); 1183 } 1149 1184 __ instanceof(reg, obj.result(), x->klass(), 1150 tmp, new_register(objectType), LIR_OprFact::illegalOpr,1151 x->direct_compare(), patching_info );1185 new_register(objectType), new_register(objectType), tmp3, 1186 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci()); 1152 1187 } 1153 1188 … … 1189 1224 if (x->is_safepoint()) { 1190 1225 // increment backedge counter if needed 1191 increment_backedge_counter(state_for(x, x->state_before())); 1192 1226 increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci()); 1193 1227 __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before())); 1194 1228 } … … 1198 1232 LIR_Opr right = yin->result(); 1199 1233 __ cmp(lir_cond(cond), left, right); 1234 // Generate branch profiling. 
Profiling code doesn't kill flags. 1200 1235 profile_branch(x, cond); 1201 1236 move_to_phi(x->state()); -
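The cmove() calls in this file now carry an explicit BasicType, matching the LIR_Assembler::cmove signature change above, so the result move is emitted at the right width. The value being materialized is just a 0/1 flag derived from the compare-and-swap outcome; a hedged C++ analogue (the __atomic builtin is a GCC/Clang stand-in for the lock cmpxchg sequence the backend emits):

    #include <cstdint>

    // Returns 1 if *addr held 'expected' and was replaced by 'newval', else 0;
    // the generated code computes the same flag with
    // cmove(lir_cond_equal, intConst(1), intConst(0), result, T_LONG).
    int64_t cas_flag(volatile int64_t* addr, int64_t expected, int64_t newval) {
      bool swapped = __atomic_compare_exchange_n(addr, &expected, newval,
                                                 false /*strong*/,
                                                 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
      return swapped ? 1 : 0;
    }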
trunk/openjdk/hotspot/src/cpu/x86/vm/c1_LinearScan_x86.cpp
r278 r309 1 1 /* 2 * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_c1_LinearScan_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "c1/c1_Instruction.hpp" 27 #include "c1/c1_LinearScan.hpp" 28 #include "utilities/bitMap.inline.hpp" 27 29 28 30 -
trunk/openjdk/hotspot/src/cpu/x86/vm/c1_LinearScan_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 2005, 20 08, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #ifndef CPU_X86_VM_C1_LINEARSCAN_X86_HPP 26 #define CPU_X86_VM_C1_LINEARSCAN_X86_HPP 27 25 28 inline bool LinearScan::is_processed_reg_num(int reg_num) { 26 29 #ifndef _LP64 … … 29 32 assert(FrameMap::rbp_opr->cpu_regnr() == 7, "wrong assumption below"); 30 33 assert(reg_num >= 0, "invalid reg_num"); 31 32 return reg_num < 6 || reg_num > 7;33 34 #else 34 // rsp and rbp, r10, r15 (numbers 6 ancd 7) are ignored 35 // rsp and rbp, r10, r15 (numbers [12,15]) are ignored 36 // r12 (number 11) is conditional on compressed oops. 37 assert(FrameMap::r12_opr->cpu_regnr() == 11, "wrong assumption below"); 35 38 assert(FrameMap::r10_opr->cpu_regnr() == 12, "wrong assumption below"); 36 39 assert(FrameMap::r15_opr->cpu_regnr() == 13, "wrong assumption below"); … … 38 41 assert(FrameMap::rbp_opr->cpu_regnrLo() == 15, "wrong assumption below"); 39 42 assert(reg_num >= 0, "invalid reg_num"); 40 41 return reg_num < 12 || reg_num > 15;42 43 #endif // _LP64 44 return reg_num <= FrameMap::last_cpu_reg() || reg_num >= pd_nof_cpu_regs_frame_map; 43 45 } 44 46 … … 102 104 assert(cur->type() != T_FLOAT && cur->type() != T_DOUBLE, "cpu regs only"); 103 105 _first_reg = pd_first_byte_reg; 104 _last_reg = pd_last_byte_reg;106 _last_reg = FrameMap::last_byte_reg(); 105 107 return true; 106 108 } else if ((UseSSE >= 1 && cur->type() == T_FLOAT) || (UseSSE >= 2 && cur->type() == T_DOUBLE)) { … … 186 188 void allocate(); 187 189 }; 190 191 #endif // CPU_X86_VM_C1_LINEARSCAN_X86_HPP -
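The rewritten is_processed_reg_num() folds the old hard-coded holes into a single range test: everything up to FrameMap::last_cpu_reg() is an allocatable CPU register, everything at or beyond pd_nof_cpu_regs_frame_map is an XMM/FPU number, and the reserved registers (rsp, rbp, r10, r15, and conditionally r12 under compressed oops, per the asserts above) sit in the excluded gap. A small standalone model with illustrative 64-bit constants:

    // Illustrative numbering: 0..10 allocatable, 11..15 reserved, 16+ XMM/FPU.
    const int last_cpu_reg = 10;                // models FrameMap::last_cpu_reg()
    const int nof_cpu_regs_frame_map = 16;      // models pd_nof_cpu_regs_frame_map

    inline bool is_processed_reg_num(int reg_num) {
      return reg_num <= last_cpu_reg            // ordinary allocatable CPU register
          || reg_num >= nof_cpu_regs_frame_map; // not a CPU register slot at all
    }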
trunk/openjdk/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp
r278 r309 1 1 /* 2 * Copyright (c) 1999, 201 0, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_c1_MacroAssembler_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "c1/c1_MacroAssembler.hpp" 27 #include "c1/c1_Runtime1.hpp" 28 #include "classfile/systemDictionary.hpp" 29 #include "gc_interface/collectedHeap.hpp" 30 #include "interpreter/interpreter.hpp" 31 #include "oops/arrayOop.hpp" 32 #include "oops/markOop.hpp" 33 #include "runtime/basicLock.hpp" 34 #include "runtime/biasedLocking.hpp" 35 #include "runtime/os.hpp" 36 #include "runtime/stubRoutines.hpp" 27 37 28 38 int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case) { … … 132 142 } else { 133 143 eden_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case); 144 incr_allocated_bytes(noreg, var_size_in_bytes, con_size_in_bytes, t1); 134 145 } 135 146 } … … 146 157 movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype()); 147 158 } 148 149 movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass); 159 #ifdef _LP64 160 if (UseCompressedOops) { // Take care not to kill klass 161 movptr(t1, klass); 162 encode_heap_oop_not_null(t1); 163 movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1); 164 } else 165 #endif 166 { 167 movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass); 168 } 169 150 170 if (len->is_valid()) { 151 171 movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len); 152 172 } 173 #ifdef _LP64 174 else if (UseCompressedOops) { 175 xorptr(t1, t1); 176 store_klass_gap(obj, t1); 177 } 178 #endif 153 179 } 154 180 … … 210 236 void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case) { 211 237 assert(obj == rax, "obj must be in rax, for cmpxchg"); 212 assert (obj != t1 && obj != t2 && t1 != t2, "registers must be different"); // XXX really?238 assert_different_registers(obj, t1, t2); // XXX really? 213 239 assert(header_size >= 0 && object_size >= header_size, "illegal sizes"); 214 240 … … 221 247 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, 222 248 "con_size_in_bytes is not multiple of alignment"); 223 const int hdr_size_in_bytes = instanceOopDesc:: base_offset_in_bytes();249 const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize; 224 250 225 251 initialize_header(obj, klass, noreg, t1, t2); … … 308 334 assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check"); 309 335 int start_offset = offset(); 310 cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes())); 336 337 if (UseCompressedOops) { 338 load_klass(rscratch1, receiver); 339 cmpptr(rscratch1, iCache); 340 } else { 341 cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes())); 342 } 311 343 // if icache check fails, then jump to runtime routine 312 344 // Note: RECEIVER must still contain the receiver! 
… … 314 346 RuntimeAddress(SharedRuntime::get_ic_miss_stub())); 315 347 const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9); 316 assert( offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");348 assert(UseCompressedOops || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry"); 317 349 } 318 350 -
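initialize_header() above now branches on UseCompressedOops: the klass oop is narrowed with encode_heap_oop_not_null() and stored as 32 bits, and the 32-bit gap next to it holds either the array length or an explicit zero via store_klass_gap(). A sketch of the header being written, with illustrative field names rather than HotSpot's accessor offsets:

    #include <cstdint>

    // Object header layout on x86-64 with UseCompressedOops (illustrative).
    struct CompressedHeader {
      uint64_t mark;       // markOop prototype, stored first
      uint32_t klass;      // encode_heap_oop_not_null(klass), stored with movl
      uint32_t klass_gap;  // array length when len is valid, otherwise zeroed
    };
    static_assert(sizeof(CompressedHeader) == 16, "two heap words");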
trunk/openjdk/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 1999, 20 08, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 22 22 * 23 23 */ 24 25 #ifndef CPU_X86_VM_C1_MACROASSEMBLER_X86_HPP 26 #define CPU_X86_VM_C1_MACROASSEMBLER_X86_HPP 24 27 25 28 // C1_MacroAssembler contains high-level macros for C1 … … 114 117 115 118 void invalidate_registers(bool inv_rax, bool inv_rbx, bool inv_rcx, bool inv_rdx, bool inv_rsi, bool inv_rdi) PRODUCT_RETURN; 119 120 #endif // CPU_X86_VM_C1_MACROASSEMBLER_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp
r278 r309 1 1 /* 2 * Copyright (c) 1999, 201 0, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_c1_Runtime1_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "c1/c1_Defs.hpp" 27 #include "c1/c1_MacroAssembler.hpp" 28 #include "c1/c1_Runtime1.hpp" 29 #include "interpreter/interpreter.hpp" 30 #include "nativeInst_x86.hpp" 31 #include "oops/compiledICHolderOop.hpp" 32 #include "oops/oop.inline.hpp" 33 #include "prims/jvmtiExport.hpp" 34 #include "register_x86.hpp" 35 #include "runtime/sharedRuntime.hpp" 36 #include "runtime/signature.hpp" 37 #include "runtime/vframeArray.hpp" 38 #include "vmreg_x86.inline.hpp" 27 39 28 40 … … 966 978 __ verify_not_null_oop(exception_oop); 967 979 968 969 980 oop_maps = new OopMapSet(); 970 981 OopMap* oop_map = generate_oop_map(sasm, 1); … … 1026 1037 // refilling the TLAB or allocating directly from eden. 1027 1038 Label retry_tlab, try_eden; 1028 __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass) 1039 const Register thread = 1040 __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi 1029 1041 1030 1042 __ bind(retry_tlab); … … 1032 1044 // get the instance size (size is postive so movl is fine for 64bit) 1033 1045 __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); 1046 1034 1047 __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path); 1048 1035 1049 __ initialize_object(obj, klass, obj_size, 0, t1, t2); 1036 1050 __ verify_oop(obj); … … 1042 1056 // get the instance size (size is postive so movl is fine for 64bit) 1043 1057 __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); 1058 1044 1059 __ eden_allocate(obj, obj_size, 0, t1, slow_path); 1060 __ incr_allocated_bytes(thread, obj_size, 0); 1061 1045 1062 __ initialize_object(obj, klass, obj_size, 0, t1, t2); 1046 1063 __ verify_oop(obj); … … 1069 1086 break; 1070 1087 1071 #ifdef TIERED1072 1088 case counter_overflow_id: 1073 1089 { 1074 Register bci = rax ;1090 Register bci = rax, method = rbx; 1075 1091 __ enter(); 1076 OopMap* map = save_live_registers(sasm, 2);1092 OopMap* map = save_live_registers(sasm, 3); 1077 1093 // Retrieve bci 1078 1094 __ movl(bci, Address(rbp, 2*BytesPerWord)); 1079 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci); 1095 // And a pointer to the methodOop 1096 __ movptr(method, Address(rbp, 3*BytesPerWord)); 1097 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method); 1080 1098 oop_maps = new OopMapSet(); 1081 1099 oop_maps->add_gc_map(call_offset, map); … … 1085 1103 } 1086 1104 break; 1087 #endif // TIERED1088 1105 1089 1106 case new_type_array_id: … … 1132 1149 // refilling the TLAB or allocating directly from eden. 
1133 1150 Label retry_tlab, try_eden; 1134 __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx, & rdx 1151 const Register thread = 1152 __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi 1135 1153 1136 1154 __ bind(retry_tlab); 1137 1155 1138 1156 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) 1139 // since size is pos tive movl does right thing on 64bit1157 // since size is positive movl does right thing on 64bit 1140 1158 __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); 1141 1159 // since size is postive movl does right thing on 64bit … … 1164 1182 __ bind(try_eden); 1165 1183 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) 1166 // since size is pos tive movl does right thing on 64bit1184 // since size is positive movl does right thing on 64bit 1167 1185 __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); 1168 1186 // since size is postive movl does right thing on 64bit … … 1177 1195 1178 1196 __ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size 1197 __ incr_allocated_bytes(thread, arr_size, 0); 1179 1198 1180 1199 __ initialize_header(obj, klass, length, t1, t2); … … 1250 1269 Label register_finalizer; 1251 1270 Register t = rsi; 1252 __ movptr(t, Address(rax, oopDesc::klass_offset_in_bytes()));1271 __ load_klass(t, rax); 1253 1272 __ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc))); 1254 1273 __ testl(t, JVM_ACC_HAS_FINALIZER); -
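Both allocation stubs above gain an incr_allocated_bytes() call, and only on the eden branch. The split follows from the accounting model: TLAB allocations are charged to the thread in bulk when the TLAB is refilled or retired, so only a direct eden allocation has to bump the per-thread counter itself. A plain-C++ sketch with stubbed allocators (every name here is illustrative, not HotSpot's API):

    #include <cstddef>
    #include <cstdlib>

    struct Thread { size_t allocated_bytes; };

    void* tlab_allocate(Thread*, size_t)  { return NULL; }         // stub: TLAB path
    void* eden_allocate(size_t size)      { return malloc(size); } // stub: shared eden

    void* allocate(Thread* thread, size_t size) {
      if (void* p = tlab_allocate(thread, size)) {
        return p;                          // charged later, when the TLAB is retired
      }
      void* p = eden_allocate(size);
      if (p != NULL) {
        thread->allocated_bytes += size;   // the new incr_allocated_bytes() step
      }
      return p;
    }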
trunk/openjdk/hotspot/src/cpu/x86/vm/c1_globals_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 2000, 20 07, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #ifndef CPU_X86_VM_C1_GLOBALS_X86_HPP 26 #define CPU_X86_VM_C1_GLOBALS_X86_HPP 27 28 #include "utilities/globalDefinitions.hpp" 29 #include "utilities/macros.hpp" 30 25 31 // Sets the default values for platform dependent flags used by the client compiler. 26 32 // (see c1_globals.hpp) … … 36 42 define_pd_global(bool, TieredCompilation, false); 37 43 define_pd_global(intx, CompileThreshold, 1500 ); 38 define_pd_global(intx, Tier2CompileThreshold, 1500 );39 define_pd_global(intx, Tier3CompileThreshold, 2500 );40 define_pd_global(intx, Tier4CompileThreshold, 4500 );41 42 44 define_pd_global(intx, BackEdgeThreshold, 100000); 43 define_pd_global(intx, Tier2BackEdgeThreshold, 100000);44 define_pd_global(intx, Tier3BackEdgeThreshold, 100000);45 define_pd_global(intx, Tier4BackEdgeThreshold, 100000);46 45 47 46 define_pd_global(intx, OnStackReplacePercentage, 933 ); … … 68 67 69 68 define_pd_global(intx, SafepointPollOffset, 256 ); 69 70 #endif // CPU_X86_VM_C1_GLOBALS_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 2000, 20 07, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #ifndef CPU_X86_VM_C2_GLOBALS_X86_HPP 26 #define CPU_X86_VM_C2_GLOBALS_X86_HPP 27 28 #include "utilities/globalDefinitions.hpp" 29 #include "utilities/macros.hpp" 30 25 31 // Sets the default values for platform dependent flags used by the server compiler. 26 32 // (see c2_globals.hpp). Alpha-sorted. … … 40 46 #endif // CC_INTERP 41 47 define_pd_global(bool, TieredCompilation, false); 42 #ifdef TIERED43 define_pd_global(intx, CompileThreshold, 1000);44 #else45 48 define_pd_global(intx, CompileThreshold, 10000); 46 #endif // TIERED47 define_pd_global(intx, Tier2CompileThreshold, 10000);48 define_pd_global(intx, Tier3CompileThreshold, 20000);49 define_pd_global(intx, Tier4CompileThreshold, 40000);50 51 49 define_pd_global(intx, BackEdgeThreshold, 100000); 52 define_pd_global(intx, Tier2BackEdgeThreshold, 100000);53 define_pd_global(intx, Tier3BackEdgeThreshold, 100000);54 define_pd_global(intx, Tier4BackEdgeThreshold, 100000);55 50 56 51 define_pd_global(intx, OnStackReplacePercentage, 140); … … 99 94 // Ergonomics related flags 100 95 define_pd_global(bool, NeverActAsServerClassMachine, false); 96 97 #endif // CPU_X86_VM_C2_GLOBALS_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/c2_init_x86.cpp
r278 r309 1 1 /* 2 * Copyright (c) 2000, 2005, Oracle and/or its affiliates. All rights reserved.2 2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 # include "incls/_precompiled.incl" 26 # include "incls/_c2_init_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "opto/compile.hpp" 27 #include "opto/node.hpp" 27 28 28 29 // processor dependent initialization for i486 -
trunk/openjdk/hotspot/src/cpu/x86/vm/codeBuffer_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 2002, 2005, Oracle and/or its affiliates. All rights reserved.2 2 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #ifndef CPU_X86_VM_CODEBUFFER_X86_HPP 26 #define CPU_X86_VM_CODEBUFFER_X86_HPP 27 25 28 private: 26 29 void pd_initialize() {} … … 28 31 public: 29 32 void flush_bundle(bool start_new_bundle) {} 33 34 #endif // CPU_X86_VM_CODEBUFFER_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/copy_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 2003, 20 04, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #ifndef CPU_X86_VM_COPY_X86_HPP 26 #define CPU_X86_VM_COPY_X86_HPP 27 25 28 // Inline functions for memory copy and fill. 26 29 27 30 // Contains inline asm implementations 28 #include "incls/_copy_pd.inline.hpp.incl" 31 #ifdef TARGET_OS_ARCH_linux_x86 32 # include "copy_linux_x86.inline.hpp" 33 #endif 34 #ifdef TARGET_OS_ARCH_solaris_x86 35 # include "copy_solaris_x86.inline.hpp" 36 #endif 37 #ifdef TARGET_OS_ARCH_windows_x86 38 # include "copy_windows_x86.inline.hpp" 39 #endif 40 29 41 30 42 static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) { … … 59 71 (void)memset(to, 0, count); 60 72 } 73 74 #endif // CPU_X86_VM_COPY_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/cppInterpreterGenerator_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 1997, 20 07, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 22 22 * 23 23 */ 24 25 #ifndef CPU_X86_VM_CPPINTERPRETERGENERATOR_X86_HPP 26 #define CPU_X86_VM_CPPINTERPRETERGENERATOR_X86_HPP 24 27 25 28 protected: … … 46 49 const Register sender_sp, 47 50 bool native); // C++ interpreter only 51 52 #endif // CPU_X86_VM_CPPINTERPRETERGENERATOR_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp
r278 r309 1 1 /* 2 * Copyright (c) 2007, 20 09, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_cppInterpreter_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "interpreter/bytecodeHistogram.hpp" 28 #include "interpreter/cppInterpreter.hpp" 29 #include "interpreter/interpreter.hpp" 30 #include "interpreter/interpreterGenerator.hpp" 31 #include "interpreter/interpreterRuntime.hpp" 32 #include "oops/arrayOop.hpp" 33 #include "oops/methodDataOop.hpp" 34 #include "oops/methodOop.hpp" 35 #include "oops/oop.inline.hpp" 36 #include "prims/jvmtiExport.hpp" 37 #include "prims/jvmtiThreadState.hpp" 38 #include "runtime/arguments.hpp" 39 #include "runtime/deoptimization.hpp" 40 #include "runtime/frame.inline.hpp" 41 #include "runtime/interfaceSupport.hpp" 42 #include "runtime/sharedRuntime.hpp" 43 #include "runtime/stubRoutines.hpp" 44 #include "runtime/synchronizer.hpp" 45 #include "runtime/timer.hpp" 46 #include "runtime/vframeArray.hpp" 47 #include "utilities/debug.hpp" 48 #ifdef SHARK 49 #include "shark/shark_globals.hpp" 50 #endif 27 51 28 52 #ifdef CC_INTERP -
trunk/openjdk/hotspot/src/cpu/x86/vm/cppInterpreter_x86.hpp
r278 r309 23 23 */ 24 24 25 #ifndef CPU_X86_VM_CPPINTERPRETER_X86_HPP 26 #define CPU_X86_VM_CPPINTERPRETER_X86_HPP 27 25 28 26 29 protected: … … 32 35 // Max size with JVMTI 33 36 const static int InterpreterCodeSize = 168 * 1024; 37 38 #endif // CPU_X86_VM_CPPINTERPRETER_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/debug_x86.cpp
r278 r309 1 1 /* 2 * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.2 2 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_debug_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "code/codeCache.hpp" 27 #include "code/nmethod.hpp" 28 #include "runtime/frame.hpp" 29 #include "runtime/init.hpp" 30 #include "runtime/os.hpp" 31 #include "utilities/debug.hpp" 32 #include "utilities/top.hpp" 27 33 28 34 void pd_ps(frame f) {} -
trunk/openjdk/hotspot/src/cpu/x86/vm/depChecker_x86.cpp
r278 r309 1 1 /* 2 * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 # include "incls/_precompiled.incl" 26 # include "incls/_depChecker_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "compiler/disassembler.hpp" 27 #include "depChecker_x86.hpp" 27 28 28 29 // Nothing to do on i486 -
trunk/openjdk/hotspot/src/cpu/x86/vm/depChecker_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #ifndef CPU_X86_VM_DEPCHECKER_X86_HPP 26 #define CPU_X86_VM_DEPCHECKER_X86_HPP 27 25 28 // Nothing to do on i486 29 30 #endif // CPU_X86_VM_DEPCHECKER_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/disassembler_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.2 2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #ifndef CPU_X86_VM_DISASSEMBLER_X86_HPP 26 #define CPU_X86_VM_DISASSEMBLER_X86_HPP 27 25 28 static int pd_instruction_alignment() { 26 29 return 1; … … 30 33 return ""; 31 34 } 35 36 #endif // CPU_X86_VM_DISASSEMBLER_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/dump_x86_32.cpp
r278 r309 1 1 /* 2 * Copyright (c) 2004, 2008, Oracle and/or its affiliates. All rights reserved.2 2 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 # include "incls/_precompiled.incl" 26 # include "incls/_dump_x86_32.cpp.incl" 25 #include "precompiled.hpp" 26 #include "assembler_x86.inline.hpp" 27 #include "memory/compactingPermGenGen.hpp" 28 #include "memory/generation.inline.hpp" 29 #include "memory/space.inline.hpp" 27 30 28 31 -
trunk/openjdk/hotspot/src/cpu/x86/vm/dump_x86_64.cpp
r278 r309 1 1 /* 2 * Copyright (c) 2004, 2008, Oracle and/or its affiliates. All rights reserved.2 2 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 # include "incls/_precompiled.incl" 26 # include "incls/_dump_x86_64.cpp.incl" 25 #include "precompiled.hpp" 26 #include "assembler_x86.inline.hpp" 27 #include "memory/compactingPermGenGen.hpp" 28 #include "memory/generation.inline.hpp" 29 #include "memory/space.inline.hpp" 27 30 28 31 -
trunk/openjdk/hotspot/src/cpu/x86/vm/frame_x86.cpp
r278 r309 23 23 */ 24 24 25 # include "incls/_precompiled.incl" 26 # include "incls/_frame_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "interpreter/interpreter.hpp" 27 #include "memory/resourceArea.hpp" 28 #include "oops/markOop.hpp" 29 #include "oops/methodOop.hpp" 30 #include "oops/oop.inline.hpp" 31 #include "runtime/frame.inline.hpp" 32 #include "runtime/handles.inline.hpp" 33 #include "runtime/javaCalls.hpp" 34 #include "runtime/monitorChunk.hpp" 35 #include "runtime/signature.hpp" 36 #include "runtime/stubCodeGenerator.hpp" 37 #include "runtime/stubRoutines.hpp" 38 #include "vmreg_x86.inline.hpp" 39 #ifdef COMPILER1 40 #include "c1/c1_Runtime1.hpp" 41 #include "runtime/vframeArray.hpp" 42 #endif 27 43 28 44 #ifdef ASSERT … … 142 158 143 159 // Could just be some random pointer within the codeBlob 144 145 if (!sender_blob->instructions_contains(sender_pc)) return false; 160 if (!sender_blob->code_contains(sender_pc)) { 161 return false; 162 } 146 163 147 164 // We should never be able to see an adapter if the current frame is something from code cache 148 149 if ( sender_blob->is_adapter_blob()) { 165 if (sender_blob->is_adapter_blob()) { 150 166 return false; 151 167 } … … 341 357 342 358 address original_pc = nm->get_original_pc(&fr); 343 assert(nm-> code_contains(original_pc), "original PC must be in nmethod");359 assert(nm->insts_contains(original_pc), "original PC must be in nmethod"); 344 360 assert(nm->is_method_handle_return(original_pc) == is_method_handle_return, "must be"); 345 361 } -
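The sender-frame checks above switch to the renamed blob queries: the old instructions_contains() became code_contains(), and the old nmethod code_contains() became insts_contains(). The sanity test itself is simple; a hedged standalone model with stand-in types (field names are illustrative, not the CodeBlob API):

    #include <cstdint>

    struct CodeBlob {
      uint8_t* code_begin; uint8_t* code_end; bool is_adapter;
      bool code_contains(uint8_t* pc) const {
        return pc >= code_begin && pc < code_end;
      }
    };

    // A candidate sender PC must land inside the blob, and a frame in the
    // code cache should never see an adapter blob as its sender.
    bool plausible_sender(const CodeBlob* blob, uint8_t* sender_pc) {
      if (blob == NULL) return false;
      if (!blob->code_contains(sender_pc)) return false;
      if (blob->is_adapter) return false;
      return true;
    }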
trunk/openjdk/hotspot/src/cpu/x86/vm/frame_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 1997, 20 07, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 22 22 * 23 23 */ 24 25 #ifndef CPU_X86_VM_FRAME_X86_HPP 26 #define CPU_X86_VM_FRAME_X86_HPP 27 28 #include "runtime/synchronizer.hpp" 29 #include "utilities/top.hpp" 24 30 25 31 // A frame represents a physical stack frame (an activation). Frames can be … … 200 206 inline interpreterState get_interpreterState() const; 201 207 #endif // CC_INTERP 208 209 #endif // CPU_X86_VM_FRAME_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp
r278 r309 22 22 * 23 23 */ 24 25 #ifndef CPU_X86_VM_FRAME_X86_INLINE_HPP 26 #define CPU_X86_VM_FRAME_X86_INLINE_HPP 24 27 25 28 // Inline functions for Intel frames: … … 64 67 if (original_pc != NULL) { 65 68 _pc = original_pc; 66 assert(((nmethod*)_cb)-> code_contains(_pc), "original PC must be in nmethod");69 assert(((nmethod*)_cb)->insts_contains(_pc), "original PC must be in nmethod"); 67 70 _deopt_state = is_deoptimized; 68 71 } else { … … 297 300 *((oop*) map->location(rax->as_VMReg())) = obj; 298 301 } 302 303 #endif // CPU_X86_VM_FRAME_X86_INLINE_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/globalDefinitions_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 1999, 2004, Oracle and/or its affiliates. All rights reserved.2 2 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #ifndef CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP 26 #define CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP 27 25 28 const int StackAlignmentInBytes = 16; 29 30 #endif // CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/globals_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 2000, 20 09, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 22 22 * 23 23 */ 24 25 #ifndef CPU_X86_VM_GLOBALS_X86_HPP 26 #define CPU_X86_VM_GLOBALS_X86_HPP 27 28 #include "utilities/globalDefinitions.hpp" 29 #include "utilities/macros.hpp" 24 30 25 31 // Sets the default values for platform dependent flags used by the runtime system. … … 57 63 define_pd_global(intx, StackShadowPages, SOLARIS_ONLY(20) NOT_SOLARIS(6) DEBUG_ONLY(+2)); 58 64 #else 59 define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+ 1));65 define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+5)); 60 66 #endif // AMD64 61 67 … … 64 70 define_pd_global(bool, RewriteBytecodes, true); 65 71 define_pd_global(bool, RewriteFrequentPairs, true); 72 73 define_pd_global(bool, UseMembar, false); 74 75 #endif // CPU_X86_VM_GLOBALS_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/icBuffer_x86.cpp
r278 r309 1 1 /* 2 * Copyright (c) 1997, 20 06, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_icBuffer_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "assembler_x86.inline.hpp" 28 #include "code/icBuffer.hpp" 29 #include "gc_interface/collectedHeap.inline.hpp" 30 #include "interpreter/bytecodes.hpp" 31 #include "memory/resourceArea.hpp" 32 #include "nativeInst_x86.hpp" 33 #include "oops/oop.inline.hpp" 34 #include "oops/oop.inline2.hpp" 27 35 28 36 int InlineCacheBuffer::ic_stub_code_size() { -
trunk/openjdk/hotspot/src/cpu/x86/vm/icache_x86.cpp
r278 r309 1 1 /* 2 * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.2 2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_icache_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "assembler_x86.inline.hpp" 27 #include "runtime/icache.hpp" 27 28 28 29 #define __ _masm-> -
trunk/openjdk/hotspot/src/cpu/x86/vm/icache_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 1997, 20 04, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 22 22 * 23 23 */ 24 25 #ifndef CPU_X86_VM_ICACHE_X86_HPP 26 #define CPU_X86_VM_ICACHE_X86_HPP 24 27 25 28 // Interface for updating the instruction cache. Whenever the VM modifies … … 54 57 #endif // AMD64 55 58 }; 59 60 #endif // CPU_X86_VM_ICACHE_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp
r278 r309 1 1 /* 2 * Copyright (c) 1997, 201 0, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_interp_masm_x86_32.cpp.incl" 25 #include "precompiled.hpp" 26 #include "interp_masm_x86_32.hpp" 27 #include "interpreter/interpreter.hpp" 28 #include "interpreter/interpreterRuntime.hpp" 29 #include "oops/arrayOop.hpp" 30 #include "oops/markOop.hpp" 31 #include "oops/methodDataOop.hpp" 32 #include "oops/methodOop.hpp" 33 #include "prims/jvmtiExport.hpp" 34 #include "prims/jvmtiRedefineClassesTrace.hpp" 35 #include "prims/jvmtiThreadState.hpp" 36 #include "runtime/basicLock.hpp" 37 #include "runtime/biasedLocking.hpp" 38 #include "runtime/sharedRuntime.hpp" 39 #ifdef TARGET_OS_FAMILY_linux 40 # include "thread_linux.inline.hpp" 41 #endif 42 #ifdef TARGET_OS_FAMILY_solaris 43 # include "thread_solaris.inline.hpp" 44 #endif 45 #ifdef TARGET_OS_FAMILY_windows 46 # include "thread_windows.inline.hpp" 47 #endif 27 48 28 49 … … 799 820 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { 800 821 assert(ProfileInterpreter, "must be profiling interpreter"); 801 Label zero_continue;822 Label set_mdp; 802 823 push(rax); 803 824 push(rbx); … … 807 828 movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); 808 829 testptr(rax, rax); 809 jcc(Assembler::zero, zero_continue); 810 830 jcc(Assembler::zero, set_mdp); 811 831 // rbx,: method 812 832 // rsi: bcp 813 833 call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, rsi); 814 834 // rax,: mdi 815 835 // mdo is guaranteed to be non-zero here, we checked for it before the call. 816 836 movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); 817 testptr(rbx, rbx);818 jcc(Assembler::zero, zero_continue);819 837 addptr(rbx, in_bytes(methodDataOopDesc::data_offset())); 820 addptr(rbx, rax); 821 movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx); 822 823 bind(zero_continue); 838 addptr(rax, rbx); 839 bind(set_mdp); 840 movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax); 824 841 pop(rbx); 825 842 pop(rax); … … 1398 1415 } 1399 1416 } 1417 1418 // Jump if ((*counter_addr += increment) & mask) satisfies the condition. 1419 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, 1420 int increment, int mask, 1421 Register scratch, bool preloaded, 1422 Condition cond, Label* where) { 1423 if (!preloaded) { 1424 movl(scratch, counter_addr); 1425 } 1426 incrementl(scratch, increment); 1427 movl(counter_addr, scratch); 1428 andl(scratch, mask); 1429 jcc(cond, *where); 1430 } -
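increment_mask_and_jump(), added at the bottom of this file and mirrored in the 64-bit version below, is the primitive the tiered counter updates build on: load, add, store back, mask, branch. Its effect, modeled in C++ (returning the branch decision instead of jumping; the zero condition shown here is the typical overflow-check use, while the real helper takes the condition as a parameter):

    // True whenever the masked counter bits wrap to zero, i.e. periodically,
    // at a frequency set by 'mask' relative to 'increment'.
    bool increment_mask_and_jump(int* counter_addr, int increment, int mask) {
      int value = *counter_addr + increment;   // incrementl(scratch, increment)
      *counter_addr = value;                   // movl(counter_addr, scratch)
      return (value & mask) == 0;              // andl(scratch, mask); jcc(cond, *where)
    }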
trunk/openjdk/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp
r278 r309 22 22 * 23 23 */ 24 25 #ifndef CPU_X86_VM_INTERP_MASM_X86_32_HPP 26 #define CPU_X86_VM_INTERP_MASM_X86_32_HPP 27 28 #include "assembler_x86.inline.hpp" 29 #include "interpreter/invocationCounter.hpp" 24 30 25 31 // This file specializes the assembler with interpreter-specific macros … 186 192 void increment_mdp_data_at(Register mdp_in, Register reg, int constant, 187 193 bool decrement = false); 194 void increment_mask_and_jump(Address counter_addr, 195 int increment, int mask, 196 Register scratch, bool preloaded, 197 Condition cond, Label* where); 188 198 void set_mdp_flag_at(Register mdp_in, int flag_constant); 189 199 void test_mdp_data_at(Register mdp_in, int offset, Register value, … 224 234 225 235 }; 236 237 #endif // CPU_X86_VM_INTERP_MASM_X86_32_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp
r278 r309 1 1 /* 2 * Copyright (c) 2003, 201 0, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_interp_masm_x86_64.cpp.incl" 25 #include "precompiled.hpp" 26 #include "interp_masm_x86_64.hpp" 27 #include "interpreter/interpreter.hpp" 28 #include "interpreter/interpreterRuntime.hpp" 29 #include "oops/arrayOop.hpp" 30 #include "oops/markOop.hpp" 31 #include "oops/methodDataOop.hpp" 32 #include "oops/methodOop.hpp" 33 #include "prims/jvmtiExport.hpp" 34 #include "prims/jvmtiRedefineClassesTrace.hpp" 35 #include "prims/jvmtiThreadState.hpp" 36 #include "runtime/basicLock.hpp" 37 #include "runtime/biasedLocking.hpp" 38 #include "runtime/sharedRuntime.hpp" 39 #ifdef TARGET_OS_FAMILY_linux 40 # include "thread_linux.inline.hpp" 41 #endif 42 #ifdef TARGET_OS_FAMILY_solaris 43 # include "thread_solaris.inline.hpp" 44 #endif 45 #ifdef TARGET_OS_FAMILY_windows 46 # include "thread_windows.inline.hpp" 47 #endif 27 48 28 49 … … 429 450 // compiled code in threads for which the event is enabled. Check here for 430 451 // interp_only_mode if these events CAN be enabled. 431 get_thread(temp);432 452 // interp_only is an int, on little endian it is sufficient to test the byte only 433 // Is a cmpl faster (ce434 cmpb(Address( temp, JavaThread::interp_only_mode_offset()), 0);453 // Is a cmpl faster? 454 cmpb(Address(r15_thread, JavaThread::interp_only_mode_offset()), 0); 435 455 jcc(Assembler::zero, run_compiled_code); 436 456 jmp(Address(method, methodOopDesc::interpreter_entry_offset())); … … 836 856 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { 837 857 assert(ProfileInterpreter, "must be profiling interpreter"); 838 Label zero_continue;858 Label set_mdp; 839 859 push(rax); 840 860 push(rbx); … … 844 864 movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); 845 865 testptr(rax, rax); 846 jcc(Assembler::zero, zero_continue); 847 866 jcc(Assembler::zero, set_mdp); 848 867 // rbx: method 849 868 // r13: bcp 850 869 call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, r13); 851 870 // rax: mdi 852 871 // mdo is guaranteed to be non-zero here, we checked for it before the call. 853 872 movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); 854 testptr(rbx, rbx);855 jcc(Assembler::zero, zero_continue);856 873 addptr(rbx, in_bytes(methodDataOopDesc::data_offset())); 857 addptr(rbx, rax); 858 movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx); 859 860 bind(zero_continue); 874 addptr(rax, rbx); 875 bind(set_mdp); 876 movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax); 861 877 pop(rbx); 862 878 pop(rax); … … 1481 1497 } 1482 1498 } 1499 1500 // Jump if ((*counter_addr += increment) & mask) satisfies the condition. 1501 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, 1502 int increment, int mask, 1503 Register scratch, bool preloaded, 1504 Condition cond, Label* where) { 1505 if (!preloaded) { 1506 movl(scratch, counter_addr); 1507 } 1508 incrementl(scratch, increment); 1509 movl(counter_addr, scratch); 1510 andl(scratch, mask); 1511 jcc(cond, *where); 1512 } -
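The 64-bit set_method_data_pointer_for_bcp gets the same restructuring as the 32-bit version above: instead of branching around the store when there is no methodDataOop (the old zero_continue path, which also redundantly re-tested the mdo after the runtime call), the mdp is computed into rax, zero or not, and stored into the frame's mdx slot exactly once. A pseudo-C rendering under stand-in types (the field layouts here are illustrative, not HotSpot's):

    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-ins; real methodOop/methodDataOop layouts differ.
    struct MethodData { uint8_t data[64]; };
    struct Method     { MethodData* method_data; };

    // Stand-in for InterpreterRuntime::bcp_to_di (bytecode ptr -> data index).
    static intptr_t bcp_to_di(Method*, const uint8_t*) { return 16; }

    // New shape: compute the mdp (0 when no MethodData exists) and perform
    // a single store at the set_mdp join point.
    static intptr_t compute_mdp(Method* m, const uint8_t* bcp) {
      intptr_t mdp = 0;                    // rax
      MethodData* mdo = m->method_data;    // movptr(rax, method_data_offset)
      if (mdo != nullptr) {                // testptr(rax, rax); jcc(zero, set_mdp)
        intptr_t mdi = bcp_to_di(m, bcp);  // call_VM_leaf(bcp_to_di)
        mdp = (intptr_t)mdo->data + mdi;   // addptr(rbx, data_offset); addptr(rax, rbx)
      }
      return mdp;                          // movptr(frame mdx slot, rax) at set_mdp
    }

    int main() {
      MethodData md;
      Method with_md = { &md };
      Method without = { nullptr };
      std::printf("mdp(with)=%p mdp(without)=%p\n",
                  (void*)compute_mdp(&with_md, nullptr),
                  (void*)compute_mdp(&without, nullptr));
      return 0;
    }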
trunk/openjdk/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp
r278 r309 22 22 * 23 23 */ 24 25 #ifndef CPU_X86_VM_INTERP_MASM_X86_64_HPP 26 #define CPU_X86_VM_INTERP_MASM_X86_64_HPP 27 28 #include "assembler_x86.inline.hpp" 29 #include "interpreter/invocationCounter.hpp" 24 30 25 31 // This file specializes the assembler with interpreter-specific macros … 195 201 void increment_mdp_data_at(Register mdp_in, Register reg, int constant, 196 202 bool decrement = false); 203 void increment_mask_and_jump(Address counter_addr, 204 int increment, int mask, 205 Register scratch, bool preloaded, 206 Condition cond, Label* where); 197 207 void set_mdp_flag_at(Register mdp_in, int flag_constant); 198 208 void test_mdp_data_at(Register mdp_in, int offset, Register value, … 240 250 void notify_method_exit(TosState state, NotifyMethodExitMode mode); 241 251 }; 252 253 #endif // CPU_X86_VM_INTERP_MASM_X86_64_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/interpreterGenerator_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 1997, 20 09, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #ifndef CPU_X86_VM_INTERPRETERGENERATOR_X86_HPP 26 #define CPU_X86_VM_INTERPRETERGENERATOR_X86_HPP 27 25 28 26 29 // Generation of Interpreter … … 42 45 void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue); 43 46 void generate_counter_overflow(Label* do_continue); 47 48 #endif // CPU_X86_VM_INTERPRETERGENERATOR_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/interpreterRT_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 1998, 20 05, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 22 22 * 23 23 */ 24 25 #ifndef CPU_X86_VM_INTERPRETERRT_X86_HPP 26 #define CPU_X86_VM_INTERPRETERRT_X86_HPP 27 28 #include "memory/allocation.hpp" 24 29 25 30 // native method calls … … 73 78 static Register temp(); 74 79 }; 80 81 #endif // CPU_X86_VM_INTERPRETERRT_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/interpreterRT_x86_32.cpp
r278 r309 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_interpreterRT_x86_32.cpp.incl" 25 #include "precompiled.hpp" 26 #include "interpreter/interpreter.hpp" 27 #include "interpreter/interpreterRuntime.hpp" 28 #include "memory/allocation.inline.hpp" 29 #include "memory/universe.inline.hpp" 30 #include "oops/methodOop.hpp" 31 #include "oops/oop.inline.hpp" 32 #include "runtime/handles.inline.hpp" 33 #include "runtime/icache.hpp" 34 #include "runtime/interfaceSupport.hpp" 35 #include "runtime/signature.hpp" 27 36 28 37 -
trunk/openjdk/hotspot/src/cpu/x86/vm/interpreterRT_x86_64.cpp
r278 r309 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_interpreterRT_x86_64.cpp.incl" 25 #include "precompiled.hpp" 26 #include "interpreter/interpreter.hpp" 27 #include "interpreter/interpreterRuntime.hpp" 28 #include "memory/allocation.inline.hpp" 29 #include "memory/universe.inline.hpp" 30 #include "oops/methodOop.hpp" 31 #include "oops/oop.inline.hpp" 32 #include "runtime/handles.inline.hpp" 33 #include "runtime/icache.hpp" 34 #include "runtime/interfaceSupport.hpp" 35 #include "runtime/signature.hpp" 27 36 28 37 #define __ _masm-> -
trunk/openjdk/hotspot/src/cpu/x86/vm/interpreter_x86.hpp
r278 r309 23 23 */ 24 24 25 #ifndef CPU_X86_VM_INTERPRETER_X86_HPP 26 #define CPU_X86_VM_INTERPRETER_X86_HPP 27 25 28 public: 26 29 … … 45 48 return stackElementWords * i; 46 49 } 50 51 #endif // CPU_X86_VM_INTERPRETER_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp
r278 r309 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_interpreter_x86_32.cpp.incl" 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "interpreter/bytecodeHistogram.hpp" 28 #include "interpreter/interpreter.hpp" 29 #include "interpreter/interpreterGenerator.hpp" 30 #include "interpreter/interpreterRuntime.hpp" 31 #include "interpreter/templateTable.hpp" 32 #include "oops/arrayOop.hpp" 33 #include "oops/methodDataOop.hpp" 34 #include "oops/methodOop.hpp" 35 #include "oops/oop.inline.hpp" 36 #include "prims/jvmtiExport.hpp" 37 #include "prims/jvmtiThreadState.hpp" 38 #include "prims/methodHandles.hpp" 39 #include "runtime/arguments.hpp" 40 #include "runtime/deoptimization.hpp" 41 #include "runtime/frame.inline.hpp" 42 #include "runtime/sharedRuntime.hpp" 43 #include "runtime/stubRoutines.hpp" 44 #include "runtime/synchronizer.hpp" 45 #include "runtime/timer.hpp" 46 #include "runtime/vframeArray.hpp" 47 #include "utilities/debug.hpp" 48 #ifdef COMPILER1 49 #include "c1/c1_Runtime1.hpp" 50 #endif 27 51 28 52 #define __ _masm-> -
trunk/openjdk/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp
r278 r309 1 1 /* 2 * Copyright (c) 2003, 20 09, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_interpreter_x86_64.cpp.incl" 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "interpreter/bytecodeHistogram.hpp" 28 #include "interpreter/interpreter.hpp" 29 #include "interpreter/interpreterGenerator.hpp" 30 #include "interpreter/interpreterRuntime.hpp" 31 #include "interpreter/templateTable.hpp" 32 #include "oops/arrayOop.hpp" 33 #include "oops/methodDataOop.hpp" 34 #include "oops/methodOop.hpp" 35 #include "oops/oop.inline.hpp" 36 #include "prims/jvmtiExport.hpp" 37 #include "prims/jvmtiThreadState.hpp" 38 #include "prims/methodHandles.hpp" 39 #include "runtime/arguments.hpp" 40 #include "runtime/deoptimization.hpp" 41 #include "runtime/frame.inline.hpp" 42 #include "runtime/sharedRuntime.hpp" 43 #include "runtime/stubRoutines.hpp" 44 #include "runtime/synchronizer.hpp" 45 #include "runtime/timer.hpp" 46 #include "runtime/vframeArray.hpp" 47 #include "utilities/debug.hpp" 48 #ifdef COMPILER1 49 #include "c1/c1_Runtime1.hpp" 50 #endif 27 51 28 52 #define __ _masm-> -
trunk/openjdk/hotspot/src/cpu/x86/vm/javaFrameAnchor_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 2002, 20 07, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 22 22 * 23 23 */ 24 25 #ifndef CPU_X86_VM_JAVAFRAMEANCHOR_X86_HPP 26 #define CPU_X86_VM_JAVAFRAMEANCHOR_X86_HPP 24 27 25 28 private: … … 80 83 // Assert (last_Java_sp == NULL || fp == NULL) 81 84 void set_last_Java_fp(intptr_t* fp) { _last_Java_fp = fp; } 85 86 #endif // CPU_X86_VM_JAVAFRAMEANCHOR_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/jniFastGetField_x86_32.cpp
r278 r309 1 1 /* 2 * Copyright (c) 2004, 20 08, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 # include "incls/_precompiled.incl" 26 # include "incls/_jniFastGetField_x86_32.cpp.incl" 25 #include "precompiled.hpp" 26 #include "assembler_x86.inline.hpp" 27 #include "memory/resourceArea.hpp" 28 #include "prims/jniFastGetField.hpp" 29 #include "prims/jvm_misc.hpp" 30 #include "runtime/safepoint.hpp" 27 31 28 32 #define __ masm-> … … 55 59 } 56 60 ResourceMark rm; 57 BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize); 58 address fast_entry = b->instructions_begin(); 59 CodeBuffer cbuf(fast_entry, b->instructions_size()); 61 BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE*wordSize); 62 CodeBuffer cbuf(blob); 60 63 MacroAssembler* masm = new MacroAssembler(&cbuf); 64 address fast_entry = __ pc(); 61 65 62 66 Label slow; … … 136 140 #else 137 141 switch (type) { 138 case T_BOOLEAN: jni_fast_GetBooleanField_fp = (GetBooleanField_t)fast_entry; break;139 case T_BYTE: jni_fast_GetByteField_fp = (GetByteField_t)fast_entry; break;140 case T_CHAR: jni_fast_GetCharField_fp = (GetCharField_t)fast_entry; break;141 case T_SHORT: jni_fast_GetShortField_fp = (GetShortField_t)fast_entry; break;142 case T_INT: jni_fast_GetIntField_fp = (GetIntField_t)fast_entry;142 case T_BOOLEAN: jni_fast_GetBooleanField_fp = (GetBooleanField_t) fast_entry; break; 143 case T_BYTE: jni_fast_GetByteField_fp = (GetByteField_t) fast_entry; break; 144 case T_CHAR: jni_fast_GetCharField_fp = (GetCharField_t) fast_entry; break; 145 case T_SHORT: jni_fast_GetShortField_fp = (GetShortField_t) fast_entry; break; 146 case T_INT: jni_fast_GetIntField_fp = (GetIntField_t) fast_entry; break; 143 147 } 144 148 return os::win32::fast_jni_accessor_wrapper(type); … … 169 173 const char *name = "jni_fast_GetLongField"; 170 174 ResourceMark rm; 171 BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize); 172 address fast_entry = b->instructions_begin(); 173 CodeBuffer cbuf(fast_entry, b->instructions_size()); 175 BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE*wordSize); 176 CodeBuffer cbuf(blob); 174 177 MacroAssembler* masm = new MacroAssembler(&cbuf); 178 address fast_entry = __ pc(); 175 179 176 180 Label slow; … … 247 251 return fast_entry; 248 252 #else 249 jni_fast_GetLongField_fp = (GetLongField_t) fast_entry;253 jni_fast_GetLongField_fp = (GetLongField_t) fast_entry; 250 254 return os::win32::fast_jni_accessor_wrapper(T_LONG); 251 255 #endif … … 260 264 } 261 265 ResourceMark rm; 262 BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize); 263 address fast_entry = b->instructions_begin(); 264 CodeBuffer cbuf(fast_entry, b->instructions_size()); 266 BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE*wordSize); 267 CodeBuffer cbuf(blob); 265 268 MacroAssembler* masm = new MacroAssembler(&cbuf); 269 address fast_entry = __ pc(); 266 270 267 271 Label slow_with_pop, slow; … … 349 353 #else 350 354 switch (type) { 351 case T_FLOAT: jni_fast_GetFloatField_fp = (GetFloatField_t)fast_entry; break;352 case T_DOUBLE: jni_fast_GetDoubleField_fp = (GetDoubleField_t)fast_entry;355 case T_FLOAT: jni_fast_GetFloatField_fp = (GetFloatField_t) fast_entry; break; 356 case T_DOUBLE: jni_fast_GetDoubleField_fp = (GetDoubleField_t) fast_entry; break; 353 357 } 354 358 return os::win32::fast_jni_accessor_wrapper(type); -
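All three fast-accessor generators in this file (and the 64-bit twins below) stop aiming a raw CodeBuffer at blob->instructions_begin() and instead construct the CodeBuffer from the BufferBlob, taking fast_entry from the assembler's current pc(). A toy rendering of the pattern with stand-in types (not the real CodeBuffer/MacroAssembler API); the point is that the entry address now comes from the assembler, which stays correct even if the buffer places code at an offset inside the blob:

    #include <cstdio>

    // Stand-in types only; not the HotSpot API.
    struct Blob   { char code[256]; };
    struct Buffer {
      char* insts;
      explicit Buffer(Blob* b) : insts(b->code) {}  // CodeBuffer cbuf(blob);
    };
    struct Asm {
      Buffer* buf;
      explicit Asm(Buffer* b) : buf(b) {}
      char* pc() const { return buf->insts; }       // current emit position
    };

    int main() {
      Blob blob;
      Buffer cbuf(&blob);            // new style: let the buffer wrap the blob
      Asm masm(&cbuf);
      char* fast_entry = masm.pc();  // address fast_entry = __ pc();
      std::printf("entry at %p\n", (void*)fast_entry);
      return 0;
    }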
trunk/openjdk/hotspot/src/cpu/x86/vm/jniFastGetField_x86_64.cpp
r278 r309 1 1 /* 2 * Copyright (c) 2004, 20 08, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 # include "incls/_precompiled.incl" 26 # include "incls/_jniFastGetField_x86_64.cpp.incl" 25 #include "precompiled.hpp" 26 #include "assembler_x86.inline.hpp" 27 #include "memory/resourceArea.hpp" 28 #include "prims/jniFastGetField.hpp" 29 #include "prims/jvm_misc.hpp" 30 #include "runtime/safepoint.hpp" 27 31 28 32 #define __ masm-> … … 59 63 } 60 64 ResourceMark rm; 61 BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE); 62 address fast_entry = b->instructions_begin(); 63 CodeBuffer cbuf(fast_entry, b->instructions_size()); 65 BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE); 66 CodeBuffer cbuf(blob); 64 67 MacroAssembler* masm = new MacroAssembler(&cbuf); 68 address fast_entry = __ pc(); 65 69 66 70 Label slow; … … 157 161 } 158 162 ResourceMark rm; 159 BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE); 160 address fast_entry = b->instructions_begin(); 161 CodeBuffer cbuf(fast_entry, b->instructions_size()); 163 BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE); 164 CodeBuffer cbuf(blob); 162 165 MacroAssembler* masm = new MacroAssembler(&cbuf); 166 address fast_entry = __ pc(); 163 167 164 168 Label slow; -
trunk/openjdk/hotspot/src/cpu/x86/vm/jniTypes_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 1998, 20 03, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 22 22 * 23 23 */ 24 25 #ifndef CPU_X86_VM_JNITYPES_X86_HPP 26 #define CPU_X86_VM_JNITYPES_X86_HPP 27 28 #include "memory/allocation.hpp" 29 #include "oops/oop.hpp" 30 #include "prims/jni.h" 24 31 25 32 // This file holds platform-dependent routines used to write primitive jni … … 123 130 #undef _JNI_SLOT_OFFSET 124 131 }; 132 133 #endif // CPU_X86_VM_JNITYPES_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/jni_x86.h
r278 r309 1 1 /* 2 * Copyright (c) 1997, 20 09, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * -
trunk/openjdk/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp
r278 r309 1 1 /* 2 * Copyright (c) 1997, 201 0, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_methodHandles_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "interpreter/interpreter.hpp" 27 #include "memory/allocation.inline.hpp" 28 #include "prims/methodHandles.hpp" 27 29 28 30 #define __ _masm-> 31 32 #ifdef PRODUCT 33 #define BLOCK_COMMENT(str) /* nothing */ 34 #else 35 #define BLOCK_COMMENT(str) __ block_comment(str) 36 #endif 37 38 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 29 39 30 40 address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm, … … 65 75 // Verify that argslot lies within (rsp, rbp]. 66 76 Label L_ok, L_bad; 77 BLOCK_COMMENT("{ verify_argslot"); 67 78 __ cmpptr(argslot_reg, rbp); 68 79 __ jccb(Assembler::above, L_bad); … … 72 83 __ stop(error_message); 73 84 __ bind(L_ok); 85 BLOCK_COMMENT("} verify_argslot"); 74 86 } 75 87 #endif … … 81 93 // rcx: receiver method handle (must load from sp[MethodTypeForm.vmslots]) 82 94 // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted) 83 // rdx : garbage temp, blown away95 // rdx, rdi: garbage temp, blown away 84 96 85 97 Register rbx_method = rbx; … … 87 99 Register rax_mtype = rax; 88 100 Register rdx_temp = rdx; 101 Register rdi_temp = rdi; 89 102 90 103 // emit WrongMethodType path first, to enable jccb back-branch from main path 91 104 Label wrong_method_type; 92 105 __ bind(wrong_method_type); 106 Label invoke_generic_slow_path; 107 assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");; 108 __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeExact); 109 __ jcc(Assembler::notEqual, invoke_generic_slow_path); 93 110 __ push(rax_mtype); // required mtype 94 111 __ push(rcx_recv); // bad mh (1st stacked argument) … … 107 124 } 108 125 } 109 Register rbx_temp = rbx_method; // done with incoming methodOop110 126 111 127 // given the MethodType, find out where the MH argument is buried 112 __ movptr(rdx_temp, Address(rax_mtype, 113 __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rbx_temp))); 114 __ movl(rdx_temp, Address(rdx_temp, 115 __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, rbx_temp))); 116 __ movptr(rcx_recv, __ argument_address(rdx_temp)); 117 118 __ check_method_handle_type(rax_mtype, rcx_recv, rdx_temp, wrong_method_type); 119 __ jump_to_method_handle_entry(rcx_recv, rdx_temp); 128 __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rdi_temp))); 129 Register rdx_vmslots = rdx_temp; 130 __ movl(rdx_vmslots, Address(rdx_temp, __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp))); 131 __ movptr(rcx_recv, __ argument_address(rdx_vmslots)); 132 133 trace_method_handle(_masm, "invokeExact"); 134 135 __ check_method_handle_type(rax_mtype, rcx_recv, rdi_temp, wrong_method_type); 136 __ jump_to_method_handle_entry(rcx_recv, rdi_temp); 137 138 // for invokeGeneric (only), apply argument and result conversions on the fly 139 __ bind(invoke_generic_slow_path); 140 #ifdef ASSERT 141 { Label L; 142 __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeGeneric); 143 __ jcc(Assembler::equal, L); 144 __ stop("bad 
methodOop::intrinsic_id"); 145 __ bind(L); 146 } 147 #endif //ASSERT 148 Register rbx_temp = rbx_method; // don't need it now 149 150 // make room on the stack for another pointer: 151 Register rcx_argslot = rcx_recv; 152 __ lea(rcx_argslot, __ argument_address(rdx_vmslots, 1)); 153 insert_arg_slots(_masm, 2 * stack_move_unit(), _INSERT_REF_MASK, 154 rcx_argslot, rbx_temp, rdx_temp); 155 156 // load up an adapter from the calling type (Java weaves this) 157 __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rdi_temp))); 158 Register rdx_adapter = rdx_temp; 159 // __ load_heap_oop(rdx_adapter, Address(rdx_temp, java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes())); 160 // deal with old JDK versions: 161 __ lea(rdi_temp, Address(rdx_temp, __ delayed_value(java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp))); 162 __ cmpptr(rdi_temp, rdx_temp); 163 Label sorry_no_invoke_generic; 164 __ jcc(Assembler::below, sorry_no_invoke_generic); 165 166 __ load_heap_oop(rdx_adapter, Address(rdi_temp, 0)); 167 __ testptr(rdx_adapter, rdx_adapter); 168 __ jcc(Assembler::zero, sorry_no_invoke_generic); 169 __ movptr(Address(rcx_argslot, 1 * Interpreter::stackElementSize), rdx_adapter); 170 // As a trusted first argument, pass the type being called, so the adapter knows 171 // the actual types of the arguments and return values. 172 // (Generic invokers are shared among form-families of method-type.) 173 __ movptr(Address(rcx_argslot, 0 * Interpreter::stackElementSize), rax_mtype); 174 // FIXME: assert that rdx_adapter is of the right method-type. 175 __ mov(rcx, rdx_adapter); 176 trace_method_handle(_masm, "invokeGeneric"); 177 __ jump_to_method_handle_entry(rcx, rdi_temp); 178 179 __ bind(sorry_no_invoke_generic); // no invokeGeneric implementation available! 180 __ movptr(rcx_recv, Address(rcx_argslot, -1 * Interpreter::stackElementSize)); // recover original MH 181 __ push(rax_mtype); // required mtype 182 __ push(rcx_recv); // bad mh (1st stacked argument) 183 __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry())); 120 184 121 185 return entry_point; … … 165 229 // rdx[-size] = rdx[0] 166 230 // argslot -= size; 231 BLOCK_COMMENT("insert_arg_slots {"); 167 232 __ mov(rdx_temp, rsp); // source pointer for copy 168 233 __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr)); 169 234 { 170 235 Label loop; 171 __ bind(loop);236 __ BIND(loop); 172 237 // pull one word down each time through the loop 173 238 __ movptr(rbx_temp, Address(rdx_temp, 0)); … … 180 245 // Now move the argslot down, to point to the opened-up space. 181 246 __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr)); 247 BLOCK_COMMENT("} insert_arg_slots"); 182 248 } 183 249 … … 219 285 #endif 220 286 287 BLOCK_COMMENT("remove_arg_slots {"); 221 288 // Pull up everything shallower than rax_argslot. 222 289 // Then remove the excess space on the stack. … … 230 297 { 231 298 Label loop; 232 __ bind(loop);299 __ BIND(loop); 233 300 // pull one word up each time through the loop 234 301 __ movptr(rbx_temp, Address(rdx_temp, 0)); … … 243 310 // And adjust the argslot address to point at the deletion point. 
244 311 __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr)); 312 BLOCK_COMMENT("} remove_arg_slots"); 245 313 } 246 314 … … 249 317 void trace_method_handle_stub(const char* adaptername, 250 318 oop mh, 319 intptr_t* saved_regs, 251 320 intptr_t* entry_sp, 252 321 intptr_t* saved_sp, … … 257 326 printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n", 258 327 adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp); 259 if (last_sp != saved_sp )328 if (last_sp != saved_sp && last_sp != NULL) 260 329 printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp); 261 if (Verbose) print_method_handle(mh); 330 if (Verbose) { 331 printf(" reg dump: "); 332 int saved_regs_count = (entry_sp-1) - saved_regs; 333 // 32 bit: rdi rsi rbp rsp; rbx rdx rcx (*) rax 334 int i; 335 for (i = 0; i <= saved_regs_count; i++) { 336 if (i > 0 && i % 4 == 0 && i != saved_regs_count) 337 printf("\n + dump: "); 338 printf(" %d: "INTPTR_FORMAT, i, saved_regs[i]); 339 } 340 printf("\n"); 341 int stack_dump_count = 16; 342 if (stack_dump_count < (int)(saved_bp + 2 - saved_sp)) 343 stack_dump_count = (int)(saved_bp + 2 - saved_sp); 344 if (stack_dump_count > 64) stack_dump_count = 48; 345 for (i = 0; i < stack_dump_count; i += 4) { 346 printf(" dump at SP[%d] "INTPTR_FORMAT": "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT"\n", 347 i, (intptr_t) &entry_sp[i+0], entry_sp[i+0], entry_sp[i+1], entry_sp[i+2], entry_sp[i+3]); 348 } 349 print_method_handle(mh); 350 } 351 } 352 void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) { 353 if (!TraceMethodHandles) return; 354 BLOCK_COMMENT("trace_method_handle {"); 355 __ push(rax); 356 __ lea(rax, Address(rsp, wordSize*6)); // entry_sp 357 __ pusha(); 358 // arguments: 359 __ push(rbp); // interpreter frame pointer 360 __ push(rsi); // saved_sp 361 __ push(rax); // entry_sp 362 __ push(rcx); // mh 363 __ push(rcx); 364 __ movptr(Address(rsp, 0), (intptr_t) adaptername); 365 __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), 5); 366 __ popa(); 367 __ pop(rax); 368 BLOCK_COMMENT("} trace_method_handle"); 262 369 } 263 370 #endif //PRODUCT … … 279 386 } 280 387 388 //------------------------------------------------------------------------------ 389 // MethodHandles::generate_method_handle_stub 390 // 281 391 // Generate an "entry" field for a method handle. 282 392 // This determines how the method handle will respond to calls. … … 290 400 // - rdx: garbage temp, can blow away 291 401 292 Register rcx_recv = rcx;293 Register rax_argslot = rax;294 Register rbx_temp = rbx;295 Register rdx_temp = rdx;402 const Register rcx_recv = rcx; 403 const Register rax_argslot = rax; 404 const Register rbx_temp = rbx; 405 const Register rdx_temp = rdx; 296 406 297 407 // This guy is set up by prepare_to_jump_from_interpreted (from interpreted calls) 298 408 // and gen_c2i_adapter (from compiled calls): 299 Register saved_last_sp = LP64_ONLY(r13) NOT_LP64(rsi); 409 const Register saved_last_sp = LP64_ONLY(r13) NOT_LP64(rsi); 410 411 // Argument registers for _raise_exception. 412 // 32-bit: Pass first two oop/int args in registers ECX and EDX. 
413 const Register rarg0_code = LP64_ONLY(j_rarg0) NOT_LP64(rcx); 414 const Register rarg1_actual = LP64_ONLY(j_rarg1) NOT_LP64(rdx); 415 const Register rarg2_required = LP64_ONLY(j_rarg2) NOT_LP64(rdi); 416 assert_different_registers(rarg0_code, rarg1_actual, rarg2_required, saved_last_sp); 300 417 301 418 guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets"); … … 323 440 324 441 address interp_entry = __ pc(); 325 if (UseCompressedOops) __ unimplemented("UseCompressedOops"); 326 327 #ifndef PRODUCT 328 if (TraceMethodHandles) { 329 __ push(rax); __ push(rbx); __ push(rcx); __ push(rdx); __ push(rsi); __ push(rdi); 330 __ lea(rax, Address(rsp, wordSize*6)); // entry_sp 331 // arguments: 332 __ push(rbp); // interpreter frame pointer 333 __ push(rsi); // saved_sp 334 __ push(rax); // entry_sp 335 __ push(rcx); // mh 336 __ push(rcx); 337 __ movptr(Address(rsp, 0), (intptr_t)entry_name(ek)); 338 __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), 5); 339 __ pop(rdi); __ pop(rsi); __ pop(rdx); __ pop(rcx); __ pop(rbx); __ pop(rax); 340 } 341 #endif //PRODUCT 442 443 trace_method_handle(_masm, entry_name(ek)); 444 445 BLOCK_COMMENT(entry_name(ek)); 342 446 343 447 switch ((int) ek) { 344 448 case _raise_exception: 345 449 { 346 // Not a real MH entry, but rather shared code for raising an exception. 347 // Extra local arguments are pushed on stack, as required type at TOS+8, 348 // failing object (or NULL) at TOS+4, failing bytecode type at TOS. 349 // Beyond those local arguments are the PC, of course. 350 Register rdx_code = rdx_temp; 351 Register rcx_fail = rcx_recv; 352 Register rax_want = rax_argslot; 353 Register rdi_pc = rdi; 354 __ pop(rdx_code); // TOS+0 355 __ pop(rcx_fail); // TOS+4 356 __ pop(rax_want); // TOS+8 357 __ pop(rdi_pc); // caller PC 358 359 __ mov(rsp, rsi); // cut the stack back to where the caller started 360 361 // Repush the arguments as if coming from the interpreter. 362 __ push(rdx_code); 363 __ push(rcx_fail); 364 __ push(rax_want); 450 // Not a real MH entry, but rather shared code for raising an 451 // exception. Since we use a C2I adapter to set up the 452 // interpreter state, arguments are expected in compiler 453 // argument registers. 454 assert(raise_exception_method(), "must be set"); 455 address c2i_entry = raise_exception_method()->get_c2i_entry(); 456 assert(c2i_entry, "method must be linked"); 457 458 const Register rdi_pc = rax; 459 __ pop(rdi_pc); // caller PC 460 __ mov(rsp, saved_last_sp); // cut the stack back to where the caller started 365 461 366 462 Register rbx_method = rbx_temp; 367 Label no_method;463 Label L_no_method; 368 464 // FIXME: fill in _raise_exception_method with a suitable sun.dyn method 369 465 __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method)); 370 466 __ testptr(rbx_method, rbx_method); 371 __ jccb(Assembler::zero, no_method); 372 int jobject_oop_offset = 0; 467 __ jccb(Assembler::zero, L_no_method); 468 469 const int jobject_oop_offset = 0; 373 470 __ movptr(rbx_method, Address(rbx_method, jobject_oop_offset)); // dereference the jobject 374 471 __ testptr(rbx_method, rbx_method); 375 __ jccb(Assembler::zero, no_method);472 __ jccb(Assembler::zero, L_no_method); 376 473 __ verify_oop(rbx_method); 377 __ push(rdi_pc); // and restore caller PC 378 __ jmp(rbx_method_fie); 474 475 // 32-bit: push remaining arguments as if coming from the compiler. 
476 NOT_LP64(__ push(rarg2_required)); 477 478 __ push(rdi_pc); // restore caller PC 479 __ jump(ExternalAddress(c2i_entry)); // do C2I transition 379 480 380 481 // If we get here, the Java runtime did not do its job of creating the exception. 381 482 // Do something that is at least causes a valid throw from the interpreter. 382 __ bind(no_method); 383 __ pop(rax_want); 384 __ pop(rcx_fail); 385 __ push(rax_want); 386 __ push(rcx_fail); 483 __ bind(L_no_method); 484 __ push(rarg2_required); 485 __ push(rarg1_actual); 387 486 __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry())); 388 487 } … … 393 492 { 394 493 Register rbx_method = rbx_temp; 395 __ movptr(rbx_method, rcx_mh_vmtarget); // target is a methodOop494 __ load_heap_oop(rbx_method, rcx_mh_vmtarget); // target is a methodOop 396 495 __ verify_oop(rbx_method); 397 496 // same as TemplateTable::invokestatic or invokespecial, … … 450 549 Register rdx_intf = rdx_temp; 451 550 Register rbx_index = rbx_temp; 452 __ movptr(rdx_intf,rcx_mh_vmtarget);453 __ movl(rbx_index, 551 __ load_heap_oop(rdx_intf, rcx_mh_vmtarget); 552 __ movl(rbx_index, rcx_dmh_vmindex); 454 553 __ movptr(rcx_recv, __ argument_address(rax_argslot, -1)); 455 554 __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes()); … … 479 578 // Throw an exception. 480 579 // For historical reasons, it will be IncompatibleClassChangeError. 481 __ pushptr(Address(rdx_intf, java_mirror_offset)); // required interface 482 __ push(rcx_recv); // bad receiver 483 __ push((int)Bytecodes::_invokeinterface); // who is complaining? 580 __ mov(rbx_temp, rcx_recv); // rarg2_required might be RCX 581 assert_different_registers(rarg2_required, rbx_temp); 582 __ movptr(rarg2_required, Address(rdx_intf, java_mirror_offset)); // required interface 583 __ mov( rarg1_actual, rbx_temp); // bad receiver 584 __ movl( rarg0_code, (int) Bytecodes::_invokeinterface); // who is complaining? 484 585 __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); 485 586 } … … 506 607 507 608 // store bound argument into the new stack slot: 508 __ movptr(rbx_temp, rcx_bmh_argument);609 __ load_heap_oop(rbx_temp, rcx_bmh_argument); 509 610 Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type)); 510 611 if (arg_type == T_OBJECT) { … … 524 625 if (direct_to_method) { 525 626 Register rbx_method = rbx_temp; 526 __ movptr(rbx_method, rcx_mh_vmtarget);627 __ load_heap_oop(rbx_method, rcx_mh_vmtarget); 527 628 __ verify_oop(rbx_method); 528 629 __ jmp(rbx_method_fie); 529 630 } else { 530 __ movptr(rcx_recv, rcx_mh_vmtarget);631 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); 531 632 __ verify_oop(rcx_recv); 532 633 __ jump_to_method_handle_entry(rcx_recv, rdx_temp); … … 538 639 case _adapter_retype_raw: 539 640 // immediately jump to the next MH layer: 540 __ movptr(rcx_recv, rcx_mh_vmtarget);641 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); 541 642 __ verify_oop(rcx_recv); 542 643 __ jump_to_method_handle_entry(rcx_recv, rdx_temp); … … 555 656 556 657 // What class are we casting to? 557 __ movptr(rbx_klass, rcx_amh_argument); // this is a Class object!558 __ movptr(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));658 __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object! 
659 __ load_heap_oop(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes())); 559 660 560 661 Label done; 561 662 __ movptr(rdx_temp, vmarg); 562 663 __ testptr(rdx_temp, rdx_temp); 563 __ jcc b(Assembler::zero, done); // no cast if null664 __ jcc(Assembler::zero, done); // no cast if null 564 665 __ load_klass(rdx_temp, rdx_temp); 565 666 … … 576 677 __ movptr(rdx_temp, vmarg); 577 678 578 __ pushptr(rcx_amh_argument); // required class 579 __ push(rdx_temp); // bad object 580 __ push((int)Bytecodes::_checkcast); // who is complaining? 679 assert_different_registers(rarg2_required, rdx_temp); 680 __ load_heap_oop(rarg2_required, rcx_amh_argument); // required class 681 __ mov( rarg1_actual, rdx_temp); // bad object 682 __ movl( rarg0_code, (int) Bytecodes::_checkcast); // who is complaining? 581 683 __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); 582 684 583 685 __ bind(done); 584 686 // get the new MH: 585 __ movptr(rcx_recv, rcx_mh_vmtarget);687 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); 586 688 __ jump_to_method_handle_entry(rcx_recv, rdx_temp); 587 689 } … … 645 747 646 748 // get the new MH: 647 __ movptr(rcx_recv, rcx_mh_vmtarget);749 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); 648 750 // (now we are done with the old MH) 649 751 … … 720 822 } 721 823 722 __ movptr(rcx_recv, rcx_mh_vmtarget);824 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); 723 825 __ jump_to_method_handle_entry(rcx_recv, rdx_temp); 724 826 } … … 762 864 } 763 865 764 __ movptr(rcx_recv, rcx_mh_vmtarget);866 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); 765 867 __ jump_to_method_handle_entry(rcx_recv, rdx_temp); 766 868 } … … 873 975 } 874 976 875 __ movptr(rcx_recv, rcx_mh_vmtarget);977 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); 876 978 __ jump_to_method_handle_entry(rcx_recv, rdx_temp); 877 979 } … … 933 1035 __ pop(rdi); // restore temp 934 1036 935 __ movptr(rcx_recv, rcx_mh_vmtarget);1037 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); 936 1038 __ jump_to_method_handle_entry(rcx_recv, rdx_temp); 937 1039 } … … 956 1058 __ pop(rdi); // restore temp 957 1059 958 __ movptr(rcx_recv, rcx_mh_vmtarget);1060 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); 959 1061 __ jump_to_method_handle_entry(rcx_recv, rdx_temp); 960 1062 } … … 1007 1109 // Check the array type. 1008 1110 Register rbx_klass = rbx_temp; 1009 __ movptr(rbx_klass, rcx_amh_argument); // this is a Class object!1010 __ movptr(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));1111 __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object! 1112 __ load_heap_oop(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes())); 1011 1113 1012 1114 Label ok_array_klass, bad_array_klass, bad_array_length; … … 1090 1192 // Arguments are spread. Move to next method handle. 1091 1193 UNPUSH_RSI_RDI; 1092 __ movptr(rcx_recv, rcx_mh_vmtarget);1194 __ load_heap_oop(rcx_recv, rcx_mh_vmtarget); 1093 1195 __ jump_to_method_handle_entry(rcx_recv, rdx_temp); 1094 1196 1095 1197 __ bind(bad_array_klass); 1096 1198 UNPUSH_RSI_RDI; 1097 __ pushptr(Address(rdx_array_klass, java_mirror_offset)); // required type 1098 __ pushptr(vmarg); // bad array 1099 __ push((int)Bytecodes::_aaload); // who is complaining? 1199 assert(!vmarg.uses(rarg2_required), "must be different registers"); 1200 __ movptr(rarg2_required, Address(rdx_array_klass, java_mirror_offset)); // required type 1201 __ movptr(rarg1_actual, vmarg); // bad array 1202 __ movl( rarg0_code, (int) Bytecodes::_aaload); // who is complaining? 
1100 1203 __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); 1101 1204 1102 1205 __ bind(bad_array_length); 1103 1206 UNPUSH_RSI_RDI; 1104 __ push(rcx_recv); // AMH requiring a certain length 1105 __ pushptr(vmarg); // bad array 1106 __ push((int)Bytecodes::_arraylength); // who is complaining? 1207 assert(!vmarg.uses(rarg2_required), "must be different registers"); 1208 __ mov (rarg2_required, rcx_recv); // AMH requiring a certain length 1209 __ movptr(rarg1_actual, vmarg); // bad array 1210 __ movl( rarg0_code, (int) Bytecodes::_arraylength); // who is complaining? 1107 1211 __ jump(ExternalAddress(from_interpreted_entry(_raise_exception))); 1108 1212 -
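A recurring substitution throughout methodHandles_x86.cpp above is movptr -> load_heap_oop for oop-typed fields, paired with dropping the old `if (UseCompressedOops) __ unimplemented(...)` guard: the method-handle entries now work when heap oops are stored compressed. The difference, as a sketch; the zero-base, shift-by-3 decode below is an illustrative assumption, not HotSpot's exact encoding in every mode:

    #include <cstdint>
    #include <cstdio>

    static uintptr_t heap_base = 0;   // assumed zero-based compressed-oop mode
    static const int oop_shift = 3;   // assumed 8-byte object alignment

    // What load_heap_oop must do beyond a plain movptr: with compressed
    // oops, the field holds a 32-bit narrow oop that needs decoding.
    static void* load_heap_oop(const void* field, bool compressed_oops) {
      if (compressed_oops) {
        uint32_t narrow = *static_cast<const uint32_t*>(field);
        return reinterpret_cast<void*>(
            heap_base + (static_cast<uintptr_t>(narrow) << oop_shift));
      }
      return *static_cast<void* const*>(field);  // plain movptr semantics
    }

    int main() {
      uint32_t narrow_field = 0x10;          // some narrow oop value
      void*    wide_field   = &narrow_field; // some full pointer value
      std::printf("decoded=%p plain=%p\n",
                  load_heap_oop(&narrow_field, true),
                  load_heap_oop(&wide_field, false));
      return 0;
    }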
trunk/openjdk/hotspot/src/cpu/x86/vm/nativeInst_x86.cpp
r278 r309 1 1 /* 2 * Copyright (c) 1997, 20 08, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 # include "incls/_precompiled.incl" 26 # include "incls/_nativeInst_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "assembler_x86.inline.hpp" 27 #include "memory/resourceArea.hpp" 28 #include "nativeInst_x86.hpp" 29 #include "oops/oop.inline.hpp" 30 #include "runtime/handles.hpp" 31 #include "runtime/sharedRuntime.hpp" 32 #include "runtime/stubRoutines.hpp" 33 #include "utilities/ostream.hpp" 34 #ifdef COMPILER1 35 #include "c1/c1_Runtime1.hpp" 36 #endif 27 37 28 38 void NativeInstruction::wrote(int offset) { -
trunk/openjdk/hotspot/src/cpu/x86/vm/nativeInst_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 1997, 20 08, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 22 22 * 23 23 */ 24 25 #ifndef CPU_X86_VM_NATIVEINST_X86_HPP 26 #define CPU_X86_VM_NATIVEINST_X86_HPP 27 28 #include "asm/assembler.hpp" 29 #include "memory/allocation.hpp" 30 #include "runtime/icache.hpp" 31 #include "runtime/os.hpp" 32 #include "utilities/top.hpp" 24 33 25 34 // We have interfaces for the following instructions: … … 548 557 #endif // AMD64 549 558 } 559 560 #endif // CPU_X86_VM_NATIVEINST_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/registerMap_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 1998, 20 07, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … 23 23 */ 24 25 #ifndef CPU_X86_VM_REGISTERMAP_X86_HPP 26 #define CPU_X86_VM_REGISTERMAP_X86_HPP 27 25 28 // machine-dependent implementation for register maps 26 29 friend class frame; … 38 41 void pd_initialize() {} 39 42 void pd_initialize_from(const RegisterMap* map) {} 43 44 #endif // CPU_X86_VM_REGISTERMAP_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/register_definitions_x86.cpp
r278 r309 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_register_definitions_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "asm/register.hpp" 28 #include "register_x86.hpp" 29 #ifdef TARGET_ARCH_MODEL_x86_32 30 # include "interp_masm_x86_32.hpp" 31 #endif 32 #ifdef TARGET_ARCH_MODEL_x86_64 33 # include "interp_masm_x86_64.hpp" 34 #endif 27 35 28 36 REGISTER_DEFINITION(Register, noreg); -
trunk/openjdk/hotspot/src/cpu/x86/vm/register_x86.cpp
r278 r309 1 1 /* 2 * Copyright (c) 2000, 20 07, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_register_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "register_x86.hpp" 27 27 28 #ifndef AMD64 28 29 const int ConcreteRegisterImpl::max_gpr = RegisterImpl::number_of_registers; -
trunk/openjdk/hotspot/src/cpu/x86/vm/register_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 2000, 20 07, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 22 22 * 23 23 */ 24 25 #ifndef CPU_X86_VM_REGISTER_X86_HPP 26 #define CPU_X86_VM_REGISTER_X86_HPP 27 28 #include "asm/register.hpp" 29 #include "vm_version_x86.hpp" 24 30 25 31 class VMRegImpl; … … 220 226 221 227 }; 228 229 #endif // CPU_X86_VM_REGISTER_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/relocInfo_x86.cpp
r278 r309 1 1 /* 2 * Copyright (c) 1998, 20 08, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 # include "incls/_precompiled.incl" 26 # include "incls/_relocInfo_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "asm/assembler.inline.hpp" 27 #include "assembler_x86.inline.hpp" 28 #include "code/relocInfo.hpp" 29 #include "nativeInst_x86.hpp" 30 #include "oops/oop.inline.hpp" 31 #include "runtime/safepoint.hpp" 27 32 28 33 -
trunk/openjdk/hotspot/src/cpu/x86/vm/relocInfo_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 1997, 20 08, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #ifndef CPU_X86_VM_RELOCINFO_X86_HPP 26 #define CPU_X86_VM_RELOCINFO_X86_HPP 27 25 28 // machine-dependent parts of class relocInfo 26 29 private: … … 37 40 #endif 38 41 }; 42 43 #endif // CPU_X86_VM_RELOCINFO_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp
r278 r309 23 23 */ 24 24 25 #include "precompiled.hpp" 26 #ifdef COMPILER2 27 #include "asm/assembler.hpp" 28 #include "assembler_x86.inline.hpp" 29 #include "classfile/systemDictionary.hpp" 30 #include "code/vmreg.hpp" 31 #include "interpreter/interpreter.hpp" 32 #include "nativeInst_x86.hpp" 33 #include "opto/runtime.hpp" 34 #include "runtime/interfaceSupport.hpp" 35 #include "runtime/sharedRuntime.hpp" 36 #include "runtime/stubRoutines.hpp" 37 #include "runtime/vframeArray.hpp" 38 #include "utilities/globalDefinitions.hpp" 39 #include "vmreg_x86.inline.hpp" 40 #endif 25 41 26 #include "incls/_precompiled.incl"27 #include "incls/_runtime_x86_32.cpp.incl"28 42 29 43 #define __ masm-> -
trunk/openjdk/hotspot/src/cpu/x86/vm/runtime_x86_64.cpp
r278 r309 1 1 /* 2 * Copyright (c) 2003, 20 06, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 25 #include "precompiled.hpp" 26 #ifdef COMPILER2 27 #include "asm/assembler.hpp" 28 #include "assembler_x86.inline.hpp" 29 #include "classfile/systemDictionary.hpp" 30 #include "code/vmreg.hpp" 31 #include "interpreter/interpreter.hpp" 32 #include "nativeInst_x86.hpp" 33 #include "opto/runtime.hpp" 34 #include "runtime/interfaceSupport.hpp" 35 #include "runtime/sharedRuntime.hpp" 36 #include "runtime/stubRoutines.hpp" 37 #include "runtime/vframeArray.hpp" 38 #include "utilities/globalDefinitions.hpp" 39 #include "vmreg_x86.inline.hpp" 40 #endif 41 26 42 27 43 // This file should really contain the code for generating the OptoRuntime -
trunk/openjdk/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
r278 r309 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_sharedRuntime_x86_32.cpp.incl" 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "assembler_x86.inline.hpp" 28 #include "code/debugInfoRec.hpp" 29 #include "code/icBuffer.hpp" 30 #include "code/vtableStubs.hpp" 31 #include "interpreter/interpreter.hpp" 32 #include "oops/compiledICHolderOop.hpp" 33 #include "prims/jvmtiRedefineClassesTrace.hpp" 34 #include "runtime/sharedRuntime.hpp" 35 #include "runtime/vframeArray.hpp" 36 #include "vmreg_x86.inline.hpp" 37 #ifdef COMPILER1 38 #include "c1/c1_Runtime1.hpp" 39 #endif 40 #ifdef COMPILER2 41 #include "opto/runtime.hpp" 42 #endif 27 43 28 44 #define __ masm-> -
trunk/openjdk/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
r278 r309 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_sharedRuntime_x86_64.cpp.incl" 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "assembler_x86.inline.hpp" 28 #include "code/debugInfoRec.hpp" 29 #include "code/icBuffer.hpp" 30 #include "code/vtableStubs.hpp" 31 #include "interpreter/interpreter.hpp" 32 #include "oops/compiledICHolderOop.hpp" 33 #include "prims/jvmtiRedefineClassesTrace.hpp" 34 #include "runtime/sharedRuntime.hpp" 35 #include "runtime/vframeArray.hpp" 36 #include "vmreg_x86.inline.hpp" 37 #ifdef COMPILER1 38 #include "c1/c1_Runtime1.hpp" 39 #endif 40 #ifdef COMPILER2 41 #include "opto/runtime.hpp" 42 #endif 27 43 28 44 DeoptimizationBlob *SharedRuntime::_deopt_blob; -
trunk/openjdk/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp
r278 r309 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_stubGenerator_x86_32.cpp.incl" 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "assembler_x86.inline.hpp" 28 #include "interpreter/interpreter.hpp" 29 #include "nativeInst_x86.hpp" 30 #include "oops/instanceOop.hpp" 31 #include "oops/methodOop.hpp" 32 #include "oops/objArrayKlass.hpp" 33 #include "oops/oop.inline.hpp" 34 #include "prims/methodHandles.hpp" 35 #include "runtime/frame.inline.hpp" 36 #include "runtime/handles.inline.hpp" 37 #include "runtime/sharedRuntime.hpp" 38 #include "runtime/stubCodeGenerator.hpp" 39 #include "runtime/stubRoutines.hpp" 40 #include "utilities/top.hpp" 41 #ifdef TARGET_OS_FAMILY_linux 42 # include "thread_linux.inline.hpp" 43 #endif 44 #ifdef TARGET_OS_FAMILY_solaris 45 # include "thread_solaris.inline.hpp" 46 #endif 47 #ifdef TARGET_OS_FAMILY_windows 48 # include "thread_windows.inline.hpp" 49 #endif 50 #ifdef COMPILER2 51 #include "opto/runtime.hpp" 52 #endif 27 53 28 54 // Declaration and definition of StubGenerator (no .hpp file). -
trunk/openjdk/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp
r278 r309 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_stubGenerator_x86_64.cpp.incl" 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "assembler_x86.inline.hpp" 28 #include "interpreter/interpreter.hpp" 29 #include "nativeInst_x86.hpp" 30 #include "oops/instanceOop.hpp" 31 #include "oops/methodOop.hpp" 32 #include "oops/objArrayKlass.hpp" 33 #include "oops/oop.inline.hpp" 34 #include "prims/methodHandles.hpp" 35 #include "runtime/frame.inline.hpp" 36 #include "runtime/handles.inline.hpp" 37 #include "runtime/sharedRuntime.hpp" 38 #include "runtime/stubCodeGenerator.hpp" 39 #include "runtime/stubRoutines.hpp" 40 #include "utilities/top.hpp" 41 #ifdef TARGET_OS_FAMILY_linux 42 # include "thread_linux.inline.hpp" 43 #endif 44 #ifdef TARGET_OS_FAMILY_solaris 45 # include "thread_solaris.inline.hpp" 46 #endif 47 #ifdef TARGET_OS_FAMILY_windows 48 # include "thread_windows.inline.hpp" 49 #endif 50 #ifdef COMPILER2 51 #include "opto/runtime.hpp" 52 #endif 27 53 28 54 // Declaration and definition of StubGenerator (no .hpp file). … … 2172 2198 __ enter(); // required for proper stackwalking of RuntimeStub frame 2173 2199 2174 checkcast_copy_entry = __ pc();2175 BLOCK_COMMENT("Entry:");2176 2177 2200 #ifdef ASSERT 2178 2201 // caller guarantees that the arrays really are different … … 2185 2208 #endif //ASSERT 2186 2209 2187 // allocate spill slots for r13, r142188 enum {2189 saved_r13_offset,2190 saved_r14_offset,2191 saved_rbp_offset,2192 saved_rip_offset,2193 saved_rarg0_offset2194 };2195 __ subptr(rsp, saved_rbp_offset * wordSize);2196 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);2197 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);2198 2210 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx 2199 2211 // ckoff => rcx, ckval => r8 … … 2201 2213 #ifdef _WIN64 2202 2214 // last argument (#4) is on stack on Win64 2203 const int ckval_offset = saved_rarg0_offset + 4; 2204 __ movptr(ckval, Address(rsp, ckval_offset * wordSize)); 2215 __ movptr(ckval, Address(rsp, 6 * wordSize)); 2205 2216 #endif 2217 2218 // Caller of this entry point must set up the argument registers. 
2219 checkcast_copy_entry = __ pc(); 2220 BLOCK_COMMENT("Entry:"); 2221 2222 // allocate spill slots for r13, r14 2223 enum { 2224 saved_r13_offset, 2225 saved_r14_offset, 2226 saved_rbp_offset 2227 }; 2228 __ subptr(rsp, saved_rbp_offset * wordSize); 2229 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); 2230 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); 2206 2231 2207 2232 // check that int operands are properly extended to size_t … … 2418 2443 const Register dst = c_rarg2; // destination array oop 2419 2444 const Register dst_pos = c_rarg3; // destination position 2420 // elements count is on stack on Win64 2421 #ifdef _WIN64 2422 #define C_RARG4 Address(rsp, 6 * wordSize) 2445 #ifndef _WIN64 2446 const Register length = c_rarg4; 2423 2447 #else 2424 #define C_RARG4 c_rarg42448 const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64 2425 2449 #endif 2426 2450 … … 2489 2513 const Register r11_length = r11; // elements count to copy 2490 2514 const Register r10_src_klass = r10; // array klass 2491 const Register r9_dst_klass = r9; // dest array klass2492 2515 2493 2516 // if (length < 0) return -1; 2494 __ movl(r11_length, C_RARG4);// length (elements count, 32-bits value)2517 __ movl(r11_length, length); // length (elements count, 32-bits value) 2495 2518 __ testl(r11_length, r11_length); 2496 2519 __ jccb(Assembler::negative, L_failed_0); … … 2499 2522 #ifdef ASSERT 2500 2523 // assert(src->klass() != NULL); 2501 BLOCK_COMMENT("assert klasses not null"); 2502 { Label L1, L2; 2524 { 2525 BLOCK_COMMENT("assert klasses not null {"); 2526 Label L1, L2; 2503 2527 __ testptr(r10_src_klass, r10_src_klass); 2504 2528 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL … … 2506 2530 __ stop("broken null klass"); 2507 2531 __ bind(L2); 2508 __ load_klass(r 9_dst_klass, dst);2509 __ cmpq(r 9_dst_klass, 0);2532 __ load_klass(rax, dst); 2533 __ cmpq(rax, 0); 2510 2534 __ jcc(Assembler::equal, L1); // this would be broken also 2511 BLOCK_COMMENT(" assertdone");2535 BLOCK_COMMENT("} assert klasses not null done"); 2512 2536 } 2513 2537 #endif … … 2521 2545 // 2522 2546 2523 int lh_offset = klassOopDesc::header_size() * HeapWordSize + 2524 Klass::layout_helper_offset_in_bytes(); 2547 const int lh_offset = klassOopDesc::header_size() * HeapWordSize + 2548 Klass::layout_helper_offset_in_bytes(); 2549 2550 // Handle objArrays completely differently... 2551 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2552 __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh); 2553 __ jcc(Assembler::equal, L_objArray); 2554 2555 // if (src->klass() != dst->klass()) return -1; 2556 __ load_klass(rax, dst); 2557 __ cmpq(r10_src_klass, rax); 2558 __ jcc(Assembler::notEqual, L_failed); 2525 2559 2526 2560 const Register rax_lh = rax; // layout helper 2527 2528 2561 __ movl(rax_lh, Address(r10_src_klass, lh_offset)); 2529 2530 // Handle objArrays completely differently...2531 jint objArray_lh = Klass::array_layout_helper(T_OBJECT);2532 __ cmpl(rax_lh, objArray_lh);2533 __ jcc(Assembler::equal, L_objArray);2534 2535 // if (src->klass() != dst->klass()) return -1;2536 __ load_klass(r9_dst_klass, dst);2537 __ cmpq(r10_src_klass, r9_dst_klass);2538 __ jcc(Assembler::notEqual, L_failed);2539 2562 2540 2563 // if (!src->is_Array()) return -1; … … 2544 2567 // At this point, it is known to be a typeArray (array_tag 0x3). 
2545 2568 #ifdef ASSERT 2546 { Label L; 2569 { 2570 BLOCK_COMMENT("assert primitive array {"); 2571 Label L; 2547 2572 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 2548 2573 __ jcc(Assembler::greaterEqual, L); 2549 2574 __ stop("must be a primitive array"); 2550 2575 __ bind(L); 2576 BLOCK_COMMENT("} assert primitive array done"); 2551 2577 } 2552 2578 #endif … … 2606 2632 __ BIND(L_copy_longs); 2607 2633 #ifdef ASSERT 2608 { Label L; 2634 { 2635 BLOCK_COMMENT("assert long copy {"); 2636 Label L; 2609 2637 __ cmpl(rax_elsize, LogBytesPerLong); 2610 2638 __ jcc(Assembler::equal, L); 2611 2639 __ stop("must be long copy, but elsize is wrong"); 2612 2640 __ bind(L); 2641 BLOCK_COMMENT("} assert long copy done"); 2613 2642 } 2614 2643 #endif … … 2620 2649 // objArrayKlass 2621 2650 __ BIND(L_objArray); 2622 // live at this point: r10_src_klass, src[_pos], dst[_pos]2651 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos] 2623 2652 2624 2653 Label L_plain_copy, L_checkcast_copy; 2625 2654 // test array classes for subtyping 2626 __ load_klass(r 9_dst_klass, dst);2627 __ cmpq(r10_src_klass, r 9_dst_klass); // usual case is exact equality2655 __ load_klass(rax, dst); 2656 __ cmpq(r10_src_klass, rax); // usual case is exact equality 2628 2657 __ jcc(Assembler::notEqual, L_checkcast_copy); 2629 2658 … … 2641 2670 2642 2671 __ BIND(L_checkcast_copy); 2643 // live at this point: r10_src_klass, !r11_length2672 // live at this point: r10_src_klass, r11_length, rax (dst_klass) 2644 2673 { 2645 // assert(r11_length == C_RARG4); // will reload from here2646 Register r11_dst_klass = r11;2647 __ load_klass(r11_dst_klass, dst);2648 2649 2674 // Before looking at dst.length, make sure dst is also an objArray. 2650 __ cmpl(Address(r 11_dst_klass, lh_offset), objArray_lh);2675 __ cmpl(Address(rax, lh_offset), objArray_lh); 2651 2676 __ jcc(Assembler::notEqual, L_failed); 2652 2677 2653 2678 // It is safe to examine both src.length and dst.length. 2654 #ifndef _WIN642655 arraycopy_range_checks(src, src_pos, dst, dst_pos, C_RARG4,2656 rax, L_failed);2657 #else2658 __ movl(r11_length, C_RARG4); // reload2659 2679 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2660 2680 rax, L_failed); 2681 2682 const Register r11_dst_klass = r11; 2661 2683 __ load_klass(r11_dst_klass, dst); // reload 2662 #endif2663 2684 2664 2685 // Marshal the base address arguments now, freeing registers. … … 2667 2688 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2668 2689 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2669 __ movl(count, C_RARG4);// length (reloaded)2690 __ movl(count, length); // length (reloaded) 2670 2691 Register sco_temp = c_rarg3; // this register is free now 2671 2692 assert_different_registers(from, to, count, sco_temp, … … 2674 2695 2675 2696 // Generate the type check. 
2676 int sco_offset = (klassOopDesc::header_size() * HeapWordSize +2677 Klass::super_check_offset_offset_in_bytes());2697 const int sco_offset = (klassOopDesc::header_size() * HeapWordSize + 2698 Klass::super_check_offset_offset_in_bytes()); 2678 2699 __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 2679 2700 assert_clean_int(sco_temp, rax); … … 2684 2705 objArrayKlass::element_klass_offset_in_bytes()); 2685 2706 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset)); 2686 __ movl( sco_temp, Address(r11_dst_klass, sco_offset));2707 __ movl( sco_temp, Address(r11_dst_klass, sco_offset)); 2687 2708 assert_clean_int(sco_temp, rax); 2688 2709 2689 2710 // the checkcast_copy loop needs two extra arguments: 2690 2711 assert(c_rarg3 == sco_temp, "#3 already in place"); 2691 __ movptr(C_RARG4, r11_dst_klass); // dst.klass.element_klass 2712 // Set up arguments for checkcast_copy_entry. 2713 setup_arg_regs(4); 2714 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris 2692 2715 __ jump(RuntimeAddress(checkcast_copy_entry)); 2693 2716 } … … 2701 2724 return start; 2702 2725 } 2703 2704 #undef length_arg2705 2726 2706 2727 void generate_arraycopy_stubs() { -
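The checkcast-copy rework above moves the checkcast_copy_entry label after setup_arg_regs, so internal callers such as generic_arraycopy must now load the argument registers themselves (note the new setup_arg_regs(4) plus passing the element klass in r8) before jumping to the entry. For orientation, the element-wise contract that entry implements, as a self-contained toy sketch (the one-field klass walk and the return convention are simplifications of the real stub):

    #include <cstddef>
    #include <cstdio>

    struct Klass { const Klass* super; };  // toy one-level-at-a-time hierarchy
    struct Obj   { const Klass* klass; };

    static bool is_subtype_of(const Klass* k, const Klass* target) {
      for (; k != nullptr; k = k->super)   // walk the (toy) super chain
        if (k == target) return true;
      return false;
    }

    // Copy oops one at a time, type-checking each against the destination
    // array's element klass; stop at the first failing element.
    static ptrdiff_t checkcast_copy(Obj** from, Obj** to, size_t count,
                                    const Klass* dst_elem_klass) {
      for (size_t i = 0; i < count; i++) {
        Obj* elem = from[i];
        if (elem != nullptr && !is_subtype_of(elem->klass, dst_elem_klass))
          return (ptrdiff_t)i;             // partial copy: report where we stopped
        to[i] = elem;
      }
      return -1;                           // everything copied
    }

    int main() {
      Klass base{nullptr}, derived{&base}, other{nullptr};
      Obj a{&derived}, b{&other};
      Obj* src[2] = {&a, &b};
      Obj* dst[2] = {nullptr, nullptr};
      // Copying into an array of `base`: element 1 (klass `other`) must fail.
      std::printf("stopped at %td\n", checkcast_copy(src, dst, 2, &base));
      return 0;
    }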
trunk/openjdk/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.cpp
r278 r309 1 1 /* 2 * Copyright (c) 1997, 20 08, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_stubRoutines_x86_32.cpp.incl" 25 #include "precompiled.hpp" 26 #include "runtime/deoptimization.hpp" 27 #include "runtime/frame.inline.hpp" 28 #include "runtime/stubRoutines.hpp" 29 #ifdef TARGET_OS_FAMILY_linux 30 # include "thread_linux.inline.hpp" 31 #endif 32 #ifdef TARGET_OS_FAMILY_solaris 33 # include "thread_solaris.inline.hpp" 34 #endif 35 #ifdef TARGET_OS_FAMILY_windows 36 # include "thread_windows.inline.hpp" 37 #endif 27 38 28 39 // Implementation of the platform-specific part of StubRoutines - for -
trunk/openjdk/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.hpp
r278 r309 23 23 */ 24 24 25 #ifndef CPU_X86_VM_STUBROUTINES_X86_32_HPP 26 #define CPU_X86_VM_STUBROUTINES_X86_32_HPP 27 25 28 // This file holds the platform specific parts of the StubRoutines 26 29 // definition. See stubRoutines.hpp for a description on how to … … 34 37 // MethodHandles adapters 35 38 enum method_handles_platform_dependent_constants { 36 method_handles_adapters_code_size = 500039 method_handles_adapters_code_size = 10000 37 40 }; 38 41 … … 61 64 static bool returns_to_call_stub(address return_pc) { return (return_pc == _call_stub_return_address) || 62 65 return_pc == x86::get_call_stub_compiled_return(); } 66 67 #endif // CPU_X86_VM_STUBROUTINES_X86_32_HPP -
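The #ifndef/#define/#endif lines here are one instance of a convention this changeset applies to every platform header: now that headers are included directly instead of through the generated incls/ files, each one carries a guard named after its path. The shape of the pattern (file name illustrative):

    #ifndef CPU_X86_VM_EXAMPLE_X86_HPP
    #define CPU_X86_VM_EXAMPLE_X86_HPP

    // declarations ...

    #endif // CPU_X86_VM_EXAMPLE_X86_HPP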
trunk/openjdk/hotspot/src/cpu/x86/vm/stubRoutines_x86_64.cpp
r278 r309 1 1 /* 2 * Copyright (c) 2003, 20 08, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_stubRoutines_x86_64.cpp.incl" 25 #include "precompiled.hpp" 26 #include "runtime/deoptimization.hpp" 27 #include "runtime/frame.inline.hpp" 28 #include "runtime/stubRoutines.hpp" 29 #ifdef TARGET_OS_FAMILY_linux 30 # include "thread_linux.inline.hpp" 31 #endif 32 #ifdef TARGET_OS_FAMILY_solaris 33 # include "thread_solaris.inline.hpp" 34 #endif 35 #ifdef TARGET_OS_FAMILY_windows 36 # include "thread_windows.inline.hpp" 37 #endif 27 38 28 39 // Implementation of the platform-specific part of StubRoutines - for -
trunk/openjdk/hotspot/src/cpu/x86/vm/stubRoutines_x86_64.hpp
r278 r309 23 23 */ 24 24 25 #ifndef CPU_X86_VM_STUBROUTINES_X86_64_HPP 26 #define CPU_X86_VM_STUBROUTINES_X86_64_HPP 27 25 28 // This file holds the platform specific parts of the StubRoutines 26 29 // definition. See stubRoutines.hpp for a description on how to … … 36 39 // MethodHandles adapters 37 40 enum method_handles_platform_dependent_constants { 38 method_handles_adapters_code_size = 1300041 method_handles_adapters_code_size = 40000 39 42 }; 40 43 … … 114 117 } 115 118 }; 119 120 #endif // CPU_X86_VM_STUBROUTINES_X86_64_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 1997, 20 07, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #ifndef CPU_X86_VM_TEMPLATEINTERPRETERGENERATOR_X86_HPP 26 #define CPU_X86_VM_TEMPLATEINTERPRETERGENERATOR_X86_HPP 27 25 28 protected: 26 29 … … 28 31 29 32 // address generate_asm_interpreter_entry(bool synchronized); 33 34 #endif // CPU_X86_VM_TEMPLATEINTERPRETERGENERATOR_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/templateInterpreter_x86.hpp
r278 r309 23 23 */ 24 24 25 #ifndef CPU_X86_VM_TEMPLATEINTERPRETER_X86_HPP 26 #define CPU_X86_VM_TEMPLATEINTERPRETER_X86_HPP 27 25 28 26 29 protected: … … 36 39 const static int InterpreterCodeSize = 168 * 1024; 37 40 #endif // AMD64 41 42 #endif // CPU_X86_VM_TEMPLATEINTERPRETER_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
r278 r309 1 1 /* 2 * Copyright (c) 1997, 201 0, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_templateInterpreter_x86_32.cpp.incl" 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "interpreter/bytecodeHistogram.hpp" 28 #include "interpreter/interpreter.hpp" 29 #include "interpreter/interpreterGenerator.hpp" 30 #include "interpreter/interpreterRuntime.hpp" 31 #include "interpreter/templateTable.hpp" 32 #include "oops/arrayOop.hpp" 33 #include "oops/methodDataOop.hpp" 34 #include "oops/methodOop.hpp" 35 #include "oops/oop.inline.hpp" 36 #include "prims/jvmtiExport.hpp" 37 #include "prims/jvmtiThreadState.hpp" 38 #include "runtime/arguments.hpp" 39 #include "runtime/deoptimization.hpp" 40 #include "runtime/frame.inline.hpp" 41 #include "runtime/sharedRuntime.hpp" 42 #include "runtime/stubRoutines.hpp" 43 #include "runtime/synchronizer.hpp" 44 #include "runtime/timer.hpp" 45 #include "runtime/vframeArray.hpp" 46 #include "utilities/debug.hpp" 27 47 28 48 #define __ _masm-> … … 360 380 // 361 381 void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) { 362 363 const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset()); 364 const Address backedge_counter (rbx, methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset()); 365 366 if (ProfileInterpreter) { // %%% Merge this into methodDataOop 367 __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset())); 368 } 369 // Update standard invocation counters 370 __ movl(rax, backedge_counter); // load backedge counter 371 372 __ incrementl(rcx, InvocationCounter::count_increment); 373 __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits 374 375 __ movl(invocation_counter, rcx); // save invocation count 376 __ addl(rcx, rax); // add both counters 377 378 // profile_method is non-null only for interpreted method so 379 // profile_method != NULL == !native_call 380 // BytecodeInterpreter only calls for native so code is elided. 381 382 if (ProfileInterpreter && profile_method != NULL) { 383 // Test to see if we should create a method data oop 382 const Address invocation_counter(rbx, in_bytes(methodOopDesc::invocation_counter_offset()) + 383 in_bytes(InvocationCounter::counter_offset())); 384 // Note: In tiered we increment either counters in methodOop or in MDO depending if we're profiling or not. 385 if (TieredCompilation) { 386 int increment = InvocationCounter::count_increment; 387 int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift; 388 Label no_mdo, done; 389 if (ProfileInterpreter) { 390 // Are we profiling? 391 __ movptr(rax, Address(rbx, methodOopDesc::method_data_offset())); 392 __ testptr(rax, rax); 393 __ jccb(Assembler::zero, no_mdo); 394 // Increment counter in the MDO 395 const Address mdo_invocation_counter(rax, in_bytes(methodDataOopDesc::invocation_counter_offset()) + 396 in_bytes(InvocationCounter::counter_offset())); 397 __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow); 398 __ jmpb(done); 399 } 400 __ bind(no_mdo); 401 // Increment counter in methodOop (we don't need to load it, it's in rcx). 
402 __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow); 403 __ bind(done); 404 } else { 405 const Address backedge_counter (rbx, methodOopDesc::backedge_counter_offset() + 406 InvocationCounter::counter_offset()); 407 408 if (ProfileInterpreter) { // %%% Merge this into methodDataOop 409 __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset())); 410 } 411 // Update standard invocation counters 412 __ movl(rax, backedge_counter); // load backedge counter 413 414 __ incrementl(rcx, InvocationCounter::count_increment); 415 __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits 416 417 __ movl(invocation_counter, rcx); // save invocation count 418 __ addl(rcx, rax); // add both counters 419 420 // profile_method is non-null only for interpreted method so 421 // profile_method != NULL == !native_call 422 // BytecodeInterpreter only calls for native so code is elided. 423 424 if (ProfileInterpreter && profile_method != NULL) { 425 // Test to see if we should create a method data oop 426 __ cmp32(rcx, 427 ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit)); 428 __ jcc(Assembler::less, *profile_method_continue); 429 430 // if no method data exists, go to profile_method 431 __ test_method_data_pointer(rax, *profile_method); 432 } 433 384 434 __ cmp32(rcx, 385 ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit)); 386 __ jcc(Assembler::less, *profile_method_continue); 387 388 // if no method data exists, go to profile_method 389 __ test_method_data_pointer(rax, *profile_method); 390 } 391 392 __ cmp32(rcx, 393 ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit)); 394 __ jcc(Assembler::aboveEqual, *overflow); 395 435 ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit)); 436 __ jcc(Assembler::aboveEqual, *overflow); 437 } 396 438 } 397 439 … … 1326 1368 // We have decided to profile this method in the interpreter 1327 1369 __ bind(profile_method); 1328 1329 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi, true); 1330 1331 __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop 1332 __ movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); 1333 __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax); 1334 __ test_method_data_pointer(rax, profile_method_continue); 1335 __ addptr(rax, in_bytes(methodDataOopDesc::data_offset())); 1336 __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax); 1370 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method)); 1371 __ set_method_data_pointer_for_bcp(); 1372 __ get_method(rbx); 1337 1373 __ jmp(profile_method_continue); 1338 1374 } -
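The tiered branch above folds the old load/increment/compare sequence into increment_mask_and_jump, which bumps a counter and branches once the masked bits wrap to zero, i.e. every 2^Tier0InvokeNotifyFreqLog invocations. Roughly, as a sketch of the macro-assembler helper rather than a verbatim copy:

    // Increment the counter at 'addr' by 'increment'; if (counter & mask)
    // is zero afterwards, take the 'where' branch. 'preloaded' means
    // 'scratch' already holds the counter (the methodOop case above,
    // where the caller loaded rcx earlier).
    void increment_mask_and_jump(Address addr, int increment, int mask,
                                 Register scratch, bool preloaded,
                                 Condition cond, Label* where) {
      if (!preloaded) {
        movl(scratch, addr);            // load the current counter
      }
      incrementl(scratch, increment);   // count_increment skips status bits
      movl(addr, scratch);              // store the new value back
      andl(scratch, mask);              // isolate the notification window
      jcc(cond, *where);                // cond == zero -> overflow/notify
    }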
trunk/openjdk/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
r278 r309 1 1 /* 2 * Copyright (c) 2003, 201 0, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_interpreter_x86_64.cpp.incl" 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "interpreter/bytecodeHistogram.hpp" 28 #include "interpreter/interpreter.hpp" 29 #include "interpreter/interpreterGenerator.hpp" 30 #include "interpreter/interpreterRuntime.hpp" 31 #include "interpreter/templateTable.hpp" 32 #include "oops/arrayOop.hpp" 33 #include "oops/methodDataOop.hpp" 34 #include "oops/methodOop.hpp" 35 #include "oops/oop.inline.hpp" 36 #include "prims/jvmtiExport.hpp" 37 #include "prims/jvmtiThreadState.hpp" 38 #include "runtime/arguments.hpp" 39 #include "runtime/deoptimization.hpp" 40 #include "runtime/frame.inline.hpp" 41 #include "runtime/sharedRuntime.hpp" 42 #include "runtime/stubRoutines.hpp" 43 #include "runtime/synchronizer.hpp" 44 #include "runtime/timer.hpp" 45 #include "runtime/vframeArray.hpp" 46 #include "utilities/debug.hpp" 27 47 28 48 #define __ _masm-> … … 311 331 Label* profile_method, 312 332 Label* profile_method_continue) { 313 314 const Address invocation_counter(rbx, 315 methodOopDesc::invocation_counter_offset() + 333 const Address invocation_counter(rbx, in_bytes(methodOopDesc::invocation_counter_offset()) + 334 in_bytes(InvocationCounter::counter_offset())); 335 // Note: In tiered we increment either counters in methodOop or in MDO depending if we're profiling or not. 336 if (TieredCompilation) { 337 int increment = InvocationCounter::count_increment; 338 int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift; 339 Label no_mdo, done; 340 if (ProfileInterpreter) { 341 // Are we profiling? 342 __ movptr(rax, Address(rbx, methodOopDesc::method_data_offset())); 343 __ testptr(rax, rax); 344 __ jccb(Assembler::zero, no_mdo); 345 // Increment counter in the MDO 346 const Address mdo_invocation_counter(rax, in_bytes(methodDataOopDesc::invocation_counter_offset()) + 347 in_bytes(InvocationCounter::counter_offset())); 348 __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow); 349 __ jmpb(done); 350 } 351 __ bind(no_mdo); 352 // Increment counter in methodOop (we don't need to load it, it's in ecx). 
353 __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow); 354 __ bind(done); 355 } else { 356 const Address backedge_counter(rbx, 357 methodOopDesc::backedge_counter_offset() + 316 358 InvocationCounter::counter_offset()); 317 const Address backedge_counter(rbx, 318 methodOopDesc::backedge_counter_offset() + 319 InvocationCounter::counter_offset()); 320 321 if (ProfileInterpreter) { // %%% Merge this into methodDataOop 322 __ incrementl(Address(rbx, 323 methodOopDesc::interpreter_invocation_counter_offset())); 324 } 325 // Update standard invocation counters 326 __ movl(rax, backedge_counter); // load backedge counter 327 328 __ incrementl(rcx, InvocationCounter::count_increment); 329 __ andl(rax, InvocationCounter::count_mask_value); // mask out the 330 // status bits 331 332 __ movl(invocation_counter, rcx); // save invocation count 333 __ addl(rcx, rax); // add both counters 334 335 // profile_method is non-null only for interpreted method so 336 // profile_method != NULL == !native_call 337 338 if (ProfileInterpreter && profile_method != NULL) { 339 // Test to see if we should create a method data oop 340 __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit)); 341 __ jcc(Assembler::less, *profile_method_continue); 342 343 // if no method data exists, go to profile_method 344 __ test_method_data_pointer(rax, *profile_method); 345 } 346 347 __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit)); 348 __ jcc(Assembler::aboveEqual, *overflow); 359 360 if (ProfileInterpreter) { // %%% Merge this into methodDataOop 361 __ incrementl(Address(rbx, 362 methodOopDesc::interpreter_invocation_counter_offset())); 363 } 364 // Update standard invocation counters 365 __ movl(rax, backedge_counter); // load backedge counter 366 367 __ incrementl(rcx, InvocationCounter::count_increment); 368 __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits 369 370 __ movl(invocation_counter, rcx); // save invocation count 371 __ addl(rcx, rax); // add both counters 372 373 // profile_method is non-null only for interpreted method so 374 // profile_method != NULL == !native_call 375 376 if (ProfileInterpreter && profile_method != NULL) { 377 // Test to see if we should create a method data oop 378 __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit)); 379 __ jcc(Assembler::less, *profile_method_continue); 380 381 // if no method data exists, go to profile_method 382 __ test_method_data_pointer(rax, *profile_method); 383 } 384 385 __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit)); 386 __ jcc(Assembler::aboveEqual, *overflow); 387 } 349 388 } 350 389 … … 1031 1070 // 1032 1071 __ mov(c_rarg0, r15_thread); 1033 __ mov(r12, rsp); // remember sp 1072 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM) 1034 1073 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 1035 1074 __ andptr(rsp, -16); // align stack as required by ABI … … 1078 1117 1079 1118 __ pusha(); // XXX only save smashed registers 1080 __ mov(r12, rsp); // remember sp 1119 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM) 1081 1120 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 1082 1121 __ andptr(rsp, -16); // align stack as required by ABI … … 1345 1384 // We have decided to profile this method in the interpreter 1346 1385 __ bind(profile_method); 1347 1348 __ call_VM(noreg, 1349 
CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), 1350 r13, true); 1351 1352 __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop 1353 __ movptr(rax, Address(rbx, 1354 in_bytes(methodOopDesc::method_data_offset()))); 1355 __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), 1356 rax); 1357 __ test_method_data_pointer(rax, profile_method_continue); 1358 __ addptr(rax, in_bytes(methodDataOopDesc::data_offset())); 1359 __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), 1360 rax); 1386 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method)); 1387 __ set_method_data_pointer_for_bcp(); 1388 __ get_method(rbx); 1361 1389 __ jmp(profile_method_continue); 1362 1390 } … … 1869 1897 assert(Interpreter::trace_code(t->tos_in()) != NULL, 1870 1898 "entry must have been generated"); 1871 __ mov(r12, rsp); // remember sp 1899 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM) 1872 1900 __ andptr(rsp, -16); // align stack as required by ABI 1873 1901 __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in()))); -
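The repeated "(can only use r12 if not using call_VM)" comments exist because, with compressed oops on x86_64, r12 is permanently reserved as the heap base register, and call_VM paths re-establish it via reinit_heapbase(). Decoding a narrow oop is essentially the following (a hedged sketch; parameter names are illustrative):

    // Sketch of compressed-oop decoding: a 32-bit narrowOop becomes a
    // full pointer by scaling and adding the heap base cached in r12.
    static inline oop decode_heap_oop(narrowOop v,
                                      uintptr_t heap_base,  // the r12 value
                                      int shift) {          // log2 of object alignment
      if (v == 0) return NULL;          // narrow null decodes to null
      return (oop)(heap_base + ((uintptr_t)v << shift));
    }

The same constraint is why the templateTable changes below stop borrowing r12 as a scratch register and use push_ptr/pop_ptr instead.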
trunk/openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp
r278 r309 1 1 /* 2 * Copyright (c) 1997, 201 0, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_templateTable_x86_32.cpp.incl" 25 #include "precompiled.hpp" 26 #include "interpreter/interpreter.hpp" 27 #include "interpreter/interpreterRuntime.hpp" 28 #include "interpreter/templateTable.hpp" 29 #include "memory/universe.inline.hpp" 30 #include "oops/methodDataOop.hpp" 31 #include "oops/objArrayKlass.hpp" 32 #include "oops/oop.inline.hpp" 33 #include "prims/methodHandles.hpp" 34 #include "runtime/sharedRuntime.hpp" 35 #include "runtime/stubRoutines.hpp" 36 #include "runtime/synchronizer.hpp" 27 37 28 38 #ifndef CC_INTERP … … 400 410 __ verify_oop(rax); 401 411 } 412 413 Label L_done, L_throw_exception; 414 const Register con_klass_temp = rcx; // same as Rcache 415 __ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes())); 416 __ cmpptr(con_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr())); 417 __ jcc(Assembler::notEqual, L_done); 418 __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0); 419 __ jcc(Assembler::notEqual, L_throw_exception); 420 __ xorptr(rax, rax); 421 __ jmp(L_done); 422 423 // Load the exception from the system-array which wraps it: 424 __ bind(L_throw_exception); 425 __ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 426 __ jump(ExternalAddress(Interpreter::throw_exception_entry())); 427 428 __ bind(L_done); 402 429 } 403 430 … … 1559 1586 __ jcc(Assembler::positive, dispatch); // count only if backward branch 1560 1587 1561 // increment counter 1562 __ movl(rax, Address(rcx, be_offset)); // load backedge counter 1563 __ incrementl(rax, InvocationCounter::count_increment); // increment counter 1564 __ movl(Address(rcx, be_offset), rax); // store counter 1565 1566 __ movl(rax, Address(rcx, inv_offset)); // load invocation counter 1567 __ andl(rax, InvocationCounter::count_mask_value); // and the status bits 1568 __ addl(rax, Address(rcx, be_offset)); // add both counters 1569 1570 if (ProfileInterpreter) { 1571 // Test to see if we should create a method data oop 1572 __ cmp32(rax, 1573 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit)); 1574 __ jcc(Assembler::less, dispatch); 1575 1576 // if no method data exists, go to profile method 1577 __ test_method_data_pointer(rax, profile_method); 1578 1579 if (UseOnStackReplacement) { 1580 // check for overflow against rbx, which is the MDO taken count 1581 __ cmp32(rbx, 1582 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit)); 1583 __ jcc(Assembler::below, dispatch); 1584 1585 // When ProfileInterpreter is on, the backedge_count comes from the 1586 // methodDataOop, which value does not get reset on the call to 1587 // frequency_counter_overflow(). To avoid excessive calls to the overflow 1588 // routine while the method is being compiled, add a second test to make 1589 // sure the overflow function is called only once every overflow_frequency. 
1590 const int overflow_frequency = 1024; 1591 __ andptr(rbx, overflow_frequency-1); 1592 __ jcc(Assembler::zero, backedge_counter_overflow); 1593 1588 if (TieredCompilation) { 1589 Label no_mdo; 1590 int increment = InvocationCounter::count_increment; 1591 int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift; 1592 if (ProfileInterpreter) { 1593 // Are we profiling? 1594 __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset()))); 1595 __ testptr(rbx, rbx); 1596 __ jccb(Assembler::zero, no_mdo); 1597 // Increment the MDO backedge counter 1598 const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) + 1599 in_bytes(InvocationCounter::counter_offset())); 1600 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, 1601 rax, false, Assembler::zero, &backedge_counter_overflow); 1602 __ jmp(dispatch); 1594 1603 } 1604 __ bind(no_mdo); 1605 // Increment backedge counter in methodOop 1606 __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask, 1607 rax, false, Assembler::zero, &backedge_counter_overflow); 1595 1608 } else { 1596 if (UseOnStackReplacement) { 1597 // check for overflow against rax, which is the sum of the counters 1609 // increment counter 1610 __ movl(rax, Address(rcx, be_offset)); // load backedge counter 1611 __ incrementl(rax, InvocationCounter::count_increment); // increment counter 1612 __ movl(Address(rcx, be_offset), rax); // store counter 1613 1614 __ movl(rax, Address(rcx, inv_offset)); // load invocation counter 1615 __ andl(rax, InvocationCounter::count_mask_value); // and the status bits 1616 __ addl(rax, Address(rcx, be_offset)); // add both counters 1617 1618 if (ProfileInterpreter) { 1619 // Test to see if we should create a method data oop 1598 1620 __ cmp32(rax, 1599 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit)); 1600 __ jcc(Assembler::aboveEqual, backedge_counter_overflow); 1601 1621 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit)); 1622 __ jcc(Assembler::less, dispatch); 1623 1624 // if no method data exists, go to profile method 1625 __ test_method_data_pointer(rax, profile_method); 1626 1627 if (UseOnStackReplacement) { 1628 // check for overflow against rbx, which is the MDO taken count 1629 __ cmp32(rbx, 1630 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit)); 1631 __ jcc(Assembler::below, dispatch); 1632 1633 // When ProfileInterpreter is on, the backedge_count comes from the 1634 // methodDataOop, which value does not get reset on the call to 1635 // frequency_counter_overflow(). To avoid excessive calls to the overflow 1636 // routine while the method is being compiled, add a second test to make 1637 // sure the overflow function is called only once every overflow_frequency. 1638 const int overflow_frequency = 1024; 1639 __ andptr(rbx, overflow_frequency-1); 1640 __ jcc(Assembler::zero, backedge_counter_overflow); 1641 } 1642 } else { 1643 if (UseOnStackReplacement) { 1644 // check for overflow against rax, which is the sum of the counters 1645 __ cmp32(rax, 1646 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit)); 1647 __ jcc(Assembler::aboveEqual, backedge_counter_overflow); 1648 1649 } 1602 1650 } 1603 1651 } … … 1618 1666 // Out-of-line code to allocate method data oop. 
1619 1667 __ bind(profile_method); 1620 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method) , rsi);1668 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method)); 1621 1669 __ load_unsigned_byte(rbx, Address(rsi, 0)); // restore target bytecode 1622 __ movptr(rcx, Address(rbp, method_offset)); 1623 __ movptr(rcx, Address(rcx, in_bytes(methodOopDesc::method_data_offset()))); 1624 __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx); 1625 __ test_method_data_pointer(rcx, dispatch); 1626 // offset non-null mdp by MDO::data_offset() + IR::profile_method() 1627 __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset())); 1628 __ addptr(rcx, rax); 1629 __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx); 1670 __ set_method_data_pointer_for_bcp(); 1630 1671 __ jmp(dispatch); 1631 1672 } … … 3091 3132 // rax: CallSite object (f1) 3092 3133 // rbx: unused (f2) 3134 // rcx: receiver address 3093 3135 // rdx: flags (unused) 3094 3136 3137 Register rax_callsite = rax; 3138 Register rcx_method_handle = rcx; 3139 3095 3140 if (ProfileInterpreter) { 3096 Label L;3097 3141 // %%% should make a type profile for any invokedynamic that takes a ref argument 3098 3142 // profile this call … … 3100 3144 } 3101 3145 3102 __ movptr(rcx , Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));3103 __ null_check(rcx );3146 __ movptr(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx))); 3147 __ null_check(rcx_method_handle); 3104 3148 __ prepare_to_jump_from_interpreted(); 3105 __ jump_to_method_handle_entry(rcx , rdx);3149 __ jump_to_method_handle_entry(rcx_method_handle, rdx); 3106 3150 } 3107 3151 … … 3153 3197 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode; 3154 3198 3199 const Register thread = rcx; 3200 if (UseTLAB || allow_shared_alloc) { 3201 __ get_thread(thread); 3202 } 3203 3155 3204 if (UseTLAB) { 3156 const Register thread = rcx;3157 3158 __ get_thread(thread);3159 3205 __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset()))); 3160 3206 __ lea(rbx, Address(rax, rdx, Address::times_1)); … … 3197 3243 // if someone beat us on the allocation, try again, otherwise continue 3198 3244 __ jcc(Assembler::notEqual, retry); 3245 3246 __ incr_allocated_bytes(thread, rdx, 0); 3199 3247 } 3200 3248 … … 3206 3254 __ jcc(Assembler::zero, initialize_header); 3207 3255 3208 // Initialize topmost object field, divide rdx by 8, check if odd and3209 // test if zero.3256 // Initialize topmost object field, divide rdx by 8, check if odd and 3257 // test if zero. 3210 3258 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code) 3211 3259 __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd 3212 3260 3213 // rdx must have been multiple of 83261 // rdx must have been multiple of 8 3214 3262 #ifdef ASSERT 3215 3263 // make sure rdx was multiple of 8 -
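The _new fast path above is bump-pointer allocation: first the thread-local allocation buffer, then optionally a CAS retry loop on the shared eden top, with the freshly added incr_allocated_bytes call accounting for what the CAS path hands out. In outline (a sketch of the logic the template emits; helper names are illustrative, not HotSpot's exact API):

    // Sketch of the interpreter's allocation fast path.
    HeapWord* allocate(Thread* thread, size_t size_in_words) {
      // 1. TLAB: private to this thread, so no atomics are needed.
      HeapWord* top = thread->tlab_top();
      if (top + size_in_words <= thread->tlab_end()) {
        thread->set_tlab_top(top + size_in_words);
        return top;
      }
      // 2. Shared eden: CAS the global top, retrying if another thread
      //    allocated in between ("if someone beat us ... try again").
      for (;;) {
        HeapWord* old_top = *eden_top_addr();
        HeapWord* new_top = old_top + size_in_words;
        if (new_top > eden_end()) return NULL;         // take the slow path
        if (cas_ptr(eden_top_addr(), old_top, new_top) == old_top) {
          incr_allocated_bytes(thread, size_in_words * HeapWordSize);
          return old_top;
        }
      }
    }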
trunk/openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_32.hpp
r278 r309 1 1 /* 2 * Copyright (c) 1998, 20 09, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #ifndef CPU_X86_VM_TEMPLATETABLE_X86_32_HPP 26 #define CPU_X86_VM_TEMPLATETABLE_X86_32_HPP 27 25 28 static void prepare_invoke(Register method, Register index, int byte_no); 26 29 static void invokevirtual_helper(Register index, Register recv, … … 31 34 static void index_check(Register array, Register index); 32 35 static void index_check_without_pop(Register array, Register index); 36 37 #endif // CPU_X86_VM_TEMPLATETABLE_X86_32_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp
r278 r309 1 1 /* 2 * Copyright (c) 2003, 201 0, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_templateTable_x86_64.cpp.incl" 25 #include "precompiled.hpp" 26 #include "interpreter/interpreter.hpp" 27 #include "interpreter/interpreterRuntime.hpp" 28 #include "interpreter/templateTable.hpp" 29 #include "memory/universe.inline.hpp" 30 #include "oops/methodDataOop.hpp" 31 #include "oops/objArrayKlass.hpp" 32 #include "oops/oop.inline.hpp" 33 #include "prims/methodHandles.hpp" 34 #include "runtime/sharedRuntime.hpp" 35 #include "runtime/stubRoutines.hpp" 36 #include "runtime/synchronizer.hpp" 27 37 28 38 #ifndef CC_INTERP … … 414 424 __ verify_oop(rax); 415 425 } 426 427 Label L_done, L_throw_exception; 428 const Register con_klass_temp = rcx; // same as cache 429 const Register array_klass_temp = rdx; // same as index 430 __ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes())); 431 __ lea(array_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr())); 432 __ cmpptr(con_klass_temp, Address(array_klass_temp, 0)); 433 __ jcc(Assembler::notEqual, L_done); 434 __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0); 435 __ jcc(Assembler::notEqual, L_throw_exception); 436 __ xorptr(rax, rax); 437 __ jmp(L_done); 438 439 // Load the exception from the system-array which wraps it: 440 __ bind(L_throw_exception); 441 __ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 442 __ jump(ExternalAddress(Interpreter::throw_exception_entry())); 443 444 __ bind(L_done); 416 445 } 417 446 … … 1584 1613 __ testl(rdx, rdx); // check if forward or backward branch 1585 1614 __ jcc(Assembler::positive, dispatch); // count only if backward branch 1586 1587 // increment counter 1588 __ movl(rax, Address(rcx, be_offset)); // load backedge counter 1589 __ incrementl(rax, InvocationCounter::count_increment); // increment 1590 // counter 1591 __ movl(Address(rcx, be_offset), rax); // store counter 1592 1593 __ movl(rax, Address(rcx, inv_offset)); // load invocation counter 1594 __ andl(rax, InvocationCounter::count_mask_value); // and the status bits 1595 __ addl(rax, Address(rcx, be_offset)); // add both counters 1596 1597 if (ProfileInterpreter) { 1598 // Test to see if we should create a method data oop 1599 __ cmp32(rax, 1600 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit)); 1601 __ jcc(Assembler::less, dispatch); 1602 1603 // if no method data exists, go to profile method 1604 __ test_method_data_pointer(rax, profile_method); 1605 1606 if (UseOnStackReplacement) { 1607 // check for overflow against ebx which is the MDO taken count 1608 __ cmp32(rbx, 1609 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit)); 1610 __ jcc(Assembler::below, dispatch); 1611 1612 // When ProfileInterpreter is on, the backedge_count comes 1613 // from the methodDataOop, which value does not get reset on 1614 // the call to frequency_counter_overflow(). To avoid 1615 // excessive calls to the overflow routine while the method is 1616 // being compiled, add a second test to make sure the overflow 1617 // function is called only once every overflow_frequency. 
1618 const int overflow_frequency = 1024; 1619 __ andl(rbx, overflow_frequency - 1); 1620 __ jcc(Assembler::zero, backedge_counter_overflow); 1621 1615 if (TieredCompilation) { 1616 Label no_mdo; 1617 int increment = InvocationCounter::count_increment; 1618 int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift; 1619 if (ProfileInterpreter) { 1620 // Are we profiling? 1621 __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset()))); 1622 __ testptr(rbx, rbx); 1623 __ jccb(Assembler::zero, no_mdo); 1624 // Increment the MDO backedge counter 1625 const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) + 1626 in_bytes(InvocationCounter::counter_offset())); 1627 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, 1628 rax, false, Assembler::zero, &backedge_counter_overflow); 1629 __ jmp(dispatch); 1622 1630 } 1631 __ bind(no_mdo); 1632 // Increment backedge counter in methodOop 1633 __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask, 1634 rax, false, Assembler::zero, &backedge_counter_overflow); 1623 1635 } else { 1624 if (UseOnStackReplacement) { 1625 // check for overflow against eax, which is the sum of the 1626 // counters 1636 // increment counter 1637 __ movl(rax, Address(rcx, be_offset)); // load backedge counter 1638 __ incrementl(rax, InvocationCounter::count_increment); // increment counter 1639 __ movl(Address(rcx, be_offset), rax); // store counter 1640 1641 __ movl(rax, Address(rcx, inv_offset)); // load invocation counter 1642 __ andl(rax, InvocationCounter::count_mask_value); // and the status bits 1643 __ addl(rax, Address(rcx, be_offset)); // add both counters 1644 1645 if (ProfileInterpreter) { 1646 // Test to see if we should create a method data oop 1627 1647 __ cmp32(rax, 1628 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit)); 1629 __ jcc(Assembler::aboveEqual, backedge_counter_overflow); 1630 1648 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit)); 1649 __ jcc(Assembler::less, dispatch); 1650 1651 // if no method data exists, go to profile method 1652 __ test_method_data_pointer(rax, profile_method); 1653 1654 if (UseOnStackReplacement) { 1655 // check for overflow against ebx which is the MDO taken count 1656 __ cmp32(rbx, 1657 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit)); 1658 __ jcc(Assembler::below, dispatch); 1659 1660 // When ProfileInterpreter is on, the backedge_count comes 1661 // from the methodDataOop, which value does not get reset on 1662 // the call to frequency_counter_overflow(). To avoid 1663 // excessive calls to the overflow routine while the method is 1664 // being compiled, add a second test to make sure the overflow 1665 // function is called only once every overflow_frequency. 1666 const int overflow_frequency = 1024; 1667 __ andl(rbx, overflow_frequency - 1); 1668 __ jcc(Assembler::zero, backedge_counter_overflow); 1669 1670 } 1671 } else { 1672 if (UseOnStackReplacement) { 1673 // check for overflow against eax, which is the sum of the 1674 // counters 1675 __ cmp32(rax, 1676 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit)); 1677 __ jcc(Assembler::aboveEqual, backedge_counter_overflow); 1678 1679 } 1631 1680 } 1632 1681 } … … 1647 1696 // Out-of-line code to allocate method data oop. 
1648 1697 __ bind(profile_method); 1649 __ call_VM(noreg, 1650 CAST_FROM_FN_PTR(address, 1651 InterpreterRuntime::profile_method), r13); 1698 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method)); 1652 1699 __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode 1653 __ movptr(rcx, Address(rbp, method_offset)); 1654 __ movptr(rcx, Address(rcx, 1655 in_bytes(methodOopDesc::method_data_offset()))); 1656 __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), 1657 rcx); 1658 __ test_method_data_pointer(rcx, dispatch); 1659 // offset non-null mdp by MDO::data_offset() + IR::profile_method() 1660 __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset())); 1661 __ addptr(rcx, rax); 1662 __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), 1663 rcx); 1700 __ set_method_data_pointer_for_bcp(); 1664 1701 __ jmp(dispatch); 1665 1702 } … … 2714 2751 __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1); 2715 2752 __ verify_oop(rax); 2716 __ mov(r12,rax); // save object pointer before call_VM() clobbers it2753 __ push_ptr(rax); // save object pointer before call_VM() clobbers it 2717 2754 __ mov(c_rarg1, rax); 2718 2755 // c_rarg1: object pointer copied above … … 2722 2759 InterpreterRuntime::post_field_access), 2723 2760 c_rarg1, c_rarg2); 2724 __ mov(rax, r12); // restore object pointer 2725 __ reinit_heapbase(); 2761 __ pop_ptr(rax); // restore object pointer 2726 2762 __ bind(L1); 2727 2763 } … … 2913 2949 Register recv, 2914 2950 Register flags) { 2915 // Uses temporary registers rax, rdx assert_different_registers(index, recv, rax, rdx); 2951 // Uses temporary registers rax, rdx 2952 assert_different_registers(index, recv, rax, rdx); 2916 2953 2917 2954 // Test for an invoke of a final method … … 3100 3137 // rdx: flags (unused) 3101 3138 3139 Register rax_callsite = rax; 3140 Register rcx_method_handle = rcx; 3141 3102 3142 if (ProfileInterpreter) { 3103 Label L;3104 3143 // %%% should make a type profile for any invokedynamic that takes a ref argument 3105 3144 // profile this call … … 3107 3146 } 3108 3147 3109 __ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));3110 __ null_check(rcx );3148 __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx))); 3149 __ null_check(rcx_method_handle); 3111 3150 __ prepare_to_jump_from_interpreted(); 3112 __ jump_to_method_handle_entry(rcx , rdx);3151 __ jump_to_method_handle_entry(rcx_method_handle, rdx); 3113 3152 } 3114 3153 … … 3216 3255 // if someone beat us on the allocation, try again, otherwise continue 3217 3256 __ jcc(Assembler::notEqual, retry); 3257 3258 __ incr_allocated_bytes(r15_thread, rdx, 0); 3218 3259 } 3219 3260 … … 3314 3355 __ jcc(Assembler::equal, quicked); 3315 3356 __ push(atos); // save receiver for result, and for GC 3316 __ mov(r12, rcx); // save rcx XXX3317 3357 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); 3318 __ movq(rcx, r12); // restore rcx XXX3319 __ reinit_heapbase();3320 3358 __ pop_ptr(rdx); // restore receiver 3321 3359 __ jmpb(resolved); … … 3371 3409 3372 3410 __ push(atos); // save receiver for result, and for GC 3373 __ mov(r12, rcx); // save rcx3374 3411 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); 3375 __ movq(rcx, r12); // restore rcx3376 __ reinit_heapbase();3377 3412 __ pop_ptr(rdx); // restore receiver 3413 __ verify_oop(rdx); 3378 3414 __ load_klass(rdx, rdx); 3379 
3415 __ jmpb(resolved); -
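The new L_done/L_throw_exception block in both ldc implementations appears to handle a constant whose resolution failed earlier: the error is cached as a system object array wrapping the exception, and an empty such array stands for a resolved null. A hedged pseudocode reading of the emitted assembly (not code from the changeset):

    // Sketch: post-processing of an object constant loaded by ldc.
    oop result = /* resolved constant, in rax */;
    if (result->klass() == Universe::systemObjArrayKlassObj()) {
      objArrayOop wrapper = (objArrayOop)result;
      if (wrapper->length() == 0) {
        result = NULL;                       // wrapped null constant
      } else {
        oop exception = wrapper->obj_at(0);  // wrapped resolution error
        // dispatch to Interpreter::throw_exception_entry() with it
      }
    }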
trunk/openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_64.hpp
r278 r309 1 1 /* 2 * Copyright (c) 2003, 20 05, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #ifndef CPU_X86_VM_TEMPLATETABLE_X86_64_HPP 26 #define CPU_X86_VM_TEMPLATETABLE_X86_64_HPP 27 25 28 static void prepare_invoke(Register method, Register index, int byte_no); 26 29 static void invokevirtual_helper(Register index, Register recv, … … 31 34 static void index_check(Register array, Register index); 32 35 static void index_check_without_pop(Register array, Register index); 36 37 #endif // CPU_X86_VM_TEMPLATETABLE_X86_64_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/vmStructs_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 2001, 20 07, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 22 22 * 23 23 */ 24 25 #ifndef CPU_X86_VM_VMSTRUCTS_X86_HPP 26 #define CPU_X86_VM_VMSTRUCTS_X86_HPP 24 27 25 28 // These are the CPU-specific fields, types and integer … … 61 64 /* in vmStructs_<os>_<cpu>.hpp's VM_LONG_CONSTANTS_OS_CPU macro (and must */ 62 65 /* be present there) */ 66 67 #endif // CPU_X86_VM_VMSTRUCTS_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/vm_version_x86.cpp
r278 r309 23 23 */ 24 24 25 # include "incls/_precompiled.incl" 26 # include "incls/_vm_version_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "assembler_x86.inline.hpp" 27 #include "memory/resourceArea.hpp" 28 #include "runtime/java.hpp" 29 #include "runtime/stubCodeGenerator.hpp" 30 #include "vm_version_x86.hpp" 31 #ifdef TARGET_OS_FAMILY_linux 32 # include "os_linux.inline.hpp" 33 #endif 34 #ifdef TARGET_OS_FAMILY_solaris 35 # include "os_solaris.inline.hpp" 36 #endif 37 #ifdef TARGET_OS_FAMILY_windows 38 # include "os_windows.inline.hpp" 39 #endif 27 40 28 41 … … 336 349 337 350 char buf[256]; 338 jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s %s",351 jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", 339 352 cores_per_cpu(), threads_per_core(), 340 353 cpu_family(), _model, _stepping, … … 351 364 (supports_popcnt() ? ", popcnt" : ""), 352 365 (supports_mmx_ext() ? ", mmxext" : ""), 353 (supports_3dnow() ? ", 3dnow" : ""), 354 (supports_3dnow2() ? ", 3dnowext" : ""), 366 (supports_3dnow_prefetch() ? ", 3dnowpref" : ""), 355 367 (supports_lzcnt() ? ", lzcnt": ""), 356 368 (supports_sse4a() ? ", sse4a": ""), … … 417 429 } 418 430 } 431 if( FLAG_IS_DEFAULT(UseSSE42Intrinsics) ) { 432 if( supports_sse4_2() && UseSSE >= 4 ) { 433 UseSSE42Intrinsics = true; 434 } 435 } 419 436 420 437 // Use count leading zeros count instruction if available. … … 424 441 } 425 442 } 443 444 // some defaults for AMD family 15h 445 if ( cpu_family() == 0x15 ) { 446 // On family 15h processors default is no sw prefetch 447 if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) { 448 AllocatePrefetchStyle = 0; 449 } 450 // Also, if some other prefetch style is specified, default instruction type is PREFETCHW 451 if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) { 452 AllocatePrefetchInstr = 3; 453 } 454 // On family 15h processors use XMM and UnalignedLoadStores for Array Copy 455 if( FLAG_IS_DEFAULT(UseXMMForArrayCopy) ) { 456 UseXMMForArrayCopy = true; 457 } 458 if( FLAG_IS_DEFAULT(UseUnalignedLoadStores) && UseXMMForArrayCopy ) { 459 UseUnalignedLoadStores = true; 460 } 461 } 462 426 463 } 427 464 … … 498 535 if( ReadPrefetchInstr < 0 ) ReadPrefetchInstr = 0; 499 536 if( ReadPrefetchInstr > 3 ) ReadPrefetchInstr = 3; 500 if( ReadPrefetchInstr == 3 && !supports_3dnow () ) ReadPrefetchInstr = 0;501 if( !supports_sse() && supports_3dnow () ) ReadPrefetchInstr = 3;537 if( ReadPrefetchInstr == 3 && !supports_3dnow_prefetch() ) ReadPrefetchInstr = 0; 538 if( !supports_sse() && supports_3dnow_prefetch() ) ReadPrefetchInstr = 3; 502 539 503 540 if( AllocatePrefetchInstr < 0 ) AllocatePrefetchInstr = 0; 504 541 if( AllocatePrefetchInstr > 3 ) AllocatePrefetchInstr = 3; 505 if( AllocatePrefetchInstr == 3 && !supports_3dnow () ) AllocatePrefetchInstr=0;506 if( !supports_sse() && supports_3dnow () ) AllocatePrefetchInstr = 3;542 if( AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch() ) AllocatePrefetchInstr=0; 543 if( !supports_sse() && supports_3dnow_prefetch() ) AllocatePrefetchInstr = 3; 507 544 508 545 // Allocation prefetch settings … … 551 588 logical_processors_per_package()); 552 589 tty->print_cr("UseSSE=%d",UseSSE); 553 tty->print("Allocation :");554 if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow ()) {555 tty->print_cr(" no prefetching");590 tty->print("Allocation"); 591 if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && 
!supports_3dnow_prefetch()) { 592 tty->print_cr(": no prefetching"); 556 593 } else { 557 if (UseSSE == 0 && supports_3dnow()) { 594 tty->print(" prefetching: "); 595 if (UseSSE == 0 && supports_3dnow_prefetch()) { 558 596 tty->print("PREFETCHW"); 559 597 } else if (UseSSE >= 1) { … … 596 634 vm_exit_during_initialization("Unable to allocate getPsrInfo_stub"); 597 635 } 598 CodeBuffer c(stub_blob->instructions_begin(), 599 stub_blob->instructions_size()); 636 CodeBuffer c(stub_blob); 600 637 VM_Version_StubGenerator g(&c); 601 638 getPsrInfo_stub = CAST_TO_FN_PTR(getPsrInfo_stub_t, -
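supports_3dnow() gives way to supports_3dnow_prefetch() because the prefetch code only needs PREFETCH/PREFETCHW, which newer AMD processors (including the family-15h parts given defaults above) report separately from full 3DNow!. A standalone check in the same spirit, hedged: this uses GCC's <cpuid.h> and bit positions per the AMD manuals, and is not code from this changeset:

    #include <cpuid.h>
    #include <stdbool.h>

    // Does this CPU support PREFETCH/PREFETCHW (3DNow! prefetch)?
    // CPUID leaf 0x80000001: EDX bit 31 = 3DNow!, ECX bit 8 = 3DNowPrefetch.
    static bool cpu_supports_3dnow_prefetch(void) {
      unsigned int eax, ebx, ecx, edx;
      if (!__get_cpuid(0x80000001u, &eax, &ebx, &ecx, &edx))
        return false;                     // extended leaf unavailable
      return (edx & (1u << 31)) != 0      // full 3DNow! implies the prefetches
          || (ecx & (1u << 8))  != 0;     // prefetch-only bit on newer parts
    }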
trunk/openjdk/hotspot/src/cpu/x86/vm/vm_version_x86.hpp
r278 r309 23 23 */ 24 24 25 #ifndef CPU_X86_VM_VM_VERSION_X86_HPP 26 #define CPU_X86_VM_VM_VERSION_X86_HPP 27 28 #include "runtime/globals_extension.hpp" 29 #include "runtime/vm_version.hpp" 30 25 31 class VM_Version : public Abstract_VM_Version { 26 32 public: … … 183 189 CPU_HT = (1 << 3), 184 190 CPU_MMX = (1 << 4), 185 CPU_3DNOW = (1 << 5), // 3DNow comes from cpuid 0x80000001 (EDX) 191 CPU_3DNOW_PREFETCH = (1 << 5), // Processor supports 3dnow prefetch and prefetchw instructions 192 // may not necessarily support other 3dnow instructions 186 193 CPU_SSE = (1 << 6), 187 194 CPU_SSE2 = (1 << 7), … … 297 304 if (_cpuid_info.std_cpuid1_edx.bits.cmov != 0) 298 305 result |= CPU_CMOV; 299 if (_cpuid_info.std_cpuid1_edx.bits.fxsr != 0 || is_amd() &&300 _cpuid_info.ext_cpuid1_edx.bits.fxsr != 0) 306 if (_cpuid_info.std_cpuid1_edx.bits.fxsr != 0 || (is_amd() && 307 _cpuid_info.ext_cpuid1_edx.bits.fxsr != 0)) 301 308 result |= CPU_FXSR; 302 309 // HT flag is set for multi-core processors also. 303 310 if (threads_per_core() > 1) 304 311 result |= CPU_HT; 305 if (_cpuid_info.std_cpuid1_edx.bits.mmx != 0 || is_amd() &&306 _cpuid_info.ext_cpuid1_edx.bits.mmx != 0) 312 if (_cpuid_info.std_cpuid1_edx.bits.mmx != 0 || (is_amd() && 313 _cpuid_info.ext_cpuid1_edx.bits.mmx != 0)) 307 314 result |= CPU_MMX; 308 315 if (_cpuid_info.std_cpuid1_edx.bits.sse != 0) … … 323 330 // AMD features. 324 331 if (is_amd()) { 325 if (_cpuid_info.ext_cpuid1_edx.bits.tdnow != 0) 326 result |= CPU_3DNOW; 332 if ((_cpuid_info.ext_cpuid1_edx.bits.tdnow != 0) || 333 (_cpuid_info.ext_cpuid1_ecx.bits.prefetchw != 0)) 334 result |= CPU_3DNOW_PREFETCH; 327 335 if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt != 0) 328 336 result |= CPU_LZCNT; … … 441 449 // AMD features 442 450 // 443 static bool supports_3dnow () { return (_cpuFeatures & CPU_3DNOW) != 0; }451 static bool supports_3dnow_prefetch() { return (_cpuFeatures & CPU_3DNOW_PREFETCH) != 0; } 444 452 static bool supports_mmx_ext() { return is_amd() && _cpuid_info.ext_cpuid1_edx.bits.mmx_amd != 0; } 445 static bool supports_3dnow2() { return is_amd() && _cpuid_info.ext_cpuid1_edx.bits.tdnow2 != 0; }446 453 static bool supports_lzcnt() { return (_cpuFeatures & CPU_LZCNT) != 0; } 447 454 static bool supports_sse4a() { return (_cpuFeatures & CPU_SSE4A) != 0; } 455 456 // Intel Core and newer cpus have fast IDIV instruction (excluding Atom). 457 static bool has_fast_idiv() { return is_intel() && cpu_family() == 6 && 458 supports_sse3() && _model != 0x1C; } 448 459 449 460 static bool supports_compare_and_exchange() { return true; } … … 517 528 } 518 529 }; 530 531 #endif // CPU_X86_VM_VM_VERSION_X86_HPP -
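The header keeps all of this as a single bitmask: feature_flags() ORs in one CPU_* bit per detected capability and each supports_*() predicate tests one bit, so renaming CPU_3DNOW to CPU_3DNOW_PREFETCH is purely a relabeling of bit 5. The idiom in miniature (a sketch with abbreviated constants, not the class itself):

    // Sketch of the VM_Version bitmask-feature idiom.
    enum Feature {
      CPU_MMX            = 1 << 4,
      CPU_3DNOW_PREFETCH = 1 << 5,   // prefetch/prefetchw, not full 3DNow!
      CPU_SSE            = 1 << 6
    };

    static uint32_t cpu_features;    // filled once from the CPUID probes

    static inline bool supports(uint32_t f) {
      return (cpu_features & f) != 0;
    }

    // e.g. the prefetch defaults in vm_version_x86.cpp reduce to:
    //   if (AllocatePrefetchInstr == 3 && !supports(CPU_3DNOW_PREFETCH))
    //     AllocatePrefetchInstr = 0;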
trunk/openjdk/hotspot/src/cpu/x86/vm/vmreg_x86.cpp
r278 r309 1 1 /* 2 * Copyright (c) 2006, 20 07, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_vmreg_x86.cpp.incl" 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "code/vmreg.hpp" 27 28 28 29 -
trunk/openjdk/hotspot/src/cpu/x86/vm/vmreg_x86.hpp
r278 r309 1 1 /* 2 * Copyright (c) 2006, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #ifndef CPU_X86_VM_VMREG_X86_HPP 26 #define CPU_X86_VM_VMREG_X86_HPP 27 25 28 bool is_Register(); 26 29 Register as_Register(); … … 31 34 bool is_XMMRegister(); 32 35 XMMRegister as_XMMRegister(); 36 37 #endif // CPU_X86_VM_VMREG_X86_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/vmreg_x86.inline.hpp
r278 r309 1 1 /* 2 * Copyright (c) 2006, 20 07, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 22 22 * 23 23 */ 24 25 #ifndef CPU_X86_VM_VMREG_X86_INLINE_HPP 26 #define CPU_X86_VM_VMREG_X86_INLINE_HPP 24 27 25 28 inline VMReg RegisterImpl::as_VMReg() { … … 83 86 return is_even(value()); 84 87 } 88 89 #endif // CPU_X86_VM_VMREG_X86_INLINE_HPP -
trunk/openjdk/hotspot/src/cpu/x86/vm/vtableStubs_x86_32.cpp
r278 r309 1 1 /* 2 * Copyright (c) 1997, 20 09, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_vtableStubs_x86_32.cpp.incl" 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "assembler_x86.inline.hpp" 28 #include "code/vtableStubs.hpp" 29 #include "interp_masm_x86_32.hpp" 30 #include "memory/resourceArea.hpp" 31 #include "oops/instanceKlass.hpp" 32 #include "oops/klassVtable.hpp" 33 #include "runtime/sharedRuntime.hpp" 34 #include "vmreg_x86.inline.hpp" 35 #ifdef COMPILER2 36 #include "opto/runtime.hpp" 37 #endif 27 38 28 39 // machine-dependent part of VtableStubs: create VtableStub of correct size and -
trunk/openjdk/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp
r278 r309 1 1 /* 2 * Copyright (c) 2003, 20 09, Oracle and/or its affiliates. All rights reserved.2 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 4 * … … 23 23 */ 24 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_vtableStubs_x86_64.cpp.incl" 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "assembler_x86.inline.hpp" 28 #include "code/vtableStubs.hpp" 29 #include "interp_masm_x86_64.hpp" 30 #include "memory/resourceArea.hpp" 31 #include "oops/instanceKlass.hpp" 32 #include "oops/klassVtable.hpp" 33 #include "runtime/sharedRuntime.hpp" 34 #include "vmreg_x86.inline.hpp" 35 #ifdef COMPILER2 36 #include "opto/runtime.hpp" 37 #endif 27 38 28 39 // machine-dependent part of VtableStubs: create VtableStub of correct size and … … 210 221 } else { 211 222 // Itable stub size 212 return (DebugVtables ? 512 : 7 2) + (CountCompiledCalls ? 13 : 0) +223 return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) + 213 224 (UseCompressedOops ? 32 : 0); // 2 leaqs 214 225 } -
trunk/openjdk/hotspot/src/cpu/x86/vm/x86_32.ad
r278 r309 351 351 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) { 352 352 unsigned char c = (unsigned char)((f1 << 6) | (f2 << 3) | f3); 353 *(cbuf.code_end()) = c; 354 cbuf.set_code_end(cbuf.code_end() + 1); 353 cbuf.insts()->emit_int8(c); 355 354 } 356 355 … … 358 357 void emit_cc(CodeBuffer &cbuf, int f1, int f2) { 359 358 unsigned char c = (unsigned char)( f1 | f2 ); 360 *(cbuf.code_end()) = c; 361 cbuf.set_code_end(cbuf.code_end() + 1); 359 cbuf.insts()->emit_int8(c); 362 360 } 363 361 364 362 // EMIT_OPCODE() 365 363 void emit_opcode(CodeBuffer &cbuf, int code) { 366 *(cbuf.code_end()) = (unsigned char)code; 367 cbuf.set_code_end(cbuf.code_end() + 1); 364 cbuf.insts()->emit_int8((unsigned char) code); 368 365 } 369 366 370 367 // EMIT_OPCODE() w/ relocation information 371 368 void emit_opcode(CodeBuffer &cbuf, int code, relocInfo::relocType reloc, int offset = 0) { 372 cbuf.relocate(cbuf.inst _mark() + offset, reloc);369 cbuf.relocate(cbuf.insts_mark() + offset, reloc); 373 370 emit_opcode(cbuf, code); 374 371 } … … 376 373 // EMIT_D8() 377 374 void emit_d8(CodeBuffer &cbuf, int d8) { 378 *(cbuf.code_end()) = (unsigned char)d8; 379 cbuf.set_code_end(cbuf.code_end() + 1); 375 cbuf.insts()->emit_int8((unsigned char) d8); 380 376 } 381 377 382 378 // EMIT_D16() 383 379 void emit_d16(CodeBuffer &cbuf, int d16) { 384 *((short *)(cbuf.code_end())) = d16; 385 cbuf.set_code_end(cbuf.code_end() + 2); 380 cbuf.insts()->emit_int16(d16); 386 381 } 387 382 388 383 // EMIT_D32() 389 384 void emit_d32(CodeBuffer &cbuf, int d32) { 390 *((int *)(cbuf.code_end())) = d32; 391 cbuf.set_code_end(cbuf.code_end() + 4); 385 cbuf.insts()->emit_int32(d32); 392 386 } 393 387 … … 395 389 void emit_d32_reloc(CodeBuffer &cbuf, int d32, relocInfo::relocType reloc, 396 390 int format) { 397 cbuf.relocate(cbuf.inst_mark(), reloc, format); 398 399 *((int *)(cbuf.code_end())) = d32; 400 cbuf.set_code_end(cbuf.code_end() + 4); 391 cbuf.relocate(cbuf.insts_mark(), reloc, format); 392 cbuf.insts()->emit_int32(d32); 401 393 } 402 394 … … 409 401 } 410 402 #endif 411 cbuf.relocate(cbuf.inst_mark(), rspec, format); 412 413 *((int *)(cbuf.code_end())) = d32; 414 cbuf.set_code_end(cbuf.code_end() + 4); 403 cbuf.relocate(cbuf.insts_mark(), rspec, format); 404 cbuf.insts()->emit_int32(d32); 415 405 } 416 406 … … 518 508 519 509 //============================================================================= 510 const bool Matcher::constant_table_absolute_addressing = true; 511 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty; 512 513 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const { 514 // Empty encoding 515 } 516 517 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const { 518 return 0; 519 } 520 521 #ifndef PRODUCT 522 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const { 523 st->print("# MachConstantBaseNode (empty encoding)"); 524 } 525 #endif 526 527 528 //============================================================================= 520 529 #ifndef PRODUCT 521 530 void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const { … … 614 623 emit_d32(cbuf, framesize); 615 624 } 616 C->set_frame_complete(cbuf. code_end() - cbuf.code_begin());625 C->set_frame_complete(cbuf.insts_size()); 617 626 618 627 #ifdef ASSERT … … 696 705 697 706 if( do_polling() && C->is_method_compilation() ) { 698 cbuf.relocate(cbuf. 
code_end(), relocInfo::poll_return_type, 0);707 cbuf.relocate(cbuf.insts_end(), relocInfo::poll_return_type, 0); 699 708 emit_opcode(cbuf,0x85); 700 709 emit_rm(cbuf, 0x0, EAX_enc, 0x5); // EAX … … 1212 1221 // jmp -1 1213 1222 1214 address mark = cbuf.inst _mark(); // get mark within main instrs section1215 1216 // Note that the code buffer's inst _mark is always relative to insts.1223 address mark = cbuf.insts_mark(); // get mark within main instrs section 1224 1225 // Note that the code buffer's insts_mark is always relative to insts. 1217 1226 // That's why we must use the macroassembler to generate a stub. 1218 1227 MacroAssembler _masm(&cbuf); … … 1229 1238 1230 1239 __ end_a_stub(); 1231 // Update current stubs pointer and restore code_end.1240 // Update current stubs pointer and restore insts_end. 1232 1241 } 1233 1242 // size of call stub, compiled java to interpretor … … 1255 1264 MacroAssembler masm(&cbuf); 1256 1265 #ifdef ASSERT 1257 uint code_size = cbuf.code_size();1266 uint insts_size = cbuf.insts_size(); 1258 1267 #endif 1259 1268 masm.cmpptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes())); … … 1267 1276 masm.nop(nops_cnt); 1268 1277 1269 assert(cbuf. code_size() - code_size == size(ra_), "checking code size of inline cache node");1278 assert(cbuf.insts_size() - insts_size == size(ra_), "checking code size of inline cache node"); 1270 1279 } 1271 1280 … … 1289 1298 int emit_exception_handler(CodeBuffer& cbuf) { 1290 1299 1291 // Note that the code buffer's inst _mark is always relative to insts.1300 // Note that the code buffer's insts_mark is always relative to insts. 1292 1301 // That's why we must use the macroassembler to generate a handler. 1293 1302 MacroAssembler _masm(&cbuf); … … 1296 1305 if (base == NULL) return 0; // CodeBuffer::expand failed 1297 1306 int offset = __ offset(); 1298 __ jump(RuntimeAddress(OptoRuntime::exception_blob()-> instructions_begin()));1307 __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point())); 1299 1308 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow"); 1300 1309 __ end_a_stub(); … … 1314 1323 int emit_deopt_handler(CodeBuffer& cbuf) { 1315 1324 1316 // Note that the code buffer's inst _mark is always relative to insts.1325 // Note that the code buffer's insts_mark is always relative to insts. 1317 1326 // That's why we must use the macroassembler to generate a handler. 
1318 1327 MacroAssembler _masm(&cbuf); … … 1331 1340 1332 1341 1333 static void emit_double_constant(CodeBuffer& cbuf, double x) {1334 int mark = cbuf.insts()->mark_off();1335 MacroAssembler _masm(&cbuf);1336 address double_address = __ double_constant(x);1337 cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift1338 emit_d32_reloc(cbuf,1339 (int)double_address,1340 internal_word_Relocation::spec(double_address),1341 RELOC_DISP32);1342 }1343 1344 static void emit_float_constant(CodeBuffer& cbuf, float x) {1345 int mark = cbuf.insts()->mark_off();1346 MacroAssembler _masm(&cbuf);1347 address float_address = __ float_constant(x);1348 cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift1349 emit_d32_reloc(cbuf,1350 (int)float_address,1351 internal_word_Relocation::spec(float_address),1352 RELOC_DISP32);1353 }1354 1355 1356 1342 const bool Matcher::match_rule_supported(int opcode) { 1357 1343 if (!has_match_rule(opcode)) … … 1363 1349 int Matcher::regnum_to_fpu_offset(int regnum) { 1364 1350 return regnum - 32; // The FP registers are in the second chunk 1365 }1366 1367 bool is_positive_zero_float(jfloat f) {1368 return jint_cast(f) == jint_cast(0.0F);1369 }1370 1371 bool is_positive_one_float(jfloat f) {1372 return jint_cast(f) == jint_cast(1.0F);1373 }1374 1375 bool is_positive_zero_double(jdouble d) {1376 return jlong_cast(d) == jlong_cast(0.0);1377 }1378 1379 bool is_positive_one_double(jdouble d) {1380 return jlong_cast(d) == jlong_cast(1.0);1381 1351 } 1382 1352 … … 1519 1489 } 1520 1490 1491 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) { 1492 // Use hardware integer DIV instruction when 1493 // it is faster than a code which use multiply. 1494 // Only when constant divisor fits into 32 bit 1495 // (min_jint is excluded to get only correct 1496 // positive 32 bit values from negative). 1497 return VM_Version::has_fast_idiv() && 1498 (divisor == (int)divisor && divisor != min_jint); 1499 } 1500 1521 1501 // Register for DIVI projection of divmodI 1522 1502 RegMask Matcher::divI_proj_mask() { … … 1556 1536 return true; 1557 1537 } 1538 } 1539 if (opc == Op_ConL && (n->get_long() & 0xFFFFFFFF00000000LL) == 0LL) { 1540 return true; 1558 1541 } 1559 1542 return false; … … 1729 1712 enc_class Lbl (label labl) %{ // JMP, CALL 1730 1713 Label *l = $labl$$label; 1731 emit_d32(cbuf, l ? (l->loc_pos() - (cbuf. code_size()+4)) : 0);1714 emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.insts_size()+4)) : 0); 1732 1715 %} 1733 1716 1734 1717 enc_class LblShort (label labl) %{ // JMP, CALL 1735 1718 Label *l = $labl$$label; 1736 int disp = l ? (l->loc_pos() - (cbuf. code_size()+1)) : 0;1719 int disp = l ? (l->loc_pos() - (cbuf.insts_size()+1)) : 0; 1737 1720 assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp"); 1738 1721 emit_d8(cbuf, disp); … … 1765 1748 $$$emit8$primary; 1766 1749 emit_cc(cbuf, $secondary, $cop$$cmpcode); 1767 emit_d32(cbuf, l ? (l->loc_pos() - (cbuf. code_size()+4)) : 0);1750 emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.insts_size()+4)) : 0); 1768 1751 %} 1769 1752 … … 1771 1754 Label *l = $labl$$label; 1772 1755 emit_cc(cbuf, $primary, $cop$$cmpcode); 1773 int disp = l ? (l->loc_pos() - (cbuf. code_size()+1)) : 0;1756 int disp = l ? 
(l->loc_pos() - (cbuf.insts_size()+1)) : 0; 1774 1757 assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp"); 1775 1758 emit_d8(cbuf, disp); … … 1839 1822 enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf 1840 1823 // This is the instruction starting address for relocation info. 1841 cbuf.set_inst _mark();1824 cbuf.set_insts_mark(); 1842 1825 $$$emit8$primary; 1843 1826 // CALL directly to the runtime 1844 emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf. code_end()) - 4),1827 emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4), 1845 1828 runtime_call_Relocation::spec(), RELOC_IMM32 ); 1846 1829 … … 1872 1855 enc_class pre_call_FPU %{ 1873 1856 // If method sets FPU control word restore it here 1874 debug_only(int off0 = cbuf. code_size());1857 debug_only(int off0 = cbuf.insts_size()); 1875 1858 if( Compile::current()->in_24_bit_fp_mode() ) { 1876 1859 MacroAssembler masm(&cbuf); 1877 1860 masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); 1878 1861 } 1879 debug_only(int off1 = cbuf. code_size());1862 debug_only(int off1 = cbuf.insts_size()); 1880 1863 assert(off1 - off0 == pre_call_FPU_size(), "correct size prediction"); 1881 1864 %} … … 1890 1873 1891 1874 enc_class preserve_SP %{ 1892 debug_only(int off0 = cbuf. code_size());1875 debug_only(int off0 = cbuf.insts_size()); 1893 1876 MacroAssembler _masm(&cbuf); 1894 1877 // RBP is preserved across all calls, even compiled calls. 1895 1878 // Use it to preserve RSP in places where the callee might change the SP. 1896 1879 __ movptr(rbp_mh_SP_save, rsp); 1897 debug_only(int off1 = cbuf. code_size());1880 debug_only(int off1 = cbuf.insts_size()); 1898 1881 assert(off1 - off0 == preserve_SP_size(), "correct size prediction"); 1899 1882 %} … … 1907 1890 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine 1908 1891 // who we intended to call. 1909 cbuf.set_inst _mark();1892 cbuf.set_insts_mark(); 1910 1893 $$$emit8$primary; 1911 1894 if ( !_method ) { 1912 emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf. code_end()) - 4),1895 emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4), 1913 1896 runtime_call_Relocation::spec(), RELOC_IMM32 ); 1914 1897 } else if(_optimized_virtual) { 1915 emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf. code_end()) - 4),1898 emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4), 1916 1899 opt_virtual_call_Relocation::spec(), RELOC_IMM32 ); 1917 1900 } else { 1918 emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf. code_end()) - 4),1901 emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4), 1919 1902 static_call_Relocation::spec(), RELOC_IMM32 ); 1920 1903 } … … 1928 1911 // Generate "Mov EAX,0x00", placeholder instruction to load oop-info 1929 1912 // emit_call_dynamic_prologue( cbuf ); 1930 cbuf.set_inst _mark();1913 cbuf.set_insts_mark(); 1931 1914 emit_opcode(cbuf, 0xB8 + EAX_enc); // mov EAX,-1 1932 1915 emit_d32_reloc(cbuf, (int)Universe::non_oop_word(), oop_Relocation::spec_for_immediate(), RELOC_IMM32); 1933 address virtual_call_oop_addr = cbuf.inst _mark();1916 address virtual_call_oop_addr = cbuf.insts_mark(); 1934 1917 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine 1935 1918 // who we intended to call. 1936 cbuf.set_inst _mark();1919 cbuf.set_insts_mark(); 1937 1920 $$$emit8$primary; 1938 emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf. 
code_end()) - 4),1921 emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4), 1939 1922 virtual_call_Relocation::spec(virtual_call_oop_addr), RELOC_IMM32 ); 1940 1923 %} … … 1945 1928 1946 1929 // CALL *[EAX+in_bytes(methodOopDesc::from_compiled_code_entry_point_offset())] 1947 cbuf.set_inst _mark();1930 cbuf.set_insts_mark(); 1948 1931 $$$emit8$primary; 1949 1932 emit_rm(cbuf, 0x01, $secondary, EAX_enc ); // R/M byte … … 1977 1960 // 1978 1961 // // CALL to interpreter. 1979 // cbuf.set_inst _mark();1962 // cbuf.set_insts_mark(); 1980 1963 // $$$emit8$primary; 1981 // emit_d32_reloc(cbuf, ($labl$$label - (int)(cbuf. code_end()) - 4),1964 // emit_d32_reloc(cbuf, ($labl$$label - (int)(cbuf.insts_end()) - 4), 1982 1965 // runtime_call_Relocation::spec(), RELOC_IMM32 ); 1983 1966 // %} … … 2031 2014 emit_d32(cbuf, src_con); 2032 2015 } 2033 %}2034 2035 2036 enc_class LdImmD (immD src) %{ // Load Immediate2037 if( is_positive_zero_double($src$$constant)) {2038 // FLDZ2039 emit_opcode(cbuf,0xD9);2040 emit_opcode(cbuf,0xEE);2041 } else if( is_positive_one_double($src$$constant)) {2042 // FLD12043 emit_opcode(cbuf,0xD9);2044 emit_opcode(cbuf,0xE8);2045 } else {2046 emit_opcode(cbuf,0xDD);2047 emit_rm(cbuf, 0x0, 0x0, 0x5);2048 emit_double_constant(cbuf, $src$$constant);2049 }2050 %}2051 2052 2053 enc_class LdImmF (immF src) %{ // Load Immediate2054 if( is_positive_zero_float($src$$constant)) {2055 emit_opcode(cbuf,0xD9);2056 emit_opcode(cbuf,0xEE);2057 } else if( is_positive_one_float($src$$constant)) {2058 emit_opcode(cbuf,0xD9);2059 emit_opcode(cbuf,0xE8);2060 } else {2061 $$$emit8$primary;2062 // Load immediate does not have a zero or sign extended version2063 // for 8-bit immediates2064 // First load to TOS, then move to dst2065 emit_rm(cbuf, 0x0, 0x0, 0x5);2066 emit_float_constant(cbuf, $src$$constant);2067 }2068 %}2069 2070 enc_class LdImmX (regX dst, immXF con) %{ // Load Immediate2071 emit_rm(cbuf, 0x0, $dst$$reg, 0x5);2072 emit_float_constant(cbuf, $con$$constant);2073 %}2074 2075 enc_class LdImmXD (regXD dst, immXD con) %{ // Load Immediate2076 emit_rm(cbuf, 0x0, $dst$$reg, 0x5);2077 emit_double_constant(cbuf, $con$$constant);2078 %}2079 2080 enc_class load_conXD (regXD dst, immXD con) %{ // Load double constant2081 // UseXmmLoadAndClearUpper ? movsd(dst, con) : movlpd(dst, con)2082 emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);2083 emit_opcode(cbuf, 0x0F);2084 emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 
0x10 : 0x12);2085 emit_rm(cbuf, 0x0, $dst$$reg, 0x5);2086 emit_double_constant(cbuf, $con$$constant);2087 %}2088 2089 enc_class Opc_MemImm_F(immF src) %{2090 cbuf.set_inst_mark();2091 $$$emit8$primary;2092 emit_rm(cbuf, 0x0, $secondary, 0x5);2093 emit_float_constant(cbuf, $src$$constant);2094 2016 %} 2095 2017 … … 2281 2203 2282 2204 enc_class set_instruction_start( ) %{ 2283 cbuf.set_inst _mark(); // Mark start of opcode for reloc info in mem operand2205 cbuf.set_insts_mark(); // Mark start of opcode for reloc info in mem operand 2284 2206 %} 2285 2207 … … 2320 2242 emit_opcode( cbuf, 0x8B ); // Move 2321 2243 emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW($dst$$reg)); 2322 emit_d8(cbuf,$primary); 2323 emit_rm(cbuf, 0x3, $secondary, $dst$$reg); 2324 emit_d8(cbuf,$cnt$$constant-32); 2244 if( $cnt$$constant > 32 ) { // Shift, if not by zero 2245 emit_d8(cbuf,$primary); 2246 emit_rm(cbuf, 0x3, $secondary, $dst$$reg); 2247 emit_d8(cbuf,$cnt$$constant-32); 2248 } 2325 2249 emit_d8(cbuf,$primary); 2326 2250 emit_rm(cbuf, 0x3, $secondary, HIGH_FROM_LOW($dst$$reg)); … … 2430 2354 emit_d8( cbuf, 0xC0-1+$src$$reg ); 2431 2355 } 2432 cbuf.set_inst _mark(); // Mark start of opcode for reloc info in mem operand2356 cbuf.set_insts_mark(); // Mark start of opcode for reloc info in mem operand 2433 2357 emit_opcode(cbuf,$primary); 2434 2358 encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_is_oop); … … 2475 2399 emit_rm(cbuf, 0x3, tmpReg, tmpReg); 2476 2400 // AND $tmp,$y 2477 cbuf.set_inst _mark(); // Mark start of opcode for reloc info in mem operand2401 cbuf.set_insts_mark(); // Mark start of opcode for reloc info in mem operand 2478 2402 emit_opcode(cbuf,0x23); 2479 2403 int reg_encoding = tmpReg; … … 3158 3082 emit_opcode(cbuf, 0x50+$src2$$reg ); 3159 3083 // CALL directly to the runtime 3160 cbuf.set_inst _mark();3084 cbuf.set_insts_mark(); 3161 3085 emit_opcode(cbuf,0xE8); // Call into runtime 3162 emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::ldiv) - cbuf. code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );3086 emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::ldiv) - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); 3163 3087 // Restore stack 3164 3088 emit_opcode(cbuf, 0x83); // add SP, #framesize … … 3177 3101 emit_opcode(cbuf, 0x50+$src2$$reg ); 3178 3102 // CALL directly to the runtime 3179 cbuf.set_inst _mark();3103 cbuf.set_insts_mark(); 3180 3104 emit_opcode(cbuf,0xE8); // Call into runtime 3181 emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::lrem ) - cbuf. 
code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );3105 emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::lrem ) - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); 3182 3106 // Restore stack 3183 3107 emit_opcode(cbuf, 0x83); // add SP, #framesize … … 3496 3420 3497 3421 // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes 3498 if ((EmitSync & 2048) && VM_Version::supports_3dnow () && os::is_MP()) {3422 if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) { 3499 3423 // prefetchw [eax + Offset(_owner)-2] 3500 3424 masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2)); … … 3540 3464 3541 3465 // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes 3542 if ((EmitSync & 2048) && VM_Version::supports_3dnow () && os::is_MP()) {3466 if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) { 3543 3467 // prefetchw [eax + Offset(_owner)-2] 3544 3468 masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2)); … … 3687 3611 3688 3612 masm.get_thread (boxReg) ; 3689 if ((EmitSync & 4096) && VM_Version::supports_3dnow () && os::is_MP()) {3613 if ((EmitSync & 4096) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) { 3690 3614 // prefetchw [ebx + Offset(_owner)-2] 3691 3615 masm.prefetchw(Address(rbx, ObjectMonitor::owner_offset_in_bytes()-2)); … … 3825 3749 3826 3750 enc_class enc_rethrow() %{ 3827 cbuf.set_inst _mark();3751 cbuf.set_insts_mark(); 3828 3752 emit_opcode(cbuf, 0xE9); // jmp entry 3829 emit_d32_reloc(cbuf, (int)OptoRuntime::rethrow_stub() - ((int)cbuf. code_end())-4,3753 emit_d32_reloc(cbuf, (int)OptoRuntime::rethrow_stub() - ((int)cbuf.insts_end())-4, 3830 3754 runtime_call_Relocation::spec(), RELOC_IMM32 ); 3831 3755 %} … … 3874 3798 emit_d8 (cbuf,0xC0-1+$src$$reg ); 3875 3799 // CALL directly to the runtime 3876 cbuf.set_inst _mark();3800 cbuf.set_insts_mark(); 3877 3801 emit_opcode(cbuf,0xE8); // Call into runtime 3878 emit_d32_reloc(cbuf, (StubRoutines::d2i_wrapper() - cbuf. code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );3802 emit_d32_reloc(cbuf, (StubRoutines::d2i_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); 3879 3803 // Carry on here... 3880 3804 %} … … 3916 3840 emit_d8 (cbuf,0xC0-1+$src$$reg ); 3917 3841 // CALL directly to the runtime 3918 cbuf.set_inst _mark();3842 cbuf.set_insts_mark(); 3919 3843 emit_opcode(cbuf,0xE8); // Call into runtime 3920 emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf. code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );3844 emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); 3921 3845 // Carry on here... 3922 3846 %} … … 3989 3913 3990 3914 // CALL directly to the runtime 3991 cbuf.set_inst _mark();3915 cbuf.set_insts_mark(); 3992 3916 emit_opcode(cbuf,0xE8); // Call into runtime 3993 emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf. code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );3917 emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); 3994 3918 // Carry on here... 3995 3919 %} … … 4063 3987 4064 3988 // CALL directly to the runtime 4065 cbuf.set_inst _mark();3989 cbuf.set_insts_mark(); 4066 3990 emit_opcode(cbuf,0xE8); // Call into runtime 4067 emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf. 
code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );3991 emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); 4068 3992 // Carry on here... 4069 3993 %} … … 4123 4047 4124 4048 // CALL directly to the runtime 4125 cbuf.set_inst _mark();4049 cbuf.set_insts_mark(); 4126 4050 emit_opcode(cbuf,0xE8); // Call into runtime 4127 emit_d32_reloc(cbuf, (StubRoutines::d2i_wrapper() - cbuf. code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );4051 emit_d32_reloc(cbuf, (StubRoutines::d2i_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); 4128 4052 4129 4053 // Carry on here... … … 4322 4246 enc_class enc_storeL_volatile( memory mem, stackSlotL src ) %{ 4323 4247 store_to_stackslot( cbuf, 0x0DF, 0x05, $src$$disp ); 4324 cbuf.set_inst _mark(); // Mark start of FIST in case $mem has an oop4248 cbuf.set_insts_mark(); // Mark start of FIST in case $mem has an oop 4325 4249 emit_opcode(cbuf,0xDF); 4326 4250 int rm_byte_opcode = 0x07; … … 4346 4270 encode_RegMem(cbuf, $tmp$$reg, base, index, scale, displace, disp_is_oop); 4347 4271 } 4348 cbuf.set_inst _mark(); // Mark start of MOVSD in case $mem has an oop4272 cbuf.set_insts_mark(); // Mark start of MOVSD in case $mem has an oop 4349 4273 { // MOVSD $mem,$tmp ! atomic long store 4350 4274 emit_opcode(cbuf,0xF2); … … 4379 4303 emit_rm(cbuf, 0x3, $tmp$$reg, $tmp2$$reg); 4380 4304 } 4381 cbuf.set_inst _mark(); // Mark start of MOVSD in case $mem has an oop4305 cbuf.set_insts_mark(); // Mark start of MOVSD in case $mem has an oop 4382 4306 { // MOVSD $mem,$tmp ! atomic long store 4383 4307 emit_opcode(cbuf,0xF2); … … 4400 4324 4401 4325 enc_class Safepoint_Poll() %{ 4402 cbuf.relocate(cbuf.inst _mark(), relocInfo::poll_type, 0);4326 cbuf.relocate(cbuf.insts_mark(), relocInfo::poll_type, 0); 4403 4327 emit_opcode(cbuf,0x85); 4404 4328 emit_rm (cbuf, 0x0, 0x7, 0x5); … … 4797 4721 %} 4798 4722 4799 // Double Immediate 4723 // Double Immediate one 4800 4724 operand immD1() %{ 4801 4725 predicate( UseSSE<=1 && n->getd() == 1.0 ); … … 4840 4764 // Float Immediate zero 4841 4765 operand immF0() %{ 4842 predicate( UseSSE == 0 && n->getf() == 0.0 ); 4766 predicate(UseSSE == 0 && n->getf() == 0.0F); 4767 match(ConF); 4768 4769 op_cost(5); 4770 format %{ %} 4771 interface(CONST_INTER); 4772 %} 4773 4774 // Float Immediate one 4775 operand immF1() %{ 4776 predicate(UseSSE == 0 && n->getf() == 1.0F); 4843 4777 match(ConF); 4844 4778 … … 7211 7145 7212 7146 // The instruction usage is guarded by predicate in operand immF(). 7213 instruct loadConF(regF dst, immF src) %{7214 match(Set dst src);7147 instruct loadConF(regF dst, immF con) %{ 7148 match(Set dst con); 7215 7149 ins_cost(125); 7216 7217 format %{ "FLD_S ST,$src\n\t" 7150 format %{ "FLD_S ST,[$constantaddress]\t# load from constant table: float=$con\n\t" 7218 7151 "FSTP $dst" %} 7219 opcode(0xD9, 0x00); /* D9 /0 */ 7220 ins_encode(LdImmF(src), Pop_Reg_F(dst) ); 7221 ins_pipe( fpu_reg_con ); 7152 ins_encode %{ 7153 __ fld_s($constantaddress($con)); 7154 __ fstp_d($dst$$reg); 7155 %} 7156 ins_pipe(fpu_reg_con); 7157 %} 7158 7159 // The instruction usage is guarded by predicate in operand immF0(). 
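The immF0/immF1 operands below (and their immD twins) replace the deleted is_positive_zero_float()-style helpers from the old encoders; both exist because FLDZ and FLD1 can materialize only +0.0 and +1.0, and an ordinary floating compare cannot tell +0.0 from -0.0. A small standalone illustration of the bit-pattern test those helpers performed, with std::memcpy standing in for jint_cast:

#include <cstdint>
#include <cstdio>
#include <cstring>

// The removed is_positive_zero_float(f) was jint_cast(f) == jint_cast(0.0F):
// a raw bit compare, because -0.0f == 0.0f is true under value comparison
// even though FLDZ would load the wrong zero for it.
static uint32_t bits_of(float f) {
  uint32_t u;
  std::memcpy(&u, &f, sizeof u);
  return u;
}
int main() {
  printf("0.0f == -0.0f -> %d\n", 0.0f == -0.0f ? 1 : 0); // 1
  printf("bits(+0.0f) = 0x%08x\n", bits_of(0.0f));        // 0x00000000
  printf("bits(-0.0f) = 0x%08x\n", bits_of(-0.0f));       // 0x80000000
  printf("bits(+1.0f) = 0x%08x\n", bits_of(1.0f));        // 0x3f800000
  return 0;
}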
7160 instruct loadConF0(regF dst, immF0 con) %{ 7161 match(Set dst con); 7162 ins_cost(125); 7163 format %{ "FLDZ ST\n\t" 7164 "FSTP $dst" %} 7165 ins_encode %{ 7166 __ fldz(); 7167 __ fstp_d($dst$$reg); 7168 %} 7169 ins_pipe(fpu_reg_con); 7170 %} 7171 7172 // The instruction usage is guarded by predicate in operand immF1(). 7173 instruct loadConF1(regF dst, immF1 con) %{ 7174 match(Set dst con); 7175 ins_cost(125); 7176 format %{ "FLD1 ST\n\t" 7177 "FSTP $dst" %} 7178 ins_encode %{ 7179 __ fld1(); 7180 __ fstp_d($dst$$reg); 7181 %} 7182 ins_pipe(fpu_reg_con); 7222 7183 %} 7223 7184 … … 7226 7187 match(Set dst con); 7227 7188 ins_cost(125); 7228 format %{ "MOVSS $dst,[$con]" %} 7229 ins_encode( Opcode(0xF3), Opcode(0x0F), Opcode(0x10), LdImmX(dst, con)); 7230 ins_pipe( pipe_slow ); 7189 format %{ "MOVSS $dst,[$constantaddress]\t# load from constant table: float=$con" %} 7190 ins_encode %{ 7191 __ movflt($dst$$XMMRegister, $constantaddress($con)); 7192 %} 7193 ins_pipe(pipe_slow); 7231 7194 %} 7232 7195 … … 7236 7199 ins_cost(100); 7237 7200 format %{ "XORPS $dst,$dst\t# float 0.0" %} 7238 ins_encode( Opcode(0x0F), Opcode(0x57), RegReg(dst,dst)); 7239 ins_pipe( pipe_slow ); 7201 ins_encode %{ 7202 __ xorps($dst$$XMMRegister, $dst$$XMMRegister); 7203 %} 7204 ins_pipe(pipe_slow); 7240 7205 %} 7241 7206 7242 7207 // The instruction usage is guarded by predicate in operand immD(). 7243 instruct loadConD(regD dst, immD src) %{7244 match(Set dst src);7208 instruct loadConD(regD dst, immD con) %{ 7209 match(Set dst con); 7245 7210 ins_cost(125); 7246 7211 7247 format %{ "FLD_D ST, $src\n\t"7212 format %{ "FLD_D ST,[$constantaddress]\t# load from constant table: double=$con\n\t" 7248 7213 "FSTP $dst" %} 7249 ins_encode(LdImmD(src), Pop_Reg_D(dst) ); 7250 ins_pipe( fpu_reg_con ); 7214 ins_encode %{ 7215 __ fld_d($constantaddress($con)); 7216 __ fstp_d($dst$$reg); 7217 %} 7218 ins_pipe(fpu_reg_con); 7219 %} 7220 7221 // The instruction usage is guarded by predicate in operand immD0(). 7222 instruct loadConD0(regD dst, immD0 con) %{ 7223 match(Set dst con); 7224 ins_cost(125); 7225 7226 format %{ "FLDZ ST\n\t" 7227 "FSTP $dst" %} 7228 ins_encode %{ 7229 __ fldz(); 7230 __ fstp_d($dst$$reg); 7231 %} 7232 ins_pipe(fpu_reg_con); 7233 %} 7234 7235 // The instruction usage is guarded by predicate in operand immD1(). 
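Note the asymmetry in the hunk above: loadConX/loadConXD fetch arbitrary constants from the constant table, but loadConX0 zeroes the register with XORPS $dst,$dst, which needs no memory operand at all. A rough C intrinsics analogue of the two strategies (an illustration only, not the generated code):

#include <immintrin.h>
#include <cstdio>

int main() {
  // _mm_setzero_ps() typically compiles to the same XORPS xmm,xmm idiom
  // loadConX0 emits; a nonzero constant becomes a scalar load from memory,
  // as in loadConX/loadConXD.
  __m128  fzero  = _mm_setzero_ps();
  __m128d dconst = _mm_set_sd(3.25);
  printf("%f %f\n", _mm_cvtss_f32(fzero), _mm_cvtsd_f64(dconst)); // 0.000000 3.250000
  return 0;
}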
7236 instruct loadConD1(regD dst, immD1 con) %{ 7237 match(Set dst con); 7238 ins_cost(125); 7239 7240 format %{ "FLD1 ST\n\t" 7241 "FSTP $dst" %} 7242 ins_encode %{ 7243 __ fld1(); 7244 __ fstp_d($dst$$reg); 7245 %} 7246 ins_pipe(fpu_reg_con); 7251 7247 %} 7252 7248 … … 7255 7251 match(Set dst con); 7256 7252 ins_cost(125); 7257 format %{ "MOVSD $dst,[$con]" %} 7258 ins_encode(load_conXD(dst, con)); 7259 ins_pipe( pipe_slow ); 7253 format %{ "MOVSD $dst,[$constantaddress]\t# load from constant table: double=$con" %} 7254 ins_encode %{ 7255 __ movdbl($dst$$XMMRegister, $constantaddress($con)); 7256 %} 7257 ins_pipe(pipe_slow); 7260 7258 %} 7261 7259 … … 7332 7330 7333 7331 instruct prefetchr0( memory mem ) %{ 7334 predicate(UseSSE==0 && !VM_Version::supports_3dnow ());7332 predicate(UseSSE==0 && !VM_Version::supports_3dnow_prefetch()); 7335 7333 match(PrefetchRead mem); 7336 7334 ins_cost(0); … … 7342 7340 7343 7341 instruct prefetchr( memory mem ) %{ 7344 predicate(UseSSE==0 && VM_Version::supports_3dnow () || ReadPrefetchInstr==3);7342 predicate(UseSSE==0 && VM_Version::supports_3dnow_prefetch() || ReadPrefetchInstr==3); 7345 7343 match(PrefetchRead mem); 7346 7344 ins_cost(100); … … 7386 7384 7387 7385 instruct prefetchw0( memory mem ) %{ 7388 predicate(UseSSE==0 && !VM_Version::supports_3dnow ());7386 predicate(UseSSE==0 && !VM_Version::supports_3dnow_prefetch()); 7389 7387 match(PrefetchWrite mem); 7390 7388 ins_cost(0); … … 7396 7394 7397 7395 instruct prefetchw( memory mem ) %{ 7398 predicate(UseSSE==0 && VM_Version::supports_3dnow () || AllocatePrefetchInstr==3);7396 predicate(UseSSE==0 && VM_Version::supports_3dnow_prefetch() || AllocatePrefetchInstr==3); 7399 7397 match( PrefetchWrite mem ); 7400 7398 ins_cost(100); … … 8853 8851 %} 8854 8852 8853 // Divide Register Long (no special case since divisor != -1) 8854 instruct divL_eReg_imm32( eADXRegL dst, immL32 imm, eRegI tmp, eRegI tmp2, eFlagsReg cr ) %{ 8855 match(Set dst (DivL dst imm)); 8856 effect( TEMP tmp, TEMP tmp2, KILL cr ); 8857 ins_cost(1000); 8858 format %{ "MOV $tmp,abs($imm) # ldiv EDX:EAX,$imm\n\t" 8859 "XOR $tmp2,$tmp2\n\t" 8860 "CMP $tmp,EDX\n\t" 8861 "JA,s fast\n\t" 8862 "MOV $tmp2,EAX\n\t" 8863 "MOV EAX,EDX\n\t" 8864 "MOV EDX,0\n\t" 8865 "JLE,s pos\n\t" 8866 "LNEG EAX : $tmp2\n\t" 8867 "DIV $tmp # unsigned division\n\t" 8868 "XCHG EAX,$tmp2\n\t" 8869 "DIV $tmp\n\t" 8870 "LNEG $tmp2 : EAX\n\t" 8871 "JMP,s done\n" 8872 "pos:\n\t" 8873 "DIV $tmp\n\t" 8874 "XCHG EAX,$tmp2\n" 8875 "fast:\n\t" 8876 "DIV $tmp\n" 8877 "done:\n\t" 8878 "MOV EDX,$tmp2\n\t" 8879 "NEG EDX:EAX # if $imm < 0" %} 8880 ins_encode %{ 8881 int con = (int)$imm$$constant; 8882 assert(con != 0 && con != -1 && con != min_jint, "wrong divisor"); 8883 int pcon = (con > 0) ? con : -con; 8884 Label Lfast, Lpos, Ldone; 8885 8886 __ movl($tmp$$Register, pcon); 8887 __ xorl($tmp2$$Register,$tmp2$$Register); 8888 __ cmpl($tmp$$Register, HIGH_FROM_LOW($dst$$Register)); 8889 __ jccb(Assembler::above, Lfast); // result fits into 32 bit 8890 8891 __ movl($tmp2$$Register, $dst$$Register); // save 8892 __ movl($dst$$Register, HIGH_FROM_LOW($dst$$Register)); 8893 __ movl(HIGH_FROM_LOW($dst$$Register),0); // preserve flags 8894 __ jccb(Assembler::lessEqual, Lpos); // result is positive 8895 8896 // Negative dividend. 
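// (What follows is base-2^32 schoolbook long division: with the dividend in
//  EDX:EAX and the positive divisor in $tmp, the first unsigned DIV divides
//  the high word, leaving its remainder in EDX; the second DIV then divides
//  that remainder concatenated with the low word. Base-10 intuition: 87/4
//  digit by digit is 8/4 = 2 rem 0, then 07/4 = 1 rem 3, quotient 21.
//  DIV is unsigned, so a negative dividend is first negated and the
//  quotient negated back at the end, exactly as the steps below do.)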
8897 // convert value to positive to use unsigned division 8898 __ lneg($dst$$Register, $tmp2$$Register); 8899 __ divl($tmp$$Register); 8900 __ xchgl($dst$$Register, $tmp2$$Register); 8901 __ divl($tmp$$Register); 8902 // revert result back to negative 8903 __ lneg($tmp2$$Register, $dst$$Register); 8904 __ jmpb(Ldone); 8905 8906 __ bind(Lpos); 8907 __ divl($tmp$$Register); // Use unsigned division 8908 __ xchgl($dst$$Register, $tmp2$$Register); 8909 // Fallthrow for final divide, tmp2 has 32 bit hi result 8910 8911 __ bind(Lfast); 8912 // fast path: src is positive 8913 __ divl($tmp$$Register); // Use unsigned division 8914 8915 __ bind(Ldone); 8916 __ movl(HIGH_FROM_LOW($dst$$Register),$tmp2$$Register); 8917 if (con < 0) { 8918 __ lneg(HIGH_FROM_LOW($dst$$Register), $dst$$Register); 8919 } 8920 %} 8921 ins_pipe( pipe_slow ); 8922 %} 8923 8924 // Remainder Register Long (remainder fit into 32 bits) 8925 instruct modL_eReg_imm32( eADXRegL dst, immL32 imm, eRegI tmp, eRegI tmp2, eFlagsReg cr ) %{ 8926 match(Set dst (ModL dst imm)); 8927 effect( TEMP tmp, TEMP tmp2, KILL cr ); 8928 ins_cost(1000); 8929 format %{ "MOV $tmp,abs($imm) # lrem EDX:EAX,$imm\n\t" 8930 "CMP $tmp,EDX\n\t" 8931 "JA,s fast\n\t" 8932 "MOV $tmp2,EAX\n\t" 8933 "MOV EAX,EDX\n\t" 8934 "MOV EDX,0\n\t" 8935 "JLE,s pos\n\t" 8936 "LNEG EAX : $tmp2\n\t" 8937 "DIV $tmp # unsigned division\n\t" 8938 "MOV EAX,$tmp2\n\t" 8939 "DIV $tmp\n\t" 8940 "NEG EDX\n\t" 8941 "JMP,s done\n" 8942 "pos:\n\t" 8943 "DIV $tmp\n\t" 8944 "MOV EAX,$tmp2\n" 8945 "fast:\n\t" 8946 "DIV $tmp\n" 8947 "done:\n\t" 8948 "MOV EAX,EDX\n\t" 8949 "SAR EDX,31\n\t" %} 8950 ins_encode %{ 8951 int con = (int)$imm$$constant; 8952 assert(con != 0 && con != -1 && con != min_jint, "wrong divisor"); 8953 int pcon = (con > 0) ? con : -con; 8954 Label Lfast, Lpos, Ldone; 8955 8956 __ movl($tmp$$Register, pcon); 8957 __ cmpl($tmp$$Register, HIGH_FROM_LOW($dst$$Register)); 8958 __ jccb(Assembler::above, Lfast); // src is positive and result fits into 32 bit 8959 8960 __ movl($tmp2$$Register, $dst$$Register); // save 8961 __ movl($dst$$Register, HIGH_FROM_LOW($dst$$Register)); 8962 __ movl(HIGH_FROM_LOW($dst$$Register),0); // preserve flags 8963 __ jccb(Assembler::lessEqual, Lpos); // result is positive 8964 8965 // Negative dividend. 
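// (Same base-2^32 cascade as divL_eReg_imm32 above, but keeping only the
//  remainder. Java's % takes the sign of the dividend -- e.g. -7 % 3 == -1,
//  since -7 = 3*(-2) + (-1) under truncating division -- so the negated
//  dividend's unsigned remainder is negated back with NEGL. The MOV/SAR 31
//  pair at Ldone then sign-extends the 32-bit remainder into EDX:EAX; a
//  remainder by a 32-bit divisor always fits in 32 bits.)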
8966 // convert value to positive to use unsigned division 8967 __ lneg($dst$$Register, $tmp2$$Register); 8968 __ divl($tmp$$Register); 8969 __ movl($dst$$Register, $tmp2$$Register); 8970 __ divl($tmp$$Register); 8971 // revert remainder back to negative 8972 __ negl(HIGH_FROM_LOW($dst$$Register)); 8973 __ jmpb(Ldone); 8974 8975 __ bind(Lpos); 8976 __ divl($tmp$$Register); 8977 __ movl($dst$$Register, $tmp2$$Register); 8978 8979 __ bind(Lfast); 8980 // fast path: src is positive 8981 __ divl($tmp$$Register); 8982 8983 __ bind(Ldone); 8984 __ movl($dst$$Register, HIGH_FROM_LOW($dst$$Register)); 8985 __ sarl(HIGH_FROM_LOW($dst$$Register), 31); // result sign 8986 8987 %} 8988 ins_pipe( pipe_slow ); 8989 %} 8990 8855 8991 // Integer Shift Instructions 8856 8992 // Shift Left by one … … 10161 10297 %} 10162 10298 10163 instruct addD_reg_imm1(regD dst, immD1 src) %{10299 instruct addD_reg_imm1(regD dst, immD1 con) %{ 10164 10300 predicate(UseSSE<=1); 10165 match(Set dst (AddD dst src));10301 match(Set dst (AddD dst con)); 10166 10302 ins_cost(125); 10167 10303 format %{ "FLD1\n\t" 10168 10304 "DADDp $dst,ST" %} 10169 opcode(0xDE, 0x00); 10170 ins_encode( LdImmD(src), 10171 OpcP, RegOpc(dst) ); 10172 ins_pipe( fpu_reg ); 10173 %} 10174 10175 instruct addD_reg_imm(regD dst, immD src) %{ 10305 ins_encode %{ 10306 __ fld1(); 10307 __ faddp($dst$$reg); 10308 %} 10309 ins_pipe(fpu_reg); 10310 %} 10311 10312 instruct addD_reg_imm(regD dst, immD con) %{ 10176 10313 predicate(UseSSE<=1 && _kids[1]->_leaf->getd() != 0.0 && _kids[1]->_leaf->getd() != 1.0 ); 10177 match(Set dst (AddD dst src));10314 match(Set dst (AddD dst con)); 10178 10315 ins_cost(200); 10179 format %{ "FLD_D [$ src]\n\t"10316 format %{ "FLD_D [$constantaddress]\t# load from constant table: double=$con\n\t" 10180 10317 "DADDp $dst,ST" %} 10181 opcode(0xDE, 0x00); /* DE /0 */ 10182 ins_encode( LdImmD(src), 10183 OpcP, RegOpc(dst)); 10184 ins_pipe( fpu_reg_mem ); 10318 ins_encode %{ 10319 __ fld_d($constantaddress($con)); 10320 __ faddp($dst$$reg); 10321 %} 10322 ins_pipe(fpu_reg_mem); 10185 10323 %} 10186 10324 … … 10189 10327 match(Set dst (RoundDouble (AddD src con))); 10190 10328 ins_cost(200); 10191 format %{ "FLD_D [$con ]\n\t"10329 format %{ "FLD_D [$constantaddress]\t# load from constant table: double=$con\n\t" 10192 10330 "DADD ST,$src\n\t" 10193 10331 "FSTP_D $dst\t# D-round" %} 10194 opcode(0xD8, 0x00); /* D8 /0 */ 10195 ins_encode( LdImmD(con), 10196 OpcP, RegOpc(src), Pop_Mem_D(dst)); 10197 ins_pipe( fpu_mem_reg_con ); 10332 ins_encode %{ 10333 __ fld_d($constantaddress($con)); 10334 __ fadd($src$$reg); 10335 __ fstp_d(Address(rsp, $dst$$disp)); 10336 %} 10337 ins_pipe(fpu_mem_reg_con); 10198 10338 %} 10199 10339 … … 10210 10350 predicate(UseSSE>=2); 10211 10351 match(Set dst (AddD dst con)); 10212 format %{ "ADDSD $dst,[$con]" %} 10213 ins_encode( Opcode(0xF2), Opcode(0x0F), Opcode(0x58), LdImmXD(dst, con) ); 10214 ins_pipe( pipe_slow ); 10352 format %{ "ADDSD $dst,[$constantaddress]\t# load from constant table: double=$con" %} 10353 ins_encode %{ 10354 __ addsd($dst$$XMMRegister, $constantaddress($con)); 10355 %} 10356 ins_pipe(pipe_slow); 10215 10357 %} 10216 10358 … … 10235 10377 predicate(UseSSE>=2); 10236 10378 match(Set dst (SubD dst con)); 10237 format %{ "SUBSD $dst,[$con]" %} 10238 ins_encode( Opcode(0xF2), Opcode(0x0F), Opcode(0x5C), LdImmXD(dst, con) ); 10239 ins_pipe( pipe_slow ); 10379 format %{ "SUBSD $dst,[$constantaddress]\t# load from constant table: double=$con" %} 10380 ins_encode %{ 10381 __ 
subsd($dst$$XMMRegister, $constantaddress($con)); 10382 %} 10383 ins_pipe(pipe_slow); 10240 10384 %} 10241 10385 … … 10260 10404 predicate(UseSSE>=2); 10261 10405 match(Set dst (MulD dst con)); 10262 format %{ "MULSD $dst,[$con]" %} 10263 ins_encode( Opcode(0xF2), Opcode(0x0F), Opcode(0x59), LdImmXD(dst, con) ); 10264 ins_pipe( pipe_slow ); 10406 format %{ "MULSD $dst,[$constantaddress]\t# load from constant table: double=$con" %} 10407 ins_encode %{ 10408 __ mulsd($dst$$XMMRegister, $constantaddress($con)); 10409 %} 10410 ins_pipe(pipe_slow); 10265 10411 %} 10266 10412 … … 10286 10432 predicate(UseSSE>=2); 10287 10433 match(Set dst (DivD dst con)); 10288 format %{ "DIVSD $dst,[$con]" %} 10289 ins_encode( Opcode(0xF2), Opcode(0x0F), Opcode(0x5E), LdImmXD(dst, con)); 10290 ins_pipe( pipe_slow ); 10434 format %{ "DIVSD $dst,[$constantaddress]\t# load from constant table: double=$con" %} 10435 ins_encode %{ 10436 __ divsd($dst$$XMMRegister, $constantaddress($con)); 10437 %} 10438 ins_pipe(pipe_slow); 10291 10439 %} 10292 10440 … … 10339 10487 %} 10340 10488 10341 instruct mulD_reg_imm(regD dst, immD src) %{10489 instruct mulD_reg_imm(regD dst, immD con) %{ 10342 10490 predicate( UseSSE<=1 && _kids[1]->_leaf->getd() != 0.0 && _kids[1]->_leaf->getd() != 1.0 ); 10343 match(Set dst (MulD dst src));10491 match(Set dst (MulD dst con)); 10344 10492 ins_cost(200); 10345 format %{ "FLD_D [$ src]\n\t"10493 format %{ "FLD_D [$constantaddress]\t# load from constant table: double=$con\n\t" 10346 10494 "DMULp $dst,ST" %} 10347 opcode(0xDE, 0x1); /* DE /1 */ 10348 ins_encode( LdImmD(src), 10349 OpcP, RegOpc(dst) ); 10350 ins_pipe( fpu_reg_mem ); 10495 ins_encode %{ 10496 __ fld_d($constantaddress($con)); 10497 __ fmulp($dst$$reg); 10498 %} 10499 ins_pipe(fpu_reg_mem); 10351 10500 %} 10352 10501 … … 11082 11231 predicate(UseSSE>=1); 11083 11232 match(Set dst (AddF dst con)); 11084 format %{ "ADDSS $dst,[$con]" %} 11085 ins_encode( Opcode(0xF3), Opcode(0x0F), Opcode(0x58), LdImmX(dst, con) ); 11086 ins_pipe( pipe_slow ); 11233 format %{ "ADDSS $dst,[$constantaddress]\t# load from constant table: float=$con" %} 11234 ins_encode %{ 11235 __ addss($dst$$XMMRegister, $constantaddress($con)); 11236 %} 11237 ins_pipe(pipe_slow); 11087 11238 %} 11088 11239 … … 11107 11258 predicate(UseSSE>=1); 11108 11259 match(Set dst (SubF dst con)); 11109 format %{ "SUBSS $dst,[$con]" %} 11110 ins_encode( Opcode(0xF3), Opcode(0x0F), Opcode(0x5C), LdImmX(dst, con) ); 11111 ins_pipe( pipe_slow ); 11260 format %{ "SUBSS $dst,[$constantaddress]\t# load from constant table: float=$con" %} 11261 ins_encode %{ 11262 __ subss($dst$$XMMRegister, $constantaddress($con)); 11263 %} 11264 ins_pipe(pipe_slow); 11112 11265 %} 11113 11266 … … 11132 11285 predicate(UseSSE>=1); 11133 11286 match(Set dst (MulF dst con)); 11134 format %{ "MULSS $dst,[$con]" %} 11135 ins_encode( Opcode(0xF3), Opcode(0x0F), Opcode(0x59), LdImmX(dst, con) ); 11136 ins_pipe( pipe_slow ); 11287 format %{ "MULSS $dst,[$constantaddress]\t# load from constant table: float=$con" %} 11288 ins_encode %{ 11289 __ mulss($dst$$XMMRegister, $constantaddress($con)); 11290 %} 11291 ins_pipe(pipe_slow); 11137 11292 %} 11138 11293 … … 11157 11312 predicate(UseSSE>=1); 11158 11313 match(Set dst (DivF dst con)); 11159 format %{ "DIVSS $dst,[$con]" %} 11160 ins_encode( Opcode(0xF3), Opcode(0x0F), Opcode(0x5E), LdImmX(dst, con) ); 11161 ins_pipe( pipe_slow ); 11314 format %{ "DIVSS $dst,[$constantaddress]\t# load from constant table: float=$con" %} 11315 ins_encode %{ 11316 __ 
divss($dst$$XMMRegister, $constantaddress($con)); 11317 %} 11318 ins_pipe(pipe_slow); 11162 11319 %} 11163 11320 … … 11314 11471 11315 11472 // Spill to obtain 24-bit precision 11316 instruct addF24_reg_imm(stackSlotF dst, regF src 1, immF src2) %{11473 instruct addF24_reg_imm(stackSlotF dst, regF src, immF con) %{ 11317 11474 predicate(UseSSE==0 && Compile::current()->select_24_bit_instr()); 11318 match(Set dst (AddF src 1 src2));11319 format %{ "FLD $src 1\n\t"11320 "FADD $src2\n\t"11475 match(Set dst (AddF src con)); 11476 format %{ "FLD $src\n\t" 11477 "FADD_S [$constantaddress]\t# load from constant table: float=$con\n\t" 11321 11478 "FSTP_S $dst" %} 11322 opcode(0xD8, 0x00); /* D8 /0 */ 11323 ins_encode( Push_Reg_F(src1), 11324 Opc_MemImm_F(src2), 11325 Pop_Mem_F(dst)); 11326 ins_pipe( fpu_mem_reg_con ); 11479 ins_encode %{ 11480 __ fld_s($src$$reg - 1); // FLD ST(i-1) 11481 __ fadd_s($constantaddress($con)); 11482 __ fstp_s(Address(rsp, $dst$$disp)); 11483 %} 11484 ins_pipe(fpu_mem_reg_con); 11327 11485 %} 11328 11486 // 11329 11487 // This instruction does not round to 24-bits 11330 instruct addF_reg_imm(regF dst, regF src 1, immF src2) %{11488 instruct addF_reg_imm(regF dst, regF src, immF con) %{ 11331 11489 predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr()); 11332 match(Set dst (AddF src1 src2)); 11333 format %{ "FLD $src1\n\t" 11334 "FADD $src2\n\t" 11335 "FSTP_S $dst" %} 11336 opcode(0xD8, 0x00); /* D8 /0 */ 11337 ins_encode( Push_Reg_F(src1), 11338 Opc_MemImm_F(src2), 11339 Pop_Reg_F(dst)); 11340 ins_pipe( fpu_reg_reg_con ); 11490 match(Set dst (AddF src con)); 11491 format %{ "FLD $src\n\t" 11492 "FADD_S [$constantaddress]\t# load from constant table: float=$con\n\t" 11493 "FSTP $dst" %} 11494 ins_encode %{ 11495 __ fld_s($src$$reg - 1); // FLD ST(i-1) 11496 __ fadd_s($constantaddress($con)); 11497 __ fstp_d($dst$$reg); 11498 %} 11499 ins_pipe(fpu_reg_reg_con); 11341 11500 %} 11342 11501 … … 11417 11576 11418 11577 // Spill to obtain 24-bit precision 11419 instruct mulF24_reg_imm(stackSlotF dst, regF src 1, immF src2) %{11578 instruct mulF24_reg_imm(stackSlotF dst, regF src, immF con) %{ 11420 11579 predicate(UseSSE==0 && Compile::current()->select_24_bit_instr()); 11421 match(Set dst (MulF src1 src2)); 11422 11423 format %{ "FMULc $dst,$src1,$src2" %} 11424 opcode(0xD8, 0x1); /* D8 /1*/ 11425 ins_encode( Push_Reg_F(src1), 11426 Opc_MemImm_F(src2), 11427 Pop_Mem_F(dst)); 11428 ins_pipe( fpu_mem_reg_con ); 11580 match(Set dst (MulF src con)); 11581 11582 format %{ "FLD $src\n\t" 11583 "FMUL_S [$constantaddress]\t# load from constant table: float=$con\n\t" 11584 "FSTP_S $dst" %} 11585 ins_encode %{ 11586 __ fld_s($src$$reg - 1); // FLD ST(i-1) 11587 __ fmul_s($constantaddress($con)); 11588 __ fstp_s(Address(rsp, $dst$$disp)); 11589 %} 11590 ins_pipe(fpu_mem_reg_con); 11429 11591 %} 11430 11592 // 11431 11593 // This instruction does not round to 24-bits 11432 instruct mulF_reg_imm(regF dst, regF src 1, immF src2) %{11594 instruct mulF_reg_imm(regF dst, regF src, immF con) %{ 11433 11595 predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr()); 11434 match(Set dst (MulF src1 src2)); 11435 11436 format %{ "FMULc $dst. 
$src1, $src2" %} 11437 opcode(0xD8, 0x1); /* D8 /1*/ 11438 ins_encode( Push_Reg_F(src1), 11439 Opc_MemImm_F(src2), 11440 Pop_Reg_F(dst)); 11441 ins_pipe( fpu_reg_reg_con ); 11596 match(Set dst (MulF src con)); 11597 11598 format %{ "FLD $src\n\t" 11599 "FMUL_S [$constantaddress]\t# load from constant table: float=$con\n\t" 11600 "FSTP $dst" %} 11601 ins_encode %{ 11602 __ fld_s($src$$reg - 1); // FLD ST(i-1) 11603 __ fmul_s($constantaddress($con)); 11604 __ fstp_d($dst$$reg); 11605 %} 11606 ins_pipe(fpu_reg_reg_con); 11442 11607 %} 11443 11608 … … 12465 12630 %} 12466 12631 12467 instruct string_compare(eDIRegP str1, eCXRegI cnt1, eSIRegP str2, e BXRegI cnt2,12468 eAXRegI result, regXD tmp1, regXD tmp2,eFlagsReg cr) %{12632 instruct string_compare(eDIRegP str1, eCXRegI cnt1, eSIRegP str2, eDXRegI cnt2, 12633 eAXRegI result, regXD tmp1, eFlagsReg cr) %{ 12469 12634 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2))); 12470 effect(TEMP tmp1, TEMP tmp2,USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);12471 12472 format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1 , $tmp2" %}12635 effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr); 12636 12637 format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1" %} 12473 12638 ins_encode %{ 12474 12639 __ string_compare($str1$$Register, $str2$$Register, 12475 12640 $cnt1$$Register, $cnt2$$Register, $result$$Register, 12476 $tmp1$$XMMRegister , $tmp2$$XMMRegister);12641 $tmp1$$XMMRegister); 12477 12642 %} 12478 12643 ins_pipe( pipe_slow ); … … 12797 12962 match(Jump switch_val); 12798 12963 ins_cost(350); 12799 12800 format %{ "JMP [table_base](,$switch_val,1)\n\t" %} 12801 12964 format %{ "JMP [$constantaddress](,$switch_val,1)\n\t" %} 12802 12965 ins_encode %{ 12803 address table_base = __ address_table_constant(_index2label);12804 12805 12966 // Jump to Address(table_base + switch_reg) 12806 InternalAddress table(table_base);12807 12967 Address index(noreg, $switch_val$$Register, Address::times_1); 12808 __ jump(ArrayAddress( table, index));12968 __ jump(ArrayAddress($constantaddress, index)); 12809 12969 %} 12810 12970 ins_pc_relative(1); … … 12933 13093 if ($cop$$cmpcode == Assembler::notEqual) { 12934 13094 // the two jumps 6 bytes apart so the jump distances are too 12935 parity_disp = l ? (l->loc_pos() - (cbuf. code_size() + 4)) : 0;13095 parity_disp = l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0; 12936 13096 } else if ($cop$$cmpcode == Assembler::equal) { 12937 13097 parity_disp = 6; … … 12943 13103 $$$emit8$primary; 12944 13104 emit_cc(cbuf, $secondary, $cop$$cmpcode); 12945 int disp = l ? (l->loc_pos() - (cbuf. code_size() + 4)) : 0;13105 int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0; 12946 13106 emit_d32(cbuf, disp); 12947 13107 %} … … 13129 13289 int parity_disp = -1; 13130 13290 if ($cop$$cmpcode == Assembler::notEqual) { 13131 parity_disp = l ? (l->loc_pos() - (cbuf. code_size() + 1)) : 0;13291 parity_disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0; 13132 13292 } else if ($cop$$cmpcode == Assembler::equal) { 13133 13293 parity_disp = 2; … … 13137 13297 emit_d8(cbuf, parity_disp); 13138 13298 emit_cc(cbuf, $primary, $cop$$cmpcode); 13139 int disp = l ? (l->loc_pos() - (cbuf. code_size() + 1)) : 0;13299 int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0; 13140 13300 emit_d8(cbuf, disp); 13141 13301 assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp"); -
trunk/openjdk/hotspot/src/cpu/x86/vm/x86_64.ad
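The opening hunks of this file retire the raw pointer bumping (*(cbuf.code_end()) = c; cbuf.set_code_end(...)) in favour of typed appends on the instruction section. A toy model of such an emitter, assuming nothing beyond x86's little-endian immediate layout:

#include <cstdint>
#include <cstdio>
#include <vector>

// Toy stand-in for a code section: an append-only byte buffer with the
// typed emit_intN operations the patch switches to. Splitting wide values
// into bytes keeps the layout little-endian on any host.
struct ToySection {
  std::vector<uint8_t> bytes;
  void emit_int8(uint8_t v)   { bytes.push_back(v); }
  void emit_int16(uint16_t v) { emit_int8(v & 0xFF); emit_int8(v >> 8); }
  void emit_int32(uint32_t v) { emit_int16(v & 0xFFFF); emit_int16(v >> 16); }
};
int main() {
  ToySection s;
  s.emit_int8(0xE9);        // jmp rel32 opcode
  s.emit_int32(0x00000FFB); // displacement
  for (uint8_t b : s.bytes) printf("%02x ", b); // e9 fb 0f 00 00
  printf("\n");
  return 0;
}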
r278 r309 620 620 621 621 // EMIT_RM() 622 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) 623 { 622 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) { 624 623 unsigned char c = (unsigned char) ((f1 << 6) | (f2 << 3) | f3); 625 *(cbuf.code_end()) = c; 626 cbuf.set_code_end(cbuf.code_end() + 1); 624 cbuf.insts()->emit_int8(c); 627 625 } 628 626 629 627 // EMIT_CC() 630 void emit_cc(CodeBuffer &cbuf, int f1, int f2) 631 { 628 void emit_cc(CodeBuffer &cbuf, int f1, int f2) { 632 629 unsigned char c = (unsigned char) (f1 | f2); 633 *(cbuf.code_end()) = c; 634 cbuf.set_code_end(cbuf.code_end() + 1); 630 cbuf.insts()->emit_int8(c); 635 631 } 636 632 637 633 // EMIT_OPCODE() 638 void emit_opcode(CodeBuffer &cbuf, int code) 639 { 640 *(cbuf.code_end()) = (unsigned char) code; 641 cbuf.set_code_end(cbuf.code_end() + 1); 634 void emit_opcode(CodeBuffer &cbuf, int code) { 635 cbuf.insts()->emit_int8((unsigned char) code); 642 636 } 643 637 … … 646 640 int code, relocInfo::relocType reloc, int offset, int format) 647 641 { 648 cbuf.relocate(cbuf.inst _mark() + offset, reloc, format);642 cbuf.relocate(cbuf.insts_mark() + offset, reloc, format); 649 643 emit_opcode(cbuf, code); 650 644 } 651 645 652 646 // EMIT_D8() 653 void emit_d8(CodeBuffer &cbuf, int d8) 654 { 655 *(cbuf.code_end()) = (unsigned char) d8; 656 cbuf.set_code_end(cbuf.code_end() + 1); 647 void emit_d8(CodeBuffer &cbuf, int d8) { 648 cbuf.insts()->emit_int8((unsigned char) d8); 657 649 } 658 650 659 651 // EMIT_D16() 660 void emit_d16(CodeBuffer &cbuf, int d16) 661 { 662 *((short *)(cbuf.code_end())) = d16; 663 cbuf.set_code_end(cbuf.code_end() + 2); 652 void emit_d16(CodeBuffer &cbuf, int d16) { 653 cbuf.insts()->emit_int16(d16); 664 654 } 665 655 666 656 // EMIT_D32() 667 void emit_d32(CodeBuffer &cbuf, int d32) 668 { 669 *((int *)(cbuf.code_end())) = d32; 670 cbuf.set_code_end(cbuf.code_end() + 4); 657 void emit_d32(CodeBuffer &cbuf, int d32) { 658 cbuf.insts()->emit_int32(d32); 671 659 } 672 660 673 661 // EMIT_D64() 674 void emit_d64(CodeBuffer &cbuf, int64_t d64) 675 { 676 *((int64_t*) (cbuf.code_end())) = d64; 677 cbuf.set_code_end(cbuf.code_end() + 8); 662 void emit_d64(CodeBuffer &cbuf, int64_t d64) { 663 cbuf.insts()->emit_int64(d64); 678 664 } 679 665 … … 685 671 { 686 672 assert(reloc != relocInfo::external_word_type, "use 2-arg emit_d32_reloc"); 687 cbuf.relocate(cbuf.inst_mark(), reloc, format); 688 689 *((int*) (cbuf.code_end())) = d32; 690 cbuf.set_code_end(cbuf.code_end() + 4); 673 cbuf.relocate(cbuf.insts_mark(), reloc, format); 674 cbuf.insts()->emit_int32(d32); 691 675 } 692 676 693 677 // emit 32 bit value and construct relocation entry from RelocationHolder 694 void emit_d32_reloc(CodeBuffer& cbuf, 695 int d32, 696 RelocationHolder const& rspec, 697 int format) 698 { 678 void emit_d32_reloc(CodeBuffer& cbuf, int d32, RelocationHolder const& rspec, int format) { 699 679 #ifdef ASSERT 700 680 if (rspec.reloc()->type() == relocInfo::oop_type && … … 703 683 } 704 684 #endif 705 cbuf.relocate(cbuf.inst_mark(), rspec, format); 706 707 *((int* )(cbuf.code_end())) = d32; 708 cbuf.set_code_end(cbuf.code_end() + 4); 685 cbuf.relocate(cbuf.insts_mark(), rspec, format); 686 cbuf.insts()->emit_int32(d32); 709 687 } 710 688 711 689 void emit_d32_reloc(CodeBuffer& cbuf, address addr) { 712 address next_ip = cbuf. 
code_end() + 4;690 address next_ip = cbuf.insts_end() + 4; 713 691 emit_d32_reloc(cbuf, (int) (addr - next_ip), 714 692 external_word_Relocation::spec(addr), … … 718 696 719 697 // emit 64 bit value and construct relocation entry from relocInfo::relocType 720 void emit_d64_reloc(CodeBuffer& cbuf, 721 int64_t d64, 722 relocInfo::relocType reloc, 723 int format) 724 { 725 cbuf.relocate(cbuf.inst_mark(), reloc, format); 726 727 *((int64_t*) (cbuf.code_end())) = d64; 728 cbuf.set_code_end(cbuf.code_end() + 8); 698 void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, relocInfo::relocType reloc, int format) { 699 cbuf.relocate(cbuf.insts_mark(), reloc, format); 700 cbuf.insts()->emit_int64(d64); 729 701 } 730 702 731 703 // emit 64 bit value and construct relocation entry from RelocationHolder 732 void emit_d64_reloc(CodeBuffer& cbuf, 733 int64_t d64, 734 RelocationHolder const& rspec, 735 int format) 736 { 704 void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, RelocationHolder const& rspec, int format) { 737 705 #ifdef ASSERT 738 706 if (rspec.reloc()->type() == relocInfo::oop_type && … … 742 710 } 743 711 #endif 744 cbuf.relocate(cbuf.inst_mark(), rspec, format); 745 746 *((int64_t*) (cbuf.code_end())) = d64; 747 cbuf.set_code_end(cbuf.code_end() + 8); 712 cbuf.relocate(cbuf.insts_mark(), rspec, format); 713 cbuf.insts()->emit_int64(d64); 748 714 } 749 715 … … 868 834 869 835 //============================================================================= 836 const bool Matcher::constant_table_absolute_addressing = true; 837 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty; 838 839 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const { 840 // Empty encoding 841 } 842 843 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const { 844 return 0; 845 } 846 847 #ifndef PRODUCT 848 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const { 849 st->print("# MachConstantBaseNode (empty encoding)"); 850 } 851 #endif 852 853 854 //============================================================================= 870 855 #ifndef PRODUCT 871 856 void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const … … 967 952 } 968 953 969 C->set_frame_complete(cbuf. code_end() - cbuf.code_begin());954 C->set_frame_complete(cbuf.insts_size()); 970 955 971 956 #ifdef ASSERT … … 1051 1036 // testl %rax, off(%rip) // Opcode + ModRM + Disp32 == 6 bytes 1052 1037 // XXX reg_mem doesn't support RIP-relative addressing yet 1053 cbuf.set_inst _mark();1054 cbuf.relocate(cbuf.inst _mark(), relocInfo::poll_return_type, 0); // XXX1038 cbuf.set_insts_mark(); 1039 cbuf.relocate(cbuf.insts_mark(), relocInfo::poll_return_type, 0); // XXX 1055 1040 emit_opcode(cbuf, 0x85); // testl 1056 1041 emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5 1057 // cbuf.inst _mark() is beginning of instruction1042 // cbuf.insts_mark() is beginning of instruction 1058 1043 emit_d32_reloc(cbuf, os::get_polling_page()); 1059 1044 // relocInfo::poll_return_type, … … 1815 1800 // jmp -5 # to self 1816 1801 1817 address mark = cbuf.inst _mark(); // get mark within main instrs section1818 1819 // Note that the code buffer's inst _mark is always relative to insts.1802 address mark = cbuf.insts_mark(); // get mark within main instrs section 1803 1804 // Note that the code buffer's insts_mark is always relative to insts. 1820 1805 // That's why we must use the macroassembler to generate a stub. 
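// (The mark in question is taken relative to the insts section, and
//  cbuf.relocate() together with the emit_dNN_reloc helpers above binds
//  relocation records to it; a stub emitted by hand would attach its
//  relocations to the wrong section. The MacroAssembler manages per-section
//  marks itself, hence the detour here and in the handlers below.)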
1821 1806 MacroAssembler _masm(&cbuf); … … 1831 1816 __ jump(RuntimeAddress(__ pc())); 1832 1817 1833 // Update current stubs pointer and restore code_end.1818 // Update current stubs pointer and restore insts_end. 1834 1819 __ end_a_stub(); 1835 1820 } … … 1869 1854 { 1870 1855 MacroAssembler masm(&cbuf); 1871 uint code_size = cbuf.code_size();1856 uint insts_size = cbuf.insts_size(); 1872 1857 if (UseCompressedOops) { 1873 1858 masm.load_klass(rscratch1, j_rarg0); … … 1881 1866 /* WARNING these NOPs are critical so that verified entry point is properly 1882 1867 4 bytes aligned for patching by NativeJump::patch_verified_entry() */ 1883 int nops_cnt = 4 - ((cbuf. code_size() - code_size) & 0x3);1868 int nops_cnt = 4 - ((cbuf.insts_size() - insts_size) & 0x3); 1884 1869 if (OptoBreakpoint) { 1885 1870 // Leave space for int3 … … 1911 1896 { 1912 1897 1913 // Note that the code buffer's inst _mark is always relative to insts.1898 // Note that the code buffer's insts_mark is always relative to insts. 1914 1899 // That's why we must use the macroassembler to generate a handler. 1915 1900 MacroAssembler _masm(&cbuf); … … 1918 1903 if (base == NULL) return 0; // CodeBuffer::expand failed 1919 1904 int offset = __ offset(); 1920 __ jump(RuntimeAddress(OptoRuntime::exception_blob()-> instructions_begin()));1905 __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point())); 1921 1906 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow"); 1922 1907 __ end_a_stub(); … … 1934 1919 { 1935 1920 1936 // Note that the code buffer's inst _mark is always relative to insts.1921 // Note that the code buffer's insts_mark is always relative to insts. 1937 1922 // That's why we must use the macroassembler to generate a handler. 1938 1923 MacroAssembler _masm(&cbuf); … … 1955 1940 __ end_a_stub(); 1956 1941 return offset; 1957 }1958 1959 static void emit_double_constant(CodeBuffer& cbuf, double x) {1960 int mark = cbuf.insts()->mark_off();1961 MacroAssembler _masm(&cbuf);1962 address double_address = __ double_constant(x);1963 cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift1964 emit_d32_reloc(cbuf,1965 (int) (double_address - cbuf.code_end() - 4),1966 internal_word_Relocation::spec(double_address),1967 RELOC_DISP32);1968 }1969 1970 static void emit_float_constant(CodeBuffer& cbuf, float x) {1971 int mark = cbuf.insts()->mark_off();1972 MacroAssembler _masm(&cbuf);1973 address float_address = __ float_constant(x);1974 cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift1975 emit_d32_reloc(cbuf,1976 (int) (float_address - cbuf.code_end() - 4),1977 internal_word_Relocation::spec(float_address),1978 RELOC_DISP32);1979 1942 } 1980 1943 … … 2098 2061 { 2099 2062 return can_be_java_arg(reg); 2063 } 2064 2065 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) { 2066 // In 64 bit mode a code which use multiply when 2067 // devisor is constant is faster than hardware 2068 // DIV instruction (it uses MulHiL). 2069 return false; 2100 2070 } 2101 2071 … … 2482 2452 // JMP, CALL 2483 2453 Label* l = $labl$$label; 2484 emit_d32(cbuf, l ? (l->loc_pos() - (cbuf. code_size() + 4)) : 0);2454 emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0); 2485 2455 %} 2486 2456 … … 2489 2459 // JMP, CALL 2490 2460 Label* l = $labl$$label; 2491 int disp = l ? (l->loc_pos() - (cbuf. code_size() + 1)) : 0;2461 int disp = l ? 
(l->loc_pos() - (cbuf.insts_size() + 1)) : 0; 2492 2462 assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp"); 2493 2463 emit_d8(cbuf, disp); … … 2518 2488 $$$emit8$primary; 2519 2489 emit_cc(cbuf, $secondary, $cop$$cmpcode); 2520 emit_d32(cbuf, l ? (l->loc_pos() - (cbuf. code_size() + 4)) : 0);2490 emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0); 2521 2491 %} 2522 2492 … … 2526 2496 Label *l = $labl$$label; 2527 2497 emit_cc(cbuf, $primary, $cop$$cmpcode); 2528 int disp = l ? (l->loc_pos() - (cbuf. code_size() + 1)) : 0;2498 int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0; 2529 2499 assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp"); 2530 2500 emit_d8(cbuf, disp); … … 2610 2580 // CALL Java_To_Interpreter 2611 2581 // This is the instruction starting address for relocation info. 2612 cbuf.set_inst _mark();2582 cbuf.set_insts_mark(); 2613 2583 $$$emit8$primary; 2614 2584 // CALL directly to the runtime 2615 2585 emit_d32_reloc(cbuf, 2616 (int) ($meth$$method - ((intptr_t) cbuf. code_end()) - 4),2586 (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4), 2617 2587 runtime_call_Relocation::spec(), 2618 2588 RELOC_DISP32); … … 2620 2590 2621 2591 enc_class preserve_SP %{ 2622 debug_only(int off0 = cbuf. code_size());2592 debug_only(int off0 = cbuf.insts_size()); 2623 2593 MacroAssembler _masm(&cbuf); 2624 2594 // RBP is preserved across all calls, even compiled calls. 2625 2595 // Use it to preserve RSP in places where the callee might change the SP. 2626 2596 __ movptr(rbp_mh_SP_save, rsp); 2627 debug_only(int off1 = cbuf. code_size());2597 debug_only(int off1 = cbuf.insts_size()); 2628 2598 assert(off1 - off0 == preserve_SP_size(), "correct size prediction"); 2629 2599 %} … … 2639 2609 // CALL to fixup routine. Fixup routine uses ScopeDesc info to 2640 2610 // determine who we intended to call. 2641 cbuf.set_inst _mark();2611 cbuf.set_insts_mark(); 2642 2612 $$$emit8$primary; 2643 2613 2644 2614 if (!_method) { 2645 2615 emit_d32_reloc(cbuf, 2646 (int) ($meth$$method - ((intptr_t) cbuf. code_end()) - 4),2616 (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4), 2647 2617 runtime_call_Relocation::spec(), 2648 2618 RELOC_DISP32); 2649 2619 } else if (_optimized_virtual) { 2650 2620 emit_d32_reloc(cbuf, 2651 (int) ($meth$$method - ((intptr_t) cbuf. code_end()) - 4),2621 (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4), 2652 2622 opt_virtual_call_Relocation::spec(), 2653 2623 RELOC_DISP32); 2654 2624 } else { 2655 2625 emit_d32_reloc(cbuf, 2656 (int) ($meth$$method - ((intptr_t) cbuf. code_end()) - 4),2626 (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4), 2657 2627 static_call_Relocation::spec(), 2658 2628 RELOC_DISP32); … … 2670 2640 // Generate "movq rax, -1", placeholder instruction to load oop-info 2671 2641 // emit_call_dynamic_prologue( cbuf ); 2672 cbuf.set_inst _mark();2642 cbuf.set_insts_mark(); 2673 2643 2674 2644 // movq rax, -1 … … 2678 2648 (int64_t) Universe::non_oop_word(), 2679 2649 oop_Relocation::spec_for_immediate(), RELOC_IMM64); 2680 address virtual_call_oop_addr = cbuf.inst _mark();2650 address virtual_call_oop_addr = cbuf.insts_mark(); 2681 2651 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine 2682 2652 // who we intended to call. 2683 cbuf.set_inst _mark();2653 cbuf.set_insts_mark(); 2684 2654 $$$emit8$primary; 2685 2655 emit_d32_reloc(cbuf, 2686 (int) ($meth$$method - ((intptr_t) cbuf. 
code_end()) - 4),2656 (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4), 2687 2657 virtual_call_Relocation::spec(virtual_call_oop_addr), 2688 2658 RELOC_DISP32); … … 2698 2668 2699 2669 // callq *disp(%rax) 2700 cbuf.set_inst _mark();2670 cbuf.set_insts_mark(); 2701 2671 $$$emit8$primary; 2702 2672 if (disp < 0x80) { … … 2815 2785 emit_d64(cbuf, $src$$constant); 2816 2786 } 2817 %}2818 2819 enc_class load_immF(regF dst, immF con)2820 %{2821 // XXX reg_mem doesn't support RIP-relative addressing yet2822 emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 1012823 emit_float_constant(cbuf, $con$$constant);2824 %}2825 2826 enc_class load_immD(regD dst, immD con)2827 %{2828 // XXX reg_mem doesn't support RIP-relative addressing yet2829 emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 1012830 emit_double_constant(cbuf, $con$$constant);2831 %}2832 2833 enc_class load_conF (regF dst, immF con) %{ // Load float constant2834 emit_opcode(cbuf, 0xF3);2835 if ($dst$$reg >= 8) {2836 emit_opcode(cbuf, Assembler::REX_R);2837 }2838 emit_opcode(cbuf, 0x0F);2839 emit_opcode(cbuf, 0x10);2840 emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 1012841 emit_float_constant(cbuf, $con$$constant);2842 %}2843 2844 enc_class load_conD (regD dst, immD con) %{ // Load double constant2845 // UseXmmLoadAndClearUpper ? movsd(dst, con) : movlpd(dst, con)2846 emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);2847 if ($dst$$reg >= 8) {2848 emit_opcode(cbuf, Assembler::REX_R);2849 }2850 emit_opcode(cbuf, 0x0F);2851 emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12);2852 emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 1012853 emit_double_constant(cbuf, $con$$constant);2854 2787 %} 2855 2788 … … 2952 2885 emit_rm(cbuf, 0x00, $t1$$reg, 0x05 ); 2953 2886 emit_d32(cbuf, 0x00); 2954 %}2955 2956 enc_class jump_enc(rRegL switch_val, rRegI dest) %{2957 MacroAssembler masm(&cbuf);2958 2959 Register switch_reg = as_Register($switch_val$$reg);2960 Register dest_reg = as_Register($dest$$reg);2961 address table_base = masm.address_table_constant(_index2label);2962 2963 // We could use jump(ArrayAddress) except that the macro assembler needs to use r102964 // to do that and the compiler is using that register as one it can allocate.2965 // So we build it all by hand.2966 // Address index(noreg, switch_reg, Address::times_1);2967 // ArrayAddress dispatch(table, index);2968 2969 Address dispatch(dest_reg, switch_reg, Address::times_1);2970 2971 masm.lea(dest_reg, InternalAddress(table_base));2972 masm.jmp(dispatch);2973 %}2974 2975 enc_class jump_enc_addr(rRegL switch_val, immI2 shift, immL32 offset, rRegI dest) %{2976 MacroAssembler masm(&cbuf);2977 2978 Register switch_reg = as_Register($switch_val$$reg);2979 Register dest_reg = as_Register($dest$$reg);2980 address table_base = masm.address_table_constant(_index2label);2981 2982 // We could use jump(ArrayAddress) except that the macro assembler needs to use r102983 // to do that and the compiler is using that register as one it can allocate.2984 // So we build it all by hand.2985 // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant, (int)$offset$$constant);2986 // ArrayAddress dispatch(table, index);2987 2988 Address dispatch(dest_reg, switch_reg, (Address::ScaleFactor)$shift$$constant, (int)$offset$$constant);2989 2990 masm.lea(dest_reg, InternalAddress(table_base));2991 masm.jmp(dispatch);2992 %}2993 2994 enc_class jump_enc_offset(rRegL switch_val, immI2 shift, rRegI dest) %{2995 MacroAssembler masm(&cbuf);2996 2997 Register switch_reg = 
as_Register($switch_val$$reg);2998 Register dest_reg = as_Register($dest$$reg);2999 address table_base = masm.address_table_constant(_index2label);3000 3001 // We could use jump(ArrayAddress) except that the macro assembler needs to use r103002 // to do that and the compiler is using that register as one it can allocate.3003 // So we build it all by hand.3004 // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant);3005 // ArrayAddress dispatch(table, index);3006 3007 Address dispatch(dest_reg, switch_reg, (Address::ScaleFactor)$shift$$constant);3008 masm.lea(dest_reg, InternalAddress(table_base));3009 masm.jmp(dispatch);3010 3011 2887 %} 3012 2888 … … 3730 3606 enc_class enc_rethrow() 3731 3607 %{ 3732 cbuf.set_inst _mark();3608 cbuf.set_insts_mark(); 3733 3609 emit_opcode(cbuf, 0xE9); // jmp entry 3734 3610 emit_d32_reloc(cbuf, 3735 (int) (OptoRuntime::rethrow_stub() - cbuf. code_end() - 4),3611 (int) (OptoRuntime::rethrow_stub() - cbuf.insts_end() - 4), 3736 3612 runtime_call_Relocation::spec(), 3737 3613 RELOC_DISP32); … … 3743 3619 address signmask_address = (address) StubRoutines::x86::float_sign_mask(); 3744 3620 3745 cbuf.set_inst _mark();3621 cbuf.set_insts_mark(); 3746 3622 if (dstenc >= 8) { 3747 3623 emit_opcode(cbuf, Assembler::REX_R); … … 3760 3636 address signmask_address = (address) StubRoutines::x86::double_sign_mask(); 3761 3637 3762 cbuf.set_inst _mark();3638 cbuf.set_insts_mark(); 3763 3639 emit_opcode(cbuf, 0x66); 3764 3640 if (dstenc >= 8) { … … 3778 3654 address signflip_address = (address) StubRoutines::x86::float_sign_flip(); 3779 3655 3780 cbuf.set_inst _mark();3656 cbuf.set_insts_mark(); 3781 3657 if (dstenc >= 8) { 3782 3658 emit_opcode(cbuf, Assembler::REX_R); … … 3795 3671 address signflip_address = (address) StubRoutines::x86::double_sign_flip(); 3796 3672 3797 cbuf.set_inst _mark();3673 cbuf.set_insts_mark(); 3798 3674 emit_opcode(cbuf, 0x66); 3799 3675 if (dstenc >= 8) { … … 3847 3723 3848 3724 // call f2i_fixup 3849 cbuf.set_inst _mark();3725 cbuf.set_insts_mark(); 3850 3726 emit_opcode(cbuf, 0xE8); 3851 3727 emit_d32_reloc(cbuf, 3852 3728 (int) 3853 (StubRoutines::x86::f2i_fixup() - cbuf. code_end() - 4),3729 (StubRoutines::x86::f2i_fixup() - cbuf.insts_end() - 4), 3854 3730 runtime_call_Relocation::spec(), 3855 3731 RELOC_DISP32); … … 3871 3747 3872 3748 // cmpq $dst, [0x8000000000000000] 3873 cbuf.set_inst _mark();3749 cbuf.set_insts_mark(); 3874 3750 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX_W : Assembler::REX_WR); 3875 3751 emit_opcode(cbuf, 0x39); … … 3905 3781 3906 3782 // call f2l_fixup 3907 cbuf.set_inst _mark();3783 cbuf.set_insts_mark(); 3908 3784 emit_opcode(cbuf, 0xE8); 3909 3785 emit_d32_reloc(cbuf, 3910 3786 (int) 3911 (StubRoutines::x86::f2l_fixup() - cbuf. code_end() - 4),3787 (StubRoutines::x86::f2l_fixup() - cbuf.insts_end() - 4), 3912 3788 runtime_call_Relocation::spec(), 3913 3789 RELOC_DISP32); … … 3961 3837 3962 3838 // call d2i_fixup 3963 cbuf.set_inst _mark();3839 cbuf.set_insts_mark(); 3964 3840 emit_opcode(cbuf, 0xE8); 3965 3841 emit_d32_reloc(cbuf, 3966 3842 (int) 3967 (StubRoutines::x86::d2i_fixup() - cbuf. code_end() - 4),3843 (StubRoutines::x86::d2i_fixup() - cbuf.insts_end() - 4), 3968 3844 runtime_call_Relocation::spec(), 3969 3845 RELOC_DISP32); … … 3985 3861 3986 3862 // cmpq $dst, [0x8000000000000000] 3987 cbuf.set_inst _mark();3863 cbuf.set_insts_mark(); 3988 3864 emit_opcode(cbuf, dstenc < 8 ? 
 
     // call d2l_fixup
-    cbuf.set_inst_mark();
+    cbuf.set_insts_mark();
     emit_opcode(cbuf, 0xE8);
     emit_d32_reloc(cbuf,
                    (int)
-                   (StubRoutines::x86::d2l_fixup() - cbuf.code_end() - 4),
+                   (StubRoutines::x86::d2l_fixup() - cbuf.insts_end() - 4),
                    runtime_call_Relocation::spec(),
                    RELOC_DISP32);
…
     // testl %rax, off(%rip) // Opcode + ModRM + Disp32 == 6 bytes
     // XXX reg_mem doesn't support RIP-relative addressing yet
-    cbuf.set_inst_mark();
-    cbuf.relocate(cbuf.inst_mark(), relocInfo::poll_type, 0); // XXX
+    cbuf.set_insts_mark();
+    cbuf.relocate(cbuf.insts_mark(), relocInfo::poll_type, 0); // XXX
     emit_opcode(cbuf, 0x85); // testl
     emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5
-    // cbuf.inst_mark() is beginning of instruction
+    // cbuf.insts_mark() is beginning of instruction
     emit_d32_reloc(cbuf, os::get_polling_page());
                    // relocInfo::poll_type,
…
 %}
 
-instruct loadConP(rRegP dst, immP src)
-%{
-  match(Set dst src);
-
-  format %{ "movq    $dst, $src\t# ptr" %}
-  ins_encode(load_immP(dst, src));
+instruct loadConP(rRegP dst, immP con) %{
+  match(Set dst con);
+
+  format %{ "movq    $dst, $con\t# ptr" %}
+  ins_encode(load_immP(dst, con));
   ins_pipe(ialu_reg_fat); // XXX
 %}
…
 %}
 
-instruct loadConF(regF dst, immF src)
-%{
-  match(Set dst src);
+instruct loadConF(regF dst, immF con) %{
+  match(Set dst con);
   ins_cost(125);
-
-  format %{ "movss   $dst, [$src]" %}
-  ins_encode(load_conF(dst, src));
+  format %{ "movss   $dst, [$constantaddress]\t# load from constant table: float=$con" %}
+  ins_encode %{
+    __ movflt($dst$$XMMRegister, $constantaddress($con));
+  %}
   ins_pipe(pipe_slow);
 %}
…
 
 // Use the same format since predicate() can not be used here.
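Aside (hypothetical sketch, not C2's actual constant-table API): the $constantaddress($con) operand used by the new encodings resolves to the address of a literal interned once into a per-method table, roughly in the spirit of:

    #include <cstddef>
    #include <cstring>
    #include <vector>

    // Made-up model: intern a literal, hand back its slot so instructions
    // can reference table memory instead of hand-encoded immediates.
    class ConstTable {
      std::vector<unsigned char> bytes_;
    public:
      std::size_t add_double(double d) {      // returns the constant's offset
        std::size_t off = bytes_.size();
        bytes_.resize(off + sizeof d);
        std::memcpy(&bytes_[off], &d, sizeof d);
        return off;
      }
      const void* address_at(std::size_t off) const { return &bytes_[off]; }
    };

With such a table, `__ movflt(dst, $constantaddress($con))` is an ordinary movss from table memory rather than the per-instruction byte-building the removed load_conF performed.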
-instruct loadConD(regD dst, immD src)
-%{
-  match(Set dst src);
+instruct loadConD(regD dst, immD con) %{
+  match(Set dst con);
   ins_cost(125);
-
-  format %{ "movsd   $dst, [$src]" %}
-  ins_encode(load_conD(dst, src));
+  format %{ "movsd   $dst, [$constantaddress]\t# load from constant table: double=$con" %}
+  ins_encode %{
+    __ movdbl($dst$$XMMRegister, $constantaddress($con));
+  %}
   ins_pipe(pipe_slow);
 %}
…
   ins_pipe( ialu_reg );
 %}
 
-instruct loadI_reversed(rRegI dst, memory src) %{
-  match(Set dst (ReverseBytesI (LoadI src)));
-
-  format %{ "bswap_movl $dst, $src" %}
-  opcode(0x8B, 0x0F, 0xC8); /* Opcode 8B 0F C8 */
-  ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src), REX_reg(dst), OpcS, opc3_reg(dst));
-  ins_pipe( ialu_reg_mem );
-%}
-
-instruct loadL_reversed(rRegL dst, memory src) %{
-  match(Set dst (ReverseBytesL (LoadL src)));
-
-  format %{ "bswap_movq $dst, $src" %}
-  opcode(0x8B, 0x0F, 0xC8); /* Opcode 8B 0F C8 */
-  ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src), REX_reg_wide(dst), OpcS, opc3_reg(dst));
-  ins_pipe( ialu_reg_mem );
-%}
-
-instruct storeI_reversed(memory dst, rRegI src) %{
-  match(Set dst (StoreI dst (ReverseBytesI src)));
-
-  format %{ "movl_bswap $dst, $src" %}
-  opcode(0x0F, 0xC8, 0x89); /* Opcode 0F C8 89 */
-  ins_encode( REX_reg(src), OpcP, opc2_reg(src), REX_reg_mem(src, dst), OpcT, reg_mem(src, dst) );
-  ins_pipe( ialu_mem_reg );
-%}
-
-instruct storeL_reversed(memory dst, rRegL src) %{
-  match(Set dst (StoreL dst (ReverseBytesL src)));
-
-  format %{ "movq_bswap $dst, $src" %}
-  opcode(0x0F, 0xC8, 0x89); /* Opcode 0F C8 89 */
-  ins_encode( REX_reg_wide(src), OpcP, opc2_reg(src), REX_reg_mem_wide(src, dst), OpcT, reg_mem(src, dst) );
-  ins_pipe( ialu_mem_reg );
-%}
-
 
 //---------- Zeros Count Instructions ------------------------------------------
…
   effect(TEMP dest);
 
-  format %{ "leaq    $dest, table_base\n\t"
+  format %{ "leaq    $dest, [$constantaddress]\n\t"
             "jmp     [$dest + $switch_val << $shift]\n\t" %}
-  ins_encode(jump_enc_offset(switch_val, shift, dest));
+  ins_encode %{
+    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
+    // to do that and the compiler is using that register as one it can allocate.
+    // So we build it all by hand.
+    // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant);
+    // ArrayAddress dispatch(table, index);
+    Address dispatch($dest$$Register, $switch_val$$Register, (Address::ScaleFactor) $shift$$constant);
+    __ lea($dest$$Register, $constantaddress);
+    __ jmp(dispatch);
+  %}
   ins_pipe(pipe_jmp);
   ins_pc_relative(1);
…
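Aside (illustrative analogue in plain C++, not generated code): the jumpXtnd encodings above emit `leaq` of a table base followed by an indirect `jmp` through a scaled index. The same dispatch shape, with ordinary function pointers standing in for the jump-table entries:

    #include <cstdio>

    typedef void (*handler_t)();
    static void case0() { std::puts("case 0"); }
    static void case1() { std::puts("case 1"); }

    int main() {
      handler_t table[] = { case0, case1 };  // lea: take the table's base address
      long switch_val = 1;
      table[switch_val]();                   // jmp [base + switch_val * sizeof(void*)]
      return 0;
    }

The design note in the diff's own comments explains why the macro assembler's jump(ArrayAddress) helper is avoided: it would need r10, which the register allocator is free to hand out.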
   effect(TEMP dest);
 
-  format %{ "leaq    $dest, table_base\n\t"
+  format %{ "leaq    $dest, [$constantaddress]\n\t"
             "jmp     [$dest + $switch_val << $shift + $offset]\n\t" %}
-  ins_encode(jump_enc_addr(switch_val, shift, offset, dest));
+  ins_encode %{
+    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
+    // to do that and the compiler is using that register as one it can allocate.
+    // So we build it all by hand.
+    // Address index(noreg, switch_reg, (Address::ScaleFactor) $shift$$constant, (int) $offset$$constant);
+    // ArrayAddress dispatch(table, index);
+    Address dispatch($dest$$Register, $switch_val$$Register, (Address::ScaleFactor) $shift$$constant, (int) $offset$$constant);
+    __ lea($dest$$Register, $constantaddress);
+    __ jmp(dispatch);
+  %}
   ins_pipe(pipe_jmp);
   ins_pc_relative(1);
…
   effect(TEMP dest);
 
-  format %{ "leaq    $dest, table_base\n\t"
+  format %{ "leaq    $dest, [$constantaddress]\n\t"
             "jmp     [$dest + $switch_val]\n\t" %}
-  ins_encode(jump_enc(switch_val, dest));
+  ins_encode %{
+    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
+    // to do that and the compiler is using that register as one it can allocate.
+    // So we build it all by hand.
+    // Address index(noreg, switch_reg, Address::times_1);
+    // ArrayAddress dispatch(table, index);
+    Address dispatch($dest$$Register, $switch_val$$Register, Address::times_1);
+    __ lea($dest$$Register, $constantaddress);
+    __ jmp(dispatch);
+  %}
   ins_pipe(pipe_jmp);
   ins_pc_relative(1);
…
 %}
 
-instruct cmpF_cc_imm(rFlagsRegU cr, regF src1, immF src2)
-%{
-  match(Set cr (CmpF src1 src2));
+instruct cmpF_cc_imm(rFlagsRegU cr, regF src, immF con) %{
+  match(Set cr (CmpF src con));
 
   ins_cost(145);
-  format %{ "ucomiss $src1, $src2\n\t"
+  format %{ "ucomiss $src, [$constantaddress]\t# load from constant table: float=$con\n\t"
             "jnp,s   exit\n\t"
             "pushfq\t# saw NaN, set CF\n\t"
…
             "popfq\n"
     "exit:  nop\t# avoid branch to branch" %}
-  opcode(0x0F, 0x2E);
-  ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2),
-             cmpfp_fixup);
+  ins_encode %{
+    Label L_exit;
+    __ ucomiss($src$$XMMRegister, $constantaddress($con));
+    __ jcc(Assembler::noParity, L_exit);
+    __ pushf();
+    __ andq(rsp, 0xffffff2b);
+    __ popf();
+    __ bind(L_exit);
+    __ nop();
+  %}
   ins_pipe(pipe_slow);
 %}
 
-instruct cmpF_cc_immCF(rFlagsRegUCF cr, regF src1, immF src2) %{
-  match(Set cr (CmpF src1 src2));
-
+instruct cmpF_cc_immCF(rFlagsRegUCF cr, regF src, immF con) %{
+  match(Set cr (CmpF src con));
   ins_cost(100);
-  format %{ "ucomiss $src1, $src2" %}
-  opcode(0x0F, 0x2E);
-  ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2));
+  format %{ "ucomiss $src, [$constantaddress]\t# load from constant table: float=$con" %}
+  ins_encode %{
+    __ ucomiss($src$$XMMRegister, $constantaddress($con));
+  %}
   ins_pipe(pipe_slow);
 %}
…
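Aside (editor's illustration of the mask arithmetic, checkable with any C++ compiler): ucomiss against a NaN reports "unordered" as ZF=PF=CF=1. The pushfq/andq/popfq fixup in the new encoding masks the saved RFLAGS with 0xffffff2b, which clears ZF (0x40), AF (0x10) and PF (0x04) but keeps CF (0x01), so an unordered compare reads as "below" to the unsigned branches that follow:

    #include <cassert>
    #include <cstdint>

    int main() {
      const std::uint32_t CF = 0x01, RSVD1 = 0x02, PF = 0x04, ZF = 0x40;
      std::uint32_t unordered = ZF | PF | CF | RSVD1;  // low byte 0x47 after a NaN
      std::uint32_t fixed = unordered & 0xffffff2b;    // the mask from the diff
      assert(fixed == (CF | RSVD1));                   // only CF (plus reserved bit 1) survives
      return 0;
    }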
 %}
 
-instruct cmpD_cc_imm(rFlagsRegU cr, regD src1, immD src2)
-%{
-  match(Set cr (CmpD src1 src2));
+instruct cmpD_cc_imm(rFlagsRegU cr, regD src, immD con) %{
+  match(Set cr (CmpD src con));
 
   ins_cost(145);
-  format %{ "ucomisd $src1, [$src2]\n\t"
+  format %{ "ucomisd $src, [$constantaddress]\t# load from constant table: double=$con\n\t"
             "jnp,s   exit\n\t"
             "pushfq\t# saw NaN, set CF\n\t"
…
             "popfq\n"
     "exit:  nop\t# avoid branch to branch" %}
-  opcode(0x66, 0x0F, 0x2E);
-  ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, load_immD(src1, src2),
-             cmpfp_fixup);
+  ins_encode %{
+    Label L_exit;
+    __ ucomisd($src$$XMMRegister, $constantaddress($con));
+    __ jcc(Assembler::noParity, L_exit);
+    __ pushf();
+    __ andq(rsp, 0xffffff2b);
+    __ popf();
+    __ bind(L_exit);
+    __ nop();
+  %}
   ins_pipe(pipe_slow);
 %}
 
-instruct cmpD_cc_immCF(rFlagsRegUCF cr, regD src1, immD src2) %{
-  match(Set cr (CmpD src1 src2));
-
+instruct cmpD_cc_immCF(rFlagsRegUCF cr, regD src, immD con) %{
+  match(Set cr (CmpD src con));
   ins_cost(100);
-  format %{ "ucomisd $src1, [$src2]" %}
-  opcode(0x66, 0x0F, 0x2E);
-  ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, load_immD(src1, src2));
+  format %{ "ucomisd $src, [$constantaddress]\t# load from constant table: double=$con" %}
+  ins_encode %{
+    __ ucomisd($src$$XMMRegister, $constantaddress($con));
+  %}
   ins_pipe(pipe_slow);
 %}
…
 
 // Compare into -1,0,1
-instruct cmpF_imm(rRegI dst, regF src1, immF src2, rFlagsReg cr)
-%{
-  match(Set dst (CmpF3 src1 src2));
+instruct cmpF_imm(rRegI dst, regF src, immF con, rFlagsReg cr) %{
+  match(Set dst (CmpF3 src con));
   effect(KILL cr);
 
   ins_cost(275);
-  format %{ "ucomiss $src1, [$src2]\n\t"
+  format %{ "ucomiss $src, [$constantaddress]\t# load from constant table: float=$con\n\t"
             "movl    $dst, #-1\n\t"
             "jp,s    done\n\t"
…
             "movzbl  $dst, $dst\n"
     "done:" %}
-
-  opcode(0x0F, 0x2E);
-  ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2),
-             cmpfp3(dst));
+  ins_encode %{
+    Label L_done;
+    Register Rdst = $dst$$Register;
+    __ ucomiss($src$$XMMRegister, $constantaddress($con));
+    __ movl(Rdst, -1);
+    __ jcc(Assembler::parity, L_done);
+    __ jcc(Assembler::below, L_done);
+    __ setb(Assembler::notEqual, Rdst);
+    __ movzbl(Rdst, Rdst);
+    __ bind(L_done);
+  %}
   ins_pipe(pipe_slow);
 %}
…
 
 // Compare into -1,0,1
-instruct cmpD_imm(rRegI dst, regD src1, immD src2, rFlagsReg cr)
-%{
-  match(Set dst (CmpD3 src1 src2));
+instruct cmpD_imm(rRegI dst, regD src, immD con, rFlagsReg cr) %{
+  match(Set dst (CmpD3 src con));
   effect(KILL cr);
 
   ins_cost(275);
-  format %{ "ucomisd $src1, [$src2]\n\t"
+  format %{ "ucomisd $src, [$constantaddress]\t# load from constant table: double=$con\n\t"
             "movl    $dst, #-1\n\t"
             "jp,s    done\n\t"
…
             "movzbl  $dst, $dst\n"
     "done:" %}
-
-  opcode(0x66, 0x0F, 0x2E);
-  ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, load_immD(src1, src2),
-             cmpfp3(dst));
+  ins_encode %{
+    Register Rdst = $dst$$Register;
+    Label L_done;
+    __ ucomisd($src$$XMMRegister, $constantaddress($con));
+    __ movl(Rdst, -1);
+    __ jcc(Assembler::parity, L_done);
+    __ jcc(Assembler::below, L_done);
+    __ setb(Assembler::notEqual, Rdst);
+    __ movzbl(Rdst, Rdst);
+    __ bind(L_done);
+  %}
   ins_pipe(pipe_slow);
 %}
…
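Aside (behavioral sketch only, not HotSpot code): the CmpF3/CmpD3 encodings above compute the JVM's fcmpl/dcmpl three-way result, where NaN compares as -1. The movl(-1) / jp / jb / setne / movzbl sequence maps onto this portable C++:

    #include <cassert>
    #include <cmath>

    int cmp3(float a, float b) {
      if (std::isunordered(a, b) || a < b) return -1;  // jp,s done / jb,s done
      return (a != b) ? 1 : 0;                         // setne + movzbl
    }

    int main() {
      assert(cmp3(1.0f, 2.0f) == -1);
      assert(cmp3(2.0f, 2.0f) == 0);
      assert(cmp3(3.0f, 2.0f) == 1);
      assert(cmp3(NAN,  2.0f) == -1);  // unordered falls out as -1
      return 0;
    }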
 %}
 
-instruct addF_imm(regF dst, immF src)
-%{
-  match(Set dst (AddF dst src));
-
-  format %{ "addss   $dst, [$src]" %}
+instruct addF_imm(regF dst, immF con) %{
+  match(Set dst (AddF dst con));
+  format %{ "addss   $dst, [$constantaddress]\t# load from constant table: float=$con" %}
   ins_cost(150); // XXX
-  opcode(0xF3, 0x0F, 0x58);
-  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
+  ins_encode %{
+    __ addss($dst$$XMMRegister, $constantaddress($con));
+  %}
   ins_pipe(pipe_slow);
 %}
…
-instruct addD_imm(regD dst, immD src)
-%{
-  match(Set dst (AddD dst src));
-
-  format %{ "addsd   $dst, [$src]" %}
+instruct addD_imm(regD dst, immD con) %{
+  match(Set dst (AddD dst con));
+  format %{ "addsd   $dst, [$constantaddress]\t# load from constant table: double=$con" %}
   ins_cost(150); // XXX
-  opcode(0xF2, 0x0F, 0x58);
-  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
+  ins_encode %{
+    __ addsd($dst$$XMMRegister, $constantaddress($con));
+  %}
   ins_pipe(pipe_slow);
 %}
…
-instruct subF_imm(regF dst, immF src)
-%{
-  match(Set dst (SubF dst src));
-
-  format %{ "subss   $dst, [$src]" %}
+instruct subF_imm(regF dst, immF con) %{
+  match(Set dst (SubF dst con));
+  format %{ "subss   $dst, [$constantaddress]\t# load from constant table: float=$con" %}
   ins_cost(150); // XXX
-  opcode(0xF3, 0x0F, 0x5C);
-  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
+  ins_encode %{
+    __ subss($dst$$XMMRegister, $constantaddress($con));
+  %}
   ins_pipe(pipe_slow);
 %}
…
-instruct subD_imm(regD dst, immD src)
-%{
-  match(Set dst (SubD dst src));
-
-  format %{ "subsd   $dst, [$src]" %}
+instruct subD_imm(regD dst, immD con) %{
+  match(Set dst (SubD dst con));
+  format %{ "subsd   $dst, [$constantaddress]\t# load from constant table: double=$con" %}
   ins_cost(150); // XXX
-  opcode(0xF2, 0x0F, 0x5C);
-  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
+  ins_encode %{
+    __ subsd($dst$$XMMRegister, $constantaddress($con));
+  %}
   ins_pipe(pipe_slow);
 %}
…
-instruct mulF_imm(regF dst, immF src)
-%{
-  match(Set dst (MulF dst src));
-
-  format %{ "mulss   $dst, [$src]" %}
+instruct mulF_imm(regF dst, immF con) %{
+  match(Set dst (MulF dst con));
+  format %{ "mulss   $dst, [$constantaddress]\t# load from constant table: float=$con" %}
   ins_cost(150); // XXX
-  opcode(0xF3, 0x0F, 0x59);
-  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
+  ins_encode %{
+    __ mulss($dst$$XMMRegister, $constantaddress($con));
+  %}
   ins_pipe(pipe_slow);
 %}
…
-instruct mulD_imm(regD dst, immD src)
-%{
-  match(Set dst (MulD dst src));
-
-  format %{ "mulsd   $dst, [$src]" %}
+instruct mulD_imm(regD dst, immD con) %{
+  match(Set dst (MulD dst con));
+  format %{ "mulsd   $dst, [$constantaddress]\t# load from constant table: double=$con" %}
   ins_cost(150); // XXX
-  opcode(0xF2, 0x0F, 0x59);
-  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
+  ins_encode %{
+    __ mulsd($dst$$XMMRegister, $constantaddress($con));
+  %}
   ins_pipe(pipe_slow);
 %}
…
 %}
 
-instruct divF_imm(regF dst, immF src)
-%{
-  match(Set dst (DivF dst src));
-
-  format %{ "divss   $dst, [$src]" %}
+instruct divF_imm(regF dst, immF con) %{
+  match(Set dst (DivF dst con));
+  format %{ "divss   $dst, [$constantaddress]\t# load from constant table: float=$con" %}
   ins_cost(150); // XXX
-  opcode(0xF3, 0x0F, 0x5E);
-  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
+  ins_encode %{
+    __ divss($dst$$XMMRegister, $constantaddress($con));
+  %}
   ins_pipe(pipe_slow);
 %}
…
-instruct divD_imm(regD dst, immD src)
-%{
-  match(Set dst (DivD dst src));
-
-  format %{ "divsd   $dst, [$src]" %}
+instruct divD_imm(regD dst, immD con) %{
+  match(Set dst (DivD dst con));
+  format %{ "divsd   $dst, [$constantaddress]\t# load from constant table: double=$con" %}
   ins_cost(150); // XXX
-  opcode(0xF2, 0x0F, 0x5E);
-  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
+  ins_encode %{
+    __ divsd($dst$$XMMRegister, $constantaddress($con));
+  %}
   ins_pipe(pipe_slow);
 %}
…
-instruct sqrtF_imm(regF dst, immF src)
-%{
-  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
-
-  format %{ "sqrtss  $dst, [$src]" %}
+instruct sqrtF_imm(regF dst, immF con) %{
+  match(Set dst (ConvD2F (SqrtD (ConvF2D con))));
+  format %{ "sqrtss  $dst, [$constantaddress]\t# load from constant table: float=$con" %}
   ins_cost(150); // XXX
-  opcode(0xF3, 0x0F, 0x51);
-  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
+  ins_encode %{
+    __ sqrtss($dst$$XMMRegister, $constantaddress($con));
+  %}
   ins_pipe(pipe_slow);
 %}
…
-instruct sqrtD_imm(regD dst, immD src)
-%{
-  match(Set dst (SqrtD src));
-
-  format %{ "sqrtsd  $dst, [$src]" %}
+instruct sqrtD_imm(regD dst, immD con) %{
+  match(Set dst (SqrtD con));
+  format %{ "sqrtsd  $dst, [$constantaddress]\t# load from constant table: double=$con" %}
   ins_cost(150); // XXX
-  opcode(0xF2, 0x0F, 0x51);
-  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
+  ins_encode %{
+    __ sqrtsd($dst$$XMMRegister, $constantaddress($con));
+  %}
   ins_pipe(pipe_slow);
 %}
…
 %}
 
-instruct string_compare(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rbx_RegI cnt2,
-                        rax_RegI result, regD tmp1, regD tmp2, rFlagsReg cr)
+instruct string_compare(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2,
+                        rax_RegI result, regD tmp1, rFlagsReg cr)
 %{
   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
-  effect(TEMP tmp1, TEMP tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
-
-  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   // KILL $tmp1, $tmp2" %}
+  effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
+
+  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   // KILL $tmp1" %}
   ins_encode %{
     __ string_compare($str1$$Register, $str2$$Register,
                       $cnt1$$Register, $cnt2$$Register, $result$$Register,
-                      $tmp1$$XMMRegister, $tmp2$$XMMRegister);
+                      $tmp1$$XMMRegister);
   %}
   ins_pipe( pipe_slow );
…
     if ($cop$$cmpcode == Assembler::notEqual) {
       // the two jumps 6 bytes apart so the jump distances are too
-      parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0;
+      parity_disp = l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0;
     } else if ($cop$$cmpcode == Assembler::equal) {
       parity_disp = 6;
…
     $$$emit8$primary;
     emit_cc(cbuf, $secondary, $cop$$cmpcode);
-    int disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0;
+    int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0;
     emit_d32(cbuf, disp);
   %}
…
     int parity_disp = -1;
     if ($cop$$cmpcode == Assembler::notEqual) {
-      parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
+      parity_disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0;
     } else if ($cop$$cmpcode == Assembler::equal) {
       parity_disp = 2;
…
     emit_d8(cbuf, parity_disp);
     emit_cc(cbuf, $primary, $cop$$cmpcode);
-    int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
+    int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0;
     emit_d8(cbuf, disp);
     assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
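Aside (editor's illustration; function names are stand-ins for the label/code-buffer queries in the hunk above): the code_size() to insts_size() renames all feed the same branch-displacement formula. A displacement is relative to the end of the branch, so the short form (opcode + rel8) adds 1 for the displacement byte and the long form (two opcode bytes + rel32) adds 4 for the 32-bit field, with insts_size() sampled just before the field is emitted:

    #include <cstdint>

    std::int32_t short_disp(std::int32_t label_pos, std::int32_t insts_size) {
      return label_pos - (insts_size + 1);   // matches emit_d8(cbuf, disp)
    }
    std::int32_t long_disp(std::int32_t label_pos, std::int32_t insts_size) {
      return label_pos - (insts_size + 4);   // matches emit_d32(cbuf, disp)
    }

The assert in the diff then guards that a short-form displacement actually fits in the signed byte.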