Changeset 110
- Timestamp:
- Jun 21, 2011, 2:39:30 PM (14 years ago)
- Location:
- trunk/src/os2ahci
- Files:
-
- 10 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/os2ahci/ahci.c
r103 r110 1300 1300 1301 1301 if (handled) { 1302 /* trigger state machine to process next IORBs, if any */ 1303 spin_lock(drv_lock); 1304 trigger_engine(); 1305 spin_unlock(drv_lock); 1302 /* Trigger state machine to process next IORBs, if any. Due to excessive 1303 * IORB requeue operations (e.g. when processing large unaligned reads or 1304 * writes), we may be stacking interrupts on top of each other. If we 1305 * detect this, we'll pass this on to the engine context hook. 1306 */ 1307 if ((u16) (u32) (void _far *) &irq_stat < 0xf000) { 1308 ddprintf("IRQ stack running low; arming engine context hook\n"); 1309 DevHelp_ArmCtxHook(0, engine_ctxhook_h); 1310 1311 } else { 1312 spin_lock(drv_lock); 1313 trigger_engine(); 1314 spin_unlock(drv_lock); 1315 } 1306 1316 1307 1317 /* complete the interrupt */ … … 1328 1338 u32 done_mask; 1329 1339 1330 ddprintf("port interrupt for adapter #%d, port #%d\n", ad_no(ai), p); 1340 ddprintf("port interrupt for adapter #%d, port #%d, stack frame %Fp\n", ad_no(ai), 1341 p, (void _far *) &done_queue); 1331 1342 memset(&done_queue, 0x00, sizeof(done_queue)); 1332 1343 -
trunk/src/os2ahci/ata.c
r101 r110 33 33 34 34 /* -------------------------- function prototypes -------------------------- */ 35 36 static int ata_cmd_read (IORBH _far *iorb, AD_INFO *ai, int p, int d, int slot, 37 ULONG sector, ULONG count, SCATGATENTRY _far *sg_list, 38 ULONG sg_cnt); 39 40 static int ata_cmd_write(IORBH _far *iorb, AD_INFO *ai, int p, int d, int slot, 41 ULONG sector, ULONG count, SCATGATENTRY _far *sg_list, 42 ULONG sg_cnt, int write_through); 35 43 36 44 /* ------------------------ global/static variables ------------------------ */ … … 124 132 if (ata_cmd.lba_l & 0xf0000000UL) { 125 133 dprintf("error: LBA-28 address %ld has more than 28 bits\n", ata_cmd.lba_l); 126 return( -1);134 return(ATA_CMD_INVALID_PARM); 127 135 } 128 136 /* add upper 4 bits to device field */ … … 152 160 case AP_VADDR: 153 161 /* virtual buffer address in addr/len format (up to 4K) */ 154 DevHelp_VirtToPhys(va_arg(va, void _far *), &sg_single.ppXferBuf);162 sg_single.ppXferBuf = virt_to_phys(va_arg(va, void _far *)); 155 163 sg_single.XferBufLen = va_arg(va, u16); 156 164 sg_list = &sg_single; … … 175 183 default: 176 184 dprintf("error: v_ata_cmd() called with invalid parameter type (%d)\n", (int) ap); 177 return( -1);185 return(ATA_CMD_INVALID_PARM); 178 186 } 179 187 … … 254 262 return(i - 1); 255 263 } 264 if ((sg_addr & 1) || (chunk & 1)) { 265 ddprintf("error: ata_cmd() called with unaligned S/G element(s)\n"); 266 return(ATA_CMD_UNALIGNED_ADDR); 267 } 256 268 cmd_tbl->sg_list[n].addr = sg_addr; 257 269 cmd_tbl->sg_list[n].size = chunk - 1; … … 277 289 } 278 290 279 return( 0);291 return(ATA_CMD_SUCCESS); 280 292 } 281 293 … … 567 579 int rc; 568 580 581 if (io->BlockCount == 0) { 582 /* NOP; return -1 without error in IORB to indicate success */ 583 return(-1); 584 } 585 586 if (add_workspace(iorb)->unaligned) { 587 /* unaligned S/G addresses present; need to use double buffers */ 588 return(ata_read_unaligned(iorb, slot)); 589 } 590 569 591 /* Kludge: some I/O commands during boot 
use excessive S/G buffer lengths 570 592 * which cause NCQ commands to lock up. If there's only one S/G element … … 573 595 */ 574 596 if (io->BlocksXferred == 0 && io->cSGList == 1 && 575 io->pSGList[0].XferBufLen > io->BlockCount * io->BlockSize) {576 io->pSGList[0].XferBufLen = io->BlockCount * io->BlockSize;597 io->pSGList[0].XferBufLen > (ULONG) io->BlockCount * io->BlockSize) { 598 io->pSGList[0].XferBufLen = (ULONG) io->BlockCount * io->BlockSize; 577 599 } 578 600 … … 581 603 sg_indx = ata_get_sg_indx(io); 582 604 sg_cnt = io->cSGList - sg_indx; 583 584 if (sector >= (1UL << 28) || count > 256 || add_workspace(iorb)->is_ncq) { 585 /* need LBA48 for this command */ 586 if (!ai->ports[p].devs[d].lba48) { 587 iorb_seterr(iorb, IOERR_RBA_LIMIT); 588 return(-1); 589 } 590 if (add_workspace(iorb)->is_ncq) { 591 /* use NCQ read; count goes into feature register, tag into count! */ 592 rc = ata_cmd(ai, p, d, slot, ATA_CMD_FPDMA_READ, 593 AP_SECTOR_48, (u32) sector, (u16) 0, 594 AP_FEATURES, (u16) count, 595 AP_COUNT, (u16) (slot << 3), /* tag = slot */ 596 AP_SGLIST, io->pSGList + sg_indx, (u16) sg_cnt, 597 AP_DEVICE, 0x40, 598 AP_END); 599 } else { 600 rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ_EXT, 601 AP_SECTOR_48, (u32) sector, (u16) 0, 602 AP_COUNT, (u16) count, 603 AP_SGLIST, io->pSGList + sg_indx, (u16) sg_cnt, 604 AP_DEVICE, 0x40, 605 AP_END); 606 } 607 608 } else { 609 rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ, 610 AP_SECTOR_28, (u32) sector, 611 AP_COUNT, (u16) count & 0xffU, 612 AP_SGLIST, io->pSGList + sg_indx, (u16) sg_cnt, 613 AP_DEVICE, 0x40, 614 AP_END); 615 } 616 617 if (rc > 0) { 605 if ((rc = ata_cmd_read(iorb, ai, p, d, slot, sector, count, 606 io->pSGList + sg_indx, sg_cnt)) > 0) { 618 607 /* couldn't map all S/G elements */ 619 608 ata_max_sg_cnt(io, sg_indx, (USHORT) rc, &sg_cnt, &count); … … 628 617 iorb_seterr(iorb, IOERR_CMD_SGLIST_BAD); 629 618 619 } else if (rc == ATA_CMD_UNALIGNED_ADDR) { 620 /* unaligned S/G addresses detected; need to 
use double buffers */ 621 add_workspace(iorb)->unaligned = 1; 622 return(ata_read_unaligned(iorb, slot)); 623 624 } else { 625 iorb_seterr(iorb, IOERR_CMD_ADD_SOFTWARE_FAILURE); 626 } 627 628 return(rc); 629 } 630 631 /****************************************************************************** 632 * Read sectors from AHCI device with unaligned S/G element addresses. AHCI 633 * only allows aligned S/G addresses while OS/2 doesn't have these kind of 634 * restrictions. This doesn't happen very often but when it does, we need to 635 * use a transfer buffer and copy the data manually. 636 */ 637 int ata_read_unaligned(IORBH _far *iorb, int slot) 638 { 639 IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb; 640 ADD_WORKSPACE _far *aws = add_workspace(iorb); 641 AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb); 642 ULONG sector = io->RBA + io->BlocksXferred; 643 SCATGATENTRY sg_single; 644 int p = iorb_unit_port(iorb); 645 int d = iorb_unit_device(iorb); 646 int rc; 647 648 ddprintf("ata_read_unaligned(%d.%d.%d, %ld)\n", ad_no(ai), p, d, sector); 649 650 /* allocate transfer buffer */ 651 if ((aws->buf = malloc(io->BlockSize)) == NULL) { 652 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE); 653 return(-1); 654 } 655 656 /* prepare read command using transfer buffer */ 657 sg_single.ppXferBuf = virt_to_phys(aws->buf); 658 sg_single.XferBufLen = io->BlockSize; 659 rc = ata_cmd_read(iorb, ai, p, d, slot, sector, 1, &sg_single, 1); 660 661 if (rc == 0) { 662 add_workspace(iorb)->blocks = 1; 663 add_workspace(iorb)->ppfunc = ata_read_pp; 664 665 } else if (rc > 0) { 666 iorb_seterr(iorb, IOERR_CMD_SGLIST_BAD); 667 630 668 } else { 631 669 iorb_seterr(iorb, IOERR_CMD_ADD_SOFTWARE_FAILURE); … … 638 676 * Post processing function for ata_read(); this function updates the 639 677 * BlocksXferred counter in the IORB and, if not all blocks have been 640 * transferred, requeues the IORB to process the remaining sectors. 
678 * transferred, requeues the IORB to process the remaining sectors. It also 679 * takes care of copying data from the transfer buffer for unaligned reads. 641 680 */ 642 681 void ata_read_pp(IORBH _far *iorb) 643 682 { 644 683 IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb; 684 ADD_WORKSPACE _far *aws = add_workspace(iorb); 685 686 if (aws->unaligned) { 687 /* copy transfer buffer to corresponding physical address in S/G list */ 688 sg_memcpy(io->pSGList, io->cSGList, 689 (ULONG) io->BlocksXferred * (ULONG) io->BlockSize, 690 aws->buf, io->BlockSize, BUF_TO_SG); 691 } 645 692 646 693 io->BlocksXferred += add_workspace(iorb)->blocks; … … 666 713 int d = iorb_unit_device(iorb); 667 714 int rc; 715 716 if (io->BlockCount == 0) { 717 /* NOP; return -1 without error in IORB to indicate success */ 718 return(-1); 719 } 668 720 669 721 /* prepare verify command */ … … 704 756 int rc; 705 757 758 if (io->BlockCount == 0) { 759 /* NOP; return -1 without error in IORB to indicate success */ 760 return(-1); 761 } 762 763 if (add_workspace(iorb)->unaligned) { 764 /* unaligned S/G addresses present; need to use double buffers */ 765 return(ata_write_unaligned(iorb, slot)); 766 } 767 706 768 /* prepare write command while keeping an eye on S/G count limitations */ 707 769 do { 708 770 sg_indx = ata_get_sg_indx(io); 709 771 sg_cnt = io->cSGList - sg_indx; 710 711 if (sector >= (1UL << 28) || count > 256 || add_workspace(iorb)->is_ncq) { 712 /* need LBA48 for this command */ 713 if (!ai->ports[p].devs[d].lba48) { 714 iorb_seterr(iorb, IOERR_RBA_LIMIT); 715 return(-1); 716 } 717 if (add_workspace(iorb)->is_ncq) { 718 /* use NCQ write; count goes into feature register, tag into count! 
*/ 719 rc = ata_cmd(ai, p, d, slot, ATA_CMD_FPDMA_WRITE, 720 AP_SECTOR_48, (u32) sector, (u16) 0, 721 AP_FEATURES, (u16) count, 722 AP_COUNT, (u16) (slot << 3), /* tag = slot */ 723 AP_SGLIST, io->pSGList + sg_indx, (u16) sg_cnt, 724 AP_DEVICE, 0x40, 725 AP_DEVICE, (io->Flags & XIO_DISABLE_HW_WRITE_CACHE) ? 726 0x80 : 0, /* force unit access */ 727 AP_WRITE, 1, 728 AP_END); 729 } else { 730 rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE_EXT, 731 AP_SECTOR_48, (u32) sector, (u16) 0, 732 AP_COUNT, (u16) count, 733 AP_SGLIST, io->pSGList + sg_indx, (u16) sg_cnt, 734 AP_DEVICE, 0x40, 735 AP_WRITE, 1, 736 AP_END); 737 } 738 739 } else { 740 rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE, 741 AP_SECTOR_28, (u32) sector, 742 AP_COUNT, (u16) count & 0xffU, 743 AP_SGLIST, io->pSGList + sg_indx, (u16) sg_cnt, 744 AP_DEVICE, 0x40, 745 AP_WRITE, 1, 746 AP_END); 747 } 748 749 if (rc > 0) { 772 if ((rc = ata_cmd_write(iorb, ai, p, d, slot, sector, count, 773 io->pSGList + sg_indx, sg_cnt, 774 io->Flags & XIO_DISABLE_HW_WRITE_CACHE)) > 0) { 750 775 /* couldn't map all S/G elements */ 751 776 ata_max_sg_cnt(io, sg_indx, (USHORT) rc, &sg_cnt, &count); … … 760 785 iorb_seterr(iorb, IOERR_CMD_SGLIST_BAD); 761 786 787 } else if (rc == ATA_CMD_UNALIGNED_ADDR) { 788 /* unaligned S/G addresses detected; need to use double buffers */ 789 add_workspace(iorb)->unaligned = 1; 790 return(ata_write_unaligned(iorb, slot)); 791 762 792 } else { 763 793 iorb_seterr(iorb, IOERR_CMD_ADD_SOFTWARE_FAILURE); … … 766 796 return(rc); 767 797 } 798 799 /****************************************************************************** 800 * Write sectors from AHCI device with unaligned S/G element addresses. AHCI 801 * only allows aligned S/G addresses while OS/2 doesn't have these kind of 802 * restrictions. This doesn't happen very often but when it does, we need to 803 * use a transfer buffer and copy the data manually. 
804 */ 805 int ata_write_unaligned(IORBH _far *iorb, int slot) 806 { 807 IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb; 808 ADD_WORKSPACE _far *aws = add_workspace(iorb); 809 AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb); 810 ULONG sector = io->RBA + io->BlocksXferred; 811 SCATGATENTRY sg_single; 812 int p = iorb_unit_port(iorb); 813 int d = iorb_unit_device(iorb); 814 int rc; 815 816 ddprintf("ata_write_unaligned(%d.%d.%d, %ld)\n", ad_no(ai), p, d, sector); 817 818 /* allocate transfer buffer */ 819 if ((aws->buf = malloc(io->BlockSize)) == NULL) { 820 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE); 821 return(-1); 822 } 823 824 /* copy next sector from S/G list to transfer buffer */ 825 sg_memcpy(io->pSGList, io->cSGList, 826 (ULONG) io->BlocksXferred * (ULONG) io->BlockSize, 827 aws->buf, io->BlockSize, SG_TO_BUF); 828 829 /* prepare write command using transfer buffer */ 830 sg_single.ppXferBuf = virt_to_phys(aws->buf); 831 sg_single.XferBufLen = io->BlockSize; 832 rc = ata_cmd_write(iorb, ai, p, d, slot, sector, 1, &sg_single, 1, 833 io->Flags & XIO_DISABLE_HW_WRITE_CACHE); 834 835 if (rc == 0) { 836 add_workspace(iorb)->blocks = 1; 837 add_workspace(iorb)->ppfunc = ata_write_pp; 838 839 } else if (rc > 0) { 840 iorb_seterr(iorb, IOERR_CMD_SGLIST_BAD); 841 842 } else { 843 iorb_seterr(iorb, IOERR_CMD_ADD_SOFTWARE_FAILURE); 844 } 845 846 return(rc); 847 } 848 768 849 769 850 /****************************************************************************** … … 888 969 } 889 970 971 /****************************************************************************** 972 * Fabricate ATA READ command based on the capabilities of the corresponding 973 * device and the paramters set from above (NCQ, etc). 
974 */ 975 static int ata_cmd_read(IORBH _far *iorb, AD_INFO *ai, int p, int d, int slot, 976 ULONG sector, ULONG count, SCATGATENTRY _far *sg_list, 977 ULONG sg_cnt) 978 { 979 int rc; 980 981 if (sector >= (1UL << 28) || count > 256 || add_workspace(iorb)->is_ncq) { 982 /* need LBA48 for this command */ 983 if (!ai->ports[p].devs[d].lba48) { 984 iorb_seterr(iorb, IOERR_RBA_LIMIT); 985 return(-1); 986 } 987 if (add_workspace(iorb)->is_ncq) { 988 /* use NCQ read; count goes into feature register, tag into count! */ 989 rc = ata_cmd(ai, p, d, slot, ATA_CMD_FPDMA_READ, 990 AP_SECTOR_48, (u32) sector, (u16) 0, 991 AP_FEATURES, (u16) count, 992 AP_COUNT, (u16) (slot << 3), /* tag == slot */ 993 AP_SGLIST, sg_list, (u16) sg_cnt, 994 AP_DEVICE, 0x40, 995 AP_END); 996 } else { 997 rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ_EXT, 998 AP_SECTOR_48, (u32) sector, (u16) 0, 999 AP_COUNT, (u16) count, 1000 AP_SGLIST, sg_list, (u16) sg_cnt, 1001 AP_DEVICE, 0x40, 1002 AP_END); 1003 } 1004 1005 } else { 1006 rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ, 1007 AP_SECTOR_28, (u32) sector, 1008 AP_COUNT, (u16) count & 0xffU, 1009 AP_SGLIST, sg_list, (u16) sg_cnt, 1010 AP_DEVICE, 0x40, 1011 AP_END); 1012 } 1013 1014 return(rc); 1015 } 1016 1017 /****************************************************************************** 1018 * Fabricate ATA WRITE command based on the capabilities of the corresponding 1019 * device and the paramters set from above (NCQ, etc) 1020 */ 1021 static int ata_cmd_write(IORBH _far *iorb, AD_INFO *ai, int p, int d, int slot, 1022 ULONG sector, ULONG count, SCATGATENTRY _far *sg_list, 1023 ULONG sg_cnt, int write_through) 1024 { 1025 int rc; 1026 1027 if (sector >= (1UL << 28) || count > 256 || add_workspace(iorb)->is_ncq) { 1028 /* need LBA48 for this command */ 1029 if (!ai->ports[p].devs[d].lba48) { 1030 iorb_seterr(iorb, IOERR_RBA_LIMIT); 1031 return(-1); 1032 } 1033 if (add_workspace(iorb)->is_ncq) { 1034 /* use NCQ write; count goes into feature register, tag 
into count! */ 1035 rc = ata_cmd(ai, p, d, slot, ATA_CMD_FPDMA_WRITE, 1036 AP_SECTOR_48, (u32) sector, (u16) 0, 1037 AP_FEATURES, (u16) count, 1038 AP_COUNT, (u16) (slot << 3), /* tag = slot */ 1039 AP_SGLIST, sg_list, (u16) sg_cnt, 1040 AP_DEVICE, 0x40, 1041 AP_DEVICE, (write_through) ? 0x80 : 0, /* force unit access */ 1042 AP_WRITE, 1, 1043 AP_END); 1044 } else { 1045 rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE_EXT, 1046 AP_SECTOR_48, (u32) sector, (u16) 0, 1047 AP_COUNT, (u16) count, 1048 AP_SGLIST, sg_list, (u16) sg_cnt, 1049 AP_DEVICE, 0x40, 1050 AP_WRITE, 1, 1051 AP_END); 1052 } 1053 1054 } else { 1055 rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE, 1056 AP_SECTOR_28, (u32) sector, 1057 AP_COUNT, (u16) count & 0xffU, 1058 AP_SGLIST, sg_list, (u16) sg_cnt, 1059 AP_DEVICE, 0x40, 1060 AP_WRITE, 1, 1061 AP_END); 1062 } 1063 1064 return(rc); 1065 } -
trunk/src/os2ahci/ata.h
r87 r110 432 432 } ATA_PARM; 433 433 434 /****************************************************************************** 435 * Return codes for ata_cmd(); please note that positive return codes indicate 436 * that not all S/G elements could be mapped, 0 means success and negative 437 * values indicate error conditions. 438 */ 439 #define ATA_CMD_SUCCESS 0 440 #define ATA_CMD_INVALID_PARM -1 441 #define ATA_CMD_UNALIGNED_ADDR -2 442 434 443 /* ------------------------ typedefs and structures ------------------------ */ 435 444 … … 473 482 /* -------------------------- function prototypes -------------------------- */ 474 483 475 extern int ata_cmd (AD_INFO *ai, int port, int device, 476 int slot, int cmd, ...); 477 extern int v_ata_cmd (AD_INFO *ai, int port, int device, 478 int slot, int cmd, va_list va); 479 extern void ata_cmd_to_fis (u8 _far *fis, ATA_CMD _far *cmd, 480 int device); 481 extern USHORT ata_get_sg_indx (IORB_EXECUTEIO _far *io); 482 extern void ata_max_sg_cnt (IORB_EXECUTEIO _far *io, USHORT sg_indx, 483 USHORT sg_max, USHORT _far *sg_cnt, 484 USHORT _far *sector_cnt); 485 486 extern int ata_get_geometry (IORBH _far *iorb, int slot); 487 extern void ata_get_geometry_pp (IORBH _far *iorb); 488 extern int ata_unit_ready (IORBH _far *iorb, int slot); 489 extern int ata_read (IORBH _far *iorb, int slot); 490 extern void ata_read_pp (IORBH _far *iorb); 491 extern int ata_verify (IORBH _far *iorb, int slot); 492 extern int ata_write (IORBH _far *iorb, int slot); 493 extern void ata_write_pp (IORBH _far *iorb); 494 extern int ata_execute_ata (IORBH _far *iorb, int slot); 495 extern int ata_req_sense (IORBH _far *iorb, int slot); 496 497 extern char *ata_dev_name (u16 *id_buf); 498 484 extern int ata_cmd (AD_INFO *ai, int port, int device, 485 int slot, int cmd, ...); 486 extern int v_ata_cmd (AD_INFO *ai, int port, int device, 487 int slot, int cmd, va_list va); 488 extern void ata_cmd_to_fis (u8 _far *fis, ATA_CMD _far *cmd, 489 int device); 490 extern 
USHORT ata_get_sg_indx (IORB_EXECUTEIO _far *io); 491 extern void ata_max_sg_cnt (IORB_EXECUTEIO _far *io, 492 USHORT sg_indx, USHORT sg_max, 493 USHORT _far *sg_cnt, 494 USHORT _far *sector_cnt); 495 496 extern int ata_get_geometry (IORBH _far *iorb, int slot); 497 extern void ata_get_geometry_pp (IORBH _far *iorb); 498 extern int ata_unit_ready (IORBH _far *iorb, int slot); 499 extern int ata_read (IORBH _far *iorb, int slot); 500 extern int ata_read_unaligned (IORBH _far *iorb, int slot); 501 extern void ata_read_pp (IORBH _far *iorb); 502 extern int ata_verify (IORBH _far *iorb, int slot); 503 extern int ata_write (IORBH _far *iorb, int slot); 504 extern int ata_write_unaligned (IORBH _far *iorb, int slot); 505 extern void ata_write_pp (IORBH _far *iorb); 506 extern int ata_execute_ata (IORBH _far *iorb, int slot); 507 extern int ata_req_sense (IORBH _far *iorb, int slot); 508 509 extern char *ata_dev_name (u16 *id_buf); 510 -
trunk/src/os2ahci/atapi.c
r87 r110 80 80 int rc; 81 81 82 if (io->BlockCount == 0) { 83 /* NOP; return -1 without error in IORB to indicate success */ 84 return(-1); 85 } 86 87 if (add_workspace(iorb)->unaligned) { 88 /* unaligned S/G addresses present; need to use double buffers */ 89 return(atapi_read_unaligned(iorb, slot)); 90 } 91 82 92 /* translate read command to SCSI/ATAPI READ12 command. 83 93 * READ12 seems to be the most supported READ variant - according to MMC, … … 112 122 if (rc == 0) { 113 123 add_workspace(iorb)->blocks = count; 124 add_workspace(iorb)->ppfunc = ata_read_pp; 125 126 } else if (rc > 0) { 127 iorb_seterr(iorb, IOERR_CMD_SGLIST_BAD); 128 129 } else if (rc == ATA_CMD_UNALIGNED_ADDR) { 130 /* unaligned S/G addresses detected; need to use double buffers */ 131 add_workspace(iorb)->unaligned = 1; 132 return(atapi_read_unaligned(iorb, slot)); 133 134 } else { 135 iorb_seterr(iorb, IOERR_CMD_ADD_SOFTWARE_FAILURE); 136 } 137 138 return(rc); 139 } 140 141 /****************************************************************************** 142 * Read sectors from AHCI device with unaligned S/G element addresses. AHCI 143 * only allows aligned S/G addresses while OS/2 doesn't have these kind of 144 * restrictions. This doesn't happen very often but when it does, we need to 145 * use a transfer buffer and copy the data manually. 146 */ 147 int atapi_read_unaligned(IORBH _far *iorb, int slot) 148 { 149 IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb; 150 ADD_WORKSPACE _far *aws = add_workspace(iorb); 151 ATAPI_CDB_12 cdb; 152 AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb); 153 int p = iorb_unit_port(iorb); 154 int d = iorb_unit_device(iorb); 155 int rc; 156 157 /* translate read command to SCSI/ATAPI READ12 command. 158 * READ12 seems to be the most supported READ variant - according to MMC, 159 * and it's enough even for BluRay. 
160 */ 161 memset(&cdb, 0x00, sizeof(cdb)); 162 cdb.cmd = ATAPI_CMD_READ_12; 163 SET_CDB_32(cdb.lba, io->RBA + io->BlocksXferred); 164 SET_CDB_32(cdb.trans_len, 1); 165 166 /* allocate transfer buffer */ 167 if ((aws->buf = malloc(io->BlockSize)) == NULL) { 168 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE); 169 return(-1); 170 } 171 172 rc = ata_cmd(ai, p, d, slot, ATA_CMD_PACKET, 173 AP_ATAPI_CMD, (void _far *) &cdb, sizeof(cdb), 174 AP_VADDR, (void _far *) aws->buf, (u16) io->BlockSize, 175 AP_DEVICE, 0x40, 176 AP_FEATURES, ATAPI_FEAT_DMA | ATAPI_FEAT_DMA_TO_HOST, 177 AP_END); 178 179 if (rc == 0) { 180 add_workspace(iorb)->blocks = 1; 114 181 add_workspace(iorb)->ppfunc = ata_read_pp; 115 182 -
trunk/src/os2ahci/atapi.h
r87 r110 169 169 extern int atapi_unit_ready (IORBH _far *iorb, int slot); 170 170 extern int atapi_read (IORBH _far *iorb, int slot); 171 extern int atapi_read_unaligned (IORBH _far *iorb, int slot); 171 172 extern int atapi_verify (IORBH _far *iorb, int slot); 172 173 extern int atapi_write (IORBH _far *iorb, int slot); -
trunk/src/os2ahci/ioctl.c
r87 r110 204 204 ic->iorb.cSGList = ic->sg_cnt; 205 205 ic->iorb.pSGList = ic->sg_lst; 206 DevHelp_VirtToPhys(ic->sg_lst, &ic->iorb.ppSGLIST);206 ic->iorb.ppSGLIST = virt_to_phys(ic->sg_lst); 207 207 208 208 ic->iorb.ControllerCmdLen = req->cmdlen; -
trunk/src/os2ahci/libc.c
r87 r110 96 96 static u8 heap_buf[HEAP_SIZE]; 97 97 static u8 heap_units[HEAP_UNIT_CNT]; 98 static ULONG heap_phys_addr; 98 99 99 100 /* ----------------------------- start of code ----------------------------- */ … … 106 107 DevHelp_CreateSpinLock(&mem_lock); 107 108 DevHelp_CreateSpinLock(&com_lock); 109 110 DevHelp_VirtToPhys(heap_buf, &heap_phys_addr); 108 111 } 109 112 … … 347 350 va_list va; 348 351 const unsigned char _far *buf = p; 349 long pos = 0;350 352 int i; 351 353 … … 378 380 printf("\n"); 379 381 380 pos += 16;381 382 buf += 16; 382 383 len -= 16; … … 424 425 } 425 426 return(0); 427 } 428 429 /****************************************************************************** 430 * Copy block from S/G list to virtual address or vice versa. 431 */ 432 void sg_memcpy(SCATGATENTRY _far *sg_list, USHORT sg_cnt, ULONG sg_off, 433 void _far *buf, USHORT len, SG_MEMCPY_DIRECTION dir) 434 { 435 USHORT mode_flag; 436 USHORT i; 437 USHORT l; 438 ULONG phys_addr; 439 ULONG pos = 0; 440 char _far *p; 441 442 /* walk through S/G list to find the elements involved in the operation */ 443 for (i = 0; i < sg_cnt && len > 0; i++) { 444 if (pos <= sg_off && pos + sg_list[i].XferBufLen > sg_off) { 445 446 /* this S/G element intersects with the block to be copied */ 447 phys_addr = sg_list[i].ppXferBuf + (sg_off - pos); 448 if ((l = sg_list[i].XferBufLen - (sg_off - pos)) > len) { 449 l = len; 450 } 451 452 if (DevHelp_PhysToVirt(phys_addr, l, (PVOID) &p, &mode_flag)) { 453 panic("sg_memcpy(): DevHelp_PhysToVirt() failed"); 454 } 455 if (dir == SG_TO_BUF) { 456 memcpy(buf, p, l); 457 } else { 458 memcpy(p, buf, l); 459 } 460 sg_off += l; 461 buf = (char _far *) buf + l; 462 len -= l; 463 } 464 465 pos += sg_list[i].XferBufLen; 466 } 426 467 } 427 468 … … 550 591 551 592 /****************************************************************************** 593 * Return the physical address of a pointer inside the heap buffer. 
This is 594 * necessary because DevHelp_VirtToPhys() can't be called at interrupt time 595 * and we need physical addresses for heap objects when requeueing unaligned 596 * IORBs inside ahci_intr -> trigger_engine. 597 * 598 * If the pointer is not a heap pointer, this function falls back to calling 599 * DevHelp_VirtToPhys with all consequences (i.e. a trap when this is done 600 * at interrupt time). 601 */ 602 ULONG virt_to_phys(void _far *ptr) 603 { 604 if (ptr < heap_buf || ptr > heap_buf + sizeof(heap_buf)) { 605 ULONG addr; 606 607 if (DevHelp_VirtToPhys(ptr, &addr) != 0) { 608 panic("virt_to_phys(): invalid pointer or execution mode"); 609 } 610 return(addr); 611 } 612 613 return(heap_phys_addr + ((char _far *) ptr - (char _far *) heap_buf)); 614 } 615 616 /****************************************************************************** 552 617 * Calibrate 'mdelay()' loop. This is done by setting up a 1 second timer 553 618 * with a callback that sets 'mdelay_done' to MD_CALIBRATION_END. Then it … … 681 746 * interrupts were already disabled or != 0, if not. 682 747 * 683 * NOTE: SMP systems must use spinlocks, thus this function will only be 684 * compiled on non-SMP builds. 685 */ 686 #ifndef OS2AHCI_SMP 748 * NOTE: SMP systems should use spinlocks. 749 */ 687 750 int disable(void) 688 751 { … … 699 762 return(rc); 700 763 } 701 #endif702 764 703 765 /****************************************************************************** … … 705 767 * that the presence of _asm statements will disable compiler optimizations. 706 768 * 707 * NOTE: SMP systems must use spinlocks, thus this function will only be 708 * compiled on non-SMP builds. 709 */ 710 #ifndef OS2AHCI_SMP 769 * NOTE: SMP systems should use spinlocks. 770 */ 711 771 void enable(void) 712 772 { 713 773 _asm sti; 714 774 } 715 #endif716 775 717 776 /****************************************************************************** -
trunk/src/os2ahci/os2ahci.c
r108 r110 307 307 308 308 default: 309 cprintf(" invalid option: /%c\n", *s);310 goto init_fail;309 cprintf("unknown option: /%c - ignored\n", *s); 310 break; 311 311 } 312 312 } … … 481 481 * hand off to a context hook which will continue to trigger the engine until 482 482 * all IORBs have been sent. 483 * 484 * NOTE: While initialization has not completed (or during APM suspend/resume 485 * operations), this function will loop indefinitely because we can't 486 * rely on interrupt handlers or context hooks and complex IORBs 487 * requiring multiple requeues would eventually hang and time out if 488 * we stopped triggering here. 483 489 */ 484 490 void trigger_engine(void) … … 486 492 int i; 487 493 488 for (i = 0; i < 3 ; i++) {494 for (i = 0; i < 3 || !init_complete; i++) { 489 495 if (trigger_engine_1() == 0) { 490 496 /* done -- all IORBs have been sent on their way */ -
trunk/src/os2ahci/os2ahci.h
r108 r110 281 281 typedef struct { 282 282 IORB_QUEUE iorb_queue; /* IORB queue for this port */ 283 unsigned dev_max : 4;/* maximum device number on this port (0-15) */284 unsigned cmd_slot : 5;/* current command slot index (using round-283 unsigned dev_max : 4; /* maximum device number on this port (0-15) */ 284 unsigned cmd_slot : 5; /* current command slot index (using round- 285 285 * robin indexes to prevent starvation) */ 286 286 … … 349 349 unsigned is_ncq : 1; /* should use native command queueing */ 350 350 unsigned complete : 1; /* IORB has completed processing */ 351 unsigned unaligned : 1; /* unaligned S/G; need to use transfer buffer */ 351 352 unsigned cmd_slot : 5; /* AHCI command slot for this IORB */ 352 353 } ADD_WORKSPACE; 354 355 /* sg_memcpy() direction */ 356 typedef enum { 357 SG_TO_BUF, /* copy from S/G list to buffer */ 358 BUF_TO_SG /* copy from buffer to S/G list */ 359 } SG_MEMCPY_DIRECTION; 353 360 354 361 /* -------------------------- function prototypes -------------------------- */ … … 444 451 extern char _far *strcpy (char _far *dst, const char _far *src); 445 452 extern int memcmp (void _far *p1, void _far *p2, size_t len); 453 extern void sg_memcpy (SCATGATENTRY _far *sg_list, USHORT sg_cnt, 454 ULONG sg_off, void _far *buf, USHORT len, 455 SG_MEMCPY_DIRECTION dir); 446 456 extern long strtol (const char _far *buf, 447 457 const char _far * _far *ep, int base); 448 458 extern void *malloc (size_t len); 449 459 extern void free (void *ptr); 460 extern ULONG virt_to_phys (void _far *ptr); 450 461 extern void mdelay_cal (void); 451 462 extern void mdelay (u32 millies); -
trunk/src/os2ahci/version.h
r102 r110 14 14 15 15 16 #define VERSION 11 4/* driver version (2 implied decimals) */16 #define VERSION 115 /* driver version (2 implied decimals) */ 17 17
Note: See TracChangeset for help on using the changeset viewer.