Changeset 178 for trunk/src/os2ahci/ata.c
- Timestamp:
- Nov 29, 2016, 5:30:22 AM (9 years ago)
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/os2ahci/ata.c
r176 r178 4 4 * Copyright (c) 2011 thi.guten Software Development 5 5 * Copyright (c) 2011 Mensys B.V. 6 * Portions copyright (c) 2013-2015David Azarewicz6 * Copyright (c) 2013-2016 David Azarewicz 7 7 * 8 8 * Authors: Christian Mueller, Markus Thielen … … 35 35 /* -------------------------- function prototypes -------------------------- */ 36 36 37 static int ata_cmd_read (IORBH _far *iorb, AD_INFO *ai, int p, int d, int slot,38 ULONG sector, ULONG count, SCATGATENTRY _far*sg_list,39 40 41 static int ata_cmd_write(IORBH _far *iorb, AD_INFO *ai, int p, int d, int slot,42 ULONG sector, ULONG count, SCATGATENTRY _far*sg_list,37 static int ata_cmd_read(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot, 38 ULONG sector, ULONG count, SCATGATENTRY *sg_list, 39 ULONG sg_cnt); 40 41 static int ata_cmd_write(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot, 42 ULONG sector, ULONG count, SCATGATENTRY *sg_list, 43 43 ULONG sg_cnt, int write_through); 44 44 … … 81 81 int v_ata_cmd(AD_INFO *ai, int p, int d, int slot, int cmd, va_list va) 82 82 { 83 AHCI_PORT_DMA _far*dma_base_virt;84 AHCI_CMD_HDR _far*cmd_hdr;85 AHCI_CMD_TBL _far*cmd_tbl;86 SCATGATENTRY _far*sg_list = NULL;83 AHCI_PORT_DMA *dma_base_virt; 84 AHCI_CMD_HDR *cmd_hdr; 85 AHCI_CMD_TBL *cmd_tbl; 86 SCATGATENTRY *sg_list = NULL; 87 87 SCATGATENTRY sg_single; 88 88 ATA_PARM ap; 89 89 ATA_CMD ata_cmd; 90 void _far*atapi_cmd = NULL;90 void *atapi_cmd = NULL; 91 91 u32 dma_base_phys; 92 u 16atapi_cmd_len = 0;93 u 16ahci_flags = 0;94 u 16sg_cnt = 0;95 inti;96 intn;92 u32 atapi_cmd_len = 0; 93 u32 ahci_flags = 0; 94 u32 sg_cnt = 0; 95 u32 i; 96 u32 n; 97 97 98 98 /* -------------------------------------------------------------------------- … … 102 102 */ 103 103 memset(&ata_cmd, 0x00, sizeof(ata_cmd)); 104 ata_cmd.cmd = (u8)cmd;104 ata_cmd.cmd = cmd; 105 105 106 106 /* parse variable arguments */ 107 do { 108 switch ((ap = va_arg(va, ATA_PARM))) { 107 do 108 { 109 switch ((ap = va_arg(va, ATA_PARM))) 110 { 109 111 110 112 
case AP_AHCI_FLAGS: 111 ahci_flags |= va_arg(va, u 16);113 ahci_flags |= va_arg(va, u32); 112 114 break; 113 115 114 116 case AP_WRITE: 115 if (va_arg(va, u16) != 0) { 117 if (va_arg(va, u32) != 0) 118 { 116 119 ahci_flags |= AHCI_CMD_WRITE; 117 120 } … … 120 123 case AP_FEATURES: 121 124 /* ATA features word */ 122 ata_cmd.features |= va_arg(va, u 16);125 ata_cmd.features |= va_arg(va, u32); 123 126 break; 124 127 125 128 case AP_COUNT: 126 129 /* transfer count */ 127 ata_cmd.count = va_arg(va, u 16);130 ata_cmd.count = va_arg(va, u32); 128 131 break; 129 132 … … 131 134 /* 28-bit sector address */ 132 135 ata_cmd.lba_l = va_arg(va, u32); 133 if (ata_cmd.lba_l & 0xf0000000UL) { 134 dprintf("error: LBA-28 address %ld has more than 28 bits\n", ata_cmd.lba_l); 136 if (ata_cmd.lba_l & 0xf0000000UL) 137 { 138 DPRINTF(0,"error: LBA-28 address %d has more than 28 bits\n", ata_cmd.lba_l); 135 139 return(ATA_CMD_INVALID_PARM); 136 140 } … … 144 148 /* 48-bit sector address */ 145 149 ata_cmd.lba_l = va_arg(va, u32); 146 ata_cmd.lba_h = va_arg(va, u 16);150 ata_cmd.lba_h = va_arg(va, u32); 147 151 break; 148 152 … … 150 154 /* ATA device byte; note that this byte contains the highest 151 155 * 4 bits of LBA-28 address; we have to leave them alone here. 
*/ 152 ata_cmd.device |= va_arg(va, u 16) & 0xf0U;156 ata_cmd.device |= va_arg(va, u32) & 0xf0; 153 157 break; 154 158 155 159 case AP_SGLIST: 156 160 /* scatter/gather list in SCATGATENTRY/count format */ 157 sg_list = va_arg(va, void _far*);158 sg_cnt = va_arg(va, u 16);161 sg_list = va_arg(va, void *); 162 sg_cnt = va_arg(va, u32); 159 163 break; 160 164 161 165 case AP_VADDR: 162 166 /* virtual buffer address in addr/len format (up to 4K) */ 163 sg_single.ppXferBuf = virt_to_phys(va_arg(va, void _far*));164 sg_single.XferBufLen = va_arg(va, u 16);167 sg_single.ppXferBuf = MemPhysAdr(va_arg(va, void *)); 168 sg_single.XferBufLen = va_arg(va, u32); 165 169 sg_list = &sg_single; 166 170 sg_cnt = 1; … … 169 173 case AP_ATAPI_CMD: 170 174 /* ATAPI command */ 171 atapi_cmd = va_arg(va, void _far*);172 atapi_cmd_len = va_arg(va, u 16);175 atapi_cmd = va_arg(va, void *); 176 atapi_cmd_len = va_arg(va, u32); 173 177 ahci_flags |= AHCI_CMD_ATAPI; 174 178 break; … … 176 180 case AP_ATA_CMD: 177 181 /* ATA command "pass-through" */ 178 memcpy(&ata_cmd, va_arg(va, void _far*), sizeof(ATA_CMD));182 memcpy(&ata_cmd, va_arg(va, void *), sizeof(ATA_CMD)); 179 183 break; 180 184 … … 183 187 184 188 default: 185 dprintf("error: v_ata_cmd() called with invalid parameter type (%d)\n", (int) ap);189 DPRINTF(0,"error: v_ata_cmd() called with invalid parameter type (%d)\n", (int) ap); 186 190 return(ATA_CMD_INVALID_PARM); 187 191 } … … 212 216 213 217 /* AHCI command header */ 214 cmd_hdr = dma_base_virt->cmd_hdr + slot;218 cmd_hdr = &dma_base_virt->cmd_hdr[slot]; 215 219 memset(cmd_hdr, 0x00, sizeof(*cmd_hdr)); 216 220 cmd_hdr->options = ((d & 0x0f) << 12); … … 218 222 cmd_hdr->options |= 5; /* length of command FIS in 32-bit words */ 219 223 cmd_hdr->tbl_addr = dma_base_phys + offsetof(AHCI_PORT_DMA, cmd_tbl[slot]); 224 /* DAZ can use MemPhysAdr(&dma_base_virt->cmd_tbl[slot]), but is probably slower. 
*/ 220 225 221 226 /* AHCI command table */ 222 cmd_tbl = dma_base_virt->cmd_tbl + slot;227 cmd_tbl = &dma_base_virt->cmd_tbl[slot]; 223 228 memset(cmd_tbl, 0x00, sizeof(*cmd_tbl)); 224 229 ata_cmd_to_fis(cmd_tbl->cmd_fis, &ata_cmd, d); 225 230 226 if (atapi_cmd != NULL) { 231 if (atapi_cmd != NULL) 232 { 227 233 /* copy ATAPI command */ 228 234 memcpy(cmd_tbl->atapi_cmd, atapi_cmd, atapi_cmd_len); … … 250 256 * successfully mapped. 251 257 */ 252 for (i = n = 0; i < sg_cnt; i++) { 258 for (i = n = 0; i < sg_cnt; i++) 259 { 253 260 u32 sg_addr = sg_list[i].ppXferBuf; 254 261 u32 sg_size = sg_list[i].XferBufLen; 255 262 256 do { 263 do 264 { 257 265 u32 chunk = (sg_size > AHCI_MAX_SG_ELEMENT_LEN) ? AHCI_MAX_SG_ELEMENT_LEN 258 266 : sg_size; 259 if (n >= AHCI_MAX_SG) { 267 if (n >= AHCI_MAX_SG) 268 { 260 269 /* couldn't store all S/G elements in our DMA buffer */ 261 ddprintf("ata_cmd(): too many S/G elements\n");270 DPRINTF(0,"ata_cmd(): too many S/G elements\n"); 262 271 return(i - 1); 263 272 } 264 if ((sg_addr & 1) || (chunk & 1)) { 265 ddprintf("error: ata_cmd() called with unaligned S/G element(s)\n"); 273 if ((sg_addr & 1) || (chunk & 1)) 274 { 275 DPRINTF(0,"error: ata_cmd() called with unaligned S/G element(s)\n"); 266 276 return(ATA_CMD_UNALIGNED_ADDR); 267 277 } … … 275 285 276 286 /* set final S/G count in AHCI command header */ 277 cmd_hdr->options |= (u32) n << 16; 278 279 if (debug >= 2) { 280 aprintf("ATA command for %d.%d.%d, slot %d:\n", ad_no(ai), p, d, slot); 281 phex(cmd_hdr, offsetof(AHCI_CMD_HDR, reserved), "cmd_hdr: "); 282 phex(&ata_cmd, sizeof(ata_cmd), "ata_cmd: "); 283 if (atapi_cmd != NULL) { 284 phex(atapi_cmd, atapi_cmd_len, "atapi_cmd: "); 285 } 286 if (n > 0) { 287 phex(cmd_tbl->sg_list, sizeof(*cmd_tbl->sg_list) * n, "sg_list: "); 287 cmd_hdr->options |= n << 16; 288 289 if (D32g_DbgLevel >= 2) 290 { 291 DPRINTF(2,"ATA command for %d.%d.%d, slot %d:\n", ad_no(ai), p, d, slot); 292 dHexDump(0,cmd_hdr, offsetof(AHCI_CMD_HDR, reserved), 
"cmd_hdr: "); 293 dHexDump(0,&ata_cmd, sizeof(ata_cmd), "ata_cmd: "); 294 if (atapi_cmd != NULL) 295 { 296 dHexDump(0,atapi_cmd, atapi_cmd_len, "atapi_cmd: "); 297 } 298 if (n > 0) 299 { 300 dHexDump(0,cmd_tbl->sg_list, sizeof(*cmd_tbl->sg_list) * n, "sg_list: "); 288 301 } 289 302 } … … 312 325 * +----------------+----------------+----------------+----------------+ 313 326 */ 314 void ata_cmd_to_fis(u8 _far *fis, ATA_CMD _far*ata_cmd, int d)327 void ata_cmd_to_fis(u8 *fis, ATA_CMD *ata_cmd, int d) 315 328 { 316 329 fis[0] = 0x27; /* register - host to device FIS */ … … 344 357 * lists. 345 358 */ 346 u16 ata_get_sg_indx(IORB_EXECUTEIO _far*io)359 u16 ata_get_sg_indx(IORB_EXECUTEIO *io) 347 360 { 348 361 ULONG offset = io->BlocksXferred * io->BlockSize; 362 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList); 349 363 USHORT i; 350 364 351 for (i = 0; i < io->cSGList && offset > 0; i++) { 352 offset -= io->pSGList[i].XferBufLen; 365 for (i = 0; i < io->cSGList && offset > 0; i++) 366 { 367 offset -= pSGList[i].XferBufLen; 353 368 } 354 369 … … 380 395 * boundaries which will still fit into our HW S/G list. 
381 396 */ 382 void ata_max_sg_cnt(IORB_EXECUTEIO _far*io, USHORT sg_indx, USHORT sg_max,383 USHORT _far *sg_cnt, USHORT _far*sector_cnt)397 void ata_max_sg_cnt(IORB_EXECUTEIO *io, USHORT sg_indx, USHORT sg_max, 398 USHORT *sg_cnt, USHORT *sector_cnt) 384 399 { 385 400 ULONG max_sector_cnt = 0; … … 387 402 ULONG offset = 0; 388 403 USHORT i; 389 390 for (i = sg_indx; i < io->cSGList; i++) { 391 if (i - sg_indx >= sg_max) { 404 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList); 405 406 for (i = sg_indx; i < io->cSGList; i++) 407 { 408 if (i - sg_indx >= sg_max) 409 { 392 410 /* we're beyond the number of S/G elements we can map */ 393 411 break; 394 412 } 395 413 396 offset += io->pSGList[i].XferBufLen; 397 if (offset % io->BlockSize == 0) { 414 offset += pSGList[i].XferBufLen; 415 if (offset % io->BlockSize == 0) 416 { 398 417 /* this S/G element ends on a sector boundary */ 399 418 max_sector_cnt = offset / io->BlockSize; … … 414 433 * and handled by atapi_get_geometry(). 
415 434 */ 416 int ata_get_geometry(IORBH _far *iorb, int slot)417 { 418 ADD_WORKSPACE _far *aws = add_workspace(iorb);435 int ata_get_geometry(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot) 436 { 437 ADD_WORKSPACE *aws = add_workspace(pIorb); 419 438 int rc; 420 439 421 440 /* allocate buffer for ATA identify information */ 422 if ((aws->buf = malloc(ATA_ID_WORDS * sizeof(u16))) == NULL) { 423 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE); 441 if ((aws->buf = MemAlloc(ATA_ID_WORDS * sizeof(u16))) == NULL) 442 { 443 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE); 424 444 return(-1); 425 445 } … … 427 447 /* request ATA identify information */ 428 448 aws->ppfunc = ata_get_geometry_pp; 429 rc = ata_cmd(ad_infos + iorb_unit_adapter( iorb),430 iorb_unit_port( iorb),431 iorb_unit_device( iorb),449 rc = ata_cmd(ad_infos + iorb_unit_adapter(pIorb), 450 iorb_unit_port(pIorb), 451 iorb_unit_device(pIorb), 432 452 slot, 433 453 ATA_CMD_ID_ATA, 434 AP_VADDR, (void _far*) aws->buf, ATA_ID_WORDS * sizeof(u16),454 AP_VADDR, (void *) aws->buf, ATA_ID_WORDS * sizeof(u16), 435 455 AP_END); 436 456 437 if (rc != 0) { 438 iorb_seterr(iorb, IOERR_CMD_ADD_SOFTWARE_FAILURE); 457 if (rc != 0) 458 { 459 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE); 439 460 } 440 461 … … 445 466 * geometry to the last full cylinder. 
446 467 */ 447 int adjust_cylinders(GEOMETRY _far *geometry, ULONG TotalSectors) { 468 int adjust_cylinders(GEOMETRY *geometry, ULONG TotalSectors) 469 { 448 470 USHORT SecPerCyl; 449 471 int rc = FALSE; … … 451 473 geometry->TotalSectors = TotalSectors; 452 474 SecPerCyl = geometry->SectorsPerTrack * geometry->NumHeads; 453 if (SecPerCyl > 0) { 475 if (SecPerCyl > 0) 476 { 454 477 ULONG TotalCylinders = TotalSectors / SecPerCyl; 455 478 456 479 geometry->TotalSectors = TotalCylinders * SecPerCyl; 457 480 geometry->TotalCylinders = TotalCylinders; 458 if (TotalCylinders >> 16) { 481 if (TotalCylinders >> 16) 482 { 459 483 geometry->TotalCylinders = 65535; 460 484 rc = TRUE; … … 470 494 #define BIOS_MAX_NUMHEADS 255 471 495 #define BIOS_MAX_SECTORSPERTRACK 63 472 void log_geom_calculate_LBA_assist(GEOMETRY _far*geometry, ULONG TotalSectors)496 void log_geom_calculate_LBA_assist(GEOMETRY *geometry, ULONG TotalSectors) 473 497 { 474 498 UCHAR numSpT = BIOS_MAX_SECTORSPERTRACK; … … 476 500 ULONG Cylinders; 477 501 478 if (TotalSectors <= (BIOS_MAX_CYLINDERS * 128 * BIOS_MAX_SECTORSPERTRACK)) { 502 if (TotalSectors <= (BIOS_MAX_CYLINDERS * 128 * BIOS_MAX_SECTORSPERTRACK)) 503 { 479 504 USHORT temp = (TotalSectors - 1) / (BIOS_MAX_CYLINDERS * BIOS_MAX_SECTORSPERTRACK); 480 505 … … 485 510 } 486 511 487 do { 512 do 513 { 488 514 Cylinders = TotalSectors / (USHORT)(numHeads * numSpT); 489 if (Cylinders >> 16) { 515 if (Cylinders >> 16) 516 { 490 517 if (numSpT < 128) 491 518 numSpT = (numSpT << 1) | 1; … … 500 527 } 501 528 502 int check_lvm(IORBH _far *iorb, ULONG sector)503 { 504 DLA_Table_Sector *pDLA = (DLA_Table_Sector*)add_workspace( iorb)->buf;505 AD_INFO *ai = ad_infos + iorb_unit_adapter( iorb);506 GEOMETRY _far *geometry = ((IORB_GEOMETRY _far *) iorb)->pGeometry;507 int p = iorb_unit_port( iorb);529 int check_lvm(IORBH *pIorb, ULONG sector) 530 { 531 DLA_Table_Sector *pDLA = (DLA_Table_Sector*)add_workspace(pIorb)->buf; 532 AD_INFO *ai = ad_infos + 
iorb_unit_adapter(pIorb); 533 GEOMETRY *geometry = ((IORB_GEOMETRY*)pIorb)->pGeometry; 534 int p = iorb_unit_port(pIorb); 508 535 int rc; 509 536 510 537 rc = ahci_exec_polled_cmd(ai, p, 0, 500, ATA_CMD_READ, 511 AP_SECTOR_28, (u32)sector-1,512 AP_COUNT, (u16)1,513 AP_VADDR, (void _far *)pDLA, 512,538 AP_SECTOR_28, sector-1, 539 AP_COUNT, 1, 540 AP_VADDR, (void *)pDLA, 512, 514 541 AP_DEVICE, 0x40, 515 542 AP_END); 516 543 if (rc) return 0; 517 544 518 ddphex(pDLA, sizeof(DLA_Table_Sector), "DLA sector %d:\n", sector-1);545 DHEXDUMP(3,pDLA, sizeof(DLA_Table_Sector), "DLA sector %d:\n", sector-1); 519 546 520 547 if ((pDLA->DLA_Signature1 == DLA_TABLE_SIGNATURE1) && (pDLA->DLA_Signature2 == DLA_TABLE_SIGNATURE2)) { 521 ddprintf("is_lvm_geometry found at sector %d\n", sector-1);548 DPRINTF(3,"is_lvm_geometry found at sector %d\n", sector-1); 522 549 geometry->TotalCylinders = pDLA->Cylinders; 523 550 geometry->NumHeads = pDLA->Heads_Per_Cylinder; … … 536 563 * return the saved values when ata_get_geometry() is called. 
537 564 */ 538 int is_lvm_geometry(IORBH _far *iorb)539 { 540 GEOMETRY _far *geometry = ((IORB_GEOMETRY _far *) iorb)->pGeometry;565 int is_lvm_geometry(IORBH *pIorb) 566 { 567 GEOMETRY *geometry = ((IORB_GEOMETRY*)pIorb)->pGeometry; 541 568 ULONG sector; 542 569 543 570 if (init_complete) return 0; /* We cannot use ahci_exec_polled_cmd() after init_complete */ 544 571 545 if (use_lvm_info) { 572 if (use_lvm_info) 573 { 546 574 #ifdef DEBUG 547 AD_INFO *ai = ad_infos + iorb_unit_adapter( iorb);548 int p = iorb_unit_port( iorb);549 int d = iorb_unit_device( iorb);550 ddprintf("is_lvm_geometry (%d.%d.%d)\n", ad_no(ai), p, d);575 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb); 576 int p = iorb_unit_port(pIorb); 577 int d = iorb_unit_device(pIorb); 578 DPRINTF(3,"is_lvm_geometry (%d.%d.%d)\n", ad_no(ai), p, d); 551 579 #endif 552 580 553 581 /* First check the sector reported by the hardware */ 554 if (check_lvm(iorb, geometry->SectorsPerTrack)) return 1; 555 556 for (sector = 255; sector >= 63; sector >>= 1) { 582 if (check_lvm(pIorb, geometry->SectorsPerTrack)) return 1; 583 584 for (sector = 255; sector >= 63; sector >>= 1) 585 { 557 586 if (sector == geometry->SectorsPerTrack) continue; 558 if (check_lvm( iorb, sector)) return 1;587 if (check_lvm(pIorb, sector)) return 1; 559 588 } 560 589 } … … 567 596 * information to OS/2 IOCC_GEOMETRY information. 
568 597 */ 569 void ata_get_geometry_pp(IORBH _far *iorb)570 { 571 GEOMETRY _far *geometry = ((IORB_GEOMETRY _far *) iorb)->pGeometry;572 USHORT geometry_len = ((IORB_GEOMETRY _far *) iorb)->GeometryLen;573 u16 *id_buf = add_workspace( iorb)->buf;574 int a = iorb_unit_adapter( iorb);575 int p = iorb_unit_port( iorb);598 void ata_get_geometry_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb) 599 { 600 GEOMETRY *geometry = ((IORB_GEOMETRY*)pIorb)->pGeometry; 601 USHORT geometry_len = ((IORB_GEOMETRY *)pIorb)->GeometryLen; 602 u16 *id_buf = add_workspace(pIorb)->buf; 603 int a = iorb_unit_adapter(pIorb); 604 int p = iorb_unit_port(pIorb); 576 605 char *Method; 577 606 … … 607 636 608 637 /* extract total number of sectors */ 609 if (id_buf[ATA_ID_CFS_ENABLE_2] & 0x400) { 638 if (id_buf[ATA_ID_CFS_ENABLE_2] & 0x400) 639 { 610 640 /* 48-bit LBA supported */ 611 if (ATA_CAPACITY48_H(id_buf) != 0) { 641 if (ATA_CAPACITY48_H(id_buf) != 0) 642 { 612 643 /* more than 32 bits for number of sectors */ 613 dprintf("warning: limiting disk %d.%d.%d to 2TB\n",614 iorb_unit_adapter( iorb), iorb_unit_port(iorb),615 iorb_unit_device( iorb));644 DPRINTF(0,"warning: limiting disk %d.%d.%d to 2TB\n", 645 iorb_unit_adapter(pIorb), iorb_unit_port(pIorb), 646 iorb_unit_device(pIorb)); 616 647 geometry->TotalSectors = 0xffffffffUL; 617 } else { 648 } 649 else 650 { 618 651 geometry->TotalSectors = ATA_CAPACITY48_L(id_buf); 619 652 } 620 } else { 653 } 654 else 655 { 621 656 /* 28-bit LBA */ 622 657 geometry->TotalSectors = ATA_CAPACITY(id_buf) & 0x0fffffffUL; … … 625 660 Method = "None"; 626 661 /* fabricate the remaining geometry fields */ 627 if (track_size[a][p] != 0) { 662 if (track_size[a][p] != 0) 663 { 628 664 /* A specific track size has been requested for this port; this is 629 665 * typically done for disks with 4K sectors to make sure partitions … … 634 670 geometry->TotalCylinders = geometry->TotalSectors / ((u32) geometry->NumHeads * (u32) geometry->SectorsPerTrack); 635 671 Method = 
"Custom"; 636 } else if (CUR_HEADS(id_buf) > 0 && CUR_CYLS(id_buf) > 0 && CUR_SECTORS(id_buf) > 0 && 637 CUR_CAPACITY(id_buf) == (u32) CUR_HEADS(id_buf) * (u32) CUR_CYLS(id_buf) * (u32) CUR_SECTORS(id_buf)) { 672 } 673 else if (CUR_HEADS(id_buf) > 0 && CUR_CYLS(id_buf) > 0 && CUR_SECTORS(id_buf) > 0 && 674 CUR_CAPACITY(id_buf) == (u32) CUR_HEADS(id_buf) * (u32) CUR_CYLS(id_buf) * (u32) CUR_SECTORS(id_buf)) 675 { 638 676 /* BIOS-supplied (aka "current") geometry values look valid */ 639 677 geometry->NumHeads = CUR_HEADS(id_buf); … … 641 679 geometry->TotalCylinders = CUR_CYLS(id_buf); 642 680 Method = "BIOS"; 643 } else if (ATA_HEADS(id_buf) > 0 && ATA_CYLS(id_buf) > 0 && ATA_SECTORS(id_buf) > 0) { 681 } 682 else if (ATA_HEADS(id_buf) > 0 && ATA_CYLS(id_buf) > 0 && ATA_SECTORS(id_buf) > 0) 683 { 644 684 /* ATA-supplied values for geometry look valid */ 645 685 geometry->NumHeads = ATA_HEADS(id_buf); … … 647 687 geometry->TotalCylinders = ATA_CYLS(id_buf); 648 688 Method = "ATA"; 649 } else { 689 } 690 else 691 { 650 692 /* use typical SCSI geometry */ 651 693 geometry->NumHeads = 255; … … 655 697 } 656 698 657 dprintf("Physical geometry: %ld cylinders, %d heads, %d sectors per track (%ldMB) (%s)\n",658 (u32) geometry->TotalCylinders, (u16) geometry->NumHeads, (u16)geometry->SectorsPerTrack,659 ( u32) (geometry->TotalSectors / 2048), Method);699 DPRINTF(2,"Physical geometry: %d cylinders, %d heads, %d sectors per track (%dMB) (%s)\n", 700 geometry->TotalCylinders, geometry->NumHeads, geometry->SectorsPerTrack, 701 (geometry->TotalSectors / 2048), Method); 660 702 661 703 /* Fixup the geometry in case the geometry reported by the BIOS is bad */ 662 if (adjust_cylinders(geometry, geometry->TotalSectors)) { // cylinder overflow 704 if (adjust_cylinders(geometry, geometry->TotalSectors)) 705 { // cylinder overflow 663 706 log_geom_calculate_LBA_assist(geometry, geometry->TotalSectors); 664 707 geometry->TotalSectors = (USHORT)(geometry->NumHeads * 
geometry->SectorsPerTrack) * (ULONG)geometry->TotalCylinders; … … 666 709 adjust_cylinders(geometry, geometry->TotalSectors); 667 710 668 dprintf("Logical geometry: %ld cylinders, %d heads, %d sectors per track (%ldMB) (%s)\n",669 (u32) geometry->TotalCylinders, (u16) geometry->NumHeads, (u16)geometry->SectorsPerTrack,670 ( u32) (geometry->TotalSectors / 2048), Method);671 672 if (is_lvm_geometry( iorb)) Method = "LVM";711 DPRINTF(2,"Logical geometry: %d cylinders, %d heads, %d sectors per track (%dMB) (%s)\n", 712 geometry->TotalCylinders, geometry->NumHeads, geometry->SectorsPerTrack, 713 (geometry->TotalSectors / 2048), Method); 714 715 if (is_lvm_geometry(pIorb)) Method = "LVM"; 673 716 ad_infos[a].ports[p].devs[0].dev_info.Cylinders = geometry->TotalCylinders; 674 717 ad_infos[a].ports[p].devs[0].dev_info.HeadsPerCylinder = geometry->NumHeads; … … 677 720 ad_infos[a].ports[p].devs[0].dev_info.Method = Method; 678 721 679 dprintf("Reported geometry: %ld cylinders, %d heads, %d sectors per track (%ldMB) (%s)\n",680 (u32) geometry->TotalCylinders, (u16) geometry->NumHeads, (u16)geometry->SectorsPerTrack,681 ( u32) (geometry->TotalSectors / 2048), Method);722 DPRINTF(2,"Reported geometry: %d cylinders, %d heads, %d sectors per track (%dMB) (%s)\n", 723 geometry->TotalCylinders, geometry->NumHeads, geometry->SectorsPerTrack, 724 (geometry->TotalSectors / 2048), Method); 682 725 683 726 /* tell interrupt handler that this IORB is complete */ 684 add_workspace( iorb)->complete = 1;727 add_workspace(pIorb)->complete = 1; 685 728 } 686 729 … … 688 731 * Test whether unit is ready. 689 732 */ 690 int ata_unit_ready(IORBH _far *iorb, int slot)733 int ata_unit_ready(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot) 691 734 { 692 735 /* This is a NOP for ATA devices (at least right now); returning an error … … 694 737 * HW command and the IORB will complete successfully. 
695 738 */ 696 ((IORB_UNIT_STATUS _far *) iorb)->UnitStatus = US_READY | US_POWER;739 ((IORB_UNIT_STATUS *)pIorb)->UnitStatus = US_READY | US_POWER; 697 740 return(-1); 698 741 } … … 701 744 * Read sectors from AHCI device. 702 745 */ 703 int ata_read(IORBH _far *iorb, int slot) 704 { 705 IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb; 706 AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb); 746 int ata_read(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot) 747 { 748 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb; 749 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList); 750 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb); 707 751 ULONG sector = io->RBA + io->BlocksXferred; 708 752 USHORT count = io->BlockCount - io->BlocksXferred; 709 753 USHORT sg_indx; 710 754 USHORT sg_cnt; 711 int p = iorb_unit_port( iorb);712 int d = iorb_unit_device( iorb);755 int p = iorb_unit_port(pIorb); 756 int d = iorb_unit_device(pIorb); 713 757 int rc; 714 758 715 if (io->BlockCount == 0) { 759 if (io->BlockCount == 0) 760 { 716 761 /* NOP; return -1 without error in IORB to indicate success */ 717 762 return(-1); 718 763 } 719 764 720 if (add_workspace(iorb)->unaligned) { 765 if (add_workspace(pIorb)->unaligned) 766 { 721 767 /* unaligned S/G addresses present; need to use double buffers */ 722 return(ata_read_unaligned( iorb, slot));768 return(ata_read_unaligned(pIorb, slot)); 723 769 } 724 770 … … 729 775 */ 730 776 if (io->BlocksXferred == 0 && io->cSGList == 1 && 731 io->pSGList[0].XferBufLen > (ULONG) io->BlockCount * io->BlockSize) { 732 io->pSGList[0].XferBufLen = (ULONG) io->BlockCount * io->BlockSize; 777 pSGList[0].XferBufLen > (ULONG) io->BlockCount * io->BlockSize) 778 { 779 pSGList[0].XferBufLen = (ULONG) io->BlockCount * io->BlockSize; 733 780 } 734 781 735 782 /* prepare read command while keeping an eye on S/G count limitations */ 736 do { 783 do 784 { 737 785 sg_indx = ata_get_sg_indx(io); 738 786 sg_cnt = io->cSGList - sg_indx; 739 if ((rc = 
ata_cmd_read(iorb, ai, p, d, slot, sector, count, 740 io->pSGList + sg_indx, sg_cnt)) > 0) { 787 if ((rc = ata_cmd_read(pIorb, ai, p, d, slot, sector, count, 788 pSGList + sg_indx, sg_cnt)) > 0) 789 { 741 790 /* couldn't map all S/G elements */ 742 ata_max_sg_cnt(io, sg_indx, (USHORT)rc, &sg_cnt, &count);791 ata_max_sg_cnt(io, sg_indx, rc, &sg_cnt, &count); 743 792 } 744 793 } while (rc > 0 && sg_cnt > 0); 745 794 746 if (rc == 0) { 747 add_workspace(iorb)->blocks = count; 748 add_workspace(iorb)->ppfunc = ata_read_pp; 749 750 } else if (rc > 0) { 751 iorb_seterr(iorb, IOERR_CMD_SGLIST_BAD); 752 753 } else if (rc == ATA_CMD_UNALIGNED_ADDR) { 795 if (rc == 0) 796 { 797 add_workspace(pIorb)->blocks = count; 798 add_workspace(pIorb)->ppfunc = ata_read_pp; 799 } 800 else if (rc > 0) 801 { 802 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD); 803 } 804 else if (rc == ATA_CMD_UNALIGNED_ADDR) 805 { 754 806 /* unaligned S/G addresses detected; need to use double buffers */ 755 add_workspace(iorb)->unaligned = 1; 756 return(ata_read_unaligned(iorb, slot)); 757 758 } else { 759 iorb_seterr(iorb, IOERR_CMD_ADD_SOFTWARE_FAILURE); 807 add_workspace(pIorb)->unaligned = 1; 808 return(ata_read_unaligned(pIorb, slot)); 809 810 } 811 else 812 { 813 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE); 760 814 } 761 815 … … 769 823 * use a transfer buffer and copy the data manually. 
770 824 */ 771 int ata_read_unaligned(IORBH _far *iorb, int slot)772 { 773 IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb;774 ADD_WORKSPACE _far *aws = add_workspace(iorb);775 AD_INFO *ai = ad_infos + iorb_unit_adapter( iorb);825 int ata_read_unaligned(IORBH *pIorb, int slot) 826 { 827 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb; 828 ADD_WORKSPACE *aws = add_workspace(pIorb); 829 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb); 776 830 ULONG sector = io->RBA + io->BlocksXferred; 777 831 SCATGATENTRY sg_single; 778 int p = iorb_unit_port( iorb);779 int d = iorb_unit_device( iorb);832 int p = iorb_unit_port(pIorb); 833 int d = iorb_unit_device(pIorb); 780 834 int rc; 781 835 782 ddprintf("ata_read_unaligned(%d.%d.%d, %ld)\n", ad_no(ai), p, d, sector);836 DPRINTF(3,"ata_read_unaligned(%d.%d.%d, %d)\n", ad_no(ai), p, d, sector); 783 837 784 838 /* allocate transfer buffer */ 785 if ((aws->buf = malloc(io->BlockSize)) == NULL) { 786 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE); 839 if ((aws->buf = MemAlloc(io->BlockSize)) == NULL) 840 { 841 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE); 787 842 return(-1); 788 843 } 789 844 790 845 /* prepare read command using transfer buffer */ 791 sg_single.ppXferBuf = virt_to_phys(aws->buf);846 sg_single.ppXferBuf = MemPhysAdr(aws->buf); 792 847 sg_single.XferBufLen = io->BlockSize; 793 rc = ata_cmd_read( iorb, ai, p, d, slot, sector, 1, &sg_single, 1);848 rc = ata_cmd_read(pIorb, ai, p, d, slot, sector, 1, &sg_single, 1); 794 849 795 850 if (rc == 0) { 796 add_workspace( iorb)->blocks = 1;797 add_workspace( iorb)->ppfunc = ata_read_pp;851 add_workspace(pIorb)->blocks = 1; 852 add_workspace(pIorb)->ppfunc = ata_read_pp; 798 853 799 854 } else if (rc > 0) { 800 iorb_seterr( iorb, IOERR_CMD_SGLIST_BAD);855 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD); 801 856 802 857 } else { 803 iorb_seterr( iorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);858 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE); 804 859 } 805 860 … … 813 868 * takes care of 
copying data from the transfer buffer for unaligned reads. 814 869 */ 815 void ata_read_pp(IORBH _far *iorb) 816 { 817 IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb; 818 ADD_WORKSPACE _far *aws = add_workspace(iorb); 819 820 if (aws->unaligned) { 870 void ata_read_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb) 871 { 872 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb; 873 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList); 874 ADD_WORKSPACE *aws = add_workspace(pIorb); 875 876 if (aws->unaligned) 877 { 821 878 /* copy transfer buffer to corresponding physical address in S/G list */ 822 sg_memcpy( io->pSGList, io->cSGList,879 sg_memcpy(pSGList, io->cSGList, 823 880 (ULONG) io->BlocksXferred * (ULONG) io->BlockSize, 824 881 aws->buf, io->BlockSize, BUF_TO_SG); 825 882 } 826 883 827 io->BlocksXferred += add_workspace(iorb)->blocks; 828 ddprintf("ata_read_pp(): blocks transferred = %d\n", (int) io->BlocksXferred); 829 830 if (io->BlocksXferred >= io->BlockCount) { 884 io->BlocksXferred += add_workspace(pIorb)->blocks; 885 DPRINTF(3,"ata_read_pp(): blocks transferred = %d\n", io->BlocksXferred); 886 887 if (io->BlocksXferred >= io->BlockCount) 888 { 831 889 /* we're done; tell IRQ handler the IORB is complete */ 832 add_workspace(iorb)->complete = 1; 833 } else { 890 add_workspace(pIorb)->complete = 1; 891 } 892 else 893 { 834 894 /* requeue this IORB for next iteration */ 835 iorb_requeue( iorb);895 iorb_requeue(pIorb); 836 896 } 837 897 } … … 840 900 * Verify readability of sectors on ATA device. 
841 901 */ 842 int ata_verify(IORBH _far *iorb, int slot)843 { 844 IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb;845 AD_INFO *ai = ad_infos + iorb_unit_adapter( iorb);846 int p = iorb_unit_port( iorb);847 int d = iorb_unit_device( iorb);902 int ata_verify(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot) 903 { 904 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb; 905 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb); 906 int p = iorb_unit_port(pIorb); 907 int d = iorb_unit_device(pIorb); 848 908 int rc; 849 909 850 if (io->BlockCount == 0) { 910 if (io->BlockCount == 0) 911 { 851 912 /* NOP; return -1 without error in IORB to indicate success */ 852 913 return(-1); … … 854 915 855 916 /* prepare verify command */ 856 if (io->RBA >= (1UL << 28) || io->BlockCount > 256) { 917 if (io->RBA >= (1UL << 28) || io->BlockCount > 256) 918 { 857 919 /* need LBA48 for this command */ 858 920 if (!ai->ports[p].devs[d].lba48) { 859 iorb_seterr( iorb, IOERR_RBA_LIMIT);921 iorb_seterr(pIorb, IOERR_RBA_LIMIT); 860 922 return(-1); 861 923 } 862 924 rc = ata_cmd(ai, p, d, slot, ATA_CMD_VERIFY_EXT, 863 AP_SECTOR_48, (u32) io->RBA, (u16)0,864 AP_COUNT, (u16)io->BlockCount,925 AP_SECTOR_48, io->RBA, 0, 926 AP_COUNT, io->BlockCount, 865 927 AP_DEVICE, 0x40, 866 928 AP_END); 867 929 } else { 868 930 rc = ata_cmd(ai, p, d, slot, ATA_CMD_VERIFY, 869 AP_SECTOR_28, (u32)io->RBA,870 AP_COUNT, (u16)io->BlockCount & 0xffU,931 AP_SECTOR_28, io->RBA, 932 AP_COUNT, io->BlockCount & 0xffU, 871 933 AP_DEVICE, 0x40, 872 934 AP_END); … … 879 941 * Write sectors to AHCI device. 
880 942 */ 881 int ata_write(IORBH _far *iorb, int slot) 882 { 883 IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb; 884 AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb); 943 int ata_write(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot) 944 { 945 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb; 946 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList); 947 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb); 885 948 ULONG sector = io->RBA + io->BlocksXferred; 886 949 USHORT count = io->BlockCount - io->BlocksXferred; 887 950 USHORT sg_indx; 888 951 USHORT sg_cnt; 889 int p = iorb_unit_port( iorb);890 int d = iorb_unit_device( iorb);952 int p = iorb_unit_port(pIorb); 953 int d = iorb_unit_device(pIorb); 891 954 int rc; 892 955 893 if (io->BlockCount == 0) { 956 if (io->BlockCount == 0) 957 { 894 958 /* NOP; return -1 without error in IORB to indicate success */ 895 959 return(-1); 896 960 } 897 961 898 if (add_workspace(iorb)->unaligned) { 962 if (add_workspace(pIorb)->unaligned) 963 { 899 964 /* unaligned S/G addresses present; need to use double buffers */ 900 return(ata_write_unaligned( iorb, slot));965 return(ata_write_unaligned(pIorb, slot)); 901 966 } 902 967 … … 905 970 sg_indx = ata_get_sg_indx(io); 906 971 sg_cnt = io->cSGList - sg_indx; 907 if ((rc = ata_cmd_write(iorb, ai, p, d, slot, sector, count, 908 io->pSGList + sg_indx, sg_cnt, 909 io->Flags & XIO_DISABLE_HW_WRITE_CACHE)) > 0) { 972 if ((rc = ata_cmd_write(pIorb, ai, p, d, slot, sector, count, 973 pSGList + sg_indx, sg_cnt, 974 io->Flags & XIO_DISABLE_HW_WRITE_CACHE)) > 0) 975 { 910 976 /* couldn't map all S/G elements */ 911 977 ata_max_sg_cnt(io, sg_indx, (USHORT) rc, &sg_cnt, &count); … … 913 979 } while (rc > 0 && sg_cnt > 0); 914 980 915 if (rc == 0) { 916 add_workspace(iorb)->blocks = count; 917 add_workspace(iorb)->ppfunc = ata_write_pp; 918 919 } else if (rc > 0) { 920 iorb_seterr(iorb, IOERR_CMD_SGLIST_BAD); 921 922 } else if (rc == ATA_CMD_UNALIGNED_ADDR) { 981 if (rc == 0) 982 
{ 983 add_workspace(pIorb)->blocks = count; 984 add_workspace(pIorb)->ppfunc = ata_write_pp; 985 } 986 else if (rc > 0) 987 { 988 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD); 989 } 990 else if (rc == ATA_CMD_UNALIGNED_ADDR) 991 { 923 992 /* unaligned S/G addresses detected; need to use double buffers */ 924 add_workspace(iorb)->unaligned = 1; 925 return(ata_write_unaligned(iorb, slot)); 926 927 } else { 928 iorb_seterr(iorb, IOERR_CMD_ADD_SOFTWARE_FAILURE); 993 add_workspace(pIorb)->unaligned = 1; 994 return(ata_write_unaligned(pIorb, slot)); 995 } 996 else 997 { 998 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE); 929 999 } 930 1000 … … 938 1008 * use a transfer buffer and copy the data manually. 939 1009 */ 940 int ata_write_unaligned(IORBH _far *iorb, int slot) 941 { 942 IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb; 943 ADD_WORKSPACE _far *aws = add_workspace(iorb); 944 AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb); 1010 int ata_write_unaligned(IORBH *pIorb, int slot) 1011 { 1012 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb; 1013 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList); 1014 ADD_WORKSPACE *aws = add_workspace(pIorb); 1015 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb); 945 1016 ULONG sector = io->RBA + io->BlocksXferred; 946 1017 SCATGATENTRY sg_single; 947 int p = iorb_unit_port( iorb);948 int d = iorb_unit_device( iorb);1018 int p = iorb_unit_port(pIorb); 1019 int d = iorb_unit_device(pIorb); 949 1020 int rc; 950 1021 951 ddprintf("ata_write_unaligned(%d.%d.%d, %ld)\n", ad_no(ai), p, d, sector);1022 DPRINTF(3,"ata_write_unaligned(%d.%d.%d, %d)\n", ad_no(ai), p, d, sector); 952 1023 953 1024 /* allocate transfer buffer */ 954 if ((aws->buf = malloc(io->BlockSize)) == NULL) { 955 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE); 1025 if ((aws->buf = MemAlloc(io->BlockSize)) == NULL) 1026 { 1027 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE); 956 1028 return(-1); 957 1029 } 958 1030 959 1031 /* copy next sector from S/G list to transfer 
buffer */ 960 sg_memcpy( io->pSGList, io->cSGList,1032 sg_memcpy(pSGList, io->cSGList, 961 1033 (ULONG) io->BlocksXferred * (ULONG) io->BlockSize, 962 1034 aws->buf, io->BlockSize, SG_TO_BUF); 963 1035 964 1036 /* prepare write command using transfer buffer */ 965 sg_single.ppXferBuf = virt_to_phys(aws->buf);1037 sg_single.ppXferBuf = MemPhysAdr(aws->buf); 966 1038 sg_single.XferBufLen = io->BlockSize; 967 rc = ata_cmd_write( iorb, ai, p, d, slot, sector, 1, &sg_single, 1,1039 rc = ata_cmd_write(pIorb, ai, p, d, slot, sector, 1, &sg_single, 1, 968 1040 io->Flags & XIO_DISABLE_HW_WRITE_CACHE); 969 1041 970 if (rc == 0) { 971 add_workspace(iorb)->blocks = 1; 972 add_workspace(iorb)->ppfunc = ata_write_pp; 973 974 } else if (rc > 0) { 975 iorb_seterr(iorb, IOERR_CMD_SGLIST_BAD); 976 977 } else { 978 iorb_seterr(iorb, IOERR_CMD_ADD_SOFTWARE_FAILURE); 1042 if (rc == 0) 1043 { 1044 add_workspace(pIorb)->blocks = 1; 1045 add_workspace(pIorb)->ppfunc = ata_write_pp; 1046 } 1047 else if (rc > 0) 1048 { 1049 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD); 1050 } 1051 else 1052 { 1053 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE); 979 1054 } 980 1055 … … 988 1063 * transferred, requeues the IORB to process the remaining sectors. 
989 1064 */ 990 void ata_write_pp(IORBH _far *iorb) 991 { 992 IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb; 993 994 io->BlocksXferred += add_workspace(iorb)->blocks; 995 ddprintf("ata_write_pp(): blocks transferred = %d\n", (int) io->BlocksXferred); 996 997 if (io->BlocksXferred >= io->BlockCount) { 1065 void ata_write_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb) 1066 { 1067 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb; 1068 1069 io->BlocksXferred += add_workspace(pIorb)->blocks; 1070 DPRINTF(3,"ata_write_pp(): blocks transferred = %d\n", io->BlocksXferred); 1071 1072 if (io->BlocksXferred >= io->BlockCount) 1073 { 998 1074 /* we're done; tell IRQ handler the IORB is complete */ 999 add_workspace(iorb)->complete = 1; 1000 } else { 1075 add_workspace(pIorb)->complete = 1; 1076 } 1077 else 1078 { 1001 1079 /* requeue this IORB for next iteration */ 1002 iorb_requeue( iorb);1080 iorb_requeue(pIorb); 1003 1081 } 1004 1082 } … … 1007 1085 * Execute ATA command. 1008 1086 */ 1009 int ata_execute_ata(IORBH _far *iorb, int slot) 1010 { 1011 IORB_ADAPTER_PASSTHRU _far *apt = (IORB_ADAPTER_PASSTHRU _far *) iorb; 1012 AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb); 1013 int p = iorb_unit_port(iorb); 1014 int d = iorb_unit_device(iorb); 1087 int ata_execute_ata(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot) 1088 { 1089 IORB_ADAPTER_PASSTHRU *apt = (IORB_ADAPTER_PASSTHRU *)pIorb; 1090 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(apt->pSGList); 1091 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb); 1092 int p = iorb_unit_port(pIorb); 1093 int d = iorb_unit_device(pIorb); 1015 1094 int rc; 1016 1095 1017 if (apt->ControllerCmdLen != sizeof(ATA_CMD)) { 1018 iorb_seterr(iorb, IOERR_CMD_SYNTAX); 1096 if (apt->ControllerCmdLen != sizeof(ATA_CMD)) 1097 { 1098 iorb_seterr(pIorb, IOERR_CMD_SYNTAX); 1019 1099 return(-1); 1020 1100 } 1021 1101 1022 1102 rc = ata_cmd(ai, p, d, slot, 0, 1023 AP_SGLIST, apt->pSGList, apt->cSGList,1024 AP_ATA_CMD, apt->pControllerCmd,1103 
AP_SGLIST, pSGList, apt->cSGList, 1104 AP_ATA_CMD, Far16ToFlat(apt->pControllerCmd), 1025 1105 AP_WRITE, !(apt->Flags & PT_DIRECTION_IN), 1026 1106 AP_END); 1027 1107 1028 if (rc == 0) { 1029 add_workspace(iorb)->ppfunc = ata_execute_ata_pp; 1108 if (rc == 0) 1109 { 1110 add_workspace(pIorb)->ppfunc = ata_execute_ata_pp; 1030 1111 } 1031 1112 … … 1040 1121 * See ata_cmd_to_fis() for an explanation of the mapping. 1041 1122 */ 1042 void ata_execute_ata_pp(IORBH _far *iorb)1043 { 1044 AHCI_PORT_DMA _far*dma_base;1045 ATA_CMD _far*cmd;1123 void ata_execute_ata_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb) 1124 { 1125 AHCI_PORT_DMA *dma_base; 1126 ATA_CMD *cmd; 1046 1127 AD_INFO *ai; 1047 u8 _far*fis;1128 u8 *fis; 1048 1129 int p; 1049 1130 1050 1131 /* get address of D2H FIS */ 1051 ai = ad_infos + iorb_unit_adapter( iorb);1052 p = iorb_unit_port( iorb);1132 ai = ad_infos + iorb_unit_adapter(pIorb); 1133 p = iorb_unit_port(pIorb); 1053 1134 dma_base = port_dma_base(ai, p); 1054 1135 fis = dma_base->rx_fis + 0x40; 1055 1136 1056 if (fis[0] != 0x34) { 1137 if (fis[0] != 0x34) 1138 { 1057 1139 /* this is not a D2H FIS - give up silently */ 1058 ddprintf("ata_execute_ata_pp(): D2H FIS type incorrect: %d\n", fis[0]);1059 add_workspace( iorb)->complete = 1;1140 DPRINTF(3,"ata_execute_ata_pp(): D2H FIS type incorrect: %d\n", fis[0]); 1141 add_workspace(pIorb)->complete = 1; 1060 1142 return; 1061 1143 } 1062 1144 1063 1145 /* map D2H FIS to the original ATA controller command structure */ 1064 cmd = (ATA_CMD _far *) ((IORB_ADAPTER_PASSTHRU _far *) iorb)->pControllerCmd;1146 cmd = (ATA_CMD *)Far16ToFlat(((IORB_ADAPTER_PASSTHRU*)pIorb)->pControllerCmd); 1065 1147 1066 1148 cmd->cmd = fis[2]; … … 1077 1159 | ((u16) fis[13] << 8); 1078 1160 1079 dphex(cmd, sizeof(*cmd), "ahci_execute_ata_pp(): cmd after completion:\n");1161 DHEXDUMP(0,cmd, sizeof(*cmd), "ahci_execute_ata_pp(): cmd after completion:\n"); 1080 1162 1081 1163 /* signal completion to interrupt handler */ 1082 
add_workspace( iorb)->complete = 1;1164 add_workspace(pIorb)->complete = 1; 1083 1165 } 1084 1166 … … 1099 1181 * else with a generic error code. 1100 1182 */ 1101 int ata_req_sense(IORBH _far *iorb, int slot)1102 { 1103 AD_INFO *ai = ad_infos + iorb_unit_adapter( iorb);1104 u8 _far *port_mmio = port_base(ai, iorb_unit_port(iorb));1183 int ata_req_sense(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot) 1184 { 1185 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb); 1186 u8 *port_mmio = port_base(ai, iorb_unit_port(pIorb)); 1105 1187 u32 tf_data = readl(port_mmio + PORT_TFDATA); 1106 u8 err = (u8) (tf_data >> 8); 1107 u8 sts = (u8) (tf_data); 1108 1109 if (sts & ATA_ERR) { 1110 if (sts & ATA_DF) { 1188 u8 err = (tf_data >> 8); 1189 u8 sts = (tf_data); 1190 1191 if (sts & ATA_ERR) 1192 { 1193 if (sts & ATA_DF) 1194 { 1111 1195 /* there is a device-specific error condition */ 1112 if (err & ATA_ICRC) { 1113 iorb_seterr(iorb, IOERR_ADAPTER_DEVICEBUSCHECK); 1114 } else if (err & ATA_UNC) { 1115 iorb_seterr(iorb, IOERR_MEDIA); 1116 } else if (err & ATA_IDNF) { 1117 iorb_seterr(iorb, IOERR_RBA_ADDRESSING_ERROR); 1118 } else { 1119 iorb_seterr(iorb, IOERR_DEVICE_NONSPECIFIC); 1196 if (err & ATA_ICRC) 1197 { 1198 iorb_seterr(pIorb, IOERR_ADAPTER_DEVICEBUSCHECK); 1120 1199 } 1121 1122 } else { 1123 iorb_seterr(iorb, IOERR_DEVICE_NONSPECIFIC); 1124 } 1125 } else { 1200 else if (err & ATA_UNC) 1201 { 1202 iorb_seterr(pIorb, IOERR_MEDIA); 1203 } 1204 else if (err & ATA_IDNF) 1205 { 1206 iorb_seterr(pIorb, IOERR_RBA_ADDRESSING_ERROR); 1207 } 1208 else 1209 { 1210 iorb_seterr(pIorb, IOERR_DEVICE_NONSPECIFIC); 1211 } 1212 1213 } 1214 else 1215 { 1216 iorb_seterr(pIorb, IOERR_DEVICE_NONSPECIFIC); 1217 } 1218 } 1219 else 1220 { 1126 1221 /* this function only gets called when we received an error interrupt */ 1127 iorb_seterr( iorb, IOERR_DEVICE_NONSPECIFIC);1222 iorb_seterr(pIorb, IOERR_DEVICE_NONSPECIFIC); 1128 1223 } 1129 1224 … … 1162 1257 * device and the paramters set from above 
(NCQ, etc). 1163 1258 */ 1164 static int ata_cmd_read(IORBH _far *iorb, AD_INFO *ai, int p, int d, int slot,1165 ULONG sector, ULONG count, SCATGATENTRY _far*sg_list,1259 static int ata_cmd_read(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot, 1260 ULONG sector, ULONG count, SCATGATENTRY *sg_list, 1166 1261 ULONG sg_cnt) 1167 1262 { 1168 1263 int rc; 1169 1264 1170 if (sector >= (1UL << 28) || count > 256 || add_workspace(iorb)->is_ncq) { 1265 if (sector >= (1UL << 28) || count > 256 || add_workspace(pIorb)->is_ncq) 1266 { 1171 1267 /* need LBA48 for this command */ 1172 if (!ai->ports[p].devs[d].lba48) { 1173 iorb_seterr(iorb, IOERR_RBA_LIMIT); 1268 if (!ai->ports[p].devs[d].lba48) 1269 { 1270 iorb_seterr(pIorb, IOERR_RBA_LIMIT); 1174 1271 return(-1); 1175 1272 } 1176 if (add_workspace(iorb)->is_ncq) { 1273 if (add_workspace(pIorb)->is_ncq) 1274 { 1177 1275 /* use NCQ read; count goes into feature register, tag into count! */ 1178 1276 rc = ata_cmd(ai, p, d, slot, ATA_CMD_FPDMA_READ, 1179 AP_SECTOR_48, (u32) sector, (u16)0,1180 AP_FEATURES, (u16)count,1181 AP_COUNT, ( u16) (slot << 3), /* tag == slot */1182 AP_SGLIST, sg_list, (u16)sg_cnt,1277 AP_SECTOR_48, sector, 0, 1278 AP_FEATURES, count, 1279 AP_COUNT, (slot << 3), /* tag == slot */ 1280 AP_SGLIST, sg_list, sg_cnt, 1183 1281 AP_DEVICE, 0x40, 1184 1282 AP_END); 1185 } else { 1283 } 1284 else 1285 { 1186 1286 rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ_EXT, 1187 AP_SECTOR_48, (u32) sector, (u16)0,1188 AP_COUNT, (u16)count,1189 AP_SGLIST, sg_list, (u16)sg_cnt,1287 AP_SECTOR_48, sector, 0, 1288 AP_COUNT, count, 1289 AP_SGLIST, sg_list, sg_cnt, 1190 1290 AP_DEVICE, 0x40, 1191 1291 AP_END); 1192 1292 } 1193 1293 1194 } else { 1294 } 1295 else 1296 { 1195 1297 rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ, 1196 AP_SECTOR_28, (u32)sector,1197 AP_COUNT, (u16)count & 0xffU,1198 AP_SGLIST, sg_list, (u16)sg_cnt,1298 AP_SECTOR_28, sector, 1299 AP_COUNT, count & 0xffU, 1300 AP_SGLIST, sg_list, sg_cnt, 1199 1301 AP_DEVICE, 
0x40, 1200 1302 AP_END); … … 1208 1310 * device and the paramters set from above (NCQ, etc) 1209 1311 */ 1210 static int ata_cmd_write(IORBH _far *iorb, AD_INFO *ai, int p, int d, int slot,1211 ULONG sector, ULONG count, SCATGATENTRY _far*sg_list,1312 static int ata_cmd_write(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot, 1313 ULONG sector, ULONG count, SCATGATENTRY *sg_list, 1212 1314 ULONG sg_cnt, int write_through) 1213 1315 { 1214 1316 int rc; 1215 1317 1216 if (sector >= (1UL << 28) || count > 256 || add_workspace(iorb)->is_ncq) { 1318 if (sector >= (1UL << 28) || count > 256 || add_workspace(pIorb)->is_ncq) 1319 { 1217 1320 /* need LBA48 for this command */ 1218 if (!ai->ports[p].devs[d].lba48) { 1219 iorb_seterr(iorb, IOERR_RBA_LIMIT); 1321 if (!ai->ports[p].devs[d].lba48) 1322 { 1323 iorb_seterr(pIorb, IOERR_RBA_LIMIT); 1220 1324 return(-1); 1221 1325 } 1222 if (add_workspace(iorb)->is_ncq) { 1326 if (add_workspace(pIorb)->is_ncq) 1327 { 1223 1328 /* use NCQ write; count goes into feature register, tag into count! 
*/ 1224 1329 rc = ata_cmd(ai, p, d, slot, ATA_CMD_FPDMA_WRITE, 1225 AP_SECTOR_48, (u32) sector, (u16)0,1226 AP_FEATURES, (u16)count,1330 AP_SECTOR_48, sector, 0, 1331 AP_FEATURES, count, 1227 1332 /* tag = slot */ 1228 AP_COUNT, ( u16) (slot << 3),1229 AP_SGLIST, sg_list, (u16)sg_cnt,1333 AP_COUNT, (slot << 3), 1334 AP_SGLIST, sg_list, sg_cnt, 1230 1335 AP_DEVICE, 0x40, 1231 1336 /* force unit access */ … … 1233 1338 AP_WRITE, 1, 1234 1339 AP_END); 1235 } else { 1340 } 1341 else 1342 { 1236 1343 rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE_EXT, 1237 AP_SECTOR_48, (u32) sector, (u16)0,1238 AP_COUNT, (u16)count,1239 AP_SGLIST, sg_list, (u16)sg_cnt,1344 AP_SECTOR_48, sector, 0, 1345 AP_COUNT, count, 1346 AP_SGLIST, sg_list, sg_cnt, 1240 1347 AP_DEVICE, 0x40, 1241 1348 AP_WRITE, 1, 1242 1349 AP_END); 1243 1350 } 1244 1245 } else { 1351 } 1352 else 1353 { 1246 1354 rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE, 1247 AP_SECTOR_28, (u32)sector,1248 AP_COUNT, (u16)count & 0xffU,1249 AP_SGLIST, sg_list, (u16)sg_cnt,1355 AP_SECTOR_28, sector, 1356 AP_COUNT, count & 0xffU, 1357 AP_SGLIST, sg_list, sg_cnt, 1250 1358 AP_DEVICE, 0x40, 1251 1359 AP_WRITE, 1, … … 1255 1363 return(rc); 1256 1364 } 1365 1366 /****************************************************************************** 1367 * Copy block from S/G list to virtual address or vice versa. 
1368 */ 1369 void sg_memcpy(SCATGATENTRY *sg_list, USHORT sg_cnt, ULONG sg_off, 1370 void *buf, USHORT len, SG_MEMCPY_DIRECTION dir) 1371 { 1372 USHORT i; 1373 USHORT l; 1374 ULONG phys_addr; 1375 ULONG pos = 0; 1376 char *p; 1377 1378 /* walk through S/G list to find the elements involved in the operation */ 1379 for (i = 0; i < sg_cnt && len > 0; i++) 1380 { 1381 if (pos <= sg_off && pos + sg_list[i].XferBufLen > sg_off) 1382 { 1383 /* this S/G element intersects with the block to be copied */ 1384 phys_addr = sg_list[i].ppXferBuf + (sg_off - pos); 1385 if ((l = sg_list[i].XferBufLen - (sg_off - pos)) > len) 1386 { 1387 l = len; 1388 } 1389 1390 if (Dev32Help_PhysToLin(phys_addr, l, (PVOID) &p)) 1391 { 1392 panic("sg_memcpy(): DevHelp_PhysToLin() failed"); 1393 } 1394 if (dir == SG_TO_BUF) 1395 { 1396 memcpy(buf, p, l); 1397 } 1398 else 1399 { 1400 memcpy(p, buf, l); 1401 } 1402 sg_off += l; 1403 buf = (char *) buf + l; 1404 len -= l; 1405 } 1406 1407 pos += sg_list[i].XferBufLen; 1408 } 1409 } 1410 1411 /****************************************************************************** 1412 * Halt processing by submitting an internal error. This is a last resort and 1413 * should only be called when the system state is corrupt. 1414 */ 1415 void panic(char *msg) 1416 { 1417 Dev32Help_InternalError(msg, strlen(msg)); 1418 } 1419
Note: See TracChangeset for help on using the changeset viewer.