Changeset 8 for trunk/src/ata.c
Timestamp: Sep 10, 2010, 11:30:39 AM
Files:     1 edited (trunk/src/ata.c)
Legend:
  unmodified lines carry no marker
  added lines are marked with "+"
  removed lines are marked with "-"
  "…" marks omitted unchanged lines between hunks
trunk/src/ata.c (r4 → r8)
…
       break;
 
-    case AP_H2D:
+    case AP_WRITE:
       ahci_flags |= AHCI_CMD_WRITE;
       break;
…
  * than AHCI_MAX_SG entries. In order to help the caller, the return value
  * of this function will indicate how many OS/2 S/G entries were
- * successfully bemapped.
+ * successfully mapped.
  *
  */
…
 
 /******************************************************************************
+ * Get index in S/G list for the number of transferred sectors in the IORB.
+ *
+ * Returning io->cSGList indicates an error.
+ *
+ * NOTE: OS/2 makes sure S/G lists are set up such that entries at the HW
+ *       limit will never cross sector boundaries. This means that splitting
+ *       S/G lists into multiple commands can be done without editing the S/G
+ *       lists.
+ */
+u16 ata_get_sg_indx(IORB_EXECUTEIO _far *io)
+{
+  ULONG offset = io->BlocksXferred * io->BlockSize;
+  USHORT i;
+
+  for (i = 0; i < io->cSGList && offset > 0; i++) {
+    offset -= io->pSGList[i].XferBufLen;
+  }
+
+  return(i);
+}
+
+/******************************************************************************
+ * Get max S/G count which will fit into our HW S/G buffers. This function is
+ * called when the S/G list is too long and we need to split the IORB into
+ * multiple commands. It returns both the number of sectors and S/G list
+ * elements that we can handle in a single command.
+ *
+ * The parameter 'sg_indx' indicates the current start index in the S/G list
+ * (0 if this is the first command iteration).
+ *
+ * The parameter 'sg_max' is the return value of v_ata_cmd() and indicates
+ * how many S/G elements were successfully mapped. Whatever we return needs to
+ * be less or equal to this value.
+ *
+ * Returning 0 in *sg_cnt indicates an error.
+ *
+ * NOTE: OS/2 makes sure S/G lists are set up such that entries at HW limits
+ *       will never cross sector boundaries. This means that splitting S/G
+ *       lists into multiple commands can be done without editing S/G list
+ *       elements. Since AHCI only allows 22 bits for each S/G element, the
+ *       hardware limits are reported as AHCI_MAX_SG / 2 but will vary based
+ *       on the actual length of S/G elements. This function looks for the
+ *       maximum number of S/G elements that can be mapped on sector
+ *       boundaries which will still fit into our HW S/G list.
+ */
+void ata_max_sg_cnt(IORB_EXECUTEIO _far *io, USHORT sg_indx, USHORT sg_max,
+                    USHORT _far *sg_cnt, USHORT _far *sector_cnt)
+{
+  ULONG max_sector_cnt = 0;
+  USHORT max_sg_cnt = 0;
+  ULONG offset = 0;
+  USHORT i;
+
+  for (i = sg_indx; i < io->cSGList; i++) {
+    if (i - sg_indx >= sg_max) {
+      /* we're beyond the number of S/G elements we can map */
+      break;
+    }
+
+    offset += io->pSGList[i].XferBufLen;
+    if (offset % io->BlockSize == 0) {
+      /* this S/G element ends on a sector boundary */
+      max_sector_cnt = offset / io->BlockSize;
+      max_sg_cnt = i + 1;
+    }
+  }
+
+  /* return the best match we found so far (0 indicating failure) */
+  *sector_cnt = max_sector_cnt;
+  *sg_cnt = max_sg_cnt;
+}
+
+
+/******************************************************************************
  * Get device or media geometry. Device and media geometry are expected to be
  * the same for non-removable devices, which will always be the case for the
…
   if (rc != 0) {
     free(aws->buf);
-    aws->buf = NULL;
     iorb_seterr(iorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
   }
…
                              ((u32) geometry->NumHeads *
                               (u32) geometry->SectorsPerTrack);
+
+  /* tell interrupt handler that this IORB is complete */
+  add_workspace(iorb)->complete = 1;
 }
 
…
   IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb;
   AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb);
+  ULONG sector = io->RBA + io->BlocksXferred;
+  USHORT count = io->BlockCount - io->BlocksXferred;
+  USHORT sg_indx;
+  USHORT sg_cnt;
   int p = iorb_unit_port(iorb);
   int d = iorb_unit_device(iorb);
   int rc;
 
-  /* prepare read command */
-  if (io->RBA >= (1UL << 28) || io->BlockCount > 256) {
-    /* need LBA48 for this command */
-    if (!ai->ports[p].devs[d].lba48) {
-      iorb_seterr(iorb, IOERR_RBA_LIMIT);
-      return(-1);
-    }
-    rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ_EXT,
-                 AP_SECTOR_48, (u32) io->RBA, (u16) 0,
-                 AP_COUNT, (u16) io->BlockCount,
-                 AP_SGLIST, io->pSGList, (u16) io->cSGList,
-                 AP_DEVICE, 0x4000,
-                 AP_END);
+  /* prepare read command while keeping an eye on S/G count limitations */
+  do {
+    sg_indx = ata_get_sg_indx(io);
+    sg_cnt = io->cSGList - sg_indx;
+
+    if (sector >= (1UL << 28) || count > 256) {
+      /* need LBA48 for this command */
+      if (!ai->ports[p].devs[d].lba48) {
+        iorb_seterr(iorb, IOERR_RBA_LIMIT);
+        return(-1);
+      }
+      rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ_EXT,
+                   AP_SECTOR_48, (u32) sector, (u16) 0,
+                   AP_COUNT, (u16) count,
+                   AP_SGLIST, io->pSGList + sg_indx, (u16) sg_cnt,
+                   AP_DEVICE, 0x4000,
+                   AP_END);
+    } else {
+      rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ,
+                   AP_SECTOR_28, (u32) sector,
+                   AP_COUNT, (u16) count & 0xffU,
+                   AP_SGLIST, io->pSGList + sg_indx, (u16) sg_cnt,
+                   AP_DEVICE, 0x4000,
+                   AP_END);
+    }
+
+    if (rc > 0) {
+      /* couldn't map all S/G elements */
+      ata_max_sg_cnt(io, sg_indx, (USHORT) rc, &sg_cnt, &count);
+    }
+  } while (rc > 0 && sg_cnt > 0);
+
+  if (rc == 0) {
+    add_workspace(iorb)->blocks = count;
+    add_workspace(iorb)->ppfunc = ata_read_pp;
+
+  } else if (rc > 0) {
+    iorb_seterr(iorb, IOERR_CMD_SGLIST_BAD);
+
   } else {
-    rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ,
-                 AP_SECTOR_28, (u32) io->RBA,
-                 AP_COUNT, (u16) io->BlockCount & 0xffU,
-                 AP_SGLIST, io->pSGList, (u16) io->cSGList,
-                 AP_DEVICE, 0x4000,
-                 AP_END);
+    iorb_seterr(iorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
   }
 
…
 
 /******************************************************************************
- * Verify readability of sectors on AHCI device.
+ * Post processing function for ata_read(); this function updates the
+ * BlocksXferred counter in the IORB and, if not all blocks have been
+ * transferred, requeues the IORB to process the remaining sectors.
+ */
+void ata_read_pp(IORBH _far *iorb)
+{
+  IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb;
+
+  io->BlocksXferred += add_workspace(iorb)->blocks;
+  ddprintf("ata_read_pp(): blocks transferred = %d\n", (int) io->BlocksXferred);
+
+  if (io->BlocksXferred >= io->BlockCount) {
+    /* we're done; tell IRQ handler the IORB is complete */
+    add_workspace(iorb)->complete = 1;
+  } else {
+    /* requeue this IORB for next iteration */
+    iorb_requeue(iorb);
+  }
+}
+
+/******************************************************************************
+ * Verify readability of sectors on ATA device.
  */
 int ata_verify(IORBH _far *iorb, int slot)
…
                  AP_SECTOR_48, (u32) io->RBA, (u16) 0,
                  AP_COUNT, (u16) io->BlockCount,
-                 AP_SGLIST, io->pSGList, (u16) io->cSGList,
                  AP_DEVICE, 0x4000,
                  AP_END);
…
                  AP_SECTOR_28, (u32) io->RBA,
                  AP_COUNT, (u16) io->BlockCount & 0xffU,
-                 AP_SGLIST, io->pSGList, (u16) io->cSGList,
                  AP_END);
   }
…
   IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb;
   AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb);
+  ULONG sector = io->RBA + io->BlocksXferred;
+  USHORT count = io->BlockCount - io->BlocksXferred;
+  USHORT sg_indx;
+  USHORT sg_cnt;
   int p = iorb_unit_port(iorb);
   int d = iorb_unit_device(iorb);
   int rc;
 
-  /* prepare write command */
-  if (io->RBA >= (1UL << 28) || io->BlockCount > 256) {
-    /* need LBA48 for this command */
-    if (!ai->ports[p].devs[d].lba48) {
-      iorb_seterr(iorb, IOERR_RBA_LIMIT);
-      return(-1);
-    }
-    rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE_EXT,
-                 AP_SECTOR_48, (u32) io->RBA, (u16) 0,
-                 AP_COUNT, (u16) io->BlockCount,
-                 AP_SGLIST, io->pSGList, (u16) io->cSGList,
-                 AP_DEVICE, 0x4000,
-                 AP_END);
+  /* prepare write command while keeping an eye on S/G count limitations */
+  do {
+    sg_indx = ata_get_sg_indx(io);
+    sg_cnt = io->cSGList - sg_indx;
+
+    if (sector >= (1UL << 28) || count > 256) {
+      /* need LBA48 for this command */
+      if (!ai->ports[p].devs[d].lba48) {
+        iorb_seterr(iorb, IOERR_RBA_LIMIT);
+        return(-1);
+      }
+      rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE_EXT,
+                   AP_SECTOR_48, (u32) sector, (u16) 0,
+                   AP_COUNT, (u16) count,
+                   AP_SGLIST, io->pSGList + sg_indx, (u16) sg_cnt,
+                   AP_DEVICE, 0x4000,
+                   AP_WRITE,
+                   AP_END);
+    } else {
+      rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE,
+                   AP_SECTOR_28, (u32) sector,
+                   AP_COUNT, (u16) count & 0xffU,
+                   AP_SGLIST, io->pSGList + sg_indx, (u16) sg_cnt,
+                   AP_DEVICE, 0x4000,
+                   AP_WRITE,
+                   AP_END);
+    }
+
+    if (rc > 0) {
+      /* couldn't map all S/G elements */
+      ata_max_sg_cnt(io, sg_indx, (USHORT) rc, &sg_cnt, &count);
+    }
+  } while (rc > 0 && sg_cnt > 0);
+
+  if (rc == 0) {
+    add_workspace(iorb)->blocks = count;
+    add_workspace(iorb)->ppfunc = ata_write_pp;
+
+  } else if (rc > 0) {
+    iorb_seterr(iorb, IOERR_CMD_SGLIST_BAD);
+
   } else {
-    rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE,
-                 AP_SECTOR_28, (u32) io->RBA,
-                 AP_COUNT, (u16) io->BlockCount & 0xffU,
-                 AP_SGLIST, io->pSGList, (u16) io->cSGList,
-                 AP_DEVICE, 0x4000,
-                 AP_END);
+    iorb_seterr(iorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
   }
 
   return(rc);
+}
+
+/******************************************************************************
+ * Post processing function for ata_write(); this function updates the
+ * BlocksXferred counter in the IORB and, if not all blocks have been
+ * transferred, requeues the IORB to process the remaining sectors.
+ */
+void ata_write_pp(IORBH _far *iorb)
+{
+  IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb;
+
+  io->BlocksXferred += add_workspace(iorb)->blocks;
+  ddprintf("ata_write_pp(): blocks transferred = %d\n", (int) io->BlocksXferred);
+
+  if (io->BlocksXferred >= io->BlockCount) {
+    /* we're done; tell IRQ handler the IORB is complete */
+    add_workspace(iorb)->complete = 1;
+  } else {
+    /* requeue this IORB for next iteration */
+    iorb_requeue(iorb);
+  }
 }
 
…
 
 /******************************************************************************
- * Request sense information (which means "read ATA log page" for ATA devices)
+ * Request sense information for a failed command. Since there is no "request
+ * sense" command for ATA devices, we need to read the current error code from
+ * the AHCI task file register and fabricate the sense information.
+ *
+ * NOTES:
+ *
+ * - This function must be called right after an ATA command has failed and
+ *   before any other commands are queued on the corresponding port. This
+ *   function is typically called in the port restart context hook which is
+ *   triggered by an AHCI error interrupt.
+ *
+ * - The ATA error bits are a complete mess. We'll try and catch the most
+ *   interesting error codes (such as medium errors) and report everything
+ *   else with a generic error code.
  */
 int ata_req_sense(IORBH _far *iorb, int slot)
 {
-  iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
+  AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb);
+  u8 _far *port_mmio = port_base(ai, iorb_unit_port(iorb));
+  u32 tf_data = readl(port_mmio + PORT_TFDATA);
+  u8 err = (u8) (tf_data >> 8);
+  u8 sts = (u8) (tf_data);
+
+  if (sts & ATA_ERR) {
+    if (sts & ATA_DF) {
+      /* there is a device-specific error condition */
+      if (err & ATA_ICRC) {
+        iorb_seterr(iorb, IOERR_ADAPTER_DEVICEBUSCHECK);
+      } else if (err & ATA_UNC) {
+        iorb_seterr(iorb, IOERR_MEDIA);
+      } else if (err & ATA_IDNF) {
+        iorb_seterr(iorb, IOERR_RBA_ADDRESSING_ERROR);
+      } else {
+        iorb_seterr(iorb, IOERR_DEVICE_NONSPECIFIC);
+      }
+
+    } else {
+      iorb_seterr(iorb, IOERR_DEVICE_NONSPECIFIC);
+    }
+  }
+
+  /* TBD: fill in SCSI sense buffer in IORB */
+
+  /* Return an error to indicate there's no HW command to be submitted and
+   * that the IORB can be completed "as is" (the upstream code expects the
+   * IORB error code, if any, to be set when this happens and this is exactly
+   * what this function is all about).
+   */
   return(-1);
 }
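
The heart of this changeset is the do/while loop now used in ata_read() and ata_write(): when ata_cmd() cannot map the whole S/G list (positive return value), ata_max_sg_cnt() trims the request to the largest prefix of S/G elements that still ends on a sector boundary, and the remainder is picked up on the next iteration after ata_read_pp()/ata_write_pp() advance BlocksXferred and requeue the IORB. The following standalone sketch (plain C with simplified, hypothetical types and names; not code from this changeset) illustrates that boundary search:

/*
 * Sketch of the split strategy behind ata_get_sg_indx()/ata_max_sg_cnt():
 * starting at sg_indx, accept S/G elements until the hardware limit is
 * reached, remembering the last element that ends exactly on a sector
 * boundary.  That prefix becomes one command; the rest is requeued.
 */
#include <stdio.h>

struct sg_entry {
  unsigned long len;                    /* element length in bytes */
};

/* Returns the number of S/G elements (starting at sg_indx) that fit into one
 * command of at most hw_max elements and end on a sector boundary; *sectors
 * receives the matching sector count.  A return value of 0 means no valid
 * split point was found.
 */
static unsigned max_sg_split(const struct sg_entry *sg, unsigned sg_total,
                             unsigned sg_indx, unsigned hw_max,
                             unsigned long sector_size, unsigned long *sectors)
{
  unsigned long offset = 0;
  unsigned long best_sectors = 0;
  unsigned best_cnt = 0;
  unsigned i;

  for (i = sg_indx; i < sg_total && i - sg_indx < hw_max; i++) {
    offset += sg[i].len;
    if (offset % sector_size == 0) {
      /* element ends on a sector boundary -> candidate split point */
      best_cnt = i - sg_indx + 1;
      best_sectors = offset / sector_size;
    }
  }

  *sectors = best_sectors;
  return best_cnt;
}

int main(void)
{
  /* 512 + 1024 + 256 + 256 bytes with 512-byte sectors */
  struct sg_entry sg[] = { { 512 }, { 1024 }, { 256 }, { 256 } };
  unsigned long sectors;
  unsigned cnt = max_sg_split(sg, 4, 0, 3, 512, &sectors);

  /* prints "2 elements, 3 sectors": the third element ends mid-sector,
   * so the split stops after the second element */
  printf("%u elements, %lu sectors\n", cnt, sectors);
  return 0;
}

Because OS/2 guarantees that S/G entries at hardware limits never cross sector boundaries, such a split point normally exists; when the search comes back empty, the driver reports IOERR_CMD_SGLIST_BAD instead of issuing a command.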