Legend:
  ' ' Unmodified
  '+' Added
  '-' Removed
trunk/src/ahci.c (r4 → r8)

@@ -37 +37 @@
 /* -------------------------- function prototypes -------------------------- */

-static int exec_polled_cmd (AD_INFO *ai, int p, int d, int timeout,
-                            int cmd, ...);
 static void ahci_setup_device (AD_INFO *ai, int p, int d, u16 *id_buf);
 static void _far timeout_callback (ULONG timer_handle, ULONG p1, ULONG p2);

@@ -265 +263 @@

   /* set maximum command slot number */
-  ai->cmd_max = (u16) ((ai->cap >> 8) & 31);
+  ai->cmd_max = (u16) ((ai->cap >> 8) & 0x1f);

   return(0);

@@ -404 +402 @@
  * - This function is expected to be called with the spinlock released but
  *   the corresponding adapter's busy flag set. It will aquire the spinlock
- *   temporarily to allocate/free memory for ATA identify buffer.
+ *   temporarily to allocate/free memory for the ATA identify buffer.
  */
 int ahci_scan_ports(AD_INFO *ai)

@@ -457 +455 @@
     /* this port has a device attached and is ready to accept commands */
     ddprintf("port #%d seems to be attached to a device; probing...\n", p);
-    rc = exec_polled_cmd(ai, p, 0, 500, ATA_CMD_ID_ATA,
-                         AP_VADDR, (void _far *) id_buf, 512,
-                         AP_END);
+    rc = ahci_exec_polled_cmd(ai, p, 0, 500, ATA_CMD_ID_ATA,
+                              AP_VADDR, (void _far *) id_buf, 512,
+                              AP_END);

     if (rc != 0 || id_buf[ATA_ID_CONFIG] & (1U << 15)) {
       /* this might be an ATAPI device; run IDENTIFY_PACKET_DEVICE */
-      rc = exec_polled_cmd(ai, p, 0, 500, ATA_CMD_ID_ATAPI,
-                           AP_VADDR, (void _far *) id_buf, 512,
-                           AP_END);
+      rc = ahci_exec_polled_cmd(ai, p, 0, 500, ATA_CMD_ID_ATAPI,
+                                AP_VADDR, (void _far *) id_buf, 512,
+                                AP_END);
     }

@@ -826 +824 @@
 {
   volatile u32 *cmds;
+  ADD_WORKSPACE _far *aws = add_workspace(iorb);
   AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb);
   P_INFO *port = ai->ports + iorb_unit_port(iorb);
   ULONG timeout = (iorb->Timeout > 0) ? iorb->Timeout : DEFAULT_TIMEOUT;
   u8 _far *port_mmio = port_base(ai, iorb_unit_port(iorb));
-  int is_ncq = ((ai->cap & HOST_CAP_NCQ) &&
-                ncq_capable &&
-                !add_workspace(iorb)->no_ncq);
   int i;

-  /* NCQ disabled temporarily until non-NCQ commands are working fine */
-  is_ncq = 0;
+  /* determine whether this will be an NCQ request */
+  aws->is_ncq = ((ai->cap & HOST_CAP_NCQ) && ncq_capable && !aws->no_ncq);
+
+  /* NCQ disabled temporarily until non-NCQ commands are fully working */
+  aws->is_ncq = 0;

   /* check whether adapter is available */

@@ -851 +850 @@

   /* prevent NCQ/regular command mix */
-  if (is_ncq && port->reg_cmds == 0 || !is_ncq && port->ncq_cmds == 0) {
+  if (aws->is_ncq && port->reg_cmds == 0 ||
+      !aws->is_ncq && port->ncq_cmds == 0) {

     /* Find next available command slot. We use a simple round-robin
      * approach to keep commands in the higher slots
      * from stalling when new commands are coming in frequently.
      */
-    cmds = (is_ncq) ? &port->ncq_cmds : &port->reg_cmds;
+    cmds = (aws->is_ncq) ? &port->ncq_cmds : &port->reg_cmds;
     for (i = 0; i <= ai->cmd_max; i++) {
       if ((*cmds & (1UL << port->cmd_slot)) == 0) {
         break;
       }
-      if (++(port->cmd_slot) >= ai->cmd_max) {
+      if (++(port->cmd_slot) > ai->cmd_max) {
         port->cmd_slot = 0;
       }

@@ -879 +879 @@

     /* start timer for this IORB */
-    ADD_StartTimerMS(&add_workspace(iorb)->timer, timeout,
-                     (PFN) timeout_callback, iorb, 0);
-
-    /* update IORB and increment next command index */
-    add_workspace(iorb)->queued_hw = 1;
-    add_workspace(iorb)->cmd_slot = port->cmd_slot;
-    if (++(port->cmd_slot) >= ai->cmd_max) {
-      port->cmd_slot = 0;
-    }
+    ADD_StartTimerMS(&aws->timer, timeout, (PFN) timeout_callback, iorb, 0);
+
+    /* update IORB */
+    aws->queued_hw = 1;
+    aws->cmd_slot = port->cmd_slot;

     /* issue command to hardware */
-    dprintf("issuing command on slot %d\n", port->cmd_slot);
+    ddprintf("issuing command on slot %d\n", port->cmd_slot);
     *cmds |= (1UL << port->cmd_slot);
-    if (is_ncq) {
+    if (aws->is_ncq) {
       writel(port_mmio + PORT_SCR_ACT, (1UL << port->cmd_slot));
       readl(port_mmio + PORT_SCR_ACT);    /* flush */

@@ -899 +895 @@
       readl(port_mmio + PORT_CMD_ISSUE);  /* flush */

+    /* make sure next cmd won't use the same slot to prevent starvation */
+    if (++(port->cmd_slot) > ai->cmd_max) {
+      port->cmd_slot = 0;
+    }
     spin_unlock(drv_lock);
     return;

@@ -906 +906 @@

   /* requeue this IORB; it will be picked up again in trigger_engine() */
-  add_workspace(iorb)->processing = 0;
+  aws->processing = 0;
   spin_unlock(drv_lock);
 }

@@ -916 +916 @@
  *
  * - We need to restore the BIOS configuration after we're done with this
- *   command because we might still need the BIOS to load additional drivers.
+ *   command because someone might still call int 13 routines; sending
+ *   asynchronous commands and waiting for interrupts to indicate completion
+ *   won't work in such a scenario.
  * - Our context hooks won't work while the device managers are initializing
  *   (they can't yield at init time).

@@ -951 +953 @@
   }

-  /* restart port (includes the necessary port configuration */
+  /* restart port (includes the necessary port configuration) */
   if (ahci_stop_port(ai, p) || ahci_start_port(ai, p, 0)) {
     iorb_seterr(iorb, IOERR_ADAPTER_NONSPECIFIC);

@@ -983 +985 @@
   if (add_workspace(iorb)->ppfunc != NULL) {
     add_workspace(iorb)->ppfunc(iorb);
+  } else {
+    add_workspace(iorb)->complete = 1;
   }
 }

@@ -994 +998 @@

   ahci_restore_bios_config(ai);

-  iorb_done(iorb);
+  if (add_workspace(iorb)->complete || (iorb->Status & IORB_ERROR)) {
+    aws_free(add_workspace(iorb));
+    iorb_done(iorb);
+  }
   return;
+}
+
+/******************************************************************************
+ * Execute polled ATA/ATAPI command. This function will block until the command
+ * has completed or the timeout has expired, thus it should only be used during
+ * initialization. Furthermore, it will always use command slot zero.
+ *
+ * The difference to ahci_exec_polled_iorb() is that this function executes
+ * arbitrary ATA/ATAPI commands outside the context of an IORB. It's typically
+ * used when scanning for devices during initialization.
+ */
+int ahci_exec_polled_cmd(AD_INFO *ai, int p, int d, int timeout, int cmd, ...)
+{
+  va_list va;
+  u8 _far *port_mmio = port_base(ai, p);
+  u32 tmp;
+  int rc;
+
+  /* verify that command slot 0 is idle */
+  if (readl(port_mmio + PORT_CMD_ISSUE) & 1) {
+    ddprintf("port %d slot 0 is not idle; not executing polled cmd\n", p);
+    return(-1);
+  }
+
+  /* fill in command slot 0 */
+  va_start(va, cmd);
+  if ((rc = v_ata_cmd(ai, p, d, 0, cmd, va)) != 0) {
+    return(rc);
+  }
+
+  /* start command execution for slot 0 */
+  ddprintf("executing polled cmd...");
+  writel(port_mmio + PORT_CMD_ISSUE, 1);
+
+  /* wait until command has completed */
+  while (timeout > 0 && (readl(port_mmio + PORT_CMD_ISSUE) & 1)) {
+    mdelay(10);
+    timeout -= 10;
+  }
+  ddprintf(" done (time left = %d)\n", timeout);
+
+  /* check error condition */
+  if ((tmp = readl(port_mmio + PORT_SCR_ERR)) != 0) {
+    dprintf("SERR = 0x%08lx\n", tmp);
+    return(-1);
+  }
+  if (((tmp = readl(port_mmio + PORT_TFDATA)) & 0x89) != 0) {
+    dprintf("TFDATA = 0x%08lx\n", tmp);
+    return(-1);
+  }
+
+  return((timeout <= 0) ? -1 : 0);
 }

@@ -1119 +1178 @@
   if (aws->ppfunc != NULL) {
     aws->ppfunc(iorb);
+  } else {
+    aws->complete = 1;
   }
-  aws_free(aws);
-
-  /* move IORB to our temporary done queue */
-  iorb_queue_del(&ai->ports[p].iorb_queue, iorb);
-  iorb_queue_add(&done_queue, iorb);
+
+  if (aws->complete) {
+    /* this IORB is complete */
+    aws_free(aws);
+
+    /* move IORB to our temporary done queue */
+    iorb_queue_del(&ai->ports[p].iorb_queue, iorb);
+    iorb_queue_add(&done_queue, iorb);
+  }

   /* clear corresponding bit in issued command bitmaps */

@@ -1205 +1270 @@
 void ahci_get_geometry(IORBH _far *iorb)
 {
+  dprintf("ahci_get_geometry(%d.%d.%d)\n", (int) iorb_unit_adapter(iorb),
+          (int) iorb_unit_port(iorb), (int) iorb_unit_device(iorb));
+
   ahci_exec_iorb(iorb, 0, cmd_func(iorb, get_geometry));
 }

@@ -1213 +1281 @@
 void ahci_unit_ready(IORBH _far *iorb)
 {
+  dprintf("ahci_unit_ready(%d.%d.%d)\n", (int) iorb_unit_adapter(iorb),
+          (int) iorb_unit_port(iorb), (int) iorb_unit_device(iorb));
+
   ahci_exec_iorb(iorb, 0, cmd_func(iorb, unit_ready));
 }

@@ -1221 +1292 @@
 void ahci_read(IORBH _far *iorb)
 {
+  dprintf("ahci_read(%d.%d.%d, %ld, %ld)\n", (int) iorb_unit_adapter(iorb),
+          (int) iorb_unit_port(iorb), (int) iorb_unit_device(iorb),
+          (long) ((IORB_EXECUTEIO _far *) iorb)->RBA,
+          (long) ((IORB_EXECUTEIO _far *) iorb)->BlockCount);
+
   ahci_exec_iorb(iorb, 1, cmd_func(iorb, read));
 }

@@ -1229 +1305 @@
 void ahci_verify(IORBH _far *iorb)
 {
+  dprintf("ahci_verify(%d.%d.%d, %ld, %ld)\n", (int) iorb_unit_adapter(iorb),
+          (int) iorb_unit_port(iorb), (int) iorb_unit_device(iorb),
+          (long) ((IORB_EXECUTEIO _far *) iorb)->RBA,
+          (long) ((IORB_EXECUTEIO _far *) iorb)->BlockCount);
+
   ahci_exec_iorb(iorb, 0, cmd_func(iorb, verify));
 }

@@ -1237 +1318 @@
 void ahci_write(IORBH _far *iorb)
 {
+  dprintf("ahci_write(%d.%d.%d, %ld, %ld)\n", (int) iorb_unit_adapter(iorb),
+          (int) iorb_unit_port(iorb), (int) iorb_unit_device(iorb),
+          (long) ((IORB_EXECUTEIO _far *) iorb)->RBA,
+          (long) ((IORB_EXECUTEIO _far *) iorb)->BlockCount);
+
   ahci_exec_iorb(iorb, 1, cmd_func(iorb, write));
 }

@@ -1248 +1334 @@
   int p = iorb_unit_port(iorb);
   int d = iorb_unit_device(iorb);
+
+  dphex(((IORB_ADAPTER_PASSTHRU _far *) iorb)->pControllerCmd,
+        ((IORB_ADAPTER_PASSTHRU _far *) iorb)->ControllerCmdLen,
+        "ahci_execute_cdb(%d.%d.%d)", (int) iorb_unit_adapter(iorb),
+        (int) iorb_unit_port(iorb), (int) iorb_unit_device(iorb));

   if (ad_infos[a].ports[p].devs[d].atapi) {

@@ -1266 +1357 @@
   int d = iorb_unit_device(iorb);

+  dphex(((IORB_ADAPTER_PASSTHRU _far *) iorb)->pControllerCmd,
+        ((IORB_ADAPTER_PASSTHRU _far *) iorb)->ControllerCmdLen,
+        "ahci_execute_cdb(%d.%d.%d)", (int) iorb_unit_adapter(iorb),
+        (int) iorb_unit_port(iorb), (int) iorb_unit_device(iorb));
+
   if (ad_infos[a].ports[p].devs[d].atapi) {
     iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);

@@ -1272 +1368 @@
     ahci_exec_iorb(iorb, 0, ata_execute_ata);
   }
 }
-
-/******************************************************************************
- * Execute polled ATA/ATAPI command. This function will block until the command
- * has completed or the timeout has expired, thus it should only be used during
- * initialization. Furthermore, it will always use command slot zero.
- */
-static int exec_polled_cmd(AD_INFO *ai, int p, int d, int timeout, int cmd, ...)
-{
-  va_list va;
-  u8 _far *port_mmio = port_base(ai, p);
-  u32 tmp;
-  int rc;
-
-  /* verify that command slot 0 is idle */
-  if (readl(port_mmio + PORT_CMD_ISSUE) & 1) {
-    ddprintf("port %d slot 0 is not idle; not executing polled cmd\n", p);
-    return(-1);
-  }
-
-  /* fill in command slot 0 */
-  va_start(va, cmd);
-  if ((rc = v_ata_cmd(ai, p, d, 0, cmd, va)) != 0) {
-    return(rc);
-  }
-
-  /* start command execution for slot 0 */
-  ddprintf("executing polled cmd...");
-  writel(port_mmio + PORT_CMD_ISSUE, 1);
-
-  /* wait until command has completed */
-  while (timeout > 0 && (readl(port_mmio + PORT_CMD_ISSUE) & 1)) {
-    mdelay(10);
-    timeout -= 10;
-  }
-  ddprintf(" done (time left = %d)\n", timeout);
-
-  /* check error condition */
-  if ((tmp = readl(port_mmio + PORT_SCR_ERR)) != 0) {
-    dprintf("SERR = 0x%08lx\n", tmp);
-    return(-1);
-  }
-  if (((tmp = readl(port_mmio + PORT_TFDATA)) & 0x89) != 0) {
-    dprintf("TFDATA = 0x%08lx\n", tmp);
-    return(-1);
-  }
-
-  return((timeout <= 0) ? -1 : 0);
-}
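The heart of the ahci_exec_iorb() change is the round-robin slot search: the cursor advances past every issued slot, and after issuing a command it is bumped once more so the next command cannot reuse the same slot. The following standalone sketch mirrors that logic outside the driver; slot_state_t and find_free_slot are illustrative stand-ins (assuming the 32-slot AHCI maximum), not driver definitions.

#include <stdio.h>

typedef struct {
  unsigned long cmds;      /* bitmap of command slots currently in flight */
  int           cmd_slot;  /* round-robin cursor for the next candidate */
} slot_state_t;

/* find a free slot starting at the cursor; returns slot index or -1 */
static int find_free_slot(slot_state_t *st, int cmd_max)
{
  int i;

  for (i = 0; i <= cmd_max; i++) {
    if ((st->cmds & (1UL << st->cmd_slot)) == 0) {
      return st->cmd_slot;
    }
    if (++(st->cmd_slot) > cmd_max) {
      st->cmd_slot = 0;   /* wrap around */
    }
  }
  return -1;              /* all slots busy */
}

int main(void)
{
  slot_state_t st = { 0x07UL, 0 };  /* slots 0..2 busy, cursor at 0 */
  int slot = find_free_slot(&st, 31);

  if (slot >= 0) {
    st.cmds |= (1UL << slot);       /* issue: mark slot busy */
    /* advance the cursor past the issued slot; this is what keeps
     * higher-numbered slots from being starved by new arrivals */
    if (++st.cmd_slot > 31) {
      st.cmd_slot = 0;
    }
  }
  printf("allocated slot %d, bitmap now 0x%08lx\n", slot, st.cmds);
  return 0;
}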
trunk/src/ahci.h (r4 → r8)

@@ -209 +209 @@
  * interface it's developed for, is based on x86 design patterns, we're
  * not even going to start making a difference between little and big
- * endian architectures. PCI is little endoian, AHCI is little endian,
+ * endian architectures. PCI is little endian, AHCI is little endian,
  * x86 is little endian, and that's it.
  */
trunk/src/ata.c (r4 → r8)

@@ -98 +98 @@
       break;

-    case AP_H2D:
+    case AP_WRITE:
       ahci_flags |= AHCI_CMD_WRITE;
       break;

@@ -221 +221 @@
  * than AHCI_MAX_SG entries. In order to help the caller, the return value
  * of this function will indicate how many OS/2 S/G entries were
- * successfully be mapped.
+ * successfully mapped.
  *
  */

@@ -306 +306 @@

 /******************************************************************************
+ * Get index in S/G list for the number of transferred sectors in the IORB.
+ *
+ * Returning io->cSGList indicates an error.
+ *
+ * NOTE: OS/2 makes sure S/G lists are set up such that entries at the HW
+ *       limit will never cross sector boundaries. This means that splitting
+ *       S/G lists into multiple commands can be done without editing the S/G
+ *       lists.
+ */
+u16 ata_get_sg_indx(IORB_EXECUTEIO _far *io)
+{
+  ULONG offset = io->BlocksXferred * io->BlockSize;
+  USHORT i;
+
+  for (i = 0; i < io->cSGList && offset > 0; i++) {
+    offset -= io->pSGList[i].XferBufLen;
+  }
+
+  return(i);
+}
+
+/******************************************************************************
+ * Get max S/G count which will fit into our HW S/G buffers. This function is
+ * called when the S/G list is too long and we need to split the IORB into
+ * multiple commands. It returns both the number of sectors and S/G list
+ * elements that we can handle in a single command.
+ *
+ * The parameter 'sg_indx' indicates the current start index in the S/G list
+ * (0 if this is the first command iteration).
+ *
+ * The parameter 'sg_max' is the return value of v_ata_cmd() and indicates
+ * how many S/G elements were successfully mapped. Whatever we return needs to
+ * be less or equal to this value.
+ *
+ * Returning 0 in *sg_cnt indicates an error.
+ *
+ * NOTE: OS/2 makes sure S/G lists are set up such that entries at HW limits
+ *       will never cross sector boundaries. This means that splitting S/G
+ *       lists into multiple commands can be done without editing S/G list
+ *       elements. Since AHCI only allows 22 bits for each S/G element, the
+ *       hardware limits are reported as AHCI_MAX_SG / 2 but will vary based
+ *       on the actual length of S/G elements. This function looks for the
+ *       maximum number of S/G elements that can be mapped on sector
+ *       boundaries which will still fit into our HW S/G list.
+ */
+void ata_max_sg_cnt(IORB_EXECUTEIO _far *io, USHORT sg_indx, USHORT sg_max,
+                    USHORT _far *sg_cnt, USHORT _far *sector_cnt)
+{
+  ULONG max_sector_cnt = 0;
+  USHORT max_sg_cnt = 0;
+  ULONG offset = 0;
+  USHORT i;
+
+  for (i = sg_indx; i < io->cSGList; i++) {
+    if (i - sg_indx >= sg_max) {
+      /* we're beyond the number of S/G elements we can map */
+      break;
+    }
+
+    offset += io->pSGList[i].XferBufLen;
+    if (offset % io->BlockSize == 0) {
+      /* this S/G element ends on a sector boundary */
+      max_sector_cnt = offset / io->BlockSize;
+      max_sg_cnt = i + 1;
+    }
+  }
+
+  /* return the best match we found so far (0 indicating failure) */
+  *sector_cnt = max_sector_cnt;
+  *sg_cnt = max_sg_cnt;
+}
+
+/******************************************************************************
  * Get device or media geometry. Device and media geometry are expected to be
  * the same for non-removable devices, which will always be the case for the

@@ -334 +408 @@
   if (rc != 0) {
     free(aws->buf);
-    aws->buf = NULL;
     iorb_seterr(iorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
   }

@@ -404 +477 @@
                                 ((u32) geometry->NumHeads *
                                  (u32) geometry->SectorsPerTrack);
+
+  /* tell interrupt handler that this IORB is complete */
+  add_workspace(iorb)->complete = 1;
 }

@@ -426 +502 @@
   IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb;
   AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb);
+  ULONG sector = io->RBA + io->BlocksXferred;
+  USHORT count = io->BlockCount - io->BlocksXferred;
+  USHORT sg_indx;
+  USHORT sg_cnt;
   int p = iorb_unit_port(iorb);
   int d = iorb_unit_device(iorb);
   int rc;

-  /* prepare read command */
-  if (io->RBA >= (1UL << 28) || io->BlockCount > 256) {
-    /* need LBA48 for this command */
-    if (!ai->ports[p].devs[d].lba48) {
-      iorb_seterr(iorb, IOERR_RBA_LIMIT);
-      return(-1);
-    }
-    rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ_EXT,
-                 AP_SECTOR_48, (u32) io->RBA, (u16) 0,
-                 AP_COUNT, (u16) io->BlockCount,
-                 AP_SGLIST, io->pSGList, (u16) io->cSGList,
-                 AP_DEVICE, 0x4000,
-                 AP_END);
+  /* prepare read command while keeping an eye on S/G count limitations */
+  do {
+    sg_indx = ata_get_sg_indx(io);
+    sg_cnt = io->cSGList - sg_indx;
+
+    if (sector >= (1UL << 28) || count > 256) {
+      /* need LBA48 for this command */
+      if (!ai->ports[p].devs[d].lba48) {
+        iorb_seterr(iorb, IOERR_RBA_LIMIT);
+        return(-1);
+      }
+      rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ_EXT,
+                   AP_SECTOR_48, (u32) sector, (u16) 0,
+                   AP_COUNT, (u16) count,
+                   AP_SGLIST, io->pSGList + sg_indx, (u16) sg_cnt,
+                   AP_DEVICE, 0x4000,
+                   AP_END);
+    } else {
+      rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ,
+                   AP_SECTOR_28, (u32) sector,
+                   AP_COUNT, (u16) count & 0xffU,
+                   AP_SGLIST, io->pSGList + sg_indx, (u16) sg_cnt,
+                   AP_DEVICE, 0x4000,
+                   AP_END);
+    }
+
+    if (rc > 0) {
+      /* couldn't map all S/G elements */
+      ata_max_sg_cnt(io, sg_indx, (USHORT) rc, &sg_cnt, &count);
+    }
+  } while (rc > 0 && sg_cnt > 0);
+
+  if (rc == 0) {
+    add_workspace(iorb)->blocks = count;
+    add_workspace(iorb)->ppfunc = ata_read_pp;
+
+  } else if (rc > 0) {
+    iorb_seterr(iorb, IOERR_CMD_SGLIST_BAD);
+
   } else {
-    rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ,
-                 AP_SECTOR_28, (u32) io->RBA,
-                 AP_COUNT, (u16) io->BlockCount & 0xffU,
-                 AP_SGLIST, io->pSGList, (u16) io->cSGList,
-                 AP_DEVICE, 0x4000,
-                 AP_END);
+    iorb_seterr(iorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
   }

@@ -456 +557 @@

 /******************************************************************************
- * Verify readability of sectors on AHCI device.
+ * Post processing function for ata_read(); this function updates the
+ * BlocksXferred counter in the IORB and, if not all blocks have been
+ * transferred, requeues the IORB to process the remaining sectors.
+ */
+void ata_read_pp(IORBH _far *iorb)
+{
+  IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb;
+
+  io->BlocksXferred += add_workspace(iorb)->blocks;
+  ddprintf("ata_read_pp(): blocks transferred = %d\n", (int) io->BlocksXferred);
+
+  if (io->BlocksXferred >= io->BlockCount) {
+    /* we're done; tell IRQ handler the IORB is complete */
+    add_workspace(iorb)->complete = 1;
+  } else {
+    /* requeue this IORB for next iteration */
+    iorb_requeue(iorb);
+  }
+}
+
+/******************************************************************************
+ * Verify readability of sectors on ATA device.
  */
 int ata_verify(IORBH _far *iorb, int slot)

@@ -476 +598 @@
                  AP_SECTOR_48, (u32) io->RBA, (u16) 0,
                  AP_COUNT, (u16) io->BlockCount,
-                 AP_SGLIST, io->pSGList, (u16) io->cSGList,
                  AP_DEVICE, 0x4000,
                  AP_END);

@@ -483 +604 @@
                  AP_SECTOR_28, (u32) io->RBA,
                  AP_COUNT, (u16) io->BlockCount & 0xffU,
-                 AP_SGLIST, io->pSGList, (u16) io->cSGList,
                  AP_END);
   }

@@ -497 +617 @@
   IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb;
   AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb);
+  ULONG sector = io->RBA + io->BlocksXferred;
+  USHORT count = io->BlockCount - io->BlocksXferred;
+  USHORT sg_indx;
+  USHORT sg_cnt;
   int p = iorb_unit_port(iorb);
   int d = iorb_unit_device(iorb);
   int rc;

-  /* prepare write command */
-  if (io->RBA >= (1UL << 28) || io->BlockCount > 256) {
-    /* need LBA48 for this command */
-    if (!ai->ports[p].devs[d].lba48) {
-      iorb_seterr(iorb, IOERR_RBA_LIMIT);
-      return(-1);
-    }
-    rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE_EXT,
-                 AP_SECTOR_48, (u32) io->RBA, (u16) 0,
-                 AP_COUNT, (u16) io->BlockCount,
-                 AP_SGLIST, io->pSGList, (u16) io->cSGList,
-                 AP_DEVICE, 0x4000,
-                 AP_END);
+  /* prepare write command while keeping an eye on S/G count limitations */
+  do {
+    sg_indx = ata_get_sg_indx(io);
+    sg_cnt = io->cSGList - sg_indx;
+
+    if (sector >= (1UL << 28) || count > 256) {
+      /* need LBA48 for this command */
+      if (!ai->ports[p].devs[d].lba48) {
+        iorb_seterr(iorb, IOERR_RBA_LIMIT);
+        return(-1);
+      }
+      rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE_EXT,
+                   AP_SECTOR_48, (u32) sector, (u16) 0,
+                   AP_COUNT, (u16) count,
+                   AP_SGLIST, io->pSGList + sg_indx, (u16) sg_cnt,
+                   AP_DEVICE, 0x4000,
+                   AP_WRITE,
+                   AP_END);
+    } else {
+      rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE,
+                   AP_SECTOR_28, (u32) sector,
+                   AP_COUNT, (u16) count & 0xffU,
+                   AP_SGLIST, io->pSGList + sg_indx, (u16) sg_cnt,
+                   AP_DEVICE, 0x4000,
+                   AP_WRITE,
+                   AP_END);
+    }
+
+    if (rc > 0) {
+      /* couldn't map all S/G elements */
+      ata_max_sg_cnt(io, sg_indx, (USHORT) rc, &sg_cnt, &count);
+    }
+  } while (rc > 0 && sg_cnt > 0);
+
+  if (rc == 0) {
+    add_workspace(iorb)->blocks = count;
+    add_workspace(iorb)->ppfunc = ata_write_pp;
+
+  } else if (rc > 0) {
+    iorb_seterr(iorb, IOERR_CMD_SGLIST_BAD);
+
   } else {
-    rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE,
-                 AP_SECTOR_28, (u32) io->RBA,
-                 AP_COUNT, (u16) io->BlockCount & 0xffU,
-                 AP_SGLIST, io->pSGList, (u16) io->cSGList,
-                 AP_DEVICE, 0x4000,
-                 AP_END);
+    iorb_seterr(iorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
   }

   return(rc);
+}
+
+/******************************************************************************
+ * Post processing function for ata_write(); this function updates the
+ * BlocksXferred counter in the IORB and, if not all blocks have been
+ * transferred, requeues the IORB to process the remaining sectors.
+ */
+void ata_write_pp(IORBH _far *iorb)
+{
+  IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb;
+
+  io->BlocksXferred += add_workspace(iorb)->blocks;
+  ddprintf("ata_write_pp(): blocks transferred = %d\n", (int) io->BlocksXferred);
+
+  if (io->BlocksXferred >= io->BlockCount) {
+    /* we're done; tell IRQ handler the IORB is complete */
+    add_workspace(iorb)->complete = 1;
+  } else {
+    /* requeue this IORB for next iteration */
+    iorb_requeue(iorb);
+  }
 }

@@ -536 +704 @@

 /******************************************************************************
- * Request sense information (which means "read ATA log page" for ATA devices)
+ * Request sense information for a failed command. Since there is no "request
+ * sense" command for ATA devices, we need to read the current error code from
+ * the AHCI task file register and fabricate the sense information.
+ *
+ * NOTES:
+ *
+ * - This function must be called right after an ATA command has failed and
+ *   before any other commands are queued on the corresponding port. This
+ *   function is typically called in the port restart context hook which is
+ *   triggered by an AHCI error interrupt.
+ *
+ * - The ATA error bits are a complete mess. We'll try and catch the most
+ *   interesting error codes (such as medium errors) and report everything
+ *   else with a generic error code.
  */
 int ata_req_sense(IORBH _far *iorb, int slot)
 {
-  iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
+  AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb);
+  u8 _far *port_mmio = port_base(ai, iorb_unit_port(iorb));
+  u32 tf_data = readl(port_mmio + PORT_TFDATA);
+  u8 err = (u8) (tf_data >> 8);
+  u8 sts = (u8) (tf_data);
+
+  if (sts & ATA_ERR) {
+    if (sts & ATA_DF) {
+      /* there is a device-specific error condition */
+      if (err & ATA_ICRC) {
+        iorb_seterr(iorb, IOERR_ADAPTER_DEVICEBUSCHECK);
+      } else if (err & ATA_UNC) {
+        iorb_seterr(iorb, IOERR_MEDIA);
+      } else if (err & ATA_IDNF) {
+        iorb_seterr(iorb, IOERR_RBA_ADDRESSING_ERROR);
+      } else {
+        iorb_seterr(iorb, IOERR_DEVICE_NONSPECIFIC);
+      }
+
+    } else {
+      iorb_seterr(iorb, IOERR_DEVICE_NONSPECIFIC);
+    }
+  }
+
+  /* TBD: fill in SCSI sense buffer in IORB */
+
+  /* Return an error to indicate there's no HW command to be submitted and
+   * that the IORB can be completed "as is" (the upstream code expects the
+   * IORB error code, if any, to be set when this happens and this is exactly
+   * what this function is all about).
+   */
   return(-1);
 }
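The new split logic in ata_read()/ata_write() hinges on ata_max_sg_cnt() finding the largest S/G prefix that ends exactly on a sector boundary. The standalone sketch below reproduces that boundary search in isolation; sg_entry and max_sg_on_boundary are illustrative stand-ins for the OS/2 SCATGATENTRY machinery, not driver definitions.

#include <stdio.h>

typedef struct { unsigned long len; } sg_entry;  /* only XferBufLen matters */

/* Walk at most 'sg_max' entries from 'start' and return the largest count
 * that ends exactly on a sector boundary; '*sectors' receives the number of
 * whole sectors covered. A return of 0 means no usable split point exists. */
static int max_sg_on_boundary(const sg_entry *sg, int start, int total,
                              int sg_max, unsigned long block_size,
                              unsigned long *sectors)
{
  unsigned long offset = 0;
  int best = 0, i;

  *sectors = 0;
  for (i = start; i < total && (i - start) < sg_max; i++) {
    offset += sg[i].len;
    if (offset % block_size == 0) {    /* this entry ends on a boundary */
      best = i - start + 1;
      *sectors = offset / block_size;
    }
  }
  return best;
}

int main(void)
{
  /* 512-byte sectors; the second and third entries straddle a boundary */
  sg_entry sg[] = { {512}, {256}, {256}, {1024} };
  unsigned long sectors;
  int cnt = max_sg_on_boundary(sg, 0, 4, 3, 512, &sectors);

  /* prints: can map 3 S/G entries covering 2 sectors */
  printf("can map %d S/G entries covering %lu sectors\n", cnt, sectors);
  return 0;
}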
trunk/src/ata.h (r4 → r8)

@@ -396 +396 @@
   AP_SGLIST,      /* [void _far *, u16] buffer S/G (SCATGATENTRY/count) */
   AP_VADDR,       /* [void _far *, u16] buffer virtual address (buf/len) */
-  AP_H2D,         /* []                 host-to-device direction */
+  AP_WRITE,       /* []                 indicates a DMA write operation */
   AP_AHCI_FLAGS,  /* [u16]              AHCI command header flags */
   AP_ATAPI_CMD,   /* [void _far *, u16] ATAPI command (CDB) and length */

@@ -442 +442 @@
 extern void   ata_cmd_to_fis   (u8 _far *fis, ATA_CMD _far *cmd,
                                 int device);
+extern USHORT ata_get_sg_indx  (IORB_EXECUTEIO _far *io);
+extern void   ata_max_sg_cnt   (IORB_EXECUTEIO _far *io, USHORT sg_indx,
+                                USHORT sg_max, USHORT _far *sg_cnt,
+                                USHORT _far *sector_cnt);

 extern int    ata_get_geometry (IORBH _far *iorb, int slot);

@@ -447 +451 @@
 extern int    ata_unit_ready   (IORBH _far *iorb, int slot);
 extern int    ata_read         (IORBH _far *iorb, int slot);
+extern void   ata_read_pp      (IORBH _far *iorb);
 extern int    ata_verify       (IORBH _far *iorb, int slot);
 extern int    ata_write        (IORBH _far *iorb, int slot);
+extern void   ata_write_pp     (IORBH _far *iorb);
 extern int    ata_execute_ata  (IORBH _far *iorb, int slot);
 extern int    ata_req_sense    (IORBH _far *iorb, int slot);
+
 extern char  *ata_dev_name     (u16 *id_buf);
+
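The AP_* tags above form a small varargs protocol: ata_cmd() takes (tag, value...) pairs terminated by AP_END, which v_ata_cmd() consumes with va_arg. The sketch below shows only that consumption pattern with a reduced, hypothetical tag set (XP_*); it is not the driver's actual parser.

#include <stdarg.h>
#include <stdio.h>

enum { XP_END, XP_COUNT, XP_SECTOR };  /* illustrative stand-ins for AP_* */

static void build_cmd(int cmd, ...)
{
  va_list va;
  int tag;

  va_start(va, cmd);
  /* walk the tag list until the terminator, pulling each tag's payload */
  while ((tag = va_arg(va, int)) != XP_END) {
    switch (tag) {
    case XP_COUNT:
      printf("cmd 0x%02x: count = %u\n", cmd, va_arg(va, unsigned));
      break;
    case XP_SECTOR:
      printf("cmd 0x%02x: sector = %lu\n", cmd, va_arg(va, unsigned long));
      break;
    }
  }
  va_end(va);
}

int main(void)
{
  /* mirrors the call style of ata_cmd(..., AP_SECTOR_28, ..., AP_END) */
  build_cmd(0x25, XP_SECTOR, 1234UL, XP_COUNT, 8U, XP_END);
  return 0;
}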
trunk/src/ctxhook.c (r4 → r8)

@@ -82 +82 @@
 void restart_ctxhook(ULONG parm)
 {
+  IORB_QUEUE done_queue;
   AD_INFO *ai;
   IORBH _far *problem_iorb;

@@ -92 +93 @@
   int a;
   int p;
-  int d;

   dprintf("restart_ctxhook() started\n");
+  memset(&done_queue, 0x00, sizeof(done_queue));
+
   spin_lock(drv_lock);

@@ -108 +110 @@
   for (p = 0; p <= ai->port_max; p++) {
     if (ports_to_restart[a] & (1UL << p)) {
+      ports_to_restart[a] &= ~(1UL << p);

       /* restart this port */
-      ports_to_restart[a] &= ~(1UL << p);
       port_mmio = port_base(ai, p);
       problem_iorb = NULL;

@@ -124 +126 @@
       if (aws->queued_hw) {
         if (ai->ports[p].ncq_cmds != 0) {
+          /* NCQ commands active; force non-NCQ mode and trigger port reset */
+          aws->no_ncq = 1;
           need_reset = 1;
         } else if (aws->cmd_slot == ccs) {

@@ -129 +133 @@
           problem_iorb = iorb;
         }
-
-        /* requeue this command with the 'no_ncq' flag set */
-        aws_free(aws);
-        memset(&iorb->ADDWorkSpace, sizeof(iorb->ADDWorkSpace), 0x00);
-        aws->no_ncq = 1;
+        iorb_requeue(iorb);

         /* remove requeued command from the issued command bitmaps */

@@ -166 +166 @@
       ai->busy = 0;

-      /* reset port status */
-      ai->ports[p].cmd_slot = 0;
+      /* reset internal port status */
       ai->ports[p].ncq_cmds = 0;
       ai->ports[p].reg_cmds = 0;
-
-      if (!need_reset && problem_iorb != NULL) {
-        /* request sense data for the failing command in cmd slot #0 */
-        ADD_WORKSPACE _far *aws = add_workspace(problem_iorb);
-        aws->processing = 1;
-        aws->queued_hw = 1;
-        d = iorb_unit_device(problem_iorb);
-        if (ai->ports[p].devs[d].atapi) {
-          atapi_req_sense(problem_iorb, 0);
+      ai->ports[p].cmd_slot = 0;
+
+      if (problem_iorb != NULL) {
+        /* get details about the error that caused this IORB to fail */
+        if (need_reset) {
+          /* no way to retrieve error details after a reset */
+          iorb_seterr(problem_iorb, IOERR_DEVICE_NONSPECIFIC);
+          iorb_queue_del(&ai->ports[p].iorb_queue, problem_iorb);
+          iorb_queue_add(&done_queue, problem_iorb);

         } else {
-          ata_req_sense(problem_iorb, 0);
+          /* get sense information */
+          ADD_WORKSPACE _far *aws = add_workspace(problem_iorb);
+          int d = iorb_unit_device(problem_iorb);
+          int (*req_sense)(IORBH _far *, int) =
+              (ai->ports[p].devs[d].atapi) ? atapi_req_sense : ata_req_sense;
+
+          aws->processing = 1;
+          aws->queued_hw = 1;
+
+          if (req_sense(problem_iorb, 0) == 0) {
+            /* execute request sense on slot #0 before anything else comes along */
+            ai->ports[p].reg_cmds = 1;
+            writel(port_mmio + PORT_CMD_ISSUE, 1);
+            readl(port_mmio);   /* flush */
+
+          } else {
+            /* IORB is expected to contain the error code; just move to done queue */
+            iorb_queue_del(&ai->ports[p].iorb_queue, problem_iorb);
+            iorb_queue_add(&done_queue, problem_iorb);
+          }
         }
-        ai->ports[p].reg_cmds = 1;
-        writel(port_mmio + PORT_CMD_ISSUE, 1);
-        readl(port_mmio);   /* flush */
       }

@@ -190 +206 @@


+  spin_unlock(drv_lock);
+
+  /* call notification routine on all IORBs which have completed */
+  for (iorb = done_queue.root; iorb != NULL; iorb = next) {
+    next = iorb->pNxtIORB;
+    if (iorb->RequestControl & IORB_ASYNC_POST) {
+      iorb->NotifyAddress(iorb);
+    }
+  }
+
   /* restart engine to resume IORB processing */
+  spin_lock(drv_lock);
   trigger_engine();
   spin_unlock(drv_lock);

@@ -283 +310 @@
   for (p = 0; p <= ai->port_max; p++) {
     if (ports_to_reset[a] & (1UL << p)) {
+      ports_to_reset[a] &= ~(1UL << p);
+
       /* Reset this port. Since this is a rather slow operation, we'll
        * release the spinlock while doing so. The adapter is marked as
        * busy for the duration of the reset to keep other activity from
        * interfering.
        */
-      ports_to_reset[a] &= ~(1UL << p);
       ai->busy = 1;
       spin_unlock(drv_lock);
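The restart hook now collects finished IORBs on a local done queue while the spinlock is held and runs their notification routines only after releasing it. The standalone sketch below shows that pattern with stand-in types and no real locking; iorb_t, lock() and unlock() are illustrative, not the driver's definitions.

#include <stdio.h>
#include <stddef.h>

typedef struct iorb {
  struct iorb *next;
  int          async_post;              /* stands in for IORB_ASYNC_POST */
  void       (*notify)(struct iorb *);  /* stands in for NotifyAddress */
} iorb_t;

static void lock(void)   { /* spin_lock(drv_lock) in the driver */ }
static void unlock(void) { /* spin_unlock(drv_lock) in the driver */ }

static void say_done(iorb_t *iorb) { printf("IORB %p done\n", (void *) iorb); }

static void complete_iorbs(iorb_t *port_queue)
{
  iorb_t *done, *iorb, *next;

  lock();
  /* error handling would move failed/finished IORBs to the local queue;
   * for the sketch we simply take the whole list */
  done = port_queue;
  unlock();

  /* callbacks run without the lock held: a notification routine may submit
   * new IORBs and re-enter the driver, which must not deadlock */
  for (iorb = done; iorb != NULL; iorb = next) {
    next = iorb->next;
    if (iorb->async_post) {
      iorb->notify(iorb);
    }
  }
}

int main(void)
{
  iorb_t b = { NULL, 1, say_done };
  iorb_t a = { &b,   1, say_done };

  complete_iorbs(&a);
  return 0;
}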
trunk/src/os2ahci.c (r4 → r8)

@@ -700 +700 @@
   ptr->AdapterHostBus = AI_HOSTBUS_OTHER | AI_BUSWIDTH_32BIT;
   ptr->AdapterFlags = AF_16M | AF_HW_SCATGAT;
-  ptr->MaxHWSGList = AHCI_MAX_SG;
+
+  /* AHCI limits S/G elements to 22 bits, thus we'll report only half of
+   * our S/G list buffers to reduce complexity. The command preparation code
+   * will always try to map as many S/G elements as possible so the physical
+   * S/G list capacity is not really wasted except in rare conditions where
+   * we need to split commands with long S/G lists without any suitable split
+   * points except those at the reported MaxHWSGList.
+   */
+  ptr->MaxHWSGList = AHCI_MAX_SG / 2;

   if (!ad_info->port_scan_done) {

@@ -978 +986 @@
 }

+/******************************************************************************
+ * Requeue the specified IORB such that it will be sent downstream for
+ * processing again. This includes freeing all resources currently allocated
+ * (timer, buffer, ...) and resetting the flags to 0.
+ *
+ * The following flags are preserved:
+ *  - no_ncq
+ */
+void iorb_requeue(IORBH _far *iorb)
+{
+  ADD_WORKSPACE _far *aws = add_workspace(iorb);
+  u16 no_ncq = aws->no_ncq;
+
+  aws_free(aws);
+  memset(aws, 0x00, sizeof(*aws));
+  aws->no_ncq = no_ncq;
+}
+
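iorb_requeue() resets the per-IORB workspace for the retry but carries the no_ncq flag over, so a command that failed under NCQ is retried as a regular command. A standalone illustration of that save/clear/restore step, using a stand-in struct rather than the real ADD_WORKSPACE from os2ahci.h:

#include <stdio.h>
#include <string.h>

typedef struct {
  unsigned long timer;        /* resources released before the retry */
  unsigned processing : 1;
  unsigned queued_hw  : 1;
  unsigned no_ncq     : 1;    /* the one flag that must survive a requeue */
} workspace_t;

static void requeue(workspace_t *aws)
{
  unsigned no_ncq = aws->no_ncq;   /* save */

  /* aws_free(aws) would release the timer/buffer here */
  memset(aws, 0x00, sizeof(*aws)); /* back to a pristine state */
  aws->no_ncq = no_ncq;            /* restore: the retry stays non-NCQ */
}

int main(void)
{
  workspace_t aws = { 42UL, 1, 1, 1 };

  requeue(&aws);
  /* prints: processing=0 queued_hw=0 no_ncq=1 */
  printf("processing=%u queued_hw=%u no_ncq=%u\n",
         aws.processing, aws.queued_hw, aws.no_ncq);
  return 0;
}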
trunk/src/os2ahci.h (r4 → r8)

@@ -317 +317 @@
 /* ADD workspace in IORB (must not exceed 16 bytes) */
 typedef struct {
-  IORBH _far *next;                  /* link to next IORB in our own queues */
   void (*ppfunc)(IORBH _far *iorb);  /* post-processing function */
   void *buf;                         /* response buffer (e.g. for identify cmds) */
   ULONG timer;                       /* timer for timeout procesing */
+  USHORT blocks;                     /* number of blocks to be transferred */
   unsigned processing : 1;           /* IORB is being processd */
   unsigned idempotent : 1;           /* IORB is idempotent (can be retried) */
   unsigned queued_hw  : 1;           /* IORB has been queued to hardware */
-  unsigned no_ncq     : 1;           /* don't use native command queuing */
+  unsigned no_ncq     : 1;           /* must not use native command queuing */
+  unsigned is_ncq     : 1;           /* should use native command queueing */
+  unsigned complete   : 1;           /* IORB has completed processing */
   unsigned cmd_slot   : 4;           /* AHCI command slot for this IORB */
 } ADD_WORKSPACE;

@@ -357 +359 @@
 extern void iorb_seterr (IORBH _far *iorb, USHORT error_code);
 extern void iorb_done (IORBH _far *iorb);
+extern void iorb_requeue (IORBH _far *iorb);

 /* ahci.c */

@@ -380 +383 @@
                                    int (*func)(IORBH _far *, int),
                                    ULONG timeout);
+extern int ahci_exec_polled_cmd (AD_INFO *ai, int p, int d,
+                                 int timeout, int cmd, ...);

 extern int ahci_intr (u16 irq);