Changeset 8 for trunk


Ignore:
Timestamp:
Sep 10, 2010, 11:30:39 AM (15 years ago)
Author:
markus
Message:

latest changes by Christian

Location:
trunk/src
Files:
2 deleted
7 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/ahci.c

    r4 r8  
    3737/* -------------------------- function prototypes -------------------------- */
    3838
    39 static int  exec_polled_cmd       (AD_INFO *ai, int p, int d, int timeout,
    40                                    int cmd, ...);
    4139static void ahci_setup_device     (AD_INFO *ai, int p, int d, u16 *id_buf);
    4240static void _far timeout_callback (ULONG timer_handle, ULONG p1, ULONG p2);
     
    265263
    266264  /* set maximum command slot number */
    267   ai->cmd_max = (u16) ((ai->cap >> 8) & 31);
     265  ai->cmd_max = (u16) ((ai->cap >> 8) & 0x1f);
    268266
    269267  return(0);
     
    404402 *  - This function is expected to be called with the spinlock released but
    405403 *    the corresponding adapter's busy flag set. It will acquire the spinlock
    406  *    temporarily to allocate/free memory for ATA identify buffer.
     404 *    temporarily to allocate/free memory for the ATA identify buffer.
    407405 */
    408406int ahci_scan_ports(AD_INFO *ai)
     
    457455      /* this port has a device attached and is ready to accept commands */
    458456      ddprintf("port #%d seems to be attached to a device; probing...\n", p);
    459       rc = exec_polled_cmd(ai, p, 0, 500, ATA_CMD_ID_ATA,
    460                            AP_VADDR, (void _far *) id_buf, 512,
    461                            AP_END);
     457      rc = ahci_exec_polled_cmd(ai, p, 0, 500, ATA_CMD_ID_ATA,
     458                                AP_VADDR, (void _far *) id_buf, 512,
     459                                AP_END);
    462460
    463461      if (rc != 0 || id_buf[ATA_ID_CONFIG] & (1U << 15)) {
    464462        /* this might be an ATAPI device; run IDENTIFY_PACKET_DEVICE */
    465         rc = exec_polled_cmd(ai, p, 0, 500, ATA_CMD_ID_ATAPI,
    466                              AP_VADDR, (void _far *) id_buf, 512,
    467                              AP_END);
     463        rc = ahci_exec_polled_cmd(ai, p, 0, 500, ATA_CMD_ID_ATAPI,
     464                                  AP_VADDR, (void _far *) id_buf, 512,
     465                                  AP_END);
    468466      }
    469467
     
    826824{
    827825  volatile u32 *cmds;
     826  ADD_WORKSPACE _far *aws = add_workspace(iorb);
    828827  AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb);
    829828  P_INFO *port = ai->ports + iorb_unit_port(iorb);
    830829  ULONG timeout = (iorb->Timeout > 0) ? iorb->Timeout : DEFAULT_TIMEOUT;
    831830  u8 _far *port_mmio = port_base(ai, iorb_unit_port(iorb));
    832   int is_ncq = ((ai->cap & HOST_CAP_NCQ) &&
    833                 ncq_capable &&
    834                 !add_workspace(iorb)->no_ncq);
    835831  int i;
    836832
    837   /* NCQ disabled temporarily until non-NCQ commands are working fine */
    838   is_ncq = 0;
     833  /* determine whether this will be an NCQ request */
     834  aws->is_ncq = ((ai->cap & HOST_CAP_NCQ) && ncq_capable && !aws->no_ncq);
     835
     836  /* NCQ disabled temporarily until non-NCQ commands are fully working */
     837  aws->is_ncq = 0;
    839838
    840839  /* check whether adapter is available */
     
    851850
    852851    /* prevent NCQ/regular command mix */
    853     if (is_ncq && port->reg_cmds == 0 || !is_ncq && port->ncq_cmds == 0) {
     852    if (aws->is_ncq && port->reg_cmds == 0 ||
     853        !aws->is_ncq && port->ncq_cmds == 0) {
    854854
    855855      /* Find next available command slot. We use a simple round-robin
     
    857857       * from stalling when new commands are coming in frequently.
    858858       */
    859       cmds = (is_ncq) ? &port->ncq_cmds : &port->reg_cmds;
     859      cmds = (aws->is_ncq) ? &port->ncq_cmds : &port->reg_cmds;
    860860      for (i = 0; i <= ai->cmd_max; i++) {
    861861        if ((*cmds & (1UL << port->cmd_slot)) == 0) {
    862862          break;
    863863        }
    864         if (++(port->cmd_slot) >= ai->cmd_max) {
     864        if (++(port->cmd_slot) > ai->cmd_max) {
    865865          port->cmd_slot = 0;
    866866        }
     
    879879
    880880        /* start timer for this IORB */
    881         ADD_StartTimerMS(&add_workspace(iorb)->timer, timeout,
    882                          (PFN) timeout_callback, iorb, 0);
    883 
    884         /* update IORB and increment next command index */
    885         add_workspace(iorb)->queued_hw = 1;
    886         add_workspace(iorb)->cmd_slot = port->cmd_slot;
    887         if (++(port->cmd_slot) >= ai->cmd_max) {
    888           port->cmd_slot = 0;
    889         }
     881        ADD_StartTimerMS(&aws->timer, timeout, (PFN) timeout_callback, iorb, 0);
     882
     883        /* update IORB */
     884        aws->queued_hw = 1;
     885        aws->cmd_slot = port->cmd_slot;
    890886
    891887        /* issue command to hardware */
    892         dprintf("issuing command on slot %d\n", port->cmd_slot);
     888        ddprintf("issuing command on slot %d\n", port->cmd_slot);
    893889        *cmds |= (1UL << port->cmd_slot);
    894         if (is_ncq) {
     890        if (aws->is_ncq) {
    895891          writel(port_mmio + PORT_SCR_ACT, (1UL << port->cmd_slot));
    896892          readl(port_mmio + PORT_SCR_ACT); /* flush */
     
    899895        readl(port_mmio + PORT_CMD_ISSUE); /* flush */
    900896
     897        /* make sure next cmd won't use the same slot to prevent starvation */
     898        if (++(port->cmd_slot) > ai->cmd_max) {
     899          port->cmd_slot = 0;
     900        }
    901901        spin_unlock(drv_lock);
    902902        return;
     
    906906
    907907  /* requeue this IORB; it will be picked up again in trigger_engine() */
    908   add_workspace(iorb)->processing = 0;
     908  aws->processing = 0;
    909909  spin_unlock(drv_lock);
    910910}
     
    916916 *
    917917 *  - We need to restore the BIOS configuration after we're done with this
    918  *    command because we might still need the BIOS to load additional drivers.
     918 *    command because someone might still call int 13 routines; sending
     919 *    asynchronous commands and waiting for interrupts to indicate completion
     920 *    won't work in such a scenario.
    919921 *  - Our context hooks won't work while the device managers are initializing
    920922 *    (they can't yield at init time).
     
    951953  }
    952954
    953   /* restart port (includes the necessary port configuration */
     955  /* restart port (includes the necessary port configuration) */
    954956  if (ahci_stop_port(ai, p) || ahci_start_port(ai, p, 0)) {
    955957    iorb_seterr(iorb, IOERR_ADAPTER_NONSPECIFIC);
     
    983985      if (add_workspace(iorb)->ppfunc != NULL) {
    984986        add_workspace(iorb)->ppfunc(iorb);
     987      } else {
     988        add_workspace(iorb)->complete = 1;
    985989      }
    986990    }
     
    994998  ahci_restore_bios_config(ai);
    995999
    996   iorb_done(iorb);
     1000  if (add_workspace(iorb)->complete | (iorb->Status | IORB_ERROR)) {
     1001    aws_free(add_workspace(iorb));
     1002    iorb_done(iorb);
     1003  }
    9971004  return;
     1005}
     1006
     1007/******************************************************************************
     1008 * Execute polled ATA/ATAPI command. This function will block until the command
     1009 * has completed or the timeout has expired, thus it should only be used during
     1010 * initialization. Furthermore, it will always use command slot zero.
     1011 *
     1012 * The difference to ahci_exec_polled_iorb() is that this function executes
     1013 * arbitrary ATA/ATAPI commands outside the context of an IORB. It's typically
     1014 * used when scanning for devices during initialization.
     1015 */
     1016int ahci_exec_polled_cmd(AD_INFO *ai, int p, int d, int timeout, int cmd, ...)
     1017{
     1018  va_list va;
     1019  u8 _far *port_mmio = port_base(ai, p);
     1020  u32 tmp;
     1021  int rc;
     1022
     1023  /* verify that command slot 0 is idle */
     1024  if (readl(port_mmio + PORT_CMD_ISSUE) & 1) {
     1025    ddprintf("port %d slot 0 is not idle; not executing polled cmd\n", p);
     1026    return(-1);
     1027  }
     1028
     1029  /* fill in command slot 0 */
     1030  va_start(va, cmd);
     1031  if ((rc = v_ata_cmd(ai, p, d, 0, cmd, va)) != 0) {
     1032    return(rc);
     1033  }
     1034
     1035  /* start command execution for slot 0 */
     1036  ddprintf("executing polled cmd...");
     1037  writel(port_mmio + PORT_CMD_ISSUE, 1);
     1038
     1039  /* wait until command has completed */
     1040  while (timeout > 0 && (readl(port_mmio + PORT_CMD_ISSUE) & 1)) {
     1041    mdelay(10);
     1042    timeout -= 10;
     1043  }
     1044  ddprintf(" done (time left = %d)\n", timeout);
     1045
     1046  /* check error condition */
     1047  if ((tmp = readl(port_mmio + PORT_SCR_ERR)) != 0) {
     1048    dprintf("SERR = 0x%08lx\n", tmp);
     1049    return(-1);
     1050  }
     1051  if (((tmp = readl(port_mmio + PORT_TFDATA)) & 0x89) != 0) {
     1052    dprintf("TFDATA = 0x%08lx\n", tmp);
     1053    return(-1);
     1054  }
     1055
     1056  return((timeout <= 0) ? -1 : 0);
    9981057}
    9991058
     
    11191178      if (aws->ppfunc != NULL) {
    11201179        aws->ppfunc(iorb);
     1180      } else {
     1181        aws->complete = 1;
    11211182      }
    1122       aws_free(aws);
    1123 
    1124       /* move IORB to our temporary done queue */
    1125       iorb_queue_del(&ai->ports[p].iorb_queue, iorb);
    1126       iorb_queue_add(&done_queue, iorb);
     1183
     1184      if (aws->complete) {
     1185        /* this IORB is complete */
     1186        aws_free(aws);
     1187
     1188        /* move IORB to our temporary done queue */
     1189        iorb_queue_del(&ai->ports[p].iorb_queue, iorb);
     1190        iorb_queue_add(&done_queue, iorb);
     1191      }
    11271192
    11281193      /* clear corresponding bit in issued command bitmaps */
     
    12051270void ahci_get_geometry(IORBH _far *iorb)
    12061271{
     1272  dprintf("ahci_get_geometry(%d.%d.%d)\n", (int) iorb_unit_adapter(iorb),
     1273          (int) iorb_unit_port(iorb), (int) iorb_unit_device(iorb));
     1274
    12071275  ahci_exec_iorb(iorb, 0, cmd_func(iorb, get_geometry));
    12081276}
     
    12131281void ahci_unit_ready(IORBH _far *iorb)
    12141282{
     1283  dprintf("ahci_unit_ready(%d.%d.%d)\n", (int) iorb_unit_adapter(iorb),
     1284          (int) iorb_unit_port(iorb), (int) iorb_unit_device(iorb));
     1285
    12151286  ahci_exec_iorb(iorb, 0, cmd_func(iorb, unit_ready));
    12161287}
     
    12211292void ahci_read(IORBH _far *iorb)
    12221293{
     1294  dprintf("ahci_read(%d.%d.%d, %ld, %ld)\n", (int) iorb_unit_adapter(iorb),
     1295          (int) iorb_unit_port(iorb), (int) iorb_unit_device(iorb),
     1296          (long) ((IORB_EXECUTEIO _far *) iorb)->RBA,
     1297          (long) ((IORB_EXECUTEIO _far *) iorb)->BlockCount);
     1298
    12231299  ahci_exec_iorb(iorb, 1, cmd_func(iorb, read));
    12241300
     
    12291305void ahci_verify(IORBH _far *iorb)
    12301306{
     1307  dprintf("ahci_verify(%d.%d.%d, %ld, %ld)\n", (int) iorb_unit_adapter(iorb),
     1308          (int) iorb_unit_port(iorb), (int) iorb_unit_device(iorb),
     1309          (long) ((IORB_EXECUTEIO _far *) iorb)->RBA,
     1310          (long) ((IORB_EXECUTEIO _far *) iorb)->BlockCount);
     1311
    12311312  ahci_exec_iorb(iorb, 0, cmd_func(iorb, verify));
    12321313}
     
    12371318void ahci_write(IORBH _far *iorb)
    12381319{
     1320  dprintf("ahci_write(%d.%d.%d, %ld, %ld)\n", (int) iorb_unit_adapter(iorb),
     1321          (int) iorb_unit_port(iorb), (int) iorb_unit_device(iorb),
     1322          (long) ((IORB_EXECUTEIO _far *) iorb)->RBA,
     1323          (long) ((IORB_EXECUTEIO _far *) iorb)->BlockCount);
     1324
    12391325  ahci_exec_iorb(iorb, 1, cmd_func(iorb, write));
    12401326}
     
    12481334  int p = iorb_unit_port(iorb);
    12491335  int d = iorb_unit_device(iorb);
     1336
     1337  dphex(((IORB_ADAPTER_PASSTHRU _far *) iorb)->pControllerCmd,
     1338        ((IORB_ADAPTER_PASSTHRU _far *) iorb)->ControllerCmdLen,
     1339        "ahci_execute_cdb(%d.%d.%d)", (int) iorb_unit_adapter(iorb),
     1340        (int) iorb_unit_port(iorb), (int) iorb_unit_device(iorb));
    12501341 
    12511342  if (ad_infos[a].ports[p].devs[d].atapi) {
     
    12661357  int d = iorb_unit_device(iorb);
    12671358 
     1359  dphex(((IORB_ADAPTER_PASSTHRU _far *) iorb)->pControllerCmd,
     1360        ((IORB_ADAPTER_PASSTHRU _far *) iorb)->ControllerCmdLen,
     1361        "ahci_execute_cdb(%d.%d.%d)", (int) iorb_unit_adapter(iorb),
     1362        (int) iorb_unit_port(iorb), (int) iorb_unit_device(iorb));
     1363 
    12681364  if (ad_infos[a].ports[p].devs[d].atapi) {
    12691365    iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
     
    12721368    ahci_exec_iorb(iorb, 0, ata_execute_ata);
    12731369  }
    1274 }
    1275 
    1276 /******************************************************************************
    1277  * Execute polled ATA/ATAPI command. This function will block until the command
    1278  * has completed or the timeout has expired, thus it should only be used during
    1279  * initialization. Furthermore, it will always use command slot zero...
    1280  */
    1281 static int exec_polled_cmd(AD_INFO *ai, int p, int d, int timeout, int cmd, ...)
    1282 {
    1283   va_list va;
    1284   u8 _far *port_mmio = port_base(ai, p);
    1285   u32 tmp;
    1286   int rc;
    1287 
    1288   /* verify that command slot 0 is idle */
    1289   if (readl(port_mmio + PORT_CMD_ISSUE) & 1) {
    1290     ddprintf("port %d slot 0 is not idle; not executing polled cmd\n", p);
    1291     return(-1);
    1292   }
    1293 
    1294   /* fill in command slot 0 */
    1295   va_start(va, cmd);
    1296   if ((rc = v_ata_cmd(ai, p, d, 0, cmd, va)) != 0) {
    1297     return(rc);
    1298   }
    1299 
    1300   /* start command execution for slot 0 */
    1301   ddprintf("executing polled cmd...");
    1302   writel(port_mmio + PORT_CMD_ISSUE, 1);
    1303 
    1304   /* wait until command has completed */
    1305   while (timeout > 0 && (readl(port_mmio + PORT_CMD_ISSUE) & 1)) {
    1306     mdelay(10);
    1307     timeout -= 10;
    1308   }
    1309   ddprintf(" done (time left = %d)\n", timeout);
    1310 
    1311   /* check error condition */
    1312   if ((tmp = readl(port_mmio + PORT_SCR_ERR)) != 0) {
    1313     dprintf("SERR = 0x%08lx\n", tmp);
    1314     return(-1);
    1315   }
    1316   if (((tmp = readl(port_mmio + PORT_TFDATA)) & 0x89) != 0) {
    1317     dprintf("TFDATA = 0x%08lx\n", tmp);
    1318     return(-1);
    1319   }
    1320 
    1321   return((timeout <= 0) ? -1 : 0);
    13221370}
    13231371
  • trunk/src/ahci.h

    r4 r8  
    209209 *       interface it's developed for, is based on x86 design patterns, we're
    210210 *       not even going to start making a difference between little and big
    211  *       endian architectures. PCI is little endoian, AHCI is little endian,
     211 *       endian architectures. PCI is little endian, AHCI is little endian,
    212212 *       x86 is little endian, and that's it.
    213213 */
  • trunk/src/ata.c

    r4 r8  
    9898      break;
    9999
    100     case AP_H2D:
     100    case AP_WRITE:
    101101      ahci_flags |= AHCI_CMD_WRITE;
    102102      break;
     
    221221   *    than AHCI_MAX_SG entries. In order to help the caller, the return value
    222222   *    of this function will indicate how many OS/2 S/G entries were
    223    *    successfully be mapped.
     223   *    successfully mapped.
    224224   *
    225225   */
     
    306306
    307307/******************************************************************************
     308 * Get index in S/G list for the number of transferred sectors in the IORB.
     309 *
     310 * Returning io->cSGList indicates an error.
     311 *
     312 * NOTE: OS/2 makes sure S/G lists are set up such that entries at the HW
     313 *       limit will never cross sector boundaries. This means that splitting
     314 *       S/G lists into multiple commands can be done without editing the S/G
     315 *       lists.
     316 */
     317u16 ata_get_sg_indx(IORB_EXECUTEIO _far *io)
     318{
     319  ULONG offset = io->BlocksXferred * io->BlockSize;
     320  USHORT i;
     321
     322  for (i = 0; i < io->cSGList && offset > 0; i++) {
     323    offset -= io->pSGList[i].XferBufLen;
     324  }
     325
     326  return(i);
     327}
     328
     329/******************************************************************************
     330 * Get max S/G count which will fit into our HW S/G buffers. This function is
     331 * called when the S/G list is too long and we need to split the IORB into
     332 * multiple commands. It returns both the number of sectors and S/G list
     333 * elements that we can handle in a single command.
     334 *
     335 * The parameter 'sg_indx' indicates the current start index in the S/G list
     336 * (0 if this is the first command iteration).
     337 *
     338 * The parameter 'sg_max' is the return value of v_ata_cmd() and indicates
     339 * how many S/G elements were successfully mapped. Whatever we return needs to
     340 * be less or equal to this value.
     341 *
     342 * Returning 0 in *sg_cnt indicates an error.
     343 *
     344 * NOTE: OS/2 makes sure S/G lists are set up such that entries at HW limits
     345 *       will never cross sector boundaries. This means that splitting S/G
     346 *       lists into multiple commands can be done without editing S/G list
     347 *       elements. Since AHCI only allows 22 bits for each S/G element, the
     348 *       hardware limits are reported as AHCI_MAX_SG / 2 but will vary based
     349 *       on the actual length of S/G elements. This function looks for the
     350 *       maximum number of S/G elements that can be mapped on sector
     351 *       boundaries which will still fit into our HW S/G list.
     352 */
     353void ata_max_sg_cnt(IORB_EXECUTEIO _far *io, USHORT sg_indx, USHORT sg_max,
     354                    USHORT _far *sg_cnt, USHORT _far *sector_cnt)
     355{
     356  ULONG max_sector_cnt = 0;
     357  USHORT max_sg_cnt = 0;
     358  ULONG offset = 0;
     359  USHORT i;
     360
     361  for (i = sg_indx; i < io->cSGList; i++) {
     362    if (i - sg_indx >= sg_max) {
     363      /* we're beyond the number of S/G elements we can map */
     364      break;
     365    }
     366
     367    offset += io->pSGList[i].XferBufLen;
     368    if (offset % io->BlockSize == 0) {
     369      /* this S/G element ends on a sector boundary */
     370      max_sector_cnt = offset / io->BlockSize;
     371      max_sg_cnt = i + 1;
     372    }
     373  }
     374
     375  /* return the best match we found so far (0 indicating failure) */
     376  *sector_cnt = max_sector_cnt;
     377  *sg_cnt = max_sg_cnt;
     378}
     379
     380
     381/******************************************************************************
    308382 * Get device or media geometry. Device and media geometry are expected to be
    309383 * the same for non-removable devices, which will always be the case for the
     
    334408  if (rc != 0) {
    335409    free(aws->buf);
    336     aws->buf = NULL;
    337410    iorb_seterr(iorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
    338411  }
     
    404477                               ((u32) geometry->NumHeads *
    405478                                (u32) geometry->SectorsPerTrack);
     479
     480  /* tell interrupt handler that this IORB is complete */
     481  add_workspace(iorb)->complete = 1;
    406482}
    407483
     
    426502  IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb;
    427503  AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb);
     504  ULONG sector = io->RBA + io->BlocksXferred;
     505  USHORT count = io->BlockCount - io->BlocksXferred;
     506  USHORT sg_indx;
     507  USHORT sg_cnt;
    428508  int p = iorb_unit_port(iorb);
    429509  int d = iorb_unit_device(iorb);
    430510  int rc;
    431511
    432   /* prepare read command */
    433   if (io->RBA >= (1UL << 28) || io->BlockCount > 256) {
    434     /* need LBA48 for this command */
    435     if (!ai->ports[p].devs[d].lba48) {
    436       iorb_seterr(iorb, IOERR_RBA_LIMIT);
    437       return(-1);
    438     }
    439     rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ_EXT,
    440                  AP_SECTOR_48, (u32) io->RBA, (u16) 0,
    441                  AP_COUNT,     (u16) io->BlockCount,
    442                  AP_SGLIST,    io->pSGList, (u16) io->cSGList,
    443                  AP_DEVICE,    0x4000,
    444                  AP_END);
     512  /* prepare read command while keeping an eye on S/G count limitations */
     513  do {
     514    sg_indx = ata_get_sg_indx(io);
     515    sg_cnt = io->cSGList - sg_indx;
     516
     517    if (sector >= (1UL << 28) || count > 256) {
     518      /* need LBA48 for this command */
     519      if (!ai->ports[p].devs[d].lba48) {
     520        iorb_seterr(iorb, IOERR_RBA_LIMIT);
     521        return(-1);
     522      }
     523      rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ_EXT,
     524                   AP_SECTOR_48, (u32) sector, (u16) 0,
     525                   AP_COUNT,     (u16) count,
     526                   AP_SGLIST,    io->pSGList + sg_indx, (u16) sg_cnt,
     527                   AP_DEVICE,    0x4000,
     528                   AP_END);
     529    } else {
     530      rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ,
     531                   AP_SECTOR_28, (u32) sector,
     532                   AP_COUNT,     (u16) count & 0xffU,
     533                   AP_SGLIST,    io->pSGList + sg_indx, (u16) sg_cnt,
     534                   AP_DEVICE,    0x4000,
     535                   AP_END);
     536    }
     537
     538    if (rc > 0) {
     539      /* couldn't map all S/G elements */
     540      ata_max_sg_cnt(io, sg_indx, (USHORT) rc, &sg_cnt, &count);
     541    }
     542  } while (rc > 0 && sg_cnt > 0);
     543
     544  if (rc == 0) {
     545    add_workspace(iorb)->blocks = count;
     546    add_workspace(iorb)->ppfunc = ata_read_pp;
     547
     548  } else if (rc > 0) {
     549    iorb_seterr(iorb, IOERR_CMD_SGLIST_BAD);
     550
    445551  } else {
    446     rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ,
    447                  AP_SECTOR_28, (u32) io->RBA,
    448                  AP_COUNT,     (u16) io->BlockCount & 0xffU,
    449                  AP_SGLIST,    io->pSGList, (u16) io->cSGList,
    450                  AP_DEVICE,    0x4000,
    451                  AP_END);
     552    iorb_seterr(iorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
    452553  }
    453554
     
    456557
    457558/******************************************************************************
    458  * Verify readability of sectors on AHCI device.
     559 * Post processing function for ata_read(); this function updates the
     560 * BlocksXferred counter in the IORB and, if not all blocks have been
     561 * transferred, requeues the IORB to process the remaining sectors.
     562 */
     563void ata_read_pp(IORBH _far *iorb)
     564{
     565  IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb;
     566
     567  io->BlocksXferred += add_workspace(iorb)->blocks;
     568  ddprintf("ata_read_pp(): blocks transferred = %d\n", (int) io->BlocksXferred);
     569
     570  if (io->BlocksXferred >= io->BlockCount) {
     571    /* we're done; tell IRQ handler the IORB is complete */
     572    add_workspace(iorb)->complete = 1;
     573  } else {
     574    /* requeue this IORB for next iteration */
     575    iorb_requeue(iorb);
     576  }
     577}
     578
     579/******************************************************************************
     580 * Verify readability of sectors on ATA device.
    459581 */
    460582int ata_verify(IORBH _far *iorb, int slot)
     
    476598                 AP_SECTOR_48, (u32) io->RBA, (u16) 0,
    477599                 AP_COUNT,     (u16) io->BlockCount,
    478                  AP_SGLIST,    io->pSGList, (u16) io->cSGList,
    479600                 AP_DEVICE,    0x4000,
    480601                 AP_END);
     
    483604                 AP_SECTOR_28, (u32) io->RBA,
    484605                 AP_COUNT,     (u16) io->BlockCount & 0xffU,
    485                  AP_SGLIST,    io->pSGList, (u16) io->cSGList,
    486606                 AP_END);
    487607  }
     
    497617  IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb;
    498618  AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb);
     619  ULONG sector = io->RBA + io->BlocksXferred;
     620  USHORT count = io->BlockCount - io->BlocksXferred;
     621  USHORT sg_indx;
     622  USHORT sg_cnt;
    499623  int p = iorb_unit_port(iorb);
    500624  int d = iorb_unit_device(iorb);
    501625  int rc;
    502626
    503   /* prepare write command */
    504   if (io->RBA >= (1UL << 28) || io->BlockCount > 256) {
    505     /* need LBA48 for this command */
    506     if (!ai->ports[p].devs[d].lba48) {
    507       iorb_seterr(iorb, IOERR_RBA_LIMIT);
    508       return(-1);
    509     }
    510     rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE_EXT,
    511                  AP_SECTOR_48, (u32) io->RBA, (u16) 0,
    512                  AP_COUNT,     (u16) io->BlockCount,
    513                  AP_SGLIST,    io->pSGList, (u16) io->cSGList,
    514                  AP_DEVICE,    0x4000,
    515                  AP_END);
     627  /* prepare write command while keeping an eye on S/G count limitations */
     628  do {
     629    sg_indx = ata_get_sg_indx(io);
     630    sg_cnt = io->cSGList - sg_indx;
     631
     632    if (sector >= (1UL << 28) || count > 256) {
     633      /* need LBA48 for this command */
     634      if (!ai->ports[p].devs[d].lba48) {
     635        iorb_seterr(iorb, IOERR_RBA_LIMIT);
     636        return(-1);
     637      }
     638      rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE_EXT,
     639                   AP_SECTOR_48, (u32) sector, (u16) 0,
     640                   AP_COUNT,     (u16) count,
     641                   AP_SGLIST,    io->pSGList + sg_indx, (u16) sg_cnt,
     642                   AP_DEVICE,    0x4000,
     643                   AP_WRITE,
     644                   AP_END);
     645    } else {
     646      rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE,
     647                   AP_SECTOR_28, (u32) sector,
     648                   AP_COUNT,     (u16) count & 0xffU,
     649                   AP_SGLIST,    io->pSGList + sg_indx, (u16) sg_cnt,
     650                   AP_DEVICE,    0x4000,
     651                   AP_WRITE,
     652                   AP_END);
     653    }
     654
     655    if (rc > 0) {
     656      /* couldn't map all S/G elements */
     657      ata_max_sg_cnt(io, sg_indx, (USHORT) rc, &sg_cnt, &count);
     658    }
     659  } while (rc > 0 && sg_cnt > 0);
     660
     661  if (rc == 0) {
     662    add_workspace(iorb)->blocks = count;
     663    add_workspace(iorb)->ppfunc = ata_write_pp;
     664
     665  } else if (rc > 0) {
     666    iorb_seterr(iorb, IOERR_CMD_SGLIST_BAD);
     667
    516668  } else {
    517     rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE,
    518                  AP_SECTOR_28, (u32) io->RBA,
    519                  AP_COUNT,     (u16) io->BlockCount & 0xffU,
    520                  AP_SGLIST,    io->pSGList, (u16) io->cSGList,
    521                  AP_DEVICE,    0x4000,
    522                  AP_END);
     669    iorb_seterr(iorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
    523670  }
    524671
    525672  return(rc);
     673}
     674
     675/******************************************************************************
     676 * Post processing function for ata_write(); this function updates the
     677 * BlocksXferred counter in the IORB and, if not all blocks have been
     678 * transferred, requeues the IORB to process the remaining sectors.
     679 */
     680void ata_write_pp(IORBH _far *iorb)
     681{
     682  IORB_EXECUTEIO _far *io = (IORB_EXECUTEIO _far *) iorb;
     683
     684  io->BlocksXferred += add_workspace(iorb)->blocks;
     685  ddprintf("ata_write_pp(): blocks transferred = %d\n", (int) io->BlocksXferred);
     686
     687  if (io->BlocksXferred >= io->BlockCount) {
     688    /* we're done; tell IRQ handler the IORB is complete */
     689    add_workspace(iorb)->complete = 1;
     690  } else {
     691    /* requeue this IORB for next iteration */
     692    iorb_requeue(iorb);
     693  }
    526694}
    527695
     
    536704
    537705/******************************************************************************
    538  * Request sense information (which means "read ATA log page" for ATA devices)
     706 * Request sense information for a failed command. Since there is no "request
     707 * sense" command for ATA devices, we need to read the current error code from
     708 * the AHCI task file register and fabricate the sense information.
     709 *
     710 * NOTES:
     711 *
     712 *   - This function must be called right after an ATA command has failed and
     713 *     before any other commands are queued on the corresponding port. This
     714 *     function is typically called in the port restart context hook which is
     715 *     triggered by an AHCI error interrupt.
     716 *
     717 *   - The ATA error bits are a complete mess. We'll try and catch the most
     718 *     interesting error codes (such as medium errors) and report everything
     719 *     else with a generic error code.
    539720 */
    540721int ata_req_sense(IORBH _far *iorb, int slot)
    541722{
    542   iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
     723  AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb);
     724  u8 _far *port_mmio = port_base(ai, iorb_unit_port(iorb));
     725  u32 tf_data = readl(port_mmio + PORT_TFDATA);
     726  u8 err = (u8) (tf_data >> 8);
     727  u8 sts = (u8) (tf_data);
     728
     729  if (sts & ATA_ERR) {
     730    if (sts & ATA_DF) {
     731      /* there is a device-specific error condition */
     732      if (err & ATA_ICRC) {
     733        iorb_seterr(iorb, IOERR_ADAPTER_DEVICEBUSCHECK);
     734      } else if (err & ATA_UNC) {
     735        iorb_seterr(iorb, IOERR_MEDIA);
     736      } else if (err & ATA_IDNF) {
     737        iorb_seterr(iorb, IOERR_RBA_ADDRESSING_ERROR);
     738      } else {
     739        iorb_seterr(iorb, IOERR_DEVICE_NONSPECIFIC);
     740      }
     741   
     742    } else {
     743      iorb_seterr(iorb, IOERR_DEVICE_NONSPECIFIC);
     744    }
     745  }
     746
     747  /* TBD: fill in SCSI sense buffer in IORB */
     748
     749  /* Return an error to indicate there's no HW command to be submitted and
     750   * that the IORB can be completed "as is" (the upstream code expects the
     751   * IORB error code, if any, to be set when this happens and this is exactly
     752   * what this function is all about).
     753   */
    543754  return(-1);
    544755}
  • trunk/src/ata.h

    r4 r8  
    396396  AP_SGLIST,       /* [void _far *, u16] buffer S/G (SCATGATENTRY/count)    */
    397397  AP_VADDR,        /* [void _far *, u16] buffer virtual address (buf/len)   */
    398   AP_H2D,          /* []                 host-to-device direction           */
     398  AP_WRITE,        /* []                 indicates a DMA write operation    */
    399399  AP_AHCI_FLAGS,   /* [u16]              AHCI command header flags          */
    400400  AP_ATAPI_CMD,    /* [void _far *, u16] ATAPI command (CDB) and length     */
     
    442442extern void      ata_cmd_to_fis      (u8 _far *fis, ATA_CMD _far *cmd,
    443443                                      int device);
     444extern USHORT    ata_get_sg_indx     (IORB_EXECUTEIO _far *io);
     445extern void      ata_max_sg_cnt      (IORB_EXECUTEIO _far *io, USHORT sg_indx,
     446                                      USHORT sg_max, USHORT _far *sg_cnt,
     447                                      USHORT _far *sector_cnt);
    444448
    445449extern int       ata_get_geometry    (IORBH _far *iorb, int slot);
     
    447451extern int       ata_unit_ready      (IORBH _far *iorb, int slot);
    448452extern int       ata_read            (IORBH _far *iorb, int slot);
     453extern void      ata_read_pp         (IORBH _far *iorb);
    449454extern int       ata_verify          (IORBH _far *iorb, int slot);
    450455extern int       ata_write           (IORBH _far *iorb, int slot);
     456extern void      ata_write_pp        (IORBH _far *iorb);
    451457extern int       ata_execute_ata     (IORBH _far *iorb, int slot);
    452458extern int       ata_req_sense       (IORBH _far *iorb, int slot);
     459
    453460extern char     *ata_dev_name        (u16 *id_buf);
     461
  • trunk/src/ctxhook.c

    r4 r8  
    8282void restart_ctxhook(ULONG parm)
    8383{
     84  IORB_QUEUE done_queue;
    8485  AD_INFO *ai;
    8586  IORBH _far *problem_iorb;
     
    9293  int a;
    9394  int p;
    94   int d;
    9595
    9696  dprintf("restart_ctxhook() started\n");
     97  memset(&done_queue, 0x00, sizeof(done_queue));
     98
    9799  spin_lock(drv_lock);
    98100
     
    108110    for (p = 0; p <= ai->port_max; p++) {
    109111      if (ports_to_restart[a] & (1UL << p)) {
     112        ports_to_restart[a] &= ~(1UL << p);
    110113
    111114        /* restart this port */
    112         ports_to_restart[a] &= ~(1UL << p);
    113115        port_mmio = port_base(ai, p);
    114116        problem_iorb = NULL;
     
    124126          if (aws->queued_hw) {
    125127            if (ai->ports[p].ncq_cmds != 0) {
     128              /* NCQ commands active; force non-NCQ mode and trigger port reset */
     129              aws->no_ncq = 1;
    126130              need_reset = 1;
    127131            } else if (aws->cmd_slot == ccs) {
     
    129133              problem_iorb = iorb;
    130134            }
    131 
    132             /* requeue this command with the 'no_ncq' flag set */
    133             aws_free(aws);
    134             memset(&iorb->ADDWorkSpace, sizeof(iorb->ADDWorkSpace), 0x00);
    135             aws->no_ncq = 1;
     135            iorb_requeue(iorb);
    136136
    137137            /* remove requeued command from the issued command bitmaps */
     
    166166        ai->busy = 0;
    167167
    168         /* reset port status */
    169         ai->ports[p].cmd_slot = 0;
     168        /* reset internal port status */
    170169        ai->ports[p].ncq_cmds = 0;
    171170        ai->ports[p].reg_cmds = 0;
    172 
    173         if (!need_reset && problem_iorb != NULL) {
    174           /* request sense data for the failing command in cmd slot #0 */
    175           ADD_WORKSPACE _far *aws = add_workspace(problem_iorb);
    176           aws->processing = 1;
    177           aws->queued_hw = 1;
    178           d = iorb_unit_device(problem_iorb);
    179           if (ai->ports[p].devs[d].atapi) {
    180             atapi_req_sense(problem_iorb, 0);
     171        ai->ports[p].cmd_slot = 0;
     172
     173        if (problem_iorb != NULL) {
     174          /* get details about the error that caused this IORB to fail */
     175          if (need_reset) {
     176            /* no way to retrieve error details after a reset */
     177            iorb_seterr(problem_iorb, IOERR_DEVICE_NONSPECIFIC);
     178            iorb_queue_del(&ai->ports[p].iorb_queue, problem_iorb);
     179            iorb_queue_add(&done_queue, problem_iorb);
     180
    181181          } else {
    182             ata_req_sense(problem_iorb, 0);
     182            /* get sense information */
     183            ADD_WORKSPACE _far *aws = add_workspace(problem_iorb);
     184            int d = iorb_unit_device(problem_iorb);
     185            int (*req_sense)(IORBH _far *, int) =
     186              (ai->ports[p].devs[d].atapi) ? atapi_req_sense : ata_req_sense;
     187
     188            aws->processing = 1;
     189            aws->queued_hw = 1;
     190
     191            if (req_sense(problem_iorb, 0) == 0) {
     192              /* execute request sense on slot #0 before anything else comes along */
     193              ai->ports[p].reg_cmds = 1;
     194              writel(port_mmio + PORT_CMD_ISSUE, 1);
     195              readl(port_mmio); /* flush */
     196
     197            } else {
     198              /* IORB is expected to contain the error code; just move to done queue */
     199              iorb_queue_del(&ai->ports[p].iorb_queue, problem_iorb);
     200              iorb_queue_add(&done_queue, problem_iorb);
     201            }
    183202          }
    184           ai->ports[p].reg_cmds = 1;
    185           writel(port_mmio + PORT_CMD_ISSUE, 1);
    186           readl(port_mmio); /* flush */
    187203        }
    188204      }
     
    190206  }
    191207
     208  spin_unlock(drv_lock);
     209
     210  /* call notification routine on all IORBs which have completed */
     211  for (iorb = done_queue.root; iorb != NULL; iorb = next) {
     212    next = iorb->pNxtIORB;
     213    if (iorb->RequestControl & IORB_ASYNC_POST) {
     214      iorb->NotifyAddress(iorb);
     215    }
     216  }
     217
    192218  /* restart engine to resume IORB processing */
     219  spin_lock(drv_lock);
    193220  trigger_engine();
    194221  spin_unlock(drv_lock);
     
    283310    for (p = 0; p <= ai->port_max; p++) {
    284311      if (ports_to_reset[a] & (1UL << p)) {
     312        ports_to_reset[a] &= ~(1UL << p);
     313
    285314        /* Reset this port. Since this is a rather slow operation, we'll
    286315         * release the spinlock while doing so. The adapter is marked as
     
    288317         * interfering.
    289318         */
    290         ports_to_reset[a] &= ~(1UL << p);
    291319        ai->busy = 1;
    292320        spin_unlock(drv_lock);
  • trunk/src/os2ahci.c

    r4 r8  
    700700    ptr->AdapterHostBus  = AI_HOSTBUS_OTHER | AI_BUSWIDTH_32BIT;
    701701    ptr->AdapterFlags    = AF_16M | AF_HW_SCATGAT;
    702     ptr->MaxHWSGList     = AHCI_MAX_SG;
     702
     703    /* AHCI limits S/G elements to 22 bits, thus we'll report only half of
     704     * our S/G list buffers to reduce complexity. The command preparation code
     705     * will always try to map as many S/G elements as possible so the physical
     706     * S/G list capacity is not really wasted except in rare conditions where
     707     * we need to split commands with long S/G lists without any suitable split
     708     * points except those at the reported MaxHWSGList.
     709     */
     710    ptr->MaxHWSGList     = AHCI_MAX_SG / 2;
    703711
    704712    if (!ad_info->port_scan_done) {
     
    978986}
    979987
     988/******************************************************************************
     989 * Requeue the specified IORB such that it will be sent downstream for
     990 * processing again. This includes freeing all resources currently allocated
     991 * (timer, buffer, ...) and resetting the flags to 0.
     992 *
     993 * The following flags are preserved:
     994 *  - no_ncq
     995 */
     996void iorb_requeue(IORBH _far *iorb)
     997{
     998  ADD_WORKSPACE _far *aws = add_workspace(iorb);
     999  u16 no_ncq = aws->no_ncq;
     1000
     1001  aws_free(aws);
     1002  memset(aws, 0x00, sizeof(*aws));
     1003  aws->no_ncq = no_ncq;
     1004}
     1005
  • trunk/src/os2ahci.h

    r4 r8  
    317317/* ADD workspace in IORB (must not exceed 16 bytes) */
    318318typedef struct {
    319   IORBH _far   *next;                  /* link to next IORB in our own queues */
    320319  void (*ppfunc)(IORBH _far *iorb);    /* post-processing function */
    321320  void         *buf;                   /* response buffer (e.g. for identify cmds) */
    321320  ULONG         timer;                 /* timer for timeout processing */
     322  USHORT        blocks;                /* number of blocks to be transferred */
    323323  unsigned      processing    : 1;     /* IORB is being processed */
    324324  unsigned      idempotent    : 1;     /* IORB is idempotent (can be retried) */
    325325  unsigned      queued_hw     : 1;     /* IORB has been queued to hardware */
    326   unsigned      no_ncq        : 1;     /* don't use native command queuing */
     326  unsigned      no_ncq        : 1;     /* must not use native command queuing */
     327  unsigned      is_ncq        : 1;     /* should use native command queueing */
     328  unsigned      complete      : 1;     /* IORB has completed processing */
    327329  unsigned      cmd_slot      : 4;     /* AHCI command slot for this IORB */
    328330} ADD_WORKSPACE;
     
    357359extern void    iorb_seterr            (IORBH _far *iorb, USHORT error_code);
    358360extern void    iorb_done              (IORBH _far *iorb);
     361extern void    iorb_requeue           (IORBH _far *iorb);
    359362
    360363/* ahci.c */
     
    380383                                             int (*func)(IORBH _far *, int),
    381384                                             ULONG timeout);
     385extern int     ahci_exec_polled_cmd         (AD_INFO *ai, int p, int d,
     386                                             int timeout, int cmd, ...);
    382387
    383388extern int     ahci_intr                    (u16 irq);
Note: See TracChangeset for help on using the changeset viewer.