source: trunk/src/os2ahci/ata.c@ 188

Last change on this file since 188 was 188, checked in by David Azarewicz, 8 years ago

Fixed interrupt problem.

File size: 47.5 KB
1/******************************************************************************
2 * ata.c - ATA command processing
3 *
4 * Copyright (c) 2011 thi.guten Software Development
5 * Copyright (c) 2011 Mensys B.V.
6 * Copyright (c) 2013-2016 David Azarewicz
7 *
8 * Authors: Christian Mueller, Markus Thielen
9 *
10 * Parts copied from/inspired by the Linux AHCI driver;
11 * those parts are (c) Linux AHCI/ATA maintainers
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include "os2ahci.h"
29#include "ata.h"
30
31/* -------------------------- macros and constants ------------------------- */
32
33/* ------------------------ typedefs and structures ------------------------ */
34
35/* -------------------------- function prototypes -------------------------- */
36
37static int ata_cmd_read(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot,
38 ULONG sector, ULONG count, SCATGATENTRY *sg_list,
39 ULONG sg_cnt);
40
41static int ata_cmd_write(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot,
42 ULONG sector, ULONG count, SCATGATENTRY *sg_list,
43 ULONG sg_cnt, int write_through);
44
45/* ------------------------ global/static variables ------------------------ */
46
47/* ----------------------------- start of code ----------------------------- */
48
49/******************************************************************************
50 * Initialize AHCI command slot, FIS and S/G list for the specified ATA
51 * command. The command parameters are passed as a variable argument list
52 * of type and value(s). The list is terminated by AP_END.
53 *
54 * Notes:
55 *
56 * - The specified command slot is expected to be idle; no checks are
57 * performed to prevent messing with a busy port.
58 *
59 * - Port multipliers are not supported yet, thus 'd' should always
60 * be 0 for the time being.
61 *
62 * - 'cmd' is passed as a 16-bit integer because the compiler would push
63 * a 'u8' as a 16-bit value (it's a fixed argument) and the stdarg
64 * macros would screw up the address of the first variable argument
65 * if the size of the last fixed argument didn't match what the
66 * compiler pushed on the stack.
67 *
68 * Return values:
69 * 0 : success
70 * > 0 : could not map all S/G entries; the return value is the number of
71 * S/G entries that could be mapped.
72 * < 0 : other error
73 */
74int ata_cmd(AD_INFO *ai, int p, int d, int slot, int cmd, ...)
75{
76 va_list va;
77 va_start(va, cmd);
78 return(v_ata_cmd(ai, p, d, slot, cmd, va));
79}
80
81int v_ata_cmd(AD_INFO *ai, int p, int d, int slot, int cmd, va_list va)
82{
83 AHCI_PORT_DMA *dma_base_virt;
84 AHCI_CMD_HDR *cmd_hdr;
85 AHCI_CMD_TBL *cmd_tbl;
86 SCATGATENTRY *sg_list = NULL;
87 SCATGATENTRY sg_single;
88 ATA_PARM ap;
89 ATA_CMD ata_cmd;
90 void *atapi_cmd = NULL;
91 u32 dma_base_phys;
92 u32 atapi_cmd_len = 0;
93 u32 ahci_flags = 0;
94 u32 sg_cnt = 0;
95 u32 i;
96 u32 n;
97
98 /* --------------------------------------------------------------------------
99 * Initialize ATA command. The ATA command is set up with the main command
100 * value and a variable list of additional parameters such as the sector
101 * address, transfer count, ...
102 */
103 memset(&ata_cmd, 0x00, sizeof(ata_cmd));
104 ata_cmd.cmd = cmd;
105
106 /* parse variable arguments */
107 do
108 {
109 switch ((ap = va_arg(va, ATA_PARM)))
110 {
111
112 case AP_AHCI_FLAGS:
113 ahci_flags |= va_arg(va, u32);
114 break;
115
116 case AP_WRITE:
117 if (va_arg(va, u32) != 0)
118 {
119 ahci_flags |= AHCI_CMD_WRITE;
120 }
121 break;
122
123 case AP_FEATURES:
124 /* ATA features word */
125 ata_cmd.features |= va_arg(va, u32);
126 break;
127
128 case AP_COUNT:
129 /* transfer count */
130 ata_cmd.count = va_arg(va, u32);
131 break;
132
133 case AP_SECTOR_28:
134 /* 28-bit sector address */
135 ata_cmd.lba_l = va_arg(va, u32);
136 if (ata_cmd.lba_l & 0xf0000000UL)
137 {
138 dprintf(0,"error: LBA-28 address %d has more than 28 bits\n", ata_cmd.lba_l);
139 return(ATA_CMD_INVALID_PARM);
140 }
141 /* add upper 4 bits to device field */
142 ata_cmd.device |= (ata_cmd.lba_l >> 24) & 0x0fU;
143 /* only lower 24 bits come into lba_l */
144 ata_cmd.lba_l &= 0x00ffffffUL;
145 break;
146
147 case AP_SECTOR_48:
148 /* 48-bit sector address */
149 ata_cmd.lba_l = va_arg(va, u32);
150 ata_cmd.lba_h = va_arg(va, u32);
151 break;
152
153 case AP_DEVICE:
154 /* ATA device byte; note that this byte contains the highest
155 * 4 bits of LBA-28 address; we have to leave them alone here. */
156 ata_cmd.device |= va_arg(va, u32) & 0xf0;
157 break;
158
159 case AP_SGLIST:
160 /* scatter/gather list in SCATGATENTRY/count format */
161 sg_list = va_arg(va, void *);
162 sg_cnt = va_arg(va, u32);
163 break;
164
165 case AP_VADDR:
166 /* virtual buffer address in addr/len format (up to 4K) */
167 sg_single.ppXferBuf = MemPhysAdr(va_arg(va, void *));
168 sg_single.XferBufLen = va_arg(va, u32);
169 sg_list = &sg_single;
170 sg_cnt = 1;
171 break;
172
173 case AP_ATAPI_CMD:
174 /* ATAPI command */
175 atapi_cmd = va_arg(va, void *);
176 atapi_cmd_len = va_arg(va, u32);
177 ahci_flags |= AHCI_CMD_ATAPI;
178 break;
179
180 case AP_ATA_CMD:
181 /* ATA command "pass-through" */
182 memcpy(&ata_cmd, va_arg(va, void *), sizeof(ATA_CMD));
183 break;
184
185 case AP_END:
186 break;
187
188 default:
189 dprintf(0,"error: v_ata_cmd() called with invalid parameter type (%d)\n", (int) ap);
190 return(ATA_CMD_INVALID_PARM);
191 }
192
193 } while (ap != AP_END);
194
195 /* --------------------------------------------------------------------------
196 * Fill in AHCI ATA command information. This includes the port command slot,
197 * the corresponding command FIS and the S/G list. The layout of the AHCI
198 * port DMA region is based on the Linux AHCI driver and looks like this:
199 *
200 * - 32 AHCI command headers (AHCI_CMD_HDR) with 32 bytes, each
201 * - 1 FIS receive area with 256 bytes (AHCI_RX_FIS_SZ)
202 * - 32 AHCI command tables, each consisting of
203 * - 64 bytes for command FIS
204 * - 16 bytes for ATAPI commands
205 * - 48 bytes reserved
206 * - 48 S/G entries (AHCI_SG) with 32 bytes, each
207 *
208 * Since the whole DMA buffer for all ports is larger than 64KB and we need
209 * multiple segments to address all of them, there are no virtual pointers
210 * to the individual elements in AD_INFO. Instead, we're relying on macros
211 * for getting the base address of a particular port's DMA region, then
212 * map a structure on top of that for convenience (AHCI_PORT_DMA).
213 */
214 dma_base_virt = port_dma_base(ai, p);
215 dma_base_phys = port_dma_base_phys(ai, p);
216
217 /* AHCI command header */
218 cmd_hdr = &dma_base_virt->cmd_hdr[slot];
219 memset(cmd_hdr, 0x00, sizeof(*cmd_hdr));
220 cmd_hdr->options = ((d & 0x0f) << 12);
221 cmd_hdr->options |= ahci_flags; /* AHCI command flags */
222 cmd_hdr->options |= 5; /* length of command FIS in 32-bit words */
223 cmd_hdr->tbl_addr = dma_base_phys + offsetof(AHCI_PORT_DMA, cmd_tbl[slot]);
224 /* DAZ: could use MemPhysAdr(&dma_base_virt->cmd_tbl[slot]) here instead, but that is probably slower. */
225
226 /* AHCI command table */
227 cmd_tbl = &dma_base_virt->cmd_tbl[slot];
228 memset(cmd_tbl, 0x00, sizeof(*cmd_tbl));
229 ata_cmd_to_fis(cmd_tbl->cmd_fis, &ata_cmd, d);
230
231 if (atapi_cmd != NULL)
232 {
233 /* copy ATAPI command */
234 memcpy(cmd_tbl->atapi_cmd, atapi_cmd, atapi_cmd_len);
235 }
236
237 /* PRDT (S/G list)
238 *
239 * - The S/G list for AHCI adapters is limited to 22 bits for the transfer
240 * size of each element, thus we need to split S/G elements larger than
241 * 22 bits into 2 AHCI_SG elements.
242 *
243 * - The S/G element size for AHCI is what the spec calls '0'-based
244 * (i.e. 0 means 1 byte). On top of that, the spec requires S/G transfer
245 * sizes to be even in the context of 16-bit transfers, thus bit 0 of the
246 * size field always needs to be set.
247 *
248 * - AHCI_MAX_SG_ELEMENT_LEN defines the maximum size of an AHCI S/G
249 * element in bytes, ignoring the '0'-based methodology (i.e. 1 << 22).
250 *
251 * - There's a limit on the maximum number of S/G elements in the port DMA
252 * buffer (AHCI_MAX_SG) which is lower than the HW maximum. It's beyond
253 * the control of this function to split commands which require more
254 * than AHCI_MAX_SG entries. In order to help the caller, the return value
255 * of this function will indicate how many OS/2 S/G entries were
256 * successfully mapped.
257 */
258 for (i = n = 0; i < sg_cnt; i++)
259 {
260 u32 sg_addr = sg_list[i].ppXferBuf;
261 u32 sg_size = sg_list[i].XferBufLen;
262
263 do
264 {
265 u32 chunk = (sg_size > AHCI_MAX_SG_ELEMENT_LEN) ? AHCI_MAX_SG_ELEMENT_LEN : sg_size;
266 if (n >= AHCI_MAX_SG)
267 {
268 /* couldn't store all S/G elements in our DMA buffer */
269 dprintf(0,"ata_cmd(): too many S/G elements\n");
270 return(i - 1);
271 }
272 if ((sg_addr & 1) || (chunk & 1))
273 {
274 dprintf(1,"error: ata_cmd() called with unaligned S/G element(s)\n");
275 return(ATA_CMD_UNALIGNED_ADDR);
276 }
277 cmd_tbl->sg_list[n].addr = sg_addr;
278 cmd_tbl->sg_list[n].size = chunk - 1;
279 sg_addr += chunk;
280 sg_size -= chunk;
281 n++;
282 } while (sg_size > 0);
283 }
284
285 /* set final S/G count in AHCI command header */
286 cmd_hdr->options |= n << 16;
287
288 #ifdef DEBUG
289 if ((D32g_DbgLevel >= 7) || (atapi_cmd != NULL))
290 {
291 DPRINTF(0,"ATA command for %d.%d.%d, slot %d:\n", ad_no(ai), p, d, slot);
292 dHexDump(0,cmd_hdr, offsetof(AHCI_CMD_HDR, reserved), "cmd_hdr: ");
293 dHexDump(0,&ata_cmd, sizeof(ata_cmd), "ata_cmd: ");
294 if (atapi_cmd != NULL)
295 {
296 dHexDump(0,atapi_cmd, atapi_cmd_len, "atapi_cmd: ");
297 }
298 if (n > 0)
299 {
300 dHexDump(0,cmd_tbl->sg_list, sizeof(*cmd_tbl->sg_list) * n, "sg_list: ");
301 }
302 }
303 #endif
304
305 return(ATA_CMD_SUCCESS);
306}
307
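
To make the AP_* type/value convention concrete, here is a minimal caller sketch (not part of the file): it assumes ai/p/d/slot describe an idle command slot and reuses the opcode and parameter codes that ata_verify() further down passes for a 28-bit verify.

    /* sketch: verify 8 sectors at LBA 1000h; the argument list is a series of
     * AP_* type/value pairs and must be terminated with AP_END */
    int rc = ata_cmd(ai, p, d, slot, ATA_CMD_VERIFY,
                     AP_SECTOR_28, 0x1000UL,  /* lower 24 bits -> lba_l, upper 4 -> device */
                     AP_COUNT,     8,
                     AP_DEVICE,    0x40,      /* LBA mode bit */
                     AP_END);
    if (rc != 0)
    {
      /* < 0: invalid parameter or unaligned buffer; > 0: number of S/G entries mapped */
    }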
308/******************************************************************************
309 * Fill SATA command FIS with values extracted from an ATA command structure.
310 * The command FIS buffer (fis) is expected to be initialized to 0s. The
311 * structure of the FIS maps to the ATA shadow register block, including
312 * registers which can be written twice to store 16 bits (called 'exp').
313 *
314 * The FIS structure looks like this (using LSB notation):
315 *
316 *    +----------------+----------------+----------------+----------------+
317 * 00 | FIS type (27h) | C|R|R|R|PMP    | Command        | Features       |
318 *    +----------------+----------------+----------------+----------------+
319 * 04 | LBA 7:0        | LBA 15:8       | LBA 23:16      | R|R|R|D|Head   |
320 *    +----------------+----------------+----------------+----------------+
321 * 08 | LBA 31:24      | LBA 39:32      | LBA 47:40      | Features exp   |
322 *    +----------------+----------------+----------------+----------------+
323 * 12 | Count 7:0      | Count 15:8     | Reserved       | Control        |
324 *    +----------------+----------------+----------------+----------------+
325 * 16 | Reserved       | Reserved       | Reserved       | Reserved       |
326 *    +----------------+----------------+----------------+----------------+
327 */
328void ata_cmd_to_fis(u8 *fis, ATA_CMD *ata_cmd, int d)
329{
330 fis[0] = 0x27; /* register - host to device FIS */
331 fis[1] = (u8) (d & 0xf); /* port multiplier number */
332 fis[1] |= 0x80; /* bit 7 indicates Command FIS */
333 fis[2] = (u8) ata_cmd->cmd;
334 fis[3] = (u8) ata_cmd->features;
335
336 fis[4] = (u8) ata_cmd->lba_l;
337 fis[5] = (u8) (ata_cmd->lba_l >> 8);
338 fis[6] = (u8) (ata_cmd->lba_l >> 16);
339 fis[7] = (u8) ata_cmd->device;
340
341 fis[8] = (u8) (ata_cmd->lba_l >> 24);
342 fis[9] = (u8) ata_cmd->lba_h;
343 fis[10] = (u8) (ata_cmd->lba_h >> 8);
344 fis[11] = (u8) (ata_cmd->features >> 8);
345
346 fis[12] = (u8) ata_cmd->count;
347 fis[13] = (u8) (ata_cmd->count >> 8);
348}
349
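
A worked example of the mapping above (the 25h opcode for READ DMA EXT is an assumption based on the ATA command set; all other bytes follow directly from the assignments in ata_cmd_to_fis()): for cmd = 25h, features = 0, lba_l = 12345678h, lba_h = 0, count = 8, device = 40h and d = 0, the resulting H2D register FIS is

    fis[0..3]   = 27 80 25 00    (FIS type, C bit + PMP 0, command, features 7:0)
    fis[4..7]   = 78 56 34 40    (LBA 7:0, LBA 15:8, LBA 23:16, device)
    fis[8..11]  = 12 00 00 00    (LBA 31:24, LBA 39:32, LBA 47:40, features 15:8)
    fis[12..13] = 08 00          (count 7:0, count 15:8)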
350/******************************************************************************
351 * Get index in S/G list for the number of transferred sectors in the IORB.
352 *
353 * Returning io->cSGList indicates an error.
354 *
355 * NOTE: OS/2 makes sure S/G lists are set up such that entries at the HW
356 * limit will never cross sector boundaries. This means that splitting
357 * S/G lists into multiple commands can be done without editing the S/G
358 * lists.
359 */
360u16 ata_get_sg_indx(IORB_EXECUTEIO *io)
361{
362 ULONG offset = io->BlocksXferred * io->BlockSize;
363 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
364 USHORT i;
365
366 for (i = 0; i < io->cSGList && offset > 0; i++)
367 {
368 offset -= pSGList[i].XferBufLen;
369 }
370
371 return(i);
372}
373
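
A short worked example of the index computation (the values are hypothetical): with BlockSize = 512, BlocksXferred = 3 and S/G element lengths of 1024, 512 and 2048 bytes, the starting offset is 1536; the loop subtracts 1024 and then 512, reaching 0 after two elements, so the function returns 2 and the next command iteration starts with the third S/G element.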
374/******************************************************************************
375 * Get max S/G count which will fit into our HW S/G buffers. This function is
376 * called when the S/G list is too long and we need to split the IORB into
377 * multiple commands. It returns both the number of sectors and S/G list
378 * elements that we can handle in a single command.
379 *
380 * The parameter 'sg_indx' indicates the current start index in the S/G list
381 * (0 if this is the first command iteration).
382 *
383 * The parameter 'sg_max' is the return value of v_ata_cmd() and indicates
384 * how many S/G elements were successfully mapped. Whatever we return needs to
385 * be less or equal to this value.
386 *
387 * Returning 0 in *sg_cnt indicates an error.
388 *
389 * NOTE: OS/2 makes sure S/G lists are set up such that entries at HW limits
390 * will never cross sector boundaries. This means that splitting S/G
391 * lists into multiple commands can be done without editing S/G list
392 * elements. Since AHCI only allows 22 bits for each S/G element, the
393 * hardware limits are reported as AHCI_MAX_SG / 2 but will vary based
394 * on the actual length of S/G elements. This function looks for the
395 * maximum number of S/G elements that can be mapped on sector
396 * boundaries which will still fit into our HW S/G list.
397 */
398void ata_max_sg_cnt(IORB_EXECUTEIO *io, USHORT sg_indx, USHORT sg_max,
399 USHORT *sg_cnt, USHORT *sector_cnt)
400{
401 ULONG max_sector_cnt = 0;
402 USHORT max_sg_cnt = 0;
403 ULONG offset = 0;
404 USHORT i;
405 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
406
407 for (i = sg_indx; i < io->cSGList; i++)
408 {
409 if (i - sg_indx >= sg_max)
410 {
411 /* we're beyond the number of S/G elements we can map */
412 break;
413 }
414
415 offset += pSGList[i].XferBufLen;
416 if (offset % io->BlockSize == 0)
417 {
418 /* this S/G element ends on a sector boundary */
419 max_sector_cnt = offset / io->BlockSize;
420 max_sg_cnt = i + 1;
421 }
422 }
423
424 /* return the best match we found (0 indicating failure) */
425 *sector_cnt = max_sector_cnt;
426 *sg_cnt = max_sg_cnt;
427}
428
429
430/******************************************************************************
431 * Get device or media geometry. Device and media geometry are expected to be
432 * the same for non-removable devices, which will always be the case for the
433 * ATA devices we're dealing with (hard disks). ATAPI is a different story
434 * and is handled by atapi_get_geometry().
435 */
436int ata_get_geometry(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
437{
438 ADD_WORKSPACE *aws = add_workspace(pIorb);
439 int rc;
440
441 /* allocate buffer for ATA identify information */
442 if ((aws->buf = MemAlloc(ATA_ID_WORDS * sizeof(u16))) == NULL)
443 {
444 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
445 return(-1);
446 }
447
448 /* request ATA identify information */
449 aws->ppfunc = ata_get_geometry_pp;
450 rc = ata_cmd(ad_infos + iorb_unit_adapter(pIorb),
451 iorb_unit_port(pIorb),
452 iorb_unit_device(pIorb),
453 slot,
454 ATA_CMD_ID_ATA,
455 AP_VADDR, (void *) aws->buf, ATA_ID_WORDS * sizeof(u16),
456 AP_END);
457
458 if (rc != 0)
459 {
460 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
461 }
462
463 return(rc);
464}
465
466/* Adjust the cylinder count in the physical
467 * geometry to the last full cylinder.
468 */
469int adjust_cylinders(GEOMETRY *geometry, ULONG TotalSectors)
470{
471 USHORT SecPerCyl;
472 int rc = FALSE;
473
474 geometry->TotalSectors = TotalSectors;
475 SecPerCyl = geometry->SectorsPerTrack * geometry->NumHeads;
476 if (SecPerCyl > 0)
477 {
478 ULONG TotalCylinders = TotalSectors / SecPerCyl;
479
480 geometry->TotalSectors = TotalCylinders * SecPerCyl;
481 geometry->TotalCylinders = TotalCylinders;
482 if (TotalCylinders >> 16)
483 {
484 geometry->TotalCylinders = 65535;
485 rc = TRUE;
486 }
487 }
488 return (rc);
489}
490
491/* Calculate the logical geometry based on the input physical geometry
492 * using the LBA Assist Translation algorithm.
493 */
494#define BIOS_MAX_CYLINDERS 1024l
495#define BIOS_MAX_NUMHEADS 255
496#define BIOS_MAX_SECTORSPERTRACK 63
497void log_geom_calculate_LBA_assist(GEOMETRY *geometry, ULONG TotalSectors)
498{
499 UCHAR numSpT = BIOS_MAX_SECTORSPERTRACK;
500 UCHAR numHeads = BIOS_MAX_NUMHEADS;
501 ULONG Cylinders;
502
503 if (TotalSectors <= (BIOS_MAX_CYLINDERS * 128 * BIOS_MAX_SECTORSPERTRACK))
504 {
505 USHORT temp = (TotalSectors - 1) / (BIOS_MAX_CYLINDERS * BIOS_MAX_SECTORSPERTRACK);
506
507 if (temp < 16) numHeads = 16;
508 else if (temp < 32) numHeads = 32;
509 else if (temp < 64) numHeads = 64;
510 else numHeads = 128;
511 }
512
513 do
514 {
515 Cylinders = TotalSectors / (USHORT)(numHeads * numSpT);
516 if (Cylinders >> 16)
517 {
518 if (numSpT < 128)
519 numSpT = (numSpT << 1) | 1;
520 else
521 Cylinders = 65535; // overflow !
522 }
523 } while (Cylinders >> 16);
524
525 geometry->TotalCylinders = Cylinders;
526 geometry->NumHeads = numHeads;
527 geometry->SectorsPerTrack = numSpT;
528}
529
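
A worked example of the translation (the disk size is hypothetical): for TotalSectors = 4,200,000 (about 2 GB), the capacity is below 1024 * 128 * 63, so temp = 4,199,999 / (1024 * 63) = 65 and numHeads becomes 128; the first loop pass yields Cylinders = 4,200,000 / (128 * 63) = 520, which fits in 16 bits, giving a logical geometry of 520 cylinders, 128 heads and 63 sectors per track. For larger disks numHeads stays at 255 and, if the cylinder count still overflows 16 bits, numSpT grows towards 127 before the count is finally clamped to 65535.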
530int check_lvm(IORBH *pIorb, ULONG sector)
531{
532 DLA_Table_Sector *pDLA = (DLA_Table_Sector*)add_workspace(pIorb)->buf;
533 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
534 GEOMETRY *geometry = ((IORB_GEOMETRY*)pIorb)->pGeometry;
535 int p = iorb_unit_port(pIorb);
536 int rc;
537
538 rc = ahci_exec_polled_cmd(ai, p, 0, 500, ATA_CMD_READ,
539 AP_SECTOR_28, sector-1,
540 AP_COUNT, 1,
541 AP_VADDR, (void *)pDLA, 512,
542 AP_DEVICE, 0x40,
543 AP_END);
544 if (rc) return 0;
545
546 DHEXDUMP(3,pDLA, sizeof(DLA_Table_Sector), "DLA sector %d:\n", sector-1);
547
548 if ((pDLA->DLA_Signature1 == DLA_TABLE_SIGNATURE1) && (pDLA->DLA_Signature2 == DLA_TABLE_SIGNATURE2)) {
549 DPRINTF(3,"is_lvm_geometry found at sector %d\n", sector-1);
550 geometry->TotalCylinders = pDLA->Cylinders;
551 geometry->NumHeads = pDLA->Heads_Per_Cylinder;
552 geometry->SectorsPerTrack = pDLA->Sectors_Per_Track;
553 geometry->TotalSectors = pDLA->Cylinders * pDLA->Heads_Per_Cylinder * pDLA->Sectors_Per_Track;
554 return 1;
555 }
556
557 return 0;
558}
559
560/******************************************************************************
561 * Try to read LVM information from the disk. If found, use the LVM geometry.
562 * This function will only work at init time. A better strategy would be to
563 * calculate the geometry during ahci_scan_ports and save it away and then just
564 * return the saved values when ata_get_geometry() is called.
565 */
566int is_lvm_geometry(IORBH *pIorb)
567{
568 GEOMETRY *geometry = ((IORB_GEOMETRY*)pIorb)->pGeometry;
569 ULONG sector;
570
571 if (init_complete) return 0; /* We cannot use ahci_exec_polled_cmd() after init_complete */
572
573 if (use_lvm_info)
574 {
575 #ifdef DEBUG
576 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
577 int p = iorb_unit_port(pIorb);
578 int d = iorb_unit_device(pIorb);
579 DPRINTF(3,"is_lvm_geometry (%d.%d.%d)\n", ad_no(ai), p, d);
580 #endif
581
582 /* First check the sector reported by the hardware */
583 if (check_lvm(pIorb, geometry->SectorsPerTrack)) return 1;
584
585 for (sector = 255; sector >= 63; sector >>= 1)
586 {
587 if (sector == geometry->SectorsPerTrack) continue;
588 if (check_lvm(pIorb, sector)) return 1;
589 }
590 }
591
592 return 0;
593}
594
595/******************************************************************************
596 * Post processing function for ata_get_geometry(): convert the ATA identify
597 * information to OS/2 IOCC_GEOMETRY information.
598 */
599void ata_get_geometry_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb)
600{
601 GEOMETRY *geometry = ((IORB_GEOMETRY*)pIorb)->pGeometry;
602 USHORT geometry_len = ((IORB_GEOMETRY *)pIorb)->GeometryLen;
603 u16 *id_buf = add_workspace(pIorb)->buf;
604 int a = iorb_unit_adapter(pIorb);
605 int p = iorb_unit_port(pIorb);
606 char *Method;
607
608 /* Fill in geometry information; the ATA-8 spec declares the geometry
609 * fields in the ATA ID buffer as obsolete but it's still the best
610 * guess in most cases. If the information stored in the geometry
611 * fields is apparently incorrect, we'll use the algorithm typically
612 * used by SCSI adapters and modern PC BIOS versions:
613 *
614 * - 512 bytes per sector
615 * - 255 heads
616 * - 63 sectors per track (or 56 with the parameter "/4")
617 * - x cylinders (calculated)
618 *
619 * Please note that os2ahci currently does not natively support ATA sectors
620 * larger than 512 bytes and therefore relies on the translation logic built
621 * into the corresponding ATA disks. In order to prevent file systems that
622 * use block sizes larger than 512 bytes (FAT, JFS, ...) from ending up with
623 * incorrectly aligned physical sector accesses, and hence using more physical
624 * I/Os than necessary, the command line parameter "/4" can be used to force
625 * a track size of 56 sectors. This way, partitions will start on 4K
626 * boundaries.
627 *
628 * Another limitation is that OS/2 has a 32-bit variable for the total number
629 * of sectors, limiting the maximum capacity to roughly 2TB. This is another
630 * issue that needs to be addressed sooner or later; large sectors could
631 * raise this limit to something like 8TB but this is not really much of a
632 * difference. Maybe there's something in later DDKs that allows more than
633 * 32 bits?
634 */
635 memset(geometry, 0x00, geometry_len);
636 geometry->BytesPerSector = ATA_SECTOR_SIZE;
637
638 /* extract total number of sectors */
639 if (id_buf[ATA_ID_CFS_ENABLE_2] & 0x400)
640 {
641 /* 48-bit LBA supported */
642 if (ATA_CAPACITY48_H(id_buf) != 0)
643 {
644 /* more than 32 bits for number of sectors */
645 dprintf(0,"warning: limiting disk %d.%d.%d to 2TB\n",
646 iorb_unit_adapter(pIorb), iorb_unit_port(pIorb),
647 iorb_unit_device(pIorb));
648 geometry->TotalSectors = 0xffffffffUL;
649 }
650 else
651 {
652 geometry->TotalSectors = ATA_CAPACITY48_L(id_buf);
653 }
654 }
655 else
656 {
657 /* 28-bit LBA */
658 geometry->TotalSectors = ATA_CAPACITY(id_buf) & 0x0fffffffUL;
659 }
660
661 Method = "None";
662 /* fabricate the remaining geometry fields */
663 if (track_size[a][p] != 0)
664 {
665 /* A specific track size has been requested for this port; this is
666 * typically done for disks with 4K sectors to make sure partitions
667 * start on 8-sector boundaries (parameter "/4").
668 */
669 geometry->NumHeads = 255;
670 geometry->SectorsPerTrack = track_size[a][p];
671 geometry->TotalCylinders = geometry->TotalSectors / ((u32) geometry->NumHeads * (u32) geometry->SectorsPerTrack);
672 Method = "Custom";
673 }
674 else if (CUR_HEADS(id_buf) > 0 && CUR_CYLS(id_buf) > 0 && CUR_SECTORS(id_buf) > 0 &&
675 CUR_CAPACITY(id_buf) == (u32) CUR_HEADS(id_buf) * (u32) CUR_CYLS(id_buf) * (u32) CUR_SECTORS(id_buf))
676 {
677 /* BIOS-supplied (aka "current") geometry values look valid */
678 geometry->NumHeads = CUR_HEADS(id_buf);
679 geometry->SectorsPerTrack = CUR_SECTORS(id_buf);
680 geometry->TotalCylinders = CUR_CYLS(id_buf);
681 Method = "BIOS";
682 }
683 else if (ATA_HEADS(id_buf) > 0 && ATA_CYLS(id_buf) > 0 && ATA_SECTORS(id_buf) > 0)
684 {
685 /* ATA-supplied values for geometry look valid */
686 geometry->NumHeads = ATA_HEADS(id_buf);
687 geometry->SectorsPerTrack = ATA_SECTORS(id_buf);
688 geometry->TotalCylinders = ATA_CYLS(id_buf);
689 Method = "ATA";
690 }
691 else
692 {
693 /* use typical SCSI geometry */
694 geometry->NumHeads = 255;
695 geometry->SectorsPerTrack = 63;
696 geometry->TotalCylinders = geometry->TotalSectors / ((u32) geometry->NumHeads * (u32) geometry->SectorsPerTrack);
697 Method = "SCSI";
698 }
699
700 DPRINTF(2,"Physical geometry: %d cylinders, %d heads, %d sectors per track (%dMB) (%s)\n",
701 geometry->TotalCylinders, geometry->NumHeads, geometry->SectorsPerTrack,
702 (geometry->TotalSectors / 2048), Method);
703
704 /* Fixup the geometry in case the geometry reported by the BIOS is bad */
705 if (adjust_cylinders(geometry, geometry->TotalSectors))
706 { // cylinder overflow
707 log_geom_calculate_LBA_assist(geometry, geometry->TotalSectors);
708 geometry->TotalSectors = (USHORT)(geometry->NumHeads * geometry->SectorsPerTrack) * (ULONG)geometry->TotalCylinders;
709 }
710 adjust_cylinders(geometry, geometry->TotalSectors);
711
712 DPRINTF(2,"Logical geometry: %d cylinders, %d heads, %d sectors per track (%dMB) (%s)\n",
713 geometry->TotalCylinders, geometry->NumHeads, geometry->SectorsPerTrack,
714 (geometry->TotalSectors / 2048), Method);
715
716 if (is_lvm_geometry(pIorb)) Method = "LVM";
717 ad_infos[a].ports[p].devs[0].dev_info.Cylinders = geometry->TotalCylinders;
718 ad_infos[a].ports[p].devs[0].dev_info.HeadsPerCylinder = geometry->NumHeads;
719 ad_infos[a].ports[p].devs[0].dev_info.SectorsPerTrack = geometry->SectorsPerTrack;
720 ad_infos[a].ports[p].devs[0].dev_info.TotalSectors = geometry->TotalSectors;
721 ad_infos[a].ports[p].devs[0].dev_info.Method = Method;
722
723 DPRINTF(2,"Reported geometry: %d cylinders, %d heads, %d sectors per track (%dMB) (%s)\n",
724 geometry->TotalCylinders, geometry->NumHeads, geometry->SectorsPerTrack,
725 (geometry->TotalSectors / 2048), Method);
726
727 /* tell interrupt handler that this IORB is complete */
728 add_workspace(pIorb)->complete = 1;
729}
730
731/******************************************************************************
732 * Test whether unit is ready.
733 */
734int ata_unit_ready(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
735{
736 /* This is a NOP for ATA devices (at least right now); returning an error
737 * without setting an error code means ahci_exec_iorb() will not queue any
738 * HW command and the IORB will complete successfully.
739 */
740 ((IORB_UNIT_STATUS *)pIorb)->UnitStatus = US_READY | US_POWER;
741 return(-1);
742}
743
744/******************************************************************************
745 * Read sectors from AHCI device.
746 */
747int ata_read(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
748{
749 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
750 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
751 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
752 ULONG sector = io->RBA + io->BlocksXferred;
753 USHORT count = io->BlockCount - io->BlocksXferred;
754 USHORT sg_indx;
755 USHORT sg_cnt;
756 int p = iorb_unit_port(pIorb);
757 int d = iorb_unit_device(pIorb);
758 int rc;
759
760 if (io->BlockCount == 0)
761 {
762 /* NOP; return -1 without error in IORB to indicate success */
763 return(-1);
764 }
765
766 if (add_workspace(pIorb)->unaligned)
767 {
768 /* unaligned S/G addresses present; need to use double buffers */
769 return(ata_read_unaligned(pIorb, slot));
770 }
771
772 /* Kludge: some I/O commands during boot use excessive S/G buffer lengths
773 * which cause NCQ commands to lock up. If there's only one S/G element
774 * and this element is already larger than what we can derive from the sector
775 * count, we'll adjust that element.
776 */
777 if (io->BlocksXferred == 0 && io->cSGList == 1 &&
778 pSGList[0].XferBufLen > (ULONG) io->BlockCount * io->BlockSize)
779 {
780 pSGList[0].XferBufLen = (ULONG) io->BlockCount * io->BlockSize;
781 }
782
783 /* prepare read command while keeping an eye on S/G count limitations */
784 do
785 {
786 sg_indx = ata_get_sg_indx(io);
787 sg_cnt = io->cSGList - sg_indx;
788 if ((rc = ata_cmd_read(pIorb, ai, p, d, slot, sector, count,
789 pSGList + sg_indx, sg_cnt)) > 0)
790 {
791 /* couldn't map all S/G elements */
792 ata_max_sg_cnt(io, sg_indx, rc, &sg_cnt, &count);
793 }
794 } while (rc > 0 && sg_cnt > 0);
795
796 if (rc == 0)
797 {
798 add_workspace(pIorb)->blocks = count;
799 add_workspace(pIorb)->ppfunc = ata_read_pp;
800 }
801 else if (rc > 0)
802 {
803 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD);
804 }
805 else if (rc == ATA_CMD_UNALIGNED_ADDR)
806 {
807 /* unaligned S/G addresses detected; need to use double buffers */
808 add_workspace(pIorb)->unaligned = 1;
809 return(ata_read_unaligned(pIorb, slot));
810
811 }
812 else
813 {
814 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
815 }
816
817 return(rc);
818}
819
820/******************************************************************************
821 * Read sectors from AHCI device with unaligned S/G element addresses. AHCI
822 * only allows aligned S/G addresses while OS/2 doesn't have this kind of
823 * restriction. This doesn't happen very often but when it does, we need to
824 * use a transfer buffer and copy the data manually.
825 */
826int ata_read_unaligned(IORBH *pIorb, int slot)
827{
828 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
829 ADD_WORKSPACE *aws = add_workspace(pIorb);
830 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
831 ULONG sector = io->RBA + io->BlocksXferred;
832 SCATGATENTRY sg_single;
833 int p = iorb_unit_port(pIorb);
834 int d = iorb_unit_device(pIorb);
835 int rc;
836
837 DPRINTF(7,"ata_read_unaligned(%d.%d.%d, %d)\n", ad_no(ai), p, d, sector);
838 ai->ports[p].unaligned_read_count++;
839
840 /* allocate transfer buffer */
841 if ((aws->buf = MemAlloc(io->BlockSize)) == NULL)
842 {
843 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
844 return(-1);
845 }
846
847 /* prepare read command using transfer buffer */
848 sg_single.ppXferBuf = MemPhysAdr(aws->buf);
849 sg_single.XferBufLen = io->BlockSize;
850 rc = ata_cmd_read(pIorb, ai, p, d, slot, sector, 1, &sg_single, 1);
851
852 if (rc == 0) {
853 add_workspace(pIorb)->blocks = 1;
854 add_workspace(pIorb)->ppfunc = ata_read_pp;
855
856 } else if (rc > 0) {
857 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD);
858
859 } else {
860 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
861 }
862
863 return(rc);
864}
865
866/******************************************************************************
867 * Post processing function for ata_read(); this function updates the
868 * BlocksXferred counter in the IORB and, if not all blocks have been
869 * transferred, requeues the IORB to process the remaining sectors. It also
870 * takes care of copying data from the transfer buffer for unaligned reads.
871 */
872void ata_read_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb)
873{
874 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
875 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
876 ADD_WORKSPACE *aws = add_workspace(pIorb);
877
878 if (aws->unaligned)
879 {
880 /* copy transfer buffer to corresponding physical address in S/G list */
881 sg_memcpy(pSGList, io->cSGList,
882 (ULONG) io->BlocksXferred * (ULONG) io->BlockSize,
883 aws->buf, io->BlockSize, BUF_TO_SG);
884 }
885
886 io->BlocksXferred += add_workspace(pIorb)->blocks;
887 DPRINTF(7,"ata_read_pp(): blocks transferred = %d\n", io->BlocksXferred);
888
889 if (io->BlocksXferred >= io->BlockCount)
890 {
891 /* we're done; tell IRQ handler the IORB is complete */
892 add_workspace(pIorb)->complete = 1;
893 }
894 else
895 {
896 /* requeue this IORB for next iteration */
897 iorb_requeue(pIorb);
898 }
899}
900
901/******************************************************************************
902 * Verify readability of sectors on ATA device.
903 */
904int ata_verify(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
905{
906 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
907 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
908 int p = iorb_unit_port(pIorb);
909 int d = iorb_unit_device(pIorb);
910 int rc;
911
912 if (io->BlockCount == 0)
913 {
914 /* NOP; return -1 without error in IORB to indicate success */
915 return(-1);
916 }
917
918 /* prepare verify command */
919 if (io->RBA >= (1UL << 28) || io->BlockCount > 256)
920 {
921 /* need LBA48 for this command */
922 if (!ai->ports[p].devs[d].lba48) {
923 iorb_seterr(pIorb, IOERR_RBA_LIMIT);
924 return(-1);
925 }
926 rc = ata_cmd(ai, p, d, slot, ATA_CMD_VERIFY_EXT,
927 AP_SECTOR_48, io->RBA, 0,
928 AP_COUNT, io->BlockCount,
929 AP_DEVICE, 0x40,
930 AP_END);
931 } else {
932 rc = ata_cmd(ai, p, d, slot, ATA_CMD_VERIFY,
933 AP_SECTOR_28, io->RBA,
934 AP_COUNT, io->BlockCount & 0xffU,
935 AP_DEVICE, 0x40,
936 AP_END);
937 }
938
939 return(rc);
940}
941
942/******************************************************************************
943 * Write sectors to AHCI device.
944 */
945int ata_write(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
946{
947 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
948 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
949 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
950 ULONG sector = io->RBA + io->BlocksXferred;
951 USHORT count = io->BlockCount - io->BlocksXferred;
952 USHORT sg_indx;
953 USHORT sg_cnt;
954 int p = iorb_unit_port(pIorb);
955 int d = iorb_unit_device(pIorb);
956 int rc;
957
958 if (io->BlockCount == 0)
959 {
960 /* NOP; return -1 without error in IORB to indicate success */
961 return(-1);
962 }
963
964 if (add_workspace(pIorb)->unaligned)
965 {
966 /* unaligned S/G addresses present; need to use double buffers */
967 return(ata_write_unaligned(pIorb, slot));
968 }
969
970 /* prepare write command while keeping an eye on S/G count limitations */
971 do {
972 sg_indx = ata_get_sg_indx(io);
973 sg_cnt = io->cSGList - sg_indx;
974 if ((rc = ata_cmd_write(pIorb, ai, p, d, slot, sector, count,
975 pSGList + sg_indx, sg_cnt,
976 io->Flags & XIO_DISABLE_HW_WRITE_CACHE)) > 0)
977 {
978 /* couldn't map all S/G elements */
979 ata_max_sg_cnt(io, sg_indx, (USHORT) rc, &sg_cnt, &count);
980 }
981 } while (rc > 0 && sg_cnt > 0);
982
983 if (rc == 0)
984 {
985 add_workspace(pIorb)->blocks = count;
986 add_workspace(pIorb)->ppfunc = ata_write_pp;
987 }
988 else if (rc > 0)
989 {
990 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD);
991 }
992 else if (rc == ATA_CMD_UNALIGNED_ADDR)
993 {
994 /* unaligned S/G addresses detected; need to use double buffers */
995 add_workspace(pIorb)->unaligned = 1;
996 return(ata_write_unaligned(pIorb, slot));
997 }
998 else
999 {
1000 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
1001 }
1002
1003 return(rc);
1004}
1005
1006/******************************************************************************
1007 * Write sectors to an AHCI device with unaligned S/G element addresses. AHCI
1008 * only allows aligned S/G addresses while OS/2 doesn't have this kind of
1009 * restriction. This doesn't happen very often but when it does, we need to
1010 * use a transfer buffer and copy the data manually.
1011 */
1012int ata_write_unaligned(IORBH *pIorb, int slot)
1013{
1014 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
1015 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
1016 ADD_WORKSPACE *aws = add_workspace(pIorb);
1017 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
1018 ULONG sector = io->RBA + io->BlocksXferred;
1019 SCATGATENTRY sg_single;
1020 int p = iorb_unit_port(pIorb);
1021 int d = iorb_unit_device(pIorb);
1022 int rc;
1023
1024 DPRINTF(7,"ata_write_unaligned(%d.%d.%d, %d)\n", ad_no(ai), p, d, sector);
1025
1026 /* allocate transfer buffer */
1027 if ((aws->buf = MemAlloc(io->BlockSize)) == NULL)
1028 {
1029 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
1030 return(-1);
1031 }
1032
1033 /* copy next sector from S/G list to transfer buffer */
1034 sg_memcpy(pSGList, io->cSGList,
1035 (ULONG) io->BlocksXferred * (ULONG) io->BlockSize,
1036 aws->buf, io->BlockSize, SG_TO_BUF);
1037
1038 /* prepare write command using transfer buffer */
1039 sg_single.ppXferBuf = MemPhysAdr(aws->buf);
1040 sg_single.XferBufLen = io->BlockSize;
1041 rc = ata_cmd_write(pIorb, ai, p, d, slot, sector, 1, &sg_single, 1,
1042 io->Flags & XIO_DISABLE_HW_WRITE_CACHE);
1043
1044 if (rc == 0)
1045 {
1046 add_workspace(pIorb)->blocks = 1;
1047 add_workspace(pIorb)->ppfunc = ata_write_pp;
1048 }
1049 else if (rc > 0)
1050 {
1051 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD);
1052 }
1053 else
1054 {
1055 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
1056 }
1057
1058 return(rc);
1059}
1060
1061
1062/******************************************************************************
1063 * Post processing function for ata_write(); this function updates the
1064 * BlocksXferred counter in the IORB and, if not all blocks have been
1065 * transferred, requeues the IORB to process the remaining sectors.
1066 */
1067void ata_write_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1068{
1069 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
1070
1071 io->BlocksXferred += add_workspace(pIorb)->blocks;
1072 DPRINTF(7,"ata_write_pp(): blocks transferred = %d\n", io->BlocksXferred);
1073
1074 if (io->BlocksXferred >= io->BlockCount)
1075 {
1076 /* we're done; tell IRQ handler the IORB is complete */
1077 add_workspace(pIorb)->complete = 1;
1078 }
1079 else
1080 {
1081 /* requeue this IORB for next iteration */
1082 iorb_requeue(pIorb);
1083 }
1084}
1085
1086/******************************************************************************
1087 * Execute ATA command.
1088 */
1089int ata_execute_ata(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
1090{
1091 IORB_ADAPTER_PASSTHRU *apt = (IORB_ADAPTER_PASSTHRU *)pIorb;
1092 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(apt->pSGList);
1093 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
1094 int p = iorb_unit_port(pIorb);
1095 int d = iorb_unit_device(pIorb);
1096 int rc;
1097
1098 if (apt->ControllerCmdLen != sizeof(ATA_CMD))
1099 {
1100 iorb_seterr(pIorb, IOERR_CMD_SYNTAX);
1101 return(-1);
1102 }
1103
1104 rc = ata_cmd(ai, p, d, slot, 0,
1105 AP_SGLIST, pSGList, apt->cSGList,
1106 AP_ATA_CMD, Far16ToFlat(apt->pControllerCmd),
1107 AP_WRITE, !(apt->Flags & PT_DIRECTION_IN),
1108 AP_END);
1109
1110 if (rc == 0)
1111 {
1112 add_workspace(pIorb)->ppfunc = ata_execute_ata_pp;
1113 }
1114
1115 return(rc);
1116}
1117
1118/******************************************************************************
1119 * Post processing function for ata_execute_ata(); the main purpose of this
1120 * function is to copy the received D2H FIS (i.e. the device registers after
1121 * command completion) back to the ATA command structure.
1122 *
1123 * See ata_cmd_to_fis() for an explanation of the mapping.
1124 */
1125void ata_execute_ata_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1126{
1127 AHCI_PORT_DMA *dma_base;
1128 ATA_CMD *cmd;
1129 AD_INFO *ai;
1130 u8 *fis;
1131 int p;
1132
1133 /* get address of D2H FIS */
1134 ai = ad_infos + iorb_unit_adapter(pIorb);
1135 p = iorb_unit_port(pIorb);
1136 dma_base = port_dma_base(ai, p);
1137 fis = dma_base->rx_fis + 0x40;
1138
1139 if (fis[0] != 0x34)
1140 {
1141 /* this is not a D2H FIS - give up silently */
1142 DPRINTF(3,"ata_execute_ata_pp(): D2H FIS type incorrect: %d\n", fis[0]);
1143 add_workspace(pIorb)->complete = 1;
1144 return;
1145 }
1146
1147 /* map D2H FIS to the original ATA controller command structure */
1148 cmd = (ATA_CMD *)Far16ToFlat(((IORB_ADAPTER_PASSTHRU*)pIorb)->pControllerCmd);
1149
1150 cmd->cmd = fis[2];
1151 cmd->device = fis[7];
1152 cmd->features = ((u16) fis[3])
1153 | ((u16) fis[11]);
1154 cmd->lba_l = ((u32) fis[4])
1155 | ((u32) fis[5] << 8)
1156 | ((u32) fis[6] << 16)
1157 | ((u32) fis[8] << 24);
1158 cmd->lba_h = ((u16) fis[9])
1159 | ((u16) fis[10] << 8);
1160 cmd->count = ((u16) fis[12])
1161 | ((u16) fis[13] << 8);
1162
1163 DHEXDUMP(0,cmd, sizeof(*cmd), "ata_execute_ata_pp(): cmd after completion:\n");
1164
1165 /* signal completion to interrupt handler */
1166 add_workspace(pIorb)->complete = 1;
1167}
1168
1169/******************************************************************************
1170 * Request sense information for a failed command. Since there is no "request
1171 * sense" command for ATA devices, we need to read the current error code from
1172 * the AHCI task file register and fabricate the sense information.
1173 *
1174 * NOTES:
1175 *
1176 * - This function must be called right after an ATA command has failed and
1177 * before any other commands are queued on the corresponding port. This
1178 * function is typically called in the port restart context hook which is
1179 * triggered by an AHCI error interrupt.
1180 *
1181 * - The ATA error bits are a complete mess. We'll try and catch the most
1182 * interesting error codes (such as medium errors) and report everything
1183 * else with a generic error code.
1184 */
1185int ata_req_sense(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
1186{
1187 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
1188 u8 *port_mmio = port_base(ai, iorb_unit_port(pIorb));
1189 u32 tf_data = readl(port_mmio + PORT_TFDATA);
1190 u8 err = (tf_data >> 8);
1191 u8 sts = (tf_data);
1192
1193 if (sts & ATA_ERR)
1194 {
1195 if (sts & ATA_DF)
1196 {
1197 /* there is a device-specific error condition */
1198 if (err & ATA_ICRC)
1199 {
1200 iorb_seterr(pIorb, IOERR_ADAPTER_DEVICEBUSCHECK);
1201 }
1202 else if (err & ATA_UNC)
1203 {
1204 iorb_seterr(pIorb, IOERR_MEDIA);
1205 }
1206 else if (err & ATA_IDNF)
1207 {
1208 iorb_seterr(pIorb, IOERR_RBA_ADDRESSING_ERROR);
1209 }
1210 else
1211 {
1212 iorb_seterr(pIorb, IOERR_DEVICE_NONSPECIFIC);
1213 }
1214
1215 }
1216 else
1217 {
1218 iorb_seterr(pIorb, IOERR_DEVICE_NONSPECIFIC);
1219 }
1220 }
1221 else
1222 {
1223 /* this function only gets called when we received an error interrupt */
1224 iorb_seterr(pIorb, IOERR_DEVICE_NONSPECIFIC);
1225 }
1226
1227 /* Return an error to indicate there's no HW command to be submitted and
1228 * that the IORB can be completed "as is" (the upstream code expects the
1229 * IORB error code, if any, to be set when this happens and this is exactly
1230 * what this function is all about).
1231 */
1232 return(-1);
1233}
1234
1235/******************************************************************************
1236 * Extract vendor and device name from an ATA IDENTIFY buffer. Since strings
1237 * in the identify buffer are byte-swapped, we need to swap them back.
1238 */
1239char *ata_dev_name(u16 *id_buf)
1240{
1241 static char dev_name[ATA_ID_PROD_LEN + 1];
1242 char *t = dev_name;
1243 char *s = (char *) (id_buf + ATA_ID_PROD);
1244 int i;
1245
1246 dev_name[sizeof(dev_name)-1] = '\0';
1247
1248 for (i = 0; i < ATA_ID_PROD_LEN / 2; i++) {
1249 *(t++) = s[1];
1250 *(t++) = s[0];
1251 s += 2;
1252 }
1253
1254 return(dev_name);
1255}
1256
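
As an illustration of the swap (the device name is hypothetical): a product string whose raw IDENTIFY bytes read "ASSMNU G..." comes back as "SAMSUNG ..." after the per-word swap above. Note that dev_name is a static buffer, so the returned pointer is only valid until the next call and the function is not reentrant.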
1257/******************************************************************************
1258 * Fabricate ATA READ command based on the capabilities of the corresponding
1259 * device and the parameters set from above (NCQ, etc.).
1260 */
1261static int ata_cmd_read(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot,
1262 ULONG sector, ULONG count, SCATGATENTRY *sg_list,
1263 ULONG sg_cnt)
1264{
1265 int rc;
1266
1267 if (sector >= (1UL << 28) || count > 256 || add_workspace(pIorb)->is_ncq)
1268 {
1269 /* need LBA48 for this command */
1270 if (!ai->ports[p].devs[d].lba48)
1271 {
1272 iorb_seterr(pIorb, IOERR_RBA_LIMIT);
1273 return(-1);
1274 }
1275 if (add_workspace(pIorb)->is_ncq)
1276 {
1277 /* use NCQ read; count goes into feature register, tag into count! */
1278 rc = ata_cmd(ai, p, d, slot, ATA_CMD_FPDMA_READ,
1279 AP_SECTOR_48, sector, 0,
1280 AP_FEATURES, count,
1281 AP_COUNT, (slot << 3), /* tag == slot */
1282 AP_SGLIST, sg_list, sg_cnt,
1283 AP_DEVICE, 0x40,
1284 AP_END);
1285 }
1286 else
1287 {
1288 rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ_EXT,
1289 AP_SECTOR_48, sector, 0,
1290 AP_COUNT, count,
1291 AP_SGLIST, sg_list, sg_cnt,
1292 AP_DEVICE, 0x40,
1293 AP_END);
1294 }
1295
1296 }
1297 else
1298 {
1299 rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ,
1300 AP_SECTOR_28, sector,
1301 AP_COUNT, count & 0xffU,
1302 AP_SGLIST, sg_list, sg_cnt,
1303 AP_DEVICE, 0x40,
1304 AP_END);
1305 }
1306
1307 return(rc);
1308}
1309
1310/******************************************************************************
1311 * Fabricate ATA WRITE command based on the capabilities of the corresponding
1312 * device and the parameters set from above (NCQ, etc.).
1313 */
1314static int ata_cmd_write(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot,
1315 ULONG sector, ULONG count, SCATGATENTRY *sg_list,
1316 ULONG sg_cnt, int write_through)
1317{
1318 int rc;
1319
1320 if (sector >= (1UL << 28) || count > 256 || add_workspace(pIorb)->is_ncq)
1321 {
1322 /* need LBA48 for this command */
1323 if (!ai->ports[p].devs[d].lba48)
1324 {
1325 iorb_seterr(pIorb, IOERR_RBA_LIMIT);
1326 return(-1);
1327 }
1328 if (add_workspace(pIorb)->is_ncq)
1329 {
1330 /* use NCQ write; count goes into feature register, tag into count! */
1331 rc = ata_cmd(ai, p, d, slot, ATA_CMD_FPDMA_WRITE,
1332 AP_SECTOR_48, sector, 0,
1333 AP_FEATURES, count,
1334 /* tag = slot */
1335 AP_COUNT, (slot << 3),
1336 AP_SGLIST, sg_list, sg_cnt,
1337 AP_DEVICE, 0x40,
1338 /* force unit access */
1339 AP_DEVICE, (write_through && !force_write_cache) ? 0x80 : 0,
1340 AP_WRITE, 1,
1341 AP_END);
1342 }
1343 else
1344 {
1345 rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE_EXT,
1346 AP_SECTOR_48, sector, 0,
1347 AP_COUNT, count,
1348 AP_SGLIST, sg_list, sg_cnt,
1349 AP_DEVICE, 0x40,
1350 AP_WRITE, 1,
1351 AP_END);
1352 }
1353 }
1354 else
1355 {
1356 rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE,
1357 AP_SECTOR_28, sector,
1358 AP_COUNT, count & 0xffU,
1359 AP_SGLIST, sg_list, sg_cnt,
1360 AP_DEVICE, 0x40,
1361 AP_WRITE, 1,
1362 AP_END);
1363 }
1364
1365 return(rc);
1366}
1367
1368/******************************************************************************
1369 * Copy block from S/G list to virtual address or vice versa.
1370 */
1371void sg_memcpy(SCATGATENTRY *sg_list, USHORT sg_cnt, ULONG sg_off,
1372 void *buf, USHORT len, SG_MEMCPY_DIRECTION dir)
1373{
1374 USHORT i;
1375 USHORT l;
1376 ULONG phys_addr;
1377 ULONG pos = 0;
1378 char *p;
1379
1380 /* walk through S/G list to find the elements involved in the operation */
1381 for (i = 0; i < sg_cnt && len > 0; i++)
1382 {
1383 if (pos <= sg_off && pos + sg_list[i].XferBufLen > sg_off)
1384 {
1385 /* this S/G element intersects with the block to be copied */
1386 phys_addr = sg_list[i].ppXferBuf + (sg_off - pos);
1387 if ((l = sg_list[i].XferBufLen - (sg_off - pos)) > len)
1388 {
1389 l = len;
1390 }
1391
1392 if (Dev32Help_PhysToLin(phys_addr, l, (PVOID) &p))
1393 {
1394 panic("sg_memcpy(): Dev32Help_PhysToLin() failed");
1395 }
1396 if (dir == SG_TO_BUF)
1397 {
1398 memcpy(buf, p, l);
1399 }
1400 else
1401 {
1402 memcpy(p, buf, l);
1403 }
1404 sg_off += l;
1405 buf = (char *) buf + l;
1406 len -= l;
1407 }
1408
1409 pos += sg_list[i].XferBufLen;
1410 }
1411}
1412
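
A minimal usage sketch of this helper (buffer and offset are hypothetical), mirroring the way ata_read_pp() and ata_write_unaligned() move a single sector between the IORB's S/G list and a bounce buffer:

    char buf[512];

    /* copy the 4th 512-byte block described by the S/G list into buf ... */
    sg_memcpy(pSGList, io->cSGList, 3UL * 512, buf, 512, SG_TO_BUF);
    /* ... and copy (possibly modified) data back to the same S/G location */
    sg_memcpy(pSGList, io->cSGList, 3UL * 512, buf, 512, BUF_TO_SG);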
1413/******************************************************************************
1414 * Halt processing by submitting an internal error. This is a last resort and
1415 * should only be called when the system state is corrupt.
1416 */
1417void panic(char *msg)
1418{
1419 Dev32Help_InternalError(msg, strlen(msg));
1420}
1421