source: trunk/src/os2ahci/ata.c@ 182

Last change on this file since 182 was 182, checked in by David Azarewicz, 9 years ago

Rearranged CAPS configuration.

1/******************************************************************************
2 * ata.c - ATA command processing
3 *
4 * Copyright (c) 2011 thi.guten Software Development
5 * Copyright (c) 2011 Mensys B.V.
6 * Copyright (c) 2013-2016 David Azarewicz
7 *
8 * Authors: Christian Mueller, Markus Thielen
9 *
10 * Parts copied from/inspired by the Linux AHCI driver;
11 * those parts are (c) Linux AHCI/ATA maintainers
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include "os2ahci.h"
29#include "ata.h"
30
31/* -------------------------- macros and constants ------------------------- */
32
33/* ------------------------ typedefs and structures ------------------------ */
34
35/* -------------------------- function prototypes -------------------------- */
36
37static int ata_cmd_read(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot,
38 ULONG sector, ULONG count, SCATGATENTRY *sg_list,
39 ULONG sg_cnt);
40
41static int ata_cmd_write(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot,
42 ULONG sector, ULONG count, SCATGATENTRY *sg_list,
43 ULONG sg_cnt, int write_through);
44
45/* ------------------------ global/static variables ------------------------ */
46
47/* ----------------------------- start of code ----------------------------- */
48
49/******************************************************************************
50 * Initialize AHCI command slot, FIS and S/G list for the specified ATA
51 * command. The command parameters are passed as a variable argument list
52 * of type and value(s). The list is terminated by AP_END.
53 *
54 * Notes:
55 *
56 * - The specified command slot is expected to be idle; no checks are
57 * performed to prevent messing with a busy port.
58 *
59 * - Port multipliers are not supported yet, thus 'd' should always
60 * be 0 for the time being.
61 *
62 * - 'cmd' is passed as 16-bit integer because the compiler would push
63 * a 'u8' as 16-bit value (it's a fixed argument) and the stdarg
64 * macros would screw up the address of the first variable argument
65 * if the size of the last fixed argument didn't match what the
66 * compiler pushed on the stack.
67 *
68 * Return values:
69 * 0 : success
70 * > 0 : could not map all S/G entries; the return value is the number of
71 * S/G entries that could be mapped.
72 * < 0 : other error
73 */
74int ata_cmd(AD_INFO *ai, int p, int d, int slot, int cmd, ...)
75{
76 va_list va;
77 va_start(va, cmd);
78 return(v_ata_cmd(ai, p, d, slot, cmd, va));
79}
80
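/* Illustrative usage sketch (not part of the driver): a caller builds a
 * command from parameter tag/value pairs and terminates the list with
 * AP_END. The variable names here are placeholders; the call mirrors the
 * 28-bit read issued by ata_cmd_read() further down in this file:
 *
 *   rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ,
 *                AP_SECTOR_28, sector,
 *                AP_COUNT,     count & 0xffU,
 *                AP_SGLIST,    sg_list, sg_cnt,
 *                AP_DEVICE,    0x40,
 *                AP_END);
 */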
81int v_ata_cmd(AD_INFO *ai, int p, int d, int slot, int cmd, va_list va)
82{
83 AHCI_PORT_DMA *dma_base_virt;
84 AHCI_CMD_HDR *cmd_hdr;
85 AHCI_CMD_TBL *cmd_tbl;
86 SCATGATENTRY *sg_list = NULL;
87 SCATGATENTRY sg_single;
88 ATA_PARM ap;
89 ATA_CMD ata_cmd;
90 void *atapi_cmd = NULL;
91 u32 dma_base_phys;
92 u32 atapi_cmd_len = 0;
93 u32 ahci_flags = 0;
94 u32 sg_cnt = 0;
95 u32 i;
96 u32 n;
97
98 /* --------------------------------------------------------------------------
99 * Initialize ATA command. The ATA command is set up with the main command
100 * value and a variable list of additional parameters such as the sector
101 * address, transfer count, ...
102 */
103 memset(&ata_cmd, 0x00, sizeof(ata_cmd));
104 ata_cmd.cmd = cmd;
105
106 /* parse variable arguments */
107 do
108 {
109 switch ((ap = va_arg(va, ATA_PARM)))
110 {
111
112 case AP_AHCI_FLAGS:
113 ahci_flags |= va_arg(va, u32);
114 break;
115
116 case AP_WRITE:
117 if (va_arg(va, u32) != 0)
118 {
119 ahci_flags |= AHCI_CMD_WRITE;
120 }
121 break;
122
123 case AP_FEATURES:
124 /* ATA features word */
125 ata_cmd.features |= va_arg(va, u32);
126 break;
127
128 case AP_COUNT:
129 /* transfer count */
130 ata_cmd.count = va_arg(va, u32);
131 break;
132
133 case AP_SECTOR_28:
134 /* 28-bit sector address */
135 ata_cmd.lba_l = va_arg(va, u32);
136 if (ata_cmd.lba_l & 0xf0000000UL)
137 {
138 DPRINTF(0,"error: LBA-28 address %d has more than 28 bits\n", ata_cmd.lba_l);
139 return(ATA_CMD_INVALID_PARM);
140 }
141 /* add upper 4 bits to device field */
142 ata_cmd.device |= (ata_cmd.lba_l >> 24) & 0x0fU;
143 /* only lower 24 bits come into lba_l */
144 ata_cmd.lba_l &= 0x00ffffffUL;
145 break;
146
147 case AP_SECTOR_48:
148 /* 48-bit sector address */
149 ata_cmd.lba_l = va_arg(va, u32);
150 ata_cmd.lba_h = va_arg(va, u32);
151 break;
152
153 case AP_DEVICE:
154 /* ATA device byte; note that this byte contains the highest
155 * 4 bits of LBA-28 address; we have to leave them alone here. */
156 ata_cmd.device |= va_arg(va, u32) & 0xf0;
157 break;
158
159 case AP_SGLIST:
160 /* scatter/gather list in SCATGATENTRY/count format */
161 sg_list = va_arg(va, void *);
162 sg_cnt = va_arg(va, u32);
163 break;
164
165 case AP_VADDR:
166 /* virtual buffer address in addr/len format (up to 4K) */
167 sg_single.ppXferBuf = MemPhysAdr(va_arg(va, void *));
168 sg_single.XferBufLen = va_arg(va, u32);
169 sg_list = &sg_single;
170 sg_cnt = 1;
171 break;
172
173 case AP_ATAPI_CMD:
174 /* ATAPI command */
175 atapi_cmd = va_arg(va, void *);
176 atapi_cmd_len = va_arg(va, u32);
177 ahci_flags |= AHCI_CMD_ATAPI;
178 break;
179
180 case AP_ATA_CMD:
181 /* ATA command "pass-through" */
182 memcpy(&ata_cmd, va_arg(va, void *), sizeof(ATA_CMD));
183 break;
184
185 case AP_END:
186 break;
187
188 default:
189 DPRINTF(0,"error: v_ata_cmd() called with invalid parameter type (%d)\n", (int) ap);
190 return(ATA_CMD_INVALID_PARM);
191 }
192
193 } while (ap != AP_END);
194
195 /* --------------------------------------------------------------------------
196 * Fill in AHCI ATA command information. This includes the port command slot,
197 * the corresponding command FIS and the S/G list. The layout of the AHCI
198 * port DMA region is based on the Linux AHCI driver and looks like this:
199 *
200 * - 32 AHCI command headers (AHCI_CMD_HDR), 32 bytes each
201 * - 1 FIS receive area with 256 bytes (AHCI_RX_FIS_SZ)
202 * - 32 AHCI command tables, each consisting of
203 * - 64 bytes for command FIS
204 * - 16 bytes for ATAPI commands
205 * - 48 bytes reserved
206 * - 48 S/G entries (AHCI_SG), 32 bytes each
207 *
208 * Since the whole DMA buffer for all ports is larger than 64KB and we need
209 * multiple segments to address all of them, there are no virtual pointers
210 * to the individual elements in AD_INFO. Instead, we're relying on macros
211 * for getting the base address of a particular port's DMA region, then
212 * map a structure on top of that for convenience (AHCI_PORT_DMA).
213 */
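/* For illustration only: the layout described above corresponds roughly to a
 * structure of the following shape (the actual definition lives in the
 * driver's headers; sizes are taken from the comment above, member names
 * from the code below):
 *
 *   typedef struct {
 *     AHCI_CMD_HDR cmd_hdr[32];             // 32 command headers, 32 bytes each
 *     u8           rx_fis[AHCI_RX_FIS_SZ];  // 256-byte FIS receive area
 *     AHCI_CMD_TBL cmd_tbl[32];             // cmd FIS + ATAPI cmd + S/G list, each
 *   } AHCI_PORT_DMA;
 */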
214 dma_base_virt = port_dma_base(ai, p);
215 dma_base_phys = port_dma_base_phys(ai, p);
216
217 /* AHCI command header */
218 cmd_hdr = &dma_base_virt->cmd_hdr[slot];
219 memset(cmd_hdr, 0x00, sizeof(*cmd_hdr));
220 cmd_hdr->options = ((d & 0x0f) << 12);
221 cmd_hdr->options |= ahci_flags; /* AHCI command flags */
222 cmd_hdr->options |= 5; /* length of command FIS in 32-bit words */
223 cmd_hdr->tbl_addr = dma_base_phys + offsetof(AHCI_PORT_DMA, cmd_tbl[slot]);
224 /* DAZ can use MemPhysAdr(&dma_base_virt->cmd_tbl[slot]), but is probably slower. */
225
226 /* AHCI command table */
227 cmd_tbl = &dma_base_virt->cmd_tbl[slot];
228 memset(cmd_tbl, 0x00, sizeof(*cmd_tbl));
229 ata_cmd_to_fis(cmd_tbl->cmd_fis, &ata_cmd, d);
230
231 if (atapi_cmd != NULL)
232 {
233 /* copy ATAPI command */
234 memcpy(cmd_tbl->atapi_cmd, atapi_cmd, atapi_cmd_len);
235 }
236
237 /* PRDT (S/G list)
238 *
239 * - The S/G list for AHCI adapters is limited to 22 bits for the transfer
240 * size of each element, thus we need to split S/G elements larger than
241 * 2^22 bytes into multiple AHCI_SG elements.
242 *
243 * - The S/G element size for AHCI is what the spec calls '0'-based
244 * (i.e. 0 means 1 byte). On top of that, the spec requires S/G transfer
245 * sizes to be even in the context of 16-bit transfers, thus bit 0 of the
246 * stored ('0'-based) size always needs to be set.
247 *
248 * - AHCI_MAX_SG_ELEMENT_LEN defines the maximum size of an AHCI S/G
249 * element in bytes, ignoring the '0'-based methodology (i.e. 1 << 22).
250 *
251 * - There's a limit on the maximum number of S/G elements in the port DMA
252 * buffer (AHCI_MAX_SG) which is lower than the HW maximum. It's beyond
253 * the control of this function to split commands which require more
254 * than AHCI_MAX_SG entries. In order to help the caller, the return value
255 * of this function will indicate how many OS/2 S/G entries were
256 * successfully mapped.
257 */
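/* Worked example (illustrative): a single 6 MB OS/2 S/G element is split
 * into two AHCI_SG entries of 4 MB (AHCI_MAX_SG_ELEMENT_LEN) and 2 MB; the
 * stored sizes are '0'-based, i.e. 0x3fffff and 0x1fffff (see "chunk - 1"
 * in the loop below).
 */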
258 for (i = n = 0; i < sg_cnt; i++)
259 {
260 u32 sg_addr = sg_list[i].ppXferBuf;
261 u32 sg_size = sg_list[i].XferBufLen;
262
263 do
264 {
265 u32 chunk = (sg_size > AHCI_MAX_SG_ELEMENT_LEN) ? AHCI_MAX_SG_ELEMENT_LEN : sg_size;
266 if (n >= AHCI_MAX_SG)
267 {
268 /* couldn't store all S/G elements in our DMA buffer */
269 DPRINTF(0,"ata_cmd(): too many S/G elements\n");
270 return(i - 1);
271 }
272 if ((sg_addr & 1) || (chunk & 1))
273 {
274 DPRINTF(0,"error: ata_cmd() called with unaligned S/G element(s)\n");
275 return(ATA_CMD_UNALIGNED_ADDR);
276 }
277 cmd_tbl->sg_list[n].addr = sg_addr;
278 cmd_tbl->sg_list[n].size = chunk - 1;
279 sg_addr += chunk;
280 sg_size -= chunk;
281 n++;
282 } while (sg_size > 0);
283 }
284
285 /* set final S/G count in AHCI command header */
286 cmd_hdr->options |= n << 16;
287
288 if (D32g_DbgLevel >= 7)
289 {
290 DPRINTF(0,"ATA command for %d.%d.%d, slot %d:\n", ad_no(ai), p, d, slot);
291 dHexDump(0,cmd_hdr, offsetof(AHCI_CMD_HDR, reserved), "cmd_hdr: ");
292 dHexDump(0,&ata_cmd, sizeof(ata_cmd), "ata_cmd: ");
293 if (atapi_cmd != NULL)
294 {
295 dHexDump(0,atapi_cmd, atapi_cmd_len, "atapi_cmd: ");
296 }
297 if (n > 0)
298 {
299 dHexDump(0,cmd_tbl->sg_list, sizeof(*cmd_tbl->sg_list) * n, "sg_list: ");
300 }
301 }
302
303 return(ATA_CMD_SUCCESS);
304}
305
306/******************************************************************************
307 * Fill SATA command FIS with values extracted from an ATA command structure.
308 * The command FIS buffer (fis) is expected to be initialized to 0s. The
309 * structure of the FIS maps to the ATA shadow register block, including
310 * registers which can be written twice to store 16 bits (called 'exp').
311 *
312 * The FIS structure looks like this (using LSB notation):
313 *
314 * +----------------+----------------+----------------+----------------+
315 * 00 | FIS type (27h) | C|R|R|R|PMP | Command | Features |
316 * +----------------+----------------+----------------+----------------+
317 * 04 | LBA 7:0 | LBA 15:8 | LBA 23:16 | R|R|R|D|Head |
318 * +----------------+----------------+----------------+----------------+
320 * 08 | LBA 31:24 | LBA 39:32 | LBA 47:40 | Features exp |
320 * +----------------+----------------+----------------+----------------+
321 * 12 | Count 7:0 | Count 15:8 | Reserved | Control |
322 * +----------------+----------------+----------------+----------------+
323 * 16 | Reserved | Reserved | Reserved | Reserved |
324 * +----------------+----------------+----------------+----------------+
325 */
326void ata_cmd_to_fis(u8 *fis, ATA_CMD *ata_cmd, int d)
327{
328 fis[0] = 0x27; /* register - host to device FIS */
329 fis[1] = (u8) (d & 0xf); /* port multiplier number */
330 fis[1] |= 0x80; /* bit 7 indicates Command FIS */
331 fis[2] = (u8) ata_cmd->cmd;
332 fis[3] = (u8) ata_cmd->features;
333
334 fis[4] = (u8) ata_cmd->lba_l;
335 fis[5] = (u8) (ata_cmd->lba_l >> 8);
336 fis[6] = (u8) (ata_cmd->lba_l >> 16);
337 fis[7] = (u8) ata_cmd->device;
338
339 fis[8] = (u8) (ata_cmd->lba_l >> 24);
340 fis[9] = (u8) ata_cmd->lba_h;
341 fis[10] = (u8) (ata_cmd->lba_h >> 8);
342 fis[11] = (u8) (ata_cmd->features >> 8);
343
344 fis[12] = (u8) ata_cmd->count;
345 fis[13] = (u8) (ata_cmd->count >> 8);
346}
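/* Worked example (illustrative): a command prepared with
 *   AP_SECTOR_28, 0x01234567, AP_COUNT, 8, AP_DEVICE, 0x40
 * arrives here with device = 0x41 (top LBA nibble folded in by v_ata_cmd())
 * and lba_l = 0x234567, so the resulting FIS bytes are:
 *
 *   fis[0..3]  = 27 80 <cmd> 00     (type, C bit set, command, features)
 *   fis[4..7]  = 67 45 23 41        (LBA 7:0, 15:8, 23:16, device/head)
 *   fis[8..13] = 00 00 00 00 08 00  (LBA 31:24, 39:32, 47:40, feat exp, count)
 */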
347
348/******************************************************************************
349 * Get index in S/G list for the number of transferred sectors in the IORB.
350 *
351 * Returning io->cSGList indicates an error.
352 *
353 * NOTE: OS/2 makes sure S/G lists are set up such that entries at the HW
354 * limit will never cross sector boundaries. This means that splitting
355 * S/G lists into multiple commands can be done without editing the S/G
356 * lists.
357 */
358u16 ata_get_sg_indx(IORB_EXECUTEIO *io)
359{
360 ULONG offset = io->BlocksXferred * io->BlockSize;
361 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
362 USHORT i;
363
364 for (i = 0; i < io->cSGList && offset > 0; i++)
365 {
366 offset -= pSGList[i].XferBufLen;
367 }
368
369 return(i);
370}
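/* Example (illustrative): with BlockSize = 512, BlocksXferred = 8 and an S/G
 * list of two 4096-byte elements, offset starts at 4096, the first element
 * consumes it completely, and the function returns 1: the next partial
 * command starts with the second S/G element.
 */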
371
372/******************************************************************************
373 * Get max S/G count which will fit into our HW S/G buffers. This function is
374 * called when the S/G list is too long and we need to split the IORB into
375 * multiple commands. It returns both the number of sectors and S/G list
376 * elements that we can handle in a single command.
377 *
378 * The parameter 'sg_indx' indicates the current start index in the S/G list
379 * (0 if this is the first command iteration).
380 *
381 * The parameter 'sg_max' is the return value of v_ata_cmd() and indicates
382 * how many S/G elements were successfully mapped. Whatever we return needs to
383 * be less or equal to this value.
384 *
385 * Returning 0 in *sg_cnt indicates an error.
386 *
387 * NOTE: OS/2 makes sure S/G lists are set up such that entries at HW limits
388 * will never cross sector boundaries. This means that splitting S/G
389 * lists into multiple commands can be done without editing S/G list
390 * elements. Since AHCI only allows 22 bits for each S/G element, the
391 * hardware limits are reported as AHCI_MAX_SG / 2 but will vary based
392 * on the actual length of S/G elements. This function looks for the
393 * maximum number of S/G elements that can be mapped on sector
394 * boundaries which will still fit into our HW S/G list.
395 */
396void ata_max_sg_cnt(IORB_EXECUTEIO *io, USHORT sg_indx, USHORT sg_max,
397 USHORT *sg_cnt, USHORT *sector_cnt)
398{
399 ULONG max_sector_cnt = 0;
400 USHORT max_sg_cnt = 0;
401 ULONG offset = 0;
402 USHORT i;
403 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
404
405 for (i = sg_indx; i < io->cSGList; i++)
406 {
407 if (i - sg_indx >= sg_max)
408 {
409 /* we're beyond the number of S/G elements we can map */
410 break;
411 }
412
413 offset += pSGList[i].XferBufLen;
414 if (offset % io->BlockSize == 0)
415 {
416 /* this S/G element ends on a sector boundary */
417 max_sector_cnt = offset / io->BlockSize;
418 max_sg_cnt = i + 1;
419 }
420 }
421
422 /* return the best match we found (0 indicating failure) */
423 *sector_cnt = max_sector_cnt;
424 *sg_cnt = max_sg_cnt;
425}
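/* Example (illustrative): sg_indx = 0, sg_max = 3, BlockSize = 512 and S/G
 * element lengths 1024, 768, 256, 512. The running offset hits a sector
 * boundary after element 0 (1024 bytes) and again after element 2 (2048
 * bytes); element 3 is beyond sg_max, so the result is *sg_cnt = 3 and
 * *sector_cnt = 4.
 */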
426
427
428/******************************************************************************
429 * Get device or media geometry. Device and media geometry are expected to be
430 * the same for non-removable devices, which will always be the case for the
431 * ATA devices we're dealing with (hard disks). ATAPI is a different story
432 * and is handled by atapi_get_geometry().
433 */
434int ata_get_geometry(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
435{
436 ADD_WORKSPACE *aws = add_workspace(pIorb);
437 int rc;
438
439 /* allocate buffer for ATA identify information */
440 if ((aws->buf = MemAlloc(ATA_ID_WORDS * sizeof(u16))) == NULL)
441 {
442 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
443 return(-1);
444 }
445
446 /* request ATA identify information */
447 aws->ppfunc = ata_get_geometry_pp;
448 rc = ata_cmd(ad_infos + iorb_unit_adapter(pIorb),
449 iorb_unit_port(pIorb),
450 iorb_unit_device(pIorb),
451 slot,
452 ATA_CMD_ID_ATA,
453 AP_VADDR, (void *) aws->buf, ATA_ID_WORDS * sizeof(u16),
454 AP_END);
455
456 if (rc != 0)
457 {
458 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
459 }
460
461 return(rc);
462}
463
464/* Adjust the cylinder count in the physical
465 * geometry to the last full cylinder.
466 */
467int adjust_cylinders(GEOMETRY *geometry, ULONG TotalSectors)
468{
469 USHORT SecPerCyl;
470 int rc = FALSE;
471
472 geometry->TotalSectors = TotalSectors;
473 SecPerCyl = geometry->SectorsPerTrack * geometry->NumHeads;
474 if (SecPerCyl > 0)
475 {
476 ULONG TotalCylinders = TotalSectors / SecPerCyl;
477
478 geometry->TotalSectors = TotalCylinders * SecPerCyl;
479 geometry->TotalCylinders = TotalCylinders;
480 if (TotalCylinders >> 16)
481 {
482 geometry->TotalCylinders = 65535;
483 rc = TRUE;
484 }
485 }
486 return (rc);
487}
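/* Example (illustrative): TotalSectors = 1,000,000 with 255 heads and 63
 * sectors per track gives SecPerCyl = 16065 and TotalCylinders = 62; the
 * geometry's TotalSectors is trimmed to the last full cylinder,
 * 62 * 16065 = 996,030, and since 62 fits in 16 bits the function returns
 * FALSE.
 */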
488
489/* Calculate the logical geometry based on the input physical geometry
490 * using the LBA Assist Translation algorithm.
491 */
492#define BIOS_MAX_CYLINDERS 1024l
493#define BIOS_MAX_NUMHEADS 255
494#define BIOS_MAX_SECTORSPERTRACK 63
495void log_geom_calculate_LBA_assist(GEOMETRY *geometry, ULONG TotalSectors)
496{
497 UCHAR numSpT = BIOS_MAX_SECTORSPERTRACK;
498 UCHAR numHeads = BIOS_MAX_NUMHEADS;
499 ULONG Cylinders;
500
501 if (TotalSectors <= (BIOS_MAX_CYLINDERS * 128 * BIOS_MAX_SECTORSPERTRACK))
502 {
503 USHORT temp = (TotalSectors - 1) / (BIOS_MAX_CYLINDERS * BIOS_MAX_SECTORSPERTRACK);
504
505 if (temp < 16) numHeads = 16;
506 else if (temp < 32) numHeads = 32;
507 else if (temp < 64) numHeads = 64;
508 else numHeads = 128;
509 }
510
511 do
512 {
513 Cylinders = TotalSectors / (USHORT)(numHeads * numSpT);
514 if (Cylinders >> 16)
515 {
516 if (numSpT < 128)
517 numSpT = (numSpT << 1) | 1;
518 else
519 Cylinders = 65535; // overflow !
520 }
521 } while (Cylinders >> 16);
522
523 geometry->TotalCylinders = Cylinders;
524 geometry->NumHeads = numHeads;
525 geometry->SectorsPerTrack = numSpT;
526}
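/* Example (illustrative): a 1 TB disk reporting 1,953,525,168 sectors is too
 * large for the 128-head branch, so the loop starts with 255 heads and 63
 * sectors per track; 1,953,525,168 / 16065 = 121,601 cylinders overflows
 * 16 bits, numSpT is bumped to 127, and the final logical geometry becomes
 * 60,321 cylinders x 255 heads x 127 sectors per track.
 */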
527
528int check_lvm(IORBH *pIorb, ULONG sector)
529{
530 DLA_Table_Sector *pDLA = (DLA_Table_Sector*)add_workspace(pIorb)->buf;
531 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
532 GEOMETRY *geometry = ((IORB_GEOMETRY*)pIorb)->pGeometry;
533 int p = iorb_unit_port(pIorb);
534 int rc;
535
536 rc = ahci_exec_polled_cmd(ai, p, 0, 500, ATA_CMD_READ,
537 AP_SECTOR_28, sector-1,
538 AP_COUNT, 1,
539 AP_VADDR, (void *)pDLA, 512,
540 AP_DEVICE, 0x40,
541 AP_END);
542 if (rc) return 0;
543
544 DHEXDUMP(3,pDLA, sizeof(DLA_Table_Sector), "DLA sector %d:\n", sector-1);
545
546 if ((pDLA->DLA_Signature1 == DLA_TABLE_SIGNATURE1) && (pDLA->DLA_Signature2 == DLA_TABLE_SIGNATURE2)) {
547 DPRINTF(3,"is_lvm_geometry found at sector %d\n", sector-1);
548 geometry->TotalCylinders = pDLA->Cylinders;
549 geometry->NumHeads = pDLA->Heads_Per_Cylinder;
550 geometry->SectorsPerTrack = pDLA->Sectors_Per_Track;
551 geometry->TotalSectors = pDLA->Cylinders * pDLA->Heads_Per_Cylinder * pDLA->Sectors_Per_Track;
552 return 1;
553 }
554
555 return 0;
556}
557
558/******************************************************************************
559 * Try to read LVM information from the disk. If found, use the LVM geometry.
560 * This function will only work at init time. A better strategy would be to
561 * calculate the geometry during ahci_scan_ports and save it away and then just
562 * return the saved values when ata_get_geometry() is called.
563 */
564int is_lvm_geometry(IORBH *pIorb)
565{
566 GEOMETRY *geometry = ((IORB_GEOMETRY*)pIorb)->pGeometry;
567 ULONG sector;
568
569 if (init_complete) return 0; /* We cannot use ahci_exec_polled_cmd() after init_complete */
570
571 if (use_lvm_info)
572 {
573 #ifdef DEBUG
574 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
575 int p = iorb_unit_port(pIorb);
576 int d = iorb_unit_device(pIorb);
577 DPRINTF(3,"is_lvm_geometry (%d.%d.%d)\n", ad_no(ai), p, d);
578 #endif
579
580 /* First check the sector reported by the hardware */
581 if (check_lvm(pIorb, geometry->SectorsPerTrack)) return 1;
582
583 for (sector = 255; sector >= 63; sector >>= 1)
584 {
585 if (sector == geometry->SectorsPerTrack) continue;
586 if (check_lvm(pIorb, sector)) return 1;
587 }
588 }
589
590 return 0;
591}
592
593/******************************************************************************
594 * Post processing function for ata_get_geometry(): convert the ATA identify
595 * information to OS/2 IOCC_GEOMETRY information.
596 */
597void ata_get_geometry_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb)
598{
599 GEOMETRY *geometry = ((IORB_GEOMETRY*)pIorb)->pGeometry;
600 USHORT geometry_len = ((IORB_GEOMETRY *)pIorb)->GeometryLen;
601 u16 *id_buf = add_workspace(pIorb)->buf;
602 int a = iorb_unit_adapter(pIorb);
603 int p = iorb_unit_port(pIorb);
604 char *Method;
605
606 * Fill in geometry information; the ATA-8 spec declares the geometry
607 * fields in the ATA ID buffer as obsolete but it's still the best
608 * guess in most cases. If the information stored in the geometry
609 * fields is apparently incorrect, we'll use the algorithm typically
610 * used by SCSI adapters and modern PC BIOS versions:
611 *
612 * - 512 bytes per sector
613 * - 255 heads
614 * - 63 sectors per track (or 56 with the parameter "/4")
615 * - x cylinders (calculated)
616 *
617 * Please note that os2ahci currently does not natively support ATA sectors
618 * larger than 512 bytes and therefore relies on the translation logic built
619 * into the corresponding ATA disks. In order to prevent file systems that
620 * use block sizes larger than 512 bytes (FAT, JFS, ...) from ending up on
621 * incorrectly aligned physical sector accesses, hence using more physical
622 * I/Os than necessary, the command line parameter "/4" can be used to force
623 * a track size of 56 sectors. This way, partitions will start on 4K
624 * boundaries.
625 *
626 * Another limitation is that OS/2 has a 32-bit variable for the total number
627 * of sectors, limiting the maximum capacity to roughly 2TB. This is another
628 * issue that needs to be addressed sooner or later; large sectors could
629 * raise this limit to something like 8TB but this is not really much of a
630 * difference. Maybe there's something in later DDKs that allows more than
631 * 32 bits?
632 */
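/* Illustrative arithmetic for the "/4" note above: with the default 63
 * sectors per track the first partition conventionally starts at LBA 63,
 * which is not a multiple of 8 and therefore not 4K-aligned on a drive with
 * 4K physical sectors; a track size of 56 puts it at LBA 56 = 7 * 8, i.e.
 * on a 4K boundary.
 */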
633 memset(geometry, 0x00, geometry_len);
634 geometry->BytesPerSector = ATA_SECTOR_SIZE;
635
636 /* extract total number of sectors */
637 if (id_buf[ATA_ID_CFS_ENABLE_2] & 0x400)
638 {
639 /* 48-bit LBA supported */
640 if (ATA_CAPACITY48_H(id_buf) != 0)
641 {
642 /* more than 32 bits for number of sectors */
643 DPRINTF(0,"warning: limiting disk %d.%d.%d to 2TB\n",
644 iorb_unit_adapter(pIorb), iorb_unit_port(pIorb),
645 iorb_unit_device(pIorb));
646 geometry->TotalSectors = 0xffffffffUL;
647 }
648 else
649 {
650 geometry->TotalSectors = ATA_CAPACITY48_L(id_buf);
651 }
652 }
653 else
654 {
655 /* 28-bit LBA */
656 geometry->TotalSectors = ATA_CAPACITY(id_buf) & 0x0fffffffUL;
657 }
658
659 Method = "None";
660 /* fabricate the remaining geometry fields */
661 if (track_size[a][p] != 0)
662 {
663 /* A specific track size has been requested for this port; this is
664 * typically done for disks with 4K sectors to make sure partitions
665 * start on 8-sector boundaries (parameter "/4").
666 */
667 geometry->NumHeads = 255;
668 geometry->SectorsPerTrack = track_size[a][p];
669 geometry->TotalCylinders = geometry->TotalSectors / ((u32) geometry->NumHeads * (u32) geometry->SectorsPerTrack);
670 Method = "Custom";
671 }
672 else if (CUR_HEADS(id_buf) > 0 && CUR_CYLS(id_buf) > 0 && CUR_SECTORS(id_buf) > 0 &&
673 CUR_CAPACITY(id_buf) == (u32) CUR_HEADS(id_buf) * (u32) CUR_CYLS(id_buf) * (u32) CUR_SECTORS(id_buf))
674 {
675 /* BIOS-supplied (aka "current") geometry values look valid */
676 geometry->NumHeads = CUR_HEADS(id_buf);
677 geometry->SectorsPerTrack = CUR_SECTORS(id_buf);
678 geometry->TotalCylinders = CUR_CYLS(id_buf);
679 Method = "BIOS";
680 }
681 else if (ATA_HEADS(id_buf) > 0 && ATA_CYLS(id_buf) > 0 && ATA_SECTORS(id_buf) > 0)
682 {
683 /* ATA-supplied values for geometry look valid */
684 geometry->NumHeads = ATA_HEADS(id_buf);
685 geometry->SectorsPerTrack = ATA_SECTORS(id_buf);
686 geometry->TotalCylinders = ATA_CYLS(id_buf);
687 Method = "ATA";
688 }
689 else
690 {
691 /* use typical SCSI geometry */
692 geometry->NumHeads = 255;
693 geometry->SectorsPerTrack = 63;
694 geometry->TotalCylinders = geometry->TotalSectors / ((u32) geometry->NumHeads * (u32) geometry->SectorsPerTrack);
695 Method = "SCSI";
696 }
697
698 DPRINTF(2,"Physical geometry: %d cylinders, %d heads, %d sectors per track (%dMB) (%s)\n",
699 geometry->TotalCylinders, geometry->NumHeads, geometry->SectorsPerTrack,
700 (geometry->TotalSectors / 2048), Method);
701
702 /* Fixup the geometry in case the geometry reported by the BIOS is bad */
703 if (adjust_cylinders(geometry, geometry->TotalSectors))
704 { // cylinder overflow
705 log_geom_calculate_LBA_assist(geometry, geometry->TotalSectors);
706 geometry->TotalSectors = (USHORT)(geometry->NumHeads * geometry->SectorsPerTrack) * (ULONG)geometry->TotalCylinders;
707 }
708 adjust_cylinders(geometry, geometry->TotalSectors);
709
710 DPRINTF(2,"Logical geometry: %d cylinders, %d heads, %d sectors per track (%dMB) (%s)\n",
711 geometry->TotalCylinders, geometry->NumHeads, geometry->SectorsPerTrack,
712 (geometry->TotalSectors / 2048), Method);
713
714 if (is_lvm_geometry(pIorb)) Method = "LVM";
715 ad_infos[a].ports[p].devs[0].dev_info.Cylinders = geometry->TotalCylinders;
716 ad_infos[a].ports[p].devs[0].dev_info.HeadsPerCylinder = geometry->NumHeads;
717 ad_infos[a].ports[p].devs[0].dev_info.SectorsPerTrack = geometry->SectorsPerTrack;
718 ad_infos[a].ports[p].devs[0].dev_info.TotalSectors = geometry->TotalSectors;
719 ad_infos[a].ports[p].devs[0].dev_info.Method = Method;
720
721 DPRINTF(2,"Reported geometry: %d cylinders, %d heads, %d sectors per track (%dMB) (%s)\n",
722 geometry->TotalCylinders, geometry->NumHeads, geometry->SectorsPerTrack,
723 (geometry->TotalSectors / 2048), Method);
724
725 /* tell interrupt handler that this IORB is complete */
726 add_workspace(pIorb)->complete = 1;
727}
728
729/******************************************************************************
730 * Test whether unit is ready.
731 */
732int ata_unit_ready(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
733{
734 /* This is a NOP for ATA devices (at least right now); returning an error
735 * without setting an error code means ahci_exec_iorb() will not queue any
736 * HW command and the IORB will complete successfully.
737 */
738 ((IORB_UNIT_STATUS *)pIorb)->UnitStatus = US_READY | US_POWER;
739 return(-1);
740}
741
742/******************************************************************************
743 * Read sectors from AHCI device.
744 */
745int ata_read(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
746{
747 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
748 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
749 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
750 ULONG sector = io->RBA + io->BlocksXferred;
751 USHORT count = io->BlockCount - io->BlocksXferred;
752 USHORT sg_indx;
753 USHORT sg_cnt;
754 int p = iorb_unit_port(pIorb);
755 int d = iorb_unit_device(pIorb);
756 int rc;
757
758 if (io->BlockCount == 0)
759 {
760 /* NOP; return -1 without error in IORB to indicate success */
761 return(-1);
762 }
763
764 if (add_workspace(pIorb)->unaligned)
765 {
766 /* unaligned S/G addresses present; need to use double buffers */
767 return(ata_read_unaligned(pIorb, slot));
768 }
769
770 /* Kludge: some I/O commands during boot use excessive S/G buffer lengths
771 * which cause NCQ commands to lock up. If there's only one S/G element
772 * and this element is already larger than what we can derive from the sector
773 * count, we'll adjust that element.
774 */
775 if (io->BlocksXferred == 0 && io->cSGList == 1 &&
776 pSGList[0].XferBufLen > (ULONG) io->BlockCount * io->BlockSize)
777 {
778 pSGList[0].XferBufLen = (ULONG) io->BlockCount * io->BlockSize;
779 }
780
781 /* prepare read command while keeping an eye on S/G count limitations */
782 do
783 {
784 sg_indx = ata_get_sg_indx(io);
785 sg_cnt = io->cSGList - sg_indx;
786 if ((rc = ata_cmd_read(pIorb, ai, p, d, slot, sector, count,
787 pSGList + sg_indx, sg_cnt)) > 0)
788 {
789 /* couldn't map all S/G elements */
790 ata_max_sg_cnt(io, sg_indx, rc, &sg_cnt, &count);
791 }
792 } while (rc > 0 && sg_cnt > 0);
793
794 if (rc == 0)
795 {
796 add_workspace(pIorb)->blocks = count;
797 add_workspace(pIorb)->ppfunc = ata_read_pp;
798 }
799 else if (rc > 0)
800 {
801 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD);
802 }
803 else if (rc == ATA_CMD_UNALIGNED_ADDR)
804 {
805 /* unaligned S/G addresses detected; need to use double buffers */
806 add_workspace(pIorb)->unaligned = 1;
807 return(ata_read_unaligned(pIorb, slot));
808
809 }
810 else
811 {
812 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
813 }
814
815 return(rc);
816}
817
818/******************************************************************************
819 * Read sectors from AHCI device with unaligned S/G element addresses. AHCI
820 * only allows aligned S/G addresses while OS/2 doesn't impose this kind of
821 * restriction. This doesn't happen very often but when it does, we need to
822 * use a transfer buffer and copy the data manually.
823 */
824int ata_read_unaligned(IORBH *pIorb, int slot)
825{
826 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
827 ADD_WORKSPACE *aws = add_workspace(pIorb);
828 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
829 ULONG sector = io->RBA + io->BlocksXferred;
830 SCATGATENTRY sg_single;
831 int p = iorb_unit_port(pIorb);
832 int d = iorb_unit_device(pIorb);
833 int rc;
834
835 DPRINTF(7,"ata_read_unaligned(%d.%d.%d, %d)\n", ad_no(ai), p, d, sector);
836
837 /* allocate transfer buffer */
838 if ((aws->buf = MemAlloc(io->BlockSize)) == NULL)
839 {
840 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
841 return(-1);
842 }
843
844 /* prepare read command using transfer buffer */
845 sg_single.ppXferBuf = MemPhysAdr(aws->buf);
846 sg_single.XferBufLen = io->BlockSize;
847 rc = ata_cmd_read(pIorb, ai, p, d, slot, sector, 1, &sg_single, 1);
848
849 if (rc == 0) {
850 add_workspace(pIorb)->blocks = 1;
851 add_workspace(pIorb)->ppfunc = ata_read_pp;
852
853 } else if (rc > 0) {
854 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD);
855
856 } else {
857 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
858 }
859
860 return(rc);
861}
862
863/******************************************************************************
864 * Post processing function for ata_read(); this function updates the
865 * BlocksXferred counter in the IORB and, if not all blocks have been
866 * transferred, requeues the IORB to process the remaining sectors. It also
867 * takes care of copying data from the transfer buffer for unaligned reads.
868 */
869void ata_read_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb)
870{
871 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
872 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
873 ADD_WORKSPACE *aws = add_workspace(pIorb);
874
875 if (aws->unaligned)
876 {
877 /* copy transfer buffer to corresponding physical address in S/G list */
878 sg_memcpy(pSGList, io->cSGList,
879 (ULONG) io->BlocksXferred * (ULONG) io->BlockSize,
880 aws->buf, io->BlockSize, BUF_TO_SG);
881 }
882
883 io->BlocksXferred += add_workspace(pIorb)->blocks;
884 DPRINTF(7,"ata_read_pp(): blocks transferred = %d\n", io->BlocksXferred);
885
886 if (io->BlocksXferred >= io->BlockCount)
887 {
888 /* we're done; tell IRQ handler the IORB is complete */
889 add_workspace(pIorb)->complete = 1;
890 }
891 else
892 {
893 /* requeue this IORB for next iteration */
894 iorb_requeue(pIorb);
895 }
896}
897
898/******************************************************************************
899 * Verify readability of sectors on ATA device.
900 */
901int ata_verify(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
902{
903 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
904 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
905 int p = iorb_unit_port(pIorb);
906 int d = iorb_unit_device(pIorb);
907 int rc;
908
909 if (io->BlockCount == 0)
910 {
911 /* NOP; return -1 without error in IORB to indicate success */
912 return(-1);
913 }
914
915 /* prepare verify command */
916 if (io->RBA >= (1UL << 28) || io->BlockCount > 256)
917 {
918 /* need LBA48 for this command */
919 if (!ai->ports[p].devs[d].lba48) {
920 iorb_seterr(pIorb, IOERR_RBA_LIMIT);
921 return(-1);
922 }
923 rc = ata_cmd(ai, p, d, slot, ATA_CMD_VERIFY_EXT,
924 AP_SECTOR_48, io->RBA, 0,
925 AP_COUNT, io->BlockCount,
926 AP_DEVICE, 0x40,
927 AP_END);
928 } else {
929 rc = ata_cmd(ai, p, d, slot, ATA_CMD_VERIFY,
930 AP_SECTOR_28, io->RBA,
931 AP_COUNT, io->BlockCount & 0xffU,
932 AP_DEVICE, 0x40,
933 AP_END);
934 }
935
936 return(rc);
937}
938
939/******************************************************************************
940 * Write sectors to AHCI device.
941 */
942int ata_write(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
943{
944 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
945 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
946 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
947 ULONG sector = io->RBA + io->BlocksXferred;
948 USHORT count = io->BlockCount - io->BlocksXferred;
949 USHORT sg_indx;
950 USHORT sg_cnt;
951 int p = iorb_unit_port(pIorb);
952 int d = iorb_unit_device(pIorb);
953 int rc;
954
955 if (io->BlockCount == 0)
956 {
957 /* NOP; return -1 without error in IORB to indicate success */
958 return(-1);
959 }
960
961 if (add_workspace(pIorb)->unaligned)
962 {
963 /* unaligned S/G addresses present; need to use double buffers */
964 return(ata_write_unaligned(pIorb, slot));
965 }
966
967 /* prepare write command while keeping an eye on S/G count limitations */
968 do {
969 sg_indx = ata_get_sg_indx(io);
970 sg_cnt = io->cSGList - sg_indx;
971 if ((rc = ata_cmd_write(pIorb, ai, p, d, slot, sector, count,
972 pSGList + sg_indx, sg_cnt,
973 io->Flags & XIO_DISABLE_HW_WRITE_CACHE)) > 0)
974 {
975 /* couldn't map all S/G elements */
976 ata_max_sg_cnt(io, sg_indx, (USHORT) rc, &sg_cnt, &count);
977 }
978 } while (rc > 0 && sg_cnt > 0);
979
980 if (rc == 0)
981 {
982 add_workspace(pIorb)->blocks = count;
983 add_workspace(pIorb)->ppfunc = ata_write_pp;
984 }
985 else if (rc > 0)
986 {
987 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD);
988 }
989 else if (rc == ATA_CMD_UNALIGNED_ADDR)
990 {
991 /* unaligned S/G addresses detected; need to use double buffers */
992 add_workspace(pIorb)->unaligned = 1;
993 return(ata_write_unaligned(pIorb, slot));
994 }
995 else
996 {
997 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
998 }
999
1000 return(rc);
1001}
1002
1003/******************************************************************************
1004 * Write sectors to AHCI device with unaligned S/G element addresses. AHCI
1005 * only allows aligned S/G addresses while OS/2 doesn't impose this kind of
1006 * restriction. This doesn't happen very often but when it does, we need to
1007 * use a transfer buffer and copy the data manually.
1008 */
1009int ata_write_unaligned(IORBH *pIorb, int slot)
1010{
1011 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
1012 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
1013 ADD_WORKSPACE *aws = add_workspace(pIorb);
1014 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
1015 ULONG sector = io->RBA + io->BlocksXferred;
1016 SCATGATENTRY sg_single;
1017 int p = iorb_unit_port(pIorb);
1018 int d = iorb_unit_device(pIorb);
1019 int rc;
1020
1021 DPRINTF(7,"ata_write_unaligned(%d.%d.%d, %d)\n", ad_no(ai), p, d, sector);
1022
1023 /* allocate transfer buffer */
1024 if ((aws->buf = MemAlloc(io->BlockSize)) == NULL)
1025 {
1026 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
1027 return(-1);
1028 }
1029
1030 /* copy next sector from S/G list to transfer buffer */
1031 sg_memcpy(pSGList, io->cSGList,
1032 (ULONG) io->BlocksXferred * (ULONG) io->BlockSize,
1033 aws->buf, io->BlockSize, SG_TO_BUF);
1034
1035 /* prepare write command using transfer buffer */
1036 sg_single.ppXferBuf = MemPhysAdr(aws->buf);
1037 sg_single.XferBufLen = io->BlockSize;
1038 rc = ata_cmd_write(pIorb, ai, p, d, slot, sector, 1, &sg_single, 1,
1039 io->Flags & XIO_DISABLE_HW_WRITE_CACHE);
1040
1041 if (rc == 0)
1042 {
1043 add_workspace(pIorb)->blocks = 1;
1044 add_workspace(pIorb)->ppfunc = ata_write_pp;
1045 }
1046 else if (rc > 0)
1047 {
1048 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD);
1049 }
1050 else
1051 {
1052 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
1053 }
1054
1055 return(rc);
1056}
1057
1058
1059/******************************************************************************
1060 * Post processing function for ata_write(); this function updates the
1061 * BlocksXferred counter in the IORB and, if not all blocks have been
1062 * transferred, requeues the IORB to process the remaining sectors.
1063 */
1064void ata_write_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1065{
1066 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
1067
1068 io->BlocksXferred += add_workspace(pIorb)->blocks;
1069 DPRINTF(7,"ata_write_pp(): blocks transferred = %d\n", io->BlocksXferred);
1070
1071 if (io->BlocksXferred >= io->BlockCount)
1072 {
1073 /* we're done; tell IRQ handler the IORB is complete */
1074 add_workspace(pIorb)->complete = 1;
1075 }
1076 else
1077 {
1078 /* requeue this IORB for next iteration */
1079 iorb_requeue(pIorb);
1080 }
1081}
1082
1083/******************************************************************************
1084 * Execute ATA command.
1085 */
1086int ata_execute_ata(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
1087{
1088 IORB_ADAPTER_PASSTHRU *apt = (IORB_ADAPTER_PASSTHRU *)pIorb;
1089 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(apt->pSGList);
1090 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
1091 int p = iorb_unit_port(pIorb);
1092 int d = iorb_unit_device(pIorb);
1093 int rc;
1094
1095 if (apt->ControllerCmdLen != sizeof(ATA_CMD))
1096 {
1097 iorb_seterr(pIorb, IOERR_CMD_SYNTAX);
1098 return(-1);
1099 }
1100
1101 rc = ata_cmd(ai, p, d, slot, 0,
1102 AP_SGLIST, pSGList, apt->cSGList,
1103 AP_ATA_CMD, Far16ToFlat(apt->pControllerCmd),
1104 AP_WRITE, !(apt->Flags & PT_DIRECTION_IN),
1105 AP_END);
1106
1107 if (rc == 0)
1108 {
1109 add_workspace(pIorb)->ppfunc = ata_execute_ata_pp;
1110 }
1111
1112 return(rc);
1113}
1114
1115/******************************************************************************
1116 * Post processing function for ata_execute_ata(); the main purpose of this
1117 * function is to copy the received D2H FIS (i.e. the device registers after
1118 * command completion) back to the ATA command structure.
1119 *
1120 * See ata_cmd_to_fis() for an explanation of the mapping.
1121 */
1122void ata_execute_ata_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1123{
1124 AHCI_PORT_DMA *dma_base;
1125 ATA_CMD *cmd;
1126 AD_INFO *ai;
1127 u8 *fis;
1128 int p;
1129
1130 /* get address of D2H FIS */
1131 ai = ad_infos + iorb_unit_adapter(pIorb);
1132 p = iorb_unit_port(pIorb);
1133 dma_base = port_dma_base(ai, p);
1134 fis = dma_base->rx_fis + 0x40;
1135
1136 if (fis[0] != 0x34)
1137 {
1138 /* this is not a D2H FIS - give up silently */
1139 DPRINTF(3,"ata_execute_ata_pp(): D2H FIS type incorrect: %d\n", fis[0]);
1140 add_workspace(pIorb)->complete = 1;
1141 return;
1142 }
1143
1144 /* map D2H FIS to the original ATA controller command structure */
1145 cmd = (ATA_CMD *)Far16ToFlat(((IORB_ADAPTER_PASSTHRU*)pIorb)->pControllerCmd);
1146
1147 cmd->cmd = fis[2];
1148 cmd->device = fis[7];
1149 cmd->features = ((u16) fis[3])
1150 | ((u16) fis[11] << 8);
1151 cmd->lba_l = ((u32) fis[4])
1152 | ((u32) fis[5] << 8)
1153 | ((u32) fis[6] << 16)
1154 | ((u32) fis[8] << 24);
1155 cmd->lba_h = ((u16) fis[9])
1156 | ((u16) fis[10] << 8);
1157 cmd->count = ((u16) fis[12])
1158 | ((u16) fis[13] << 8);
1159
1160 DHEXDUMP(0,cmd, sizeof(*cmd), "ata_execute_ata_pp(): cmd after completion:\n");
1161
1162 /* signal completion to interrupt handler */
1163 add_workspace(pIorb)->complete = 1;
1164}
1165
1166/******************************************************************************
1167 * Request sense information for a failed command. Since there is no "request
1168 * sense" command for ATA devices, we need to read the current error code from
1169 * the AHCI task file register and fabricate the sense information.
1170 *
1171 * NOTES:
1172 *
1173 * - This function must be called right after an ATA command has failed and
1174 * before any other commands are queued on the corresponding port. This
1175 * function is typically called in the port restart context hook which is
1176 * triggered by an AHCI error interrupt.
1177 *
1178 * - The ATA error bits are a complete mess. We'll try and catch the most
1179 * interesting error codes (such as medium errors) and report everything
1180 * else with a generic error code.
1181 */
1182int ata_req_sense(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
1183{
1184 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
1185 u8 *port_mmio = port_base(ai, iorb_unit_port(pIorb));
1186 u32 tf_data = readl(port_mmio + PORT_TFDATA);
1187 u8 err = (tf_data >> 8);
1188 u8 sts = (tf_data);
1189
1190 if (sts & ATA_ERR)
1191 {
1192 if (sts & ATA_DF)
1193 {
1194 /* there is a device-specific error condition */
1195 if (err & ATA_ICRC)
1196 {
1197 iorb_seterr(pIorb, IOERR_ADAPTER_DEVICEBUSCHECK);
1198 }
1199 else if (err & ATA_UNC)
1200 {
1201 iorb_seterr(pIorb, IOERR_MEDIA);
1202 }
1203 else if (err & ATA_IDNF)
1204 {
1205 iorb_seterr(pIorb, IOERR_RBA_ADDRESSING_ERROR);
1206 }
1207 else
1208 {
1209 iorb_seterr(pIorb, IOERR_DEVICE_NONSPECIFIC);
1210 }
1211
1212 }
1213 else
1214 {
1215 iorb_seterr(pIorb, IOERR_DEVICE_NONSPECIFIC);
1216 }
1217 }
1218 else
1219 {
1220 /* this function only gets called when we received an error interrupt */
1221 iorb_seterr(pIorb, IOERR_DEVICE_NONSPECIFIC);
1222 }
1223
1224 /* Return an error to indicate there's no HW command to be submitted and
1225 * that the IORB can be completed "as is" (the upstream code expects the
1226 * IORB error code, if any, to be set when this happens and this is exactly
1227 * what this function is all about).
1228 */
1229 return(-1);
1230}
1231
1232/******************************************************************************
1233 * Extract vendor and device name from an ATA IDENTIFY buffer. Since strings
1234 * in the identify buffer are byte-swapped, we need to swap them back.
1235 */
1236char *ata_dev_name(u16 *id_buf)
1237{
1238 static char dev_name[ATA_ID_PROD_LEN + 1];
1239 char *t = dev_name;
1240 char *s = (char *) (id_buf + ATA_ID_PROD);
1241 int i;
1242
1243 dev_name[sizeof(dev_name)-1] = '\0';
1244
1245 for (i = 0; i < ATA_ID_PROD_LEN / 2; i++) {
1246 *(t++) = s[1];
1247 *(t++) = s[0];
1248 s += 2;
1249 }
1250
1251 return(dev_name);
1252}
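/* Example (illustrative): a model string "Maxtor 6" is stored in the
 * identify buffer as the byte-swapped pairs "aM", "tx", "ro", "6 "; the loop
 * above copies s[1] before s[0] for each 16-bit word and thus reconstructs
 * "Maxtor 6".
 */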
1253
1254/******************************************************************************
1255 * Fabricate ATA READ command based on the capabilities of the corresponding
1256 * device and the parameters set from above (NCQ, etc.).
1257 */
1258static int ata_cmd_read(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot,
1259 ULONG sector, ULONG count, SCATGATENTRY *sg_list,
1260 ULONG sg_cnt)
1261{
1262 int rc;
1263
1264 if (sector >= (1UL << 28) || count > 256 || add_workspace(pIorb)->is_ncq)
1265 {
1266 /* need LBA48 for this command */
1267 if (!ai->ports[p].devs[d].lba48)
1268 {
1269 iorb_seterr(pIorb, IOERR_RBA_LIMIT);
1270 return(-1);
1271 }
1272 if (add_workspace(pIorb)->is_ncq)
1273 {
1274 /* use NCQ read; count goes into feature register, tag into count! */
1275 rc = ata_cmd(ai, p, d, slot, ATA_CMD_FPDMA_READ,
1276 AP_SECTOR_48, sector, 0,
1277 AP_FEATURES, count,
1278 AP_COUNT, (slot << 3), /* tag == slot */
1279 AP_SGLIST, sg_list, sg_cnt,
1280 AP_DEVICE, 0x40,
1281 AP_END);
1282 }
1283 else
1284 {
1285 rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ_EXT,
1286 AP_SECTOR_48, sector, 0,
1287 AP_COUNT, count,
1288 AP_SGLIST, sg_list, sg_cnt,
1289 AP_DEVICE, 0x40,
1290 AP_END);
1291 }
1292
1293 }
1294 else
1295 {
1296 rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ,
1297 AP_SECTOR_28, sector,
1298 AP_COUNT, count & 0xffU,
1299 AP_SGLIST, sg_list, sg_cnt,
1300 AP_DEVICE, 0x40,
1301 AP_END);
1302 }
1303
1304 return(rc);
1305}
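/* Note on the NCQ variant above (illustrative): for slot 5 the AP_COUNT
 * value is 5 << 3 = 0x28, i.e. the queue tag ends up in bits 7:3 of the
 * count register while the actual sector count travels in the features
 * register, as required for READ/WRITE FPDMA QUEUED.
 */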
1306
1307/******************************************************************************
1308 * Fabricate ATA WRITE command based on the capabilities of the corresponding
1309 * device and the parameters set from above (NCQ, etc.)
1310 */
1311static int ata_cmd_write(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot,
1312 ULONG sector, ULONG count, SCATGATENTRY *sg_list,
1313 ULONG sg_cnt, int write_through)
1314{
1315 int rc;
1316
1317 if (sector >= (1UL << 28) || count > 256 || add_workspace(pIorb)->is_ncq)
1318 {
1319 /* need LBA48 for this command */
1320 if (!ai->ports[p].devs[d].lba48)
1321 {
1322 iorb_seterr(pIorb, IOERR_RBA_LIMIT);
1323 return(-1);
1324 }
1325 if (add_workspace(pIorb)->is_ncq)
1326 {
1327 /* use NCQ write; count goes into feature register, tag into count! */
1328 rc = ata_cmd(ai, p, d, slot, ATA_CMD_FPDMA_WRITE,
1329 AP_SECTOR_48, sector, 0,
1330 AP_FEATURES, count,
1331 /* tag = slot */
1332 AP_COUNT, (slot << 3),
1333 AP_SGLIST, sg_list, sg_cnt,
1334 AP_DEVICE, 0x40,
1335 /* force unit access */
1336 AP_DEVICE, (write_through && !force_write_cache) ? 0x80 : 0,
1337 AP_WRITE, 1,
1338 AP_END);
1339 }
1340 else
1341 {
1342 rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE_EXT,
1343 AP_SECTOR_48, sector, 0,
1344 AP_COUNT, count,
1345 AP_SGLIST, sg_list, sg_cnt,
1346 AP_DEVICE, 0x40,
1347 AP_WRITE, 1,
1348 AP_END);
1349 }
1350 }
1351 else
1352 {
1353 rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE,
1354 AP_SECTOR_28, sector,
1355 AP_COUNT, count & 0xffU,
1356 AP_SGLIST, sg_list, sg_cnt,
1357 AP_DEVICE, 0x40,
1358 AP_WRITE, 1,
1359 AP_END);
1360 }
1361
1362 return(rc);
1363}
1364
1365/******************************************************************************
1366 * Copy block from S/G list to virtual address or vice versa.
1367 */
1368void sg_memcpy(SCATGATENTRY *sg_list, USHORT sg_cnt, ULONG sg_off,
1369 void *buf, USHORT len, SG_MEMCPY_DIRECTION dir)
1370{
1371 USHORT i;
1372 USHORT l;
1373 ULONG phys_addr;
1374 ULONG pos = 0;
1375 char *p;
1376
1377 /* walk through S/G list to find the elements involved in the operation */
1378 for (i = 0; i < sg_cnt && len > 0; i++)
1379 {
1380 if (pos <= sg_off && pos + sg_list[i].XferBufLen > sg_off)
1381 {
1382 /* this S/G element intersects with the block to be copied */
1383 phys_addr = sg_list[i].ppXferBuf + (sg_off - pos);
1384 if ((l = sg_list[i].XferBufLen - (sg_off - pos)) > len)
1385 {
1386 l = len;
1387 }
1388
1389 if (Dev32Help_PhysToLin(phys_addr, l, (PVOID) &p))
1390 {
1391 panic("sg_memcpy(): Dev32Help_PhysToLin() failed");
1392 }
1393 if (dir == SG_TO_BUF)
1394 {
1395 memcpy(buf, p, l);
1396 }
1397 else
1398 {
1399 memcpy(p, buf, l);
1400 }
1401 sg_off += l;
1402 buf = (char *) buf + l;
1403 len -= l;
1404 }
1405
1406 pos += sg_list[i].XferBufLen;
1407 }
1408}
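/* Example (illustrative): copying one 512-byte sector at sg_off = 1024 from
 * an S/G list whose first two elements are 1280 and 768 bytes long touches
 * both elements: 256 bytes from the tail of element 0 (physical address
 * ppXferBuf + 1024), then 256 bytes from the start of element 1.
 */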
1409
1410/******************************************************************************
1411 * Halt processing by submitting an internal error. This is a last resort and
1412 * should only be called when the system state is corrupt.
1413 */
1414void panic(char *msg)
1415{
1416 Dev32Help_InternalError(msg, strlen(msg));
1417}
1418