source: trunk/src/os2ahci/ata.c@ 198

Last change on this file since 198 was 198, checked in by David Azarewicz, 7 years ago

Added Usable Disk option.

File size: 47.7 KB
1/******************************************************************************
2 * ata.c - ATA command processing
3 *
4 * Copyright (c) 2011 thi.guten Software Development
5 * Copyright (c) 2011 Mensys B.V.
6 * Copyright (c) 2013-2018 David Azarewicz
7 *
8 * Authors: Christian Mueller, Markus Thielen
9 *
10 * Parts copied from/inspired by the Linux AHCI driver;
11 * those parts are (c) Linux AHCI/ATA maintainers
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include "os2ahci.h"
29#include "ata.h"
30
31/* -------------------------- macros and constants ------------------------- */
32
33/* ------------------------ typedefs and structures ------------------------ */
34
35/* -------------------------- function prototypes -------------------------- */
36
37static int ata_cmd_read(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot,
38 ULONG sector, ULONG count, SCATGATENTRY *sg_list,
39 ULONG sg_cnt);
40
41static int ata_cmd_write(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot,
42 ULONG sector, ULONG count, SCATGATENTRY *sg_list,
43 ULONG sg_cnt, int write_through);
44
45/* ------------------------ global/static variables ------------------------ */
46
47/* ----------------------------- start of code ----------------------------- */
48
49/******************************************************************************
50 * Initialize AHCI command slot, FIS and S/G list for the specified ATA
51 * command. The command parameters are passed as a variable argument list
52 * of type and value(s). The list is terminated by AP_END.
53 *
54 * Notes:
55 *
56 * - The specified command slot is expected to be idle; no checks are
57 * performed to prevent messing with a busy port.
58 *
 59 * - Port multipliers are not supported yet, thus 'd' should always
60 * be 0 for the time being.
61 *
 62 * - 'cmd' is passed as a 16-bit integer because the compiler would push
 63 * a 'u8' as a 16-bit value (it's a fixed argument) and the stdarg
 64 * macros would screw up the address of the first variable argument
 65 * if the size of the last fixed argument didn't match what the
66 * compiler pushed on the stack.
67 *
68 * Return values:
69 * 0 : success
70 * > 0 : could not map all S/G entries; the return value is the number of
71 * S/G entries that could be mapped.
72 * < 0 : other error
73 */
74int ata_cmd(AD_INFO *ai, int p, int d, int slot, int cmd, ...)
75{
76 va_list va;
77 va_start(va, cmd);
78 return(v_ata_cmd(ai, p, d, slot, cmd, va));
79}
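/* Usage sketch (illustrative, not part of the original source): a call that
 * would queue a 48-bit READ for 16 sectors at LBA 0x12345678 from an
 * existing OS/2 S/G list; 'my_sg_list' and 'my_sg_cnt' are assumed to be
 * supplied by the caller. The AP_* type/value pairs must end with AP_END:
 *
 *   rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ_EXT,
 *                AP_SECTOR_48, 0x12345678UL, 0UL,
 *                AP_COUNT,     16UL,
 *                AP_SGLIST,    my_sg_list, my_sg_cnt,
 *                AP_DEVICE,    0x40,
 *                AP_END);
 *
 * rc == 0 means success, rc > 0 is the number of S/G entries that could be
 * mapped (split the request) and rc < 0 is an error (see the ATA_CMD_*
 * return codes).
 */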
80
81int v_ata_cmd(AD_INFO *ai, int p, int d, int slot, int cmd, va_list va)
82{
83 AHCI_PORT_DMA *dma_base_virt;
84 AHCI_CMD_HDR *cmd_hdr;
85 AHCI_CMD_TBL *cmd_tbl;
86 SCATGATENTRY *sg_list = NULL;
87 SCATGATENTRY sg_single;
88 ATA_PARM ap;
89 ATA_CMD ata_cmd;
90 void *atapi_cmd = NULL;
91 u32 dma_base_phys;
92 u32 atapi_cmd_len = 0;
93 u32 ahci_flags = 0;
94 u32 sg_cnt = 0;
95 u32 i;
96 u32 n;
97
98 /* --------------------------------------------------------------------------
99 * Initialize ATA command. The ATA command is set up with the main command
100 * value and a variable list of additional parameters such as the sector
101 * address, transfer count, ...
102 */
103 memset(&ata_cmd, 0x00, sizeof(ata_cmd));
104 ata_cmd.cmd = cmd;
105
106 /* parse variable arguments */
107 do
108 {
109 switch ((ap = va_arg(va, ATA_PARM)))
110 {
111
112 case AP_AHCI_FLAGS:
113 ahci_flags |= va_arg(va, u32);
114 break;
115
116 case AP_WRITE:
117 if (va_arg(va, u32) != 0)
118 {
119 ahci_flags |= AHCI_CMD_WRITE;
120 }
121 break;
122
123 case AP_FEATURES:
124 /* ATA features word */
125 ata_cmd.features |= va_arg(va, u32);
126 break;
127
128 case AP_COUNT:
129 /* transfer count */
130 ata_cmd.count = va_arg(va, u32);
131 break;
132
133 case AP_SECTOR_28:
134 /* 28-bit sector address */
135 ata_cmd.lba_l = va_arg(va, u32);
136 if (ata_cmd.lba_l & 0xf0000000UL)
137 {
138 dprintf(0,"error: LBA-28 address %d has more than 28 bits\n", ata_cmd.lba_l);
139 return(ATA_CMD_INVALID_PARM);
140 }
141 /* add upper 4 bits to device field */
142 ata_cmd.device |= (ata_cmd.lba_l >> 24) & 0x0fU;
143 /* only lower 24 bits come into lba_l */
144 ata_cmd.lba_l &= 0x00ffffffUL;
145 break;
146
147 case AP_SECTOR_48:
148 /* 48-bit sector address */
149 ata_cmd.lba_l = va_arg(va, u32);
150 ata_cmd.lba_h = va_arg(va, u32);
151 break;
152
153 case AP_DEVICE:
154 /* ATA device byte; note that this byte contains the highest
155 * 4 bits of LBA-28 address; we have to leave them alone here. */
156 ata_cmd.device |= va_arg(va, u32) & 0xf0;
157 break;
158
159 case AP_SGLIST:
160 /* scatter/gather list in SCATGATENTRY/count format */
161 sg_list = va_arg(va, void *);
162 sg_cnt = va_arg(va, u32);
163 break;
164
165 case AP_VADDR:
166 /* virtual buffer address in addr/len format (up to 4K) */
167 sg_single.ppXferBuf = MemPhysAdr(va_arg(va, void *));
168 sg_single.XferBufLen = va_arg(va, u32);
169 sg_list = &sg_single;
170 sg_cnt = 1;
171 break;
172
173 case AP_ATAPI_CMD:
174 /* ATAPI command */
175 atapi_cmd = va_arg(va, void *);
176 atapi_cmd_len = va_arg(va, u32);
177 ahci_flags |= AHCI_CMD_ATAPI;
178 break;
179
180 case AP_ATA_CMD:
181 /* ATA command "pass-through" */
182 memcpy(&ata_cmd, va_arg(va, void *), sizeof(ATA_CMD));
183 break;
184
185 case AP_END:
186 break;
187
188 default:
189 dprintf(0,"error: v_ata_cmd() called with invalid parameter type (%d)\n", (int) ap);
190 return(ATA_CMD_INVALID_PARM);
191 }
192
193 } while (ap != AP_END);
194
195 /* --------------------------------------------------------------------------
196 * Fill in AHCI ATA command information. This includes the port command slot,
197 * the corresponding command FIS and the S/G list. The layout of the AHCI
198 * port DMA region is based on the Linux AHCI driver and looks like this:
199 *
200 * - 32 AHCI command headers (AHCI_CMD_HDR) with 32 bytes, each
201 * - 1 FIS receive area with 256 bytes (AHCI_RX_FIS_SZ)
202 * - 32 AHCI command tables, each consisting of
203 * - 64 bytes for command FIS
 204 * - 16 bytes for ATAPI commands
205 * - 48 bytes reserved
206 * - 48 S/G entries (AHCI_SG) with 32 bytes, each
207 *
208 * Since the whole DMA buffer for all ports is larger than 64KB and we need
209 * multiple segments to address all of them, there are no virtual pointers
210 * to the individual elements in AD_INFO. Instead, we're relying on macros
211 * for getting the base address of a particular port's DMA region, then
212 * map a structure on top of that for convenience (AHCI_PORT_DMA).
213 */
214 dma_base_virt = port_dma_base(ai, p);
215 dma_base_phys = port_dma_base_phys(ai, p);
216
217 /* AHCI command header */
218 cmd_hdr = &dma_base_virt->cmd_hdr[slot];
219 memset(cmd_hdr, 0x00, sizeof(*cmd_hdr));
220 cmd_hdr->options = ((d & 0x0f) << 12);
221 cmd_hdr->options |= ahci_flags; /* AHCI command flags */
222 cmd_hdr->options |= 5; /* length of command FIS in 32-bit words */
223 cmd_hdr->tbl_addr = dma_base_phys + offsetof(AHCI_PORT_DMA, cmd_tbl[slot]);
224 /* DAZ can use MemPhysAdr(&dma_base_virt->cmd_tbl[slot]), but is probably slower. */
225
226 /* AHCI command table */
227 cmd_tbl = &dma_base_virt->cmd_tbl[slot];
228 memset(cmd_tbl, 0x00, sizeof(*cmd_tbl));
229 ata_cmd_to_fis(cmd_tbl->cmd_fis, &ata_cmd, d);
230
231 if (atapi_cmd != NULL)
232 {
233 /* copy ATAPI command */
234 memcpy(cmd_tbl->atapi_cmd, atapi_cmd, atapi_cmd_len);
235 }
236
237 /* PRDT (S/G list)
238 *
239 * - The S/G list for AHCI adapters is limited to 22 bits for the transfer
 240 * size of each element, thus we need to split S/G elements larger than
 241 * 4 MB (22 bits) into 2 AHCI_SG elements.
242 *
243 * - The S/G element size for AHCI is what the spec calls '0'-based
 244 * (i.e. 0 means 1 byte). On top of that, the spec requires S/G transfer
 245 * sizes to be even in the context of 16-bit transfers, thus bit 0 of the
 246 * size field always needs to be set.
247 *
248 * - AHCI_MAX_SG_ELEMENT_LEN defines the maximum size of an AHCI S/G
249 * element in bytes, ignoring the '0'-based methodology (i.e. 1 << 22).
250 *
251 * - There's a limit on the maximum number of S/G elements in the port DMA
252 * buffer (AHCI_MAX_SG) which is lower than the HW maximum. It's beyond
253 * the control of this function to split commands which require more
254 * than AHCI_MAX_SG entries. In order to help the caller, the return value
255 * of this function will indicate how many OS/2 S/G entries were
256 * successfully mapped.
257 */
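 /* Worked example (illustrative values): a 5 MB OS/2 S/G element at
  * physical address 0x00100000 becomes two AHCI PRDT entries:
  *   sg_list[n]   : addr = 0x00100000, size = 0x3fffff (4 MB, '0'-based)
  *   sg_list[n+1] : addr = 0x00500000, size = 0x0fffff (1 MB, '0'-based)
  */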
258 for (i = n = 0; i < sg_cnt; i++)
259 {
260 u32 sg_addr = sg_list[i].ppXferBuf;
261 u32 sg_size = sg_list[i].XferBufLen;
262
263 do
264 {
265 u32 chunk = (sg_size > AHCI_MAX_SG_ELEMENT_LEN) ? AHCI_MAX_SG_ELEMENT_LEN : sg_size;
266 if (n >= AHCI_MAX_SG)
267 {
268 /* couldn't store all S/G elements in our DMA buffer */
269 dprintf(0,"ata_cmd(): too many S/G elements\n");
270 return(i - 1);
271 }
272 if ((sg_addr & 1) || (chunk & 1))
273 {
274 dprintf(1,"error: ata_cmd() called with unaligned S/G element(s)\n");
275 return(ATA_CMD_UNALIGNED_ADDR);
276 }
277 cmd_tbl->sg_list[n].addr = sg_addr;
278 cmd_tbl->sg_list[n].size = chunk - 1;
279 sg_addr += chunk;
280 sg_size -= chunk;
281 n++;
282 } while (sg_size > 0);
283 }
284
285 /* set final S/G count in AHCI command header */
286 cmd_hdr->options |= n << 16;
287
288 #ifdef DEBUG
289 if ((D32g_DbgLevel >= 5) /*|| (atapi_cmd != NULL)*/)
290 {
291 DPRINTF(0,"ATA command for %d.%d.%d, slot %d:\n", ad_no(ai), p, d, slot);
292 dHexDump(0,cmd_hdr, offsetof(AHCI_CMD_HDR, reserved), "cmd_hdr: ");
293 dHexDump(0,&ata_cmd, sizeof(ata_cmd), "ata_cmd: ");
294 if (atapi_cmd != NULL)
295 {
296 dHexDump(0,atapi_cmd, atapi_cmd_len, "atapi_cmd: ");
297 }
298 if (n > 0)
299 {
300 dHexDump(0,cmd_tbl->sg_list, sizeof(*cmd_tbl->sg_list) * n, "sg_list: ");
301 }
302 }
303 #endif
304
305 return(ATA_CMD_SUCCESS);
306}
307
308/******************************************************************************
309 * Fill SATA command FIS with values extracted from an ATA command structure.
310 * The command FIS buffer (fis) is expected to be initialized to 0s. The
311 * structure of the FIS maps to the ATA shadow register block, including
312 * registers which can be written twice to store 16 bits (called 'exp').
313 *
314 * The FIS structure looks like this (using LSB notation):
315 *
316 * +----------------+----------------+----------------+----------------+
317 * 00 | FIS type (27h) | C|R|R|R|PMP | Command | Features |
318 * +----------------+----------------+----------------+----------------+
319 * 04 | LBA 7:0 | LBA 15:8 | LBA 23:16 | R|R|R|D|Head |
320 * +----------------+----------------+----------------+----------------+
 321 * 08 | LBA 31:24 | LBA 39:32 | LBA 47:40 | Features exp |
322 * +----------------+----------------+----------------+----------------+
323 * 12 | Count 7:0 | Count 15:8 | Reserved | Control |
324 * +----------------+----------------+----------------+----------------+
325 * 16 | Reserved | Reserved | Reserved | Reserved |
326 * +----------------+----------------+----------------+----------------+
327 */
328void ata_cmd_to_fis(u8 *fis, ATA_CMD *ata_cmd, int d)
329{
330 fis[0] = 0x27; /* register - host to device FIS */
331 fis[1] = (u8) (d & 0xf); /* port multiplier number */
332 fis[1] |= 0x80; /* bit 7 indicates Command FIS */
333 fis[2] = (u8) ata_cmd->cmd;
334 fis[3] = (u8) ata_cmd->features;
335
336 fis[4] = (u8) ata_cmd->lba_l;
337 fis[5] = (u8) (ata_cmd->lba_l >> 8);
338 fis[6] = (u8) (ata_cmd->lba_l >> 16);
339 fis[7] = (u8) ata_cmd->device;
340
341 fis[8] = (u8) (ata_cmd->lba_l >> 24);
342 fis[9] = (u8) ata_cmd->lba_h;
343 fis[10] = (u8) (ata_cmd->lba_h >> 8);
344 fis[11] = (u8) (ata_cmd->features >> 8);
345
346 fis[12] = (u8) ata_cmd->count;
347 fis[13] = (u8) (ata_cmd->count >> 8);
348}
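/* Worked example (illustrative values): an ATA_CMD with cmd 0x25 (READ DMA
 * EXT), lba_l = 0x12345678, lba_h = 0x0009, count = 8, device = 0x40 and
 * d = 0 is serialized as:
 *
 *   fis[0..3]   = 27 80 25 00   (FIS type, C bit set, command, features)
 *   fis[4..7]   = 78 56 34 40   (LBA 7:0, 15:8, 23:16, device)
 *   fis[8..11]  = 12 09 00 00   (LBA 31:24, 39:32, 47:40, features exp)
 *   fis[12..13] = 08 00         (count 7:0, count 15:8)
 */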
349
350/******************************************************************************
351 * Get index in S/G list for the number of transferred sectors in the IORB.
352 *
353 * Returning io->cSGList indicates an error.
354 *
355 * NOTE: OS/2 makes sure S/G lists are set up such that entries at the HW
356 * limit will never cross sector boundaries. This means that splitting
357 * S/G lists into multiple commands can be done without editing the S/G
358 * lists.
359 */
360u16 ata_get_sg_indx(IORB_EXECUTEIO *io)
361{
362 ULONG offset = io->BlocksXferred * io->BlockSize;
363 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
364 USHORT i;
365
366 for (i = 0; i < io->cSGList && offset > 0; i++)
367 {
368 offset -= pSGList[i].XferBufLen;
369 }
370
371 return(i);
372}
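/* Example (illustrative values): with BlockSize = 512, BlocksXferred = 9 and
 * S/G element lengths { 4096, 512, 8192 }, offset starts at 4608; element 0
 * reduces it to 512 and element 1 to 0, so the function returns 2 and the
 * next command continues with the third S/G element.
 */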
373
374/******************************************************************************
375 * Get max S/G count which will fit into our HW S/G buffers. This function is
376 * called when the S/G list is too long and we need to split the IORB into
377 * multiple commands. It returns both the number of sectors and S/G list
378 * elements that we can handle in a single command.
379 *
380 * The parameter 'sg_indx' indicates the current start index in the S/G list
381 * (0 if this is the first command iteration).
382 *
383 * The parameter 'sg_max' is the return value of v_ata_cmd() and indicates
384 * how many S/G elements were successfully mapped. Whatever we return needs to
385 * be less or equal to this value.
386 *
387 * Returning 0 in *sg_cnt indicates an error.
388 *
389 * NOTE: OS/2 makes sure S/G lists are set up such that entries at HW limits
390 * will never cross sector boundaries. This means that splitting S/G
391 * lists into multiple commands can be done without editing S/G list
392 * elements. Since AHCI only allows 22 bits for each S/G element, the
393 * hardware limits are reported as AHCI_MAX_SG / 2 but will vary based
394 * on the actual length of S/G elements. This function looks for the
395 * maximum number of S/G elements that can be mapped on sector
396 * boundaries which will still fit into our HW S/G list.
397 */
398void ata_max_sg_cnt(IORB_EXECUTEIO *io, USHORT sg_indx, USHORT sg_max,
399 USHORT *sg_cnt, USHORT *sector_cnt)
400{
401 ULONG max_sector_cnt = 0;
402 USHORT max_sg_cnt = 0;
403 ULONG offset = 0;
404 USHORT i;
405 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
406
407 for (i = sg_indx; i < io->cSGList; i++)
408 {
409 if (i - sg_indx >= sg_max)
410 {
411 /* we're beyond the number of S/G elements we can map */
412 break;
413 }
414
415 offset += pSGList[i].XferBufLen;
416 if (offset % io->BlockSize == 0)
417 {
418 /* this S/G element ends on a sector boundary */
419 max_sector_cnt = offset / io->BlockSize;
420 max_sg_cnt = i + 1;
421 }
422 }
423
424 /* return the best match we found (0 indicating failure) */
425 *sector_cnt = max_sector_cnt;
426 *sg_cnt = max_sg_cnt;
427}
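/* Example (illustrative values): with BlockSize = 512, sg_indx = 0,
 * sg_max = 2 and S/G element lengths { 4096, 512, 1024 }, element 0 ends on
 * a sector boundary (8 sectors) and so does element 1 (9 sectors); element 2
 * is beyond sg_max, so the result is *sg_cnt = 2 and *sector_cnt = 9.
 */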
428
429
430/******************************************************************************
431 * Get device or media geometry. Device and media geometry are expected to be
432 * the same for non-removable devices, which will always be the case for the
433 * ATA devices we're dealing with (hard disks). ATAPI is a different story
434 * and handled by atapi_get_geometry().
435 */
436int ata_get_geometry(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
437{
438 ADD_WORKSPACE *aws = add_workspace(pIorb);
439 int rc;
440
441 /* allocate buffer for ATA identify information */
442 if ((aws->buf = MemAlloc(ATA_ID_WORDS * sizeof(u16))) == NULL)
443 {
444 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
445 return(-1);
446 }
447
448 /* request ATA identify information */
449 aws->ppfunc = ata_get_geometry_pp;
450 rc = ata_cmd(ad_infos + iorb_unit_adapter(pIorb),
451 iorb_unit_port(pIorb),
452 iorb_unit_device(pIorb),
453 slot,
454 ATA_CMD_ID_ATA,
455 AP_VADDR, (void *) aws->buf, ATA_ID_WORDS * sizeof(u16),
456 AP_END);
457
458 if (rc != 0)
459 {
460 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
461 }
462
463 return(rc);
464}
465
 466/* Adjust the cylinder count in the physical geometry to the last full
 467 * cylinder; returns TRUE if the cylinder count overflows 16 bits (it is
 468 * then capped at 65535). */
469int adjust_cylinders(GEOMETRY *geometry, ULONG TotalSectors)
470{
471 USHORT SecPerCyl;
472 int rc = FALSE;
473
474 geometry->TotalSectors = TotalSectors;
475 SecPerCyl = geometry->SectorsPerTrack * geometry->NumHeads;
476 if (SecPerCyl > 0)
477 {
478 ULONG TotalCylinders = TotalSectors / SecPerCyl;
479
480 geometry->TotalSectors = TotalCylinders * SecPerCyl;
481 geometry->TotalCylinders = TotalCylinders;
482 if (TotalCylinders >> 16)
483 {
484 geometry->TotalCylinders = 65535;
485 rc = TRUE;
486 }
487 }
488 return (rc);
489}
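/* Example (illustrative values): for TotalSectors = 1000000 with 255 heads
 * and 63 sectors per track, SecPerCyl = 16065 and TotalCylinders = 62;
 * TotalSectors is trimmed to 996030 (the partial last cylinder is dropped)
 * and FALSE is returned because 62 fits into 16 bits.
 */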
490
 491/* Calculate the logical geometry based on the input physical geometry
492 * using the LBA Assist Translation algorithm.
493 */
494#define BIOS_MAX_CYLINDERS 1024l
495#define BIOS_MAX_NUMHEADS 255
496#define BIOS_MAX_SECTORSPERTRACK 63
497void log_geom_calculate_LBA_assist(GEOMETRY *geometry, ULONG TotalSectors)
498{
499 UCHAR numSpT = BIOS_MAX_SECTORSPERTRACK;
500 UCHAR numHeads = BIOS_MAX_NUMHEADS;
501 ULONG Cylinders;
502
503 if (TotalSectors <= (BIOS_MAX_CYLINDERS * 128 * BIOS_MAX_SECTORSPERTRACK))
504 {
505 USHORT temp = (TotalSectors - 1) / (BIOS_MAX_CYLINDERS * BIOS_MAX_SECTORSPERTRACK);
506
507 if (temp < 16) numHeads = 16;
508 else if (temp < 32) numHeads = 32;
509 else if (temp < 64) numHeads = 64;
510 else numHeads = 128;
511 }
512
513 do
514 {
515 Cylinders = TotalSectors / (USHORT)(numHeads * numSpT);
516 if (Cylinders >> 16)
517 {
518 if (numSpT < 128)
519 numSpT = (numSpT << 1) | 1;
520 else
521 Cylinders = 65535; // overflow !
522 }
523 } while (Cylinders >> 16);
524
525 geometry->TotalCylinders = Cylinders;
526 geometry->NumHeads = numHeads;
527 geometry->SectorsPerTrack = numSpT;
528}
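/* Example (illustrative values): for TotalSectors = 4200000 (roughly 2 GB),
 * temp = 4199999 / (1024 * 63) = 65, which selects 128 heads; the first loop
 * iteration yields Cylinders = 4200000 / (128 * 63) = 520 with no 16-bit
 * overflow, so the logical geometry becomes 520 cylinders, 128 heads and
 * 63 sectors per track.
 */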
529
530int check_lvm(IORBH *pIorb, ULONG sector)
531{
532 DLA_Table_Sector *pDLA = (DLA_Table_Sector*)add_workspace(pIorb)->buf;
533 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
534 GEOMETRY *geometry = ((IORB_GEOMETRY*)pIorb)->pGeometry;
535 int p = iorb_unit_port(pIorb);
536 int rc;
537
538 rc = ahci_exec_polled_cmd(ai, p, 0, 500, ATA_CMD_READ,
539 AP_SECTOR_28, sector-1,
540 AP_COUNT, 1,
541 AP_VADDR, (void *)pDLA, 512,
542 AP_DEVICE, 0x40,
543 AP_END);
544 if (rc) return 0;
545
546 DHEXDUMP(5,pDLA, sizeof(DLA_Table_Sector), "DLA sector %d:\n", sector-1);
547
548 if ((pDLA->DLA_Signature1 == DLA_TABLE_SIGNATURE1) && (pDLA->DLA_Signature2 == DLA_TABLE_SIGNATURE2))
549 {
550 DPRINTF(3,__func__": DLA found at sector %d\n", sector-1);
551 geometry->TotalCylinders = pDLA->Cylinders;
552 geometry->NumHeads = pDLA->Heads_Per_Cylinder;
553 geometry->SectorsPerTrack = pDLA->Sectors_Per_Track;
554 geometry->TotalSectors = pDLA->Cylinders * pDLA->Heads_Per_Cylinder * pDLA->Sectors_Per_Track;
555 return 1;
556 }
557
558 return 0;
559}
560
561/******************************************************************************
562 * Try to read LVM information from the disk. If found, use the LVM geometry.
563 * This function will only work at init time. A better strategy would be to
564 * calculate the geometry during ahci_scan_ports and save it away and then just
565 * return the saved values when ata_get_geometry() is called.
566 */
567int is_lvm_geometry(IORBH *pIorb)
568{
569 GEOMETRY *geometry = ((IORB_GEOMETRY*)pIorb)->pGeometry;
570 ULONG sector;
571
572 if (init_complete) return 0; /* We cannot use ahci_exec_polled_cmd() after init_complete */
573
574 #ifdef DEBUG
575 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
576 int p = iorb_unit_port(pIorb);
577 int d = iorb_unit_device(pIorb);
578 DPRINTF(3,__func__" (%d.%d.%d)\n", ad_no(ai), p, d);
579 #endif
580
581 /* First check the sector reported by the hardware */
582 if (check_lvm(pIorb, geometry->SectorsPerTrack)) return 1;
583
584 for (sector = 255; sector >= 63; sector >>= 1)
585 {
586 if (sector == geometry->SectorsPerTrack) continue;
587 if (check_lvm(pIorb, sector)) return 1;
588 }
589
590 return 0;
591}
592
593/******************************************************************************
594 * Post processing function for ata_get_geometry(): convert the ATA identify
595 * information to OS/2 IOCC_GEOMETRY information.
596 */
597void ata_get_geometry_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb)
598{
599 GEOMETRY *geometry = ((IORB_GEOMETRY*)pIorb)->pGeometry;
600 USHORT geometry_len = ((IORB_GEOMETRY *)pIorb)->GeometryLen;
601 u16 *id_buf = add_workspace(pIorb)->buf;
602 int a = iorb_unit_adapter(pIorb);
603 int p = iorb_unit_port(pIorb);
604 char *Method;
605
606 /* Fill-in geometry information; the ATA-8 spec declares the geometry
607 * fields in the ATA ID buffer as obsolete but it's still the best
608 * guess in most cases. If the information stored in the geometry
609 * fields is apparently incorrect, we'll use the algorithm typically
610 * used by SCSI adapters and modern PC BIOS versions:
611 *
612 * - 512 bytes per sector
613 * - 255 heads
614 * - 63 sectors per track (or 56 with the parameter "/4")
615 * - x cylinders (calculated)
616 *
617 * Please note that os2ahci currently does not natively support ATA sectors
 618 * larger than 512 bytes and therefore relies on the translation logic built
619 * into the corresponding ATA disks. In order to prevent file systems that
620 * use block sizes larger than 512 bytes (FAT, JFS, ...) from ending up on
621 * incorrectly aligned physical sector accesses, hence using more physical
622 * I/Os than necessary, the command line parameter "/4" can be used to force
623 * a track size of 56 sectors. This way, partitions will start on 4K
624 * boundaries.
625 *
626 * Another limitation is that OS/2 has a 32-bit variable for the total number
627 * of sectors, limiting the maximum capacity to roughly 2TB. This is another
628 * issue that needs to be addressed sooner or later; large sectors could
629 * raise this limit to something like 8TB but this is not really much of a
630 * difference. Maybe there's something in later DDKs that allows more than
631 * 32 bits?
632 *
633 * Warning: Do not change the algorithm for calculating disk geometry without
 634 * fully understanding the consequences. Side effects of even slight changes
635 * can be unexpected and catastrophic.
636 */
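 /* For reference: with "/4" a track is 56 * 512 = 28672 bytes, a multiple of
  * 4096, so partitions starting on track boundaries are 4K-aligned; with the
  * default 63 sectors per track a track is 32256 bytes, which is not.
  */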
637 memset(geometry, 0x00, geometry_len);
638 geometry->BytesPerSector = ATA_SECTOR_SIZE;
639
640 /* extract total number of sectors */
641 if (id_buf[ATA_ID_CFS_ENABLE_2] & 0x400)
642 {
643 /* 48-bit LBA supported */
644 if (ATA_CAPACITY48_H(id_buf) != 0)
645 {
646 /* more than 32 bits for number of sectors */
647 dprintf(0,"warning: limiting disk %d.%d.%d to 2TB\n",
648 iorb_unit_adapter(pIorb), iorb_unit_port(pIorb),
649 iorb_unit_device(pIorb));
650 geometry->TotalSectors = 0xffffffffUL;
651 }
652 else
653 {
654 geometry->TotalSectors = ATA_CAPACITY48_L(id_buf);
655 }
656 }
657 else
658 {
659 /* 28-bit LBA */
660 geometry->TotalSectors = ATA_CAPACITY(id_buf) & 0x0fffffffUL;
661 }
662
663 Method = "None";
664 /* fabricate the remaining geometry fields */
665 if (track_size[a][p] != 0)
666 {
667 /* A specific track size has been requested for this port; this is
668 * typically done for disks with 4K sectors to make sure partitions
669 * start on 8-sector boundaries (parameter "/4").
670 */
671 geometry->NumHeads = 255;
672 geometry->SectorsPerTrack = track_size[a][p];
673 geometry->TotalCylinders = geometry->TotalSectors / ((u32) geometry->NumHeads * (u32) geometry->SectorsPerTrack);
674 Method = "Custom";
675 }
676 else if (CUR_HEADS(id_buf) > 0 && CUR_CYLS(id_buf) > 0 && CUR_SECTORS(id_buf) > 0 &&
677 CUR_CAPACITY(id_buf) == (u32) CUR_HEADS(id_buf) * (u32) CUR_CYLS(id_buf) * (u32) CUR_SECTORS(id_buf))
678 {
679 /* BIOS-supplied (aka "current") geometry values look valid */
680 geometry->NumHeads = CUR_HEADS(id_buf);
681 geometry->SectorsPerTrack = CUR_SECTORS(id_buf);
682 geometry->TotalCylinders = CUR_CYLS(id_buf);
683 Method = "BIOS";
684 }
685 else if (ATA_HEADS(id_buf) > 0 && ATA_CYLS(id_buf) > 0 && ATA_SECTORS(id_buf) > 0)
686 {
687 /* ATA-supplied values for geometry look valid */
688 geometry->NumHeads = ATA_HEADS(id_buf);
689 geometry->SectorsPerTrack = ATA_SECTORS(id_buf);
690 geometry->TotalCylinders = ATA_CYLS(id_buf);
691 Method = "ATA";
692 }
693 else
694 {
695 /* use typical SCSI geometry */
696 geometry->NumHeads = 255;
697 geometry->SectorsPerTrack = 63;
698 geometry->TotalCylinders = geometry->TotalSectors / ((u32) geometry->NumHeads * (u32) geometry->SectorsPerTrack);
699 Method = "SCSI";
700 }
701
702 DPRINTF(2,"Physical geometry: %d cylinders, %d heads, %d sectors per track (%dMB) (%s)\n",
703 geometry->TotalCylinders, geometry->NumHeads, geometry->SectorsPerTrack,
704 (geometry->TotalSectors / 2048), Method);
705
706 /* Fixup the geometry in case the geometry reported by the BIOS is bad */
707 if (adjust_cylinders(geometry, geometry->TotalSectors))
708 { // cylinder overflow
709 log_geom_calculate_LBA_assist(geometry, geometry->TotalSectors);
710 geometry->TotalSectors = (USHORT)(geometry->NumHeads * geometry->SectorsPerTrack) * (ULONG)geometry->TotalCylinders;
711 }
712 adjust_cylinders(geometry, geometry->TotalSectors);
713
714 DPRINTF(2,"Logical geometry: %d cylinders, %d heads, %d sectors per track (%dMB) (%s)\n",
715 geometry->TotalCylinders, geometry->NumHeads, geometry->SectorsPerTrack,
716 (geometry->TotalSectors / 2048), Method);
717
718 if (is_lvm_geometry(pIorb)) Method = "LVM";
719 ad_infos[a].ports[p].devs[0].dev_info.Cylinders = geometry->TotalCylinders;
720 ad_infos[a].ports[p].devs[0].dev_info.HeadsPerCylinder = geometry->NumHeads;
721 ad_infos[a].ports[p].devs[0].dev_info.SectorsPerTrack = geometry->SectorsPerTrack;
722 ad_infos[a].ports[p].devs[0].dev_info.TotalSectors = geometry->TotalSectors;
723 ad_infos[a].ports[p].devs[0].dev_info.Method = Method;
724
725 DPRINTF(2,"Reported geometry: %d cylinders, %d heads, %d sectors per track (%dMB) (%s)\n",
726 geometry->TotalCylinders, geometry->NumHeads, geometry->SectorsPerTrack,
727 (geometry->TotalSectors / 2048), Method);
728
729 /* tell interrupt handler that this IORB is complete */
730 add_workspace(pIorb)->complete = 1;
731}
732
733/******************************************************************************
734 * Test whether unit is ready.
735 */
736int ata_unit_ready(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
737{
738 /* This is a NOP for ATA devices (at least right now); returning an error
739 * without setting an error code means ahci_exec_iorb() will not queue any
740 * HW command and the IORB will complete successfully.
741 */
742 ((IORB_UNIT_STATUS *)pIorb)->UnitStatus = US_READY | US_POWER;
743 return(-1);
744}
745
746/******************************************************************************
747 * Read sectors from AHCI device.
748 */
749int ata_read(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
750{
751 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
752 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
753 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
754 ULONG sector = io->RBA + io->BlocksXferred;
755 USHORT count = io->BlockCount - io->BlocksXferred;
756 USHORT sg_indx;
757 USHORT sg_cnt;
758 int p = iorb_unit_port(pIorb);
759 int d = iorb_unit_device(pIorb);
760 int rc;
761
762 if (io->BlockCount == 0)
763 {
764 /* NOP; return -1 without error in IORB to indicate success */
765 return(-1);
766 }
767
768 if (add_workspace(pIorb)->unaligned)
769 {
770 /* unaligned S/G addresses present; need to use double buffers */
771 return(ata_read_unaligned(pIorb, slot));
772 }
773
774 /* Kludge: some I/O commands during boot use excessive S/G buffer lengths
775 * which cause NCQ commands to lock up. If there's only one S/G element
776 * and this element is already larger than what we can derive from the sector
777 * count, we'll adjust that element.
778 */
779 if (io->BlocksXferred == 0 && io->cSGList == 1 &&
780 pSGList[0].XferBufLen > (ULONG) io->BlockCount * io->BlockSize)
781 {
782 pSGList[0].XferBufLen = (ULONG) io->BlockCount * io->BlockSize;
783 }
784
785 /* prepare read command while keeping an eye on S/G count limitations */
786 do
787 {
788 sg_indx = ata_get_sg_indx(io);
789 sg_cnt = io->cSGList - sg_indx;
790 if ((rc = ata_cmd_read(pIorb, ai, p, d, slot, sector, count,
791 pSGList + sg_indx, sg_cnt)) > 0)
792 {
793 /* couldn't map all S/G elements */
794 ata_max_sg_cnt(io, sg_indx, rc, &sg_cnt, &count);
795 }
796 } while (rc > 0 && sg_cnt > 0);
797
798 if (rc == 0)
799 {
800 add_workspace(pIorb)->blocks = count;
801 add_workspace(pIorb)->ppfunc = ata_read_pp;
802 }
803 else if (rc > 0)
804 {
805 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD);
806 }
807 else if (rc == ATA_CMD_UNALIGNED_ADDR)
808 {
809 /* unaligned S/G addresses detected; need to use double buffers */
810 add_workspace(pIorb)->unaligned = 1;
811 return(ata_read_unaligned(pIorb, slot));
812
813 }
814 else
815 {
816 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
817 }
818
819 return(rc);
820}
821
822/******************************************************************************
823 * Read sectors from AHCI device with unaligned S/G element addresses. AHCI
 824 * only allows aligned S/G addresses while OS/2 doesn't have this kind of
 825 * restriction. This doesn't happen very often but when it does, we need to
826 * use a transfer buffer and copy the data manually.
827 */
828int ata_read_unaligned(IORBH *pIorb, int slot)
829{
830 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
831 ADD_WORKSPACE *aws = add_workspace(pIorb);
832 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
833 ULONG sector = io->RBA + io->BlocksXferred;
834 SCATGATENTRY sg_single;
835 int p = iorb_unit_port(pIorb);
836 int d = iorb_unit_device(pIorb);
837 int rc;
838
839 DPRINTF(7,"ata_read_unaligned(%d.%d.%d, %d)\n", ad_no(ai), p, d, sector);
840 ai->ports[p].unaligned_read_count++;
841
842 /* allocate transfer buffer */
843 if ((aws->buf = MemAlloc(io->BlockSize)) == NULL)
844 {
845 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
846 return(-1);
847 }
848
849 /* prepare read command using transfer buffer */
850 sg_single.ppXferBuf = MemPhysAdr(aws->buf);
851 sg_single.XferBufLen = io->BlockSize;
852 rc = ata_cmd_read(pIorb, ai, p, d, slot, sector, 1, &sg_single, 1);
853
854 if (rc == 0) {
855 add_workspace(pIorb)->blocks = 1;
856 add_workspace(pIorb)->ppfunc = ata_read_pp;
857
858 } else if (rc > 0) {
859 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD);
860
861 } else {
862 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
863 }
864
865 return(rc);
866}
867
868/******************************************************************************
869 * Post processing function for ata_read(); this function updates the
870 * BlocksXferred counter in the IORB and, if not all blocks have been
871 * transferred, requeues the IORB to process the remaining sectors. It also
872 * takes care of copying data from the transfer buffer for unaligned reads.
873 */
874void ata_read_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb)
875{
876 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
877 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
878 ADD_WORKSPACE *aws = add_workspace(pIorb);
879
880 if (aws->unaligned)
881 {
882 /* copy transfer buffer to corresponding physical address in S/G list */
883 sg_memcpy(pSGList, io->cSGList,
884 (ULONG) io->BlocksXferred * (ULONG) io->BlockSize,
885 aws->buf, io->BlockSize, BUF_TO_SG);
886 }
887
888 io->BlocksXferred += add_workspace(pIorb)->blocks;
889 DPRINTF(7,__func__": blocks transferred = %d\n", io->BlocksXferred);
890
891 if (io->BlocksXferred >= io->BlockCount)
892 {
893 /* we're done; tell IRQ handler the IORB is complete */
894 add_workspace(pIorb)->complete = 1;
895 }
896 else
897 {
898 /* requeue this IORB for next iteration */
899 iorb_requeue(pIorb);
900 }
901}
902
903/******************************************************************************
904 * Verify readability of sectors on ATA device.
905 */
906int ata_verify(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
907{
908 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
909 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
910 int p = iorb_unit_port(pIorb);
911 int d = iorb_unit_device(pIorb);
912 int rc;
913
914 if (io->BlockCount == 0)
915 {
916 /* NOP; return -1 without error in IORB to indicate success */
917 return(-1);
918 }
919
920 /* prepare verify command */
921 if (io->RBA >= (1UL << 28) || io->BlockCount > 256)
922 {
923 /* need LBA48 for this command */
924 if (!ai->ports[p].devs[d].lba48) {
925 iorb_seterr(pIorb, IOERR_RBA_LIMIT);
926 return(-1);
927 }
928 rc = ata_cmd(ai, p, d, slot, ATA_CMD_VERIFY_EXT,
929 AP_SECTOR_48, io->RBA, 0,
930 AP_COUNT, io->BlockCount,
931 AP_DEVICE, 0x40,
932 AP_END);
933 } else {
934 rc = ata_cmd(ai, p, d, slot, ATA_CMD_VERIFY,
935 AP_SECTOR_28, io->RBA,
936 AP_COUNT, io->BlockCount & 0xffU,
937 AP_DEVICE, 0x40,
938 AP_END);
939 }
940
941 return(rc);
942}
943
944/******************************************************************************
945 * Write sectors to AHCI device.
946 */
947int ata_write(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
948{
949 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
950 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
951 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
952 ULONG sector = io->RBA + io->BlocksXferred;
953 USHORT count = io->BlockCount - io->BlocksXferred;
954 USHORT sg_indx;
955 USHORT sg_cnt;
956 int p = iorb_unit_port(pIorb);
957 int d = iorb_unit_device(pIorb);
958 int rc;
959
960 if (io->BlockCount == 0)
961 {
962 /* NOP; return -1 without error in IORB to indicate success */
963 return(-1);
964 }
965
966 if (add_workspace(pIorb)->unaligned)
967 {
968 /* unaligned S/G addresses present; need to use double buffers */
969 return(ata_write_unaligned(pIorb, slot));
970 }
971
972 /* prepare write command while keeping an eye on S/G count limitations */
973 do {
974 sg_indx = ata_get_sg_indx(io);
975 sg_cnt = io->cSGList - sg_indx;
976 if ((rc = ata_cmd_write(pIorb, ai, p, d, slot, sector, count,
977 pSGList + sg_indx, sg_cnt,
978 io->Flags & XIO_DISABLE_HW_WRITE_CACHE)) > 0)
979 {
980 /* couldn't map all S/G elements */
981 ata_max_sg_cnt(io, sg_indx, (USHORT) rc, &sg_cnt, &count);
982 }
983 } while (rc > 0 && sg_cnt > 0);
984
985 if (rc == 0)
986 {
987 add_workspace(pIorb)->blocks = count;
988 add_workspace(pIorb)->ppfunc = ata_write_pp;
989 }
990 else if (rc > 0)
991 {
992 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD);
993 }
994 else if (rc == ATA_CMD_UNALIGNED_ADDR)
995 {
996 /* unaligned S/G addresses detected; need to use double buffers */
997 add_workspace(pIorb)->unaligned = 1;
998 return(ata_write_unaligned(pIorb, slot));
999 }
1000 else
1001 {
1002 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
1003 }
1004
1005 return(rc);
1006}
1007
1008/******************************************************************************
 1009 * Write sectors to AHCI device with unaligned S/G element addresses. AHCI
 1010 * only allows aligned S/G addresses while OS/2 doesn't have this kind of
 1011 * restriction. This doesn't happen very often but when it does, we need to
1012 * use a transfer buffer and copy the data manually.
1013 */
1014int ata_write_unaligned(IORBH *pIorb, int slot)
1015{
1016 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
1017 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
1018 ADD_WORKSPACE *aws = add_workspace(pIorb);
1019 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
1020 ULONG sector = io->RBA + io->BlocksXferred;
1021 SCATGATENTRY sg_single;
1022 int p = iorb_unit_port(pIorb);
1023 int d = iorb_unit_device(pIorb);
1024 int rc;
1025
1026 DPRINTF(7,"ata_write_unaligned(%d.%d.%d, %d)\n", ad_no(ai), p, d, sector);
1027
1028 /* allocate transfer buffer */
1029 if ((aws->buf = MemAlloc(io->BlockSize)) == NULL)
1030 {
1031 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
1032 return(-1);
1033 }
1034
1035 /* copy next sector from S/G list to transfer buffer */
1036 sg_memcpy(pSGList, io->cSGList,
1037 (ULONG) io->BlocksXferred * (ULONG) io->BlockSize,
1038 aws->buf, io->BlockSize, SG_TO_BUF);
1039
1040 /* prepare write command using transfer buffer */
1041 sg_single.ppXferBuf = MemPhysAdr(aws->buf);
1042 sg_single.XferBufLen = io->BlockSize;
1043 rc = ata_cmd_write(pIorb, ai, p, d, slot, sector, 1, &sg_single, 1,
1044 io->Flags & XIO_DISABLE_HW_WRITE_CACHE);
1045
1046 if (rc == 0)
1047 {
1048 add_workspace(pIorb)->blocks = 1;
1049 add_workspace(pIorb)->ppfunc = ata_write_pp;
1050 }
1051 else if (rc > 0)
1052 {
1053 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD);
1054 }
1055 else
1056 {
1057 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
1058 }
1059
1060 return(rc);
1061}
1062
1063
1064/******************************************************************************
1065 * Post processing function for ata_write(); this function updates the
1066 * BlocksXferred counter in the IORB and, if not all blocks have been
1067 * transferred, requeues the IORB to process the remaining sectors.
1068 */
1069void ata_write_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1070{
1071 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
1072
1073 io->BlocksXferred += add_workspace(pIorb)->blocks;
1074 DPRINTF(7,"ata_write_pp(): blocks transferred = %d\n", io->BlocksXferred);
1075
1076 if (io->BlocksXferred >= io->BlockCount)
1077 {
1078 /* we're done; tell IRQ handler the IORB is complete */
1079 add_workspace(pIorb)->complete = 1;
1080 }
1081 else
1082 {
1083 /* requeue this IORB for next iteration */
1084 iorb_requeue(pIorb);
1085 }
1086}
1087
1088/******************************************************************************
1089 * Execute ATA command.
1090 */
1091int ata_execute_ata(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
1092{
1093 IORB_ADAPTER_PASSTHRU *apt = (IORB_ADAPTER_PASSTHRU *)pIorb;
1094 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(apt->pSGList);
1095 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
1096 int p = iorb_unit_port(pIorb);
1097 int d = iorb_unit_device(pIorb);
1098 int rc;
1099
1100 if (apt->ControllerCmdLen != sizeof(ATA_CMD))
1101 {
1102 iorb_seterr(pIorb, IOERR_CMD_SYNTAX);
1103 return(-1);
1104 }
1105
1106 rc = ata_cmd(ai, p, d, slot, 0,
1107 AP_SGLIST, pSGList, apt->cSGList,
1108 AP_ATA_CMD, Far16ToFlat(apt->pControllerCmd),
1109 AP_WRITE, !(apt->Flags & PT_DIRECTION_IN),
1110 AP_END);
1111
1112 if (rc == 0)
1113 {
1114 add_workspace(pIorb)->ppfunc = ata_execute_ata_pp;
1115 }
1116
1117 return(rc);
1118}
1119
1120/******************************************************************************
1121 * Post processing function for ata_execute_ata(); the main purpose of this
1122 * function is to copy the received D2H FIS (i.e. the device registers after
1123 * command completion) back to the ATA command structure.
1124 *
1125 * See ata_cmd_to_fis() for an explanation of the mapping.
1126 */
1127void ata_execute_ata_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1128{
1129 AHCI_PORT_DMA *dma_base;
1130 ATA_CMD *cmd;
1131 AD_INFO *ai;
1132 u8 *fis;
1133 int p;
1134
1135 /* get address of D2H FIS */
1136 ai = ad_infos + iorb_unit_adapter(pIorb);
1137 p = iorb_unit_port(pIorb);
1138 dma_base = port_dma_base(ai, p);
1139 fis = dma_base->rx_fis + 0x40;
1140
1141 if (fis[0] != 0x34)
1142 {
1143 /* this is not a D2H FIS - give up silently */
1144 DPRINTF(3,"ata_execute_ata_pp(): D2H FIS type incorrect: %d\n", fis[0]);
1145 add_workspace(pIorb)->complete = 1;
1146 return;
1147 }
1148
1149 /* map D2H FIS to the original ATA controller command structure */
1150 cmd = (ATA_CMD *)Far16ToFlat(((IORB_ADAPTER_PASSTHRU*)pIorb)->pControllerCmd);
1151
1152 cmd->cmd = fis[2];
1153 cmd->device = fis[7];
1154 cmd->features = ((u16) fis[3])
1155 | ((u16) fis[11]);
1156 cmd->lba_l = ((u32) fis[4])
1157 | ((u32) fis[5] << 8)
1158 | ((u32) fis[6] << 16)
1159 | ((u32) fis[8] << 24);
1160 cmd->lba_h = ((u16) fis[9])
1161 | ((u16) fis[10] << 8);
1162 cmd->count = ((u16) fis[12])
1163 | ((u16) fis[13] << 8);
1164
1165 DHEXDUMP(5,cmd, sizeof(*cmd), "ahci_execute_ata_pp(): cmd after completion:\n");
1166
1167 /* signal completion to interrupt handler */
1168 add_workspace(pIorb)->complete = 1;
1169}
1170
1171/******************************************************************************
1172 * Request sense information for a failed command. Since there is no "request
1173 * sense" command for ATA devices, we need to read the current error code from
1174 * the AHCI task file register and fabricate the sense information.
1175 *
1176 * NOTES:
1177 *
1178 * - This function must be called right after an ATA command has failed and
1179 * before any other commands are queued on the corresponding port. This
1180 * function is typically called in the port restart context hook which is
1181 * triggered by an AHCI error interrupt.
1182 *
1183 * - The ATA error bits are a complete mess. We'll try and catch the most
1184 * interesting error codes (such as medium errors) and report everything
1185 * else with a generic error code.
1186 */
1187int ata_req_sense(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
1188{
1189 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
1190 u8 *port_mmio = port_base(ai, iorb_unit_port(pIorb));
1191 u32 tf_data = readl(port_mmio + PORT_TFDATA);
1192 u8 err = (tf_data >> 8);
1193 u8 sts = (tf_data);
1194
1195 if (sts & ATA_ERR)
1196 {
1197 if (sts & ATA_DF)
1198 {
1199 /* there is a device-specific error condition */
1200 if (err & ATA_ICRC)
1201 {
1202 iorb_seterr(pIorb, IOERR_ADAPTER_DEVICEBUSCHECK);
1203 }
1204 else if (err & ATA_UNC)
1205 {
1206 iorb_seterr(pIorb, IOERR_MEDIA);
1207 }
1208 else if (err & ATA_IDNF)
1209 {
1210 iorb_seterr(pIorb, IOERR_RBA_ADDRESSING_ERROR);
1211 }
1212 else
1213 {
1214 iorb_seterr(pIorb, IOERR_DEVICE_NONSPECIFIC);
1215 }
1216
1217 }
1218 else
1219 {
1220 iorb_seterr(pIorb, IOERR_DEVICE_NONSPECIFIC);
1221 }
1222 }
1223 else
1224 {
1225 /* this function only gets called when we received an error interrupt */
1226 iorb_seterr(pIorb, IOERR_DEVICE_NONSPECIFIC);
1227 }
1228
1229 /* Return an error to indicate there's no HW command to be submitted and
1230 * that the IORB can be completed "as is" (the upstream code expects the
1231 * IORB error code, if any, to be set when this happens and this is exactly
1232 * what this function is all about).
1233 */
1234 return(-1);
1235}
1236
1237/******************************************************************************
 1238 * Extract vendor and device name from an ATA IDENTIFY buffer. Since strings
 1239 * in the identify buffer are byte-swapped, we need to swap them back.
1240 */
1241char *ata_dev_name(u16 *id_buf)
1242{
1243 static char dev_name[ATA_ID_PROD_LEN + 1];
1244 char *t = dev_name;
1245 char *s = (char *) (id_buf + ATA_ID_PROD);
1246 int i;
1247
1248 dev_name[sizeof(dev_name)-1] = '\0';
1249
1250 for (i = 0; i < ATA_ID_PROD_LEN / 2; i++) {
1251 *(t++) = s[1];
1252 *(t++) = s[0];
1253 s += 2;
1254 }
1255
1256 return(dev_name);
1257}
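/* Example (illustrative values): a model string stored in the IDENTIFY words
 * as the raw byte sequence "aSsmnu gidks" is returned as "Samsung disk"
 * after each byte pair has been swapped.
 */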
1258
1259/******************************************************************************
1260 * Fabricate ATA READ command based on the capabilities of the corresponding
 1261 * device and the parameters set from above (NCQ, etc.).
1262 */
1263static int ata_cmd_read(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot,
1264 ULONG sector, ULONG count, SCATGATENTRY *sg_list,
1265 ULONG sg_cnt)
1266{
1267 int rc;
1268
1269 if (sector >= (1UL << 28) || count > 256 || add_workspace(pIorb)->is_ncq)
1270 {
1271 /* need LBA48 for this command */
1272 if (!ai->ports[p].devs[d].lba48)
1273 {
1274 iorb_seterr(pIorb, IOERR_RBA_LIMIT);
1275 return(-1);
1276 }
1277 if (add_workspace(pIorb)->is_ncq)
1278 {
1279 /* use NCQ read; count goes into feature register, tag into count! */
1280 rc = ata_cmd(ai, p, d, slot, ATA_CMD_FPDMA_READ,
1281 AP_SECTOR_48, sector, 0,
1282 AP_FEATURES, count,
1283 AP_COUNT, (slot << 3), /* tag == slot */
1284 AP_SGLIST, sg_list, sg_cnt,
1285 AP_DEVICE, 0x40,
1286 AP_END);
1287 }
1288 else
1289 {
1290 rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ_EXT,
1291 AP_SECTOR_48, sector, 0,
1292 AP_COUNT, count,
1293 AP_SGLIST, sg_list, sg_cnt,
1294 AP_DEVICE, 0x40,
1295 AP_END);
1296 }
1297
1298 }
1299 else
1300 {
1301 rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ,
1302 AP_SECTOR_28, sector,
1303 AP_COUNT, count & 0xffU,
1304 AP_SGLIST, sg_list, sg_cnt,
1305 AP_DEVICE, 0x40,
1306 AP_END);
1307 }
1308
1309 return(rc);
1310}
1311
1312/******************************************************************************
1313 * Fabricate ATA WRITE command based on the capabilities of the corresponding
 1314 * device and the parameters set from above (NCQ, etc.).
1315 */
1316static int ata_cmd_write(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot,
1317 ULONG sector, ULONG count, SCATGATENTRY *sg_list,
1318 ULONG sg_cnt, int write_through)
1319{
1320 int rc;
1321
1322 if (sector >= (1UL << 28) || count > 256 || add_workspace(pIorb)->is_ncq)
1323 {
1324 /* need LBA48 for this command */
1325 if (!ai->ports[p].devs[d].lba48)
1326 {
1327 iorb_seterr(pIorb, IOERR_RBA_LIMIT);
1328 return(-1);
1329 }
1330 if (add_workspace(pIorb)->is_ncq)
1331 {
1332 /* use NCQ write; count goes into feature register, tag into count! */
1333 rc = ata_cmd(ai, p, d, slot, ATA_CMD_FPDMA_WRITE,
1334 AP_SECTOR_48, sector, 0,
1335 AP_FEATURES, count,
1336 /* tag = slot */
1337 AP_COUNT, (slot << 3),
1338 AP_SGLIST, sg_list, sg_cnt,
1339 AP_DEVICE, 0x40,
1340 /* force unit access */
1341 AP_DEVICE, (write_through && !force_write_cache) ? 0x80 : 0,
1342 AP_WRITE, 1,
1343 AP_END);
1344 }
1345 else
1346 {
1347 rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE_EXT,
1348 AP_SECTOR_48, sector, 0,
1349 AP_COUNT, count,
1350 AP_SGLIST, sg_list, sg_cnt,
1351 AP_DEVICE, 0x40,
1352 AP_WRITE, 1,
1353 AP_END);
1354 }
1355 }
1356 else
1357 {
1358 rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE,
1359 AP_SECTOR_28, sector,
1360 AP_COUNT, count & 0xffU,
1361 AP_SGLIST, sg_list, sg_cnt,
1362 AP_DEVICE, 0x40,
1363 AP_WRITE, 1,
1364 AP_END);
1365 }
1366
1367 return(rc);
1368}
1369
1370/******************************************************************************
1371 * Copy block from S/G list to virtual address or vice versa.
1372 */
1373void sg_memcpy(SCATGATENTRY *sg_list, USHORT sg_cnt, ULONG sg_off,
1374 void *buf, USHORT len, SG_MEMCPY_DIRECTION dir)
1375{
1376 USHORT i;
1377 USHORT l;
1378 ULONG phys_addr;
1379 ULONG pos = 0;
1380 char *p;
1381
1382 /* walk through S/G list to find the elements involved in the operation */
1383 for (i = 0; i < sg_cnt && len > 0; i++)
1384 {
1385 if (pos <= sg_off && pos + sg_list[i].XferBufLen > sg_off)
1386 {
1387 /* this S/G element intersects with the block to be copied */
1388 phys_addr = sg_list[i].ppXferBuf + (sg_off - pos);
1389 if ((l = sg_list[i].XferBufLen - (sg_off - pos)) > len)
1390 {
1391 l = len;
1392 }
1393
1394 if (Dev32Help_PhysToLin(phys_addr, l, (PVOID) &p))
1395 {
 1396 panic(__func__": Dev32Help_PhysToLin() failed");
1397 }
1398 if (dir == SG_TO_BUF)
1399 {
1400 memcpy(buf, p, l);
1401 }
1402 else
1403 {
1404 memcpy(p, buf, l);
1405 }
1406 sg_off += l;
1407 buf = (char *) buf + l;
1408 len -= l;
1409 }
1410
1411 pos += sg_list[i].XferBufLen;
1412 }
1413}
1414
1415/******************************************************************************
1416 * Halt processing by submitting an internal error. This is a last resort and
1417 * should only be called when the system state is corrupt.
1418 */
1419void panic(char *msg)
1420{
1421 Dev32Help_InternalError(msg, strlen(msg));
1422}
1423