source: trunk/src/os2ahci/ata.c@ 185

Last change on this file since 185 was 185, checked in by David Azarewicz, 9 years ago

Fix for sparse port hardware

1/******************************************************************************
2 * ata.c - ATA command processing
3 *
4 * Copyright (c) 2011 thi.guten Software Development
5 * Copyright (c) 2011 Mensys B.V.
6 * Copyright (c) 2013-2016 David Azarewicz
7 *
8 * Authors: Christian Mueller, Markus Thielen
9 *
10 * Parts copied from/inspired by the Linux AHCI driver;
11 * those parts are (c) Linux AHCI/ATA maintainers
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include "os2ahci.h"
29#include "ata.h"
30
31/* -------------------------- macros and constants ------------------------- */
32
33/* ------------------------ typedefs and structures ------------------------ */
34
35/* -------------------------- function prototypes -------------------------- */
36
37static int ata_cmd_read(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot,
38 ULONG sector, ULONG count, SCATGATENTRY *sg_list,
39 ULONG sg_cnt);
40
41static int ata_cmd_write(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot,
42 ULONG sector, ULONG count, SCATGATENTRY *sg_list,
43 ULONG sg_cnt, int write_through);
44
45/* ------------------------ global/static variables ------------------------ */
46
47/* ----------------------------- start of code ----------------------------- */
48
49/******************************************************************************
50 * Initialize AHCI command slot, FIS and S/G list for the specified ATA
51 * command. The command parameters are passed as a variable argument list
52 * of type and value(s). The list is terminated by AP_END.
53 *
54 * Notes:
55 *
56 * - The specified command slot is expected to be idle; no checks are
57 * performed to prevent messing with a busy port.
58 *
59 * - Port multipliers are not supported yet, thus 'd' should always
60 * be 0 for the time being.
61 *
62 * - 'cmd' is passed as a 16-bit integer because the compiler pushes
63 * a 'u8' as a 16-bit value (it's a fixed argument), and the stdarg
64 * macros would compute the wrong address for the first variable
65 * argument if the size of the last fixed argument didn't match
66 * what the compiler actually pushed on the stack.
67 *
68 * Return values:
69 * 0 : success
70 * > 0 : could not map all S/G entries; the return value is the number of
71 * S/G entries that could be mapped.
72 * < 0 : other error
73 */
74int ata_cmd(AD_INFO *ai, int p, int d, int slot, int cmd, ...)
75{
76 va_list va;
77 va_start(va, cmd);
78 return(v_ata_cmd(ai, p, d, slot, cmd, va));
79}
80
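/* Illustrative usage sketch (not part of the driver; the concrete values are
 * assumptions for the example): issue a 28-bit LBA READ for a single sector
 * on device 0, using an OS/2 scatter/gather list. The real callers in this
 * file (e.g. ata_cmd_read() near the end) follow the same pattern; the
 * variable argument list must always be terminated with AP_END.
 */
#if 0
rc = ata_cmd(ai, p, 0, slot, ATA_CMD_READ,
             AP_SECTOR_28, sector,          /* 28-bit sector address         */
             AP_COUNT, 1UL,                 /* transfer a single sector      */
             AP_SGLIST, sg_list, sg_cnt,    /* OS/2 S/G list and entry count */
             AP_DEVICE, 0x40,               /* LBA bit in the device byte    */
             AP_END);
#endif
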
81int v_ata_cmd(AD_INFO *ai, int p, int d, int slot, int cmd, va_list va)
82{
83 AHCI_PORT_DMA *dma_base_virt;
84 AHCI_CMD_HDR *cmd_hdr;
85 AHCI_CMD_TBL *cmd_tbl;
86 SCATGATENTRY *sg_list = NULL;
87 SCATGATENTRY sg_single;
88 ATA_PARM ap;
89 ATA_CMD ata_cmd;
90 void *atapi_cmd = NULL;
91 u32 dma_base_phys;
92 u32 atapi_cmd_len = 0;
93 u32 ahci_flags = 0;
94 u32 sg_cnt = 0;
95 u32 i;
96 u32 n;
97
98 /* --------------------------------------------------------------------------
99 * Initialize ATA command. The ATA command is set up with the main command
100 * value and a variable list of additional parameters such as the sector
101 * address, transfer count, ...
102 */
103 memset(&ata_cmd, 0x00, sizeof(ata_cmd));
104 ata_cmd.cmd = cmd;
105
106 /* parse variable arguments */
107 do
108 {
109 switch ((ap = va_arg(va, ATA_PARM)))
110 {
111
112 case AP_AHCI_FLAGS:
113 ahci_flags |= va_arg(va, u32);
114 break;
115
116 case AP_WRITE:
117 if (va_arg(va, u32) != 0)
118 {
119 ahci_flags |= AHCI_CMD_WRITE;
120 }
121 break;
122
123 case AP_FEATURES:
124 /* ATA features word */
125 ata_cmd.features |= va_arg(va, u32);
126 break;
127
128 case AP_COUNT:
129 /* transfer count */
130 ata_cmd.count = va_arg(va, u32);
131 break;
132
133 case AP_SECTOR_28:
134 /* 28-bit sector address */
135 ata_cmd.lba_l = va_arg(va, u32);
136 if (ata_cmd.lba_l & 0xf0000000UL)
137 {
138 dprintf(0,"error: LBA-28 address %d has more than 28 bits\n", ata_cmd.lba_l);
139 return(ATA_CMD_INVALID_PARM);
140 }
141 /* add upper 4 bits to device field */
142 ata_cmd.device |= (ata_cmd.lba_l >> 24) & 0x0fU;
143 /* only lower 24 bits come into lba_l */
144 ata_cmd.lba_l &= 0x00ffffffUL;
145 break;
146
147 case AP_SECTOR_48:
148 /* 48-bit sector address */
149 ata_cmd.lba_l = va_arg(va, u32);
150 ata_cmd.lba_h = va_arg(va, u32);
151 break;
152
153 case AP_DEVICE:
154 /* ATA device byte; note that this byte contains the highest
155 * 4 bits of LBA-28 address; we have to leave them alone here. */
156 ata_cmd.device |= va_arg(va, u32) & 0xf0;
157 break;
158
159 case AP_SGLIST:
160 /* scatter/gather list in SCATGATENTRY/count format */
161 sg_list = va_arg(va, void *);
162 sg_cnt = va_arg(va, u32);
163 break;
164
165 case AP_VADDR:
166 /* virtual buffer address in addr/len format (up to 4K) */
167 sg_single.ppXferBuf = MemPhysAdr(va_arg(va, void *));
168 sg_single.XferBufLen = va_arg(va, u32);
169 sg_list = &sg_single;
170 sg_cnt = 1;
171 break;
172
173 case AP_ATAPI_CMD:
174 /* ATAPI command */
175 atapi_cmd = va_arg(va, void *);
176 atapi_cmd_len = va_arg(va, u32);
177 ahci_flags |= AHCI_CMD_ATAPI;
178 break;
179
180 case AP_ATA_CMD:
181 /* ATA command "pass-through" */
182 memcpy(&ata_cmd, va_arg(va, void *), sizeof(ATA_CMD));
183 break;
184
185 case AP_END:
186 break;
187
188 default:
189 dprintf(0,"error: v_ata_cmd() called with invalid parameter type (%d)\n", (int) ap);
190 return(ATA_CMD_INVALID_PARM);
191 }
192
193 } while (ap != AP_END);
194
195 /* --------------------------------------------------------------------------
196 * Fill in AHCI ATA command information. This includes the port command slot,
197 * the corresponding command FIS and the S/G list. The layout of the AHCI
198 * port DMA region is based on the Linux AHCI driver and looks like this:
199 *
200 * - 32 AHCI command headers (AHCI_CMD_HDR) of 32 bytes each
201 * - 1 FIS receive area with 256 bytes (AHCI_RX_FIS_SZ)
202 * - 32 AHCI command tables, each consisting of
203 * - 64 bytes for command FIS
204 * - 16 bytes for ATAPI commands
205 * - 48 bytes reserved
206 * - 48 S/G entries (AHCI_SG) of 32 bytes each
207 *
208 * Since the whole DMA buffer for all ports is larger than 64KB and we need
209 * multiple segments to address all of them, there are no virtual pointers
210 * to the individual elements in AD_INFO. Instead, we're relying on macros
211 * for getting the base address of a particular port's DMA region, then
212 * map a structure on top of that for convenience (AHCI_PORT_DMA).
213 */
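/* A sketch of the layout described above (illustrative only; the array
 * bounds and field types are assumptions, the authoritative AHCI_PORT_DMA
 * definition lives in the driver headers):
 */
#if 0
typedef struct {
  AHCI_CMD_HDR cmd_hdr[32];   /* 32 command headers, 32 bytes each           */
  u8           rx_fis[256];   /* FIS receive area (AHCI_RX_FIS_SZ bytes)     */
  AHCI_CMD_TBL cmd_tbl[32];   /* command FIS, ATAPI command and S/G entries  */
} AHCI_PORT_DMA;
#endif
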
214 dma_base_virt = port_dma_base(ai, p);
215 dma_base_phys = port_dma_base_phys(ai, p);
216
217 /* AHCI command header */
218 cmd_hdr = &dma_base_virt->cmd_hdr[slot];
219 memset(cmd_hdr, 0x00, sizeof(*cmd_hdr));
220 cmd_hdr->options = ((d & 0x0f) << 12);
221 cmd_hdr->options |= ahci_flags; /* AHCI command flags */
222 cmd_hdr->options |= 5; /* length of command FIS in 32-bit words */
223 cmd_hdr->tbl_addr = dma_base_phys + offsetof(AHCI_PORT_DMA, cmd_tbl[slot]);
224 /* DAZ can use MemPhysAdr(&dma_base_virt->cmd_tbl[slot]), but is probably slower. */
225
226 /* AHCI command table */
227 cmd_tbl = &dma_base_virt->cmd_tbl[slot];
228 memset(cmd_tbl, 0x00, sizeof(*cmd_tbl));
229 ata_cmd_to_fis(cmd_tbl->cmd_fis, &ata_cmd, d);
230
231 if (atapi_cmd != NULL)
232 {
233 /* copy ATAPI command */
234 memcpy(cmd_tbl->atapi_cmd, atapi_cmd, atapi_cmd_len);
235 }
236
237 /* PRDT (S/G list)
238 *
239 * - The S/G list for AHCI adapters is limited to 22 bits for the transfer
240 * size of each element, thus we need to split OS/2 S/G elements larger
241 * than 4 MB (22 bits) into multiple AHCI_SG elements.
242 *
243 * - The S/G element size for AHCI is what the spec calls '0'-based
244 * (i.e. 0 means 1 byte). On top of that, the spec requires S/G transfer
245 * sizes to be even in the context of 16-bit transfers, thus bit 0 of
246 * the size field always needs to be set.
247 *
248 * - AHCI_MAX_SG_ELEMENT_LEN defines the maximum size of an AHCI S/G
249 * element in bytes, ignoring the '0'-based methodology (i.e. 1 << 22).
250 *
251 * - There's a limit on the maximum number of S/G elements in the port DMA
252 * buffer (AHCI_MAX_SG) which is lower than the HW maximum. It's beyond
253 * the control of this function to split commands which require more
254 * than AHCI_MAX_SG entries. In order to help the caller, the return value
255 * of this function will indicate how many OS/2 S/G entries were
256 * successfully mapped.
257 */
258 for (i = n = 0; i < sg_cnt; i++)
259 {
260 u32 sg_addr = sg_list[i].ppXferBuf;
261 u32 sg_size = sg_list[i].XferBufLen;
262
263 do
264 {
265 u32 chunk = (sg_size > AHCI_MAX_SG_ELEMENT_LEN) ? AHCI_MAX_SG_ELEMENT_LEN : sg_size;
266 if (n >= AHCI_MAX_SG)
267 {
268 /* couldn't store all S/G elements in our DMA buffer */
269 dprintf(0,"ata_cmd(): too many S/G elements\n");
270 return(i - 1);
271 }
272 if ((sg_addr & 1) || (chunk & 1))
273 {
274 dprintf(1,"error: ata_cmd() called with unaligned S/G element(s)\n");
275 return(ATA_CMD_UNALIGNED_ADDR);
276 }
277 cmd_tbl->sg_list[n].addr = sg_addr;
278 cmd_tbl->sg_list[n].size = chunk - 1;
279 sg_addr += chunk;
280 sg_size -= chunk;
281 n++;
282 } while (sg_size > 0);
283 }
284
285 /* set final S/G count in AHCI command header */
286 cmd_hdr->options |= n << 16;
287
288 if (D32g_DbgLevel >= 7)
289 {
290 DPRINTF(0,"ATA command for %d.%d.%d, slot %d:\n", ad_no(ai), p, d, slot);
291 dHexDump(0,cmd_hdr, offsetof(AHCI_CMD_HDR, reserved), "cmd_hdr: ");
292 dHexDump(0,&ata_cmd, sizeof(ata_cmd), "ata_cmd: ");
293 if (atapi_cmd != NULL)
294 {
295 dHexDump(0,atapi_cmd, atapi_cmd_len, "atapi_cmd: ");
296 }
297 if (n > 0)
298 {
299 dHexDump(0,cmd_tbl->sg_list, sizeof(*cmd_tbl->sg_list) * n, "sg_list: ");
300 }
301 }
302
303 return(ATA_CMD_SUCCESS);
304}
305
306/******************************************************************************
307 * Fill SATA command FIS with values extracted from an ATA command structure.
308 * The command FIS buffer (fis) is expected to be initialized to 0s. The
309 * structure of the FIS maps to the ATA shadow register block, including
310 * registers which can be written twice to store 16 bits (called 'exp').
311 *
312 * The FIS structure looks like this (using LSB notation):
313 *
314 * +----------------+----------------+----------------+----------------+
315 * 00 | FIS type (27h) | C|R|R|R|PMP | Command | Features |
316 * +----------------+----------------+----------------+----------------+
317 * 04 | LBA 7:0 | LBA 15:8 | LBA 23:16 | R|R|R|D|Head |
318 * +----------------+----------------+----------------+----------------+
319 * 08 | LBA 31:24 | LBA 39:32 | LBA 47:40 | Features exp |
320 * +----------------+----------------+----------------+----------------+
321 * 12 | Count 7:0 | Count 15:8 | Reserved | Control |
322 * +----------------+----------------+----------------+----------------+
323 * 16 | Reserved | Reserved | Reserved | Reserved |
324 * +----------------+----------------+----------------+----------------+
325 */
326void ata_cmd_to_fis(u8 *fis, ATA_CMD *ata_cmd, int d)
327{
328 fis[0] = 0x27; /* register - host to device FIS */
329 fis[1] = (u8) (d & 0xf); /* port multiplier number */
330 fis[1] |= 0x80; /* bit 7 indicates Command FIS */
331 fis[2] = (u8) ata_cmd->cmd;
332 fis[3] = (u8) ata_cmd->features;
333
334 fis[4] = (u8) ata_cmd->lba_l;
335 fis[5] = (u8) (ata_cmd->lba_l >> 8);
336 fis[6] = (u8) (ata_cmd->lba_l >> 16);
337 fis[7] = (u8) ata_cmd->device;
338
339 fis[8] = (u8) (ata_cmd->lba_l >> 24);
340 fis[9] = (u8) ata_cmd->lba_h;
341 fis[10] = (u8) (ata_cmd->lba_h >> 8);
342 fis[11] = (u8) (ata_cmd->features >> 8);
343
344 fis[12] = (u8) ata_cmd->count;
345 fis[13] = (u8) (ata_cmd->count >> 8);
346}
347
348/******************************************************************************
349 * Get index in S/G list for the number of transferred sectors in the IORB.
350 *
351 * Returning io->cSGList indicates an error.
352 *
353 * NOTE: OS/2 makes sure S/G lists are set up such that entries at the HW
354 * limit will never cross sector boundaries. This means that splitting
355 * S/G lists into multiple commands can be done without editing the S/G
356 * lists.
357 */
358u16 ata_get_sg_indx(IORB_EXECUTEIO *io)
359{
360 ULONG offset = io->BlocksXferred * io->BlockSize;
361 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
362 USHORT i;
363
364 for (i = 0; i < io->cSGList && offset > 0; i++)
365 {
366 offset -= pSGList[i].XferBufLen;
367 }
368
369 return(i);
370}
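
/* Worked example (illustrative numbers): with BlockSize 512, BlocksXferred 16
 * and an S/G list whose first two elements are 4096 bytes each, the 8 KB
 * already transferred is covered by exactly those two elements, so the loop
 * stops at i == 2 and the next command starts with the third S/G element.
 */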
371
372/******************************************************************************
373 * Get max S/G count which will fit into our HW S/G buffers. This function is
374 * called when the S/G list is too long and we need to split the IORB into
375 * multiple commands. It returns both the number of sectors and S/G list
376 * elements that we can handle in a single command.
377 *
378 * The parameter 'sg_indx' indicates the current start index in the S/G list
379 * (0 if this is the first command iteration).
380 *
381 * The parameter 'sg_max' is the return value of v_ata_cmd() and indicates
382 * how many S/G elements were successfully mapped. Whatever we return needs to
383 * be less than or equal to this value.
384 *
385 * Returning 0 in *sg_cnt indicates an error.
386 *
387 * NOTE: OS/2 makes sure S/G lists are set up such that entries at HW limits
388 * will never cross sector boundaries. This means that splitting S/G
389 * lists into multiple commands can be done without editing S/G list
390 * elements. Since AHCI only allows 22 bits for each S/G element, the
391 * hardware limits are reported as AHCI_MAX_SG / 2 but will vary based
392 * on the actual length of S/G elements. This function looks for the
393 * maximum number of S/G elements that can be mapped on sector
394 * boundaries which will still fit into our HW S/G list.
395 */
396void ata_max_sg_cnt(IORB_EXECUTEIO *io, USHORT sg_indx, USHORT sg_max,
397 USHORT *sg_cnt, USHORT *sector_cnt)
398{
399 ULONG max_sector_cnt = 0;
400 USHORT max_sg_cnt = 0;
401 ULONG offset = 0;
402 USHORT i;
403 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
404
405 for (i = sg_indx; i < io->cSGList; i++)
406 {
407 if (i - sg_indx >= sg_max)
408 {
409 /* we're beyond the number of S/G elements we can map */
410 break;
411 }
412
413 offset += pSGList[i].XferBufLen;
414 if (offset % io->BlockSize == 0)
415 {
416 /* this S/G element ends on a sector boundary */
417 max_sector_cnt = offset / io->BlockSize;
418 max_sg_cnt = i + 1;
419 }
420 }
421
422 /* return the best match we found (0 indicating failure) */
423 *sector_cnt = max_sector_cnt;
424 *sg_cnt = max_sg_cnt;
425}
426
427
428/******************************************************************************
429 * Get device or media geometry. Device and media geometry are expected to be
430 * the same for non-removable devices, which will always be the case for the
431 * ATA devices we're dealing with (hard disks). ATAPI is a different story
432 * and is handled by atapi_get_geometry().
433 */
434int ata_get_geometry(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
435{
436 ADD_WORKSPACE *aws = add_workspace(pIorb);
437 int rc;
438
439 /* allocate buffer for ATA identify information */
440 if ((aws->buf = MemAlloc(ATA_ID_WORDS * sizeof(u16))) == NULL)
441 {
442 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
443 return(-1);
444 }
445
446 /* request ATA identify information */
447 aws->ppfunc = ata_get_geometry_pp;
448 rc = ata_cmd(ad_infos + iorb_unit_adapter(pIorb),
449 iorb_unit_port(pIorb),
450 iorb_unit_device(pIorb),
451 slot,
452 ATA_CMD_ID_ATA,
453 AP_VADDR, (void *) aws->buf, ATA_ID_WORDS * sizeof(u16),
454 AP_END);
455
456 if (rc != 0)
457 {
458 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
459 }
460
461 return(rc);
462}
463
464/* Adjust the cylinder count in the physical
465 * geometry to the last full cylinder.
466 */
467int adjust_cylinders(GEOMETRY *geometry, ULONG TotalSectors)
468{
469 USHORT SecPerCyl;
470 int rc = FALSE;
471
472 geometry->TotalSectors = TotalSectors;
473 SecPerCyl = geometry->SectorsPerTrack * geometry->NumHeads;
474 if (SecPerCyl > 0)
475 {
476 ULONG TotalCylinders = TotalSectors / SecPerCyl;
477
478 geometry->TotalSectors = TotalCylinders * SecPerCyl;
479 geometry->TotalCylinders = TotalCylinders;
480 if (TotalCylinders >> 16)
481 {
482 geometry->TotalCylinders = 65535;
483 rc = TRUE;
484 }
485 }
486 return (rc);
487}
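
/* Worked example (illustrative numbers): with 255 heads, 63 sectors per track
 * and 976,773,168 total sectors, SecPerCyl = 16065 and TotalCylinders = 60801;
 * TotalSectors is rounded down to 60801 * 16065 = 976,768,065 and the function
 * returns FALSE because the cylinder count still fits into 16 bits.
 */
#if 0
GEOMETRY g = { 0 };
g.NumHeads = 255;
g.SectorsPerTrack = 63;
adjust_cylinders(&g, 976773168UL);   /* returns FALSE (no cylinder overflow) */
#endif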
488
489/* Calculate the logical geometry based on the input physical geometry
490 * using the LBA Assist Translation algorithm.
491 */
492#define BIOS_MAX_CYLINDERS 1024l
493#define BIOS_MAX_NUMHEADS 255
494#define BIOS_MAX_SECTORSPERTRACK 63
495void log_geom_calculate_LBA_assist(GEOMETRY *geometry, ULONG TotalSectors)
496{
497 UCHAR numSpT = BIOS_MAX_SECTORSPERTRACK;
498 UCHAR numHeads = BIOS_MAX_NUMHEADS;
499 ULONG Cylinders;
500
501 if (TotalSectors <= (BIOS_MAX_CYLINDERS * 128 * BIOS_MAX_SECTORSPERTRACK))
502 {
503 USHORT temp = (TotalSectors - 1) / (BIOS_MAX_CYLINDERS * BIOS_MAX_SECTORSPERTRACK);
504
505 if (temp < 16) numHeads = 16;
506 else if (temp < 32) numHeads = 32;
507 else if (temp < 64) numHeads = 64;
508 else numHeads = 128;
509 }
510
511 do
512 {
513 Cylinders = TotalSectors / (USHORT)(numHeads * numSpT);
514 if (Cylinders >> 16)
515 {
516 if (numSpT < 128)
517 numSpT = (numSpT << 1) | 1;
518 else
519 Cylinders = 65535; // overflow !
520 }
521 } while (Cylinders >> 16);
522
523 geometry->TotalCylinders = Cylinders;
524 geometry->NumHeads = numHeads;
525 geometry->SectorsPerTrack = numSpT;
526}
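
/* Worked example (illustrative numbers): a disk with 976,773,168 sectors
 * (~465 GiB) exceeds 1024 * 128 * 63 sectors, so numHeads stays at 255 and
 * numSpT at 63; the loop then yields 976773168 / (255 * 63) = 60801
 * cylinders, which fits into 16 bits.
 */
#if 0
GEOMETRY g = { 0 };
log_geom_calculate_LBA_assist(&g, 976773168UL);
/* g.TotalCylinders == 60801, g.NumHeads == 255, g.SectorsPerTrack == 63 */
#endif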
527
528int check_lvm(IORBH *pIorb, ULONG sector)
529{
530 DLA_Table_Sector *pDLA = (DLA_Table_Sector*)add_workspace(pIorb)->buf;
531 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
532 GEOMETRY *geometry = ((IORB_GEOMETRY*)pIorb)->pGeometry;
533 int p = iorb_unit_port(pIorb);
534 int rc;
535
536 rc = ahci_exec_polled_cmd(ai, p, 0, 500, ATA_CMD_READ,
537 AP_SECTOR_28, sector-1,
538 AP_COUNT, 1,
539 AP_VADDR, (void *)pDLA, 512,
540 AP_DEVICE, 0x40,
541 AP_END);
542 if (rc) return 0;
543
544 DHEXDUMP(3,pDLA, sizeof(DLA_Table_Sector), "DLA sector %d:\n", sector-1);
545
546 if ((pDLA->DLA_Signature1 == DLA_TABLE_SIGNATURE1) && (pDLA->DLA_Signature2 == DLA_TABLE_SIGNATURE2)) {
547 DPRINTF(3,"is_lvm_geometry found at sector %d\n", sector-1);
548 geometry->TotalCylinders = pDLA->Cylinders;
549 geometry->NumHeads = pDLA->Heads_Per_Cylinder;
550 geometry->SectorsPerTrack = pDLA->Sectors_Per_Track;
551 geometry->TotalSectors = pDLA->Cylinders * pDLA->Heads_Per_Cylinder * pDLA->Sectors_Per_Track;
552 return 1;
553 }
554
555 return 0;
556}
557
558/******************************************************************************
559 * Try to read LVM information from the disk. If found, use the LVM geometry.
560 * This function will only work at init time. A better strategy would be to
561 * calculate the geometry during ahci_scan_ports and save it away and then just
562 * return the saved values when ata_get_geometry() is called.
563 */
564int is_lvm_geometry(IORBH *pIorb)
565{
566 GEOMETRY *geometry = ((IORB_GEOMETRY*)pIorb)->pGeometry;
567 ULONG sector;
568
569 if (init_complete) return 0; /* We cannot use ahci_exec_polled_cmd() after init_complete */
570
571 if (use_lvm_info)
572 {
573 #ifdef DEBUG
574 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
575 int p = iorb_unit_port(pIorb);
576 int d = iorb_unit_device(pIorb);
577 DPRINTF(3,"is_lvm_geometry (%d.%d.%d)\n", ad_no(ai), p, d);
578 #endif
579
580 /* First check the sector reported by the hardware */
581 if (check_lvm(pIorb, geometry->SectorsPerTrack)) return 1;
582
583 for (sector = 255; sector >= 63; sector >>= 1)
584 {
585 if (sector == geometry->SectorsPerTrack) continue;
586 if (check_lvm(pIorb, sector)) return 1;
587 }
588 }
589
590 return 0;
591}
592
593/******************************************************************************
594 * Post processing function for ata_get_geometry(): convert the ATA identify
595 * information to OS/2 IOCC_GEOMETRY information.
596 */
597void ata_get_geometry_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb)
598{
599 GEOMETRY *geometry = ((IORB_GEOMETRY*)pIorb)->pGeometry;
600 USHORT geometry_len = ((IORB_GEOMETRY *)pIorb)->GeometryLen;
601 u16 *id_buf = add_workspace(pIorb)->buf;
602 int a = iorb_unit_adapter(pIorb);
603 int p = iorb_unit_port(pIorb);
604 char *Method;
605
606 /* Fill in geometry information; the ATA-8 spec declares the geometry
607 * fields in the ATA ID buffer as obsolete but it's still the best
608 * guess in most cases. If the information stored in the geometry
609 * fields is apparently incorrect, we'll use the algorithm typically
610 * used by SCSI adapters and modern PC BIOS versions:
611 *
612 * - 512 bytes per sector
613 * - 255 heads
614 * - 63 sectors per track (or 56 with the parameter "/4")
615 * - x cylinders (calculated)
616 *
617 * Please note that os2ahci currently does not natively support ATA sectors
618 * larger than 512 bytes and therefore relies on the translation logic built
619 * into the corresponding ATA disks. In order to prevent file systems that
620 * use block sizes larger than 512 bytes (FAT, JFS, ...) from ending up with
621 * misaligned physical sector accesses, and hence more physical I/Os than
622 * necessary, the command line parameter "/4" can be used to force a track
623 * size of 56 sectors (56 * 512 bytes = 28 KB, a multiple of 4 KB). This way,
624 * partitions will start on 4K boundaries.
625 *
626 * Another limitation is that OS/2 has a 32-bit variable for the total number
627 * of sectors, limiting the maximum capacity to roughly 2TB. This is another
628 * issue that needs to be addressed sooner or later; large sectors could
629 * raise this limit to something like 8TB but this is not really much of a
630 * difference. Maybe there's something in later DDKs that allows more than
631 * 32 bits?
632 */
633 memset(geometry, 0x00, geometry_len);
634 geometry->BytesPerSector = ATA_SECTOR_SIZE;
635
636 /* extract total number of sectors */
637 if (id_buf[ATA_ID_CFS_ENABLE_2] & 0x400)
638 {
639 /* 48-bit LBA supported */
640 if (ATA_CAPACITY48_H(id_buf) != 0)
641 {
642 /* more than 32 bits for number of sectors */
643 dprintf(0,"warning: limiting disk %d.%d.%d to 2TB\n",
644 iorb_unit_adapter(pIorb), iorb_unit_port(pIorb),
645 iorb_unit_device(pIorb));
646 geometry->TotalSectors = 0xffffffffUL;
647 }
648 else
649 {
650 geometry->TotalSectors = ATA_CAPACITY48_L(id_buf);
651 }
652 }
653 else
654 {
655 /* 28-bit LBA */
656 geometry->TotalSectors = ATA_CAPACITY(id_buf) & 0x0fffffffUL;
657 }
658
659 Method = "None";
660 /* fabricate the remaining geometry fields */
661 if (track_size[a][p] != 0)
662 {
663 /* A specific track size has been requested for this port; this is
664 * typically done for disks with 4K sectors to make sure partitions
665 * start on 8-sector boundaries (parameter "/4").
666 */
667 geometry->NumHeads = 255;
668 geometry->SectorsPerTrack = track_size[a][p];
669 geometry->TotalCylinders = geometry->TotalSectors / ((u32) geometry->NumHeads * (u32) geometry->SectorsPerTrack);
670 Method = "Custom";
671 }
672 else if (CUR_HEADS(id_buf) > 0 && CUR_CYLS(id_buf) > 0 && CUR_SECTORS(id_buf) > 0 &&
673 CUR_CAPACITY(id_buf) == (u32) CUR_HEADS(id_buf) * (u32) CUR_CYLS(id_buf) * (u32) CUR_SECTORS(id_buf))
674 {
675 /* BIOS-supplied (aka "current") geometry values look valid */
676 geometry->NumHeads = CUR_HEADS(id_buf);
677 geometry->SectorsPerTrack = CUR_SECTORS(id_buf);
678 geometry->TotalCylinders = CUR_CYLS(id_buf);
679 Method = "BIOS";
680 }
681 else if (ATA_HEADS(id_buf) > 0 && ATA_CYLS(id_buf) > 0 && ATA_SECTORS(id_buf) > 0)
682 {
683 /* ATA-supplied values for geometry look valid */
684 geometry->NumHeads = ATA_HEADS(id_buf);
685 geometry->SectorsPerTrack = ATA_SECTORS(id_buf);
686 geometry->TotalCylinders = ATA_CYLS(id_buf);
687 Method = "ATA";
688 }
689 else
690 {
691 /* use typical SCSI geometry */
692 geometry->NumHeads = 255;
693 geometry->SectorsPerTrack = 63;
694 geometry->TotalCylinders = geometry->TotalSectors / ((u32) geometry->NumHeads * (u32) geometry->SectorsPerTrack);
695 Method = "SCSI";
696 }
697
698 DPRINTF(2,"Physical geometry: %d cylinders, %d heads, %d sectors per track (%dMB) (%s)\n",
699 geometry->TotalCylinders, geometry->NumHeads, geometry->SectorsPerTrack,
700 (geometry->TotalSectors / 2048), Method);
701
702 /* Fixup the geometry in case the geometry reported by the BIOS is bad */
703 if (adjust_cylinders(geometry, geometry->TotalSectors))
704 { // cylinder overflow
705 log_geom_calculate_LBA_assist(geometry, geometry->TotalSectors);
706 geometry->TotalSectors = (USHORT)(geometry->NumHeads * geometry->SectorsPerTrack) * (ULONG)geometry->TotalCylinders;
707 }
708 adjust_cylinders(geometry, geometry->TotalSectors);
709
710 DPRINTF(2,"Logical geometry: %d cylinders, %d heads, %d sectors per track (%dMB) (%s)\n",
711 geometry->TotalCylinders, geometry->NumHeads, geometry->SectorsPerTrack,
712 (geometry->TotalSectors / 2048), Method);
713
714 if (is_lvm_geometry(pIorb)) Method = "LVM";
715 ad_infos[a].ports[p].devs[0].dev_info.Cylinders = geometry->TotalCylinders;
716 ad_infos[a].ports[p].devs[0].dev_info.HeadsPerCylinder = geometry->NumHeads;
717 ad_infos[a].ports[p].devs[0].dev_info.SectorsPerTrack = geometry->SectorsPerTrack;
718 ad_infos[a].ports[p].devs[0].dev_info.TotalSectors = geometry->TotalSectors;
719 ad_infos[a].ports[p].devs[0].dev_info.Method = Method;
720
721 DPRINTF(2,"Reported geometry: %d cylinders, %d heads, %d sectors per track (%dMB) (%s)\n",
722 geometry->TotalCylinders, geometry->NumHeads, geometry->SectorsPerTrack,
723 (geometry->TotalSectors / 2048), Method);
724
725 /* tell interrupt handler that this IORB is complete */
726 add_workspace(pIorb)->complete = 1;
727}
728
729/******************************************************************************
730 * Test whether unit is ready.
731 */
732int ata_unit_ready(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
733{
734 /* This is a NOP for ATA devices (at least right now); returning an error
735 * without setting an error code means ahci_exec_iorb() will not queue any
736 * HW command and the IORB will complete successfully.
737 */
738 ((IORB_UNIT_STATUS *)pIorb)->UnitStatus = US_READY | US_POWER;
739 return(-1);
740}
741
742/******************************************************************************
743 * Read sectors from AHCI device.
744 */
745int ata_read(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
746{
747 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
748 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
749 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
750 ULONG sector = io->RBA + io->BlocksXferred;
751 USHORT count = io->BlockCount - io->BlocksXferred;
752 USHORT sg_indx;
753 USHORT sg_cnt;
754 int p = iorb_unit_port(pIorb);
755 int d = iorb_unit_device(pIorb);
756 int rc;
757
758 if (io->BlockCount == 0)
759 {
760 /* NOP; return -1 without error in IORB to indicate success */
761 return(-1);
762 }
763
764 if (add_workspace(pIorb)->unaligned)
765 {
766 /* unaligned S/G addresses present; need to use double buffers */
767 return(ata_read_unaligned(pIorb, slot));
768 }
769
770 /* Kludge: some I/O commands during boot use excessive S/G buffer lengths
771 * which cause NCQ commands to lock up. If there's only one S/G element
772 * and this element is already larger than what we can derive from the sector
773 * count, we'll adjust that element.
774 */
775 if (io->BlocksXferred == 0 && io->cSGList == 1 &&
776 pSGList[0].XferBufLen > (ULONG) io->BlockCount * io->BlockSize)
777 {
778 pSGList[0].XferBufLen = (ULONG) io->BlockCount * io->BlockSize;
779 }
780
781 /* prepare read command while keeping an eye on S/G count limitations */
782 do
783 {
784 sg_indx = ata_get_sg_indx(io);
785 sg_cnt = io->cSGList - sg_indx;
786 if ((rc = ata_cmd_read(pIorb, ai, p, d, slot, sector, count,
787 pSGList + sg_indx, sg_cnt)) > 0)
788 {
789 /* couldn't map all S/G elements */
790 ata_max_sg_cnt(io, sg_indx, rc, &sg_cnt, &count);
791 }
792 } while (rc > 0 && sg_cnt > 0);
793
794 if (rc == 0)
795 {
796 add_workspace(pIorb)->blocks = count;
797 add_workspace(pIorb)->ppfunc = ata_read_pp;
798 }
799 else if (rc > 0)
800 {
801 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD);
802 }
803 else if (rc == ATA_CMD_UNALIGNED_ADDR)
804 {
805 /* unaligned S/G addresses detected; need to use double buffers */
806 add_workspace(pIorb)->unaligned = 1;
807 return(ata_read_unaligned(pIorb, slot));
808
809 }
810 else
811 {
812 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
813 }
814
815 return(rc);
816}
817
818/******************************************************************************
819 * Read sectors from AHCI device with unaligned S/G element addresses. AHCI
820 * only allows aligned S/G addresses while OS/2 doesn't have this kind of
821 * restriction. This doesn't happen very often, but when it does, we need to
822 * use a transfer buffer and copy the data manually.
823 */
824int ata_read_unaligned(IORBH *pIorb, int slot)
825{
826 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
827 ADD_WORKSPACE *aws = add_workspace(pIorb);
828 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
829 ULONG sector = io->RBA + io->BlocksXferred;
830 SCATGATENTRY sg_single;
831 int p = iorb_unit_port(pIorb);
832 int d = iorb_unit_device(pIorb);
833 int rc;
834
835 DPRINTF(7,"ata_read_unaligned(%d.%d.%d, %d)\n", ad_no(ai), p, d, sector);
836 ai->ports[p].unaligned_read_count++;
837
838 /* allocate transfer buffer */
839 if ((aws->buf = MemAlloc(io->BlockSize)) == NULL)
840 {
841 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
842 return(-1);
843 }
844
845 /* prepare read command using transfer buffer */
846 sg_single.ppXferBuf = MemPhysAdr(aws->buf);
847 sg_single.XferBufLen = io->BlockSize;
848 rc = ata_cmd_read(pIorb, ai, p, d, slot, sector, 1, &sg_single, 1);
849
850 if (rc == 0) {
851 add_workspace(pIorb)->blocks = 1;
852 add_workspace(pIorb)->ppfunc = ata_read_pp;
853
854 } else if (rc > 0) {
855 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD);
856
857 } else {
858 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
859 }
860
861 return(rc);
862}
863
864/******************************************************************************
865 * Post processing function for ata_read(); this function updates the
866 * BlocksXferred counter in the IORB and, if not all blocks have been
867 * transferred, requeues the IORB to process the remaining sectors. It also
868 * takes care of copying data from the transfer buffer for unaligned reads.
869 */
870void ata_read_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb)
871{
872 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
873 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
874 ADD_WORKSPACE *aws = add_workspace(pIorb);
875
876 if (aws->unaligned)
877 {
878 /* copy transfer buffer to corresponding physical address in S/G list */
879 sg_memcpy(pSGList, io->cSGList,
880 (ULONG) io->BlocksXferred * (ULONG) io->BlockSize,
881 aws->buf, io->BlockSize, BUF_TO_SG);
882 }
883
884 io->BlocksXferred += add_workspace(pIorb)->blocks;
885 DPRINTF(7,"ata_read_pp(): blocks transferred = %d\n", io->BlocksXferred);
886
887 if (io->BlocksXferred >= io->BlockCount)
888 {
889 /* we're done; tell IRQ handler the IORB is complete */
890 add_workspace(pIorb)->complete = 1;
891 }
892 else
893 {
894 /* requeue this IORB for next iteration */
895 iorb_requeue(pIorb);
896 }
897}
898
899/******************************************************************************
900 * Verify readability of sectors on ATA device.
901 */
902int ata_verify(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
903{
904 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
905 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
906 int p = iorb_unit_port(pIorb);
907 int d = iorb_unit_device(pIorb);
908 int rc;
909
910 if (io->BlockCount == 0)
911 {
912 /* NOP; return -1 without error in IORB to indicate success */
913 return(-1);
914 }
915
916 /* prepare verify command */
917 if (io->RBA >= (1UL << 28) || io->BlockCount > 256)
918 {
919 /* need LBA48 for this command */
920 if (!ai->ports[p].devs[d].lba48) {
921 iorb_seterr(pIorb, IOERR_RBA_LIMIT);
922 return(-1);
923 }
924 rc = ata_cmd(ai, p, d, slot, ATA_CMD_VERIFY_EXT,
925 AP_SECTOR_48, io->RBA, 0,
926 AP_COUNT, io->BlockCount,
927 AP_DEVICE, 0x40,
928 AP_END);
929 } else {
930 rc = ata_cmd(ai, p, d, slot, ATA_CMD_VERIFY,
931 AP_SECTOR_28, io->RBA,
932 AP_COUNT, io->BlockCount & 0xffU,
933 AP_DEVICE, 0x40,
934 AP_END);
935 }
936
937 return(rc);
938}
939
940/******************************************************************************
941 * Write sectors to AHCI device.
942 */
943int ata_write(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
944{
945 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
946 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
947 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
948 ULONG sector = io->RBA + io->BlocksXferred;
949 USHORT count = io->BlockCount - io->BlocksXferred;
950 USHORT sg_indx;
951 USHORT sg_cnt;
952 int p = iorb_unit_port(pIorb);
953 int d = iorb_unit_device(pIorb);
954 int rc;
955
956 if (io->BlockCount == 0)
957 {
958 /* NOP; return -1 without error in IORB to indicate success */
959 return(-1);
960 }
961
962 if (add_workspace(pIorb)->unaligned)
963 {
964 /* unaligned S/G addresses present; need to use double buffers */
965 return(ata_write_unaligned(pIorb, slot));
966 }
967
968 /* prepare write command while keeping an eye on S/G count limitations */
969 do {
970 sg_indx = ata_get_sg_indx(io);
971 sg_cnt = io->cSGList - sg_indx;
972 if ((rc = ata_cmd_write(pIorb, ai, p, d, slot, sector, count,
973 pSGList + sg_indx, sg_cnt,
974 io->Flags & XIO_DISABLE_HW_WRITE_CACHE)) > 0)
975 {
976 /* couldn't map all S/G elements */
977 ata_max_sg_cnt(io, sg_indx, (USHORT) rc, &sg_cnt, &count);
978 }
979 } while (rc > 0 && sg_cnt > 0);
980
981 if (rc == 0)
982 {
983 add_workspace(pIorb)->blocks = count;
984 add_workspace(pIorb)->ppfunc = ata_write_pp;
985 }
986 else if (rc > 0)
987 {
988 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD);
989 }
990 else if (rc == ATA_CMD_UNALIGNED_ADDR)
991 {
992 /* unaligned S/G addresses detected; need to use double buffers */
993 add_workspace(pIorb)->unaligned = 1;
994 return(ata_write_unaligned(pIorb, slot));
995 }
996 else
997 {
998 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
999 }
1000
1001 return(rc);
1002}
1003
1004/******************************************************************************
1005 * Write sectors to an AHCI device with unaligned S/G element addresses. AHCI
1006 * only allows aligned S/G addresses while OS/2 doesn't have this kind of
1007 * restriction. This doesn't happen very often, but when it does, we need to
1008 * use a transfer buffer and copy the data manually.
1009 */
1010int ata_write_unaligned(IORBH *pIorb, int slot)
1011{
1012 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
1013 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
1014 ADD_WORKSPACE *aws = add_workspace(pIorb);
1015 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
1016 ULONG sector = io->RBA + io->BlocksXferred;
1017 SCATGATENTRY sg_single;
1018 int p = iorb_unit_port(pIorb);
1019 int d = iorb_unit_device(pIorb);
1020 int rc;
1021
1022 DPRINTF(7,"ata_write_unaligned(%d.%d.%d, %d)\n", ad_no(ai), p, d, sector);
1023
1024 /* allocate transfer buffer */
1025 if ((aws->buf = MemAlloc(io->BlockSize)) == NULL)
1026 {
1027 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
1028 return(-1);
1029 }
1030
1031 /* copy next sector from S/G list to transfer buffer */
1032 sg_memcpy(pSGList, io->cSGList,
1033 (ULONG) io->BlocksXferred * (ULONG) io->BlockSize,
1034 aws->buf, io->BlockSize, SG_TO_BUF);
1035
1036 /* prepare write command using transfer buffer */
1037 sg_single.ppXferBuf = MemPhysAdr(aws->buf);
1038 sg_single.XferBufLen = io->BlockSize;
1039 rc = ata_cmd_write(pIorb, ai, p, d, slot, sector, 1, &sg_single, 1,
1040 io->Flags & XIO_DISABLE_HW_WRITE_CACHE);
1041
1042 if (rc == 0)
1043 {
1044 add_workspace(pIorb)->blocks = 1;
1045 add_workspace(pIorb)->ppfunc = ata_write_pp;
1046 }
1047 else if (rc > 0)
1048 {
1049 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD);
1050 }
1051 else
1052 {
1053 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
1054 }
1055
1056 return(rc);
1057}
1058
1059
1060/******************************************************************************
1061 * Post processing function for ata_write(); this function updates the
1062 * BlocksXferred counter in the IORB and, if not all blocks have been
1063 * transferred, requeues the IORB to process the remaining sectors.
1064 */
1065void ata_write_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1066{
1067 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
1068
1069 io->BlocksXferred += add_workspace(pIorb)->blocks;
1070 DPRINTF(7,"ata_write_pp(): blocks transferred = %d\n", io->BlocksXferred);
1071
1072 if (io->BlocksXferred >= io->BlockCount)
1073 {
1074 /* we're done; tell IRQ handler the IORB is complete */
1075 add_workspace(pIorb)->complete = 1;
1076 }
1077 else
1078 {
1079 /* requeue this IORB for next iteration */
1080 iorb_requeue(pIorb);
1081 }
1082}
1083
1084/******************************************************************************
1085 * Execute ATA command.
1086 */
1087int ata_execute_ata(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
1088{
1089 IORB_ADAPTER_PASSTHRU *apt = (IORB_ADAPTER_PASSTHRU *)pIorb;
1090 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(apt->pSGList);
1091 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
1092 int p = iorb_unit_port(pIorb);
1093 int d = iorb_unit_device(pIorb);
1094 int rc;
1095
1096 if (apt->ControllerCmdLen != sizeof(ATA_CMD))
1097 {
1098 iorb_seterr(pIorb, IOERR_CMD_SYNTAX);
1099 return(-1);
1100 }
1101
1102 rc = ata_cmd(ai, p, d, slot, 0,
1103 AP_SGLIST, pSGList, apt->cSGList,
1104 AP_ATA_CMD, Far16ToFlat(apt->pControllerCmd),
1105 AP_WRITE, !(apt->Flags & PT_DIRECTION_IN),
1106 AP_END);
1107
1108 if (rc == 0)
1109 {
1110 add_workspace(pIorb)->ppfunc = ata_execute_ata_pp;
1111 }
1112
1113 return(rc);
1114}
1115
1116/******************************************************************************
1117 * Post processing function for ata_execute_ata(); the main purpose of this
1118 * function is to copy the received D2H FIS (i.e. the device registers after
1119 * command completion) back to the ATA command structure.
1120 *
1121 * See ata_cmd_to_fis() for an explanation of the mapping.
1122 */
1123void ata_execute_ata_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1124{
1125 AHCI_PORT_DMA *dma_base;
1126 ATA_CMD *cmd;
1127 AD_INFO *ai;
1128 u8 *fis;
1129 int p;
1130
1131 /* get address of D2H FIS */
1132 ai = ad_infos + iorb_unit_adapter(pIorb);
1133 p = iorb_unit_port(pIorb);
1134 dma_base = port_dma_base(ai, p);
1135 fis = dma_base->rx_fis + 0x40;
1136
1137 if (fis[0] != 0x34)
1138 {
1139 /* this is not a D2H FIS - give up silently */
1140 DPRINTF(3,"ata_execute_ata_pp(): D2H FIS type incorrect: %d\n", fis[0]);
1141 add_workspace(pIorb)->complete = 1;
1142 return;
1143 }
1144
1145 /* map D2H FIS to the original ATA controller command structure */
1146 cmd = (ATA_CMD *)Far16ToFlat(((IORB_ADAPTER_PASSTHRU*)pIorb)->pControllerCmd);
1147
1148 cmd->cmd = fis[2];
1149 cmd->device = fis[7];
1150 cmd->features = ((u16) fis[3])
1151 | ((u16) fis[11] << 8);
1152 cmd->lba_l = ((u32) fis[4])
1153 | ((u32) fis[5] << 8)
1154 | ((u32) fis[6] << 16)
1155 | ((u32) fis[8] << 24);
1156 cmd->lba_h = ((u16) fis[9])
1157 | ((u16) fis[10] << 8);
1158 cmd->count = ((u16) fis[12])
1159 | ((u16) fis[13] << 8);
1160
1161 DHEXDUMP(0,cmd, sizeof(*cmd), "ata_execute_ata_pp(): cmd after completion:\n");
1162
1163 /* signal completion to interrupt handler */
1164 add_workspace(pIorb)->complete = 1;
1165}
1166
1167/******************************************************************************
1168 * Request sense information for a failed command. Since there is no "request
1169 * sense" command for ATA devices, we need to read the current error code from
1170 * the AHCI task file register and fabricate the sense information.
1171 *
1172 * NOTES:
1173 *
1174 * - This function must be called right after an ATA command has failed and
1175 * before any other commands are queued on the corresponding port. This
1176 * function is typically called in the port restart context hook which is
1177 * triggered by an AHCI error interrupt.
1178 *
1179 * - The ATA error bits are a complete mess. We'll try and catch the most
1180 * interesting error codes (such as medium errors) and report everything
1181 * else with a generic error code.
1182 */
1183int ata_req_sense(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
1184{
1185 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
1186 u8 *port_mmio = port_base(ai, iorb_unit_port(pIorb));
1187 u32 tf_data = readl(port_mmio + PORT_TFDATA);
1188 u8 err = (tf_data >> 8);
1189 u8 sts = (tf_data);
1190
1191 if (sts & ATA_ERR)
1192 {
1193 if (sts & ATA_DF)
1194 {
1195 /* there is a device-specific error condition */
1196 if (err & ATA_ICRC)
1197 {
1198 iorb_seterr(pIorb, IOERR_ADAPTER_DEVICEBUSCHECK);
1199 }
1200 else if (err & ATA_UNC)
1201 {
1202 iorb_seterr(pIorb, IOERR_MEDIA);
1203 }
1204 else if (err & ATA_IDNF)
1205 {
1206 iorb_seterr(pIorb, IOERR_RBA_ADDRESSING_ERROR);
1207 }
1208 else
1209 {
1210 iorb_seterr(pIorb, IOERR_DEVICE_NONSPECIFIC);
1211 }
1212
1213 }
1214 else
1215 {
1216 iorb_seterr(pIorb, IOERR_DEVICE_NONSPECIFIC);
1217 }
1218 }
1219 else
1220 {
1221 /* this function only gets called when we received an error interrupt */
1222 iorb_seterr(pIorb, IOERR_DEVICE_NONSPECIFIC);
1223 }
1224
1225 /* Return an error to indicate there's no HW command to be submitted and
1226 * that the IORB can be completed "as is" (the upstream code expects the
1227 * IORB error code, if any, to be set when this happens and this is exactly
1228 * what this function is all about).
1229 */
1230 return(-1);
1231}
1232
1233/******************************************************************************
1234 * Extract vendor and device name from an ATA IDENTIFY buffer. Since strings
1235 * in the identify buffer are byte-swapped, we need to swap them back.
1236 */
1237char *ata_dev_name(u16 *id_buf)
1238{
1239 static char dev_name[ATA_ID_PROD_LEN + 1];
1240 char *t = dev_name;
1241 char *s = (char *) (id_buf + ATA_ID_PROD);
1242 int i;
1243
1244 dev_name[sizeof(dev_name)-1] = '\0';
1245
1246 for (i = 0; i < ATA_ID_PROD_LEN / 2; i++) {
1247 *(t++) = s[1];
1248 *(t++) = s[0];
1249 s += 2;
1250 }
1251
1252 return(dev_name);
1253}
1254
1255/******************************************************************************
1256 * Fabricate ATA READ command based on the capabilities of the corresponding
1257 * device and the parameters set from above (NCQ, etc.).
1258 */
1259static int ata_cmd_read(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot,
1260 ULONG sector, ULONG count, SCATGATENTRY *sg_list,
1261 ULONG sg_cnt)
1262{
1263 int rc;
1264
1265 if (sector >= (1UL << 28) || count > 256 || add_workspace(pIorb)->is_ncq)
1266 {
1267 /* need LBA48 for this command */
1268 if (!ai->ports[p].devs[d].lba48)
1269 {
1270 iorb_seterr(pIorb, IOERR_RBA_LIMIT);
1271 return(-1);
1272 }
1273 if (add_workspace(pIorb)->is_ncq)
1274 {
1275 /* use NCQ read; count goes into feature register, tag into count! */
1276 rc = ata_cmd(ai, p, d, slot, ATA_CMD_FPDMA_READ,
1277 AP_SECTOR_48, sector, 0,
1278 AP_FEATURES, count,
1279 AP_COUNT, (slot << 3), /* tag == slot */
1280 AP_SGLIST, sg_list, sg_cnt,
1281 AP_DEVICE, 0x40,
1282 AP_END);
1283 }
1284 else
1285 {
1286 rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ_EXT,
1287 AP_SECTOR_48, sector, 0,
1288 AP_COUNT, count,
1289 AP_SGLIST, sg_list, sg_cnt,
1290 AP_DEVICE, 0x40,
1291 AP_END);
1292 }
1293
1294 }
1295 else
1296 {
1297 rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ,
1298 AP_SECTOR_28, sector,
1299 AP_COUNT, count & 0xffU,
1300 AP_SGLIST, sg_list, sg_cnt,
1301 AP_DEVICE, 0x40,
1302 AP_END);
1303 }
1304
1305 return(rc);
1306}
1307
1308/******************************************************************************
1309 * Fabricate ATA WRITE command based on the capabilities of the corresponding
1310 * device and the parameters set from above (NCQ, etc.)
1311 */
1312static int ata_cmd_write(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot,
1313 ULONG sector, ULONG count, SCATGATENTRY *sg_list,
1314 ULONG sg_cnt, int write_through)
1315{
1316 int rc;
1317
1318 if (sector >= (1UL << 28) || count > 256 || add_workspace(pIorb)->is_ncq)
1319 {
1320 /* need LBA48 for this command */
1321 if (!ai->ports[p].devs[d].lba48)
1322 {
1323 iorb_seterr(pIorb, IOERR_RBA_LIMIT);
1324 return(-1);
1325 }
1326 if (add_workspace(pIorb)->is_ncq)
1327 {
1328 /* use NCQ write; count goes into feature register, tag into count! */
1329 rc = ata_cmd(ai, p, d, slot, ATA_CMD_FPDMA_WRITE,
1330 AP_SECTOR_48, sector, 0,
1331 AP_FEATURES, count,
1332 /* tag = slot */
1333 AP_COUNT, (slot << 3),
1334 AP_SGLIST, sg_list, sg_cnt,
1335 AP_DEVICE, 0x40,
1336 /* force unit access */
1337 AP_DEVICE, (write_through && !force_write_cache) ? 0x80 : 0,
1338 AP_WRITE, 1,
1339 AP_END);
1340 }
1341 else
1342 {
1343 rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE_EXT,
1344 AP_SECTOR_48, sector, 0,
1345 AP_COUNT, count,
1346 AP_SGLIST, sg_list, sg_cnt,
1347 AP_DEVICE, 0x40,
1348 AP_WRITE, 1,
1349 AP_END);
1350 }
1351 }
1352 else
1353 {
1354 rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE,
1355 AP_SECTOR_28, sector,
1356 AP_COUNT, count & 0xffU,
1357 AP_SGLIST, sg_list, sg_cnt,
1358 AP_DEVICE, 0x40,
1359 AP_WRITE, 1,
1360 AP_END);
1361 }
1362
1363 return(rc);
1364}
1365
1366/******************************************************************************
1367 * Copy block from S/G list to virtual address or vice versa.
1368 */
1369void sg_memcpy(SCATGATENTRY *sg_list, USHORT sg_cnt, ULONG sg_off,
1370 void *buf, USHORT len, SG_MEMCPY_DIRECTION dir)
1371{
1372 USHORT i;
1373 USHORT l;
1374 ULONG phys_addr;
1375 ULONG pos = 0;
1376 char *p;
1377
1378 /* walk through S/G list to find the elements involved in the operation */
1379 for (i = 0; i < sg_cnt && len > 0; i++)
1380 {
1381 if (pos <= sg_off && pos + sg_list[i].XferBufLen > sg_off)
1382 {
1383 /* this S/G element intersects with the block to be copied */
1384 phys_addr = sg_list[i].ppXferBuf + (sg_off - pos);
1385 if ((l = sg_list[i].XferBufLen - (sg_off - pos)) > len)
1386 {
1387 l = len;
1388 }
1389
1390 if (Dev32Help_PhysToLin(phys_addr, l, (PVOID) &p))
1391 {
1392 panic("sg_memcpy(): DevHelp_PhysToLin() failed");
1393 }
1394 if (dir == SG_TO_BUF)
1395 {
1396 memcpy(buf, p, l);
1397 }
1398 else
1399 {
1400 memcpy(p, buf, l);
1401 }
1402 sg_off += l;
1403 buf = (char *) buf + l;
1404 len -= l;
1405 }
1406
1407 pos += sg_list[i].XferBufLen;
1408 }
1409}
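
/* Usage sketch (illustrative; the variable names are assumptions): copy one
 * 512-byte block out of an OS/2 S/G list into a local transfer buffer,
 * starting at byte offset 1024 within the overall transfer. ata_read_pp()
 * and ata_write_unaligned() above use the same call pattern.
 */
#if 0
sg_memcpy(pSGList, io->cSGList, 1024UL, aws->buf, 512, SG_TO_BUF);
#endif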
1410
1411/******************************************************************************
1412 * Halt processing by submitting an internal error. This is a last resort and
1413 * should only be called when the system state is corrupt.
1414 */
1415void panic(char *msg)
1416{
1417 Dev32Help_InternalError(msg, strlen(msg));
1418}
1419