source: trunk/src/os2ahci/ata.c@ 181

Last change on this file since 181 was 181, checked in by David Azarewicz, 9 years ago

Debugging changes

File size: 47.5 KB
1/******************************************************************************
2 * ata.c - ATA command processing
3 *
4 * Copyright (c) 2011 thi.guten Software Development
5 * Copyright (c) 2011 Mensys B.V.
6 * Copyright (c) 2013-2016 David Azarewicz
7 *
8 * Authors: Christian Mueller, Markus Thielen
9 *
10 * Parts copied from/inspired by the Linux AHCI driver;
11 * those parts are (c) Linux AHCI/ATA maintainers
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include "os2ahci.h"
29#include "ata.h"
30
31/* -------------------------- macros and constants ------------------------- */
32
33/* ------------------------ typedefs and structures ------------------------ */
34
35/* -------------------------- function prototypes -------------------------- */
36
37static int ata_cmd_read(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot,
38 ULONG sector, ULONG count, SCATGATENTRY *sg_list,
39 ULONG sg_cnt);
40
41static int ata_cmd_write(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot,
42 ULONG sector, ULONG count, SCATGATENTRY *sg_list,
43 ULONG sg_cnt, int write_through);
44
45/* ------------------------ global/static variables ------------------------ */
46
47/* ----------------------------- start of code ----------------------------- */
48
49/******************************************************************************
50 * Initialize AHCI command slot, FIS and S/G list for the specified ATA
51 * command. The command parameters are passed as a variable argument list
52 * of type and value(s). The list is terminated by AP_END.
53 *
54 * Notes:
55 *
56 * - The specified command slot is expected to be idle; no checks are
57 * performed to prevent messing with a busy port.
58 *
59 * - Port multipliers are not supported yet, thus 'd' should always
60 * be 0 for the time being.
61 *
62 * - 'cmd' is passed as 16-bit integer because the compiler would push
63 * a 'u8' as 16-bit value (it's a fixed argument) and the stdarg
64 * macros would screw up the address of the first variable argument
65 * if the size of the last fixed argument didn't match what the
66 * compiler pushed on the stack.
67 *
68 * Return values:
69 * 0 : success
70 * > 0 : could not map all S/G entries; the return value is the number of
71 * S/G entries that could be mapped.
72 * < 0 : other error
73 */
74int ata_cmd(AD_INFO *ai, int p, int d, int slot, int cmd, ...)
75{
76 va_list va;
77 va_start(va, cmd);
78 return(v_ata_cmd(ai, p, d, slot, cmd, va));
79}
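/* Illustrative usage sketch only (not part of the driver): how a caller might
 * issue a plain 28-bit read via the AP_* parameter protocol described above.
 * The variables (ai, p, d, slot, sector, count, sg_list, sg_cnt) are assumed
 * to have been set up by the caller; the real callers are ata_cmd_read() and
 * ata_cmd_write() near the end of this file.
 */
#if 0
  int rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ,
                   AP_SECTOR_28, sector,          /* low 24 bits -> LBA, high 4 -> device */
                   AP_COUNT, count & 0xffU,       /* sector count */
                   AP_SGLIST, sg_list, sg_cnt,    /* OS/2 scatter/gather list */
                   AP_DEVICE, 0x40,               /* LBA mode */
                   AP_END);

  if (rc > 0) {
    /* only 'rc' OS/2 S/G entries could be mapped; split the transfer */
  } else if (rc < 0) {
    /* invalid parameter or unaligned S/G buffer */
  }
#endif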
80
81int v_ata_cmd(AD_INFO *ai, int p, int d, int slot, int cmd, va_list va)
82{
83 AHCI_PORT_DMA *dma_base_virt;
84 AHCI_CMD_HDR *cmd_hdr;
85 AHCI_CMD_TBL *cmd_tbl;
86 SCATGATENTRY *sg_list = NULL;
87 SCATGATENTRY sg_single;
88 ATA_PARM ap;
89 ATA_CMD ata_cmd;
90 void *atapi_cmd = NULL;
91 u32 dma_base_phys;
92 u32 atapi_cmd_len = 0;
93 u32 ahci_flags = 0;
94 u32 sg_cnt = 0;
95 u32 i;
96 u32 n;
97
98 /* --------------------------------------------------------------------------
99 * Initialize ATA command. The ATA command is set up with the main command
100 * value and a variable list of additional parameters such as the sector
101 * address, transfer count, ...
102 */
103 memset(&ata_cmd, 0x00, sizeof(ata_cmd));
104 ata_cmd.cmd = cmd;
105
106 /* parse variable arguments */
107 do
108 {
109 switch ((ap = va_arg(va, ATA_PARM)))
110 {
111
112 case AP_AHCI_FLAGS:
113 ahci_flags |= va_arg(va, u32);
114 break;
115
116 case AP_WRITE:
117 if (va_arg(va, u32) != 0)
118 {
119 ahci_flags |= AHCI_CMD_WRITE;
120 }
121 break;
122
123 case AP_FEATURES:
124 /* ATA features word */
125 ata_cmd.features |= va_arg(va, u32);
126 break;
127
128 case AP_COUNT:
129 /* transfer count */
130 ata_cmd.count = va_arg(va, u32);
131 break;
132
133 case AP_SECTOR_28:
134 /* 28-bit sector address */
135 ata_cmd.lba_l = va_arg(va, u32);
136 if (ata_cmd.lba_l & 0xf0000000UL)
137 {
138 DPRINTF(0,"error: LBA-28 address %d has more than 28 bits\n", ata_cmd.lba_l);
139 return(ATA_CMD_INVALID_PARM);
140 }
141 /* add upper 4 bits to device field */
142 ata_cmd.device |= (ata_cmd.lba_l >> 24) & 0x0fU;
143 /* only lower 24 bits come into lba_l */
144 ata_cmd.lba_l &= 0x00ffffffUL;
145 break;
146
147 case AP_SECTOR_48:
148 /* 48-bit sector address */
149 ata_cmd.lba_l = va_arg(va, u32);
150 ata_cmd.lba_h = va_arg(va, u32);
151 break;
152
153 case AP_DEVICE:
154 /* ATA device byte; note that this byte contains the highest
155 * 4 bits of LBA-28 address; we have to leave them alone here. */
156 ata_cmd.device |= va_arg(va, u32) & 0xf0;
157 break;
158
159 case AP_SGLIST:
160 /* scatter/gather list in SCATGATENTRY/count format */
161 sg_list = va_arg(va, void *);
162 sg_cnt = va_arg(va, u32);
163 break;
164
165 case AP_VADDR:
166 /* virtual buffer address in addr/len format (up to 4K) */
167 sg_single.ppXferBuf = MemPhysAdr(va_arg(va, void *));
168 sg_single.XferBufLen = va_arg(va, u32);
169 sg_list = &sg_single;
170 sg_cnt = 1;
171 break;
172
173 case AP_ATAPI_CMD:
174 /* ATAPI command */
175 atapi_cmd = va_arg(va, void *);
176 atapi_cmd_len = va_arg(va, u32);
177 ahci_flags |= AHCI_CMD_ATAPI;
178 break;
179
180 case AP_ATA_CMD:
181 /* ATA command "pass-through" */
182 memcpy(&ata_cmd, va_arg(va, void *), sizeof(ATA_CMD));
183 break;
184
185 case AP_END:
186 break;
187
188 default:
189 DPRINTF(0,"error: v_ata_cmd() called with invalid parameter type (%d)\n", (int) ap);
190 return(ATA_CMD_INVALID_PARM);
191 }
192
193 } while (ap != AP_END);
194
195 /* --------------------------------------------------------------------------
196 * Fill in AHCI ATA command information. This includes the port command slot,
197 * the corresponding command FIS and the S/G list. The layout of the AHCI
198 * port DMA region is based on the Linux AHCI driver and looks like this:
199 *
200 * - 32 AHCI command headers (AHCI_CMD_HDR) with 32 bytes, each
201 * - 1 FIS receive area with 256 bytes (AHCI_RX_FIS_SZ)
202 * - 32 AHCI command tables, each consisting of
203 * - 64 bytes for command FIS
204 * - 16 bytes for ATAPI commands
205 * - 48 bytes reserved
206 * - 48 S/G entries (AHCI_SG) with 32 bytes, each
207 *
208 * Since the whole DMA buffer for all ports is larger than 64KB and we need
209 * multiple segments to address all of them, there are no virtual pointers
210 * to the individual elements in AD_INFO. Instead, we're relying on macros
211 * for getting the base address of a particular port's DMA region, then
212 * map a structure on top of that for convenience (AHCI_PORT_DMA).
213 * An illustrative sketch of this layout follows v_ata_cmd() below. */
214 dma_base_virt = port_dma_base(ai, p);
215 dma_base_phys = port_dma_base_phys(ai, p);
216
217 /* AHCI command header */
218 cmd_hdr = &dma_base_virt->cmd_hdr[slot];
219 memset(cmd_hdr, 0x00, sizeof(*cmd_hdr));
220 cmd_hdr->options = ((d & 0x0f) << 12);
221 cmd_hdr->options |= ahci_flags; /* AHCI command flags */
222 cmd_hdr->options |= 5; /* length of command FIS in 32-bit words */
223 cmd_hdr->tbl_addr = dma_base_phys + offsetof(AHCI_PORT_DMA, cmd_tbl[slot]);
224 /* DAZ: could use MemPhysAdr(&dma_base_virt->cmd_tbl[slot]) here, but it is probably slower. */
225
226 /* AHCI command table */
227 cmd_tbl = &dma_base_virt->cmd_tbl[slot];
228 memset(cmd_tbl, 0x00, sizeof(*cmd_tbl));
229 ata_cmd_to_fis(cmd_tbl->cmd_fis, &ata_cmd, d);
230
231 if (atapi_cmd != NULL)
232 {
233 /* copy ATAPI command */
234 memcpy(cmd_tbl->atapi_cmd, atapi_cmd, atapi_cmd_len);
235 }
236
237 /* PRDT (S/G list)
238 *
239 * - The S/G list for AHCI adapters is limited to 22 bits for the transfer
240 * size of each element, thus we need to split S/G elements larger than
241 * 22 bits into 2 AHCI_SG elements.
242 *
243 * - The S/G element size for AHCI is what the spec calls '0'-based
244 * (i.e. 0 means 1 byte). On top of that, the spec requires S/G transfer
245 * sizes to be even in the context of 16-bit transfers, thus the lowest
246 * bit always needs to be set.
247 *
248 * - AHCI_MAX_SG_ELEMENT_LEN defines the maximum size of an AHCI S/G
249 * element in bytes, ignoring the '0'-based methodology (i.e. 1 << 22).
250 *
251 * - There's a limit on the maximum number of S/G elements in the port DMA
252 * buffer (AHCI_MAX_SG) which is lower than the HW maximum. It's beyond
253 * the control of this function to split commands which require more
254 * than AHCI_MAX_SG entries. In order to help the caller, the return value
255 * of this function will indicate how many OS/2 S/G entries were
256 * successfully mapped.
257 */
258 for (i = n = 0; i < sg_cnt; i++)
259 {
260 u32 sg_addr = sg_list[i].ppXferBuf;
261 u32 sg_size = sg_list[i].XferBufLen;
262
263 do
264 {
265 u32 chunk = (sg_size > AHCI_MAX_SG_ELEMENT_LEN) ? AHCI_MAX_SG_ELEMENT_LEN
266 : sg_size;
267 if (n >= AHCI_MAX_SG)
268 {
269 /* couldn't store all S/G elements in our DMA buffer */
270 DPRINTF(0,"ata_cmd(): too many S/G elements\n");
271 return(i - 1);
272 }
273 if ((sg_addr & 1) || (chunk & 1))
274 {
275 DPRINTF(0,"error: ata_cmd() called with unaligned S/G element(s)\n");
276 return(ATA_CMD_UNALIGNED_ADDR);
277 }
278 cmd_tbl->sg_list[n].addr = sg_addr;
279 cmd_tbl->sg_list[n].size = chunk - 1;
280 sg_addr += chunk;
281 sg_size -= chunk;
282 n++;
283 } while (sg_size > 0);
284 }
285
286 /* set final S/G count in AHCI command header */
287 cmd_hdr->options |= n << 16;
288
289 if (D32g_DbgLevel >= 7)
290 {
291 DPRINTF(0,"ATA command for %d.%d.%d, slot %d:\n", ad_no(ai), p, d, slot);
292 dHexDump(0,cmd_hdr, offsetof(AHCI_CMD_HDR, reserved), "cmd_hdr: ");
293 dHexDump(0,&ata_cmd, sizeof(ata_cmd), "ata_cmd: ");
294 if (atapi_cmd != NULL)
295 {
296 dHexDump(0,atapi_cmd, atapi_cmd_len, "atapi_cmd: ");
297 }
298 if (n > 0)
299 {
300 dHexDump(0,cmd_tbl->sg_list, sizeof(*cmd_tbl->sg_list) * n, "sg_list: ");
301 }
302 }
303
304 return(ATA_CMD_SUCCESS);
305}
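/* Purely illustrative sketch of the per-port DMA layout described in the
 * comment inside v_ata_cmd() above. The driver's real AHCI_PORT_DMA type is
 * declared in the AHCI headers and may differ in detail; the sizes below are
 * simply those quoted in that comment, and EXAMPLE_PORT_DMA is a hypothetical
 * name used only here.
 */
#if 0
typedef struct {
  AHCI_CMD_HDR cmd_hdr[32];   /* 32 command headers, 32 bytes each */
  u8           rx_fis[256];   /* received FIS area (AHCI_RX_FIS_SZ) */
  AHCI_CMD_TBL cmd_tbl[32];   /* per slot: 64-byte command FIS, 16-byte ATAPI
                               * command, 48 reserved bytes and the S/G list
                               * (up to AHCI_MAX_SG entries) */
} EXAMPLE_PORT_DMA;
#endif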
306
307/******************************************************************************
308 * Fill SATA command FIS with values extracted from an ATA command structure.
309 * The command FIS buffer (fis) is expected to be initialized to 0s. The
310 * structure of the FIS maps to the ATA shadow register block, including
311 * registers which can be written twice to store 16 bits (called 'exp').
312 *
313 * The FIS structure looks like this (using LSB notation):
314 *
315 * +----------------+----------------+----------------+----------------+
316 * 00 | FIS type (27h) | C|R|R|R|PMP | Command | Features |
317 * +----------------+----------------+----------------+----------------+
318 * 04 | LBA 7:0 | LBA 15:8 | LBA 23:16 | R|R|R|D|Head |
319 * +----------------+----------------+----------------+----------------+
320 * 08 | LBA 31:24 | LBA 39:32 | LBA 47:40 | Features exp |
321 * +----------------+----------------+----------------+----------------+
322 * 12 | Count 7:0 | Count 15:8 | Reserved | Control |
323 * +----------------+----------------+----------------+----------------+
324 * 16 | Reserved | Reserved | Reserved | Reserved |
325 * +----------------+----------------+----------------+----------------+
326 */
327void ata_cmd_to_fis(u8 *fis, ATA_CMD *ata_cmd, int d)
328{
329 fis[0] = 0x27; /* register - host to device FIS */
330 fis[1] = (u8) (d & 0xf); /* port multiplier number */
331 fis[1] |= 0x80; /* bit 7 indicates Command FIS */
332 fis[2] = (u8) ata_cmd->cmd;
333 fis[3] = (u8) ata_cmd->features;
334
335 fis[4] = (u8) ata_cmd->lba_l;
336 fis[5] = (u8) (ata_cmd->lba_l >> 8);
337 fis[6] = (u8) (ata_cmd->lba_l >> 16);
338 fis[7] = (u8) ata_cmd->device;
339
340 fis[8] = (u8) (ata_cmd->lba_l >> 24);
341 fis[9] = (u8) ata_cmd->lba_h;
342 fis[10] = (u8) (ata_cmd->lba_h >> 8);
343 fis[11] = (u8) (ata_cmd->features >> 8);
344
345 fis[12] = (u8) ata_cmd->count;
346 fis[13] = (u8) (ata_cmd->count >> 8);
347}
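/* Worked example (illustrative only) for the FIS layout documented above:
 * an ATA_CMD set up for READ DMA EXT (cmd = 0x25) with lba_l = 0x12345678,
 * lba_h = 0, count = 8, device = 0x40 and d = 0 is serialized by
 * ata_cmd_to_fis() as:
 *
 *   fis[0..3]   = 27 80 25 00   (H2D register FIS, 'C' bit, command, features)
 *   fis[4..7]   = 78 56 34 40   (LBA 7:0, 15:8, 23:16, device)
 *   fis[8..11]  = 12 00 00 00   (LBA 31:24, 39:32, 47:40, features exp)
 *   fis[12..13] = 08 00         (count 7:0, 15:8)
 */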
348
349/******************************************************************************
350 * Get index in S/G list for the number of transferred sectors in the IORB.
351 *
352 * Returning io->cSGList indicates an error.
353 *
354 * NOTE: OS/2 makes sure S/G lists are set up such that entries at the HW
355 * limit will never cross sector boundaries. This means that splitting
356 * S/G lists into multiple commands can be done without editing the S/G
357 * lists.
358 */
359u16 ata_get_sg_indx(IORB_EXECUTEIO *io)
360{
361 ULONG offset = io->BlocksXferred * io->BlockSize;
362 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
363 USHORT i;
364
365 for (i = 0; i < io->cSGList && offset > 0; i++)
366 {
367 offset -= pSGList[i].XferBufLen;
368 }
369
370 return(i);
371}
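/* Worked example (illustrative only): with BlockSize = 512 and
 * BlocksXferred = 16 the starting offset is 8192 bytes. For an S/G list with
 * element lengths { 4096, 4096, 8192 } the loop above consumes elements 0
 * and 1 (4096 + 4096 = 8192) and returns index 2, i.e. the next command
 * iteration starts with the third S/G element.
 */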
372
373/******************************************************************************
374 * Get max S/G count which will fit into our HW S/G buffers. This function is
375 * called when the S/G list is too long and we need to split the IORB into
376 * multiple commands. It returns both the number of sectors and S/G list
377 * elements that we can handle in a single command.
378 *
379 * The parameter 'sg_indx' indicates the current start index in the S/G list
380 * (0 if this is the first command iteration).
381 *
382 * The parameter 'sg_max' is the return value of v_ata_cmd() and indicates
383 * how many S/G elements were successfully mapped. Whatever we return needs to
384 * be less than or equal to this value.
385 *
386 * Returning 0 in *sg_cnt indicates an error.
387 *
388 * NOTE: OS/2 makes sure S/G lists are set up such that entries at HW limits
389 * will never cross sector boundaries. This means that splitting S/G
390 * lists into multiple commands can be done without editing S/G list
391 * elements. Since AHCI only allows 22 bits for each S/G element, the
392 * hardware limits are reported as AHCI_MAX_SG / 2 but will vary based
393 * on the actual length of S/G elements. This function looks for the
394 * maximum number of S/G elements that can be mapped on sector
395 * boundaries which will still fit into our HW S/G list.
396 */
397void ata_max_sg_cnt(IORB_EXECUTEIO *io, USHORT sg_indx, USHORT sg_max,
398 USHORT *sg_cnt, USHORT *sector_cnt)
399{
400 ULONG max_sector_cnt = 0;
401 USHORT max_sg_cnt = 0;
402 ULONG offset = 0;
403 USHORT i;
404 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
405
406 for (i = sg_indx; i < io->cSGList; i++)
407 {
408 if (i - sg_indx >= sg_max)
409 {
410 /* we're beyond the number of S/G elements we can map */
411 break;
412 }
413
414 offset += pSGList[i].XferBufLen;
415 if (offset % io->BlockSize == 0)
416 {
417 /* this S/G element ends on a sector boundary */
418 max_sector_cnt = offset / io->BlockSize;
419 max_sg_cnt = i + 1;
420 }
421 }
422
423 /* return the best match we found (0 indicating failure) */
424 *sector_cnt = max_sector_cnt;
425 *sg_cnt = max_sg_cnt;
426}
427
428
429/******************************************************************************
430 * Get device or media geometry. Device and media geometry are expected to be
431 * the same for non-removable devices, which will always be the case for the
432 * ATA devices we're dealing with (hard disks). ATAPI is a different story
433 * and handled by atapi_get_geometry().
434 */
435int ata_get_geometry(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
436{
437 ADD_WORKSPACE *aws = add_workspace(pIorb);
438 int rc;
439
440 /* allocate buffer for ATA identify information */
441 if ((aws->buf = MemAlloc(ATA_ID_WORDS * sizeof(u16))) == NULL)
442 {
443 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
444 return(-1);
445 }
446
447 /* request ATA identify information */
448 aws->ppfunc = ata_get_geometry_pp;
449 rc = ata_cmd(ad_infos + iorb_unit_adapter(pIorb),
450 iorb_unit_port(pIorb),
451 iorb_unit_device(pIorb),
452 slot,
453 ATA_CMD_ID_ATA,
454 AP_VADDR, (void *) aws->buf, ATA_ID_WORDS * sizeof(u16),
455 AP_END);
456
457 if (rc != 0)
458 {
459 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
460 }
461
462 return(rc);
463}
464
465/* Adjust the cylinder count in the physical
466 * geometry to the last full cylinder.
467 */
468int adjust_cylinders(GEOMETRY *geometry, ULONG TotalSectors)
469{
470 USHORT SecPerCyl;
471 int rc = FALSE;
472
473 geometry->TotalSectors = TotalSectors;
474 SecPerCyl = geometry->SectorsPerTrack * geometry->NumHeads;
475 if (SecPerCyl > 0)
476 {
477 ULONG TotalCylinders = TotalSectors / SecPerCyl;
478
479 geometry->TotalSectors = TotalCylinders * SecPerCyl;
480 geometry->TotalCylinders = TotalCylinders;
481 if (TotalCylinders >> 16)
482 {
483 geometry->TotalCylinders = 65535;
484 rc = TRUE;
485 }
486 }
487 return (rc);
488}
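/* Worked example (illustrative only): with 16 heads and 63 sectors per track,
 * SecPerCyl = 1008. For TotalSectors = 2,000,000,000 this yields
 * 2,000,000,000 / 1008 = 1,984,126 cylinders and TotalSectors is trimmed to
 * 1,984,126 * 1008 = 1,999,999,008. Since the cylinder count doesn't fit into
 * 16 bits it is clamped to 65535 and TRUE is returned, which makes
 * ata_get_geometry_pp() fall back to log_geom_calculate_LBA_assist().
 */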
489
490/* Calculate the logical geometry based on the input physical geometry
491 * using the LBA Assist Translation algorithm.
492 */
493#define BIOS_MAX_CYLINDERS 1024l
494#define BIOS_MAX_NUMHEADS 255
495#define BIOS_MAX_SECTORSPERTRACK 63
496void log_geom_calculate_LBA_assist(GEOMETRY *geometry, ULONG TotalSectors)
497{
498 UCHAR numSpT = BIOS_MAX_SECTORSPERTRACK;
499 UCHAR numHeads = BIOS_MAX_NUMHEADS;
500 ULONG Cylinders;
501
502 if (TotalSectors <= (BIOS_MAX_CYLINDERS * 128 * BIOS_MAX_SECTORSPERTRACK))
503 {
504 USHORT temp = (TotalSectors - 1) / (BIOS_MAX_CYLINDERS * BIOS_MAX_SECTORSPERTRACK);
505
506 if (temp < 16) numHeads = 16;
507 else if (temp < 32) numHeads = 32;
508 else if (temp < 64) numHeads = 64;
509 else numHeads = 128;
510 }
511
512 do
513 {
514 Cylinders = TotalSectors / (USHORT)(numHeads * numSpT);
515 if (Cylinders >> 16)
516 {
517 if (numSpT < 128)
518 numSpT = (numSpT << 1) | 1;
519 else
520 Cylinders = 65535; // overflow !
521 }
522 } while (Cylinders >> 16);
523
524 geometry->TotalCylinders = Cylinders;
525 geometry->NumHeads = numHeads;
526 geometry->SectorsPerTrack = numSpT;
527}
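/* Worked example (illustrative only), continuing the 2,000,000,000-sector
 * disk from above: the total exceeds 1024 * 128 * 63 sectors, so the head
 * count stays at 255. With 63 sectors per track the cylinder count would be
 * 2,000,000,000 / (255 * 63) = 124,494, which overflows 16 bits, so the
 * sectors-per-track value is bumped to 127; 2,000,000,000 / (255 * 127)
 * = 61,756 fits, giving a logical geometry of 61756 cylinders, 255 heads and
 * 127 sectors per track.
 */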
528
529int check_lvm(IORBH *pIorb, ULONG sector)
530{
531 DLA_Table_Sector *pDLA = (DLA_Table_Sector*)add_workspace(pIorb)->buf;
532 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
533 GEOMETRY *geometry = ((IORB_GEOMETRY*)pIorb)->pGeometry;
534 int p = iorb_unit_port(pIorb);
535 int rc;
536
537 rc = ahci_exec_polled_cmd(ai, p, 0, 500, ATA_CMD_READ,
538 AP_SECTOR_28, sector-1,
539 AP_COUNT, 1,
540 AP_VADDR, (void *)pDLA, 512,
541 AP_DEVICE, 0x40,
542 AP_END);
543 if (rc) return 0;
544
545 DHEXDUMP(3,pDLA, sizeof(DLA_Table_Sector), "DLA sector %d:\n", sector-1);
546
547 if ((pDLA->DLA_Signature1 == DLA_TABLE_SIGNATURE1) && (pDLA->DLA_Signature2 == DLA_TABLE_SIGNATURE2)) {
548 DPRINTF(3,"is_lvm_geometry found at sector %d\n", sector-1);
549 geometry->TotalCylinders = pDLA->Cylinders;
550 geometry->NumHeads = pDLA->Heads_Per_Cylinder;
551 geometry->SectorsPerTrack = pDLA->Sectors_Per_Track;
552 geometry->TotalSectors = pDLA->Cylinders * pDLA->Heads_Per_Cylinder * pDLA->Sectors_Per_Track;
553 return 1;
554 }
555
556 return 0;
557}
558
559/******************************************************************************
560 * Try to read LVM information from the disk. If found, use the LVM geometry.
561 * This function will only work at init time. A better strategy would be to
562 * calculate the geometry during ahci_scan_ports and save it away and then just
563 * return the saved values when ata_get_geometry() is called.
564 */
565int is_lvm_geometry(IORBH *pIorb)
566{
567 GEOMETRY *geometry = ((IORB_GEOMETRY*)pIorb)->pGeometry;
568 ULONG sector;
569
570 if (init_complete) return 0; /* We cannot use ahci_exec_polled_cmd() after init_complete */
571
572 if (use_lvm_info)
573 {
574 #ifdef DEBUG
575 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
576 int p = iorb_unit_port(pIorb);
577 int d = iorb_unit_device(pIorb);
578 DPRINTF(3,"is_lvm_geometry (%d.%d.%d)\n", ad_no(ai), p, d);
579 #endif
580
581 /* First check the sector reported by the hardware */
582 if (check_lvm(pIorb, geometry->SectorsPerTrack)) return 1;
583
584 for (sector = 255; sector >= 63; sector >>= 1)
585 {
586 if (sector == geometry->SectorsPerTrack) continue;
587 if (check_lvm(pIorb, sector)) return 1;
588 }
589 }
590
591 return 0;
592}
593
594/******************************************************************************
595 * Post processing function for ata_get_geometry(): convert the ATA identify
596 * information to OS/2 IOCC_GEOMETRY information.
597 */
598void ata_get_geometry_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb)
599{
600 GEOMETRY *geometry = ((IORB_GEOMETRY*)pIorb)->pGeometry;
601 USHORT geometry_len = ((IORB_GEOMETRY *)pIorb)->GeometryLen;
602 u16 *id_buf = add_workspace(pIorb)->buf;
603 int a = iorb_unit_adapter(pIorb);
604 int p = iorb_unit_port(pIorb);
605 char *Method;
606
607 /* Fill-in geometry information; the ATA-8 spec declares the geometry
608 * fields in the ATA ID buffer as obsolete but it's still the best
609 * guess in most cases. If the information stored in the geometry
610 * fields is apparently incorrect, we'll use the algorithm typically
611 * used by SCSI adapters and modern PC BIOS versions:
612 *
613 * - 512 bytes per sector
614 * - 255 heads
615 * - 63 sectors per track (or 56 with the parameter "/4")
616 * - x cylinders (calculated)
617 *
618 * Please note that os2ahci currently does not natively support ATA sectors
619 * larger than 512 bytes and therefore relies on the translation logic built
620 * into the corresponding ATA disks. In order to prevent file systems that
621 * use block sizes larger than 512 bytes (FAT, JFS, ...) from issuing
622 * incorrectly aligned physical sector accesses, and hence more physical
623 * I/Os than necessary, the command line parameter "/4" can be used to force
624 * a track size of 56 sectors. This way, partitions will start on 4K
625 * boundaries. (The alignment arithmetic is sketched after this function.)
626 *
627 * Another limitation is that OS/2 has a 32-bit variable for the total number
628 * of sectors, limiting the maximum capacity to roughly 2TB. This is another
629 * issue that needs to be addressed sooner or later; large sectors could
630 * raise this limit to something like 8TB but this is not really much of a
631 * difference. Maybe there's something in later DDKs that allows more than
632 * 32 bits?
633 */
634 memset(geometry, 0x00, geometry_len);
635 geometry->BytesPerSector = ATA_SECTOR_SIZE;
636
637 /* extract total number of sectors */
638 if (id_buf[ATA_ID_CFS_ENABLE_2] & 0x400)
639 {
640 /* 48-bit LBA supported */
641 if (ATA_CAPACITY48_H(id_buf) != 0)
642 {
643 /* more than 32 bits for number of sectors */
644 DPRINTF(0,"warning: limiting disk %d.%d.%d to 2TB\n",
645 iorb_unit_adapter(pIorb), iorb_unit_port(pIorb),
646 iorb_unit_device(pIorb));
647 geometry->TotalSectors = 0xffffffffUL;
648 }
649 else
650 {
651 geometry->TotalSectors = ATA_CAPACITY48_L(id_buf);
652 }
653 }
654 else
655 {
656 /* 28-bit LBA */
657 geometry->TotalSectors = ATA_CAPACITY(id_buf) & 0x0fffffffUL;
658 }
659
660 Method = "None";
661 /* fabricate the remaining geometry fields */
662 if (track_size[a][p] != 0)
663 {
664 /* A specific track size has been requested for this port; this is
665 * typically done for disks with 4K sectors to make sure partitions
666 * start on 8-sector boundaries (parameter "/4").
667 */
668 geometry->NumHeads = 255;
669 geometry->SectorsPerTrack = track_size[a][p];
670 geometry->TotalCylinders = geometry->TotalSectors / ((u32) geometry->NumHeads * (u32) geometry->SectorsPerTrack);
671 Method = "Custom";
672 }
673 else if (CUR_HEADS(id_buf) > 0 && CUR_CYLS(id_buf) > 0 && CUR_SECTORS(id_buf) > 0 &&
674 CUR_CAPACITY(id_buf) == (u32) CUR_HEADS(id_buf) * (u32) CUR_CYLS(id_buf) * (u32) CUR_SECTORS(id_buf))
675 {
676 /* BIOS-supplied (aka "current") geometry values look valid */
677 geometry->NumHeads = CUR_HEADS(id_buf);
678 geometry->SectorsPerTrack = CUR_SECTORS(id_buf);
679 geometry->TotalCylinders = CUR_CYLS(id_buf);
680 Method = "BIOS";
681 }
682 else if (ATA_HEADS(id_buf) > 0 && ATA_CYLS(id_buf) > 0 && ATA_SECTORS(id_buf) > 0)
683 {
684 /* ATA-supplied values for geometry look valid */
685 geometry->NumHeads = ATA_HEADS(id_buf);
686 geometry->SectorsPerTrack = ATA_SECTORS(id_buf);
687 geometry->TotalCylinders = ATA_CYLS(id_buf);
688 Method = "ATA";
689 }
690 else
691 {
692 /* use typical SCSI geometry */
693 geometry->NumHeads = 255;
694 geometry->SectorsPerTrack = 63;
695 geometry->TotalCylinders = geometry->TotalSectors / ((u32) geometry->NumHeads * (u32) geometry->SectorsPerTrack);
696 Method = "SCSI";
697 }
698
699 DPRINTF(2,"Physical geometry: %d cylinders, %d heads, %d sectors per track (%dMB) (%s)\n",
700 geometry->TotalCylinders, geometry->NumHeads, geometry->SectorsPerTrack,
701 (geometry->TotalSectors / 2048), Method);
702
703 /* Fixup the geometry in case the geometry reported by the BIOS is bad */
704 if (adjust_cylinders(geometry, geometry->TotalSectors))
705 { // cylinder overflow
706 log_geom_calculate_LBA_assist(geometry, geometry->TotalSectors);
707 geometry->TotalSectors = (USHORT)(geometry->NumHeads * geometry->SectorsPerTrack) * (ULONG)geometry->TotalCylinders;
708 }
709 adjust_cylinders(geometry, geometry->TotalSectors);
710
711 DPRINTF(2,"Logical geometry: %d cylinders, %d heads, %d sectors per track (%dMB) (%s)\n",
712 geometry->TotalCylinders, geometry->NumHeads, geometry->SectorsPerTrack,
713 (geometry->TotalSectors / 2048), Method);
714
715 if (is_lvm_geometry(pIorb)) Method = "LVM";
716 ad_infos[a].ports[p].devs[0].dev_info.Cylinders = geometry->TotalCylinders;
717 ad_infos[a].ports[p].devs[0].dev_info.HeadsPerCylinder = geometry->NumHeads;
718 ad_infos[a].ports[p].devs[0].dev_info.SectorsPerTrack = geometry->SectorsPerTrack;
719 ad_infos[a].ports[p].devs[0].dev_info.TotalSectors = geometry->TotalSectors;
720 ad_infos[a].ports[p].devs[0].dev_info.Method = Method;
721
722 DPRINTF(2,"Reported geometry: %d cylinders, %d heads, %d sectors per track (%dMB) (%s)\n",
723 geometry->TotalCylinders, geometry->NumHeads, geometry->SectorsPerTrack,
724 (geometry->TotalSectors / 2048), Method);
725
726 /* tell interrupt handler that this IORB is complete */
727 add_workspace(pIorb)->complete = 1;
728}
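/* Alignment arithmetic behind the "/4" option discussed inside
 * ata_get_geometry_pp() above (illustrative only): a 56-sector track is
 * 56 * 512 = 28,672 bytes = 7 * 4096, so every track (and thus every
 * track-aligned partition) starts on a 4K boundary. With the default 63
 * sectors per track a track is 63 * 512 = 32,256 bytes, which is not a
 * multiple of 4096.
 */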
729
730/******************************************************************************
731 * Test whether unit is ready.
732 */
733int ata_unit_ready(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
734{
735 /* This is a NOP for ATA devices (at least right now); returning an error
736 * without setting an error code means ahci_exec_iorb() will not queue any
737 * HW command and the IORB will complete successfully.
738 */
739 ((IORB_UNIT_STATUS *)pIorb)->UnitStatus = US_READY | US_POWER;
740 return(-1);
741}
742
743/******************************************************************************
744 * Read sectors from AHCI device.
745 */
746int ata_read(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
747{
748 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
749 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
750 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
751 ULONG sector = io->RBA + io->BlocksXferred;
752 USHORT count = io->BlockCount - io->BlocksXferred;
753 USHORT sg_indx;
754 USHORT sg_cnt;
755 int p = iorb_unit_port(pIorb);
756 int d = iorb_unit_device(pIorb);
757 int rc;
758
759 if (io->BlockCount == 0)
760 {
761 /* NOP; return -1 without error in IORB to indicate success */
762 return(-1);
763 }
764
765 if (add_workspace(pIorb)->unaligned)
766 {
767 /* unaligned S/G addresses present; need to use double buffers */
768 return(ata_read_unaligned(pIorb, slot));
769 }
770
771 /* Kludge: some I/O commands during boot use excessive S/G buffer lengths
772 * which cause NCQ commands to lock up. If there's only one S/G element
773 * and this element is already larger than what we can derive from the sector
774 * count, we'll adjust that element.
775 */
776 if (io->BlocksXferred == 0 && io->cSGList == 1 &&
777 pSGList[0].XferBufLen > (ULONG) io->BlockCount * io->BlockSize)
778 {
779 pSGList[0].XferBufLen = (ULONG) io->BlockCount * io->BlockSize;
780 }
781
782 /* prepare read command while keeping an eye on S/G count limitations */
783 do
784 {
785 sg_indx = ata_get_sg_indx(io);
786 sg_cnt = io->cSGList - sg_indx;
787 if ((rc = ata_cmd_read(pIorb, ai, p, d, slot, sector, count,
788 pSGList + sg_indx, sg_cnt)) > 0)
789 {
790 /* couldn't map all S/G elements */
791 ata_max_sg_cnt(io, sg_indx, rc, &sg_cnt, &count);
792 }
793 } while (rc > 0 && sg_cnt > 0);
794
795 if (rc == 0)
796 {
797 add_workspace(pIorb)->blocks = count;
798 add_workspace(pIorb)->ppfunc = ata_read_pp;
799 }
800 else if (rc > 0)
801 {
802 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD);
803 }
804 else if (rc == ATA_CMD_UNALIGNED_ADDR)
805 {
806 /* unaligned S/G addresses detected; need to use double buffers */
807 add_workspace(pIorb)->unaligned = 1;
808 return(ata_read_unaligned(pIorb, slot));
809
810 }
811 else
812 {
813 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
814 }
815
816 return(rc);
817}
818
819/******************************************************************************
820 * Read sectors from AHCI device with unaligned S/G element addresses. AHCI
821 * only allows aligned S/G addresses while OS/2 doesn't have this kind of
822 * restriction. This doesn't happen very often, but when it does, we need to
823 * use a transfer buffer and copy the data manually.
824 */
825int ata_read_unaligned(IORBH *pIorb, int slot)
826{
827 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
828 ADD_WORKSPACE *aws = add_workspace(pIorb);
829 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
830 ULONG sector = io->RBA + io->BlocksXferred;
831 SCATGATENTRY sg_single;
832 int p = iorb_unit_port(pIorb);
833 int d = iorb_unit_device(pIorb);
834 int rc;
835
836 DPRINTF(7,"ata_read_unaligned(%d.%d.%d, %d)\n", ad_no(ai), p, d, sector);
837
838 /* allocate transfer buffer */
839 if ((aws->buf = MemAlloc(io->BlockSize)) == NULL)
840 {
841 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
842 return(-1);
843 }
844
845 /* prepare read command using transfer buffer */
846 sg_single.ppXferBuf = MemPhysAdr(aws->buf);
847 sg_single.XferBufLen = io->BlockSize;
848 rc = ata_cmd_read(pIorb, ai, p, d, slot, sector, 1, &sg_single, 1);
849
850 if (rc == 0) {
851 add_workspace(pIorb)->blocks = 1;
852 add_workspace(pIorb)->ppfunc = ata_read_pp;
853
854 } else if (rc > 0) {
855 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD);
856
857 } else {
858 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
859 }
860
861 return(rc);
862}
863
864/******************************************************************************
865 * Post processing function for ata_read(); this function updates the
866 * BlocksXferred counter in the IORB and, if not all blocks have been
867 * transferred, requeues the IORB to process the remaining sectors. It also
868 * takes care of copying data from the transfer buffer for unaligned reads.
869 */
870void ata_read_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb)
871{
872 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
873 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
874 ADD_WORKSPACE *aws = add_workspace(pIorb);
875
876 if (aws->unaligned)
877 {
878 /* copy transfer buffer to corresponding physical address in S/G list */
879 sg_memcpy(pSGList, io->cSGList,
880 (ULONG) io->BlocksXferred * (ULONG) io->BlockSize,
881 aws->buf, io->BlockSize, BUF_TO_SG);
882 }
883
884 io->BlocksXferred += add_workspace(pIorb)->blocks;
885 DPRINTF(7,"ata_read_pp(): blocks transferred = %d\n", io->BlocksXferred);
886
887 if (io->BlocksXferred >= io->BlockCount)
888 {
889 /* we're done; tell IRQ handler the IORB is complete */
890 add_workspace(pIorb)->complete = 1;
891 }
892 else
893 {
894 /* requeue this IORB for next iteration */
895 iorb_requeue(pIorb);
896 }
897}
898
899/******************************************************************************
900 * Verify readability of sectors on ATA device.
901 */
902int ata_verify(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
903{
904 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
905 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
906 int p = iorb_unit_port(pIorb);
907 int d = iorb_unit_device(pIorb);
908 int rc;
909
910 if (io->BlockCount == 0)
911 {
912 /* NOP; return -1 without error in IORB to indicate success */
913 return(-1);
914 }
915
916 /* prepare verify command */
917 if (io->RBA >= (1UL << 28) || io->BlockCount > 256)
918 {
919 /* need LBA48 for this command */
920 if (!ai->ports[p].devs[d].lba48) {
921 iorb_seterr(pIorb, IOERR_RBA_LIMIT);
922 return(-1);
923 }
924 rc = ata_cmd(ai, p, d, slot, ATA_CMD_VERIFY_EXT,
925 AP_SECTOR_48, io->RBA, 0,
926 AP_COUNT, io->BlockCount,
927 AP_DEVICE, 0x40,
928 AP_END);
929 } else {
930 rc = ata_cmd(ai, p, d, slot, ATA_CMD_VERIFY,
931 AP_SECTOR_28, io->RBA,
932 AP_COUNT, io->BlockCount & 0xffU,
933 AP_DEVICE, 0x40,
934 AP_END);
935 }
936
937 return(rc);
938}
939
940/******************************************************************************
941 * Write sectors to AHCI device.
942 */
943int ata_write(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
944{
945 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
946 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
947 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
948 ULONG sector = io->RBA + io->BlocksXferred;
949 USHORT count = io->BlockCount - io->BlocksXferred;
950 USHORT sg_indx;
951 USHORT sg_cnt;
952 int p = iorb_unit_port(pIorb);
953 int d = iorb_unit_device(pIorb);
954 int rc;
955
956 if (io->BlockCount == 0)
957 {
958 /* NOP; return -1 without error in IORB to indicate success */
959 return(-1);
960 }
961
962 if (add_workspace(pIorb)->unaligned)
963 {
964 /* unaligned S/G addresses present; need to use double buffers */
965 return(ata_write_unaligned(pIorb, slot));
966 }
967
968 /* prepare write command while keeping an eye on S/G count limitations */
969 do {
970 sg_indx = ata_get_sg_indx(io);
971 sg_cnt = io->cSGList - sg_indx;
972 if ((rc = ata_cmd_write(pIorb, ai, p, d, slot, sector, count,
973 pSGList + sg_indx, sg_cnt,
974 io->Flags & XIO_DISABLE_HW_WRITE_CACHE)) > 0)
975 {
976 /* couldn't map all S/G elements */
977 ata_max_sg_cnt(io, sg_indx, (USHORT) rc, &sg_cnt, &count);
978 }
979 } while (rc > 0 && sg_cnt > 0);
980
981 if (rc == 0)
982 {
983 add_workspace(pIorb)->blocks = count;
984 add_workspace(pIorb)->ppfunc = ata_write_pp;
985 }
986 else if (rc > 0)
987 {
988 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD);
989 }
990 else if (rc == ATA_CMD_UNALIGNED_ADDR)
991 {
992 /* unaligned S/G addresses detected; need to use double buffers */
993 add_workspace(pIorb)->unaligned = 1;
994 return(ata_write_unaligned(pIorb, slot));
995 }
996 else
997 {
998 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
999 }
1000
1001 return(rc);
1002}
1003
1004/******************************************************************************
1005 * Write sectors to AHCI device with unaligned S/G element addresses. AHCI
1006 * only allows aligned S/G addresses while OS/2 doesn't have this kind of
1007 * restriction. This doesn't happen very often, but when it does, we need to
1008 * use a transfer buffer and copy the data manually.
1009 */
1010int ata_write_unaligned(IORBH *pIorb, int slot)
1011{
1012 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
1013 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(io->pSGList);
1014 ADD_WORKSPACE *aws = add_workspace(pIorb);
1015 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
1016 ULONG sector = io->RBA + io->BlocksXferred;
1017 SCATGATENTRY sg_single;
1018 int p = iorb_unit_port(pIorb);
1019 int d = iorb_unit_device(pIorb);
1020 int rc;
1021
1022 DPRINTF(7,"ata_write_unaligned(%d.%d.%d, %d)\n", ad_no(ai), p, d, sector);
1023
1024 /* allocate transfer buffer */
1025 if ((aws->buf = MemAlloc(io->BlockSize)) == NULL)
1026 {
1027 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
1028 return(-1);
1029 }
1030
1031 /* copy next sector from S/G list to transfer buffer */
1032 sg_memcpy(pSGList, io->cSGList,
1033 (ULONG) io->BlocksXferred * (ULONG) io->BlockSize,
1034 aws->buf, io->BlockSize, SG_TO_BUF);
1035
1036 /* prepare write command using transfer buffer */
1037 sg_single.ppXferBuf = MemPhysAdr(aws->buf);
1038 sg_single.XferBufLen = io->BlockSize;
1039 rc = ata_cmd_write(pIorb, ai, p, d, slot, sector, 1, &sg_single, 1,
1040 io->Flags & XIO_DISABLE_HW_WRITE_CACHE);
1041
1042 if (rc == 0)
1043 {
1044 add_workspace(pIorb)->blocks = 1;
1045 add_workspace(pIorb)->ppfunc = ata_write_pp;
1046 }
1047 else if (rc > 0)
1048 {
1049 iorb_seterr(pIorb, IOERR_CMD_SGLIST_BAD);
1050 }
1051 else
1052 {
1053 iorb_seterr(pIorb, IOERR_CMD_ADD_SOFTWARE_FAILURE);
1054 }
1055
1056 return(rc);
1057}
1058
1059
1060/******************************************************************************
1061 * Post processing function for ata_write(); this function updates the
1062 * BlocksXferred counter in the IORB and, if not all blocks have been
1063 * transferred, requeues the IORB to process the remaining sectors.
1064 */
1065void ata_write_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1066{
1067 IORB_EXECUTEIO *io = (IORB_EXECUTEIO *)pIorb;
1068
1069 io->BlocksXferred += add_workspace(pIorb)->blocks;
1070 DPRINTF(7,"ata_write_pp(): blocks transferred = %d\n", io->BlocksXferred);
1071
1072 if (io->BlocksXferred >= io->BlockCount)
1073 {
1074 /* we're done; tell IRQ handler the IORB is complete */
1075 add_workspace(pIorb)->complete = 1;
1076 }
1077 else
1078 {
1079 /* requeue this IORB for next iteration */
1080 iorb_requeue(pIorb);
1081 }
1082}
1083
1084/******************************************************************************
1085 * Execute ATA command.
1086 */
1087int ata_execute_ata(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
1088{
1089 IORB_ADAPTER_PASSTHRU *apt = (IORB_ADAPTER_PASSTHRU *)pIorb;
1090 SCATGATENTRY *pSGList = (SCATGATENTRY*)Far16ToFlat(apt->pSGList);
1091 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
1092 int p = iorb_unit_port(pIorb);
1093 int d = iorb_unit_device(pIorb);
1094 int rc;
1095
1096 if (apt->ControllerCmdLen != sizeof(ATA_CMD))
1097 {
1098 iorb_seterr(pIorb, IOERR_CMD_SYNTAX);
1099 return(-1);
1100 }
1101
1102 rc = ata_cmd(ai, p, d, slot, 0,
1103 AP_SGLIST, pSGList, apt->cSGList,
1104 AP_ATA_CMD, Far16ToFlat(apt->pControllerCmd),
1105 AP_WRITE, !(apt->Flags & PT_DIRECTION_IN),
1106 AP_END);
1107
1108 if (rc == 0)
1109 {
1110 add_workspace(pIorb)->ppfunc = ata_execute_ata_pp;
1111 }
1112
1113 return(rc);
1114}
1115
1116/******************************************************************************
1117 * Post processing function for ata_execute_ata(); the main purpose of this
1118 * function is to copy the received D2H FIS (i.e. the device registers after
1119 * command completion) back to the ATA command structure.
1120 *
1121 * See ata_cmd_to_fis() for an explanation of the mapping.
1122 */
1123void ata_execute_ata_pp(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1124{
1125 AHCI_PORT_DMA *dma_base;
1126 ATA_CMD *cmd;
1127 AD_INFO *ai;
1128 u8 *fis;
1129 int p;
1130
1131 /* get address of D2H FIS */
1132 ai = ad_infos + iorb_unit_adapter(pIorb);
1133 p = iorb_unit_port(pIorb);
1134 dma_base = port_dma_base(ai, p);
1135 fis = dma_base->rx_fis + 0x40;
1136
1137 if (fis[0] != 0x34)
1138 {
1139 /* this is not a D2H FIS - give up silently */
1140 DPRINTF(3,"ata_execute_ata_pp(): D2H FIS type incorrect: %d\n", fis[0]);
1141 add_workspace(pIorb)->complete = 1;
1142 return;
1143 }
1144
1145 /* map D2H FIS to the original ATA controller command structure */
1146 cmd = (ATA_CMD *)Far16ToFlat(((IORB_ADAPTER_PASSTHRU*)pIorb)->pControllerCmd);
1147
1148 cmd->cmd = fis[2];
1149 cmd->device = fis[7];
1150 cmd->features = ((u16) fis[3])
1151 | ((u16) fis[11]);
1152 cmd->lba_l = ((u32) fis[4])
1153 | ((u32) fis[5] << 8)
1154 | ((u32) fis[6] << 16)
1155 | ((u32) fis[8] << 24);
1156 cmd->lba_h = ((u16) fis[9])
1157 | ((u16) fis[10] << 8);
1158 cmd->count = ((u16) fis[12])
1159 | ((u16) fis[13] << 8);
1160
1161 DHEXDUMP(0,cmd, sizeof(*cmd), "ata_execute_ata_pp(): cmd after completion:\n");
1162
1163 /* signal completion to interrupt handler */
1164 add_workspace(pIorb)->complete = 1;
1165}
1166
1167/******************************************************************************
1168 * Request sense information for a failed command. Since there is no "request
1169 * sense" command for ATA devices, we need to read the current error code from
1170 * the AHCI task file register and fabricate the sense information.
1171 *
1172 * NOTES:
1173 *
1174 * - This function must be called right after an ATA command has failed and
1175 * before any other commands are queued on the corresponding port. This
1176 * function is typically called in the port restart context hook which is
1177 * triggered by an AHCI error interrupt.
1178 *
1179 * - The ATA error bits are a complete mess. We'll try to catch the most
1180 * interesting error codes (such as medium errors) and report everything
1181 * else with a generic error code.
1182 */
1183int ata_req_sense(IORBH FAR16DATA *vIorb, IORBH *pIorb, int slot)
1184{
1185 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
1186 u8 *port_mmio = port_base(ai, iorb_unit_port(pIorb));
1187 u32 tf_data = readl(port_mmio + PORT_TFDATA);
1188 u8 err = (tf_data >> 8);
1189 u8 sts = (tf_data);
1190
1191 if (sts & ATA_ERR)
1192 {
1193 if (sts & ATA_DF)
1194 {
1195 /* there is a device-specific error condition */
1196 if (err & ATA_ICRC)
1197 {
1198 iorb_seterr(pIorb, IOERR_ADAPTER_DEVICEBUSCHECK);
1199 }
1200 else if (err & ATA_UNC)
1201 {
1202 iorb_seterr(pIorb, IOERR_MEDIA);
1203 }
1204 else if (err & ATA_IDNF)
1205 {
1206 iorb_seterr(pIorb, IOERR_RBA_ADDRESSING_ERROR);
1207 }
1208 else
1209 {
1210 iorb_seterr(pIorb, IOERR_DEVICE_NONSPECIFIC);
1211 }
1212
1213 }
1214 else
1215 {
1216 iorb_seterr(pIorb, IOERR_DEVICE_NONSPECIFIC);
1217 }
1218 }
1219 else
1220 {
1221 /* this function only gets called when we received an error interrupt */
1222 iorb_seterr(pIorb, IOERR_DEVICE_NONSPECIFIC);
1223 }
1224
1225 /* Return an error to indicate there's no HW command to be submitted and
1226 * that the IORB can be completed "as is" (the upstream code expects the
1227 * IORB error code, if any, to be set when this happens and this is exactly
1228 * what this function is all about).
1229 */
1230 return(-1);
1231}
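/* Worked example (illustrative only), assuming the conventional ATA bit
 * values (ATA_ERR = 0x01 and ATA_DF = 0x20 in the status byte, ATA_UNC = 0x40
 * in the error byte): a task file value of 0x4061 decodes to sts = 0x61
 * (DRDY | DF | ERR) and err = 0x40 (uncorrectable data error), so the code
 * above sets IOERR_MEDIA in the IORB.
 */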
1232
1233/******************************************************************************
1234 * Extract vendor and device name from an ATA IDENTIFY buffer. Since strings
1235 * in the identify buffer are byte-swapped, we need to swap them back.
1236 */
1237char *ata_dev_name(u16 *id_buf)
1238{
1239 static char dev_name[ATA_ID_PROD_LEN + 1];
1240 char *t = dev_name;
1241 char *s = (char *) (id_buf + ATA_ID_PROD);
1242 int i;
1243
1244 dev_name[sizeof(dev_name)-1] = '\0';
1245
1246 for (i = 0; i < ATA_ID_PROD_LEN / 2; i++) {
1247 *(t++) = s[1];
1248 *(t++) = s[0];
1249 s += 2;
1250 }
1251
1252 return(dev_name);
1253}
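/* Worked example (illustrative only): ATA IDENTIFY strings store two
 * characters per 16-bit word, so a model name beginning with "Generic "
 * appears in the raw buffer as "eGenir c"; the swap loop above restores the
 * readable "Generic " order.
 */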
1254
1255/******************************************************************************
1256 * Fabricate ATA READ command based on the capabilities of the corresponding
1257 * device and the parameters set from above (NCQ, etc.).
1258 */
1259static int ata_cmd_read(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot,
1260 ULONG sector, ULONG count, SCATGATENTRY *sg_list,
1261 ULONG sg_cnt)
1262{
1263 int rc;
1264
1265 if (sector >= (1UL << 28) || count > 256 || add_workspace(pIorb)->is_ncq)
1266 {
1267 /* need LBA48 for this command */
1268 if (!ai->ports[p].devs[d].lba48)
1269 {
1270 iorb_seterr(pIorb, IOERR_RBA_LIMIT);
1271 return(-1);
1272 }
1273 if (add_workspace(pIorb)->is_ncq)
1274 {
1275 /* use NCQ read; count goes into feature register, tag into count! */
1276 rc = ata_cmd(ai, p, d, slot, ATA_CMD_FPDMA_READ,
1277 AP_SECTOR_48, sector, 0,
1278 AP_FEATURES, count,
1279 AP_COUNT, (slot << 3), /* tag == slot */
1280 AP_SGLIST, sg_list, sg_cnt,
1281 AP_DEVICE, 0x40,
1282 AP_END);
1283 }
1284 else
1285 {
1286 rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ_EXT,
1287 AP_SECTOR_48, sector, 0,
1288 AP_COUNT, count,
1289 AP_SGLIST, sg_list, sg_cnt,
1290 AP_DEVICE, 0x40,
1291 AP_END);
1292 }
1293
1294 }
1295 else
1296 {
1297 rc = ata_cmd(ai, p, d, slot, ATA_CMD_READ,
1298 AP_SECTOR_28, sector,
1299 AP_COUNT, count & 0xffU,
1300 AP_SGLIST, sg_list, sg_cnt,
1301 AP_DEVICE, 0x40,
1302 AP_END);
1303 }
1304
1305 return(rc);
1306}
1307
1308/******************************************************************************
1309 * Fabricate ATA WRITE command based on the capabilities of the corresponding
1310 * device and the parameters set from above (NCQ, etc.).
1311 */
1312static int ata_cmd_write(IORBH *pIorb, AD_INFO *ai, int p, int d, int slot,
1313 ULONG sector, ULONG count, SCATGATENTRY *sg_list,
1314 ULONG sg_cnt, int write_through)
1315{
1316 int rc;
1317
1318 if (sector >= (1UL << 28) || count > 256 || add_workspace(pIorb)->is_ncq)
1319 {
1320 /* need LBA48 for this command */
1321 if (!ai->ports[p].devs[d].lba48)
1322 {
1323 iorb_seterr(pIorb, IOERR_RBA_LIMIT);
1324 return(-1);
1325 }
1326 if (add_workspace(pIorb)->is_ncq)
1327 {
1328 /* use NCQ write; count goes into feature register, tag into count! */
1329 rc = ata_cmd(ai, p, d, slot, ATA_CMD_FPDMA_WRITE,
1330 AP_SECTOR_48, sector, 0,
1331 AP_FEATURES, count,
1332 /* tag = slot */
1333 AP_COUNT, (slot << 3),
1334 AP_SGLIST, sg_list, sg_cnt,
1335 AP_DEVICE, 0x40,
1336 /* force unit access */
1337 AP_DEVICE, (write_through && !force_write_cache) ? 0x80 : 0,
1338 AP_WRITE, 1,
1339 AP_END);
1340 }
1341 else
1342 {
1343 rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE_EXT,
1344 AP_SECTOR_48, sector, 0,
1345 AP_COUNT, count,
1346 AP_SGLIST, sg_list, sg_cnt,
1347 AP_DEVICE, 0x40,
1348 AP_WRITE, 1,
1349 AP_END);
1350 }
1351 }
1352 else
1353 {
1354 rc = ata_cmd(ai, p, d, slot, ATA_CMD_WRITE,
1355 AP_SECTOR_28, sector,
1356 AP_COUNT, count & 0xffU,
1357 AP_SGLIST, sg_list, sg_cnt,
1358 AP_DEVICE, 0x40,
1359 AP_WRITE, 1,
1360 AP_END);
1361 }
1362
1363 return(rc);
1364}
1365
1366/******************************************************************************
1367 * Copy block from S/G list to virtual address or vice versa.
1368 */
1369void sg_memcpy(SCATGATENTRY *sg_list, USHORT sg_cnt, ULONG sg_off,
1370 void *buf, USHORT len, SG_MEMCPY_DIRECTION dir)
1371{
1372 USHORT i;
1373 USHORT l;
1374 ULONG phys_addr;
1375 ULONG pos = 0;
1376 char *p;
1377
1378 /* walk through S/G list to find the elements involved in the operation */
1379 for (i = 0; i < sg_cnt && len > 0; i++)
1380 {
1381 if (pos <= sg_off && pos + sg_list[i].XferBufLen > sg_off)
1382 {
1383 /* this S/G element intersects with the block to be copied */
1384 phys_addr = sg_list[i].ppXferBuf + (sg_off - pos);
1385 if ((l = sg_list[i].XferBufLen - (sg_off - pos)) > len)
1386 {
1387 l = len;
1388 }
1389
1390 if (Dev32Help_PhysToLin(phys_addr, l, (PVOID) &p))
1391 {
1392 panic("sg_memcpy(): Dev32Help_PhysToLin() failed");
1393 }
1394 if (dir == SG_TO_BUF)
1395 {
1396 memcpy(buf, p, l);
1397 }
1398 else
1399 {
1400 memcpy(p, buf, l);
1401 }
1402 sg_off += l;
1403 buf = (char *) buf + l;
1404 len -= l;
1405 }
1406
1407 pos += sg_list[i].XferBufLen;
1408 }
1409}
1410
1411/******************************************************************************
1412 * Halt processing by submitting an internal error. This is a last resort and
1413 * should only be called when the system state is corrupt.
1414 */
1415void panic(char *msg)
1416{
1417 Dev32Help_InternalError(msg, strlen(msg));
1418}
1419