source: trunk/src/os2ahci/os2ahci.c@ 201

Last change on this file since 201 was 201, checked in by David Azarewicz, 6 years ago

Fixed trap in hardware failure recovery.

File size: 51.0 KB
1/******************************************************************************
2 * os2ahci.c - main file for os2ahci driver
3 *
4 * Copyright (c) 2011 thi.guten Software Development
5 * Copyright (c) 2011 Mensys B.V.
6 * Copyright (c) 2013-2018 David Azarewicz
7 *
8 * Authors: Christian Mueller, Markus Thielen
9 *
10 * Parts copied from/inspired by the Linux AHCI driver;
11 * those parts are (c) Linux AHCI/ATA maintainers
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include "os2ahci.h"
29#include "ioctl.h"
30#include "version.h"
31#include "devhdr.h"
32
33/* -------------------------- macros and constants ------------------------- */
34
35/* set two-dimensional array of port options */
36#define set_port_option(opt, val) \
37 if (adapter_index == -1) { \
38 /* set option for all adapters and ports */ \
39 memset(opt, val, sizeof(opt)); \
40 } else if (port_index == -1) { \
41 /* set option for all ports on current adapter */ \
42 memset(opt[adapter_index], val, sizeof(*opt)); \
43 } else { \
44 /* set option for specific port */ \
45 opt[adapter_index][port_index] = val; \
46 }
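/* Usage sketch (reading aid only, not part of the driver): the option arrays
 * below are declared as u8 opt[MAX_AD][AHCI_MAX_PORTS], and adapter_index /
 * port_index are set by the /A: and /P: switches parsed in init_drv(). For a
 * hypothetical "/A:1 /P:2 /S" command line the macro reduces to:
 *
 *   adapter_index = 1; port_index = 2;
 *   set_port_option(emulate_scsi, 1);    // -> emulate_scsi[1][2] = 1;
 *
 * whereas "/S" on its own (adapter_index == -1) memset()s the entire array.
 */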
47
48#define FLAG_KRNL_EXIT_ADD 0x1000
49#define FLAG_KRNL_EXIT_REMOVE 0x2000
50
51#define TYPE_KRNL_EXIT_NMI 0x0000 /* non-maskable interrupts */
52#define TYPE_KRNL_EXIT_SFF 0x0001 /* system fatal faults */
53#define TYPE_KRNL_EXIT_PROCDUMP 0x0002
54#define TYPE_KRNL_EXIT_DYN 0x0003
55#define TYPE_KRNL_EXIT_INT13 0x0004 /* enable int13 IO */
56
57/* ------------------------ typedefs and structures ------------------------ */
58
59/* -------------------------- function prototypes -------------------------- */
60
61extern int SetPsdPutc(void);
62static int add_unit_info(IORB_CONFIGURATION *iorb_conf, int dt_ai, int a, int p, int d, int scsi_id);
63
64/* ------------------------ global/static variables ------------------------ */
65int thorough_scan = 1; /* if != 0, perform thorough PCI scan */
66int init_reset = 1; /* if != 0, reset ports during init */
67int force_write_cache; /* if != 0, force write cache */
68int verbosity = 0; /* default is quiet. 1=show sign-on banner, >1=show adapter info during boot */
69int use_mbr_test = 1;
70long com_baud = 0;
71
72HDRIVER rm_drvh; /* resource manager driver handle */
73USHORT add_handle; /* driver handle (RegisterDeviceClass) */
74char drv_name[] = "OS2AHCI"; /* driver name as string */
75
76/* resource manager driver information structure */
77static DRIVERSTRUCT rm_drvinfo =
78{
79 NULL, /* We cannot do Flat to Far16 conversion at compile time */
80 NULL, /* so we put NULLs in all the Far16 fields and then fill */
81 NULL, /* them in at run time */
82 DMAJOR,
83 DMINOR,
84 BLD_YEAR, BLD_MONTH, BLD_DAY,
85 0,
86 DRT_ADDDM,
87 DRS_ADD,
88 NULL
89};
90
91SpinLock_t drv_lock; /* driver-level spinlock */
92IORB_QUEUE driver_queue; /* driver-level IORB queue */
93AD_INFO ad_infos[MAX_AD]; /* adapter information list */
94int ad_info_cnt; /* number of entries in ad_infos[] */
95u16 ad_ignore; /* bitmap with adapter indexes to ignore */
96int init_complete; /* if != 0, initialization has completed */
97int suspended;
98int resume_sleep_flag;
99
100/* adapter/port-specific options saved when parsing the command line */
101u8 emulate_scsi[MAX_AD][AHCI_MAX_PORTS];
102u8 enable_ncq[MAX_AD][AHCI_MAX_PORTS];
103u8 link_speed[MAX_AD][AHCI_MAX_PORTS];
104u8 link_power[MAX_AD][AHCI_MAX_PORTS];
105u8 track_size[MAX_AD][AHCI_MAX_PORTS];
106u8 port_ignore[MAX_AD][AHCI_MAX_PORTS];
107
108char BldLevel[] = BLDLEVEL;
109
110/* ----------------------------- start of code ----------------------------- */
111
112/******************************************************************************
113 * OS/2 device driver main strategy function.
114 *
115 * NOTE: this is also used as the IDC entry point. We expect an IOCTL request
116 * packet for IDC calls, so they can be handled by gen_ioctl.
117 */
118void StrategyHandler(REQPACKET *prp)
119{
120 u16 rc;
121
122 switch (prp->bCommand)
123 {
124 case STRATEGY_BASEDEVINIT:
125 rc = init_drv(prp);
126 break;
127
128 case STRATEGY_SHUTDOWN:
129 rc = exit_drv(prp->save_restore.Function);
130 break;
131
132 case STRATEGY_GENIOCTL:
133 rc = gen_ioctl(prp);
134 break;
135
136 case STRATEGY_OPEN:
137 build_user_info();
138 rc = RPDONE;
139 break;
140
141 case STRATEGY_READ:
142 rc = char_dev_input(prp);
143 break;
144
145 case STRATEGY_SAVERESTORE:
146 rc = sr_drv(prp->save_restore.Function);
147 break;
148
149 case STRATEGY_INITCOMPLETE:
150 case STRATEGY_CLOSE:
151 case STRATEGY_INPUTSTATUS:
152 case STRATEGY_FLUSHINPUT:
153 /* noop */
154 rc = RPDONE;
155 break;
156
157 default:
158 rc = RPDONE | RPERR_BADCOMMAND;
159 break;
160 }
161
162 prp->usStatus = rc;
163}
164
165void IdcHandler(REQPACKET *prp)
166{
167 StrategyHandler(prp);
168}
169
170/******************************************************************************
171 * Initialize the os2ahci driver. This includes command line parsing, scanning
172 * the PCI bus for supported AHCI adapters, etc.
173 */
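/* Hypothetical CONFIG.SYS example exercising the switches parsed below (the
 * parser itself is the authoritative list; the ADD file name is assumed):
 *
 *   BASEDEV=OS2AHCI.ADD /V /T /A:0 /P:1 /!N /LS:2
 *
 * i.e. verbose banner, thorough PCI scan, and for adapter 0 / port 1:
 * disable NCQ and limit the link speed to setting 2.
 */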
174USHORT init_drv(REQPACKET *req)
175{
176 static int init_drv_called;
177 static int init_drv_failed;
178 APIRET rmrc;
179 const char *pszCmdLine, *cmd_line;
180 int adapter_index = -1;
181 int port_index = -1;
182 int iInvertOption;
183 int iStatus;
184
185 if (init_drv_called)
186 {
187 /* This is the init call for the second (IBMS506$) character
188 * device driver. If the main driver failed initialization, fail this
189 * one as well.
190 */
191 return(RPDONE | ((init_drv_failed) ? RPERR_INITFAIL : 0));
192 }
193 D32g_DbgLevel = 0;
194 init_drv_called = 1;
195 suspended = 0;
196 resume_sleep_flag = 0;
197 memset(ad_infos, 0, sizeof(ad_infos));
198 memset(emulate_scsi, 1, sizeof(emulate_scsi)); /* set default enabled */
199 UtSetDriverName("OS2AHCI$");
200 Header.ulCaps |= DEV_ADAPTER_DD; /* DAZ This flag is not really needed. */
201
202 /* create driver-level spinlock */
203 KernAllocSpinLock(&drv_lock);
204
205 /* register driver with resource manager */
206 rm_drvinfo.DrvrName = drv_name;
207 rm_drvinfo.DrvrDescript = "AHCI SATA Driver";
208 rm_drvinfo.VendorName = DVENDOR;
209 if ((rmrc = RMCreateDriver(&rm_drvinfo, &rm_drvh)) != RMRC_SUCCESS)
210 {
211 iprintf("%s: failed to register driver with resource manager (rc = %d)", drv_name, rmrc);
212 goto init_fail;
213 }
214
215 pszCmdLine = cmd_line = req->init_in.szArgs;
216 iStatus = 0;
217 while (*pszCmdLine)
218 {
219 if (*pszCmdLine++ != '/') continue; /* Ignore anything that doesn't start with '/' */
220 /* pszCmdLine now points to first char of argument */
221
222 if ((iInvertOption = (*pszCmdLine == '!')) != 0) pszCmdLine++;
223
224 if (ArgCmp(pszCmdLine, "B:"))
225 {
226 pszCmdLine += 2;
227 com_baud = strtol(pszCmdLine, &pszCmdLine, 0);
228 continue;
229 }
230
231 if (ArgCmp(pszCmdLine, "C:"))
232 {
233 pszCmdLine += 2;
234 /* set COM port base address for debug messages */
235 D32g_ComBase = strtol(pszCmdLine, &pszCmdLine, 0);
236 if (D32g_ComBase == 1) D32g_ComBase = 0x3f8;
237 if (D32g_ComBase == 2) D32g_ComBase = 0x2f8;
238 continue;
239 }
240
241 if (ArgCmp(pszCmdLine, "D"))
242 {
243 pszCmdLine++;
244 if (*pszCmdLine == ':')
245 {
246 pszCmdLine++;
247 D32g_DbgLevel = strtol(pszCmdLine, &pszCmdLine, 0);
248 }
249 else D32g_DbgLevel++; /* increase debug level */
250 continue;
251 }
252
253 if (ArgCmp(pszCmdLine, "G:"))
254 {
255 u16 usVendor;
256 u16 usDevice;
257
258 pszCmdLine += 2;
259 /* add specified PCI ID as a supported generic AHCI adapter */
260 usVendor = strtol(pszCmdLine, &pszCmdLine, 16);
261 if (*pszCmdLine != ':') break;
262 pszCmdLine++;
263 usDevice = strtol(pszCmdLine, &pszCmdLine, 16);
264 if (add_pci_id(usVendor, usDevice))
265 {
266 iprintf("%s: failed to add PCI ID %04x:%04x", drv_name, usVendor, usDevice);
267 iStatus = 1;
268 }
269 thorough_scan = 1;
270 continue;
271 }
272
273 if (ArgCmp(pszCmdLine, "T"))
274 {
275 pszCmdLine++;
276 /* perform thorough PCI scan (i.e. look for individual supported PCI IDs) */
277 thorough_scan = !iInvertOption;
278 continue;
279 }
280
281 if (ArgCmp(pszCmdLine, "R"))
282 {
283 pszCmdLine++;
284 /* reset ports during initialization */
285 init_reset = !iInvertOption;
286 continue;
287 }
288
289 if (ArgCmp(pszCmdLine, "F"))
290 {
291 pszCmdLine++;
292 /* force write cache regardless of IORB flags */
293 force_write_cache = 1;
294 continue;
295 }
296
297 if (ArgCmp(pszCmdLine, "A:"))
298 {
299 pszCmdLine += 2;
300 /* set adapter index for adapter and port-related options */
301 adapter_index = strtol(pszCmdLine, &pszCmdLine, 0);
302 if (adapter_index < 0 || adapter_index >= MAX_AD)
303 {
304 iprintf("%s: invalid adapter index (%d)", drv_name, adapter_index);
305 iStatus = 1;
306 }
307 continue;
308 }
309
310 if (ArgCmp(pszCmdLine, "P:"))
311 {
312 pszCmdLine += 2;
313 /* set port index for port-related options */
314 port_index = strtol(pszCmdLine, &pszCmdLine, 0);
315 if (port_index < 0 || port_index >= AHCI_MAX_PORTS)
316 {
317 iprintf("%s: invalid port index (%d)", drv_name, port_index);
318 iStatus = 1;
319 }
320 continue;
321 }
322
323 if (ArgCmp(pszCmdLine, "I"))
324 {
325 pszCmdLine++;
326 /* ignore current adapter index */
327 if (adapter_index >= 0)
328 {
329 if (port_index >= 0) port_ignore[adapter_index][port_index] = !iInvertOption;
330 else ad_ignore |= 1U << adapter_index;
331 }
332 continue;
333 }
334
335 if (ArgCmp(pszCmdLine, "S"))
336 {
337 pszCmdLine++;
338 /* enable SCSI emulation for ATAPI devices */
339 set_port_option(emulate_scsi, !iInvertOption);
340 continue;
341 }
342
343 if (ArgCmp(pszCmdLine, "N"))
344 {
345 pszCmdLine++;
346 /* enable NCQ */
347 set_port_option(enable_ncq, !iInvertOption);
348 continue;
349 }
350
351 if (ArgCmp(pszCmdLine, "LS:"))
352 {
353 int optval;
354
355 pszCmdLine += 3;
356 /* set link speed */
357 optval = strtol(pszCmdLine, &pszCmdLine, 0);
358 set_port_option(link_speed, optval);
359 /* need to reset the port in order to establish link settings */
360 init_reset = 1;
361 continue;
362 }
363
364 if (ArgCmp(pszCmdLine, "LP:"))
365 {
366 int optval;
367
368 pszCmdLine += 3;
369 /* set power management */
370 optval = strtol(pszCmdLine, &pszCmdLine, 0);
371 set_port_option(link_power, optval);
372 /* need to reset the port in order to establish link settings */
373 init_reset = 1;
374 continue;
375 }
376
377 if (ArgCmp(pszCmdLine, "4"))
378 {
379 pszCmdLine++;
380 /* enable 4K sector geometry enhancement (track size = 56) */
381 if (!iInvertOption) set_port_option(track_size, 56);
382 continue;
383 }
384
385 if (ArgCmp(pszCmdLine, "U"))
386 {
387 pszCmdLine++;
388 /* Specify to use the MBR test to ignore non-MBR disks.
389 * Default is on.
390 */
391 use_mbr_test = !iInvertOption;
392 continue;
393 }
394
395 if (ArgCmp(pszCmdLine, "V"))
396 {
397 pszCmdLine++;
398 if (*pszCmdLine == ':')
399 {
400 pszCmdLine++;
401 verbosity = strtol(pszCmdLine, &pszCmdLine, 0);
402 }
403 else verbosity++; /* increase verbosity level */
404 continue;
405 }
406
407 if (ArgCmp(pszCmdLine, "W"))
408 {
409 pszCmdLine++;
410 /* Specify to allow the trace buffer to wrap when full. */
411 D32g_DbgBufWrap = !iInvertOption;
412 continue;
413 }
414
415 iprintf("Unrecognized switch: %s", pszCmdLine-1);
416 iStatus = 1; /* unrecognized argument */
417 }
418
419 if (iStatus) goto init_fail;
420
421 if (com_baud) InitComPort(com_baud);
422
423 dprintf(0,"BldLevel: %s\n", BldLevel);
424 dprintf(0,"CmdLine: %s\n", cmd_line);
425 /*
426 if (sizeof(ADD_WORKSPACE) > ADD_WORKSPACE_SIZE)
427 {
428 dprintf(0,"ADD_WORKSPACE size is too big! %d>16\n", sizeof(ADD_WORKSPACE));
429 goto init_fail;
430 }
431 */
432
433 /* print initialization message */
434 ciprintf("%s driver version %d.%02d", drv_name, DMAJOR, DMINOR);
435
436 #ifdef TESTVER
437 #include "testver.c"
438 #endif
439
440 /* scan PCI bus for supported devices */
441 scan_pci_bus();
442
443 if (ad_info_cnt > 0)
444 {
445 /* initialization succeeded and we found at least one AHCI adapter */
446
447 if (Dev32Help_RegisterDeviceClass(drv_name, add_entry, 0, 1, &add_handle))
448 {
449 iprintf("%s: couldn't register device class", drv_name);
450 goto init_fail;
451 }
452
453 Timer_InitTimer(TIMER_COUNT);
454
455 /* allocate context hooks */
456 KernAllocateContextHook(restart_ctxhook, 0, &restart_ctxhook_h);
457 KernAllocateContextHook(reset_ctxhook, 0, &reset_ctxhook_h);
458 KernAllocateContextHook(engine_ctxhook, 0, &engine_ctxhook_h);
459
460 /* register kernel exit routine for trap dumps */
461 Dev32Help_RegisterKrnlExit(shutdown_driver, FLAG_KRNL_EXIT_ADD, TYPE_KRNL_EXIT_INT13);
462
463 return(RPDONE);
464 }
465 else
466 {
467 /* no adapters found */
468 ciprintf("%s: No adapters found.", drv_name);
469 }
470
471init_fail:
472 /* initialization failed; set segment sizes to 0 and return error */
473 init_drv_failed = 1;
474
475 if (rm_drvh != 0)
476 {
477 /* remove driver from resource manager */
478 RMDestroyDriver(rm_drvh);
479 }
480
481 ciprintf("%s driver *not* installed", drv_name);
482 return(RPDONE | RPERR_INITFAIL);
483}
484
485/******************************************************************************
486 * Generic IOCTL via character device driver. IOCTLs are used to control the
487 * driver operation and to execute native ATA and ATAPI (SCSI) commands from
488 * ring 3 applications. On top of that, some predefined IOCTLs (e.g. SMART
489 * commands for ATA disks) are implemented here.
490 */
491USHORT gen_ioctl(REQPACKET *ioctl)
492{
493 DPRINTF(2,"IOCTL 0x%x/0x%x\n", ioctl->ioctl.bCategory, ioctl->ioctl.bFunction);
494
495 switch (ioctl->ioctl.bCategory)
496 {
497 case OS2AHCI_IOCTL_CATEGORY:
498 switch (ioctl->ioctl.bFunction)
499 {
500 case OS2AHCI_IOCTL_GET_DEVLIST:
501 return(ioctl_get_devlist(ioctl));
502
503 case OS2AHCI_IOCTL_PASSTHROUGH:
504 return(ioctl_passthrough(ioctl));
505 }
506 break;
507
508 case DSKSP_CAT_GENERIC:
509 return(ioctl_gen_dsk(ioctl));
510
511 case DSKSP_CAT_SMART:
512 return(ioctl_smart(ioctl));
513 }
514
515 return(RPDONE | RPERR_BADCOMMAND);
516}
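/* Ring-3 usage sketch for the private IOCTLs handled above. This is only an
 * illustration; the parameter/data packet layouts are defined in ioctl.h and
 * the buffer below is a placeholder:
 *
 *   HFILE hDrv; ULONG ulAction, cbParm = 0, cbData = sizeof(buf);
 *   DosOpen("OS2AHCI$", &hDrv, &ulAction, 0, FILE_NORMAL, FILE_OPEN,
 *           OPEN_ACCESS_READWRITE | OPEN_SHARE_DENYNONE, NULL);
 *   DosDevIOCtl(hDrv, OS2AHCI_IOCTL_CATEGORY, OS2AHCI_IOCTL_GET_DEVLIST,
 *               NULL, 0, &cbParm, buf, sizeof(buf), &cbData);
 *   DosClose(hDrv);
 *
 * "OS2AHCI$" matches the device name registered via UtSetDriverName() in
 * init_drv().
 */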
517
518/******************************************************************************
519 * Read from character device. If tracing is on (internal ring buffer trace),
520 * we return data from the trace buffer; if not, we might return a device
521 * dump similar to IBM1S506.ADD/DANIS506.ADD (TODO).
522 */
523USHORT char_dev_input(REQPACKET *pPacket)
524{
525 void *LinAdr;
526
527 if (Dev32Help_PhysToLin(pPacket->io.ulAddress, pPacket->io.usCount, &LinAdr))
528 {
529 pPacket->io.usCount = 0;
530 return RPDONE | RPERR_GENERAL;
531 }
532
533 pPacket->io.usCount = dCopyToUser(LinAdr, pPacket->io.usCount);
534
535 return RPDONE;
536}
537
538/******************************************************************************
539 * Device driver exit handler. This handler is called when OS/2 shuts down and
540 * flushes the write caches of all attached devices. Since this is effectively
541 * the same as what we do when suspending, we'll call out to the corresponding suspend
542 * function.
543 *
544 * NOTE: Errors are ignored because there's no way we could stop the shutdown
545 * or do something about the error, unless retrying endlessly is
546 * considered an option.
547 */
548USHORT exit_drv(int func)
549{
550 DPRINTF(2,"exit_drv(%d) called\n", func);
551
552 if (func == 0)
553 {
554 /* we're only interested in the second phase of the shutdown */
555 return(RPDONE);
556 }
557
558 suspend();
559 return(RPDONE);
560}
561
562/******************************************************************************
563 * Device driver suspend/resume handler. This handler is called when ACPI is
564 * executing a suspend or resume.
565 */
566USHORT sr_drv(int func)
567{
568 DPRINTF(2,"sr_drv(%d) called\n", func);
569
570 if (func) resume();
571 else suspend();
572
573 return(RPDONE);
574}
575
576/******************************************************************************
577 * ADD entry point. This is the main entry point for all ADD requests. Due to
578 * the asynchronous nature of ADD drivers, this function primarily queues the
579 * IORB(s) to the corresponding adapter or port queues, then triggers the
580 * state machine to initiate processing queued IORBs.
581 *
582 * NOTE: In order to prevent race conditions or engine stalls, certain rules
583 * around locking, unlocking and IORB handling in general have been
584 * established. Refer to the comments in "trigger_engine()" for
585 * details.
586 */
587void add_entry(IORBH FAR16DATA *vFirstIorb)
588{
589 IORBH FAR16DATA *vIorb;
590 IORBH FAR16DATA *vNext = FAR16NULL;
591
592 spin_lock(drv_lock);
593
594 for (vIorb=vFirstIorb; vIorb!=FAR16NULL; vIorb=vNext)
595 {
596 IORBH *pIorb = Far16ToFlat(vIorb);
597
598 /* Queue this IORB. Queues primarily exist on port level but there are
599 * some requests which affect the whole driver, most notably
600 * IOCC_CONFIGURATION. In either case, adding the IORB to the driver or
601 * port queue will change the links, thus we need to save the original
602 * link in 'vNext'.
603 */
604 if (pIorb->RequestControl & IORB_CHAIN) vNext = pIorb->pNxtIORB;
605 else vNext = (IORBH FAR16DATA *)0;
606
607 pIorb->Status = 0;
608 pIorb->ErrorCode = 0;
609 memset(&pIorb->ADDWorkSpace, 0x00, sizeof(ADD_WORKSPACE));
610
611 #ifdef DEBUG
612 DumpIorb(pIorb); /* DAZ TESTING */
613 #endif
614
615 if (iorb_driver_level(pIorb))
616 {
617 /* driver-level IORB */
618 pIorb->UnitHandle = 0;
619 iorb_queue_add(&driver_queue, vIorb, pIorb);
620 }
621 else
622 {
623 /* port-level IORB */
624 int a = iorb_unit_adapter(pIorb);
625 int p = iorb_unit_port(pIorb);
626 int d = iorb_unit_device(pIorb);
627
628 if (a >= ad_info_cnt ||
629 p > ad_infos[a].port_max ||
630 d > ad_infos[a].ports[p].dev_max ||
631 (ad_infos[a].port_map & (1UL << p)) == 0)
632 {
633 /* unit handle outside of the allowed range */
634 dprintf(0,"warning: IORB for %d.%d.%d out of range\n", a, p, d);
635 pIorb->Status = IORB_ERROR;
636 pIorb->ErrorCode = IOERR_CMD_SYNTAX;
637 iorb_complete(vIorb, pIorb);
638 continue;
639 }
640
641 iorb_queue_add(&ad_infos[a].ports[p].iorb_queue, vIorb, pIorb);
642 }
643 }
644
645 /* trigger state machine */
646 trigger_engine();
647
648 spin_unlock(drv_lock);
649}
650
651/******************************************************************************
652 * Trigger IORB queue engine. This is a wrapper function for trigger_engine_1()
653 * which will try to get all IORBs sent on their way a couple of times. If
654 * there are still IORBs ready for processing after this, this function will
655 * hand off to a context hook which will continue to trigger the engine until
656 * all IORBs have been sent.
657 *
658 * NOTE: While initialization has not completed (or during suspend/resume
659 * operations), this function will loop indefinitely because we can't
660 * rely on interrupt handlers or context hooks and complex IORBs
661 * requiring multiple requeues would eventually hang and time out if
662 * we stopped triggering here.
663 */
664void trigger_engine(void)
665{
666 int i;
667
668 for (i = 0; i < 3 || !init_complete; i++)
669 {
670 if (trigger_engine_1() == 0)
671 {
672 /* done -- all IORBs have been sent on their way */
673 return;
674 }
675 }
676
677 /* Something keeps bouncing; hand off to the engine context hook which will
678 * keep trying in the background.
679 */
680 KernArmHook(engine_ctxhook_h, 0, 0);
681}
682
683/******************************************************************************
684 * Trigger IORB queue engine in order to send commands in the driver/port IORB
685 * queues to the AHCI hardware. This function will return the number of IORBs
686 * sent. Keep in mind that IORBs might "bounce" if the adapter/port is not in
687 * a state to accept the command, thus it might take quite a few calls to get
688 * all IORBs on their way. This is why there's a wrapper function which tries
689 * it a few times, then hands off to a context hook which will keep trying in
690 * the background.
691 *
692 * IORBs might complete before send_iorb() has returned, at any time during
693 * interrupt processing or on another CPU on SMP systems. IORB completion
694 * means modifications to the corresponding IORB queue (the completed IORB
695 * is removed from the queue) thus we need to protect the IORB queues from
696 * race conditions. The safest approach short of keeping the driver-level
697 * spinlock acquired permanently is to keep it throughout this function and
698 * release it temporarily in send_iorb().
699 *
700 * This implies that the handler functions are fully responsible for acquiring
701 * the driver-level spinlock when they need it, and for releasing it again.
702 *
703 * As a rule of thumb, get the driver-level spinlock whenever accessing
704 * volatile variables (IORB queues, values in ad_info[], ...).
705 *
706 * Additional Notes:
707 *
708 * - This function is expected to be called with the spinlock acquired
709 *
710 * - Adapters can be flagged as 'busy' which means no new IORBs are sent (they
711 * just remain in the queue). This can be used to release the driver-level
712 * spinlock while making sure no new IORBs are going to hit the hardware.
713 * In order to prevent engine stalls, all handlers using this functionality
714 * need to invoke trigger_engine() after resetting the busy flag.
715 *
716 * - Driver-level IORBs are not synchronized by adapter-level 'busy' flags.
717 * However, the driver-level queue is worked "one entry at a time" which
718 * means that no new IORBs will be queued on the driver-level queue until
719 * the head element has completed processing. This means that driver-
720 * level IORB handlers don't need to protect against each other. But they
721 * do need to keep in mind interference with port-level IORBs:
722 *
723 * - Driver-level IORB handlers must obtain the spinlock and/or flag all
724 * adapters as 'busy' which are affected by the driver-level IORB
725 *
726 * - Driver-level IORB handlers must not access the hardware of a
727 * particular adapter if it's flagged as 'busy' by another IORB.
728 */
729int trigger_engine_1(void)
730{
731 IORBH FAR16DATA *vIorb;
732 IORBH *pIorb;
733 IORBH FAR16DATA *vNext;
734 int iorbs_sent = 0;
735 int a;
736 int p;
737
738 iorbs_sent = 0;
739
740 /* process driver-level IORBs */
741 if ((vIorb = driver_queue.vRoot) != FAR16NULL)
742 {
743 pIorb = Far16ToFlat(vIorb);
744
745 if (!add_workspace(pIorb)->processing)
746 {
747 send_iorb(vIorb, pIorb);
748 iorbs_sent++;
749 }
750 }
751
752 /* process port-level IORBs */
753 for (a = 0; a < ad_info_cnt; a++)
754 {
755 AD_INFO *ai = ad_infos + a;
756 if (ai->busy)
757 {
758 /* adapter is busy; don't process any IORBs */
759 continue;
760 }
761 for (p = 0; p <= ai->port_max; p++)
762 {
763 /* send all queued IORBs on this port */
764 vNext = FAR16NULL;
765 for (vIorb = ai->ports[p].iorb_queue.vRoot; vIorb != FAR16NULL; vIorb = vNext)
766 {
767 pIorb = Far16ToFlat(vIorb);
768
769 vNext = pIorb->pNxtIORB;
770 if (!add_workspace(pIorb)->processing)
771 {
772 send_iorb(vIorb, pIorb);
773 iorbs_sent++;
774 }
775 }
776 }
777 }
778
779 return(iorbs_sent);
780}
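/* Sketch of the adapter "busy" handshake described in the comment above
 * trigger_engine_1() (illustrative only; compare the port scan in
 * iocm_device_table() for a real use of the busy flag):
 *
 *   spin_lock(drv_lock);
 *   ai->busy = 1;                  // keep new IORBs away from the hardware
 *   spin_unlock(drv_lock);
 *   ...lengthy hardware access without holding the spinlock...
 *   spin_lock(drv_lock);
 *   ai->busy = 0;
 *   trigger_engine();              // re-trigger queued IORBs to avoid a stall
 *   spin_unlock(drv_lock);
 */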
781
782/******************************************************************************
783 * Send a single IORB to the corresponding AHCI adapter/port. This is just a
784 * switch board for calling the corresponding iocc_*() handler function.
785 *
786 * NOTE: This function is expected to be called with the driver-level spinlock
787 * acquired. It will release it before calling any of the handler
788 * functions and re-acquire it when done.
789 */
790void send_iorb(IORBH FAR16DATA *vIorb, IORBH *pIorb)
791{
792 /* Mark IORB as "processing" before doing anything else. Once the IORB is
793 * marked as "processing", we can release the spinlock because subsequent
794 * invocations of trigger_engine() (e.g. at interrupt time) will ignore this
795 * IORB.
796 */
797 add_workspace(pIorb)->processing = 1;
798 spin_unlock(drv_lock);
799
800 switch (pIorb->CommandCode)
801 {
802 case IOCC_CONFIGURATION:
803 iocc_configuration(vIorb, pIorb);
804 break;
805
806 case IOCC_DEVICE_CONTROL:
807 iocc_device_control(vIorb, pIorb);
808 break;
809
810 case IOCC_UNIT_CONTROL:
811 iocc_unit_control(vIorb, pIorb);
812 break;
813
814 case IOCC_GEOMETRY:
815 iocc_geometry(vIorb, pIorb);
816 break;
817
818 case IOCC_EXECUTE_IO:
819 iocc_execute_io(vIorb, pIorb);
820 break;
821
822 case IOCC_UNIT_STATUS:
823 iocc_unit_status(vIorb, pIorb);
824 break;
825
826 case IOCC_ADAPTER_PASSTHRU:
827 iocc_adapter_passthru(vIorb, pIorb);
828 break;
829
830 default:
831 /* unsupported call */
832 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
833 iorb_done(vIorb, pIorb);
834 break;
835 }
836
837 /* re-acquire spinlock before returning to trigger_engine() */
838 spin_lock(drv_lock);
839}
840
841/******************************************************************************
842 * Handle IOCC_CONFIGURATION requests.
843 */
844void iocc_configuration(IORBH FAR16DATA *vIorb, IORBH *pIorb)
845{
846 int a;
847
848 switch (pIorb->CommandModifier)
849 {
850
851 case IOCM_COMPLETE_INIT:
852 /* Complete initialization. From now on, we won't have to restore the BIOS
853 * configuration after each command and we're fully operational (i.e. will
854 * use interrupts, timers and context hooks instead of polling).
855 */
856 if (!init_complete)
857 {
858 DPRINTF(1,"leaving initialization mode\n");
859 for (a = 0; a < ad_info_cnt; a++)
860 {
861 lock_adapter(ad_infos + a);
862 ahci_complete_init(ad_infos + a);
863 }
864 init_complete = 1;
865
866 /* release all adapters */
867 for (a = 0; a < ad_info_cnt; a++)
868 {
869 unlock_adapter(ad_infos + a);
870 }
871 DPRINTF(1,"leaving initialization mode 2\n");
872
873 #ifdef LEGACY_APM
874 /* register APM hook */
875 apm_init();
876 #endif
877 }
878 iorb_done(vIorb, pIorb);
879 break;
880
881 case IOCM_GET_DEVICE_TABLE:
882 /* construct a device table */
883 iocm_device_table(vIorb, pIorb);
884 break;
885
886 default:
887 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
888 iorb_done(vIorb, pIorb);
889 break;
890 }
891}
892
893/******************************************************************************
894 * Handle IOCC_DEVICE_CONTROL requests.
895 */
896void iocc_device_control(IORBH FAR16DATA *vIorb, IORBH *pIorb)
897{
898 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
899 IORBH FAR16DATA *vPtr;
900 IORBH FAR16DATA *vNext = FAR16NULL;
901 int p = iorb_unit_port(pIorb);
902 int d = iorb_unit_device(pIorb);
903
904 switch (pIorb->CommandModifier)
905 {
906 case IOCM_ABORT:
907 /* abort all pending commands on specified port and device */
908 spin_lock(drv_lock);
909 for (vPtr = ai->ports[p].iorb_queue.vRoot; vPtr != FAR16NULL; vPtr = vNext)
910 {
911 IORBH *pPtr = Far16ToFlat(vPtr);
912
913 vNext = pPtr->pNxtIORB;
914 /* move all matching IORBs to the abort queue */
915 if (vPtr != vIorb && iorb_unit_device(pPtr) == d)
916 {
917 iorb_queue_del(&ai->ports[p].iorb_queue, vPtr);
918 iorb_queue_add(&abort_queue, vPtr, pPtr);
919 pPtr->ErrorCode = IOERR_CMD_ABORTED;
920 }
921 }
922 spin_unlock(drv_lock);
923
924 /* trigger reset context hook which will finish the abort processing */
925 KernArmHook(reset_ctxhook_h, 0, 0);
926 break;
927
928 case IOCM_SUSPEND:
929 case IOCM_RESUME:
930 case IOCM_GET_QUEUE_STATUS:
931 /* Suspend/resume operations allow access to the hardware for other
932 * entities such as IBMIDECD.FLT. Since os2ahci implements both ATA
933 * and ATAPI in the same driver, this won't be required.
934 */
935 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
936 break;
937
938 case IOCM_LOCK_MEDIA:
939 case IOCM_UNLOCK_MEDIA:
940 case IOCM_EJECT_MEDIA:
941 /* unit control commands to lock, unlock and eject media */
942 /* will be supported later... */
943 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
944 break;
945
946 default:
947 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
948 break;
949 }
950
951 iorb_done(vIorb, pIorb);
952}
953
954/******************************************************************************
955 * Handle IOCC_UNIT_CONTROL requests.
956 */
957void iocc_unit_control(IORBH FAR16DATA *vIorb, IORBH *pIorb)
958{
959 IORB_UNIT_CONTROL *pIorb_uc = (IORB_UNIT_CONTROL *)pIorb;
960 int a = iorb_unit_adapter(pIorb);
961 int p = iorb_unit_port(pIorb);
962 int d = iorb_unit_device(pIorb);
963
964 spin_lock(drv_lock);
965 switch (pIorb->CommandModifier)
966 {
967 case IOCM_ALLOCATE_UNIT:
968 /* allocate unit for exclusive access */
969 if (ad_infos[a].ports[p].devs[d].allocated)
970 {
971 iorb_seterr(pIorb, IOERR_UNIT_ALLOCATED);
972 }
973 else
974 {
975 ad_infos[a].ports[p].devs[d].allocated = 1;
976 }
977 break;
978
979 case IOCM_DEALLOCATE_UNIT:
980 /* deallocate exclusive access to unit */
981 if (!ad_infos[a].ports[p].devs[d].allocated)
982 {
983 iorb_seterr(pIorb, IOERR_UNIT_NOT_ALLOCATED);
984 }
985 else
986 {
987 ad_infos[a].ports[p].devs[d].allocated = 0;
988 }
989 break;
990
991 case IOCM_CHANGE_UNITINFO:
992 /* Change unit (device) information. One reason for this IOCM is the
993 * interface for filter device drivers: a filter device driver can
994 * either change existing UNITINFOs or permanently allocate units
995 * and fabricate new [logical] units; the former is the reason why we
996 * must store the pointer to the updated UNITINFO for subsequent
997 * IOCC_CONFIGURATION/IOCM_GET_DEVICE_TABLE calls.
998 */
999 if (!ad_infos[a].ports[p].devs[d].allocated)
1000 {
1001 iorb_seterr(pIorb, IOERR_UNIT_NOT_ALLOCATED);
1002 break;
1003 }
1004 ad_infos[a].ports[p].devs[d].unit_info = pIorb_uc->pUnitInfo;
1005 break;
1006
1007 default:
1008 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1009 break;
1010 }
1011
1012 spin_unlock(drv_lock);
1013 iorb_done(vIorb, pIorb);
1014}
1015
1016/******************************************************************************
1017 * Scan all ports for AHCI devices and construct a DASD device table.
1018 *
1019 * NOTES: This function may be called multiple times. Only the first
1020 * invocation will actually scan for devices; all subsequent calls will
1021 * merely return the results of the initial scan, potentially augmented
1022 * by modified unit infos after IOCC_CONFIGURATION/IOCM_CHANGE_UNITINFO
1023 * requests.
1024 *
1025 * In order to support applications that can't deal with ATAPI devices
1026 * (i.e. need a SCSI adapter) os2ahci will optionally report ATAPI
1027 * devices as SCSI devices. The corresponding SCSI adapter doesn't
1028 * really exist and is only reported here for the IOCM_GET_DEVICE_TABLE
1029 * request. The units attached to this adapter will use the real HW
1030 * unit IDs, thus we'll never receive a command specific to the
1031 * emulated SCSI adapter and won't need to set up any sort of entity
1032 * for it; the only purpose of the emulated SCSI adapter is to pass the
1033 * bus type "AI_DEVBUS_SCSI_2" upstream, and the emulated units, of
1034 * course. The emulated SCSI target IDs are allocated as follows:
1035 *
1036 * 0 the virtual adapter
1037 * 1..n emulated devices; SCSI target ID increments sequentially
1038 */
1039void iocm_device_table(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1040{
1041 IORB_CONFIGURATION *pIorb_conf;
1042 DEVICETABLE FAR16DATA *vDt;
1043 DEVICETABLE *pDt;
1044 char *pPos;
1045 int scsi_units = 0;
1046 int scsi_id = 1;
1047 int rc;
1048 int dta;
1049 int a;
1050 int p;
1051 int d;
1052
1053 pIorb_conf = (IORB_CONFIGURATION *)pIorb;
1054 vDt = pIorb_conf->pDeviceTable;
1055 pDt = Far16ToFlat(vDt);
1056
1057 spin_lock(drv_lock);
1058
1059 /* initialize device table header */
1060 pDt->ADDLevelMajor = ADD_LEVEL_MAJOR;
1061 pDt->ADDLevelMinor = ADD_LEVEL_MINOR;
1062 pDt->ADDHandle = add_handle;
1063 pDt->TotalAdapters = ad_info_cnt + 1;
1064
1065 /* set start of adapter and device information tables */
1066 pPos = (char*)&pDt->pAdapter[pDt->TotalAdapters];
1067
1068 /* go through all adapters, including the virtual SCSI adapter */
1069 for (dta = 0; dta < pDt->TotalAdapters; dta++)
1070 {
1071 ADAPTERINFO *pPtr = (ADAPTERINFO *)pPos;
1072
1073 /* sanity check for sufficient space in device table */
1074 if ((u32)(pPtr + 1) - (u32)pDt > pIorb_conf->DeviceTableLen)
1075 {
1076 dprintf(0,"error: device table provided by DASD too small\n");
1077 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
1078 goto iocm_device_table_done;
1079 }
1080
1081 pDt->pAdapter[dta] = MakeNear16PtrFromDiff(pIorb_conf->pDeviceTable, pDt, pPtr);
1082
1083 //DPRINTF(2,"iocm_device_table: ptr=%x dta=%x pAdapter[dta]=%x pDeviceTable=%x\n",
1084 // ptr, dta, dt->pAdapter[dta], iorb_conf->pDeviceTable);
1085 memset(pPtr, 0x00, sizeof(*pPtr));
1086
1087 pPtr->AdapterIOAccess = AI_IOACCESS_BUS_MASTER;
1088 pPtr->AdapterHostBus = AI_HOSTBUS_OTHER | AI_BUSWIDTH_32BIT;
1089 pPtr->AdapterFlags = AF_16M | AF_HW_SCATGAT;
1090 pPtr->MaxHWSGList = AHCI_MAX_SG / 2; /* AHCI S/G elements are 22 bits */
1091
1092 if (dta < ad_info_cnt)
1093 {
1094 /* this is a physical AHCI adapter */
1095 AD_INFO *ad_info = ad_infos + dta;
1096
1097 pPtr->AdapterDevBus = AI_DEVBUS_ST506 | AI_DEVBUS_32BIT;
1098 snprintf(pPtr->AdapterName, sizeof(pPtr->AdapterName), "AHCI_%d", dta);
1099
1100 if (!ad_info->port_scan_done)
1101 {
1102 /* first call; need to scan AHCI hardware for devices */
1103 if (ad_info->busy)
1104 {
1105 dprintf(0,"error: port scan requested while adapter was busy\n");
1106 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
1107 goto iocm_device_table_done;
1108 }
1109 ad_info->busy = 1;
1110 spin_unlock(drv_lock);
1111 rc = ahci_scan_ports(ad_info);
1112 spin_lock(drv_lock);
1113 ad_info->busy = 0;
1114
1115 if (rc != 0)
1116 {
1117 dprintf(0,"error: port scan failed on adapter #%d\n", dta);
1118 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
1119 goto iocm_device_table_done;
1120 }
1121 ad_info->port_scan_done = 1;
1122 }
1123
1124 /* insert physical (i.e. AHCI) devices into the device table */
1125 for (p = 0; p <= ad_info->port_max; p++)
1126 {
1127 for (d = 0; d <= ad_info->ports[p].dev_max; d++)
1128 {
1129 if (ad_info->ports[p].devs[d].present && !ad_info->ports[p].devs[d].ignored)
1130 {
1131 if (ad_info->ports[p].devs[d].atapi && emulate_scsi[dta][p])
1132 {
1133 /* report this unit as SCSI unit */
1134 scsi_units++;
1135 //continue;
1136 }
1137 if (add_unit_info(pIorb_conf, dta, dta, p, d, 0))
1138 {
1139 goto iocm_device_table_done;
1140 }
1141 }
1142 }
1143 }
1144 }
1145 else
1146 {
1147 /* this is the virtual SCSI adapter */
1148 if (scsi_units == 0)
1149 {
1150 /* not a single unit to be emulated via SCSI */
1151 pDt->TotalAdapters--;
1152 break;
1153 }
1154
1155 /* set adapter name and bus type to mimic a SCSI controller */
1156 pPtr->AdapterDevBus = AI_DEVBUS_SCSI_2 | AI_DEVBUS_16BIT;
1157 snprintf(pPtr->AdapterName, sizeof(pPtr->AdapterName), "AHCI_SCSI_0");
1158
1159 /* add all ATAPI units to be emulated by this virtual adapter */
1160 for (a = 0; a < ad_info_cnt; a++)
1161 {
1162 AD_INFO *ad_info = ad_infos + a;
1163
1164 for (p = 0; p <= ad_info->port_max; p++)
1165 {
1166 for (d = 0; d <= ad_info->ports[p].dev_max; d++)
1167 {
1168 if (ad_info->ports[p].devs[d].present && !ad_info->ports[p].devs[d].ignored
1169 && ad_info->ports[p].devs[d].atapi && emulate_scsi[a][p])
1170 {
1171 if (add_unit_info(pIorb_conf, dta, a, p, d, scsi_id++))
1172 {
1173 goto iocm_device_table_done;
1174 }
1175 }
1176 }
1177 }
1178 }
1179 }
1180
1181 /* calculate offset for next adapter */
1182 pPos = (char *)(pPtr->UnitInfo + pPtr->AdapterUnits);
1183 }
1184
1185iocm_device_table_done:
1186 spin_unlock(drv_lock);
1187 iorb_done(vIorb, pIorb);
1188}
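/* Rough layout of the device table filled in above (reading aid only; the
 * authoritative definitions come from the ADD headers):
 *
 *   DEVICETABLE header              ADDLevelMajor/Minor, ADDHandle, TotalAdapters
 *   NEAR16 pAdapter[TotalAdapters]  offsets to the ADAPTERINFO blocks below
 *   ADAPTERINFO #0                  immediately followed by its UnitInfo[] array
 *   ADAPTERINFO #1 ...              last entry may be the virtual SCSI adapter
 *
 * Everything has to fit within DeviceTableLen, which is why this function and
 * add_unit_info() both bound-check against it.
 */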
1189
1190/******************************************************************************
1191 * Handle IOCC_GEOMETRY requests.
1192 */
1193void iocc_geometry(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1194{
1195 switch (pIorb->CommandModifier)
1196 {
1197 case IOCM_GET_MEDIA_GEOMETRY:
1198 case IOCM_GET_DEVICE_GEOMETRY:
1199 add_workspace(pIorb)->idempotent = 1;
1200 ahci_get_geometry(vIorb, pIorb);
1201 break;
1202
1203 default:
1204 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1205 iorb_done(vIorb, pIorb);
1206 }
1207}
1208
1209/******************************************************************************
1210 * Handle IOCC_EXECUTE_IO requests.
1211 */
1212void iocc_execute_io(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1213{
1214 switch (pIorb->CommandModifier)
1215 {
1216 case IOCM_READ:
1217 add_workspace(pIorb)->idempotent = 1;
1218 ahci_read(vIorb, pIorb);
1219 break;
1220
1221 case IOCM_READ_VERIFY:
1222 add_workspace(pIorb)->idempotent = 1;
1223 ahci_verify(vIorb, pIorb);
1224 break;
1225
1226 case IOCM_WRITE:
1227 add_workspace(pIorb)->idempotent = 1;
1228 ahci_write(vIorb, pIorb);
1229 break;
1230
1231 case IOCM_WRITE_VERIFY:
1232 add_workspace(pIorb)->idempotent = 1;
1233 ahci_write(vIorb, pIorb);
1234 break;
1235
1236 default:
1237 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1238 iorb_done(vIorb, pIorb);
1239 }
1240}
1241
1242/******************************************************************************
1243 * Handle IOCC_UNIT_STATUS requests.
1244 */
1245void iocc_unit_status(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1246{
1247 switch (pIorb->CommandModifier)
1248 {
1249 case IOCM_GET_UNIT_STATUS:
1250 add_workspace(pIorb)->idempotent = 1;
1251 ahci_unit_ready(vIorb, pIorb);
1252 break;
1253
1254 default:
1255 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1256 iorb_done(vIorb, pIorb);
1257 }
1258}
1259
1260/******************************************************************************
1261 * Handle IOCC_ADAPTER_PASSTHROUGH requests.
1262 */
1263void iocc_adapter_passthru(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1264{
1265 switch (pIorb->CommandModifier)
1266 {
1267 case IOCM_EXECUTE_CDB:
1268 add_workspace(pIorb)->idempotent = 0;
1269 ahci_execute_cdb(vIorb, pIorb);
1270 break;
1271
1272 case IOCM_EXECUTE_ATA:
1273 add_workspace(pIorb)->idempotent = 0;
1274 ahci_execute_ata(vIorb, pIorb);
1275 break;
1276
1277 default:
1278 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1279 iorb_done(vIorb, pIorb);
1280 }
1281}
1282
1283/******************************************************************************
1284 * Add an IORB to the specified queue. This function must be called with the
1285 * adapter-level spinlock acquired.
1286 */
1287void iorb_queue_add(IORB_QUEUE *queue, IORBH FAR16DATA *vIorb, IORBH *pIorb)
1288{
1289 if (iorb_priority(pIorb))
1290 {
1291 /* priority IORB; insert at first position */
1292 pIorb->pNxtIORB = queue->vRoot;
1293 queue->vRoot = vIorb;
1294 }
1295 else
1296 {
1297 /* append IORB to end of queue */
1298 pIorb->pNxtIORB = FAR16NULL;
1299
1300 if (queue->vRoot == FAR16NULL)
1301 {
1302 queue->vRoot = vIorb;
1303 }
1304 else
1305 {
1306 ((IORBH *)Far16ToFlat(queue->vTail))->pNxtIORB = vIorb;
1307 }
1308 queue->vTail = vIorb;
1309 }
1310
1311 #ifdef DEBUG
1312 if (D32g_DbgLevel)
1313 {
1314 /* determine queue type (local, driver, abort or port) and minimum debug
1315 * level; otherwise, queue debug prints can become really confusing.
1316 */
1317 char *queue_type;
1318 int min_debug = 7;
1319
1320 if ((u32)queue >> 16 == (u32)&queue >> 16) /* DAZ this is bogus */
1321 {
1322 /* this queue is on the stack */
1323 queue_type = "local";
1324 min_debug = 8;
1325 }
1326 else if (queue == &driver_queue)
1327 {
1328 queue_type = "driver";
1329 }
1330 else if (queue == &abort_queue)
1331 {
1332 queue_type = "abort";
1333 min_debug = 8;
1334 }
1335 else
1336 {
1337 queue_type = "port";
1338 }
1339
1340 DPRINTF(min_debug,"IORB %x queued (cmd=%d/%d queue=%x [%s], timeout=%d)\n",
1341 vIorb, pIorb->CommandCode, pIorb->CommandModifier, queue, queue_type,
1342 pIorb->Timeout);
1343 }
1344 #endif
1345}
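/* For reference: the queue handled above is a singly linked list threaded
 * through the IORBs themselves. A minimal sketch of the assumed layout (the
 * real definition lives in the driver headers):
 *
 *   typedef struct {
 *     IORBH FAR16DATA *vRoot;   // first IORB in the queue (FAR16NULL if empty)
 *     IORBH FAR16DATA *vTail;   // last IORB, kept for O(1) appends
 *   } IORB_QUEUE;
 *
 * The links live in IORBH.pNxtIORB, so an IORB can only ever be a member of
 * one queue at a time.
 */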
1346
1347/******************************************************************************
1348 * Remove an IORB from the specified queue. This function must be called with
1349 * the adapter-level spinlock acquired.
1350 */
1351int iorb_queue_del(IORB_QUEUE *queue, IORBH FAR16DATA *vIorb)
1352{
1353 IORBH FAR16DATA *_vIorb;
1354 IORBH FAR16DATA *_vPrev = FAR16NULL;
1355 int found = 0;
1356
1357 for (_vIorb = queue->vRoot; _vIorb != FAR16NULL; )
1358 {
1359 IORBH *_pIorb = Far16ToFlat(_vIorb);
1360 if (_vIorb == vIorb)
1361 {
1362 /* found the IORB to be removed */
1363 if (_vPrev != FAR16NULL)
1364 {
1365 ((IORBH*)Far16ToFlat(_vPrev))->pNxtIORB = _pIorb->pNxtIORB;
1366 }
1367 else
1368 {
1369 queue->vRoot = _pIorb->pNxtIORB;
1370 }
1371 if (_vIorb == queue->vTail)
1372 {
1373 queue->vTail = _vPrev;
1374 }
1375 found = 1;
1376 break;
1377 }
1378 _vPrev = _vIorb;
1379 _vIorb = _pIorb->pNxtIORB;
1380 }
1381
1382 #ifdef DEBUG
1383 if (found)
1384 {
1385 DPRINTF(8,"IORB %x removed (queue = %x)\n", vIorb, queue);
1386 }
1387 else
1388 {
1389 DPRINTF(2,"IORB %x not found in queue %x\n", vIorb, queue);
1390 }
1391 #endif
1392
1393 return(!found);
1394}
1395
1396/******************************************************************************
1397 * Set the error code in the specified IORB
1398 *
1399 * NOTE: This function does *not* call iorb_done(). It merely sets the IORB
1400 * status to the specified error code.
1401 */
1402void iorb_seterr(IORBH *pIorb, USHORT error_code)
1403{
1404 pIorb->ErrorCode = error_code;
1405 pIorb->Status |= IORB_ERROR;
1406}
1407
1408/******************************************************************************
1409 * Mark the specified IORB as done and notify the asynchronous post function,
1410 * if any. The IORB is also removed from the corresponding IORB queue.
1411 *
1412 * NOTES: This function does not clear the Status field; it merely adds the
1413 * IORB_DONE flag.
1414 *
1415 * This function is expected to be called *without* the corresponding
1416 * driver-level drv_lock acquired. It will acquire the spinlock before
1417 * updating the IORB queue and release it before notifying the upstream
1418 * code in order to prevent deadlocks.
1419 *
1420 * Due to this logic, this function is only good for simple task-time
1421 * completions. Functions working on lists of IORBs (such as interrupt
1422 * handlers or context hooks) should call iorb_complete() directly and
1423 * implement their own logic for removing the IORB from the port queue.
1424 * See abort_ctxhook() for an example.
1425 */
1426void iorb_done(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1427{
1428 int a = iorb_unit_adapter(pIorb);
1429 int p = iorb_unit_port(pIorb);
1430
1431 /* remove IORB from corresponding queue */
1432 spin_lock(drv_lock);
1433 if (iorb_driver_level(pIorb))
1434 {
1435 iorb_queue_del(&driver_queue, vIorb);
1436 }
1437 else
1438 {
1439 iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, vIorb);
1440 }
1441 aws_free(add_workspace(pIorb));
1442 spin_unlock(drv_lock);
1443
1444 iorb_complete(vIorb, pIorb);
1445}
1446
1447/******************************************************************************
1448 * Complete an IORB. This should be called without the adapter-level spinlock
1449 * to allow the IORB completion routine to perform whatever processing it
1450 * requires. This implies that the IORB should no longer be in any global
1451 * queue because the IORB completion routine may well reuse the IORB and send
1452 * the next request to us before even returning from this function.
1453 */
1454void iorb_complete(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1455{
1456 pIorb->Status |= IORB_DONE;
1457
1458 DPRINTF(7,"IORB %x complete status=0x%04x error=0x%04x\n",
1459 vIorb, pIorb->Status, pIorb->ErrorCode);
1460
1461 if (pIorb->RequestControl & IORB_ASYNC_POST)
1462 {
1463 Dev32Help_CallFar16((PFNFAR16)pIorb->NotifyAddress, vIorb);
1464 }
1465}
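/* Typical task-time completion path in a handler, per the notes above
 * (illustrative only):
 *
 *   if (failed)
 *     iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);  // status/error code only
 *   iorb_done(vIorb, pIorb);                        // dequeue, free workspace,
 *                                                   // then iorb_complete()
 *
 * Interrupt handlers and context hooks dequeue under drv_lock themselves and
 * call iorb_complete() directly instead.
 */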
1466
1467/******************************************************************************
1468 * Requeue the specified IORB such that it will be sent downstream for
1469 * processing again. This includes freeing all resources currently allocated
1470 * (timer, buffer, ...) and resetting the flags to 0. The driver-level
1471 * spinlock must be acquired when calling this function.
1472 *
1473 * The following flags are preserved:
1474 * - no_ncq
1475 */
1476void iorb_requeue(IORBH *pIorb)
1477{
1478 ADD_WORKSPACE *aws = add_workspace(pIorb);
1479 u16 no_ncq = aws->no_ncq;
1480 u16 unaligned = aws->unaligned;
1481 u16 retries = aws->retries;
1482
1483 aws_free(aws);
1484 memset(aws, 0x00, sizeof(*aws));
1485
1486 aws->no_ncq = no_ncq;
1487 aws->unaligned = unaligned;
1488 aws->retries = retries;
1489}
1490
1491/******************************************************************************
1492 * Free resources in ADD workspace (timer, buffer, ...). This function should
1493 * be called with the spinlock held to prevent race conditions.
1494 */
1495void aws_free(ADD_WORKSPACE *aws)
1496{
1497 if (aws->timer != 0)
1498 {
1499 Timer_CancelTimer(aws->timer);
1500 aws->timer = 0;
1501 }
1502
1503 if (aws->buf != NULL)
1504 {
1505 MemFree(aws->buf);
1506 aws->buf = NULL;
1507 }
1508}
1509
1510/******************************************************************************
1511 * Lock the adapter, waiting for availability if necessary. This is expected
1512 * to be called at task/request time without the driver-level spinlock
1513 * acquired. Don't call at interrupt time.
1514 */
1515void lock_adapter(AD_INFO *ai)
1516{
1517 TIMER Timer;
1518
1519 spin_lock(drv_lock);
1520 while (ai->busy)
1521 {
1522 spin_unlock(drv_lock);
1523 TimerInit(&Timer, 250);
1524 while (!TimerCheckAndBlock(&Timer));
1525 spin_lock(drv_lock);
1526 }
1527 ai->busy = 1;
1528 spin_unlock(drv_lock);
1529}
1530
1531/******************************************************************************
1532 * Unlock adapter (i.e. reset busy flag)
1533 */
1534void unlock_adapter(AD_INFO *ai)
1535{
1536 ai->busy = 0;
1537}
1538
1539/******************************************************************************
1540 * Timeout handler for I/O commands. Since timeout handling can involve
1541 * lengthy operations like port resets, the main code is located in a
1542 * separate function which is invoked via a context hook.
1543 */
1544void __syscall timeout_callback(ULONG timer_handle, ULONG p1)
1545{
1546 IORBH FAR16DATA *vIorb = (IORBH FAR16DATA *)CastULONGToFar16(p1);
1547 IORBH *pIorb = Far16ToFlat(vIorb);
1548 int a = iorb_unit_adapter(pIorb);
1549 int p = iorb_unit_port(pIorb);
1550
1551 Timer_CancelTimer(timer_handle);
1552 dprintf(0,"timeout for IORB %x\n", vIorb);
1553
1554 /* Move the timed-out IORB to the abort queue. Since it's possible that the
1555 * IORB has completed after the timeout has expired but before we got to
1556 * this line of code, we'll check the return code of iorb_queue_del(): If it
1557 * returns an error, the IORB must have completed a few microseconds ago and
1558 * there is no timeout.
1559 */
1560 spin_lock(drv_lock);
1561 if (iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, vIorb) == 0)
1562 {
1563 iorb_queue_add(&abort_queue, vIorb, pIorb);
1564 pIorb->ErrorCode = IOERR_ADAPTER_TIMEOUT;
1565 }
1566 spin_unlock(drv_lock);
1567
1568 /* Trigger abort processing function. We don't really care whether this
1569 * succeeds because the only reason why it would fail should be multiple
1570 * calls to DevHelp_ArmCtxHook() before the context hook had a chance to
1571 * start executing, which leaves two scenarios:
1572 *
1573 * - We succeeded in arming the context hook. Fine.
1574 *
1575 * - We armed the context hook a second time before it had a chance to
1576 * start executing. In this case, the already scheduled context hook
1577 * will process our IORB as well.
1578 */
1579 KernArmHook(reset_ctxhook_h, 0, 0);
1580
1581 /* Set up a watchdog timer which calls the context hook manually in case
1582 * some kernel thread is looping around the IORB_COMPLETE status bit
1583 * without yielding the CPU (kernel threads are not preempted). This shouldn't
1584 * happen per design because kernel threads are supposed to yield but it
1585 * does in the early boot phase.
1586 */
1587 Timer_StartTimerMS(&th_reset_watchdog, 5000, reset_watchdog, 0);
1588}
1589
1590/******************************************************************************
1591 * Reset handler watchdog. If a timeout occurs, a context hook is armed which
1592 * will execute as soon as a kernel thread yields the CPU. However, some
1593 * kernel components won't yield the CPU during the early boot phase and the
1594 * only way to kick some sense into those components is to run the context
1595 * hook right inside this timer callback. Not exactly pretty, especially
1596 * considering the fact that context hooks were implemented to prevent running
1597 * lengthy operations like a port reset at interrupt time, but without this
1598 * watchdog mechanism we run the risk of getting completely stalled by device
1599 * problems during the early boot phase.
1600 */
1601void __syscall reset_watchdog(ULONG timer_handle, ULONG p1)
1602{
1603 /* reset watchdog timer */
1604 Timer_CancelTimer(timer_handle);
1605 th_reset_watchdog = 0;
1606 dprintf(0,"reset watchdog invoked\n");
1607
1608 /* You cannot call the reset_ctxhook directly because it does things
1609 * that are illegal in an interrupt handler.
1610 */
1611
1612 KernArmHook(reset_ctxhook_h, 0, 0);
1613
1614 /* call context hook manually */
1615 //reset_ctxhook(0);
1616}
1617
1618/******************************************************************************
1619 * Add unit info to ADAPTERINFO array (IOCM_GET_DEVICE_TABLE requests). The
1620 * adapter info array in the device table, dt->pAdapter[], is expected to be
1621 * initialized for the specified index (dt_ai).
1622 *
1623 * Please note that the device table adapter index, dta, is not always equal
1624 * to the physical adapter index, a: if SCSI emulation has been activated, the
1625 * last reported adapter is a virtual SCSI adapter and the physical adapter
1626 * indexes for those units are, of course, different from the device table
1627 * index of the virtual SCSI adapter.
1628 */
1629static int add_unit_info(IORB_CONFIGURATION *pIorb_conf, int dta,
1630 int a, int p, int d, int scsi_id)
1631{
1632 DEVICETABLE *pDt = Far16ToFlat(pIorb_conf->pDeviceTable);
1633 ADAPTERINFO *pPtr;
1634 UNITINFO *pUi;
1635 AD_INFO *ai = ad_infos + a;
1636
1637 pPtr = (ADAPTERINFO *)MakeFlatFromNear16(pIorb_conf->pDeviceTable, pDt->pAdapter[dta]);
1638 //DPRINTF(2,"add_unit_info: ptr=%x dta=%x pAdapter[dta]=%x pDeviceTable=%x\n",
1639 // ptr, dta, dt->pAdapter[dta], iorb_conf->pDeviceTable);
1640
1641 pUi = &pPtr->UnitInfo[pPtr->AdapterUnits];
1642
1643 if ((u32)(pUi + 1) - (u32)pDt > pIorb_conf->DeviceTableLen)
1644 {
1645 dprintf(0,"error: device table provided by DASD too small\n");
1646 iorb_seterr(&pIorb_conf->iorbh, IOERR_CMD_SW_RESOURCE);
1647 return(-1);
1648 }
1649
1650 if (ai->ports[p].devs[d].unit_info == NULL)
1651 {
1652 /* provide original information about this device (unit) */
1653 memset(pUi, 0x00, sizeof(*pUi));
1654 pUi->AdapterIndex = dta; /* device table adapter index */
1655 pUi->UnitHandle = iorb_unit(a, p, d); /* physical adapter index */
1656 pUi->UnitIndex = pPtr->AdapterUnits;
1657 pUi->UnitType = ai->ports[p].devs[d].dev_type;
1658 pUi->QueuingCount = ai->ports[p].devs[d].ncq_max;
1659 if (ai->ports[p].devs[d].removable)
1660 {
1661 pUi->UnitFlags |= UF_REMOVABLE;
1662 }
1663 if (scsi_id > 0) {
1664 /* set fake SCSI ID for this unit */
1665 pUi->UnitSCSITargetID = scsi_id;
1666 }
1667 }
1668 else
1669 {
1670 /* copy updated device (unit) information (IOCM_CHANGE_UNITINFO) */
1671 memcpy(pUi, ai->ports[p].devs[d].unit_info, sizeof(*pUi));
1672 }
1673
1674 pPtr->AdapterUnits++;
1675 return(0);
1676}
1677