source: trunk/src/os2ahci/os2ahci.c@ 141

Last change on this file since 141 was 141, checked in by Markus Thielen, 13 years ago

fixed #13 (kernel exit call for trap dumps)

File size: 50.4 KB
1/******************************************************************************
2 * os2ahci.c - main file for os2ahci driver
3 *
4 * Copyright (c) 2011 thi.guten Software Development
5 * Copyright (c) 2011 Mensys B.V.
6 *
7 * Authors: Christian Mueller, Markus Thielen
8 *
9 * Parts copied from/inspired by the Linux AHCI driver;
10 * those parts are (c) Linux AHCI/ATA maintainers
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include "os2ahci.h"
28#include "bldday.h"
29
30#include "ioctl.h"
31
32/* -------------------------- macros and constants ------------------------- */
33
34/* parse integer command line parameter */
35#define drv_parm_int(s, value, type, radix) \
36 { \
37 char _far *_ep; \
38 if ((s)[1] != ':') { \
39 cprintf("%s: missing colon (:) after /%c\n", drv_name, *(s)); \
40 goto init_fail; \
41 } \
42 value = (type) strtol((s) + 2, \
43 (const char _far* _far*) &_ep, \
44 radix); \
45 s = _ep; \
46 }
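/* Usage sketch: for a parameter like "/c:3f8", a handler calls
 * drv_parm_int(s, com_base, u16, 16); the macro verifies the ':' separator,
 * parses the hex value into com_base and leaves s at strtol()'s end pointer.
 */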
47
48/* set two-dimensional array of port options */
49#define set_port_option(opt, val) \
50 if (adapter_index == -1) { \
51 /* set option for all adapters and ports */ \
52 memset(opt, val, sizeof(opt)); \
53 } else if (port_index == -1) { \
54 /* set option for all ports on current adapter */ \
55 memset(opt[adapter_index], val, sizeof(*opt)); \
56 } else { \
57 /* set option for specific port */ \
58 opt[adapter_index][port_index] = val; \
59 }
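/* Usage sketch: after "/a:1 /p:3", set_port_option(enable_ncq, 1) affects
 * only enable_ncq[1][3]; without a preceding /a it fills the array for all
 * adapters and ports, with /a but no /p it fills all ports of that adapter.
 */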
60
61/* constants for the kernel exit routine DevHlp, which is not defined in our headers;
62 * see register_krnl_exit() func */
63#define DevHlp_RegisterKrnlExit 0x006f
64
65#define FLAG_KRNL_EXIT_ADD 0x1000
66#define FLAG_KRNL_EXIT_REMOVE 0x2000
67
68#define TYPE_KRNL_EXIT_NMI 0x0000 /* non-maskable interrupts */
69#define TYPE_KRNL_EXIT_SFF 0x0001 /* system fatal faults */
70#define TYPE_KRNL_EXIT_PROCDUMP 0x0002
71#define TYPE_KRNL_EXIT_DYN 0x0003
72#define TYPE_KRNL_EXIT_INT13 0x0004 /* enable int13 IO */
73
74/* ------------------------ typedefs and structures ------------------------ */
75
76/* -------------------------- function prototypes -------------------------- */
77
78void _cdecl small_code_ (void);
79
80static int add_unit_info (IORB_CONFIGURATION _far *iorb_conf, int dta,
81 int a, int p, int d, int scsi_id);
82
83static void register_krnl_exit (void);
84
85/* ------------------------ global/static variables ------------------------ */
86
87int debug = 0; /* if > 0, print debug messages to COM1 */
88int thorough_scan = 1; /* if != 0, perform thorough PCI scan */
89int init_reset; /* if != 0, reset ports during init */
90int force_write_cache; /* if != 0, force write cache */
91int verbosity = 1; /* == 1 -> show sign-on banner
92 * > 1 -> show adapter info during boot */
93
94PFN Device_Help = 0; /* pointer to device helper entry point */
95ULONG RMFlags = 0; /* required by resource manager library */
96PFN RM_Help0 = NULL; /* required by resource manager library */
97PFN RM_Help3 = NULL; /* required by resource manager library */
98HDRIVER rm_drvh; /* resource manager driver handle */
99char rm_drvname[80]; /* driver name as returned by RM */
100USHORT add_handle; /* driver handle (RegisterDeviceClass) */
101UCHAR timer_pool[TIMER_POOL_SIZE]; /* timer pool */
102char drv_name[] = "OS2AHCI"; /* driver name as string */
103
104/* resource manager driver information structure */
105DRIVERSTRUCT rm_drvinfo = {
106 drv_name, /* driver name */
107 "AHCI SATA Driver", /* driver description */
108 "GNU", /* vendor name */
109 CMVERSION_MAJOR, /* RM interface version major */
110 CMVERSION_MINOR, /* RM interface version minor */
111 BLD_YEAR, BLD_MONTH, BLD_DAY, /* date */
112 0, /* driver flags */
113 DRT_ADDDM, /* driver type */
114 DRS_ADD, /* driver sub type */
115 NULL /* driver callback */
116};
117
118ULONG drv_lock; /* driver-level spinlock */
119IORB_QUEUE driver_queue; /* driver-level IORB queue */
120AD_INFO ad_infos[MAX_AD]; /* adapter information list */
121int ad_info_cnt; /* number of entries in ad_infos[] */
122u16 ad_ignore; /* bitmap with adapter indexes to ignore */
123int init_complete; /* if != 0, initialization has completed */
124
125/* adapter/port-specific options saved when parsing the command line */
126u8 emulate_scsi[MAX_AD][AHCI_MAX_PORTS];
127u8 enable_ncq[MAX_AD][AHCI_MAX_PORTS];
128u8 link_speed[MAX_AD][AHCI_MAX_PORTS];
129u8 link_power[MAX_AD][AHCI_MAX_PORTS];
130u8 track_size[MAX_AD][AHCI_MAX_PORTS];
131
132static char init_msg[] = "%s driver version %d.%02d\n";
133static char exit_msg[] = "%s driver *not* installed\n";
134
135/* ----------------------------- start of code ----------------------------- */
136
137/******************************************************************************
138 * OS/2 device driver main strategy function. This function is only used
139 * for initialization purposes; all other calls go directly to the adapter
140 * device driver's strategy function.
141 *
142 * NOTE: this is also used as the IDC entry point. We expect an IOCTL request
143 * packet for IDC calls, so they can be handled by gen_ioctl.
144 */
145USHORT _cdecl c_strat(RPH _far *req)
146{
147 u16 rc;
148
149 switch (req->Cmd) {
150
151 case CMDInitBase:
152 rc = init_drv((RPINITIN _far *) req);
153 break;
154
155 case CMDShutdown:
156 rc = exit_drv(((RPSAVERESTORE _far *) req)->FuncCode);
157 break;
158
159 case CMDGenIOCTL:
160 rc = gen_ioctl((RP_GENIOCTL _far *) req);
161 break;
162
163 case CMDINPUT:
164 rc = char_dev_input((RP_RWV _far *) req);
165 break;
166
167 default:
168 rc = STDON | STATUS_ERR_UNKCMD;
169 break;
170 }
171
172 return(rc);
173}
174
175/******************************************************************************
176 * Initialize the os2ahci driver. This includes command line parsing, scanning
177 * the PCI bus for supported AHCI adapters, etc.
178 */
179USHORT init_drv(RPINITIN _far *req)
180{
181 static int init_drv_called;
182 static int init_drv_failed;
183 RPINITOUT _far *rsp = (RPINITOUT _far *) req;
184 DDD_PARM_LIST _far *ddd_pl = (DDD_PARM_LIST _far *) req->InitArgs;
185 APIRET rmrc;
186 char _far *cmd_line;
187 char _far *s;
188 int adapter_index = -1;
189 int port_index = -1;
190 int invert_option;
191 int optval;
192 u16 vendor;
193 u16 device;
194
195 if (init_drv_called) {
196 /* This is the init call for the second (legacy IBMS506$) character
197 * device driver. If the main driver failed initialization, fail this
198 * one as well.
199 */
200 rsp->CodeEnd = (u16) end_of_code;
201 rsp->DataEnd = (u16) &end_of_data;
202 return(STDON | ((init_drv_failed) ? ERROR_I24_QUIET_INIT_FAIL : 0));
203 }
204 init_drv_called = 1;
205
206 /* set device helper entry point */
207 Device_Help = req->DevHlpEP;
208
209 /* create driver-level spinlock */
210 DevHelp_CreateSpinLock(&drv_lock);
211
212 /* initialize libc code */
213 init_libc();
214
215 /* register driver with resource manager */
216 if ((rmrc = RMCreateDriver(&rm_drvinfo, &rm_drvh)) != RMRC_SUCCESS) {
217 cprintf("%s: failed to register driver with resource manager (rc = %d)\n",
218 drv_name, rmrc);
219 goto init_fail;
220 }
221
222 /* parse command line parameters */
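  /* the command line lives in the same segment as the DDD parameter list;
   * build a far pointer from that segment plus the cmd_line_args offset */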
223 cmd_line = (char _far *) ((u32) ddd_pl & 0xffff0000l) + ddd_pl->cmd_line_args;
224
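  /* Summary of the options recognized by the switch below ('!' after '/'
   * inverts boolean options, e.g. /!n):
   *   /c:<hex>         COM port base address for debug output
   *   /d               increase debug level
   *   /g:<ven>:<dev>   add a generic AHCI PCI ID (hex)
   *   /t               thorough PCI scan
   *   /r               reset ports during initialization
   *   /f               force write cache
   *   /a:<n>, /p:<n>   select adapter/port for subsequent options
   *   /i               ignore the selected adapter
   *   /s               SCSI emulation for ATAPI devices
   *   /n               enable NCQ
   *   /ls:<n>, /lp:<n> link speed / link power management
   *   /4               4K sector geometry enhancement (track size 56)
   *   /v, /q           verbose / quiet
   */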
225 for (s = cmd_line; *s != 0; s++) {
226 if (*s == '/') {
227 if ((invert_option = (s[1] == '!')) != 0) {
228 s++;
229 }
230 s++;
231 switch (tolower(*s)) {
232
233 case '\0':
234 /* end of command line; can only happen if command line is incorrect */
235 cprintf("%s: incomplete command line option\n", drv_name);
236 goto init_fail;
237
238 case 'c':
239 /* set COM port base address for debug messages */
240 drv_parm_int(s, com_base, u16, 16);
241 break;
242
243 case 'd':
244 /* increase debug level */
245 debug++;
246 break;
247
248 case 'g':
249 /* add specified PCI ID as a supported generic AHCI adapter */
250 drv_parm_int(s, vendor, u16, 16);
251 s--;
252 drv_parm_int(s, device, u16, 16);
253 if (add_pci_id(vendor, device)) {
254 cprintf("%s: failed to add PCI ID %04x:%04x\n", drv_name, vendor, device);
255 goto init_fail;
256 }
257 thorough_scan = 1;
258 break;
259
260 case 't':
261 /* perform thorough PCI scan (i.e. look for individual supported PCI IDs) */
262 thorough_scan = !invert_option;
263 break;
264
265 case 'r':
266 /* reset ports during initialization */
267 init_reset = 1;
268 break;
269
270 case 'f':
271 /* force write cache regardless of IORB flags */
272 force_write_cache = 1;
273 break;
274
275 case 'a':
276 /* set adapter index for adapter and port-related options */
277 drv_parm_int(s, adapter_index, int, 10);
278 if (adapter_index < 0 || adapter_index >= MAX_AD) {
279 cprintf("%s: invalid adapter index (%d)\n", drv_name, adapter_index);
280 goto init_fail;
281 }
282 break;
283
284 case 'p':
285 /* set port index for port-related options */
286 drv_parm_int(s, port_index, int, 10);
287 if (port_index < 0 || port_index >= AHCI_MAX_PORTS) {
288 cprintf("%s: invalid port index (%d)\n", drv_name, port_index);
289 goto init_fail;
290 }
291 break;
292
293 case 'i':
294 /* ignore current adapter index */
295 if (adapter_index >= 0) {
296 ad_ignore |= 1U << adapter_index;
297 }
298 break;
299
300 case 's':
301 /* enable SCSI emulation for ATAPI devices */
302 set_port_option(emulate_scsi, !invert_option);
303 break;
304
305 case 'n':
306 /* enable NCQ */
307 set_port_option(enable_ncq, !invert_option);
308 break;
309
310 case 'l':
311 /* set link speed or power savings */
312 s++;
313 switch (tolower(*s)) {
314 case 's':
315 /* set link speed */
316 drv_parm_int(s, optval, int, 10);
317 set_port_option(link_speed, optval);
318 break;
319 case 'p':
320 /* set power management */
321 drv_parm_int(s, optval, int, 10);
322 set_port_option(link_power, optval);
323 break;
324 default:
325 cprintf("%s: invalid link parameter (%c)\n", drv_name, *s);
326 goto init_fail;
327 }
328 /* need to reset the port in order to establish link settings */
329 init_reset = 1;
330 break;
331
332 case '4':
333 /* enable 4K sector geometry enhancement (track size = 56) */
334 if (!invert_option) {
335 set_port_option(track_size, 56);
336 }
337 break;
338
339 case 'v':
340 /* be verbose during boot */
341 verbosity++;
342 break;
343
344 case 'q':
345 /* be quiet */
346 verbosity = -1000;
347 break;
348
349 default:
350 cprintf("%s: unknown option: /%c\n", drv_name, *s);
351 goto init_fail;
352 }
353 }
354 }
355
356 /* print initialization message */
357 ciprintf(init_msg, drv_name, VERSION / 100, VERSION % 100);
358
359#ifdef ECS_BUILD
360 ciprintf("This driver is licensed for use only in conjunction with eComStation.");
361#endif
362
363 if (debug) {
364 /* initialize com port for debug output */
365 init_com();
366 }
367
368 /* initialize trace buffer if applicable */
369 if (TRACE_ACTIVE) {
370 /* debug is on, but COM port is off -> use our trace buffer */
371 trace_init();
372 }
373
374 /* scan PCI bus for supported devices */
375 scan_pci_bus();
376
377 if (ad_info_cnt > 0) {
378 /* initialization succeeded and we found at least one AHCI adapter */
379 ADD_InitTimer(timer_pool, sizeof(timer_pool));
380 mdelay_cal();
381
382 if (DevHelp_RegisterDeviceClass(drv_name, (PFN) add_entry, 0, 1,
383 &add_handle)) {
384 cprintf("%s: couldn't register device class\n", drv_name);
385 goto init_fail;
386 }
387
388 /* allocate context hooks */
389 if (DevHelp_AllocateCtxHook(mk_NPFN(restart_hook), &restart_ctxhook_h) != 0 ||
390 DevHelp_AllocateCtxHook(mk_NPFN(reset_hook), &reset_ctxhook_h) != 0 ||
391 DevHelp_AllocateCtxHook(mk_NPFN(engine_hook), &engine_ctxhook_h)) {
392 cprintf("%s: failed to allocate task-time context hooks\n", drv_name);
393 goto init_fail;
394 }
395
396 rsp->CodeEnd = (u16) end_of_code;
397 rsp->DataEnd = (u16) &end_of_data;
398
399 /* register kernel exit routine for trap dumps */
400 register_krnl_exit();
401
402 return(STDON);
403
404 } else {
405 /* no adapters found */
406 ciprintf(" No adapters found.\n");
407 }
408
409init_fail:
410 /* initialization failed; set segment sizes to 0 and return error */
411 rsp->CodeEnd = 0;
412 rsp->DataEnd = 0;
413 init_drv_failed = 1;
414
415 /* free context hooks */
416 if (engine_ctxhook_h != 0) DevHelp_FreeCtxHook(engine_ctxhook_h);
417 if (reset_ctxhook_h != 0) DevHelp_FreeCtxHook(reset_ctxhook_h);
418 if (restart_ctxhook_h != 0) DevHelp_FreeCtxHook(restart_ctxhook_h);
419
420 if (rm_drvh != 0) {
421 /* remove driver from resource manager */
422 RMDestroyDriver(rm_drvh);
423 }
424
425 ciprintf(exit_msg, drv_name);
426 return(STDON | ERROR_I24_QUIET_INIT_FAIL);
427}
428
429/******************************************************************************
430 * Generic IOCTL via character device driver. IOCTLs are used to control the
431 * driver operation and to execute native ATA and ATAPI (SCSI) commands from
432 * ring 3 applications. On top of that, some predefined IOCTLs (e.g. SMART
433 * commands for ATA disks) are implemented here.
434 */
435USHORT gen_ioctl(RP_GENIOCTL _far *ioctl)
436{
437 dprintf("IOCTL 0x%x/0x%x\n", (u16) ioctl->Category, (u16) ioctl->Function);
438
439 switch (ioctl->Category) {
440
441 case OS2AHCI_IOCTL_CATEGORY:
442 switch (ioctl->Function) {
443
444 case OS2AHCI_IOCTL_GET_DEVLIST:
445 return(ioctl_get_devlist(ioctl));
446
447 case OS2AHCI_IOCTL_PASSTHROUGH:
448 return(ioctl_passthrough(ioctl));
449
450 }
451 break;
452
453 case OS2AHCI_IDC_CATEGORY:
454 switch (ioctl->Function) {
455
456 case OS2AHCI_IDC_BIOSMODE:
457 /* reconfigure adapters in BIOS/int13 mode; needed for generating
458 * trap dumps on some machines. This is called by ACPI.PSD.
459 *
460 * To enter BIOS mode, we flush all write caches, turn off interrupts
461 * and restore the BIOS configuration. This is exactly what
462 * apm_suspend() does.
463 */
464 apm_suspend();
465 return(STDON);
466
467 case OS2AHCI_IDC_BEEP:
468 /* IOCTL for IDC testing - just beep */
469 DevHelp_Beep(2000, 100);
470 return(STDON);
471 }
472 break;
473
474 case DSKSP_CAT_GENERIC:
475 return(ioctl_gen_dsk(ioctl));
476
477 case DSKSP_CAT_SMART:
478 return(ioctl_smart(ioctl));
479
480 }
481
482 return(STDON | STATUS_ERR_UNKCMD);
483}
484
485/******************************************************************************
486 * Read from character device. If tracing is on (internal ring buffer trace),
487 * we return data from the trace buffer; if not, we might return a device
488 * dump similar to IBM1S506.ADD/DANIS506.ADD (TODO).
489 */
490USHORT char_dev_input(RP_RWV _far *rwrb)
491{
492 if (TRACE_ACTIVE) {
493 return(trace_char_dev(rwrb));
494 }
495 return(STDON | STATUS_ERR_UNKCMD);
496}
497
498/******************************************************************************
499 * Device driver exit handler. This handler is called when OS/2 shuts down and
500 * flushes the write caches of all attached devices. Since this is effectively
501 * the same thing we do when suspending, we'll call out to the corresponding APM
502 * function.
503 *
504 * NOTE: Errors are ignored because there's no way we could stop the shutdown
505 * or do something about the error, unless retrying endlessly is
506 * considered an option.
507 */
508USHORT exit_drv(int func)
509{
510 dprintf("exit_drv(%d) called\n", func);
511
512 if (func == 0) {
513 /* we're only interested in the second phase of the shutdown */
514 return(STDON);
515 }
516
517 apm_suspend();
518 return(STDON);
519}
520
521/******************************************************************************
522 * ADD entry point. This is the main entry point for all ADD requests. Due to
523 * the asynchronous nature of ADD drivers, this function primarily queues the
524 * IORB(s) to the corresponding adapter or port queues, then triggers the
525 * state machine to initiate processing queued IORBs.
526 *
527 * NOTE: In order to prevent race conditions or engine stalls, certain rules
528 * around locking, unlocking and IORB handling in general have been
529 * established. Refer to the comments in "trigger_engine()" for
530 * details.
531 */
532void _cdecl _far _loadds add_entry(IORBH _far *first_iorb)
533{
534 IORBH _far *iorb;
535 IORBH _far *next = NULL;
536
537 spin_lock(drv_lock);
538
539 for (iorb = first_iorb; iorb != NULL; iorb = next) {
540 /* Queue this IORB. Queues primarily exist on port level but there are
541 * some requests which affect the whole driver, most notably
542 * IOCC_CONFIGURATION. In either case, adding the IORB to the driver or
543 * port queue will change the links, thus we need to save the original
544 * link in 'next'.
545 */
546 next = (iorb->RequestControl & IORB_CHAIN) ? iorb->pNxtIORB : 0;
547
548 iorb->Status = 0;
549 iorb->ErrorCode = 0;
550 memset(&iorb->ADDWorkSpace, 0x00, sizeof(ADD_WORKSPACE));
551
552 if (iorb_driver_level(iorb)) {
553 /* driver-level IORB */
554 iorb->UnitHandle = 0;
555 iorb_queue_add(&driver_queue, iorb);
556
557 } else {
558 /* port-level IORB */
559 int a = iorb_unit_adapter(iorb);
560 int p = iorb_unit_port(iorb);
561 int d = iorb_unit_device(iorb);
562
563 if (a >= ad_info_cnt ||
564 p > ad_infos[a].port_max ||
565 d > ad_infos[a].ports[p].dev_max ||
566 (ad_infos[a].port_map & (1UL << p)) == 0) {
567
568 /* unit handle outside of the allowed range */
569 dprintf("warning: IORB for %d.%d.%d out of range\n", a, p, d);
570 iorb->Status = IORB_ERROR;
571 iorb->ErrorCode = IOERR_CMD_SYNTAX;
572 iorb_complete(iorb);
573 continue;
574 }
575
576 iorb_queue_add(&ad_infos[a].ports[p].iorb_queue, iorb);
577 }
578 }
579
580 /* trigger state machine */
581 trigger_engine();
582
583 spin_unlock(drv_lock);
584}
585
586/******************************************************************************
587 * Trigger IORB queue engine. This is a wrapper function for trigger_engine_1()
588 * which will try to get all IORBs sent on their way a couple of times. If
589 * there are still IORBs ready for processing after this, this function will
590 * hand off to a context hook which will continue to trigger the engine until
591 * all IORBs have been sent.
592 *
593 * NOTE: While initialization has not completed (or during APM suspend/resume
594 * operations), this function will loop indefinitely because we can't
595 * rely on interrupt handlers or context hooks and complex IORBs
596 * requiring multiple requeues would eventually hang and time out if
597 * we stopped triggering here.
598 */
599void trigger_engine(void)
600{
601 int i;
602
603 for (i = 0; i < 3 || !init_complete; i++) {
604 if (trigger_engine_1() == 0) {
605 /* done -- all IORBs have been sent on their way */
606 return;
607 }
608 }
609
610 /* Something keeps bouncing; hand off to the engine context hook which will
611 * keep trying in the background.
612 */
613 DevHelp_ArmCtxHook(0, engine_ctxhook_h);
614}
615
616/******************************************************************************
617 * Trigger IORB queue engine in order to send commands in the driver/port IORB
618 * queues to the AHCI hardware. This function will return the number of IORBs
619 * sent. Keep in mind that IORBs might "bounce" if the adapter/port is not in
620 * a state to accept the command, thus it might take quite a few calls to get
621 * all IORBs on their way. This is why there's a wrapper function which tries
622 * it a few times, then hands off to a context hook which will keep trying in
623 * the background.
624 *
625 * IORBs might complete before send_iorb() has returned, at any time during
626 * interrupt processing or on another CPU on SMP systems. IORB completion
627 * means modifications to the corresponding IORB queue (the completed IORB
628 * is removed from the queue) thus we need to protect the IORB queues from
629 * race conditions. The safest approach short of keeping the driver-level
630 * spinlock acquired permanently is to keep it throughout this function and
631 * release it temporarily in send_iorb().
632 *
633 * This implies that the handler functions are fully responsible for acquiring
634 * the driver-level spinlock when they need it, and for releasing it again.
635 *
636 * As a rule of thumb, get the driver-level spinlock whenever accessing
637 * volatile variables (IORB queues, values in ad_info[], ...).
638 *
639 * Additional Notes:
640 *
641 * - This function is expected to be called with the spinlock acquired
642 *
643 * - Adapters can be flagged as 'busy' which means no new IORBs are sent (they
644 * just remain in the queue). This can be used to release the driver-level
645 * spinlock while making sure no new IORBs are going to hit the hardware.
646 * In order to prevent engine stalls, all handlers using this functionality
647 * need to invoke trigger_engine() after resetting the busy flag.
648 *
649 * - Driver-level IORBs are not synchronized by adapter-level 'busy' flags.
650 * However, the driver-level queue is worked "one entry at a time" which
651 * means that no new IORBs will be sent from the driver-level queue until
652 * the head element has completed processing. This means that driver-
653 * level IORB handlers don't need to protect against each other. But they
654 * do need to keep in mind interference with port-level IORBs:
655 *
656 * - Driver-level IORB handlers must obtain the spinlock and/or flag all
657 * adapters as 'busy' which are affected by the driver-level IORB
658 *
659 * - Driver-level IORB handlers must not access the hardware of a
660 * particular adapter if it's flagged as 'busy' by another IORB.
661 */
662int trigger_engine_1(void)
663{
664 IORBH _far *iorb;
665 IORBH _far *next;
666 int iorbs_sent = 0;
667 int a;
668 int p;
669
670 iorbs_sent = 0;
671
672 /* process driver-level IORBs */
673 if ((iorb = driver_queue.root) != NULL && !add_workspace(iorb)->processing) {
674 send_iorb(iorb);
675 iorbs_sent++;
676 }
677
678 /* process port-level IORBs */
679 for (a = 0; a < ad_info_cnt; a++) {
680 AD_INFO *ai = ad_infos + a;
681 if (ai->busy) {
682 /* adapter is busy; don't process any IORBs */
683 continue;
684 }
685 for (p = 0; p <= ai->port_max; p++) {
686 /* send all queued IORBs on this port */
687 next = NULL;
688 for (iorb = ai->ports[p].iorb_queue.root; iorb != NULL; iorb = next) {
689 next = iorb->pNxtIORB;
690 if (!add_workspace(iorb)->processing) {
691 send_iorb(iorb);
692 iorbs_sent++;
693 }
694 }
695 }
696 }
697
698 return(iorbs_sent);
699}
700
701/******************************************************************************
702 * Send a single IORB to the corresponding AHCI adapter/port. This is just a
703 * switch board for calling the corresponding iocc_*() handler function.
704 *
705 * NOTE: This function is expected to be called with the driver-level spinlock
706 * acquired. It will release it before calling any of the handler
707 * functions and re-acquire it when done.
708 */
709void send_iorb(IORBH _far *iorb)
710{
711 /* Mark IORB as "processing" before doing anything else. Once the IORB is
712 * marked as "processing", we can release the spinlock because subsequent
713 * invocations of trigger_engine() (e.g. at interrupt time) will ignore this
714 * IORB.
715 */
716 add_workspace(iorb)->processing = 1;
717 spin_unlock(drv_lock);
718
719 switch (iorb->CommandCode) {
720
721 case IOCC_CONFIGURATION:
722 iocc_configuration(iorb);
723 break;
724
725 case IOCC_DEVICE_CONTROL:
726 iocc_device_control(iorb);
727 break;
728
729 case IOCC_UNIT_CONTROL:
730 iocc_unit_control(iorb);
731 break;
732
733 case IOCC_GEOMETRY:
734 iocc_geometry(iorb);
735 break;
736
737 case IOCC_EXECUTE_IO:
738 iocc_execute_io(iorb);
739 break;
740
741 case IOCC_UNIT_STATUS:
742 iocc_unit_status(iorb);
743 break;
744
745 case IOCC_ADAPTER_PASSTHRU:
746 iocc_adapter_passthru(iorb);
747 break;
748
749 default:
750 /* unsupported call */
751 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
752 iorb_done(iorb);
753 break;
754 }
755
756 /* re-acquire spinlock before returning to trigger_engine() */
757 spin_lock(drv_lock);
758}
759
760/******************************************************************************
761 * Handle IOCC_CONFIGURATION requests.
762 */
763void iocc_configuration(IORBH _far *iorb)
764{
765 int a;
766
767 switch (iorb->CommandModifier) {
768
769 case IOCM_COMPLETE_INIT:
770 /* Complete initialization. From now on, we won't have to restore the BIOS
771 * configuration after each command and we're fully operational (i.e. will
772 * use interrupts, timers and context hooks instead of polling).
773 */
774 if (!init_complete) {
775 dprintf("leaving initialization mode\n");
776 for (a = 0; a < ad_info_cnt; a++) {
777 lock_adapter(ad_infos + a);
778 ahci_complete_init(ad_infos + a);
779 }
780 init_complete = 1;
781
782 /* release all adapters */
783 for (a = 0; a < ad_info_cnt; a++) {
784 unlock_adapter(ad_infos + a);
785 }
786
787 /* register APM hook */
788 apm_init();
789 }
790 iorb_done(iorb);
791 break;
792
793 case IOCM_GET_DEVICE_TABLE:
794 /* construct a device table */
795 iocm_device_table(iorb);
796 break;
797
798 default:
799 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
800 iorb_done(iorb);
801 break;
802 }
803}
804
805/******************************************************************************
806 * Handle IOCC_DEVICE_CONTROL requests.
807 */
808void iocc_device_control(IORBH _far *iorb)
809{
810 AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb);
811 IORBH _far *ptr;
812 IORBH _far *next = NULL;
813 int p = iorb_unit_port(iorb);
814 int d = iorb_unit_device(iorb);
815
816 switch (iorb->CommandModifier) {
817
818 case IOCM_ABORT:
819 /* abort all pending commands on specified port and device */
820 spin_lock(drv_lock);
821 for (ptr = ai->ports[p].iorb_queue.root; ptr != NULL; ptr = next) {
822 next = ptr->pNxtIORB;
823 /* move all matching IORBs to the abort queue */
824 if (ptr != iorb && iorb_unit_device(ptr) == d) {
825 iorb_queue_del(&ai->ports[p].iorb_queue, ptr);
826 iorb_queue_add(&abort_queue, ptr);
827 ptr->ErrorCode = IOERR_CMD_ABORTED;
828 }
829 }
830 spin_unlock(drv_lock);
831
832 /* trigger reset context hook which will finish the abort processing */
833 DevHelp_ArmCtxHook(0, reset_ctxhook_h);
834 break;
835
836 case IOCM_SUSPEND:
837 case IOCM_RESUME:
838 case IOCM_GET_QUEUE_STATUS:
839 /* Suspend/resume operations allow access to the hardware for other
840 * entities such as IBMIDECD.FLT. Since os2ahci implements both ATA
841 * and ATAPI in the same driver, this won't be required.
842 */
843 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
844 break;
845
846 case IOCM_LOCK_MEDIA:
847 case IOCM_UNLOCK_MEDIA:
848 case IOCM_EJECT_MEDIA:
849 /* unit control commands to lock, unlock and eject media */
850 /* will be supported later... */
851 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
852 break;
853
854 default:
855 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
856 break;
857 }
858
859 iorb_done(iorb);
860}
861
862/******************************************************************************
863 * Handle IOCC_UNIT_CONTROL requests.
864 */
865void iocc_unit_control(IORBH _far *iorb)
866{
867 IORB_UNIT_CONTROL _far *iorb_uc = (IORB_UNIT_CONTROL _far *) iorb;
868 int a = iorb_unit_adapter(iorb);
869 int p = iorb_unit_port(iorb);
870 int d = iorb_unit_device(iorb);
871
872 spin_lock(drv_lock);
873 switch (iorb->CommandModifier) {
874
875 case IOCM_ALLOCATE_UNIT:
876 /* allocate unit for exclusive access */
877 if (ad_infos[a].ports[p].devs[d].allocated) {
878 iorb_seterr(iorb, IOERR_UNIT_ALLOCATED);
879 } else {
880 ad_infos[a].ports[p].devs[d].allocated = 1;
881 }
882 break;
883
884 case IOCM_DEALLOCATE_UNIT:
885 /* deallocate exclusive access to unit */
886 if (!ad_infos[a].ports[p].devs[d].allocated) {
887 iorb_seterr(iorb, IOERR_UNIT_NOT_ALLOCATED);
888 } else {
889 ad_infos[a].ports[p].devs[d].allocated = 0;
890 }
891 break;
892
893 case IOCM_CHANGE_UNITINFO:
894 /* Change unit (device) information. One reason for this IOCM is the
895 * interface for filter device drivers: a filter device driver can
896 * either change existing UNITINFOs or permanently allocate units
897 * and fabricate new [logical] units; the former is the reason why we
898 * must store the pointer to the updated UNITINFO for subsequent
899 * IOCC_CONFIGURATION/IOCM_GET_DEVICE_TABLE calls.
900 */
901 if (!ad_infos[a].ports[p].devs[d].allocated) {
902 iorb_seterr(iorb, IOERR_UNIT_NOT_ALLOCATED);
903 break;
904 }
905 ad_infos[a].ports[p].devs[d].unit_info = iorb_uc->pUnitInfo;
906 break;
907
908 default:
909 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
910 break;
911 }
912
913 spin_unlock(drv_lock);
914 iorb_done(iorb);
915}
916
917/******************************************************************************
918 * Scan all ports for AHCI devices and construct a DASD device table.
919 *
920 * NOTES: This function may be called multiple times. Only the first
921 * invocation will actually scan for devices; all subsequent calls will
922 * merely return the results of the initial scan, potentially augmented
923 * by modified unit infos after IOCC_UNIT_CONTROL/IOCM_CHANGE_UNITINFO
924 * requests.
925 *
926 * In order to support applications that can't deal with ATAPI devices
927 * (i.e. need a SCSI adapter), os2ahci will optionally report ATAPI
928 * devices as SCSI devices. The corresponding SCSI adapter doesn't
929 * really exist and is only reported here for the IOCM_GET_DEVICE_TABLE
930 * request. The units attached to this adapter will use the real HW
931 * unit IDs, thus we'll never receive a command specific to the
932 * emulated SCSI adapter and won't need to set up any sort of entity
933 * for it; the only purpose of the emulated SCSI adapter is to pass the
934 * bus type "AI_DEVBUS_SCSI_2" upstream, and the emulated units, of
935 * course. The emulated SCSI target IDs are allocated as follows:
936 *
937 * 0 the virtual adapter
938 * 1..n emulated devices; SCSI target ID increments sequentially
939 */
940void iocm_device_table(IORBH _far *iorb)
941{
942 IORB_CONFIGURATION _far *iorb_conf;
943 DEVICETABLE _far *dt;
944 char _far *pos;
945 int scsi_units = 0;
946 int scsi_id = 1;
947 int rc;
948 int dta;
949 int a;
950 int p;
951 int d;
952
953 iorb_conf = (IORB_CONFIGURATION _far *) iorb;
954 dt = iorb_conf->pDeviceTable;
955
956 spin_lock(drv_lock);
957
958 /* initialize device table header */
959 dt->ADDLevelMajor = ADD_LEVEL_MAJOR;
960 dt->ADDLevelMinor = ADD_LEVEL_MINOR;
961 dt->ADDHandle = add_handle;
962 dt->TotalAdapters = ad_info_cnt + 1;
963
964 /* set start of adapter and device information tables */
965 pos = (char _far *) (dt->pAdapter + dt->TotalAdapters);
966
967 /* go through all adapters, including the virtual SCSI adapter */
968 for (dta = 0; dta < dt->TotalAdapters; dta++) {
969 ADAPTERINFO _far *ptr = (ADAPTERINFO _far *) pos;
970
971 /* sanity check for sufficient space in device table */
972 if ((u32) (ptr + 1) - (u32) dt > iorb_conf->DeviceTableLen) {
973 dprintf("error: device table provided by DASD too small\n");
974 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE);
975 goto iocm_device_table_done;
976 }
977
978 dt->pAdapter[dta] = (ADAPTERINFO _near *) ((u32) ptr & 0xffff);
979 memset(ptr, 0x00, sizeof(*ptr));
980
981 ptr->AdapterIOAccess = AI_IOACCESS_BUS_MASTER;
982 ptr->AdapterHostBus = AI_HOSTBUS_OTHER | AI_BUSWIDTH_32BIT;
983 ptr->AdapterFlags = AF_16M | AF_HW_SCATGAT;
984 ptr->MaxHWSGList = AHCI_MAX_SG / 2; /* AHCI S/G elements are 22 bits */
985
986 if (dta < ad_info_cnt) {
987 /* this is a physical AHCI adapter */
988 AD_INFO *ad_info = ad_infos + dta;
989
990 ptr->AdapterDevBus = AI_DEVBUS_ST506 | AI_DEVBUS_32BIT;
991 sprintf(ptr->AdapterName, "AHCI_%d", dta);
992
993 if (!ad_info->port_scan_done) {
994 /* first call; need to scan AHCI hardware for devices */
995 if (ad_info->busy) {
996 dprintf("error: port scan requested while adapter was busy\n");
997 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE);
998 goto iocm_device_table_done;
999 }
1000 ad_info->busy = 1;
1001 spin_unlock(drv_lock);
1002 rc = ahci_scan_ports(ad_info);
1003 spin_lock(drv_lock);
1004 ad_info->busy = 0;
1005
1006 if (rc != 0) {
1007 dprintf("error: port scan failed on adapter #%d\n", dta);
1008 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE);
1009 goto iocm_device_table_done;
1010 }
1011 ad_info->port_scan_done = 1;
1012 }
1013
1014 /* insert physical (i.e. AHCI) devices into the device table */
1015 for (p = 0; p <= ad_info->port_max; p++) {
1016 for (d = 0; d <= ad_info->ports[p].dev_max; d++) {
1017 if (ad_info->ports[p].devs[d].present) {
1018 if (ad_info->ports[p].devs[d].atapi && emulate_scsi[dta][p]) {
1019 /* only report this unit as SCSI unit */
1020 scsi_units++;
1021 continue;
1022 }
1023 if (add_unit_info(iorb_conf, dta, dta, p, d, 0)) {
1024 goto iocm_device_table_done;
1025 }
1026 }
1027 }
1028 }
1029
1030 } else {
1031 /* this is the virtual SCSI adapter */
1032 if (scsi_units == 0) {
1033 /* not a single unit to be emulated via SCSI */
1034 dt->TotalAdapters--;
1035 break;
1036 }
1037
1038 /* set adapter name and bus type to mimic a SCSI controller */
1039 ptr->AdapterDevBus = AI_DEVBUS_SCSI_2 | AI_DEVBUS_16BIT;
1040 sprintf(ptr->AdapterName, "AHCI_SCSI_0");
1041
1042 /* add all ATAPI units to be emulated by this virtual adapter */
1043 for (a = 0; a < ad_info_cnt; a++) {
1044 AD_INFO *ad_info = ad_infos + a;
1045
1046 for (p = 0; p <= ad_info->port_max; p++) {
1047 for (d = 0; d <= ad_info->ports[p].dev_max; d++) {
1048 if (ad_info->ports[p].devs[d].present &&
1049 ad_info->ports[p].devs[d].atapi &&
1050 emulate_scsi[a][p]) {
1051 if (add_unit_info(iorb_conf, dta, a, p, d, scsi_id++)) {
1052 goto iocm_device_table_done;
1053 }
1054 }
1055 }
1056 }
1057 }
1058 }
1059
1060 /* calculate offset for next adapter */
1061 pos = (char _far *) (ptr->UnitInfo + ptr->AdapterUnits);
1062 }
1063
1064iocm_device_table_done:
1065 spin_unlock(drv_lock);
1066 iorb_done(iorb);
1067}
1068
1069/******************************************************************************
1070 * Handle IOCC_GEOMETRY requests.
1071 */
1072void iocc_geometry(IORBH _far *iorb)
1073{
1074 switch (iorb->CommandModifier) {
1075
1076 case IOCM_GET_MEDIA_GEOMETRY:
1077 case IOCM_GET_DEVICE_GEOMETRY:
1078 add_workspace(iorb)->idempotent = 1;
1079 ahci_get_geometry(iorb);
1080 break;
1081
1082 default:
1083 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1084 iorb_done(iorb);
1085 }
1086}
1087
1088/******************************************************************************
1089 * Handle IOCC_EXECUTE_IO requests.
1090 */
1091void iocc_execute_io(IORBH _far *iorb)
1092{
1093 switch (iorb->CommandModifier) {
1094
1095 case IOCM_READ:
1096 add_workspace(iorb)->idempotent = 1;
1097 ahci_read(iorb);
1098 break;
1099
1100 case IOCM_READ_VERIFY:
1101 add_workspace(iorb)->idempotent = 1;
1102 ahci_verify(iorb);
1103 break;
1104
1105 case IOCM_WRITE:
1106 add_workspace(iorb)->idempotent = 1;
1107 ahci_write(iorb);
1108 break;
1109
1110 case IOCM_WRITE_VERIFY:
1111 add_workspace(iorb)->idempotent = 1;
1112 ahci_write(iorb);
1113 break;
1114
1115 default:
1116 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1117 iorb_done(iorb);
1118 }
1119}
1120
1121/******************************************************************************
1122 * Handle IOCC_UNIT_STATUS requests.
1123 */
1124void iocc_unit_status(IORBH _far *iorb)
1125{
1126 switch (iorb->CommandModifier) {
1127
1128 case IOCM_GET_UNIT_STATUS:
1129 add_workspace(iorb)->idempotent = 1;
1130 ahci_unit_ready(iorb);
1131 break;
1132
1133 default:
1134 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1135 iorb_done(iorb);
1136 }
1137}
1138
1139/******************************************************************************
1140 * Handle IOCC_ADAPTER_PASSTHROUGH requests.
1141 */
1142void iocc_adapter_passthru(IORBH _far *iorb)
1143{
1144 switch (iorb->CommandModifier) {
1145
1146 case IOCM_EXECUTE_CDB:
1147 add_workspace(iorb)->idempotent = 0;
1148 ahci_execute_cdb(iorb);
1149 break;
1150
1151 case IOCM_EXECUTE_ATA:
1152 add_workspace(iorb)->idempotent = 0;
1153 ahci_execute_ata(iorb);
1154 break;
1155
1156 default:
1157 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1158 iorb_done(iorb);
1159 }
1160}
1161
1162/******************************************************************************
1163 * Add an IORB to the specified queue. This function must be called with the
1164 * adapter-level spinlock acquired.
1165 */
1166void iorb_queue_add(IORB_QUEUE _far *queue, IORBH _far *iorb)
1167{
1168 if (iorb_priority(iorb)) {
1169 /* priority IORB; insert at first position */
1170 iorb->pNxtIORB = queue->root;
1171 queue->root = iorb;
1172
1173 } else {
1174 /* append IORB to end of queue */
1175 iorb->pNxtIORB = NULL;
1176
1177 if (queue->root == NULL) {
1178 queue->root = iorb;
1179 } else {
1180 queue->tail->pNxtIORB = iorb;
1181 }
1182 queue->tail = iorb;
1183 }
1184
1185 if (debug) {
1186 /* determine queue type (local, driver, abort or port) and minimum debug
1187 * level; otherwise, queue debug prints can become really confusing.
1188 */
1189 char *queue_type;
1190 int min_debug = 1;
1191
1192 if ((u32) queue >> 16 == (u32) (void _far *) &queue >> 16) {
1193 /* this queue is on the stack */
1194 queue_type = "local";
1195 min_debug = 2;
1196
1197 } else if (queue == &driver_queue) {
1198 queue_type = "driver";
1199
1200 } else if (queue == &abort_queue) {
1201 queue_type = "abort";
1202 min_debug = 2;
1203
1204 } else {
1205 queue_type = "port";
1206 }
1207
1208 if (debug >= min_debug) {
1209 printf("IORB %Fp queued (cmd = %d/%d, queue = %Fp [%s], timeout = %ld)\n",
1210 iorb, iorb->CommandCode, iorb->CommandModifier, queue, queue_type,
1211 iorb->Timeout);
1212 }
1213 }
1214}
1215
1216/******************************************************************************
1217 * Remove an IORB from the specified queue. This function must be called with
1218 * the adapter-level spinlock acquired.
1219 */
1220int iorb_queue_del(IORB_QUEUE _far *queue, IORBH _far *iorb)
1221{
1222 IORBH _far *_iorb;
1223 IORBH _far *_prev = NULL;
1224 int found = 0;
1225
1226 for (_iorb = queue->root; _iorb != NULL; _iorb = _iorb->pNxtIORB) {
1227 if (_iorb == iorb) {
1228 /* found the IORB to be removed */
1229 if (_prev != NULL) {
1230 _prev->pNxtIORB = _iorb->pNxtIORB;
1231 } else {
1232 queue->root = _iorb->pNxtIORB;
1233 }
1234 if (_iorb == queue->tail) {
1235 queue->tail = _prev;
1236 }
1237 found = 1;
1238 break;
1239 }
1240 _prev = _iorb;
1241 }
1242
1243 if (found) {
1244 ddprintf("IORB %Fp removed (queue = %Fp)\n", iorb, queue);
1245 } else {
1246 dprintf("IORB %Fp not found in queue %Fp\n", iorb, queue);
1247 }
1248
1249 return(!found);
1250}
1251
1252/******************************************************************************
1253 * Set the error code in the specified IORB
1254 *
1255 * NOTE: This function does *not* call iorb_done(). It merely sets the IORB
1256 * status to the specified error code.
1257 */
1258void iorb_seterr(IORBH _far *iorb, USHORT error_code)
1259{
1260 iorb->ErrorCode = error_code;
1261 iorb->Status |= IORB_ERROR;
1262}
1263
1264/******************************************************************************
1265 * Mark the specified IORB as done and notify the asynchronous post function,
1266 * if any. The IORB is also removed from the corresponding IORB queue.
1267 *
1268 * NOTES: This function does not clear the Status field; it merely adds the
1269 * IORB_DONE flag.
1270 *
1271 * This function is expected to be called *without* the corresponding
1272 * driver-level drv_lock acquired. It will acquire the spinlock before
1273 * updating the IORB queue and release it before notifying the upstream
1274 * code in order to prevent deadlocks.
1275 *
1276 * Due to this logic, this function is only good for simple task-time
1277 * completions. Functions working on lists of IORBs (such as interrupt
1278 * handlers or context hooks) should call iorb_complete() directly and
1279 * implement their own logic for removing the IORB from the port queue.
1280 * See abort_ctxhook() for an example.
1281 */
1282void iorb_done(IORBH _far *iorb)
1283{
1284 int a = iorb_unit_adapter(iorb);
1285 int p = iorb_unit_port(iorb);
1286
1287 /* remove IORB from corresponding queue */
1288 spin_lock(drv_lock);
1289 if (iorb_driver_level(iorb)) {
1290 iorb_queue_del(&driver_queue, iorb);
1291 } else {
1292 iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, iorb);
1293 }
1294 aws_free(add_workspace(iorb));
1295 spin_unlock(drv_lock);
1296
1297 iorb_complete(iorb);
1298}
1299
1300/******************************************************************************
1301 * Complete an IORB. This should be called without the adapter-level spinlock
1302 * to allow the IORB completion routine to perform whatever processing it
1303 * requires. This implies that the IORB should no longer be in any global
1304 * queue because the IORB completion routine may well reuse the IORB and send
1305 * the next request to us before even returning from this function.
1306 */
1307void iorb_complete(IORBH _far *iorb)
1308{
1309 iorb->Status |= IORB_DONE;
1310
1311 dprintf("IORB %Fp complete (status = 0x%04x, error = 0x%04x)\n",
1312 iorb, iorb->Status, iorb->ErrorCode);
1313
1314 if (iorb->RequestControl & IORB_ASYNC_POST) {
1315 iorb->NotifyAddress(iorb);
1316 }
1317}
1318
1319/******************************************************************************
1320 * Requeue the specified IORB such that it will be sent downstream for
1321 * processing again. This includes freeing all resources currently allocated
1322 * (timer, buffer, ...) and resetting the flags to 0. The driver-level
1323 * spinlock must be acquired when calling this function.
1324 *
1325 * The following flags are preserved:
1326 * - no_ncq
1327 */
1328void iorb_requeue(IORBH _far *iorb)
1329{
1330 ADD_WORKSPACE _far *aws = add_workspace(iorb);
1331 u16 no_ncq = aws->no_ncq;
1332 u16 unaligned = aws->unaligned;
1333 u16 retries = aws->retries;
1334
1335 aws_free(aws);
1336 memset(aws, 0x00, sizeof(*aws));
1337
1338 aws->no_ncq = no_ncq;
1339 aws->unaligned = unaligned;
1340 aws->retries = retries;
1341}
1342
1343/******************************************************************************
1344 * Free resources in ADD workspace (timer, buffer, ...). This function should
1345 * be called with the spinlock held to prevent race conditions.
1346 */
1347void aws_free(ADD_WORKSPACE _far *aws)
1348{
1349 if (aws->timer != 0) {
1350 ADD_CancelTimer(aws->timer);
1351 aws->timer = 0;
1352 }
1353
1354 if (aws->buf != NULL) {
1355 free(aws->buf);
1356 aws->buf = NULL;
1357 }
1358}
1359
1360/******************************************************************************
1361 * Lock the adapter, waiting for availability if necessary. This is expected
1362 * to be called at task/request time without the driver-level spinlock
1363 * acquired. Don't call at interrupt time.
1364 */
1365void lock_adapter(AD_INFO *ai)
1366{
1367 spin_lock(drv_lock);
1368 while (ai->busy) {
1369 spin_unlock(drv_lock);
1370 msleep(250);
1371 spin_lock(drv_lock);
1372 }
1373 ai->busy = 1;
1374 spin_unlock(drv_lock);
1375}
1376
1377/******************************************************************************
1378 * Unlock adapter (i.e. reset busy flag)
1379 */
1380void unlock_adapter(AD_INFO *ai)
1381{
1382 ai->busy = 0;
1383}
1384
1385/******************************************************************************
1386 * Timeout handler for I/O commands. Since timeout handling can involve
1387 * lengthy operations like port resets, the main code is located in a
1388 * separate function which is invoked via a context hook.
1389 */
1390void _cdecl _far timeout_callback(ULONG timer_handle, ULONG p1,
1391 ULONG p2)
1392{
1393 IORBH _far *iorb = (IORBH _far *) p1;
1394 int a = iorb_unit_adapter(iorb);
1395 int p = iorb_unit_port(iorb);
1396
1397 ADD_CancelTimer(timer_handle);
1398 dprintf("timeout for IORB %Fp\n", iorb);
1399
1400 /* Move the timed-out IORB to the abort queue. Since it's possible that the
1401 * IORB has completed after the timeout has expired but before we got to
1402 * this line of code, we'll check the return code of iorb_queue_del(): If it
1403 * returns an error, the IORB must have completed a few microseconds ago and
1404 * there is no timeout.
1405 */
1406 spin_lock(drv_lock);
1407 if (iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, iorb) == 0) {
1408 iorb_queue_add(&abort_queue, iorb);
1409 iorb->ErrorCode = IOERR_ADAPTER_TIMEOUT;
1410 }
1411 spin_unlock(drv_lock);
1412
1413 /* Trigger abort processing function. We don't really care whether this
1414 * succeeds because the only reason why it would fail should be multiple
1415 * calls to DevHelp_ArmCtxHook() before the context hook had a chance to
1416 * start executing, which leaves two scenarios:
1417 *
1418 * - We succeeded in arming the context hook. Fine.
1419 *
1420 * - We armed the context hook a second time before it had a chance to
1421 * start executing. In this case, the already scheduled context hook
1422 * will process our IORB as well.
1423 */
1424 DevHelp_ArmCtxHook(0, reset_ctxhook_h);
1425
1426 /* Set up a watchdog timer which calls the context hook manually in case
1427 * some kernel thread is looping around the IORB_COMPLETE status bit
1428 * without yielding the CPU (kernel threads don't preempt). This shouldn't
1429 * happen per design because kernel threads are supposed to yield but it
1430 * does in the early boot phase.
1431 */
1432 ADD_StartTimerMS(&th_reset_watchdog, 5000, (PFN) reset_watchdog, 0, 0);
1433}
1434
1435/******************************************************************************
1436 * Reset handler watchdog. If a timeout occurs, a context hook is armed which
1437 * will execute as soon as a kernel thread yields the CPU. However, some
1438 * kernel components won't yield the CPU during the early boot phase and the
1439 * only way to kick some sense into those components is to run the context
1440 * hook right inside this timer callback. Not exactly pretty, especially
1441 * considering the fact that context hooks were implemented to prevent running
1442 * lengthy operations like a port reset at interrupt time, but without this
1443 * watchdog mechanism we run the risk of getting completely stalled by device
1444 * problems during the early boot phase.
1445 */
1446void _cdecl _far reset_watchdog(ULONG timer_handle, ULONG p1,
1447 ULONG p2)
1448{
1449 /* reset watchdog timer */
1450 ADD_CancelTimer(timer_handle);
1451 dprintf("reset watchdog invoked\n");
1452
1453 /* call context hook manually */
1454 reset_ctxhook(0);
1455}
1456
1457/******************************************************************************
1458 * small_code_ - this dummy func resolves the undefined reference linker
1459 * error that occurs when linking WATCOM objects with DDK's link.exe
1460 */
1461void _cdecl small_code_(void)
1462{
1463}
1464
1465/******************************************************************************
1466 * Add unit info to ADAPTERINFO array (IOCM_GET_DEVICE_TABLE requests). The
1467 * adapter info array in the device table, dt->pAdapter[], is expected to be
1468 * initialized for the specified index (dta).
1469 *
1470 * Please note that the device table adapter index, dta, is not always equal
1471 * to the physical adapter index, a: if SCSI emulation has been activated, the
1472 * last reported adapter is a virtual SCSI adapter and the physical adapter
1473 * indexes for those units are, of course, different from the device table
1474 * index of the virtual SCSI adapter.
1475 */
1476static int add_unit_info(IORB_CONFIGURATION _far *iorb_conf, int dta,
1477 int a, int p, int d, int scsi_id)
1478{
1479 DEVICETABLE _far *dt = iorb_conf->pDeviceTable;
1480 ADAPTERINFO _far *ptr = (ADAPTERINFO _far *) (((u32) dt & 0xffff0000U) +
1481 (u16) dt->pAdapter[dta]);
1482 UNITINFO _far *ui = ptr->UnitInfo + ptr->AdapterUnits;
1483 AD_INFO *ai = ad_infos + a;
1484
1485 if ((u32) (ui + 1) - (u32) dt > iorb_conf->DeviceTableLen) {
1486 dprintf("error: device table provided by DASD too small\n");
1487 iorb_seterr(&iorb_conf->iorbh, IOERR_CMD_SW_RESOURCE);
1488 return(-1);
1489 }
1490
1491 if (ai->ports[p].devs[d].unit_info == NULL) {
1492 /* provide original information about this device (unit) */
1493 memset(ui, 0x00, sizeof(*ui));
1494 ui->AdapterIndex = dta; /* device table adapter index */
1495 ui->UnitHandle = iorb_unit(a, p, d); /* handle from physical adapter index */
1496 ui->UnitIndex = ptr->AdapterUnits;
1497 ui->UnitType = ai->ports[p].devs[d].dev_type;
1498 ui->QueuingCount = ai->ports[p].devs[d].ncq_max;
1499 if (ai->ports[p].devs[d].removable) {
1500 ui->UnitFlags |= UF_REMOVABLE;
1501 }
1502 if (scsi_id > 0) {
1503 /* set fake SCSI ID for this unit */
1504 ui->UnitSCSITargetID = scsi_id;
1505 }
1506 } else {
1507 /* copy updated device (unit) information (IOCM_CHANGE_UNITINFO) */
1508 memcpy(ui, ai->ports[p].devs[d].unit_info, sizeof(*ui));
1509 }
1510
1511 ptr->AdapterUnits++;
1512 return(0);
1513}
1514
1515/*******************************************************************************
1516 * Register kernel exit handler for trap dumps. Our exit handler will be called
1517 * right before the kernel starts a dump; that's where we reset the controller
1518 * so it supports BIOS int13 I/O calls.
1519 */
1520static void register_krnl_exit(void)
1521{
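  /* Register interface used by the inline assembly below: AX = add/remove
   * flag, CX = kernel exit type (int13), BX:SI = far address of the
   * asm_krnl_exit handler, DL = DevHlp function number.
   */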
1522 _asm {
1523
1524 push ds
1525 push es
1526 push bx
1527 push si
1528 push di
1529
1530 mov ax, FLAG_KRNL_EXIT_ADD
1531 mov cx, TYPE_KRNL_EXIT_INT13
1532 mov bx, SEG asm_krnl_exit
1533 mov si, OFFSET asm_krnl_exit
1534 mov dl, DevHlp_RegisterKrnlExit
1535 call Device_Help
1536
1537 pop di
1538 pop si
1539 pop bx
1540 pop es
1541 pop ds
1542 }
1543
1544 dprintf("Registered kernel exit routine for INT13 mode\n");
1545}