source: trunk/src/os2ahci/os2ahci.c@ 174

Last change on this file since 174 was 174, checked in by David Azarewicz, 12 years ago

Fix for hardware that reports incorrect status.
Now report both real and fake devices when SCSI emulation is enabled.

File size: 52.1 KB
1/******************************************************************************
2 * os2ahci.c - main file for os2ahci driver
3 *
4 * Copyright (c) 2011 thi.guten Software Development
5 * Copyright (c) 2011 Mensys B.V.
6 * Copyright (c) 2013 David Azarewicz
7 *
8 * Authors: Christian Mueller, Markus Thielen
9 *
10 * Parts copied from/inspired by the Linux AHCI driver;
11 * those parts are (c) Linux AHCI/ATA maintainers
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include "os2ahci.h"
29#include "ioctl.h"
30#include "version.h"
31
32/* -------------------------- macros and constants ------------------------- */
33
34/* parse integer command line parameter */
35#define drv_parm_int(s, value, type, radix) \
36 { \
37 char _far *_ep; \
38 if ((s)[1] != ':') { \
39 cprintf("%s: missing colon (:) after /%c\n", drv_name, *(s)); \
40 goto init_fail; \
41 } \
42 value = (type) strtol((s) + 2, \
43 (const char _far* _far*) &_ep, \
44 radix); \
45 s = _ep; \
46 }
47
48#define drv_parm_int_optional(s, value, type, radix) \
49 { \
50 char _far *_ep; \
51 if ((s)[1] == ':') { \
52 value = (type) strtol((s) + 2, (const char _far* _far*) &_ep, radix); \
53 s = _ep; \
54 } else { \
55 value++; \
56 } \
57 }
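
/* Illustrative example (a sketch, not part of the driver): given the command
 * line fragment "/c:3f8", the option handler below expands
 *
 *   drv_parm_int(s, com_base, u16, 16);
 *
 * which checks for the ':' after the option letter, parses "3f8" as a hex
 * number into com_base and leaves 's' on the first unparsed character.
 * drv_parm_int_optional() behaves the same when a ':' is present and merely
 * increments the target (e.g. the debug level for a bare "/d") otherwise.
 */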
58
59/* set two-dimensional array of port options */
60#define set_port_option(opt, val) \
61 if (adapter_index == -1) { \
62 /* set option for all adapters and ports */ \
63 memset(opt, val, sizeof(opt)); \
64 } else if (port_index == -1) { \
65 /* set option for all ports on current adapter */ \
66 memset(opt[adapter_index], val, sizeof(*opt)); \
67 } else { \
68 /* set option for specific port */ \
69 opt[adapter_index][port_index] = val; \
70 }
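
/* Illustrative example (not part of the driver): after "/a:1 /p:2 /n" the
 * parser calls set_port_option(enable_ncq, 1) with adapter_index == 1 and
 * port_index == 2, so only enable_ncq[1][2] is set. Without a preceding /a
 * or /p both indexes are still -1 and the same call memset()s the entire
 * array, i.e. the option applies to every port of every adapter.
 */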
71
72/* constants for the kernel exit routine, which is not defined in the DDK
73 * headers; see register_krnl_exit() func */
74#define DevHlp_RegisterKrnlExit 0x006f
75
76#define FLAG_KRNL_EXIT_ADD 0x1000
77#define FLAG_KRNL_EXIT_REMOVE 0x2000
78
79#define TYPE_KRNL_EXIT_NMI 0x0000 /* non masked interrupts */
80#define TYPE_KRNL_EXIT_SFF 0x0001 /* system fatal faults */
81#define TYPE_KRNL_EXIT_PROCDUMP 0x0002
82#define TYPE_KRNL_EXIT_DYN 0x0003
83#define TYPE_KRNL_EXIT_INT13 0x0004 /* enable int13 IO */
84
85/* ------------------------ typedefs and structures ------------------------ */
86
87/* -------------------------- function prototypes -------------------------- */
88
89void _cdecl small_code_ (void);
90
91static int add_unit_info (IORB_CONFIGURATION _far *iorb_conf, int dta,
92 int a, int p, int d, int scsi_id);
93
94static void register_krnl_exit (void);
95
96/* ------------------------ global/static variables ------------------------ */
97
98int debug = 0; /* if > 0, print debug messages to COM1 */
99int thorough_scan = 1; /* if != 0, perform thorough PCI scan */
100int init_reset = 1; /* if != 0, reset ports during init */
101int force_write_cache; /* if != 0, force write cache */
102int verbosity = 0; /* default is quiet. 1=show sign-on banner, >1=show adapter info during boot */
103int use_lvm_info = 1;
104int wrap_trace_buffer = 0;
105long com_baud = 0;
106
107PFN Device_Help = 0; /* pointer to device helper entry point */
108ULONG RMFlags = 0; /* required by resource manager library */
109PFN RM_Help0 = NULL; /* required by resource manager library */
110PFN RM_Help3 = NULL; /* required by resource manager library */
111HDRIVER rm_drvh; /* resource manager driver handle */
112char rm_drvname[80]; /* driver name as returned by RM */
113USHORT add_handle; /* driver handle (RegisterDeviceClass) */
114UCHAR timer_pool[TIMER_POOL_SIZE]; /* timer pool */
115char drv_name[] = "OS2AHCI"; /* driver name as string */
116
117/* resource manager driver information structure */
118DRIVERSTRUCT rm_drvinfo = {
119 drv_name, /* driver name */
120 "AHCI SATA Driver", /* driver description */
121 DVENDOR, /* vendor name */
122 CMVERSION_MAJOR, /* RM interface version major */
123 CMVERSION_MINOR, /* RM interface version minor */
124 BLD_YEAR, BLD_MONTH, BLD_DAY, /* date */
125 0, /* driver flags */
126 DRT_ADDDM, /* driver type */
127 DRS_ADD, /* driver sub type */
128 NULL /* driver callback */
129};
130
131ULONG drv_lock; /* driver-level spinlock */
132IORB_QUEUE driver_queue; /* driver-level IORB queue */
133AD_INFO ad_infos[MAX_AD]; /* adapter information list */
134int ad_info_cnt; /* number of entries in ad_infos[] */
135u16 ad_ignore; /* bitmap with adapter indexes to ignore */
136int init_complete; /* if != 0, initialization has completed */
137int suspended;
138int resume_sleep_flag;
139
140/* adapter/port-specific options saved when parsing the command line */
141u8 emulate_scsi[MAX_AD][AHCI_MAX_PORTS];
142u8 enable_ncq[MAX_AD][AHCI_MAX_PORTS];
143u8 link_speed[MAX_AD][AHCI_MAX_PORTS];
144u8 link_power[MAX_AD][AHCI_MAX_PORTS];
145u8 track_size[MAX_AD][AHCI_MAX_PORTS];
146u8 port_ignore[MAX_AD][AHCI_MAX_PORTS];
147
148static char init_msg[] = "%s driver version %d.%02d\n";
149static char exit_msg[] = "%s driver *not* installed\n";
150char BldLevel[] = BLDLEVEL;
151
152/* ----------------------------- start of code ----------------------------- */
153
154/******************************************************************************
155 * OS/2 device driver main strategy function.
156 *
157 * NOTE: this is also used as the IDC entry point. We expect an IOCTL request
158 * packet for IDC calls, so they can be handled by gen_ioctl.
159 */
160USHORT _cdecl c_strat(RPH _far *req)
161{
162 u16 rc;
163
164 switch (req->Cmd) {
165
166 case CMDInitBase:
167 rc = init_drv((RPINITIN _far *) req);
168 break;
169
170 case CMDShutdown:
171 rc = exit_drv(((RPSAVERESTORE _far *) req)->FuncCode);
172 break;
173
174 case CMDGenIOCTL:
175 rc = gen_ioctl((RP_GENIOCTL _far *) req);
176 break;
177
178 case CMDOpen:
179 build_user_info(1);
180 rc = STDON;
181 break;
182
183 case CMDINPUT:
184 rc = char_dev_input((RP_RWV _far *) req);
185 break;
186
187 case CMDSaveRestore:
188 rc = sr_drv(((RPSAVERESTORE _far *) req)->FuncCode);
189 break;
190
191 case CMDClose:
192 case CMDInputS:
193 case CMDInputF:
194 /* noop */
195 rc = STDON;
196 break;
197
198 default:
199 rc = STDON | STATUS_ERR_UNKCMD;
200 break;
201 }
202
203 return(rc);
204}
205
206/******************************************************************************
207 * Initialize the os2ahci driver. This includes command line parsing, scanning
208 * the PCI bus for supported AHCI adapters, etc.
209 */
210USHORT init_drv(RPINITIN _far *req)
211{
212 static int init_drv_called;
213 static int init_drv_failed;
214 RPINITOUT _far *rsp = (RPINITOUT _far *) req;
215 DDD_PARM_LIST _far *ddd_pl = (DDD_PARM_LIST _far *) req->InitArgs;
216 APIRET rmrc;
217 char _far *cmd_line;
218 char _far *s;
219 int adapter_index = -1;
220 int port_index = -1;
221 int invert_option;
222 int optval;
223 u16 vendor;
224 u16 device;
225
226 if (init_drv_called) {
227 /* This is the init call for the second (legacy IBMS506$) character
228 * device driver. If the main driver failed initialization, fail this
229 * one as well.
230 */
231 rsp->CodeEnd = (u16) end_of_code;
232 rsp->DataEnd = (u16) &end_of_data;
233 return(STDON | ((init_drv_failed) ? ERROR_I24_QUIET_INIT_FAIL : 0));
234 }
235 init_drv_called = 1;
236 suspended = 0;
237 resume_sleep_flag = 0;
238 memset(ad_infos, 0, sizeof(ad_infos));
239 memset(emulate_scsi, 1, sizeof(emulate_scsi)); /* set default enabled */
240
241 /* set device helper entry point */
242 Device_Help = req->DevHlpEP;
243
244 /* create driver-level spinlock */
245 DevHelp_CreateSpinLock(&drv_lock);
246
247 /* initialize libc code */
248 init_libc();
249
250 /* register driver with resource manager */
251 if ((rmrc = RMCreateDriver(&rm_drvinfo, &rm_drvh)) != RMRC_SUCCESS) {
252 cprintf("%s: failed to register driver with resource manager (rc = %d)\n",
253 drv_name, rmrc);
254 goto init_fail;
255 }
256
257 /* parse command line parameters */
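  /* cmd_line_args is a near offset within the init packet's segment, so a
   * far pointer is rebuilt from the segment part of ddd_pl plus that offset */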
258 cmd_line = (char _far *) ((u32) ddd_pl & 0xffff0000l) + ddd_pl->cmd_line_args;
259
260 for (s = cmd_line; *s != 0; s++) {
261 if (*s == '/') {
262 if ((invert_option = (s[1] == '!')) != 0) {
263 s++;
264 }
265 s++;
266 switch (tolower(*s)) {
267
268 case '\0':
269 /* end of command line; can only happen if command line is incorrect */
270 cprintf("%s: incomplete command line option\n", drv_name);
271 goto init_fail;
272
273 case 'b':
274 drv_parm_int(s, com_baud, u32, 10);
275 break;
276
277 case 'c':
278 /* set COM port base address for debug messages */
279 drv_parm_int(s, com_base, u16, 16);
280 if (com_base == 1) com_base = 0x3f8;
281 if (com_base == 2) com_base = 0x2f8;
282 break;
283
284 case 'd':
285 /* increase debug level */
286 drv_parm_int_optional(s, debug, int, 10);
287 break;
288
289 case 'g':
290 /* add specified PCI ID as a supported generic AHCI adapter */
291 drv_parm_int(s, vendor, u16, 16);
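        /* strtol left 's' on the ':' separating vendor from device; step back
         * one character so the next drv_parm_int sees that ':' at s[1] */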
292 s--;
293 drv_parm_int(s, device, u16, 16);
294 if (add_pci_id(vendor, device)) {
295 cprintf("%s: failed to add PCI ID %04x:%04x\n", drv_name, vendor, device);
296 goto init_fail;
297 }
298 thorough_scan = 1;
299 break;
300
301 case 't':
302 /* perform thorough PCI scan (i.e. look for individual supported PCI IDs) */
303 thorough_scan = !invert_option;
304 break;
305
306 case 'r':
307 /* reset ports during initialization */
308 init_reset = !invert_option;
309 break;
310
311 case 'f':
312 /* force write cache regardless of IORB flags */
313 force_write_cache = 1;
314 break;
315
316 case 'a':
317 /* set adapter index for adapter and port-related options */
318 drv_parm_int(s, adapter_index, int, 10);
319 if (adapter_index < 0 || adapter_index >= MAX_AD) {
320 cprintf("%s: invalid adapter index (%d)\n", drv_name, adapter_index);
321 goto init_fail;
322 }
323 break;
324
325 case 'p':
326 /* set port index for port-related options */
327 drv_parm_int(s, port_index, int, 10);
328 if (port_index < 0 || port_index >= AHCI_MAX_PORTS) {
329 cprintf("%s: invalid port index (%d)\n", drv_name, port_index);
330 goto init_fail;
331 }
332 break;
333
334 case 'i':
335 /* ignore current adapter or port */
336 if (adapter_index >= 0) {
337 if (port_index >= 0) port_ignore[adapter_index][port_index] = !invert_option;
338 else ad_ignore |= 1U << adapter_index;
339 }
340 break;
341
342 case 's':
343 /* enable SCSI emulation for ATAPI devices */
344 set_port_option(emulate_scsi, !invert_option);
345 break;
346
347 case 'n':
348 /* enable NCQ */
349 set_port_option(enable_ncq, !invert_option);
350 break;
351
352 case 'l':
353 /* set link speed or power savings */
354 s++;
355 switch (tolower(*s)) {
356 case 's':
357 /* set link speed */
358 drv_parm_int(s, optval, int, 10);
359 set_port_option(link_speed, optval);
360 break;
361 case 'p':
362 /* set power management */
363 drv_parm_int(s, optval, int, 10);
364 set_port_option(link_power, optval);
365 break;
366 default:
367 cprintf("%s: invalid link parameter (%c)\n", drv_name, *s);
368 goto init_fail;
369 }
370 /* need to reset the port in order to establish link settings */
371 init_reset = 1;
372 break;
373
374 case '4':
375 /* enable 4K sector geometry enhancement (track size = 56) */
376 if (!invert_option) {
377 set_port_option(track_size, 56);
378 }
379 break;
380
381 case 'z':
382 /* Do not use the LVM information. There is no reason anyone should need
383 * this, but previous versions of this driver had no LVM capability, so
384 * this switch is kept here temporarily just in case.
385 */
386 use_lvm_info = !invert_option;
387 break;
388
389 case 'v':
390 /* be verbose during boot */
391 drv_parm_int_optional(s, verbosity, int, 10);
392 break;
393
394 case 'w':
395 /* Allow the trace buffer to wrap around when full. */
396 wrap_trace_buffer = !invert_option;
397 break;
398
399 case 'q':
400 /* Temporarily output a non-fatal message to get anyone using this
401 * undocumented switch to stop using it. This will be removed soon
402 * and the error will become fatal.
403 */
404 cprintf("%s: unknown option: /%c\n", drv_name, *s);
405 break;
406
407 default:
408 cprintf("%s: unknown option: /%c\n", drv_name, *s);
409 goto init_fail;
410 }
411 }
412 }
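
  /* Illustrative example of a complete command line as parsed above. The
   * option letters come from the switch; the .ADD file name and the specific
   * values are made up for the example:
   *
   *   BASEDEV=OS2AHCI.ADD /V /A:0 /P:1 /!N /LS:2 /G:1B4B:91A8
   *
   * i.e. verbose boot, NCQ disabled and link speed 2 on adapter 0 port 1,
   * and one additional PCI ID registered as a generic AHCI adapter.
   */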
413
414 if (com_baud) init_com(com_baud); /* initialize com port for debug output */
415
416 /* initialize trace buffer if applicable */
417 if (debug > 0 && com_base == 0) {
418 /* debug is on, but COM port is off -> use our trace buffer */
419 trace_init(AHCI_DEBUG_BUF_SIZE);
420 } else {
421 trace_init(AHCI_INFO_BUF_SIZE);
422 }
423
424 ntprintf("BldLevel: %s\n", BldLevel);
425 ntprintf("CmdLine: %Fs\n", cmd_line);
426
427 /* print initialization message */
428 ciprintf(init_msg, drv_name, VERSION / 100, VERSION % 100);
429
430 #ifdef TESTVER
431 #include "testver.c"
432 #endif
433
434 /* scan PCI bus for supported devices */
435 scan_pci_bus();
436
437 if (ad_info_cnt > 0) {
438 /* initialization succeeded and we found at least one AHCI adapter */
439 ADD_InitTimer(timer_pool, sizeof(timer_pool));
440
441 if (DevHelp_RegisterDeviceClass(drv_name, (PFN) add_entry, 0, 1, &add_handle)) {
442 cprintf("%s: couldn't register device class\n", drv_name);
443 goto init_fail;
444 }
445
446 /* allocate context hooks */
447 if (DevHelp_AllocateCtxHook(mk_NPFN(restart_hook), &restart_ctxhook_h) != 0 ||
448 DevHelp_AllocateCtxHook(mk_NPFN(reset_hook), &reset_ctxhook_h) != 0 ||
449 DevHelp_AllocateCtxHook(mk_NPFN(engine_hook), &engine_ctxhook_h)) {
450 cprintf("%s: failed to allocate task-time context hooks\n", drv_name);
451 goto init_fail;
452 }
453
454 rsp->CodeEnd = (u16) end_of_code;
455 rsp->DataEnd = (u16) &end_of_data;
456
457 /* register kernel exit routine for trap dumps */
458 register_krnl_exit();
459
460 return(STDON);
461
462 } else {
463 /* no adapters found */
464 ciprintf(" No adapters found.\n");
465 }
466
467init_fail:
468 /* initialization failed; set segment sizes to 0 and return error */
469 rsp->CodeEnd = 0;
470 rsp->DataEnd = 0;
471 init_drv_failed = 1;
472
473 /* free context hooks */
474 if (engine_ctxhook_h != 0) DevHelp_FreeCtxHook(engine_ctxhook_h);
475 if (reset_ctxhook_h != 0) DevHelp_FreeCtxHook(reset_ctxhook_h);
476 if (restart_ctxhook_h != 0) DevHelp_FreeCtxHook(restart_ctxhook_h);
477
478 if (rm_drvh != 0) {
479 /* remove driver from resource manager */
480 RMDestroyDriver(rm_drvh);
481 }
482
483 ciprintf(exit_msg, drv_name);
484 return(STDON | ERROR_I24_QUIET_INIT_FAIL);
485}
486
487/******************************************************************************
488 * Generic IOCTL via character device driver. IOCTLs are used to control the
489 * driver operation and to execute native ATA and ATAPI (SCSI) commands from
490 * ring 3 applications. On top of that, some predefined IOCTLs (e.g. SMART
491 * commands for ATA disks) are implemented here.
492 */
493USHORT gen_ioctl(RP_GENIOCTL _far *ioctl)
494{
495 dprintf("IOCTL 0x%x/0x%x\n", (u16) ioctl->Category, (u16) ioctl->Function);
496
497 switch (ioctl->Category) {
498
499 case OS2AHCI_IOCTL_CATEGORY:
500 switch (ioctl->Function) {
501
502 case OS2AHCI_IOCTL_GET_DEVLIST:
503 return(ioctl_get_devlist(ioctl));
504
505 case OS2AHCI_IOCTL_PASSTHROUGH:
506 return(ioctl_passthrough(ioctl));
507
508 }
509 break;
510
511 case DSKSP_CAT_GENERIC:
512 return(ioctl_gen_dsk(ioctl));
513
514 case DSKSP_CAT_SMART:
515 return(ioctl_smart(ioctl));
516
517 }
518
519 return(STDON | STATUS_ERR_UNKCMD);
520}
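
/* Illustrative ring-3 sketch (not part of this driver) of how an application
 * reaches gen_ioctl() through the character device. The device name
 * "OS2AHCI$" and the shape of the output buffer are assumptions; the
 * category/function codes are the ones handled above (defined in ioctl.h):
 *
 *   HFILE hf;
 *   ULONG action, dlen = sizeof(buf);
 *
 *   DosOpen("OS2AHCI$", &hf, &action, 0, FILE_NORMAL, FILE_OPEN,
 *           OPEN_ACCESS_READWRITE | OPEN_SHARE_DENYNONE, NULL);
 *   DosDevIOCtl(hf, OS2AHCI_IOCTL_CATEGORY, OS2AHCI_IOCTL_GET_DEVLIST,
 *               NULL, 0, NULL, buf, sizeof(buf), &dlen);
 *   DosClose(hf);
 */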
521
522/******************************************************************************
523 * Read from character device. If tracing is on (internal ring buffer trace),
524 * we return data from the trace buffer; if not, we might return a device
525 * dump similar to IBM1S506.ADD/DANIS506.ADD (TODO).
526 */
527USHORT char_dev_input(RP_RWV _far *rwrb)
528{
529 return(trace_char_dev(rwrb));
530}
531
532/******************************************************************************
533 * Device driver exit handler. This handler is called when OS/2 shuts down and
534 * flushes the write caches of all attached devices. Since this is effectively
535 * the same thing we do when suspending, we'll call out to the corresponding suspend
536 * function.
537 *
538 * NOTE: Errors are ignored because there's no way we could stop the shutdown
539 * or do something about the error, unless retrying endlessly is
540 * considered an option.
541 */
542USHORT exit_drv(int func)
543{
544 dprintf("exit_drv(%d) called\n", func);
545
546 if (func == 0) {
547 /* we're only interested in the second phase of the shutdown */
548 return(STDON);
549 }
550
551 suspend();
552 return(STDON);
553}
554
555/******************************************************************************
556 * Device driver suspend/resume handler. This handler is called when ACPI is
557 * executing a suspend or resume.
558 */
559USHORT sr_drv(int func)
560{
561 dprintf("sr_drv(%d) called\n", func);
562
563 if (func) resume();
564 else suspend();
565
566 return(STDON);
567}
568
569/******************************************************************************
570 * ADD entry point. This is the main entry point for all ADD requests. Due to
571 * the asynchronous nature of ADD drivers, this function primarily queues the
572 * IORB(s) to the corresponding adapter or port queues, then triggers the
573 * state machine to initiate processing queued IORBs.
574 *
575 * NOTE: In order to prevent race conditions or engine stalls, certain rules
576 * around locking, unlocking and IORB handling in general have been
577 * established. Refer to the comments in "trigger_engine()" for
578 * details.
579 */
580void _cdecl _far _loadds add_entry(IORBH _far *first_iorb)
581{
582 IORBH _far *iorb;
583 IORBH _far *next = NULL;
584
585 spin_lock(drv_lock);
586
587 for (iorb = first_iorb; iorb != NULL; iorb = next) {
588 /* Queue this IORB. Queues primarily exist on port level but there are
589 * some requests which affect the whole driver, most notably
590 * IOCC_CONFIGURATION. In either case, adding the IORB to the driver or
591 * port queue will change the links, thus we need to save the original
592 * link in 'next'.
593 */
594 next = (iorb->RequestControl & IORB_CHAIN) ? iorb->pNxtIORB : 0;
595
596 iorb->Status = 0;
597 iorb->ErrorCode = 0;
598 memset(&iorb->ADDWorkSpace, 0x00, sizeof(ADD_WORKSPACE));
599
600 if (iorb_driver_level(iorb)) {
601 /* driver-level IORB */
602 iorb->UnitHandle = 0;
603 iorb_queue_add(&driver_queue, iorb);
604
605 } else {
606 /* port-level IORB */
607 int a = iorb_unit_adapter(iorb);
608 int p = iorb_unit_port(iorb);
609 int d = iorb_unit_device(iorb);
610
611 if (a >= ad_info_cnt ||
612 p > ad_infos[a].port_max ||
613 d > ad_infos[a].ports[p].dev_max ||
614 (ad_infos[a].port_map & (1UL << p)) == 0) {
615
616 /* unit handle outside of the allowed range */
617 dprintf("warning: IORB for %d.%d.%d out of range\n", a, p, d);
618 iorb->Status = IORB_ERROR;
619 iorb->ErrorCode = IOERR_CMD_SYNTAX;
620 iorb_complete(iorb);
621 continue;
622 }
623
624 iorb_queue_add(&ad_infos[a].ports[p].iorb_queue, iorb);
625 }
626 }
627
628 /* trigger state machine */
629 trigger_engine();
630
631 spin_unlock(drv_lock);
632}
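
/* Illustrative sketch (hypothetical upstream caller, not part of this driver)
 * of how a chain of IORBs is handed to add_entry(). Only the fields consumed
 * above are shown; 'unit' would come from the device table and 'my_notify'
 * is a made-up completion callback:
 *
 *   iorb1.UnitHandle      = unit;
 *   iorb1.CommandCode     = IOCC_EXECUTE_IO;
 *   iorb1.CommandModifier = IOCM_READ;
 *   iorb1.RequestControl  = IORB_CHAIN | IORB_ASYNC_POST;
 *   iorb1.NotifyAddress   = my_notify;
 *   iorb1.pNxtIORB        = &iorb2;            (second request in the chain)
 *   iorb2.RequestControl  = IORB_ASYNC_POST;   (last element: no chain bit)
 *
 *   add_entry(&iorb1);     (queues the IORBs and returns; completion is
 *                           signalled asynchronously via my_notify)
 */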
633
634/******************************************************************************
635 * Trigger IORB queue engine. This is a wrapper function for trigger_engine_1()
636 * which will try to get all IORBs sent on their way a couple of times. If
637 * there are still IORBs ready for processing after this, this function will
638 * hand off to a context hook which will continue to trigger the engine until
639 * all IORBs have been sent.
640 *
641 * NOTE: While initialization has not completed (or during suspend/resume
642 * operations), this function will loop indefinitely because we can't
643 * rely on interrupt handlers or context hooks and complex IORBs
644 * requiring multiple requeues would eventually hang and time out if
645 * we stopped triggering here.
646 */
647void trigger_engine(void)
648{
649 int i;
650
651 for (i = 0; i < 3 || !init_complete; i++) {
652 if (trigger_engine_1() == 0) {
653 /* done -- all IORBs have been sent on their way */
654 return;
655 }
656 }
657
658 /* Something keeps bouncing; hand off to the engine context hook which will
659 * keep trying in the background.
660 */
661 DevHelp_ArmCtxHook(0, engine_ctxhook_h);
662}
663
664/******************************************************************************
665 * Trigger IORB queue engine in order to send commands in the driver/port IORB
666 * queues to the AHCI hardware. This function will return the number of IORBs
667 * sent. Keep in mind that IORBs might "bounce" if the adapter/port is not in
668 * a state to accept the command, thus it might take quite a few calls to get
669 * all IORBs on their way. This is why there's a wrapper function which tries
670 * it a few times, then hands off to a context hook which will keep trying in
671 * the background.
672 *
673 * IORBs might complete before send_iorb() has returned, at any time during
674 * interrupt processing or on another CPU on SMP systems. IORB completion
675 * means modifications to the corresponding IORB queue (the completed IORB
676 * is removed from the queue) thus we need to protect the IORB queues from
677 * race conditions. The safest approach short of keeping the driver-level
678 * spinlock acquired permanently is to keep it throughout this function and
679 * release it temporarily in send_iorb().
680 *
681 * This implies that the handler functions are fully responsible for acquiring
682 * the driver-level spinlock when they need it, and for releasing it again.
683 *
684 * As a rule of thumb, get the driver-level spinlock whenever accessing
685 * volatile variables (IORB queues, values in ad_info[], ...).
686 *
687 * Additional Notes:
688 *
689 * - This function is expected to be called with the spinlock acquired
690 *
691 * - Adapters can be flagged as 'busy' which means no new IORBs are sent (they
692 * just remain in the queue). This can be used to release the driver-level
693 * spinlock while making sure no new IORBs are going to hit the hardware.
694 * In order to prevent engine stalls, all handlers using this functionality
695 * need to invoke trigger_engine() after resetting the busy flag.
696 *
697 * - Driver-level IORBs are not synchronized by adapter-level 'busy' flags.
698 * However, the driver-level queue is worked "one entry at a time" which
699 * means that no new IORBs will be queued on the driver-level queue until
700 * the head element has completed processing. This means that driver-
701 * level IORB handlers don't need to protect against each other. But they
702 * do need to keep in mind interference with port-level IORBs:
703 *
704 * - Driver-level IORB handlers must obtain the spinlock and/or flag all
705 * adapters as 'busy' which are affected by the driver-level IORB
706 *
707 * - Driver-level IORB handlers must not access the hardware of a
708 * particular adapter if it's flagged as 'busy' by another IORB.
709 */
710int trigger_engine_1(void)
711{
712 IORBH _far *iorb;
713 IORBH _far *next;
714 int iorbs_sent = 0;
715 int a;
716 int p;
717
718 iorbs_sent = 0;
719
720 /* process driver-level IORBs */
721 if ((iorb = driver_queue.root) != NULL && !add_workspace(iorb)->processing) {
722 send_iorb(iorb);
723 iorbs_sent++;
724 }
725
726 /* process port-level IORBs */
727 for (a = 0; a < ad_info_cnt; a++) {
728 AD_INFO *ai = ad_infos + a;
729 if (ai->busy) {
730 /* adapter is busy; don't process any IORBs */
731 continue;
732 }
733 for (p = 0; p <= ai->port_max; p++) {
734 /* send all queued IORBs on this port */
735 next = NULL;
736 for (iorb = ai->ports[p].iorb_queue.root; iorb != NULL; iorb = next) {
737 next = iorb->pNxtIORB;
738 if (!add_workspace(iorb)->processing) {
739 send_iorb(iorb);
740 iorbs_sent++;
741 }
742 }
743 }
744 }
745
746 return(iorbs_sent);
747}
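
/* Illustrative sketch (not part of the driver) of the adapter 'busy' protocol
 * described above, for a handler that needs to release the spinlock while
 * keeping new IORBs away from the hardware:
 *
 *   spin_lock(drv_lock);
 *   ai->busy = 1;              (trigger_engine_1() now skips this adapter)
 *   spin_unlock(drv_lock);
 *   ... lengthy hardware access without holding the spinlock ...
 *   spin_lock(drv_lock);
 *   ai->busy = 0;
 *   trigger_engine();          (mandatory, otherwise queued IORBs may stall)
 *   spin_unlock(drv_lock);
 */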
748
749/******************************************************************************
750 * Send a single IORB to the corresponding AHCI adapter/port. This is just a
751 * switch board for calling the corresponding iocc_*() handler function.
752 *
753 * NOTE: This function is expected to be called with the driver-level spinlock
754 * acquired. It will release it before calling any of the handler
755 * functions and re-acquire it when done.
756 */
757void send_iorb(IORBH _far *iorb)
758{
759 /* Mark IORB as "processing" before doing anything else. Once the IORB is
760 * marked as "processing", we can release the spinlock because subsequent
761 * invocations of trigger_engine() (e.g. at interrupt time) will ignore this
762 * IORB.
763 */
764 add_workspace(iorb)->processing = 1;
765 spin_unlock(drv_lock);
766
767 switch (iorb->CommandCode) {
768
769 case IOCC_CONFIGURATION:
770 iocc_configuration(iorb);
771 break;
772
773 case IOCC_DEVICE_CONTROL:
774 iocc_device_control(iorb);
775 break;
776
777 case IOCC_UNIT_CONTROL:
778 iocc_unit_control(iorb);
779 break;
780
781 case IOCC_GEOMETRY:
782 iocc_geometry(iorb);
783 break;
784
785 case IOCC_EXECUTE_IO:
786 iocc_execute_io(iorb);
787 break;
788
789 case IOCC_UNIT_STATUS:
790 iocc_unit_status(iorb);
791 break;
792
793 case IOCC_ADAPTER_PASSTHRU:
794 iocc_adapter_passthru(iorb);
795 break;
796
797 default:
798 /* unsupported call */
799 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
800 iorb_done(iorb);
801 break;
802 }
803
804 /* re-acquire spinlock before returning to trigger_engine() */
805 spin_lock(drv_lock);
806}
807
808/******************************************************************************
809 * Handle IOCC_CONFIGURATION requests.
810 */
811void iocc_configuration(IORBH _far *iorb)
812{
813 int a;
814
815 switch (iorb->CommandModifier) {
816
817 case IOCM_COMPLETE_INIT:
818 /* Complete initialization. From now on, we won't have to restore the BIOS
819 * configuration after each command and we're fully operational (i.e. will
820 * use interrupts, timers and context hooks instead of polling).
821 */
822 if (!init_complete) {
823 dprintf("leaving initialization mode\n");
824 for (a = 0; a < ad_info_cnt; a++) {
825 lock_adapter(ad_infos + a);
826 ahci_complete_init(ad_infos + a);
827 }
828 init_complete = 1;
829
830 /* DAZ turn off COM port output if on */
831 //com_base = 0;
832
833 /* release all adapters */
834 for (a = 0; a < ad_info_cnt; a++) {
835 unlock_adapter(ad_infos + a);
836 }
837
838 #ifdef LEGACY_APM
839 /* register APM hook */
840 apm_init();
841 #endif
842
843 build_user_info(0);
844 }
845 iorb_done(iorb);
846 break;
847
848 case IOCM_GET_DEVICE_TABLE:
849 /* construct a device table */
850 iocm_device_table(iorb);
851 break;
852
853 default:
854 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
855 iorb_done(iorb);
856 break;
857 }
858}
859
860/******************************************************************************
861 * Handle IOCC_DEVICE_CONTROL requests.
862 */
863void iocc_device_control(IORBH _far *iorb)
864{
865 AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb);
866 IORBH _far *ptr;
867 IORBH _far *next = NULL;
868 int p = iorb_unit_port(iorb);
869 int d = iorb_unit_device(iorb);
870
871 switch (iorb->CommandModifier) {
872
873 case IOCM_ABORT:
874 /* abort all pending commands on specified port and device */
875 spin_lock(drv_lock);
876 for (ptr = ai->ports[p].iorb_queue.root; ptr != NULL; ptr = next) {
877 next = ptr->pNxtIORB;
878 /* move all matching IORBs to the abort queue */
879 if (ptr != iorb && iorb_unit_device(ptr) == d) {
880 iorb_queue_del(&ai->ports[p].iorb_queue, ptr);
881 iorb_queue_add(&abort_queue, ptr);
882 ptr->ErrorCode = IOERR_CMD_ABORTED;
883 }
884 }
885 spin_unlock(drv_lock);
886
887 /* trigger reset context hook which will finish the abort processing */
888 DevHelp_ArmCtxHook(0, reset_ctxhook_h);
889 break;
890
891 case IOCM_SUSPEND:
892 case IOCM_RESUME:
893 case IOCM_GET_QUEUE_STATUS:
894 /* Suspend/resume operations allow access to the hardware for other
895 * entities such as IBMIDECD.FLT. Since os2ahci implements both ATA
896 * and ATAPI in the same driver, this won't be required.
897 */
898 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
899 break;
900
901 case IOCM_LOCK_MEDIA:
902 case IOCM_UNLOCK_MEDIA:
903 case IOCM_EJECT_MEDIA:
904 /* unit control commands to lock, unlock and eject media */
905 /* will be supported later... */
906 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
907 break;
908
909 default:
910 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
911 break;
912 }
913
914 iorb_done(iorb);
915}
916
917/******************************************************************************
918 * Handle IOCC_UNIT_CONTROL requests.
919 */
920void iocc_unit_control(IORBH _far *iorb)
921{
922 IORB_UNIT_CONTROL _far *iorb_uc = (IORB_UNIT_CONTROL _far *) iorb;
923 int a = iorb_unit_adapter(iorb);
924 int p = iorb_unit_port(iorb);
925 int d = iorb_unit_device(iorb);
926
927 spin_lock(drv_lock);
928 switch (iorb->CommandModifier) {
929
930 case IOCM_ALLOCATE_UNIT:
931 /* allocate unit for exclusive access */
932 if (ad_infos[a].ports[p].devs[d].allocated) {
933 iorb_seterr(iorb, IOERR_UNIT_ALLOCATED);
934 } else {
935 ad_infos[a].ports[p].devs[d].allocated = 1;
936 }
937 break;
938
939 case IOCM_DEALLOCATE_UNIT:
940 /* deallocate exclusive access to unit */
941 if (!ad_infos[a].ports[p].devs[d].allocated) {
942 iorb_seterr(iorb, IOERR_UNIT_NOT_ALLOCATED);
943 } else {
944 ad_infos[a].ports[p].devs[d].allocated = 0;
945 }
946 break;
947
948 case IOCM_CHANGE_UNITINFO:
949 /* Change unit (device) information. One reason for this IOCM is the
950 * interface for filter device drivers: a filter device driver can
951 * either change existing UNITINFOs or permanently allocate units
952 * and fabricate new [logical] units; the former is the reason why we
953 * must store the pointer to the updated UNITINFO for subsequent
954 * IOCC_CONFIGURATION/IOCM_GET_DEVICE_TABLE calls.
955 */
956 if (!ad_infos[a].ports[p].devs[d].allocated) {
957 iorb_seterr(iorb, IOERR_UNIT_NOT_ALLOCATED);
958 break;
959 }
960 ad_infos[a].ports[p].devs[d].unit_info = iorb_uc->pUnitInfo;
961 break;
962
963 default:
964 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
965 break;
966 }
967
968 spin_unlock(drv_lock);
969 iorb_done(iorb);
970}
971
972/******************************************************************************
973 * Scan all ports for AHCI devices and construct a DASD device table.
974 *
975 * NOTES: This function may be called multiple times. Only the first
976 * invocation will actually scan for devices; all subsequent calls will
977 * merely return the results of the initial scan, potentially augmented
978 * by modified unit infos after IOCC_CONFIGURATION/IOCM_CHANGE_UNITINFO
979 * requests.
980 *
981 * In order to support applications that can't deal with ATAPI devices
982 * (i.e. need a SCSI adapter) os2ahci will optionally report ATAPI
983 * devices as SCSI devices. The corresponding SCSI adapter doesn't
984 * really exist and is only reported here for the IOCM_GET_DEVICE_TABLE
985 * request. The units attached to this adapter will use the real HW
986 * unit IDs, thus we'll never receive a command specific to the
987 * emulated SCSI adapter and won't need to set up any sort of entity
988 * for it; the only purpose of the emulated SCSI adapter is to pass the
989 * bus type "AI_DEVBUS_SCSI_2" upstream, and the emulated units, of
990 * course. The emulated SCSI target IDs are allocated as follows:
991 *
992 * 0 the virtual adapter
993 * 1..n emulated devices; SCSI target ID increments sequentially
994 */
995void iocm_device_table(IORBH _far *iorb)
996{
997 IORB_CONFIGURATION _far *iorb_conf;
998 DEVICETABLE _far *dt;
999 char _far *pos;
1000 int scsi_units = 0;
1001 int scsi_id = 1;
1002 int rc;
1003 int dta;
1004 int a;
1005 int p;
1006 int d;
1007
1008 iorb_conf = (IORB_CONFIGURATION _far *) iorb;
1009 dt = iorb_conf->pDeviceTable;
1010
1011 spin_lock(drv_lock);
1012
1013 /* initialize device table header */
1014 dt->ADDLevelMajor = ADD_LEVEL_MAJOR;
1015 dt->ADDLevelMinor = ADD_LEVEL_MINOR;
1016 dt->ADDHandle = add_handle;
1017 dt->TotalAdapters = ad_info_cnt + 1;
1018
1019 /* set start of adapter and device information tables */
1020 pos = (char _far *) (dt->pAdapter + dt->TotalAdapters);
1021
1022 /* go through all adapters, including the virtual SCSI adapter */
1023 for (dta = 0; dta < dt->TotalAdapters; dta++) {
1024 ADAPTERINFO _far *ptr = (ADAPTERINFO _far *) pos;
1025
1026 /* sanity check for sufficient space in device table */
1027 if ((u32) (ptr + 1) - (u32) dt > iorb_conf->DeviceTableLen) {
1028 dprintf("error: device table provided by DASD too small\n");
1029 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE);
1030 goto iocm_device_table_done;
1031 }
1032
1033 dt->pAdapter[dta] = (ADAPTERINFO _near *) ((u32) ptr & 0xffff);
1034 memset(ptr, 0x00, sizeof(*ptr));
1035
1036 ptr->AdapterIOAccess = AI_IOACCESS_BUS_MASTER;
1037 ptr->AdapterHostBus = AI_HOSTBUS_OTHER | AI_BUSWIDTH_32BIT;
1038 ptr->AdapterFlags = AF_16M | AF_HW_SCATGAT;
1039 ptr->MaxHWSGList = AHCI_MAX_SG / 2; /* AHCI S/G elements are 22 bits */
1040
1041 if (dta < ad_info_cnt) {
1042 /* this is a physical AHCI adapter */
1043 AD_INFO *ad_info = ad_infos + dta;
1044
1045 ptr->AdapterDevBus = AI_DEVBUS_ST506 | AI_DEVBUS_32BIT;
1046 sprintf(ptr->AdapterName, "AHCI_%d", dta);
1047
1048 if (!ad_info->port_scan_done) {
1049 /* first call; need to scan AHCI hardware for devices */
1050 if (ad_info->busy) {
1051 dprintf("error: port scan requested while adapter was busy\n");
1052 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE);
1053 goto iocm_device_table_done;
1054 }
1055 ad_info->busy = 1;
1056 spin_unlock(drv_lock);
1057 rc = ahci_scan_ports(ad_info);
1058 spin_lock(drv_lock);
1059 ad_info->busy = 0;
1060
1061 if (rc != 0) {
1062 dprintf("error: port scan failed on adapter #%d\n", dta);
1063 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE);
1064 goto iocm_device_table_done;
1065 }
1066 ad_info->port_scan_done = 1;
1067 }
1068
1069 /* insert physical (i.e. AHCI) devices into the device table */
1070 for (p = 0; p <= ad_info->port_max; p++) {
1071 for (d = 0; d <= ad_info->ports[p].dev_max; d++) {
1072 if (ad_info->ports[p].devs[d].present) {
1073 if (ad_info->ports[p].devs[d].atapi && emulate_scsi[dta][p]) {
1074 /* report this unit as SCSI unit */
1075 scsi_units++;
1076 //continue;
1077 }
1078 if (add_unit_info(iorb_conf, dta, dta, p, d, 0)) {
1079 goto iocm_device_table_done;
1080 }
1081 }
1082 }
1083 }
1084
1085 } else {
1086 /* this is the virtual SCSI adapter */
1087 if (scsi_units == 0) {
1088 /* not a single unit to be emulated via SCSI */
1089 dt->TotalAdapters--;
1090 break;
1091 }
1092
1093 /* set adapter name and bus type to mimic a SCSI controller */
1094 ptr->AdapterDevBus = AI_DEVBUS_SCSI_2 | AI_DEVBUS_16BIT;
1095 sprintf(ptr->AdapterName, "AHCI_SCSI_0");
1096
1097 /* add all ATAPI units to be emulated by this virtual adapter */
1098 for (a = 0; a < ad_info_cnt; a++) {
1099 AD_INFO *ad_info = ad_infos + a;
1100
1101 for (p = 0; p <= ad_info->port_max; p++) {
1102 for (d = 0; d <= ad_info->ports[p].dev_max; d++) {
1103 if (ad_info->ports[p].devs[d].present && ad_info->ports[p].devs[d].atapi && emulate_scsi[a][p]) {
1104 if (add_unit_info(iorb_conf, dta, a, p, d, scsi_id++)) {
1105 goto iocm_device_table_done;
1106 }
1107 }
1108 }
1109 }
1110 }
1111 }
1112
1113 /* calculate offset for next adapter */
1114 pos = (char _far *) (ptr->UnitInfo + ptr->AdapterUnits);
1115 }
1116
1117iocm_device_table_done:
1118 spin_unlock(drv_lock);
1119 iorb_done(iorb);
1120}
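
/* Illustrative layout of the DEVICETABLE constructed above, for a system with
 * two physical adapters plus the virtual SCSI adapter (sizes not to scale):
 *
 *   DEVICETABLE header (ADDLevel*, ADDHandle, TotalAdapters, pAdapter[])
 *   pAdapter[0] --> ADAPTERINFO "AHCI_0"       followed by its UNITINFO[]
 *   pAdapter[1] --> ADAPTERINFO "AHCI_1"       followed by its UNITINFO[]
 *   pAdapter[2] --> ADAPTERINFO "AHCI_SCSI_0"  followed by emulated UNITINFO[]
 *
 * The pAdapter[] entries are near pointers (offsets within the segment of the
 * device table), which is why add_unit_info() below reconstructs far pointers
 * from the segment part of 'dt'.
 */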
1121
1122/******************************************************************************
1123 * Handle IOCC_GEOMETRY requests.
1124 */
1125void iocc_geometry(IORBH _far *iorb)
1126{
1127 switch (iorb->CommandModifier) {
1128
1129 case IOCM_GET_MEDIA_GEOMETRY:
1130 case IOCM_GET_DEVICE_GEOMETRY:
1131 add_workspace(iorb)->idempotent = 1;
1132 ahci_get_geometry(iorb);
1133 break;
1134
1135 default:
1136 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1137 iorb_done(iorb);
1138 }
1139}
1140
1141/******************************************************************************
1142 * Handle IOCC_EXECUTE_IO requests.
1143 */
1144void iocc_execute_io(IORBH _far *iorb)
1145{
1146 switch (iorb->CommandModifier) {
1147
1148 case IOCM_READ:
1149 add_workspace(iorb)->idempotent = 1;
1150 ahci_read(iorb);
1151 break;
1152
1153 case IOCM_READ_VERIFY:
1154 add_workspace(iorb)->idempotent = 1;
1155 ahci_verify(iorb);
1156 break;
1157
1158 case IOCM_WRITE:
1159 add_workspace(iorb)->idempotent = 1;
1160 ahci_write(iorb);
1161 break;
1162
1163 case IOCM_WRITE_VERIFY:
1164 add_workspace(iorb)->idempotent = 1;
1165 ahci_write(iorb);
1166 break;
1167
1168 default:
1169 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1170 iorb_done(iorb);
1171 }
1172}
1173
1174/******************************************************************************
1175 * Handle IOCC_UNIT_STATUS requests.
1176 */
1177void iocc_unit_status(IORBH _far *iorb)
1178{
1179 switch (iorb->CommandModifier) {
1180
1181 case IOCM_GET_UNIT_STATUS:
1182 add_workspace(iorb)->idempotent = 1;
1183 ahci_unit_ready(iorb);
1184 break;
1185
1186 default:
1187 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1188 iorb_done(iorb);
1189 }
1190}
1191
1192/******************************************************************************
1193 * Handle IOCC_ADAPTER_PASSTHROUGH requests.
1194 */
1195void iocc_adapter_passthru(IORBH _far *iorb)
1196{
1197 switch (iorb->CommandModifier) {
1198
1199 case IOCM_EXECUTE_CDB:
1200 add_workspace(iorb)->idempotent = 0;
1201 ahci_execute_cdb(iorb);
1202 break;
1203
1204 case IOCM_EXECUTE_ATA:
1205 add_workspace(iorb)->idempotent = 0;
1206 ahci_execute_ata(iorb);
1207 break;
1208
1209 default:
1210 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1211 iorb_done(iorb);
1212 }
1213}
1214
1215/******************************************************************************
1216 * Add an IORB to the specified queue. This function must be called with the
1217 * adapter-level spinlock acquired.
1218 */
1219void iorb_queue_add(IORB_QUEUE _far *queue, IORBH _far *iorb)
1220{
1221 if (iorb_priority(iorb)) {
1222 /* priority IORB; insert at first position */
1223 iorb->pNxtIORB = queue->root;
1224 queue->root = iorb;
1225
1226 } else {
1227 /* append IORB to end of queue */
1228 iorb->pNxtIORB = NULL;
1229
1230 if (queue->root == NULL) {
1231 queue->root = iorb;
1232 } else {
1233 queue->tail->pNxtIORB = iorb;
1234 }
1235 queue->tail = iorb;
1236 }
1237
1238 if (debug) {
1239 /* determine queue type (local, driver, abort or port) and minimum debug
1240 * level; otherwise, queue debug prints can become really confusing.
1241 */
1242 char *queue_type;
1243 int min_debug = 1;
1244
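    /* A far pointer to a stack object shares its segment with the local
     * variables of this function, so comparing the segment of 'queue' with
     * the segment of the local '&queue' identifies queues on the stack.
     */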
1245 if ((u32) queue >> 16 == (u32) (void _far *) &queue >> 16) {
1246 /* this queue is on the stack */
1247 queue_type = "local";
1248 min_debug = 2;
1249
1250 } else if (queue == &driver_queue) {
1251 queue_type = "driver";
1252
1253 } else if (queue == &abort_queue) {
1254 queue_type = "abort";
1255 min_debug = 2;
1256
1257 } else {
1258 queue_type = "port";
1259 }
1260
1261 if (debug > min_debug) {
1262 aprintf("IORB %Fp queued (cmd = %d/%d, queue = %Fp [%s], timeout = %ld)\n",
1263 iorb, iorb->CommandCode, iorb->CommandModifier, queue, queue_type,
1264 iorb->Timeout);
1265 }
1266 }
1267}
1268
1269/******************************************************************************
1270 * Remove an IORB from the specified queue. This function must be called with
1271 * the adapter-level spinlock acquired.
1272 */
1273int iorb_queue_del(IORB_QUEUE _far *queue, IORBH _far *iorb)
1274{
1275 IORBH _far *_iorb;
1276 IORBH _far *_prev = NULL;
1277 int found = 0;
1278
1279 for (_iorb = queue->root; _iorb != NULL; _iorb = _iorb->pNxtIORB) {
1280 if (_iorb == iorb) {
1281 /* found the IORB to be removed */
1282 if (_prev != NULL) {
1283 _prev->pNxtIORB = _iorb->pNxtIORB;
1284 } else {
1285 queue->root = _iorb->pNxtIORB;
1286 }
1287 if (_iorb == queue->tail) {
1288 queue->tail = _prev;
1289 }
1290 found = 1;
1291 break;
1292 }
1293 _prev = _iorb;
1294 }
1295
1296 if (found) {
1297 ddprintf("IORB %Fp removed (queue = %Fp)\n", iorb, queue);
1298 } else {
1299 dprintf("IORB %Fp not found in queue %Fp\n", iorb, queue);
1300 }
1301
1302 return(!found);
1303}
1304
1305/******************************************************************************
1306 * Set the error code in the specified IORB
1307 *
1308 * NOTE: This function does *not* call iorb_done(). It merely sets the IORB
1309 * status to the specified error code.
1310 */
1311void iorb_seterr(IORBH _far *iorb, USHORT error_code)
1312{
1313 iorb->ErrorCode = error_code;
1314 iorb->Status |= IORB_ERROR;
1315}
1316
1317/******************************************************************************
1318 * Mark the specified IORB as done and notify the asynchronous post function,
1319 * if any. The IORB is also removed from the corresponding IORB queue.
1320 *
1321 * NOTES: This function does not clear the Status field; it merely adds the
1322 * IORB_DONE flag.
1323 *
1324 * This function is expected to be called *without* the corresponding
1325 * driver-level drv_lock acquired. It will acquire the spinlock before
1326 * updating the IORB queue and release it before notifying the upstream
1327 * code in order to prevent deadlocks.
1328 *
1329 * Due to this logic, this function is only good for simple task-time
1330 * completions. Functions working on lists of IORBs (such as interrupt
1331 * handlers or context hooks) should call iorb_complete() directly and
1332 * implement their own logic for removing the IORB from the port queue.
1333 * See abort_ctxhook() for an example.
1334 */
1335void iorb_done(IORBH _far *iorb)
1336{
1337 int a = iorb_unit_adapter(iorb);
1338 int p = iorb_unit_port(iorb);
1339
1340 /* remove IORB from corresponding queue */
1341 spin_lock(drv_lock);
1342 if (iorb_driver_level(iorb)) {
1343 iorb_queue_del(&driver_queue, iorb);
1344 } else {
1345 iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, iorb);
1346 }
1347 aws_free(add_workspace(iorb));
1348 spin_unlock(drv_lock);
1349
1350 iorb_complete(iorb);
1351}
1352
1353/******************************************************************************
1354 * Complete an IORB. This should be called without the adapter-level spinlock
1355 * to allow the IORB completion routine to perform whatever processing it
1356 * requires. This implies that the IORB should no longer be in any global
1357 * queue because the IORB completion routine may well reuse the IORB and send
1358 * the next request to us before even returning from this function.
1359 */
1360void iorb_complete(IORBH _far *iorb)
1361{
1362 iorb->Status |= IORB_DONE;
1363
1364 ddprintf("IORB %Fp complete (status = 0x%04x, error = 0x%04x)\n",
1365 iorb, iorb->Status, iorb->ErrorCode);
1366
1367 if (iorb->RequestControl & IORB_ASYNC_POST) {
1368 iorb->NotifyAddress(iorb);
1369 }
1370}
1371
1372/******************************************************************************
1373 * Requeue the specified IORB such that it will be sent downstream for
1374 * processing again. This includes freeing all resources currently allocated
1375 * (timer, buffer, ...) and resetting the flags to 0. The driver-level
1376 * spinlock must be acquired when calling this function.
1377 *
1378 * The following flags are preserved:
1379 * - no_ncq
1380 */
1381void iorb_requeue(IORBH _far *iorb)
1382{
1383 ADD_WORKSPACE _far *aws = add_workspace(iorb);
1384 u16 no_ncq = aws->no_ncq;
1385 u16 unaligned = aws->unaligned;
1386 u16 retries = aws->retries;
1387
1388 aws_free(aws);
1389 memset(aws, 0x00, sizeof(*aws));
1390
1391 aws->no_ncq = no_ncq;
1392 aws->unaligned = unaligned;
1393 aws->retries = retries;
1394}
1395
1396/******************************************************************************
1397 * Free resources in ADD workspace (timer, buffer, ...). This function should
1398 * be called with the spinlock held to prevent race conditions.
1399 */
1400void aws_free(ADD_WORKSPACE _far *aws)
1401{
1402 if (aws->timer != 0) {
1403 ADD_CancelTimer(aws->timer);
1404 aws->timer = 0;
1405 }
1406
1407 if (aws->buf != NULL) {
1408 free(aws->buf);
1409 aws->buf = NULL;
1410 }
1411}
1412
1413/******************************************************************************
1414 * Lock the adapter, waiting for availability if necessary. This is expected
1415 * to be called at task/request time without the driver-level spinlock
1416 * acquired. Don't call at interrupt time.
1417 */
1418void lock_adapter(AD_INFO *ai)
1419{
1420 TIMER Timer;
1421
1422 spin_lock(drv_lock);
1423 while (ai->busy) {
1424 spin_unlock(drv_lock);
1425 timer_init(&Timer, 250);
1426 while (!timer_check_and_block(&Timer));
1427 spin_lock(drv_lock);
1428 }
1429 ai->busy = 1;
1430 spin_unlock(drv_lock);
1431}
1432
1433/******************************************************************************
1434 * Unlock adapter (i.e. reset busy flag)
1435 */
1436void unlock_adapter(AD_INFO *ai)
1437{
1438 ai->busy = 0;
1439}
1440
1441/******************************************************************************
1442 * Timeout handler for I/O commands. Since timeout handling can involve
1443 * lengthy operations like port resets, the main code is located in a
1444 * separate function which is invoked via a context hook.
1445 */
1446void _cdecl _far timeout_callback(ULONG timer_handle, ULONG p1, ULONG p2)
1447{
1448 IORBH _far *iorb = (IORBH _far *) p1;
1449 int a = iorb_unit_adapter(iorb);
1450 int p = iorb_unit_port(iorb);
1451
1452 ADD_CancelTimer(timer_handle);
1453 dprintf("timeout for IORB %Fp\n", iorb);
1454
1455 /* Move the timed-out IORB to the abort queue. Since it's possible that the
1456 * IORB has completed after the timeout has expired but before we got to
1457 * this line of code, we'll check the return code of iorb_queue_del(): If it
1458 * returns an error, the IORB must have completed a few microseconds ago and
1459 * there is no timeout.
1460 */
1461 spin_lock(drv_lock);
1462 if (iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, iorb) == 0) {
1463 iorb_queue_add(&abort_queue, iorb);
1464 iorb->ErrorCode = IOERR_ADAPTER_TIMEOUT;
1465 }
1466 spin_unlock(drv_lock);
1467
1468 /* Trigger abort processing function. We don't really care whether this
1469 * succeeds because the only reason why it would fail should be multiple
1470 * calls to DevHelp_ArmCtxHook() before the context hook had a chance to
1471 * start executing, which leaves two scenarios:
1472 *
1473 * - We succeeded in arming the context hook. Fine.
1474 *
1475 * - We armed the context hook a second time before it had a chance to
1476 * start executing. In this case, the already scheduled context hook
1477 * will process our IORB as well.
1478 */
1479 DevHelp_ArmCtxHook(0, reset_ctxhook_h);
1480
1481 /* Set up a watchdog timer which calls the context hook manually in case
1482 * some kernel thread is looping around the IORB_COMPLETE status bit
1483 * without yielding the CPU (kernel threads don't preempt). This shouldn't
1484 * happen per design because kernel threads are supposed to yield but it
1485 * does in the early boot phase.
1486 */
1487 ADD_StartTimerMS(&th_reset_watchdog, 5000, (PFN) reset_watchdog, 0, 0);
1488}
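
/* Illustrative sketch (assumed; the actual call sits in the command setup code
 * elsewhere in this driver) of how the timeout above is armed. The IORB's far
 * pointer travels in the first user parameter, p1, so it can be recovered in
 * timeout_callback(); iorb->Timeout is assumed to be in seconds:
 *
 *   ADD_StartTimerMS(&add_workspace(iorb)->timer, iorb->Timeout * 1000UL,
 *                    (PFN) timeout_callback, (ULONG) (void _far *) iorb, 0);
 */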
1489
1490/******************************************************************************
1491 * Reset handler watchdog. If a timeout occurs, a context hook is armed which
1492 * will execute as soon as a kernel thread yields the CPU. However, some
1493 * kernel components won't yield the CPU during the early boot phase and the
1494 * only way to kick some sense into those components is to run the context
1495 * hook right inside this timer callback. Not exactly pretty, especially
1496 * considering the fact that context hooks were implemented to prevent running
1497 * lengthy operations like a port reset at interrupt time, but without this
1498 * watchdog mechanism we run the risk of getting completely stalled by device
1499 * problems during the early boot phase.
1500 */
1501void _cdecl _far reset_watchdog(ULONG timer_handle, ULONG p1, ULONG p2)
1502{
1503 /* reset watchdog timer */
1504 ADD_CancelTimer(timer_handle);
1505 dprintf("reset watchdog invoked\n");
1506
1507 /* call context hook manually */
1508 reset_ctxhook(0);
1509}
1510
1511/******************************************************************************
1512 * small_code_ - this dummy func resolves the undefined reference linker
1513 * error that occurs when linking WATCOM objects with DDK's link.exe
1514 */
1515void _cdecl small_code_(void)
1516{
1517}
1518
1519/******************************************************************************
1520 * Add unit info to ADAPTERINFO array (IOCC_GET_DEVICE_TABLE requests). The
1521 * adapter info array in the device table, dt->pAdapter[], is expected to be
1522 * initialized for the specified index (dta).
1523 *
1524 * Please note that the device table adapter index, dta, is not always equal
1525 * to the physical adapter index, a: if SCSI emulation has been activated, the
1526 * last reported adapter is a virtual SCSI adapter and the physical adapter
1527 * indexes for those units are, of course, different from the device table
1528 * index of the virtual SCSI adapter.
1529 */
1530static int add_unit_info(IORB_CONFIGURATION _far *iorb_conf, int dta,
1531 int a, int p, int d, int scsi_id)
1532{
1533 DEVICETABLE _far *dt = iorb_conf->pDeviceTable;
1534 ADAPTERINFO _far *ptr = (ADAPTERINFO _far *) (((u32) dt & 0xffff0000U) +
1535 (u16) dt->pAdapter[dta]);
1536 UNITINFO _far *ui = ptr->UnitInfo + ptr->AdapterUnits;
1537 AD_INFO *ai = ad_infos + a;
1538
1539 if ((u32) (ui + 1) - (u32) dt > iorb_conf->DeviceTableLen) {
1540 dprintf("error: device table provided by DASD too small\n");
1541 iorb_seterr(&iorb_conf->iorbh, IOERR_CMD_SW_RESOURCE);
1542 return(-1);
1543 }
1544
1545 if (ai->ports[p].devs[d].unit_info == NULL) {
1546 /* provide original information about this device (unit) */
1547 memset(ui, 0x00, sizeof(*ui));
1548 ui->AdapterIndex = dta; /* device table adapter index */
1549 ui->UnitHandle = iorb_unit(a, p, d); /* encodes physical adapter/port/device */
1550 ui->UnitIndex = ptr->AdapterUnits;
1551 ui->UnitType = ai->ports[p].devs[d].dev_type;
1552 ui->QueuingCount = ai->ports[p].devs[d].ncq_max;
1553 if (ai->ports[p].devs[d].removable) {
1554 ui->UnitFlags |= UF_REMOVABLE;
1555 }
1556 if (scsi_id > 0) {
1557 /* set fake SCSI ID for this unit */
1558 ui->UnitSCSITargetID = scsi_id;
1559 }
1560 } else {
1561 /* copy updated device (unit) information (IOCM_CHANGE_UNITINFO) */
1562 memcpy(ui, ai->ports[p].devs[d].unit_info, sizeof(*ui));
1563 }
1564
1565 ptr->AdapterUnits++;
1566 return(0);
1567}
1568
1569/*******************************************************************************
1570 * Register kernel exit handler for trap dumps. Our exit handler will be called
1571 * right before the kernel starts a dump; that's where we reset the controller
1572 * so it supports BIOS int13 I/O calls.
1573 */
1574static void register_krnl_exit(void)
1575{
1576 _asm {
1577 push ds
1578 push es
1579 push bx
1580 push si
1581 push di
1582
1583 mov ax, FLAG_KRNL_EXIT_ADD
1584 mov cx, TYPE_KRNL_EXIT_INT13
1585 mov bx, SEG asm_krnl_exit
1586 mov si, OFFSET asm_krnl_exit
1587 mov dl, DevHlp_RegisterKrnlExit
1588
1589 call dword ptr [Device_Help]
1590
1591 pop di
1592 pop si
1593 pop bx
1594 pop es
1595 pop ds
1596 }
1597
1598 dprintf("Registered kernel exit routine for INT13 mode\n");
1599}
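
/* Register interface used above (as set up in the asm; this DevHlp is
 * presumably absent from the DDK headers, hence the local constants at the
 * top of this file):
 *
 *   AX = FLAG_KRNL_EXIT_ADD       BX:SI = far address of asm_krnl_exit
 *   CX = TYPE_KRNL_EXIT_INT13     DL    = DevHlp_RegisterKrnlExit (0x6f)
 *
 * A corresponding call with FLAG_KRNL_EXIT_REMOVE would presumably remove the
 * handler again.
 */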
1600