source: trunk/src/os2ahci/os2ahci.c@ 166

Last change on this file since 166 was 166, checked in by David Azarewicz, 12 years ago

added ability to ignore individual ports

File size: 52.1 KB
1/******************************************************************************
2 * os2ahci.c - main file for os2ahci driver
3 *
4 * Copyright (c) 2011 thi.guten Software Development
5 * Copyright (c) 2011 Mensys B.V.
6 * Portions copyright (c) 2013 David Azarewicz
7 *
8 * Authors: Christian Mueller, Markus Thielen
9 *
10 * Parts copied from/inspired by the Linux AHCI driver;
11 * those parts are (c) Linux AHCI/ATA maintainers
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include "os2ahci.h"
29#include "ioctl.h"
30#include "version.h"
31
32/* -------------------------- macros and constants ------------------------- */
33
34/* parse integer command line parameter */
35#define drv_parm_int(s, value, type, radix) \
36 { \
37 char _far *_ep; \
38 if ((s)[1] != ':') { \
39 cprintf("%s: missing colon (:) after /%c\n", drv_name, *(s)); \
40 goto init_fail; \
41 } \
42 value = (type) strtol((s) + 2, \
43 (const char _far* _far*) &_ep, \
44 radix); \
45 s = _ep; \
46 }
47
48#define drv_parm_int_optional(s, value, type, radix) \
49 { \
50 char _far *_ep; \
51 if ((s)[1] == ':') { \
52 value = (type) strtol((s) + 2, (const char _far* _far*) &_ep, radix); \
53 s = _ep; \
54 } else { \
55 value++; \
56 } \
57 }
58
59/* set two-dimensional array of port options */
60#define set_port_option(opt, val) \
61 if (adapter_index == -1) { \
62 /* set option for all adapters and ports */ \
63 memset(opt, val, sizeof(opt)); \
64 } else if (port_index == -1) { \
65 /* set option for all ports on current adapter */ \
66 memset(opt[adapter_index], val, sizeof(*opt)); \
67 } else { \
68 /* set option for specific port */ \
69 opt[adapter_index][port_index] = val; \
70 }
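/* Example: with "/A:1 /P:3 /N" on the command line, the parser below ends up
 * calling set_port_option(enable_ncq, 1) with adapter_index == 1 and
 * port_index == 3, touching only enable_ncq[1][3]; "/N" without a preceding
 * /A or /P enables NCQ for every adapter and port.
 */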
71
72/* constants for undefined kernel exit routine;
73 * see register_krnl_exit() func */
74#define DevHlp_RegisterKrnlExit 0x006f
75
76#define FLAG_KRNL_EXIT_ADD 0x1000
77#define FLAG_KRNL_EXIT_REMOVE 0x2000
78
79#define TYPE_KRNL_EXIT_NMI 0x0000 /* non-maskable interrupts */
80#define TYPE_KRNL_EXIT_SFF 0x0001 /* system fatal faults */
81#define TYPE_KRNL_EXIT_PROCDUMP 0x0002
82#define TYPE_KRNL_EXIT_DYN 0x0003
83#define TYPE_KRNL_EXIT_INT13 0x0004 /* enable int13 IO */
84
85/* ------------------------ typedefs and structures ------------------------ */
86
87/* -------------------------- function prototypes -------------------------- */
88
89void _cdecl small_code_ (void);
90
91static int add_unit_info (IORB_CONFIGURATION _far *iorb_conf, int dt_ai,
92 int a, int p, int d, int scsi_id);
93
94static void register_krnl_exit (void);
95
96/* ------------------------ global/static variables ------------------------ */
97
98int debug = 0; /* if > 0, print debug messages to COM1 */
99int thorough_scan = 1; /* if != 0, perform thorough PCI scan */
100int init_reset = 1; /* if != 0, reset ports during init */
101int force_write_cache; /* if != 0, force write cache */
102int verbosity = 0; /* default is quiet. 1=show sign-on banner, >1=show adapter info during boot */
103int use_lvm_info = 1;
104int wrap_trace_buffer = 0;
105long com_baud = 0;
106
107PFN Device_Help = 0; /* pointer to device helper entry point */
108ULONG RMFlags = 0; /* required by resource manager library */
109PFN RM_Help0 = NULL; /* required by resource manager library */
110PFN RM_Help3 = NULL; /* required by resource manager library */
111HDRIVER rm_drvh; /* resource manager driver handle */
112char rm_drvname[80]; /* driver name as returned by RM */
113USHORT add_handle; /* driver handle (RegisterDeviceClass) */
114UCHAR timer_pool[TIMER_POOL_SIZE]; /* timer pool */
115char drv_name[] = "OS2AHCI"; /* driver name as string */
116
117/* resource manager driver information structure */
118DRIVERSTRUCT rm_drvinfo = {
119 drv_name, /* driver name */
120 "AHCI SATA Driver", /* driver description */
121 DVENDOR, /* vendor name */
122 CMVERSION_MAJOR, /* RM interface version major */
123 CMVERSION_MINOR, /* RM interface version minor */
124 BLD_YEAR, BLD_MONTH, BLD_DAY, /* date */
125 0, /* driver flags */
126 DRT_ADDDM, /* driver type */
127 DRS_ADD, /* driver sub type */
128 NULL /* driver callback */
129};
130
131ULONG drv_lock; /* driver-level spinlock */
132IORB_QUEUE driver_queue; /* driver-level IORB queue */
133AD_INFO ad_infos[MAX_AD]; /* adapter information list */
134int ad_info_cnt; /* number of entries in ad_infos[] */
135u16 ad_ignore; /* bitmap with adapter indexes to ignore */
136int init_complete; /* if != 0, initialization has completed */
137int suspended;
138int resume_sleep_flag;
139
140/* adapter/port-specific options saved when parsing the command line */
141u8 emulate_scsi[MAX_AD][AHCI_MAX_PORTS];
142u8 enable_ncq[MAX_AD][AHCI_MAX_PORTS];
143u8 link_speed[MAX_AD][AHCI_MAX_PORTS];
144u8 link_power[MAX_AD][AHCI_MAX_PORTS];
145u8 track_size[MAX_AD][AHCI_MAX_PORTS];
146u8 port_ignore[MAX_AD][AHCI_MAX_PORTS];
147
148static char init_msg[] = "%s driver version %d.%02d\n";
149static char exit_msg[] = "%s driver *not* installed\n";
150char BldLevel[] = BLDLEVEL;
151
152/* ----------------------------- start of code ----------------------------- */
153
154/******************************************************************************
155 * OS/2 device driver main strategy function. This function is only used
156 * for initialization purposes; all other calls go directly to the adapter
157 * device driver's strategy function.
158 *
159 * NOTE: this is also used as the IDC entry point. We expect an IOCTL request
160 * packet for IDC calls, so they can be handled by gen_ioctl.
161 */
162USHORT _cdecl c_strat(RPH _far *req)
163{
164 u16 rc;
165
166 switch (req->Cmd) {
167
168 case CMDInitBase:
169 rc = init_drv((RPINITIN _far *) req);
170 break;
171
172 case CMDShutdown:
173 rc = exit_drv(((RPSAVERESTORE _far *) req)->FuncCode);
174 break;
175
176 case CMDGenIOCTL:
177 rc = gen_ioctl((RP_GENIOCTL _far *) req);
178 break;
179
180 case CMDINPUT:
181 rc = char_dev_input((RP_RWV _far *) req);
182 break;
183
184 case CMDSaveRestore:
185 rc = sr_drv(((RPSAVERESTORE _far *) req)->FuncCode);
186 break;
187
188 default:
189 rc = STDON | STATUS_ERR_UNKCMD;
190 break;
191 }
192
193 return(rc);
194}
195
196/******************************************************************************
197 * Initialize the os2ahci driver. This includes command line parsing, scanning
198 * the PCI bus for supported AHCI adapters, etc.
199 */
200USHORT init_drv(RPINITIN _far *req)
201{
202 static int init_drv_called;
203 static int init_drv_failed;
204 RPINITOUT _far *rsp = (RPINITOUT _far *) req;
205 DDD_PARM_LIST _far *ddd_pl = (DDD_PARM_LIST _far *) req->InitArgs;
206 APIRET rmrc;
207 char _far *cmd_line;
208 char _far *s;
209 int adapter_index = -1;
210 int port_index = -1;
211 int invert_option;
212 int optval;
213 u16 vendor;
214 u16 device;
215
216 if (init_drv_called) {
217 /* This is the init call for the second (legacy IBMS506$) character
218 * device driver. If the main driver failed initialization, fail this
219 * one as well.
220 */
221 rsp->CodeEnd = (u16) end_of_code;
222 rsp->DataEnd = (u16) &end_of_data;
223 return(STDON | ((init_drv_failed) ? ERROR_I24_QUIET_INIT_FAIL : 0));
224 }
225 init_drv_called = 1;
226 suspended = 0;
227 resume_sleep_flag = 0;
228 memset(ad_infos, 0, sizeof(ad_infos));
229 memset(emulate_scsi, 1, sizeof(emulate_scsi)); /* set default enabled */
230
231 /* set device helper entry point */
232 Device_Help = req->DevHlpEP;
233
234 /* create driver-level spinlock */
235 DevHelp_CreateSpinLock(&drv_lock);
236
237 /* initialize libc code */
238 init_libc();
239
240 /* register driver with resource manager */
241 if ((rmrc = RMCreateDriver(&rm_drvinfo, &rm_drvh)) != RMRC_SUCCESS) {
242 cprintf("%s: failed to register driver with resource manager (rc = %d)\n",
243 drv_name, rmrc);
244 goto init_fail;
245 }
246
247 /* parse command line parameters */
248 cmd_line = (char _far *) ((u32) ddd_pl & 0xffff0000l) + ddd_pl->cmd_line_args;
249
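  /* A typical BASEDEV line (assuming the driver is installed as OS2AHCI.ADD)
   * might look like:
   *
   *   BASEDEV=OS2AHCI.ADD /V /A:0 /P:2 /I
   *
   * i.e. verbose boot messages, and ignore port 2 on adapter 0. The loop
   * below parses switches one character at a time; a leading '!' inverts
   * boolean options (e.g. /!R disables the initial port reset).
   */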
250 for (s = cmd_line; *s != 0; s++) {
251 if (*s == '/') {
252 if ((invert_option = (s[1] == '!')) != 0) {
253 s++;
254 }
255 s++;
256 switch (tolower(*s)) {
257
258 case '\0':
259 /* end of command line; can only happen if command line is incorrect */
260 cprintf("%s: incomplete command line option\n", drv_name);
261 goto init_fail;
262
263 case 'b':
264 drv_parm_int(s, com_baud, u32, 10);
265 break;
266
267 case 'c':
268 /* set COM port base address for debug messages */
269 drv_parm_int(s, com_base, u16, 16);
270 if (com_base == 1) com_base = 0x3f8;
271 if (com_base == 2) com_base = 0x2f8;
272 break;
273
274 case 'd':
275 /* increase debug level */
276 drv_parm_int_optional(s, debug, int, 10);
277 break;
278
279 case 'g':
280 /* add specified PCI ID as a supported generic AHCI adapter */
281 drv_parm_int(s, vendor, u16, 16);
282 s--;
283 drv_parm_int(s, device, u16, 16);
284 if (add_pci_id(vendor, device)) {
285 cprintf("%s: failed to add PCI ID %04x:%04x\n", drv_name, vendor, device);
286 goto init_fail;
287 }
288 thorough_scan = 1;
289 break;
290
291 case 't':
292 /* perform thorough PCI scan (i.e. look for individual supported PCI IDs) */
293 thorough_scan = !invert_option;
294 break;
295
296 case 'r':
297 /* reset ports during initialization */
298 init_reset = !invert_option;
299 break;
300
301 case 'f':
302 /* force write cache regardless of IORB flags */
303 force_write_cache = 1;
304 break;
305
306 case 'a':
307 /* set adapter index for adapter and port-related options */
308 drv_parm_int(s, adapter_index, int, 10);
309 if (adapter_index < 0 || adapter_index >= MAX_AD) {
310 cprintf("%s: invalid adapter index (%d)\n", drv_name, adapter_index);
311 goto init_fail;
312 }
313 break;
314
315 case 'p':
316 /* set port index for port-related options */
317 drv_parm_int(s, port_index, int, 10);
318 if (port_index < 0 || port_index >= AHCI_MAX_PORTS) {
319 cprintf("%s: invalid port index (%d)\n", drv_name, port_index);
320 goto init_fail;
321 }
322 break;
323
324 case 'i':
325 /* ignore current adapter index */
326 if (adapter_index >= 0) {
327 if (port_index >= 0) port_ignore[adapter_index][port_index] = !invert_option;
328 else ad_ignore |= 1U << adapter_index;
329 }
330 break;
331
332 case 's':
333 /* enable SCSI emulation for ATAPI devices */
334 set_port_option(emulate_scsi, !invert_option);
335 break;
336
337 case 'n':
338 /* enable NCQ */
339 set_port_option(enable_ncq, !invert_option);
340 break;
341
342 case 'l':
343 /* set link speed or power savings */
344 s++;
345 switch (tolower(*s)) {
346 case 's':
347 /* set link speed */
348 drv_parm_int(s, optval, int, 10);
349 set_port_option(link_speed, optval);
350 break;
351 case 'p':
352 /* set power management */
353 drv_parm_int(s, optval, int, 10);
354 set_port_option(link_power, optval);
355 break;
356 default:
357 cprintf("%s: invalid link parameter (%c)\n", drv_name, *s);
358 goto init_fail;
359 }
360 /* need to reset the port in order to establish link settings */
361 init_reset = 1;
362 break;
363
364 case '4':
365 /* enable 4K sector geometry enhancement (track size = 56) */
366 if (!invert_option) {
367 set_port_option(track_size, 56);
368 }
369 break;
370
371 case 'z':
372 /* Specify to not use the LVM information. There is no reason why anyone would
373 * want to do this, but previous versions of this driver did not have LVM capability,
374 * so this switch is here temporarily just in case.
375 */
376 use_lvm_info = !invert_option;
377 break;
378
379 case 'v':
380 /* be verbose during boot */
381 drv_parm_int_optional(s, verbosity, int, 10);
382 break;
383
384 case 'w':
385 /* Specify to allow the trace buffer to wrap when full. */
386 wrap_trace_buffer = !invert_option;
387 break;
388
389 case 'q':
390 /* Temporarily output a non-fatal message to get anyone using this
391 * undocumented switch to stop using it. This will be removed soon
392 * and the error will become fatal.
393 */
394 cprintf("%s: unknown option: /%c\n", drv_name, *s);
395 break;
396
397 default:
398 cprintf("%s: unknown option: /%c\n", drv_name, *s);
399 goto init_fail;
400 }
401 }
402 }
403
404 if (com_baud) init_com(com_baud); /* initialize com port for debug output */
405
406 /* initialize trace buffer if applicable */
407 if (TRACE_ACTIVE) {
408 /* debug is on, but COM port is off -> use our trace buffer */
409 trace_init(AHCI_TRACE_BUF_SIZE);
410 } else {
411 trace_init(AHCI_INFO_BUF_SIZE);
412 }
413
414 ntprintf("BldLevel: %s\n", BldLevel);
415 ntprintf("CmdLine: %Fs\n", cmd_line);
416
417 /* print initialization message */
418 ciprintf(init_msg, drv_name, VERSION / 100, VERSION % 100);
419
420 /* scan PCI bus for supported devices */
421 scan_pci_bus();
422
423 if (ad_info_cnt > 0) {
424 /* initialization succeeded and we found at least one AHCI adapter */
425 ADD_InitTimer(timer_pool, sizeof(timer_pool));
426 //NOT_USED mdelay_cal();
427
428 if (DevHelp_RegisterDeviceClass(drv_name, (PFN) add_entry, 0, 1, &add_handle)) {
429 cprintf("%s: couldn't register device class\n", drv_name);
430 goto init_fail;
431 }
432
433 /* allocate context hooks */
434 if (DevHelp_AllocateCtxHook(mk_NPFN(restart_hook), &restart_ctxhook_h) != 0 ||
435 DevHelp_AllocateCtxHook(mk_NPFN(reset_hook), &reset_ctxhook_h) != 0 ||
436 DevHelp_AllocateCtxHook(mk_NPFN(engine_hook), &engine_ctxhook_h)) {
437 cprintf("%s: failed to allocate task-time context hooks\n", drv_name);
438 goto init_fail;
439 }
440
441 rsp->CodeEnd = (u16) end_of_code;
442 rsp->DataEnd = (u16) &end_of_data;
443
444 /* register kernel exit routine for trap dumps */
445 register_krnl_exit();
446
447 return(STDON);
448
449 } else {
450 /* no adapters found */
451 ciprintf(" No adapters found.\n");
452 }
453
454init_fail:
455 /* initialization failed; set segment sizes to 0 and return error */
456 rsp->CodeEnd = 0;
457 rsp->DataEnd = 0;
458 init_drv_failed = 1;
459
460 /* free context hooks */
461 if (engine_ctxhook_h != 0) DevHelp_FreeCtxHook(engine_ctxhook_h);
462 if (reset_ctxhook_h != 0) DevHelp_FreeCtxHook(reset_ctxhook_h);
463 if (restart_ctxhook_h != 0) DevHelp_FreeCtxHook(restart_ctxhook_h);
464
465 if (rm_drvh != 0) {
466 /* remove driver from resource manager */
467 RMDestroyDriver(rm_drvh);
468 }
469
470 ciprintf(exit_msg, drv_name);
471 return(STDON | ERROR_I24_QUIET_INIT_FAIL);
472}
473
474/******************************************************************************
475 * Generic IOCTL via character device driver. IOCTLs are used to control the
476 * driver operation and to execute native ATA and ATAPI (SCSI) commands from
477 * ring 3 applications. On top of that, some predefined IOCTLs (e.g. SMART
478 * commands for ATA disks) are implemented here.
479 */
480USHORT gen_ioctl(RP_GENIOCTL _far *ioctl)
481{
482 dprintf("IOCTL 0x%x/0x%x\n", (u16) ioctl->Category, (u16) ioctl->Function);
483
484 switch (ioctl->Category) {
485
486 case OS2AHCI_IOCTL_CATEGORY:
487 switch (ioctl->Function) {
488
489 case OS2AHCI_IOCTL_GET_DEVLIST:
490 return(ioctl_get_devlist(ioctl));
491
492 case OS2AHCI_IOCTL_PASSTHROUGH:
493 return(ioctl_passthrough(ioctl));
494
495 }
496 break;
497
498 case DSKSP_CAT_GENERIC:
499 return(ioctl_gen_dsk(ioctl));
500
501 case DSKSP_CAT_SMART:
502 return(ioctl_smart(ioctl));
503
504 }
505
506 return(STDON | STATUS_ERR_UNKCMD);
507}
508
509/******************************************************************************
510 * Read from character device. If tracing is on (internal ring buffer trace),
511 * we return data from the trace buffer; if not, we might return a device
512 * dump similar to IBM1S506.ADD/DANIS506.ADD (TODO).
513 */
514USHORT char_dev_input(RP_RWV _far *rwrb)
515{
516 return(trace_char_dev(rwrb));
517}
518
519/******************************************************************************
520 * Device driver exit handler. This handler is called when OS/2 shuts down and
521 * flushes the write caches of all attached devices. Since this is effectively
522 * the same as what we do when suspending, we'll call out to the corresponding
523 * suspend function.
524 *
525 * NOTE: Errors are ignored because there's no way we could stop the shutdown
526 * or do something about the error, unless retrying endlessly is
527 * considered an option.
528 */
529USHORT exit_drv(int func)
530{
531 dprintf("exit_drv(%d) called\n", func);
532
533 if (func == 0) {
534 /* we're only interested in the second phase of the shutdown */
535 return(STDON);
536 }
537
538 suspend();
539 return(STDON);
540}
541
542/******************************************************************************
543 * Device driver suspend/resume handler. This handler is called when ACPI is
544 * executing a suspend or resume.
545 */
546USHORT sr_drv(int func)
547{
548 dprintf("sr_drv(%d) called\n", func);
549
550 if (func) resume();
551 else suspend();
552
553 return(STDON);
554}
555
556/******************************************************************************
557 * ADD entry point. This is the main entry point for all ADD requests. Due to
558 * the asynchronous nature of ADD drivers, this function primarily queues the
559 * IORB(s) to the corresponding adapter or port queues, then triggers the
560 * state machine to initiate processing queued IORBs.
561 *
562 * NOTE: In order to prevent race conditions or engine stalls, certain rules
563 * around locking, unlocking and IORB handling in general have been
564 * established. Refer to the comments in "trigger_engine()" for
565 * details.
566 */
567void _cdecl _far _loadds add_entry(IORBH _far *first_iorb)
568{
569 IORBH _far *iorb;
570 IORBH _far *next = NULL;
571
572 spin_lock(drv_lock);
573
574 for (iorb = first_iorb; iorb != NULL; iorb = next) {
575 /* Queue this IORB. Queues primarily exist on port level but there are
576 * some requests which affect the whole driver, most notably
577 * IOCC_CONFIGURATION. In either case, adding the IORB to the driver or
578 * port queue will change the links, thus we need to save the original
579 * link in 'next'.
580 */
581 next = (iorb->RequestControl & IORB_CHAIN) ? iorb->pNxtIORB : 0;
582
583 iorb->Status = 0;
584 iorb->ErrorCode = 0;
585 memset(&iorb->ADDWorkSpace, 0x00, sizeof(ADD_WORKSPACE));
586
587 if (iorb_driver_level(iorb)) {
588 /* driver-level IORB */
589 iorb->UnitHandle = 0;
590 iorb_queue_add(&driver_queue, iorb);
591
592 } else {
593 /* port-level IORB */
594 int a = iorb_unit_adapter(iorb);
595 int p = iorb_unit_port(iorb);
596 int d = iorb_unit_device(iorb);
597
598 if (a >= ad_info_cnt ||
599 p > ad_infos[a].port_max ||
600 d > ad_infos[a].ports[p].dev_max ||
601 (ad_infos[a].port_map & (1UL << p)) == 0) {
602
603 /* unit handle outside of the allowed range */
604 dprintf("warning: IORB for %d.%d.%d out of range\n", a, p, d);
605 iorb->Status = IORB_ERROR;
606 iorb->ErrorCode = IOERR_CMD_SYNTAX;
607 iorb_complete(iorb);
608 continue;
609 }
610
611 iorb_queue_add(&ad_infos[a].ports[p].iorb_queue, iorb);
612 }
613 }
614
615 /* trigger state machine */
616 trigger_engine();
617
618 spin_unlock(drv_lock);
619}
620
621/******************************************************************************
622 * Trigger IORB queue engine. This is a wrapper function for trigger_engine_1()
623 * which will try to get all IORBs sent on their way a couple of times. If
624 * there are still IORBs ready for processing after this, this function will
625 * hand off to a context hook which will continue to trigger the engine until
626 * all IORBs have been sent.
627 *
628 * NOTE: While initialization has not completed (or during suspend/resume
629 * operations), this function will loop indefinitely because we can't
630 * rely on interrupt handlers or context hooks and complex IORBs
631 * requiring multiple requeues would eventually hang and time out if
632 * we stopped triggering here.
633 */
634void trigger_engine(void)
635{
636 int i;
637
638 for (i = 0; i < 3 || !init_complete; i++) {
639 if (trigger_engine_1() == 0) {
640 /* done -- all IORBs have been sent on their way */
641 return;
642 }
643 }
644
645 /* Something keeps bouncing; hand off to the engine context hook which will
646 * keep trying in the background.
647 */
648 DevHelp_ArmCtxHook(0, engine_ctxhook_h);
649}
650
651/******************************************************************************
652 * Trigger IORB queue engine in order to send commands in the driver/port IORB
653 * queues to the AHCI hardware. This function will return the number of IORBs
654 * sent. Keep in mind that IORBs might "bounce" if the adapter/port is not in
655 * a state to accept the command, thus it might take quite a few calls to get
656 * all IORBs on their way. This is why there's a wrapper function which tries
657 * it a few times, then hands off to a context hook which will keep trying in
658 * the background.
659 *
660 * IORBs might complete before send_iorb() has returned, at any time during
661 * interrupt processing or on another CPU on SMP systems. IORB completion
662 * means modifications to the corresponding IORB queue (the completed IORB
663 * is removed from the queue) thus we need to protect the IORB queues from
664 * race conditions. The safest approach short of keeping the driver-level
665 * spinlock acquired permanently is to keep it throughout this function and
666 * release it temporarily in send_iorb().
667 *
668 * This implies that the handler functions are fully responsible for acquiring
669 * the driver-level spinlock when they need it, and for releasing it again.
670 *
671 * As a rule of thumb, get the driver-level spinlock whenever accessing
672 * volatile variables (IORB queues, values in ad_info[], ...).
673 *
674 * Additional Notes:
675 *
676 * - This function is expected to be called with the spinlock acquired
677 *
678 * - Adapters can be flagged as 'busy' which means no new IORBs are sent (they
679 * just remain in the queue). This can be used to release the driver-level
680 * spinlock while making sure no new IORBs are going to hit the hardware.
681 * In order to prevent engine stalls, all handlers using this functionality
682 * need to invoke trigger_engine() after resetting the busy flag.
683 *
684 * - Driver-level IORBs are not synchronized by adapter-level 'busy' flags.
685 * However, the driver-level queue is worked "one entry at a time" which
686 * means that no new IORBs will be queued on the driver-level queue until
687 * the head element has completed processing. This means that driver-
688 * level IORB handlers don't need to protect against each other. But they
689 *   do need to keep in mind interference with port-level IORBs:
690 *
691 * - Driver-level IORB handlers must obtain the spinlock and/or flag all
692 * adapters as 'busy' which are affected by the driver-level IORB
693 *
694 * - Driver-level IORB handlers must not access the hardware of a
695 * particular adapter if it's flagged as 'busy' by another IORB.
696 */
697int trigger_engine_1(void)
698{
699 IORBH _far *iorb;
700 IORBH _far *next;
701 int iorbs_sent = 0;
702 int a;
703 int p;
704
705 iorbs_sent = 0;
706
707 /* process driver-level IORBs */
708 if ((iorb = driver_queue.root) != NULL && !add_workspace(iorb)->processing) {
709 send_iorb(iorb);
710 iorbs_sent++;
711 }
712
713 /* process port-level IORBs */
714 for (a = 0; a < ad_info_cnt; a++) {
715 AD_INFO *ai = ad_infos + a;
716 if (ai->busy) {
717 /* adapter is busy; don't process any IORBs */
718 continue;
719 }
720 for (p = 0; p <= ai->port_max; p++) {
721 /* send all queued IORBs on this port */
722 next = NULL;
723 for (iorb = ai->ports[p].iorb_queue.root; iorb != NULL; iorb = next) {
724 next = iorb->pNxtIORB;
725 if (!add_workspace(iorb)->processing) {
726 send_iorb(iorb);
727 iorbs_sent++;
728 }
729 }
730 }
731 }
732
733 return(iorbs_sent);
734}
735
736/******************************************************************************
737 * Send a single IORB to the corresponding AHCI adapter/port. This is just a
738 * switch board for calling the corresponding iocc_*() handler function.
739 *
740 * NOTE: This function is expected to be called with the driver-level spinlock
741 *       acquired. It will release it before calling any of the handler
742 *       functions and re-acquire it when done.
743 */
744void send_iorb(IORBH _far *iorb)
745{
746 /* Mark IORB as "processing" before doing anything else. Once the IORB is
747 * marked as "processing", we can release the spinlock because subsequent
748 * invocations of trigger_engine() (e.g. at interrupt time) will ignore this
749 * IORB.
750 */
751 add_workspace(iorb)->processing = 1;
752 spin_unlock(drv_lock);
753
754 switch (iorb->CommandCode) {
755
756 case IOCC_CONFIGURATION:
757 iocc_configuration(iorb);
758 break;
759
760 case IOCC_DEVICE_CONTROL:
761 iocc_device_control(iorb);
762 break;
763
764 case IOCC_UNIT_CONTROL:
765 iocc_unit_control(iorb);
766 break;
767
768 case IOCC_GEOMETRY:
769 iocc_geometry(iorb);
770 break;
771
772 case IOCC_EXECUTE_IO:
773 iocc_execute_io(iorb);
774 break;
775
776 case IOCC_UNIT_STATUS:
777 iocc_unit_status(iorb);
778 break;
779
780 case IOCC_ADAPTER_PASSTHRU:
781 iocc_adapter_passthru(iorb);
782 break;
783
784 default:
785 /* unsupported call */
786 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
787 iorb_done(iorb);
788 break;
789 }
790
791 /* re-acquire spinlock before returning to trigger_engine() */
792 spin_lock(drv_lock);
793}
794
795/******************************************************************************
796 * Handle IOCC_CONFIGURATION requests.
797 */
798void iocc_configuration(IORBH _far *iorb)
799{
800 int a;
801
802 switch (iorb->CommandModifier) {
803
804 case IOCM_COMPLETE_INIT:
805 /* Complete initialization. From now on, we won't have to restore the BIOS
806 * configuration after each command and we're fully operational (i.e. will
807 * use interrupts, timers and context hooks instead of polling).
808 */
809 if (!init_complete) {
810 dprintf("leaving initialization mode\n");
811 for (a = 0; a < ad_info_cnt; a++) {
812 lock_adapter(ad_infos + a);
813 ahci_complete_init(ad_infos + a);
814 }
815 init_complete = 1;
816
817 /* DAZ turn off COM port output if on */
818 //com_base = 0;
819
820 /* release all adapters */
821 for (a = 0; a < ad_info_cnt; a++) {
822 unlock_adapter(ad_infos + a);
823 }
824
825 #ifdef LEGACY_APM
826 /* register APM hook */
827 apm_init();
828 #endif
829
830 build_user_info();
831 }
832 iorb_done(iorb);
833 break;
834
835 case IOCM_GET_DEVICE_TABLE:
836 /* construct a device table */
837 iocm_device_table(iorb);
838 break;
839
840 default:
841 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
842 iorb_done(iorb);
843 break;
844 }
845}
846
847/******************************************************************************
848 * Handle IOCC_DEVICE_CONTROL requests.
849 */
850void iocc_device_control(IORBH _far *iorb)
851{
852 AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb);
853 IORBH _far *ptr;
854 IORBH _far *next = NULL;
855 int p = iorb_unit_port(iorb);
856 int d = iorb_unit_device(iorb);
857
858 switch (iorb->CommandModifier) {
859
860 case IOCM_ABORT:
861 /* abort all pending commands on specified port and device */
862 spin_lock(drv_lock);
863 for (ptr = ai->ports[p].iorb_queue.root; ptr != NULL; ptr = next) {
864 next = ptr->pNxtIORB;
865 /* move all matching IORBs to the abort queue */
866 if (ptr != iorb && iorb_unit_device(ptr) == d) {
867 iorb_queue_del(&ai->ports[p].iorb_queue, ptr);
868 iorb_queue_add(&abort_queue, ptr);
869 ptr->ErrorCode = IOERR_CMD_ABORTED;
870 }
871 }
872 spin_unlock(drv_lock);
873
874 /* trigger reset context hook which will finish the abort processing */
875 DevHelp_ArmCtxHook(0, reset_ctxhook_h);
876 break;
877
878 case IOCM_SUSPEND:
879 case IOCM_RESUME:
880 case IOCM_GET_QUEUE_STATUS:
881 /* Suspend/resume operations allow access to the hardware for other
882 * entities such as IBMIDECD.FLT. Since os2ahci implements both ATA
883 * and ATAPI in the same driver, this won't be required.
884 */
885 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
886 break;
887
888 case IOCM_LOCK_MEDIA:
889 case IOCM_UNLOCK_MEDIA:
890 case IOCM_EJECT_MEDIA:
891 /* unit control commands to lock, unlock and eject media */
892 /* will be supported later... */
893 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
894 break;
895
896 default:
897 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
898 break;
899 }
900
901 iorb_done(iorb);
902}
903
904/******************************************************************************
905 * Handle IOCC_UNIT_CONTROL requests.
906 */
907void iocc_unit_control(IORBH _far *iorb)
908{
909 IORB_UNIT_CONTROL _far *iorb_uc = (IORB_UNIT_CONTROL _far *) iorb;
910 int a = iorb_unit_adapter(iorb);
911 int p = iorb_unit_port(iorb);
912 int d = iorb_unit_device(iorb);
913
914 spin_lock(drv_lock);
915 switch (iorb->CommandModifier) {
916
917 case IOCM_ALLOCATE_UNIT:
918 /* allocate unit for exclusive access */
919 if (ad_infos[a].ports[p].devs[d].allocated) {
920 iorb_seterr(iorb, IOERR_UNIT_ALLOCATED);
921 } else {
922 ad_infos[a].ports[p].devs[d].allocated = 1;
923 }
924 break;
925
926 case IOCM_DEALLOCATE_UNIT:
927 /* deallocate exclusive access to unit */
928 if (!ad_infos[a].ports[p].devs[d].allocated) {
929 iorb_seterr(iorb, IOERR_UNIT_NOT_ALLOCATED);
930 } else {
931 ad_infos[a].ports[p].devs[d].allocated = 0;
932 }
933 break;
934
935 case IOCM_CHANGE_UNITINFO:
936 /* Change unit (device) information. One reason for this IOCM is the
937 * interface for filter device drivers: a filter device driver can
938 * either change existing UNITINFOs or permanently allocate units
939 * and fabricate new [logical] units; the former is the reason why we
940 * must store the pointer to the updated UNITINFO for subsequent
941 * IOCC_CONFIGURATION/IOCM_GET_DEVICE_TABLE calls.
942 */
943 if (!ad_infos[a].ports[p].devs[d].allocated) {
944 iorb_seterr(iorb, IOERR_UNIT_NOT_ALLOCATED);
945 break;
946 }
947 ad_infos[a].ports[p].devs[d].unit_info = iorb_uc->pUnitInfo;
948 break;
949
950 default:
951 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
952 break;
953 }
954
955 spin_unlock(drv_lock);
956 iorb_done(iorb);
957}
958
959/******************************************************************************
960 * Scan all ports for AHCI devices and construct a DASD device table.
961 *
962 * NOTES: This function may be called multiple times. Only the first
963 * invocation will actually scan for devices; all subsequent calls will
964 * merely return the results of the initial scan, potentially augmented
965 * by modified unit infos after IOCC_CONFIGURATION/IOCM_CHANGE_UNITINFO
966 * requests.
967 *
968 * In order to support applications that can't deal with ATAPI devices
969 * (i.e. need a SCSI adapter) os2ahci will optionally report ATAPI
970 * devices as SCSI devices. The corresponding SCSI adapter doesn't
971 * really exist and is only reported here for the IOCM_GET_DEVICE_TABLE
972 * request. The units attached to this adapter will use the real HW
973 * unit IDs, thus we'll never receive a command specific to the
974 * emulated SCSI adapter and won't need to set up any sort of entity
975 * for it; the only purpose of the emulated SCSI adapter is to pass the
976 * bus type "AI_DEVBUS_SCSI_2" upstream, and the emulated units, of
977 * course. The emulated SCSI target IDs are allocated as follows:
978 *
979 * 0 the virtual adapter
980 * 1..n emulated devices; SCSI target ID increments sequentially
981 */
982void iocm_device_table(IORBH _far *iorb)
983{
984 IORB_CONFIGURATION _far *iorb_conf;
985 DEVICETABLE _far *dt;
986 char _far *pos;
987 int scsi_units = 0;
988 int scsi_id = 1;
989 int rc;
990 int dta;
991 int a;
992 int p;
993 int d;
994
995 iorb_conf = (IORB_CONFIGURATION _far *) iorb;
996 dt = iorb_conf->pDeviceTable;
997
998 spin_lock(drv_lock);
999
1000 /* initialize device table header */
1001 dt->ADDLevelMajor = ADD_LEVEL_MAJOR;
1002 dt->ADDLevelMinor = ADD_LEVEL_MINOR;
1003 dt->ADDHandle = add_handle;
1004 dt->TotalAdapters = ad_info_cnt + 1;
1005
1006 /* set start of adapter and device information tables */
1007 pos = (char _far *) (dt->pAdapter + dt->TotalAdapters);
1008
1009 /* go through all adapters, including the virtual SCSI adapter */
1010 for (dta = 0; dta < dt->TotalAdapters; dta++) {
1011 ADAPTERINFO _far *ptr = (ADAPTERINFO _far *) pos;
1012
1013 /* sanity check for sufficient space in device table */
1014 if ((u32) (ptr + 1) - (u32) dt > iorb_conf->DeviceTableLen) {
1015 dprintf("error: device table provided by DASD too small\n");
1016 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE);
1017 goto iocm_device_table_done;
1018 }
1019
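    /* store only the offset; the segment is recovered from 'dt' later
     * (see add_unit_info())
     */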
1020 dt->pAdapter[dta] = (ADAPTERINFO _near *) ((u32) ptr & 0xffff);
1021 memset(ptr, 0x00, sizeof(*ptr));
1022
1023 ptr->AdapterIOAccess = AI_IOACCESS_BUS_MASTER;
1024 ptr->AdapterHostBus = AI_HOSTBUS_OTHER | AI_BUSWIDTH_32BIT;
1025 ptr->AdapterFlags = AF_16M | AF_HW_SCATGAT;
1026 ptr->MaxHWSGList = AHCI_MAX_SG / 2; /* AHCI S/G elements are 22 bits */
1027
1028 if (dta < ad_info_cnt) {
1029 /* this is a physical AHCI adapter */
1030 AD_INFO *ad_info = ad_infos + dta;
1031
1032 ptr->AdapterDevBus = AI_DEVBUS_ST506 | AI_DEVBUS_32BIT;
1033 sprintf(ptr->AdapterName, "AHCI_%d", dta);
1034
1035 if (!ad_info->port_scan_done) {
1036 /* first call; need to scan AHCI hardware for devices */
1037 if (ad_info->busy) {
1038 dprintf("error: port scan requested while adapter was busy\n");
1039 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE);
1040 goto iocm_device_table_done;
1041 }
1042 ad_info->busy = 1;
1043 spin_unlock(drv_lock);
1044 rc = ahci_scan_ports(ad_info);
1045 spin_lock(drv_lock);
1046 ad_info->busy = 0;
1047
1048 if (rc != 0) {
1049 dprintf("error: port scan failed on adapter #%d\n", dta);
1050 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE);
1051 goto iocm_device_table_done;
1052 }
1053 ad_info->port_scan_done = 1;
1054 }
1055
1056 /* insert physical (i.e. AHCI) devices into the device table */
1057 for (p = 0; p <= ad_info->port_max; p++) {
1058 for (d = 0; d <= ad_info->ports[p].dev_max; d++) {
1059 if (ad_info->ports[p].devs[d].present) {
1060 if (ad_info->ports[p].devs[d].atapi && emulate_scsi[dta][p]) {
1061 /* only report this unit as SCSI unit */
1062 scsi_units++;
1063 continue;
1064 }
1065 if (add_unit_info(iorb_conf, dta, dta, p, d, 0)) {
1066 goto iocm_device_table_done;
1067 }
1068 }
1069 }
1070 }
1071
1072 } else {
1073 /* this is the virtual SCSI adapter */
1074 if (scsi_units == 0) {
1075 /* not a single unit to be emulated via SCSI */
1076 dt->TotalAdapters--;
1077 break;
1078 }
1079
1080 /* set adapter name and bus type to mimic a SCSI controller */
1081 ptr->AdapterDevBus = AI_DEVBUS_SCSI_2 | AI_DEVBUS_16BIT;
1082 sprintf(ptr->AdapterName, "AHCI_SCSI_0");
1083
1084 /* add all ATAPI units to be emulated by this virtual adapter */
1085 for (a = 0; a < ad_info_cnt; a++) {
1086 AD_INFO *ad_info = ad_infos + a;
1087
1088 for (p = 0; p <= ad_info->port_max; p++) {
1089 for (d = 0; d <= ad_info->ports[p].dev_max; d++) {
1090 if (ad_info->ports[p].devs[d].present &&
1091 ad_info->ports[p].devs[d].atapi &&
1092 emulate_scsi[a][p]) {
1093 if (add_unit_info(iorb_conf, dta, a, p, d, scsi_id++)) {
1094 goto iocm_device_table_done;
1095 }
1096 }
1097 }
1098 }
1099 }
1100 }
1101
1102 /* calculate offset for next adapter */
1103 pos = (char _far *) (ptr->UnitInfo + ptr->AdapterUnits);
1104 }
1105
1106iocm_device_table_done:
1107 spin_unlock(drv_lock);
1108 iorb_done(iorb);
1109}
1110
1111/******************************************************************************
1112 * Handle IOCC_GEOMETRY requests.
1113 */
1114void iocc_geometry(IORBH _far *iorb)
1115{
1116 switch (iorb->CommandModifier) {
1117
1118 case IOCM_GET_MEDIA_GEOMETRY:
1119 case IOCM_GET_DEVICE_GEOMETRY:
1120 add_workspace(iorb)->idempotent = 1;
1121 ahci_get_geometry(iorb);
1122 break;
1123
1124 default:
1125 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1126 iorb_done(iorb);
1127 }
1128}
1129
1130/******************************************************************************
1131 * Handle IOCC_EXECUTE_IO requests.
1132 */
1133void iocc_execute_io(IORBH _far *iorb)
1134{
1135 switch (iorb->CommandModifier) {
1136
1137 case IOCM_READ:
1138 add_workspace(iorb)->idempotent = 1;
1139 ahci_read(iorb);
1140 break;
1141
1142 case IOCM_READ_VERIFY:
1143 add_workspace(iorb)->idempotent = 1;
1144 ahci_verify(iorb);
1145 break;
1146
1147 case IOCM_WRITE:
1148 add_workspace(iorb)->idempotent = 1;
1149 ahci_write(iorb);
1150 break;
1151
1152 case IOCM_WRITE_VERIFY:
1153 add_workspace(iorb)->idempotent = 1;
1154 ahci_write(iorb);
1155 break;
1156
1157 default:
1158 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1159 iorb_done(iorb);
1160 }
1161}
1162
1163/******************************************************************************
1164 * Handle IOCC_UNIT_STATUS requests.
1165 */
1166void iocc_unit_status(IORBH _far *iorb)
1167{
1168 switch (iorb->CommandModifier) {
1169
1170 case IOCM_GET_UNIT_STATUS:
1171 add_workspace(iorb)->idempotent = 1;
1172 ahci_unit_ready(iorb);
1173 break;
1174
1175 default:
1176 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1177 iorb_done(iorb);
1178 }
1179}
1180
1181/******************************************************************************
1182 * Handle IOCC_ADAPTER_PASSTHROUGH requests.
1183 */
1184void iocc_adapter_passthru(IORBH _far *iorb)
1185{
1186 switch (iorb->CommandModifier) {
1187
1188 case IOCM_EXECUTE_CDB:
1189 add_workspace(iorb)->idempotent = 0;
1190 ahci_execute_cdb(iorb);
1191 break;
1192
1193 case IOCM_EXECUTE_ATA:
1194 add_workspace(iorb)->idempotent = 0;
1195 ahci_execute_ata(iorb);
1196 break;
1197
1198 default:
1199 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1200 iorb_done(iorb);
1201 }
1202}
1203
1204/******************************************************************************
1205 * Add an IORB to the specified queue. This function must be called with the
1206 * adapter-level spinlock acquired.
1207 */
1208void iorb_queue_add(IORB_QUEUE _far *queue, IORBH _far *iorb)
1209{
1210 if (iorb_priority(iorb)) {
1211 /* priority IORB; insert at first position */
1212 iorb->pNxtIORB = queue->root;
1213 queue->root = iorb;
1214
1215 } else {
1216 /* append IORB to end of queue */
1217 iorb->pNxtIORB = NULL;
1218
1219 if (queue->root == NULL) {
1220 queue->root = iorb;
1221 } else {
1222 queue->tail->pNxtIORB = iorb;
1223 }
1224 queue->tail = iorb;
1225 }
1226
1227 if (debug) {
1228 /* determine queue type (local, driver, abort or port) and minimum debug
1229 * level; otherwise, queue debug prints can become really confusing.
1230 */
1231 char *queue_type;
1232 int min_debug = 1;
1233
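    /* Compare the segment part of the queue pointer with the segment of a
     * local variable (the 'queue' parameter itself); if both are in the same
     * segment, the queue must live on the caller's stack.
     */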
1234 if ((u32) queue >> 16 == (u32) (void _far *) &queue >> 16) {
1235 /* this queue is on the stack */
1236 queue_type = "local";
1237 min_debug = 2;
1238
1239 } else if (queue == &driver_queue) {
1240 queue_type = "driver";
1241
1242 } else if (queue == &abort_queue) {
1243 queue_type = "abort";
1244 min_debug = 2;
1245
1246 } else {
1247 queue_type = "port";
1248 }
1249
1250 if (debug > min_debug) {
1251 aprintf("IORB %Fp queued (cmd = %d/%d, queue = %Fp [%s], timeout = %ld)\n",
1252 iorb, iorb->CommandCode, iorb->CommandModifier, queue, queue_type,
1253 iorb->Timeout);
1254 }
1255 }
1256}
1257
1258/******************************************************************************
1259 * Remove an IORB from the specified queue. This function must be called with
1260 * the adapter-level spinlock acquired.
1261 */
1262int iorb_queue_del(IORB_QUEUE _far *queue, IORBH _far *iorb)
1263{
1264 IORBH _far *_iorb;
1265 IORBH _far *_prev = NULL;
1266 int found = 0;
1267
1268 for (_iorb = queue->root; _iorb != NULL; _iorb = _iorb->pNxtIORB) {
1269 if (_iorb == iorb) {
1270 /* found the IORB to be removed */
1271 if (_prev != NULL) {
1272 _prev->pNxtIORB = _iorb->pNxtIORB;
1273 } else {
1274 queue->root = _iorb->pNxtIORB;
1275 }
1276 if (_iorb == queue->tail) {
1277 queue->tail = _prev;
1278 }
1279 found = 1;
1280 break;
1281 }
1282 _prev = _iorb;
1283 }
1284
1285 if (found) {
1286 ddprintf("IORB %Fp removed (queue = %Fp)\n", iorb, queue);
1287 } else {
1288 dprintf("IORB %Fp not found in queue %Fp\n", iorb, queue);
1289 }
1290
1291 return(!found);
1292}
1293
1294/******************************************************************************
1295 * Set the error code in the specified IORB
1296 *
1297 * NOTE: This function does *not* call iorb_done(). It merely sets the IORB
1298 * status to the specified error code.
1299 */
1300void iorb_seterr(IORBH _far *iorb, USHORT error_code)
1301{
1302 iorb->ErrorCode = error_code;
1303 iorb->Status |= IORB_ERROR;
1304}
1305
1306/******************************************************************************
1307 * Mark the specified IORB as done and notify the asynchronous post function,
1308 * if any. The IORB is also removed from the corresponding IORB queue.
1309 *
1310 * NOTES: This function does not clear the Status field; it merely adds the
1311 * IORB_DONE flag.
1312 *
1313 * This function is expected to be called *without* the corresponding
1314 *        driver-level drv_lock acquired. It will acquire the spinlock before
1315 * updating the IORB queue and release it before notifying the upstream
1316 * code in order to prevent deadlocks.
1317 *
1318 * Due to this logic, this function is only good for simple task-time
1319 * completions. Functions working on lists of IORBs (such as interrupt
1320 * handlers or context hooks) should call iorb_complete() directly and
1321 * implement their own logic for removing the IORB from the port queue.
1322 * See abort_ctxhook() for an example.
1323 */
1324void iorb_done(IORBH _far *iorb)
1325{
1326 int a = iorb_unit_adapter(iorb);
1327 int p = iorb_unit_port(iorb);
1328
1329 /* remove IORB from corresponding queue */
1330 spin_lock(drv_lock);
1331 if (iorb_driver_level(iorb)) {
1332 iorb_queue_del(&driver_queue, iorb);
1333 } else {
1334 iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, iorb);
1335 }
1336 aws_free(add_workspace(iorb));
1337 spin_unlock(drv_lock);
1338
1339 iorb_complete(iorb);
1340}
1341
1342/******************************************************************************
1343 * Complete an IORB. This should be called without the adapter-level spinlock
1344 * to allow the IORB completion routine to perform whatever processing it
1345 * requires. This implies that the IORB should no longer be in any global
1346 * queue because the IORB completion routine may well reuse the IORB and send
1347 * the next request to us before even returning from this function.
1348 */
1349void iorb_complete(IORBH _far *iorb)
1350{
1351 iorb->Status |= IORB_DONE;
1352
1353 ddprintf("IORB %Fp complete (status = 0x%04x, error = 0x%04x)\n",
1354 iorb, iorb->Status, iorb->ErrorCode);
1355
1356 if (iorb->RequestControl & IORB_ASYNC_POST) {
1357 iorb->NotifyAddress(iorb);
1358 }
1359}
1360
1361/******************************************************************************
1362 * Requeue the specified IORB such that it will be sent downstream for
1363 * processing again. This includes freeing all resources currently allocated
1364 * (timer, buffer, ...) and resetting the flags to 0. The driver-level
1365 * spinlock must be acquired when calling this function.
1366 *
1367 * The following flags are preserved:
1368 * - no_ncq
1369 */
1370void iorb_requeue(IORBH _far *iorb)
1371{
1372 ADD_WORKSPACE _far *aws = add_workspace(iorb);
1373 u16 no_ncq = aws->no_ncq;
1374 u16 unaligned = aws->unaligned;
1375 u16 retries = aws->retries;
1376
1377 aws_free(aws);
1378 memset(aws, 0x00, sizeof(*aws));
1379
1380 aws->no_ncq = no_ncq;
1381 aws->unaligned = unaligned;
1382 aws->retries = retries;
1383}
1384
1385/******************************************************************************
1386 * Free resources in ADD workspace (timer, buffer, ...). This function should
1387 * be called with the spinlock held to prevent race conditions.
1388 */
1389void aws_free(ADD_WORKSPACE _far *aws)
1390{
1391 if (aws->timer != 0) {
1392 ADD_CancelTimer(aws->timer);
1393 aws->timer = 0;
1394 }
1395
1396 if (aws->buf != NULL) {
1397 free(aws->buf);
1398 aws->buf = NULL;
1399 }
1400}
1401
1402/******************************************************************************
1403 * Lock the adapter, waiting for availability if necessary. This is expected
1404 * to be called at task/request time without the driver-level spinlock
1405 * acquired. Don't call at interrupt time.
1406 */
1407void lock_adapter(AD_INFO *ai)
1408{
1409 TIMER Timer;
1410
1411 spin_lock(drv_lock);
1412 while (ai->busy) {
1413 spin_unlock(drv_lock);
1414 timer_init(&Timer, 250);
1415 while (!timer_check_and_block(&Timer));
1416 spin_lock(drv_lock);
1417 }
1418 ai->busy = 1;
1419 spin_unlock(drv_lock);
1420}
1421
1422/******************************************************************************
1423 * Unlock adapter (i.e. reset busy flag)
1424 */
1425void unlock_adapter(AD_INFO *ai)
1426{
1427 ai->busy = 0;
1428}
1429
1430/******************************************************************************
1431 * Timeout handler for I/O commands. Since timeout handling can involve
1432 * lengthy operations like port resets, the main code is located in a
1433 * separate function which is invoked via a context hook.
1434 */
1435void _cdecl _far timeout_callback(ULONG timer_handle, ULONG p1,
1436 ULONG p2)
1437{
1438 IORBH _far *iorb = (IORBH _far *) p1;
1439 int a = iorb_unit_adapter(iorb);
1440 int p = iorb_unit_port(iorb);
1441
1442 ADD_CancelTimer(timer_handle);
1443 dprintf("timeout for IORB %Fp\n", iorb);
1444
1445 /* Move the timed-out IORB to the abort queue. Since it's possible that the
1446 * IORB has completed after the timeout has expired but before we got to
1447 * this line of code, we'll check the return code of iorb_queue_del(): If it
1448 * returns an error, the IORB must have completed a few microseconds ago and
1449 * there is no timeout.
1450 */
1451 spin_lock(drv_lock);
1452 if (iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, iorb) == 0) {
1453 iorb_queue_add(&abort_queue, iorb);
1454 iorb->ErrorCode = IOERR_ADAPTER_TIMEOUT;
1455 }
1456 spin_unlock(drv_lock);
1457
1458 /* Trigger abort processing function. We don't really care whether this
1459 * succeeds because the only reason why it would fail should be multiple
1460 * calls to DevHelp_ArmCtxHook() before the context hook had a chance to
1461 * start executing, which leaves two scenarios:
1462 *
1463 *  - We succeeded in arming the context hook. Fine.
1464 *
1465 * - We armed the context hook a second time before it had a chance to
1466 * start executing. In this case, the already scheduled context hook
1467 * will process our IORB as well.
1468 */
1469 DevHelp_ArmCtxHook(0, reset_ctxhook_h);
1470
1471 /* Set up a watchdog timer which calls the context hook manually in case
1472 * some kernel thread is looping around the IORB_COMPLETE status bit
1473 * without yielding the CPU (kernel threads don't preempt). This shouldn't
1474 * happen per design because kernel threads are supposed to yield but it
1475 * does in the early boot phase.
1476 */
1477 ADD_StartTimerMS(&th_reset_watchdog, 5000, (PFN) reset_watchdog, 0, 0);
1478}
1479
1480/******************************************************************************
1481 * Reset handler watchdog. If a timeout occurs, a context hook is armed which
1482 * will execute as soon as a kernel thread yields the CPU. However, some
1483 * kernel components won't yield the CPU during the early boot phase and the
1484 * only way to kick some sense into those components is to run the context
1485 * hook right inside this timer callback. Not exactly pretty, especially
1486 * considering the fact that context hooks were implemented to prevent running
1487 * lengthy operations like a port reset at interrupt time, but without this
1488 * watchdog mechanism we run the risk of getting completely stalled by device
1489 * problems during the early boot phase.
1490 */
1491void _cdecl _far reset_watchdog(ULONG timer_handle, ULONG p1,
1492 ULONG p2)
1493{
1494 /* reset watchdog timer */
1495 ADD_CancelTimer(timer_handle);
1496 dprintf("reset watchdog invoked\n");
1497
1498 /* call context hook manually */
1499 reset_ctxhook(0);
1500}
1501
1502/******************************************************************************
1503 * small_code_ - this dummy func resolves the undefined reference linker
1504 * error that occurs when linking WATCOM objects with DDK's link.exe
1505 */
1506void _cdecl small_code_(void)
1507{
1508}
1509
1510/******************************************************************************
1511 * Add unit info to ADAPTERINFO array (IOCM_GET_DEVICE_TABLE requests). The
1512 * adapter info array in the device table, dt->pAdapter[], is expected to be
1513 * initialized for the specified index (dt_ai).
1514 *
1515 * Please note that the device table adapter index, dta, is not always equal
1516 * to the physical adapter index, a: if SCSI emulation has been activated, the
1517 * last reported adapter is a virtual SCSI adapter and the physical adapter
1518 * indexes for those units are, of course, different from the device table
1519 * index of the virtual SCSI adapter.
1520 */
1521static int add_unit_info(IORB_CONFIGURATION _far *iorb_conf, int dta,
1522 int a, int p, int d, int scsi_id)
1523{
1524 DEVICETABLE _far *dt = iorb_conf->pDeviceTable;
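  /* dt->pAdapter[] holds near (offset-only) pointers; rebuild a far pointer
   * by combining the device table's segment with the stored offset.
   */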
1525 ADAPTERINFO _far *ptr = (ADAPTERINFO _far *) (((u32) dt & 0xffff0000U) +
1526 (u16) dt->pAdapter[dta]);
1527 UNITINFO _far *ui = ptr->UnitInfo + ptr->AdapterUnits;
1528 AD_INFO *ai = ad_infos + a;
1529
1530 if ((u32) (ui + 1) - (u32) dt > iorb_conf->DeviceTableLen) {
1531 dprintf("error: device table provided by DASD too small\n");
1532 iorb_seterr(&iorb_conf->iorbh, IOERR_CMD_SW_RESOURCE);
1533 return(-1);
1534 }
1535
1536 if (ai->ports[p].devs[d].unit_info == NULL) {
1537 /* provide original information about this device (unit) */
1538 memset(ui, 0x00, sizeof(*ui));
1539 ui->AdapterIndex = dta; /* device table adapter index */
1540 ui->UnitHandle = iorb_unit(a, p, d); /* handle encodes physical a.p.d */
1541 ui->UnitIndex = ptr->AdapterUnits;
1542 ui->UnitType = ai->ports[p].devs[d].dev_type;
1543 ui->QueuingCount = ai->ports[p].devs[d].ncq_max;
1544 if (ai->ports[p].devs[d].removable) {
1545 ui->UnitFlags |= UF_REMOVABLE;
1546 }
1547 if (scsi_id > 0) {
1548 /* set fake SCSI ID for this unit */
1549 ui->UnitSCSITargetID = scsi_id;
1550 }
1551 } else {
1552 /* copy updated device (unit) information (IOCM_CHANGE_UNITINFO) */
1553 memcpy(ui, ai->ports[p].devs[d].unit_info, sizeof(*ui));
1554 }
1555
1556 ptr->AdapterUnits++;
1557 return(0);
1558}
1559
1560/*******************************************************************************
1561 * Register kernel exit handler for trap dumps. Our exit handler will be called
1562 * right before the kernel starts a dump; that's where we reset the controller
1563 * so it supports BIOS int13 I/O calls.
1564 */
1565static void register_krnl_exit(void)
1566{
1567 _asm {
1568 push ds
1569 push es
1570 push bx
1571 push si
1572 push di
1573
1574 mov ax, FLAG_KRNL_EXIT_ADD
1575 mov cx, TYPE_KRNL_EXIT_INT13
1576 mov bx, SEG asm_krnl_exit
1577 mov si, OFFSET asm_krnl_exit
1578 mov dl, DevHlp_RegisterKrnlExit
1579
1580 call dword ptr [Device_Help]
1581
1582 pop di
1583 pop si
1584 pop bx
1585 pop es
1586 pop ds
1587 }
1588
1589 dprintf("Registered kernel exit routine for INT13 mode\n");
1590}
1591