source: trunk/src/os2ahci/os2ahci.c@ 156

Last change on this file since 156 was 156, checked in by David Azarewicz, 12 years ago

debugging updates

File size: 52.4 KB
1/******************************************************************************
2 * os2ahci.c - main file for os2ahci driver
3 *
4 * Copyright (c) 2011 thi.guten Software Development
5 * Copyright (c) 2011 Mensys B.V.
6 *
7 * Authors: Christian Mueller, Markus Thielen
8 *
9 * Parts copied from/inspired by the Linux AHCI driver;
10 * those parts are (c) Linux AHCI/ATA maintainers
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include "os2ahci.h"
28#include "ioctl.h"
29#include "version.h"
30
31/* -------------------------- macros and constants ------------------------- */
32
33/* parse integer command line parameter */
34#define drv_parm_int(s, value, type, radix) \
35 { \
36 char _far *_ep; \
37 if ((s)[1] != ':') { \
38 cprintf("%s: missing colon (:) after /%c\n", drv_name, *(s)); \
39 goto init_fail; \
40 } \
41 value = (type) strtol((s) + 2, \
42 (const char _far* _far*) &_ep, \
43 radix); \
44 s = _ep; \
45 }
46
47#define drv_parm_int_optional(s, value, type, radix) \
48 { \
49 char _far *_ep; \
50 if ((s)[1] == ':') { \
51 value = (type) strtol((s) + 2, (const char _far* _far*) &_ep, radix); \
52 s = _ep; \
53 } else { \
54 value++; \
55 } \
56 }
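/* Usage sketch (illustrative, not part of the original source): with the
 * option cursor 's' on the option letter, drv_parm_int() expects a colon,
 * converts the digits with strtol() and leaves 's' on the first character
 * it did not consume. For the hypothetical fragment "/c:3f8":
 *
 *   s -> "c:3f8 ..."
 *   drv_parm_int(s, com_base, u16, 16);   // com_base = 0x3f8, s -> " ..."
 *
 * drv_parm_int_optional() does the same when a ":n" suffix is present and
 * simply increments the value (e.g. the debug level for a bare "/d") when
 * it is not.
 */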
57
58/* set two-dimensional array of port options */
59#define set_port_option(opt, val) \
60 if (adapter_index == -1) { \
61 /* set option for all adapters and ports */ \
62 memset(opt, val, sizeof(opt)); \
63 } else if (port_index == -1) { \
64 /* set option for all ports on current adapter */ \
65 memset(opt[adapter_index], val, sizeof(*opt)); \
66 } else { \
67 /* set option for specific port */ \
68 opt[adapter_index][port_index] = val; \
69 }
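/* Illustrative expansion (not part of the original source): the /a and /p
 * switches parsed in init_drv() select the indexes used here, so the
 * command line fragment "/a:0 /p:2 /s" ends up as
 *
 *   set_port_option(emulate_scsi, 1);   // -> emulate_scsi[0][2] = 1;
 *
 * while a bare "/s" (adapter_index == port_index == -1) expands to
 * memset(emulate_scsi, 1, sizeof(emulate_scsi)), i.e. all adapters/ports.
 */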
70
71/* constants for the kernel exit routine DevHlp, which is not defined in the
72 * headers used here; see register_krnl_exit() func */
73#define DevHlp_RegisterKrnlExit 0x006f
74
75#define FLAG_KRNL_EXIT_ADD 0x1000
76#define FLAG_KRNL_EXIT_REMOVE 0x2000
77
78#define TYPE_KRNL_EXIT_NMI 0x0000 /* non-maskable interrupts */
79#define TYPE_KRNL_EXIT_SFF 0x0001 /* system fatal faults */
80#define TYPE_KRNL_EXIT_PROCDUMP 0x0002
81#define TYPE_KRNL_EXIT_DYN 0x0003
82#define TYPE_KRNL_EXIT_INT13 0x0004 /* enable int13 IO */
83
84/* ------------------------ typedefs and structures ------------------------ */
85
86/* -------------------------- function prototypes -------------------------- */
87
88void _cdecl small_code_ (void);
89
90static int add_unit_info (IORB_CONFIGURATION _far *iorb_conf, int dt_ai,
91 int a, int p, int d, int scsi_id);
92
93static void register_krnl_exit (void);
94
95/* ------------------------ global/static variables ------------------------ */
96
97int debug = 0; /* if > 0, print debug messages to COM1 */
98int thorough_scan = 1; /* if != 0, perform thorough PCI scan */
99int init_reset = 1; /* if != 0, reset ports during init */
100int force_write_cache; /* if != 0, force write cache */
101int verbosity = 0; /* default is quiet. 1=show sign-on banner, >1=show adapter info during boot */
102int use_lvm_info = 1;
103int wrap_trace_buffer = 0;
104long com_baud = 0;
105
106PFN Device_Help = 0; /* pointer to device helper entry point */
107ULONG RMFlags = 0; /* required by resource manager library */
108PFN RM_Help0 = NULL; /* required by resource manager library */
109PFN RM_Help3 = NULL; /* required by resource manager library */
110HDRIVER rm_drvh; /* resource manager driver handle */
111char rm_drvname[80]; /* driver name as returned by RM */
112USHORT add_handle; /* driver handle (RegisterDeviceClass) */
113UCHAR timer_pool[TIMER_POOL_SIZE]; /* timer pool */
114char drv_name[] = "OS2AHCI"; /* driver name as string */
115
116/* resource manager driver information structure */
117DRIVERSTRUCT rm_drvinfo = {
118 drv_name, /* driver name */
119 "AHCI SATA Driver", /* driver description */
120 DVENDOR, /* vendor name */
121 CMVERSION_MAJOR, /* RM interface version major */
122 CMVERSION_MINOR, /* RM interface version minor */
123 BLD_YEAR, BLD_MONTH, BLD_DAY, /* date */
124 0, /* driver flags */
125 DRT_ADDDM, /* driver type */
126 DRS_ADD, /* driver sub type */
127 NULL /* driver callback */
128};
129
130ULONG drv_lock; /* driver-level spinlock */
131IORB_QUEUE driver_queue; /* driver-level IORB queue */
132AD_INFO ad_infos[MAX_AD]; /* adapter information list */
133int ad_info_cnt; /* number of entries in ad_infos[] */
134u16 ad_ignore; /* bitmap with adapter indexes to ignore */
135int init_complete; /* if != 0, initialization has completed */
136int suspended;
137
138/* adapter/port-specific options saved when parsing the command line */
139u8 emulate_scsi[MAX_AD][AHCI_MAX_PORTS];
140u8 enable_ncq[MAX_AD][AHCI_MAX_PORTS];
141u8 link_speed[MAX_AD][AHCI_MAX_PORTS];
142u8 link_power[MAX_AD][AHCI_MAX_PORTS];
143u8 track_size[MAX_AD][AHCI_MAX_PORTS];
144
145static char init_msg[] = "%s driver version %d.%02d\n";
146static char exit_msg[] = "%s driver *not* installed\n";
147char BldLevel[] = BLDLEVEL;
148
149/* ----------------------------- start of code ----------------------------- */
150
151/******************************************************************************
152 * OS/2 device driver main strategy function. This function is only used
153 * for initialization purposes; all other calls go directly to the adapter
154 * device driver's strategy function.
155 *
156 * NOTE: this is also used as the IDC entry point. We expect an IOCTL request
157 * packet for IDC calls, so they can be handled by gen_ioctl.
158 */
159USHORT _cdecl c_strat(RPH _far *req)
160{
161 u16 rc;
162
163 switch (req->Cmd) {
164
165 case CMDInitBase:
166 rc = init_drv((RPINITIN _far *) req);
167 break;
168
169 case CMDShutdown:
170 rc = exit_drv(((RPSAVERESTORE _far *) req)->FuncCode);
171 break;
172
173 case CMDGenIOCTL:
174 rc = gen_ioctl((RP_GENIOCTL _far *) req);
175 break;
176
177 case CMDINPUT:
178 rc = char_dev_input((RP_RWV _far *) req);
179 break;
180
181 case CMDSaveRestore:
182 rc = sr_drv(((RPSAVERESTORE _far *) req)->FuncCode);
183 break;
184
185 default:
186 rc = STDON | STATUS_ERR_UNKCMD;
187 break;
188 }
189
190 return(rc);
191}
192
193/******************************************************************************
194 * Initialize the os2ahci driver. This includes command line parsing, scanning
195 * the PCI bus for supported AHCI adapters, etc.
196 */
197USHORT init_drv(RPINITIN _far *req)
198{
199 static int init_drv_called;
200 static int init_drv_failed;
201 RPINITOUT _far *rsp = (RPINITOUT _far *) req;
202 DDD_PARM_LIST _far *ddd_pl = (DDD_PARM_LIST _far *) req->InitArgs;
203 APIRET rmrc;
204 char _far *cmd_line;
205 char _far *s;
206 int adapter_index = -1;
207 int port_index = -1;
208 int invert_option;
209 int optval;
210 u16 vendor;
211 u16 device;
212
213 if (init_drv_called) {
214 /* This is the init call for the second (legacy IBMS506$) character
215 * device driver. If the main driver failed initialization, fail this
216 * one as well.
217 */
218 rsp->CodeEnd = (u16) end_of_code;
219 rsp->DataEnd = (u16) &end_of_data;
220 return(STDON | ((init_drv_failed) ? ERROR_I24_QUIET_INIT_FAIL : 0));
221 }
222 init_drv_called = 1;
223 suspended = 0;
224
225 /* set device helper entry point */
226 Device_Help = req->DevHlpEP;
227
228 /* create driver-level spinlock */
229 DevHelp_CreateSpinLock(&drv_lock);
230
231 /* initialize libc code */
232 init_libc();
233
234 /* register driver with resource manager */
235 if ((rmrc = RMCreateDriver(&rm_drvinfo, &rm_drvh)) != RMRC_SUCCESS) {
236 cprintf("%s: failed to register driver with resource manager (rc = %d)\n",
237 drv_name, rmrc);
238 goto init_fail;
239 }
240
241 /* parse command line parameters */
242 cmd_line = (char _far *) ((u32) ddd_pl & 0xffff0000l) + ddd_pl->cmd_line_args;
243
244 for (s = cmd_line; *s != 0; s++) {
245 if (*s == '/') {
246 if ((invert_option = (s[1] == '!')) != 0) {
247 s++;
248 }
249 s++;
250 switch (tolower(*s)) {
251
252 case '\0':
253 /* end of command line; can only happen if command line is incorrect */
254 cprintf("%s: incomplete command line option\n", drv_name);
255 goto init_fail;
256
257 case 'b':
258 drv_parm_int(s, com_baud, u32, 10);
259 break;
260
261 case 'c':
262 /* set COM port base address for debug messages */
263 drv_parm_int(s, com_base, u16, 16);
264 if (com_base == 1) com_base = 0x3f8;
265 if (com_base == 2) com_base = 0x2f8;
266 break;
267
268 case 'd':
269 /* increase debug level */
270 drv_parm_int_optional(s, debug, int, 10);
271 break;
272
273 case 'g':
274 /* add specified PCI ID as a supported generic AHCI adapter */
275 drv_parm_int(s, vendor, u16, 16);
276 s--;
277 drv_parm_int(s, device, u16, 16);
278 if (add_pci_id(vendor, device)) {
279 cprintf("%s: failed to add PCI ID %04x:%04x\n", drv_name, vendor, device);
280 goto init_fail;
281 }
282 thorough_scan = 1;
283 break;
284
285 case 't':
286 /* perform thorough PCI scan (i.e. look for individual supported PCI IDs) */
287 thorough_scan = !invert_option;
288 break;
289
290 case 'r':
291 /* reset ports during initialization */
292 init_reset = !invert_option;
293 break;
294
295 case 'f':
296 /* force write cache regardless of IORB flags */
297 force_write_cache = 1;
298 break;
299
300 case 'a':
301 /* set adapter index for adapter and port-related options */
302 drv_parm_int(s, adapter_index, int, 10);
303 if (adapter_index < 0 || adapter_index >= MAX_AD) {
304 cprintf("%s: invalid adapter index (%d)\n", drv_name, adapter_index);
305 goto init_fail;
306 }
307 break;
308
309 case 'p':
310 /* set port index for port-related options */
311 drv_parm_int(s, port_index, int, 10);
312 if (port_index < 0 || port_index >= AHCI_MAX_PORTS) {
313 cprintf("%s: invalid port index (%d)\n", drv_name, port_index);
314 goto init_fail;
315 }
316 break;
317
318 case 'i':
319 /* ignore current adapter index */
320 if (adapter_index >= 0) {
321 ad_ignore |= 1U << adapter_index;
322 }
323 break;
324
325 case 's':
326 /* enable SCSI emulation for ATAPI devices */
327 set_port_option(emulate_scsi, !invert_option);
328 break;
329
330 case 'n':
331 /* enable NCQ */
332 set_port_option(enable_ncq, !invert_option);
333 break;
334
335 case 'l':
336 /* set link speed or power savings */
337 s++;
338 switch (tolower(*s)) {
339 case 's':
340 /* set link speed */
341 drv_parm_int(s, optval, int, 10);
342 set_port_option(link_speed, optval);
343 break;
344 case 'p':
345 /* set power management */
346 drv_parm_int(s, optval, int, 10);
347 set_port_option(link_power, optval);
348 break;
349 default:
350 cprintf("%s: invalid link parameter (%c)\n", drv_name, *s);
351 goto init_fail;
352 }
353 /* need to reset the port in order to establish link settings */
354 init_reset = 1;
355 break;
356
357 case '4':
358 /* enable 4K sector geometry enhancement (track size = 56) */
359 if (!invert_option) {
360 set_port_option(track_size, 56);
361 }
362 break;
363
364 case 'z':
365 /* Do not use the LVM information. There is no reason why anyone would
366 * want to do this, but previous versions of this driver did not have LVM capability,
367 * so this switch is here temporarily just in case.
368 */
369 use_lvm_info = !invert_option;
370 break;
371
372 case 'v':
373 /* be verbose during boot */
374 drv_parm_int_optional(s, verbosity, int, 10);
375 break;
376
377 case 'w':
378 /* Specify to allow the trace buffer to wrap when full. */
379 wrap_trace_buffer = !invert_option;
380 break;
381
382 case 'q':
383 /* Temporarily output a non-fatal message to get anyone using this
384 * undocumented switch to stop using it. This will be removed soon
385 * and the error will become fatal.
386 */
387 cprintf("%s: unknown option: /%c\n", drv_name, *s);
388 break;
389
390 default:
391 cprintf("%s: unknown option: /%c\n", drv_name, *s);
392 goto init_fail;
393 }
394 }
395 }
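  /* Hypothetical example (the driver file name is assumed here, not taken
   * from this source): a CONFIG.SYS line such as
   *
   *   BASEDEV=OS2AHCI.ADD /V /A:0 /P:1 /!N /S
   *
   * is parsed by the loop above as: be verbose (/V), then, for adapter 0,
   * port 1 (/A:0 /P:1), disable NCQ (/!N) and enable SCSI emulation for
   * ATAPI devices (/S).
   */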
396
397 /* print initialization message */
398 ciprintf(init_msg, drv_name, VERSION / 100, VERSION % 100);
399
400 if (com_baud) init_com(com_baud); /* initialize com port for debug output */
401
402 /* initialize trace buffer if applicable */
403 if (TRACE_ACTIVE) {
404 /* debug is on, but COM port is off -> use our trace buffer */
405 trace_init(AHCI_TRACE_BUF_SIZE);
406 } else {
407 trace_init(AHCI_INFO_BUF_SIZE);
408 }
409
410 printf_nts("BldLevel: %s\n", BldLevel);
411 printf_nts("CmdLine: %Fs\n", cmd_line);
412
413 /* scan PCI bus for supported devices */
414 scan_pci_bus();
415
416 if (ad_info_cnt > 0) {
417 /* initialization succeeded and we found at least one AHCI adapter */
418 ADD_InitTimer(timer_pool, sizeof(timer_pool));
419 mdelay_cal();
420
421 if (DevHelp_RegisterDeviceClass(drv_name, (PFN) add_entry, 0, 1, &add_handle)) {
422 cprintf("%s: couldn't register device class\n", drv_name);
423 goto init_fail;
424 }
425
426 /* allocate context hooks */
427 if (DevHelp_AllocateCtxHook(mk_NPFN(restart_hook), &restart_ctxhook_h) != 0 ||
428 DevHelp_AllocateCtxHook(mk_NPFN(reset_hook), &reset_ctxhook_h) != 0 ||
429 DevHelp_AllocateCtxHook(mk_NPFN(engine_hook), &engine_ctxhook_h)) {
430 cprintf("%s: failed to allocate task-time context hooks\n", drv_name);
431 goto init_fail;
432 }
433
434 rsp->CodeEnd = (u16) end_of_code;
435 rsp->DataEnd = (u16) &end_of_data;
436
437 /* register kernel exit routine for trap dumps */
438 register_krnl_exit();
439
440 return(STDON);
441
442 } else {
443 /* no adapters found */
444 ciprintf(" No adapters found.\n");
445 }
446
447init_fail:
448 /* initialization failed; set segment sizes to 0 and return error */
449 rsp->CodeEnd = 0;
450 rsp->DataEnd = 0;
451 init_drv_failed = 1;
452
453 /* free context hooks */
454 if (engine_ctxhook_h != 0) DevHelp_FreeCtxHook(engine_ctxhook_h);
455 if (reset_ctxhook_h != 0) DevHelp_FreeCtxHook(reset_ctxhook_h);
456 if (restart_ctxhook_h != 0) DevHelp_FreeCtxHook(restart_ctxhook_h);
457
458 if (rm_drvh != 0) {
459 /* remove driver from resource manager */
460 RMDestroyDriver(rm_drvh);
461 }
462
463 ciprintf(exit_msg, drv_name);
464 return(STDON | ERROR_I24_QUIET_INIT_FAIL);
465}
466
467/******************************************************************************
468 * Generic IOCTL via character device driver. IOCTLs are used to control the
469 * driver operation and to execute native ATA and ATAPI (SCSI) commands from
470 * ring 3 applications. On top of that, some predefined IOCTLs (e.g. SMART
471 * commands for ATA disks) are implemented here.
472 */
473USHORT gen_ioctl(RP_GENIOCTL _far *ioctl)
474{
475 dprintf("IOCTL 0x%x/0x%x\n", (u16) ioctl->Category, (u16) ioctl->Function);
476
477 switch (ioctl->Category) {
478
479 case OS2AHCI_IOCTL_CATEGORY:
480 switch (ioctl->Function) {
481
482 case OS2AHCI_IOCTL_GET_DEVLIST:
483 return(ioctl_get_devlist(ioctl));
484
485 case OS2AHCI_IOCTL_PASSTHROUGH:
486 return(ioctl_passthrough(ioctl));
487
488 }
489 break;
490
491 case OS2AHCI_IDC_CATEGORY:
492 switch (ioctl->Function) {
493
494 case OS2AHCI_IDC_BIOSMODE:
495 /* reconfigure adapters in BIOS/int13 mode; needed for generating
496 * trap dumps on some machines. This was intended to be called by ACPI.PSD,
497 * but that is never done. This is obsolete. The kernel exit accomplishes
498 * this instead.
499 *
500 * To enter BIOS mode, we flush all write caches, turn off interrupts
501 * and restore the BIOS configuration. This is exactly what
502 * apm_suspend() does.
503 */
504 apm_suspend();
505 return(STDON);
506
507 case OS2AHCI_IDC_BEEP:
508 /* IOCTL for IDC testing - just beep */
509 DevHelp_Beep(2000, 100);
510 return(STDON);
511 }
512 break;
513
514 case DSKSP_CAT_GENERIC:
515 return(ioctl_gen_dsk(ioctl));
516
517 case DSKSP_CAT_SMART:
518 return(ioctl_smart(ioctl));
519
520 }
521
522 return(STDON | STATUS_ERR_UNKCMD);
523}
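/* Minimal ring-3 usage sketch (assumptions: the character device name and
 * the parameter/data packet layouts come from ioctl.h and are not shown in
 * this file; "OS2AHCI$" is assumed to be the name under which the character
 * device is opened):
 *
 *   HFILE hf;
 *   ULONG action, plen = 0, dlen = sizeof(buf);
 *
 *   DosOpen("OS2AHCI$", &hf, &action, 0, FILE_NORMAL, FILE_OPEN,
 *           OPEN_ACCESS_READWRITE | OPEN_SHARE_DENYNONE, NULL);
 *   DosDevIOCtl(hf, OS2AHCI_IOCTL_CATEGORY, OS2AHCI_IOCTL_GET_DEVLIST,
 *               NULL, 0, &plen, buf, sizeof(buf), &dlen);
 *   DosClose(hf);
 */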
524
525/******************************************************************************
526 * Read from character device. If tracing is on (internal ring buffer trace),
527 * we return data from the trace buffer; if not, we might return a device
528 * dump similar to IBM1S506.ADD/DANIS506.ADD (TODO).
529 */
530USHORT char_dev_input(RP_RWV _far *rwrb)
531{
532 return(trace_char_dev(rwrb));
533}
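/* Usage note (assumption, not stated in this file): since CMDINPUT is routed
 * to trace_char_dev(), the internal trace buffer can be retrieved from ring 3
 * by simply reading the driver's character device, e.g. something along the
 * lines of "copy OS2AHCI$ trace.log".
 */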
534
535/******************************************************************************
536 * Device driver exit handler. This handler is called when OS/2 shuts down and
537 * flushes the write caches of all attached devices. Since this is effectively
538 * the same we do when suspending, we'll call out to the corresponding APM
539 * function.
540 *
541 * NOTE: Errors are ignored because there's no way we could stop the shutdown
542 * or do something about the error, unless retrying endlessly is
543 * considered an option.
544 */
545USHORT exit_drv(int func)
546{
547 dprintf("exit_drv(%d) called\n", func);
548
549 if (func == 0) {
550 /* we're only interested in the second phase of the shutdown */
551 return(STDON);
552 }
553
554 apm_suspend();
555 return(STDON);
556}
557
558/******************************************************************************
559 * Device driver suspend/resume handler. This handler is called when ACPI is
560 * executing a suspend or resume.
561 */
562USHORT sr_drv(int func)
563{
564 dprintf("sr_drv(%d) called\n", func);
565
566 if (func) apm_resume();
567 else apm_suspend();
568
569 return(STDON);
570}
571
572/******************************************************************************
573 * ADD entry point. This is the main entry point for all ADD requests. Due to
574 * the asynchronous nature of ADD drivers, this function primarily queues the
575 * IORB(s) to the corresponding adapter or port queues, then triggers the
576 * state machine to initiate processing queued IORBs.
577 *
578 * NOTE: In order to prevent race conditions or engine stalls, certain rules
579 * around locking, unlocking and IORB handling in general have been
580 * established. Refer to the comments in "trigger_engine()" for
581 * details.
582 */
583void _cdecl _far _loadds add_entry(IORBH _far *first_iorb)
584{
585 IORBH _far *iorb;
586 IORBH _far *next = NULL;
587
588 spin_lock(drv_lock);
589
590 for (iorb = first_iorb; iorb != NULL; iorb = next) {
591 /* Queue this IORB. Queues primarily exist on port level but there are
592 * some requests which affect the whole driver, most notably
593 * IOCC_CONFIGURATION. In either case, adding the IORB to the driver or
594 * port queue will change the links, thus we need to save the original
595 * link in 'next'.
596 */
597 next = (iorb->RequestControl & IORB_CHAIN) ? iorb->pNxtIORB : 0;
598
599 iorb->Status = 0;
600 iorb->ErrorCode = 0;
601 memset(&iorb->ADDWorkSpace, 0x00, sizeof(ADD_WORKSPACE));
602
603 if (iorb_driver_level(iorb)) {
604 /* driver-level IORB */
605 iorb->UnitHandle = 0;
606 iorb_queue_add(&driver_queue, iorb);
607
608 } else {
609 /* port-level IORB */
610 int a = iorb_unit_adapter(iorb);
611 int p = iorb_unit_port(iorb);
612 int d = iorb_unit_device(iorb);
613
614 if (a >= ad_info_cnt ||
615 p > ad_infos[a].port_max ||
616 d > ad_infos[a].ports[p].dev_max ||
617 (ad_infos[a].port_map & (1UL << p)) == 0) {
618
619 /* unit handle outside of the allowed range */
620 dprintf("warning: IORB for %d.%d.%d out of range\n", a, p, d);
621 iorb->Status = IORB_ERROR;
622 iorb->ErrorCode = IOERR_CMD_SYNTAX;
623 iorb_complete(iorb);
624 continue;
625 }
626
627 iorb_queue_add(&ad_infos[a].ports[p].iorb_queue, iorb);
628 }
629 }
630
631 /* trigger state machine */
632 trigger_engine();
633
634 spin_unlock(drv_lock);
635}
636
637/******************************************************************************
638 * Trigger IORB queue engine. This is a wrapper function for trigger_engine_1()
639 * which will try to get all IORBs sent on their way a couple of times. If
640 * there are still IORBs ready for processing after this, this function will
641 * hand off to a context hook which will continue to trigger the engine until
642 * all IORBs have been sent.
643 *
644 * NOTE: While initialization has not completed (or during APM suspend/resume
645 * operations), this function will loop indefinitely because we can't
646 * rely on interrupt handlers or context hooks and complex IORBs
647 * requiring multiple requeues would eventually hang and time out if
648 * we stopped triggering here.
649 */
650void trigger_engine(void)
651{
652 int i;
653
654 for (i = 0; i < 3 || !init_complete; i++) {
655 if (trigger_engine_1() == 0) {
656 /* done -- all IORBs have been sent on their way */
657 return;
658 }
659 }
660
661 /* Something keeps bouncing; hand off to the engine context hook which will
662 * keep trying in the background.
663 */
664 DevHelp_ArmCtxHook(0, engine_ctxhook_h);
665}
666
667/******************************************************************************
668 * Trigger IORB queue engine in order to send commands in the driver/port IORB
669 * queues to the AHCI hardware. This function will return the number of IORBs
670 * sent. Keep in mind that IORBs might "bounce" if the adapter/port is not in
671 * a state to accept the command, thus it might take quite a few calls to get
672 * all IORBs on their way. This is why there's a wrapper function which tries
673 * it a few times, then hands off to a context hook which will keep trying in
674 * the background.
675 *
676 * IORBs might complete before send_iorb() has returned, at any time during
677 * interrupt processing or on another CPU on SMP systems. IORB completion
678 * means modifications to the corresponding IORB queue (the completed IORB
679 * is removed from the queue) thus we need to protect the IORB queues from
680 * race conditions. The safest approach short of keeping the driver-level
681 * spinlock acquired permanently is to keep it throughout this function and
682 * release it temporarily in send_iorb().
683 *
684 * This implies that the handler functions are fully responsible for acquiring
685 * the driver-level spinlock when they need it, and for releasing it again.
686 *
687 * As a rule of thumb, get the driver-level spinlock whenever accessing
688 * volatile variables (IORB queues, values in ad_info[], ...).
689 *
690 * Additional Notes:
691 *
692 * - This function is expected to be called with the spinlock acquired
693 *
694 * - Adapters can be flagged as 'busy' which means no new IORBs are sent (they
695 * just remain in the queue). This can be used to release the driver-level
696 * spinlock while making sure no new IORBs are going to hit the hardware.
697 * In order to prevent engine stalls, all handlers using this functionality
698 * need to invoke trigger_engine() after resetting the busy flag.
699 *
700 * - Driver-level IORBs are not synchronized by adapter-level 'busy' flags.
701 * However, the driver-level queue is worked "one entry at a time" which
702 * means that no new IORBs will be queued on the driver-level queue until
703 * the head element has completed processing. This means that driver-
704 * level IORB handlers don't need to protect against each other. But they
705 * do need to keep in mind interference with port-level IORBs:
706 *
707 * - Driver-level IORB handlers must obtain the spinlock and/or flag all
708 * adapters as 'busy' which are affected by the driver-level IORB
709 *
710 * - Driver-level IORB handlers must not access the hardware of a
711 * particular adapter if it's flagged as 'busy' by another IORB.
712 */
713int trigger_engine_1(void)
714{
715 IORBH _far *iorb;
716 IORBH _far *next;
717 int iorbs_sent = 0;
718 int a;
719 int p;
720
721 iorbs_sent = 0;
722
723 /* process driver-level IORBs */
724 if ((iorb = driver_queue.root) != NULL && !add_workspace(iorb)->processing) {
725 send_iorb(iorb);
726 iorbs_sent++;
727 }
728
729 /* process port-level IORBs */
730 for (a = 0; a < ad_info_cnt; a++) {
731 AD_INFO *ai = ad_infos + a;
732 if (ai->busy) {
733 /* adapter is busy; don't process any IORBs */
734 continue;
735 }
736 for (p = 0; p <= ai->port_max; p++) {
737 /* send all queued IORBs on this port */
738 next = NULL;
739 for (iorb = ai->ports[p].iorb_queue.root; iorb != NULL; iorb = next) {
740 next = iorb->pNxtIORB;
741 if (!add_workspace(iorb)->processing) {
742 send_iorb(iorb);
743 iorbs_sent++;
744 }
745 }
746 }
747 }
748
749 return(iorbs_sent);
750}
751
752/******************************************************************************
753 * Send a single IORB to the corresponding AHCI adapter/port. This is just a
754 * switch board for calling the corresponding iocc_*() handler function.
755 *
756 * NOTE: This function is expected to be called with the driver-level spinlock
757 * acquired. It will release it before calling any of the handler
758 * functions and re-acquire it when done.
759 */
760void send_iorb(IORBH _far *iorb)
761{
762 /* Mark IORB as "processing" before doing anything else. Once the IORB is
763 * marked as "processing", we can release the spinlock because subsequent
764 * invocations of trigger_engine() (e.g. at interrupt time) will ignore this
765 * IORB.
766 */
767 add_workspace(iorb)->processing = 1;
768 spin_unlock(drv_lock);
769
770 switch (iorb->CommandCode) {
771
772 case IOCC_CONFIGURATION:
773 iocc_configuration(iorb);
774 break;
775
776 case IOCC_DEVICE_CONTROL:
777 iocc_device_control(iorb);
778 break;
779
780 case IOCC_UNIT_CONTROL:
781 iocc_unit_control(iorb);
782 break;
783
784 case IOCC_GEOMETRY:
785 iocc_geometry(iorb);
786 break;
787
788 case IOCC_EXECUTE_IO:
789 iocc_execute_io(iorb);
790 break;
791
792 case IOCC_UNIT_STATUS:
793 iocc_unit_status(iorb);
794 break;
795
796 case IOCC_ADAPTER_PASSTHRU:
797 iocc_adapter_passthru(iorb);
798 break;
799
800 default:
801 /* unsupported call */
802 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
803 iorb_done(iorb);
804 break;
805 }
806
807 /* re-acquire spinlock before returning to trigger_engine() */
808 spin_lock(drv_lock);
809}
810
811/******************************************************************************
812 * Handle IOCC_CONFIGURATION requests.
813 */
814void iocc_configuration(IORBH _far *iorb)
815{
816 int a;
817
818 switch (iorb->CommandModifier) {
819
820 case IOCM_COMPLETE_INIT:
821 /* Complete initialization. From now on, we won't have to restore the BIOS
822 * configuration after each command and we're fully operational (i.e. will
823 * use interrupts, timers and context hooks instead of polling).
824 */
825 if (!init_complete) {
826 dprintf("leaving initialization mode\n");
827 for (a = 0; a < ad_info_cnt; a++) {
828 lock_adapter(ad_infos + a);
829 ahci_complete_init(ad_infos + a);
830 }
831 init_complete = 1;
832
833 /* DAZ turn off COM port output if on */
834 //com_base = 0;
835
836 /* release all adapters */
837 for (a = 0; a < ad_info_cnt; a++) {
838 unlock_adapter(ad_infos + a);
839 }
840
841 /* register APM hook */
842 apm_init();
843
844 if (!TRACE_ACTIVE) build_user_info();
845 }
846 iorb_done(iorb);
847 break;
848
849 case IOCM_GET_DEVICE_TABLE:
850 /* construct a device table */
851 iocm_device_table(iorb);
852 break;
853
854 default:
855 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
856 iorb_done(iorb);
857 break;
858 }
859}
860
861/******************************************************************************
862 * Handle IOCC_DEVICE_CONTROL requests.
863 */
864void iocc_device_control(IORBH _far *iorb)
865{
866 AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb);
867 IORBH _far *ptr;
868 IORBH _far *next = NULL;
869 int p = iorb_unit_port(iorb);
870 int d = iorb_unit_device(iorb);
871
872 switch (iorb->CommandModifier) {
873
874 case IOCM_ABORT:
875 /* abort all pending commands on specified port and device */
876 spin_lock(drv_lock);
877 for (ptr = ai->ports[p].iorb_queue.root; ptr != NULL; ptr = next) {
878 next = ptr->pNxtIORB;
879 /* move all matching IORBs to the abort queue */
880 if (ptr != iorb && iorb_unit_device(ptr) == d) {
881 iorb_queue_del(&ai->ports[p].iorb_queue, ptr);
882 iorb_queue_add(&abort_queue, ptr);
883 ptr->ErrorCode = IOERR_CMD_ABORTED;
884 }
885 }
886 spin_unlock(drv_lock);
887
888 /* trigger reset context hook which will finish the abort processing */
889 DevHelp_ArmCtxHook(0, reset_ctxhook_h);
890 break;
891
892 case IOCM_SUSPEND:
893 case IOCM_RESUME:
894 case IOCM_GET_QUEUE_STATUS:
895 /* Suspend/resume operations allow access to the hardware for other
896 * entities such as IBMIDECD.FLT. Since os2ahci implements both ATA
897 * and ATAPI in the same driver, this won't be required.
898 */
899 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
900 break;
901
902 case IOCM_LOCK_MEDIA:
903 case IOCM_UNLOCK_MEDIA:
904 case IOCM_EJECT_MEDIA:
905 /* unit control commands to lock, unlock and eject media */
906 /* will be supported later... */
907 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
908 break;
909
910 default:
911 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
912 break;
913 }
914
915 iorb_done(iorb);
916}
917
918/******************************************************************************
919 * Handle IOCC_UNIT_CONTROL requests.
920 */
921void iocc_unit_control(IORBH _far *iorb)
922{
923 IORB_UNIT_CONTROL _far *iorb_uc = (IORB_UNIT_CONTROL _far *) iorb;
924 int a = iorb_unit_adapter(iorb);
925 int p = iorb_unit_port(iorb);
926 int d = iorb_unit_device(iorb);
927
928 spin_lock(drv_lock);
929 switch (iorb->CommandModifier) {
930
931 case IOCM_ALLOCATE_UNIT:
932 /* allocate unit for exclusive access */
933 if (ad_infos[a].ports[p].devs[d].allocated) {
934 iorb_seterr(iorb, IOERR_UNIT_ALLOCATED);
935 } else {
936 ad_infos[a].ports[p].devs[d].allocated = 1;
937 }
938 break;
939
940 case IOCM_DEALLOCATE_UNIT:
941 /* deallocate exclusive access to unit */
942 if (!ad_infos[a].ports[p].devs[d].allocated) {
943 iorb_seterr(iorb, IOERR_UNIT_NOT_ALLOCATED);
944 } else {
945 ad_infos[a].ports[p].devs[d].allocated = 0;
946 }
947 break;
948
949 case IOCM_CHANGE_UNITINFO:
950 /* Change unit (device) information. One reason for this IOCM is the
951 * interface for filter device drivers: a filter device driver can
952 * either change existing UNITINFOs or permanently allocate units
953 * and fabricate new [logical] units; the former is the reason why we
954 * must store the pointer to the updated UNITINFO for subsequent
955 * IOCC_CONFIGURATION/IOCM_GET_DEVICE_TABLE calls.
956 */
957 if (!ad_infos[a].ports[p].devs[d].allocated) {
958 iorb_seterr(iorb, IOERR_UNIT_NOT_ALLOCATED);
959 break;
960 }
961 ad_infos[a].ports[p].devs[d].unit_info = iorb_uc->pUnitInfo;
962 break;
963
964 default:
965 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
966 break;
967 }
968
969 spin_unlock(drv_lock);
970 iorb_done(iorb);
971}
972
973/******************************************************************************
974 * Scan all ports for AHCI devices and construct a DASD device table.
975 *
976 * NOTES: This function may be called multiple times. Only the first
977 * invocation will actually scan for devices; all subsequent calls will
978 * merely return the results of the initial scan, potentially augmented
979 * by modified unit infos after IOCC_CONFIGURATION/IOCM_CHANGE_UNITINFO
980 * requests.
981 *
982 * In order to support applications that can't deal with ATAPI devices
983 * (i.e. need a SCSI adapter) os2ahci will optionally report ATAPI
984 * devices as SCSI devices. The corresponding SCSI adapter doesn't
985 * really exist and is only reported here for the IOCM_GET_DEVICETABLE
986 * request. The units attached to this adapter will use the real HW
987 * unit IDs, thus we'll never receive a command specific to the
988 * emulated SCSI adapter and won't need to set up any sort of entity
989 * for it; the only purpose of the emulated SCSI adapter is to pass the
990 * bus type "AI_DEVBUS_SCSI_2" upstream, and the emulated units, of
991 * course. The emulated SCSI target IDs are allocated as follows:
992 *
993 * 0 the virtual adapter
994 * 1..n emulated devices; SCSI target ID increments sequentially
995 */
996void iocm_device_table(IORBH _far *iorb)
997{
998 IORB_CONFIGURATION _far *iorb_conf;
999 DEVICETABLE _far *dt;
1000 char _far *pos;
1001 int scsi_units = 0;
1002 int scsi_id = 1;
1003 int rc;
1004 int dta;
1005 int a;
1006 int p;
1007 int d;
1008
1009 iorb_conf = (IORB_CONFIGURATION _far *) iorb;
1010 dt = iorb_conf->pDeviceTable;
1011
1012 spin_lock(drv_lock);
1013
1014 /* initialize device table header */
1015 dt->ADDLevelMajor = ADD_LEVEL_MAJOR;
1016 dt->ADDLevelMinor = ADD_LEVEL_MINOR;
1017 dt->ADDHandle = add_handle;
1018 dt->TotalAdapters = ad_info_cnt + 1;
1019
1020 /* set start of adapter and device information tables */
1021 pos = (char _far *) (dt->pAdapter + dt->TotalAdapters);
1022
1023 /* go through all adapters, including the virtual SCSI adapter */
1024 for (dta = 0; dta < dt->TotalAdapters; dta++) {
1025 ADAPTERINFO _far *ptr = (ADAPTERINFO _far *) pos;
1026
1027 /* sanity check for sufficient space in device table */
1028 if ((u32) (ptr + 1) - (u32) dt > iorb_conf->DeviceTableLen) {
1029 dprintf("error: device table provided by DASD too small\n");
1030 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE);
1031 goto iocm_device_table_done;
1032 }
1033
1034 dt->pAdapter[dta] = (ADAPTERINFO _near *) ((u32) ptr & 0xffff);
1035 memset(ptr, 0x00, sizeof(*ptr));
1036
1037 ptr->AdapterIOAccess = AI_IOACCESS_BUS_MASTER;
1038 ptr->AdapterHostBus = AI_HOSTBUS_OTHER | AI_BUSWIDTH_32BIT;
1039 ptr->AdapterFlags = AF_16M | AF_HW_SCATGAT;
1040 ptr->MaxHWSGList = AHCI_MAX_SG / 2; /* AHCI S/G elements are 22 bits */
1041
1042 if (dta < ad_info_cnt) {
1043 /* this is a physical AHCI adapter */
1044 AD_INFO *ad_info = ad_infos + dta;
1045
1046 ptr->AdapterDevBus = AI_DEVBUS_ST506 | AI_DEVBUS_32BIT;
1047 sprintf(ptr->AdapterName, "AHCI_%d", dta);
1048
1049 if (!ad_info->port_scan_done) {
1050 /* first call; need to scan AHCI hardware for devices */
1051 if (ad_info->busy) {
1052 dprintf("error: port scan requested while adapter was busy\n");
1053 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE);
1054 goto iocm_device_table_done;
1055 }
1056 ad_info->busy = 1;
1057 spin_unlock(drv_lock);
1058 rc = ahci_scan_ports(ad_info);
1059 spin_lock(drv_lock);
1060 ad_info->busy = 0;
1061
1062 if (rc != 0) {
1063 dprintf("error: port scan failed on adapter #%d\n", dta);
1064 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE);
1065 goto iocm_device_table_done;
1066 }
1067 ad_info->port_scan_done = 1;
1068 }
1069
1070 /* insert physical (i.e. AHCI) devices into the device table */
1071 for (p = 0; p <= ad_info->port_max; p++) {
1072 for (d = 0; d <= ad_info->ports[p].dev_max; d++) {
1073 if (ad_info->ports[p].devs[d].present) {
1074 if (ad_info->ports[p].devs[d].atapi && emulate_scsi[dta][p]) {
1075 /* only report this unit as SCSI unit */
1076 scsi_units++;
1077 continue;
1078 }
1079 if (add_unit_info(iorb_conf, dta, dta, p, d, 0)) {
1080 goto iocm_device_table_done;
1081 }
1082 }
1083 }
1084 }
1085
1086 } else {
1087 /* this is the virtual SCSI adapter */
1088 if (scsi_units == 0) {
1089 /* not a single unit to be emulated via SCSI */
1090 dt->TotalAdapters--;
1091 break;
1092 }
1093
1094 /* set adapter name and bus type to mimic a SCSI controller */
1095 ptr->AdapterDevBus = AI_DEVBUS_SCSI_2 | AI_DEVBUS_16BIT;
1096 sprintf(ptr->AdapterName, "AHCI_SCSI_0");
1097
1098 /* add all ATAPI units to be emulated by this virtual adapter */
1099 for (a = 0; a < ad_info_cnt; a++) {
1100 AD_INFO *ad_info = ad_infos + a;
1101
1102 for (p = 0; p <= ad_info->port_max; p++) {
1103 for (d = 0; d <= ad_info->ports[p].dev_max; d++) {
1104 if (ad_info->ports[p].devs[d].present &&
1105 ad_info->ports[p].devs[d].atapi &&
1106 emulate_scsi[a][p]) {
1107 if (add_unit_info(iorb_conf, dta, a, p, d, scsi_id++)) {
1108 goto iocm_device_table_done;
1109 }
1110 }
1111 }
1112 }
1113 }
1114 }
1115
1116 /* calculate offset for next adapter */
1117 pos = (char _far *) (ptr->UnitInfo + ptr->AdapterUnits);
1118 }
1119
1120iocm_device_table_done:
1121 spin_unlock(drv_lock);
1122 iorb_done(iorb);
1123}
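/* Layout sketch of the device table filled in above (derived from the code,
 * not from separate documentation): the DASD-provided buffer starts with the
 * DEVICETABLE header including the pAdapter[] array of near pointers,
 * followed by one ADAPTERINFO per adapter, each immediately followed by its
 * UNITINFO entries:
 *
 *   DEVICETABLE hdr | pAdapter[0..n-1] | ADAPTERINFO 0 | UNITINFO ...
 *                                      | ADAPTERINFO 1 | UNITINFO ... | ...
 *
 * 'pos' tracks the next free byte; add_unit_info() appends UNITINFOs and
 * increments AdapterUnits so the next ADAPTERINFO starts right behind them.
 */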
1124
1125/******************************************************************************
1126 * Handle IOCC_GEOMETRY requests.
1127 */
1128void iocc_geometry(IORBH _far *iorb)
1129{
1130 switch (iorb->CommandModifier) {
1131
1132 case IOCM_GET_MEDIA_GEOMETRY:
1133 case IOCM_GET_DEVICE_GEOMETRY:
1134 add_workspace(iorb)->idempotent = 1;
1135 ahci_get_geometry(iorb);
1136 break;
1137
1138 default:
1139 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1140 iorb_done(iorb);
1141 }
1142}
1143
1144/******************************************************************************
1145 * Handle IOCC_EXECUTE_IO requests.
1146 */
1147void iocc_execute_io(IORBH _far *iorb)
1148{
1149 switch (iorb->CommandModifier) {
1150
1151 case IOCM_READ:
1152 add_workspace(iorb)->idempotent = 1;
1153 ahci_read(iorb);
1154 break;
1155
1156 case IOCM_READ_VERIFY:
1157 add_workspace(iorb)->idempotent = 1;
1158 ahci_verify(iorb);
1159 break;
1160
1161 case IOCM_WRITE:
1162 add_workspace(iorb)->idempotent = 1;
1163 ahci_write(iorb);
1164 break;
1165
1166 case IOCM_WRITE_VERIFY:
1167 add_workspace(iorb)->idempotent = 1;
1168 ahci_write(iorb);
1169 break;
1170
1171 default:
1172 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1173 iorb_done(iorb);
1174 }
1175}
1176
1177/******************************************************************************
1178 * Handle IOCC_UNIT_STATUS requests.
1179 */
1180void iocc_unit_status(IORBH _far *iorb)
1181{
1182 switch (iorb->CommandModifier) {
1183
1184 case IOCM_GET_UNIT_STATUS:
1185 add_workspace(iorb)->idempotent = 1;
1186 ahci_unit_ready(iorb);
1187 break;
1188
1189 default:
1190 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1191 iorb_done(iorb);
1192 }
1193}
1194
1195/******************************************************************************
1196 * Handle IOCC_ADAPTER_PASSTHROUGH requests.
1197 */
1198void iocc_adapter_passthru(IORBH _far *iorb)
1199{
1200 switch (iorb->CommandModifier) {
1201
1202 case IOCM_EXECUTE_CDB:
1203 add_workspace(iorb)->idempotent = 0;
1204 ahci_execute_cdb(iorb);
1205 break;
1206
1207 case IOCM_EXECUTE_ATA:
1208 add_workspace(iorb)->idempotent = 0;
1209 ahci_execute_ata(iorb);
1210 break;
1211
1212 default:
1213 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1214 iorb_done(iorb);
1215 }
1216}
1217
1218/******************************************************************************
1219 * Add an IORB to the specified queue. This function must be called with the
1220 * adapter-level spinlock acquired.
1221 */
1222void iorb_queue_add(IORB_QUEUE _far *queue, IORBH _far *iorb)
1223{
1224 if (iorb_priority(iorb)) {
1225 /* priority IORB; insert at first position */
1226 iorb->pNxtIORB = queue->root;
1227 queue->root = iorb;
1228
1229 } else {
1230 /* append IORB to end of queue */
1231 iorb->pNxtIORB = NULL;
1232
1233 if (queue->root == NULL) {
1234 queue->root = iorb;
1235 } else {
1236 queue->tail->pNxtIORB = iorb;
1237 }
1238 queue->tail = iorb;
1239 }
1240
1241 if (debug) {
1242 /* determine queue type (local, driver, abort or port) and minimum debug
1243 * level; otherwise, queue debug prints can become really confusing.
1244 */
1245 char *queue_type;
1246 int min_debug = 1;
1247
1248 if ((u32) queue >> 16 == (u32) (void _far *) &queue >> 16) {
1249 /* this queue is on the stack */
1250 queue_type = "local";
1251 min_debug = 2;
1252
1253 } else if (queue == &driver_queue) {
1254 queue_type = "driver";
1255
1256 } else if (queue == &abort_queue) {
1257 queue_type = "abort";
1258 min_debug = 2;
1259
1260 } else {
1261 queue_type = "port";
1262 }
1263
1264 if (debug > min_debug) {
1265 printf("IORB %Fp queued (cmd = %d/%d, queue = %Fp [%s], timeout = %ld)\n",
1266 iorb, iorb->CommandCode, iorb->CommandModifier, queue, queue_type,
1267 iorb->Timeout);
1268 }
1269 }
1270}
1271
1272/******************************************************************************
1273 * Remove an IORB from the specified queue. This function must be called with
1274 * the adapter-level spinlock acquired.
1275 */
1276int iorb_queue_del(IORB_QUEUE _far *queue, IORBH _far *iorb)
1277{
1278 IORBH _far *_iorb;
1279 IORBH _far *_prev = NULL;
1280 int found = 0;
1281
1282 for (_iorb = queue->root; _iorb != NULL; _iorb = _iorb->pNxtIORB) {
1283 if (_iorb == iorb) {
1284 /* found the IORB to be removed */
1285 if (_prev != NULL) {
1286 _prev->pNxtIORB = _iorb->pNxtIORB;
1287 } else {
1288 queue->root = _iorb->pNxtIORB;
1289 }
1290 if (_iorb == queue->tail) {
1291 queue->tail = _prev;
1292 }
1293 found = 1;
1294 break;
1295 }
1296 _prev = _iorb;
1297 }
1298
1299 if (found) {
1300 ddprintf("IORB %Fp removed (queue = %Fp)\n", iorb, queue);
1301 } else {
1302 dprintf("IORB %Fp not found in queue %Fp\n", iorb, queue);
1303 }
1304
1305 return(!found);
1306}
1307
1308/******************************************************************************
1309 * Set the error code in the specified IORB
1310 *
1311 * NOTE: This function does *not* call iorb_done(). It merely sets the IORB
1312 * status to the specified error code.
1313 */
1314void iorb_seterr(IORBH _far *iorb, USHORT error_code)
1315{
1316 iorb->ErrorCode = error_code;
1317 iorb->Status |= IORB_ERROR;
1318}
1319
1320/******************************************************************************
1321 * Mark the specified IORB as done and notify the asynchronous post function,
1322 * if any. The IORB is also removed from the corresponding IORB queue.
1323 *
1324 * NOTES: This function does not clear the Status field; it merely adds the
1325 * IORB_DONE flag.
1326 *
1327 * This function is expected to be called *without* the corresponding
1328 * driver-level drv_lock acquired. It will acquire the spinlock before
1329 * updating the IORB queue and release it before notifying the upstream
1330 * code in order to prevent deadlocks.
1331 *
1332 * Due to this logic, this function is only good for simple task-time
1333 * completions. Functions working on lists of IORBs (such as interrupt
1334 * handlers or context hooks) should call iorb_complete() directly and
1335 * implement their own logic for removing the IORB from the port queue.
1336 * See abort_ctxhook() for an example.
1337 */
1338void iorb_done(IORBH _far *iorb)
1339{
1340 int a = iorb_unit_adapter(iorb);
1341 int p = iorb_unit_port(iorb);
1342
1343 /* remove IORB from corresponding queue */
1344 spin_lock(drv_lock);
1345 if (iorb_driver_level(iorb)) {
1346 iorb_queue_del(&driver_queue, iorb);
1347 } else {
1348 iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, iorb);
1349 }
1350 aws_free(add_workspace(iorb));
1351 spin_unlock(drv_lock);
1352
1353 iorb_complete(iorb);
1354}
1355
1356/******************************************************************************
1357 * Complete an IORB. This should be called without the adapter-level spinlock
1358 * to allow the IORB completion routine to perform whatever processing it
1359 * requires. This implies that the IORB should no longer be in any global
1360 * queue because the IORB completion routine may well reuse the IORB and send
1361 * the next request to us before even returning from this function.
1362 */
1363void iorb_complete(IORBH _far *iorb)
1364{
1365 iorb->Status |= IORB_DONE;
1366
1367 ddprintf("IORB %Fp complete (status = 0x%04x, error = 0x%04x)\n",
1368 iorb, iorb->Status, iorb->ErrorCode);
1369
1370 if (iorb->RequestControl & IORB_ASYNC_POST) {
1371 iorb->NotifyAddress(iorb);
1372 }
1373}
1374
1375/******************************************************************************
1376 * Requeue the specified IORB such that it will be sent downstream for
1377 * processing again. This includes freeing all resources currently allocated
1378 * (timer, buffer, ...) and resetting the flags to 0. The driver-level
1379 * spinlock must be acquired when calling this function.
1380 *
1381 * The following flags are preserved:
1382 * - no_ncq
1383 */
1384void iorb_requeue(IORBH _far *iorb)
1385{
1386 ADD_WORKSPACE _far *aws = add_workspace(iorb);
1387 u16 no_ncq = aws->no_ncq;
1388 u16 unaligned = aws->unaligned;
1389 u16 retries = aws->retries;
1390
1391 aws_free(aws);
1392 memset(aws, 0x00, sizeof(*aws));
1393
1394 aws->no_ncq = no_ncq;
1395 aws->unaligned = unaligned;
1396 aws->retries = retries;
1397}
1398
1399/******************************************************************************
1400 * Free resources in ADD workspace (timer, buffer, ...). This function should
1401 * be called with the spinlock held to prevent race conditions.
1402 */
1403void aws_free(ADD_WORKSPACE _far *aws)
1404{
1405 if (aws->timer != 0) {
1406 ADD_CancelTimer(aws->timer);
1407 aws->timer = 0;
1408 }
1409
1410 if (aws->buf != NULL) {
1411 free(aws->buf);
1412 aws->buf = NULL;
1413 }
1414}
1415
1416/******************************************************************************
1417 * Lock the adapter, waiting for availability if necessary. This is expected
1418 * to be called at task/request time without the driver-level spinlock
1419 * acquired. Don't call at interrupt time.
1420 */
1421void lock_adapter(AD_INFO *ai)
1422{
1423 spin_lock(drv_lock);
1424 while (ai->busy) {
1425 spin_unlock(drv_lock);
1426 msleep(250);
1427 spin_lock(drv_lock);
1428 }
1429 ai->busy = 1;
1430 spin_unlock(drv_lock);
1431}
1432
1433/******************************************************************************
1434 * Unlock adapter (i.e. reset busy flag)
1435 */
1436void unlock_adapter(AD_INFO *ai)
1437{
1438 ai->busy = 0;
1439}
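/* Typical usage sketch, following the rules spelled out in the comment block
 * above trigger_engine_1() (the "lengthy operation" is a placeholder):
 *
 *   lock_adapter(ai);      // adapter flagged busy; no new IORBs are sent
 *   ...lengthy operation without holding drv_lock...
 *   unlock_adapter(ai);
 *   trigger_engine();      // resume processing of queued IORBs
 */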
1440
1441/******************************************************************************
1442 * Timeout handler for I/O commands. Since timeout handling can involve
1443 * lengthy operations like port resets, the main code is located in a
1444 * separate function which is invoked via a context hook.
1445 */
1446void _cdecl _far timeout_callback(ULONG timer_handle, ULONG p1,
1447 ULONG p2)
1448{
1449 IORBH _far *iorb = (IORBH _far *) p1;
1450 int a = iorb_unit_adapter(iorb);
1451 int p = iorb_unit_port(iorb);
1452
1453 ADD_CancelTimer(timer_handle);
1454 dprintf("timeout for IORB %Fp\n", iorb);
1455
1456 /* Move the timed-out IORB to the abort queue. Since it's possible that the
1457 * IORB has completed after the timeout has expired but before we got to
1458 * this line of code, we'll check the return code of iorb_queue_del(): If it
1459 * returns an error, the IORB must have completed a few microseconds ago and
1460 * there is no timeout.
1461 */
1462 spin_lock(drv_lock);
1463 if (iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, iorb) == 0) {
1464 iorb_queue_add(&abort_queue, iorb);
1465 iorb->ErrorCode = IOERR_ADAPTER_TIMEOUT;
1466 }
1467 spin_unlock(drv_lock);
1468
1469 /* Trigger abort processing function. We don't really care whether this
1470 * succeeds because the only reason why it would fail should be multiple
1471 * calls to DevHelp_ArmCtxHook() before the context hook had a chance to
1472 * start executing, which leaves two scenarios:
1473 *
1474 * - We succeeded in arming the context hook. Fine.
1475 *
1476 * - We armed the context hook a second time before it had a chance to
1477 * start executing. In this case, the already scheduled context hook
1478 * will process our IORB as well.
1479 */
1480 DevHelp_ArmCtxHook(0, reset_ctxhook_h);
1481
1482 /* Set up a watchdog timer which calls the context hook manually in case
1483 * some kernel thread is looping around the IORB_COMPLETE status bit
1484 * without yielding the CPU (kernel threads don't preempt). This shouldn't
1485 * happen per design because kernel threads are supposed to yield but it
1486 * does in the early boot phase.
1487 */
1488 ADD_StartTimerMS(&th_reset_watchdog, 5000, (PFN) reset_watchdog, 0, 0);
1489}
1490
1491/******************************************************************************
1492 * Reset handler watchdog. If a timeout occurs, a context hook is armed which
1493 * will execute as soon as a kernel thread yields the CPU. However, some
1494 * kernel components won't yield the CPU during the early boot phase and the
1495 * only way to kick some sense into those components is to run the context
1496 * hook right inside this timer callback. Not exactly pretty, especially
1497 * considering the fact that context hooks were implemented to prevent running
1498 * lengthy operations like a port reset at interrupt time, but without this
1499 * watchdog mechanism we run the risk of getting completely stalled by device
1500 * problems during the early boot phase.
1501 */
1502void _cdecl _far reset_watchdog(ULONG timer_handle, ULONG p1,
1503 ULONG p2)
1504{
1505 /* reset watchdog timer */
1506 ADD_CancelTimer(timer_handle);
1507 dprintf("reset watchdog invoked\n");
1508
1509 /* call context hook manually */
1510 reset_ctxhook(0);
1511}
1512
1513/******************************************************************************
1514 * small_code_ - this dummy func resolves the undefined reference linker
1515 * error that occurs when linking WATCOM objects with DDK's link.exe
1516 */
1517void _cdecl small_code_(void)
1518{
1519}
1520
1521/******************************************************************************
1522 * Add unit info to ADAPTERINFO array (IOCC_GET_DEVICE_TABLE requests). The
1523 * adapter info array in the device table, dt->pAdapter[], is expected to be
1524 * initialized for the specified index (dt_ai).
1525 *
1526 * Please note that the device table adapter index, dta, is not always equal
1527 * to the physical adapter index, a: if SCSI emulation has been activated, the
1528 * last reported adapter is a virtual SCSI adapter and the physical adapter
1529 * indexes for those units are, of course, different from the device table
1530 * index of the virtual SCSI adapter.
1531 */
1532static int add_unit_info(IORB_CONFIGURATION _far *iorb_conf, int dta,
1533 int a, int p, int d, int scsi_id)
1534{
1535 DEVICETABLE _far *dt = iorb_conf->pDeviceTable;
1536 ADAPTERINFO _far *ptr = (ADAPTERINFO _far *) (((u32) dt & 0xffff0000U) +
1537 (u16) dt->pAdapter[dta]);
1538 UNITINFO _far *ui = ptr->UnitInfo + ptr->AdapterUnits;
1539 AD_INFO *ai = ad_infos + a;
1540
1541 if ((u32) (ui + 1) - (u32) dt > iorb_conf->DeviceTableLen) {
1542 dprintf("error: device table provided by DASD too small\n");
1543 iorb_seterr(&iorb_conf->iorbh, IOERR_CMD_SW_RESOURCE);
1544 return(-1);
1545 }
1546
1547 if (ai->ports[p].devs[d].unit_info == NULL) {
1548 /* provide original information about this device (unit) */
1549 memset(ui, 0x00, sizeof(*ui));
1550 ui->AdapterIndex = dta; /* device table adapter index */
1551 ui->UnitHandle = iorb_unit(a, p, d); /* physical adapter index */
1552 ui->UnitIndex = ptr->AdapterUnits;
1553 ui->UnitType = ai->ports[p].devs[d].dev_type;
1554 ui->QueuingCount = ai->ports[p].devs[d].ncq_max;
1555 if (ai->ports[p].devs[d].removable) {
1556 ui->UnitFlags |= UF_REMOVABLE;
1557 }
1558 if (scsi_id > 0) {
1559 /* set fake SCSI ID for this unit */
1560 ui->UnitSCSITargetID = scsi_id;
1561 }
1562 } else {
1563 /* copy updated device (unit) information (IOCM_CHANGE_UNITINFO) */
1564 memcpy(ui, ai->ports[p].devs[d].unit_info, sizeof(*ui));
1565 }
1566
1567 ptr->AdapterUnits++;
1568 return(0);
1569}
1570
1571/*******************************************************************************
1572 * Register kernel exit handler for trap dumps. Our exit handler will be called
1573 * right before the kernel starts a dump; that's where we reset the controller
1574 * so it supports BIOS int13 I/O calls.
1575 */
1576static void register_krnl_exit(void)
1577{
1578 _asm {
1579 push ds
1580 push es
1581 push bx
1582 push si
1583 push di
1584
1585 mov ax, FLAG_KRNL_EXIT_ADD
1586 mov cx, TYPE_KRNL_EXIT_INT13
1587 mov bx, SEG asm_krnl_exit
1588 mov si, OFFSET asm_krnl_exit
1589 mov dl, DevHlp_RegisterKrnlExit
1590
1591 call dword ptr [Device_Help]
1592
1593 pop di
1594 pop si
1595 pop bx
1596 pop es
1597 pop ds
1598 }
1599
1600 dprintf("Registered kernel exit routine for INT13 mode\n");
1601}
1602