source: trunk/src/os2ahci/os2ahci.c@165

Last change on this file since 165 was 165, checked in by David Azarewicz, 12 years ago

code cleanup - debug messages
fixed defect in smart ioctl

File size: 51.9 KB
1/******************************************************************************
2 * os2ahci.c - main file for os2ahci driver
3 *
4 * Copyright (c) 2011 thi.guten Software Development
5 * Copyright (c) 2011 Mensys B.V.
6 * Portions copyright (c) 2013 David Azarewicz
7 *
8 * Authors: Christian Mueller, Markus Thielen
9 *
10 * Parts copied from/inspired by the Linux AHCI driver;
11 * those parts are (c) Linux AHCI/ATA maintainers
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include "os2ahci.h"
29#include "ioctl.h"
30#include "version.h"
31
32/* -------------------------- macros and constants ------------------------- */
33
34/* parse integer command line parameter */
35#define drv_parm_int(s, value, type, radix) \
36 { \
37 char _far *_ep; \
38 if ((s)[1] != ':') { \
39 cprintf("%s: missing colon (:) after /%c\n", drv_name, *(s)); \
40 goto init_fail; \
41 } \
42 value = (type) strtol((s) + 2, \
43 (const char _far* _far*) &_ep, \
44 radix); \
45 s = _ep; \
46 }
47
48#define drv_parm_int_optional(s, value, type, radix) \
49 { \
50 char _far *_ep; \
51 if ((s)[1] == ':') { \
52 value = (type) strtol((s) + 2, (const char _far* _far*) &_ep, radix); \
53 s = _ep; \
54 } else { \
55 value++; \
56 } \
57 }
58
59/* set two-dimensional array of port options */
60#define set_port_option(opt, val) \
61 if (adapter_index == -1) { \
62 /* set option for all adapters and ports */ \
63 memset(opt, val, sizeof(opt)); \
64 } else if (port_index == -1) { \
65 /* set option for all ports on current adapter */ \
66 memset(opt[adapter_index], val, sizeof(*opt)); \
67 } else { \
68 /* set option for specific port */ \
69 opt[adapter_index][port_index] = val; \
70 }
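
/* Illustrative note (editor's addition, not part of the original source):
 * with a hypothetical command line fragment "/A:0 /P:2 /N", the parser in
 * init_drv() below leaves adapter_index == 0 and port_index == 2, so
 *
 *     set_port_option(enable_ncq, !invert_option);
 *
 * reduces to enable_ncq[0][2] = 1. Without /A and /P both indexes remain
 * -1 and the same call memset()s the whole enable_ncq array instead.
 */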
71
72/* constants for undefined kernel exit routine;
73 * see register_krnl_exit() func */
74#define DevHlp_RegisterKrnlExit 0x006f
75
76#define FLAG_KRNL_EXIT_ADD 0x1000
77#define FLAG_KRNL_EXIT_REMOVE 0x2000
78
79#define TYPE_KRNL_EXIT_NMI 0x0000 /* non masked interrupts */
80#define TYPE_KRNL_EXIT_SFF 0x0001 /* system fatal faults */
81#define TYPE_KRNL_EXIT_PROCDUMP 0x0002
82#define TYPE_KRNL_EXIT_DYN 0x0003
83#define TYPE_KRNL_EXIT_INT13 0x0004 /* enable int13 IO */
84
85/* ------------------------ typedefs and structures ------------------------ */
86
87/* -------------------------- function prototypes -------------------------- */
88
89void _cdecl small_code_ (void);
90
91static int add_unit_info (IORB_CONFIGURATION _far *iorb_conf, int dt_ai,
92 int a, int p, int d, int scsi_id);
93
94static void register_krnl_exit (void);
95
96/* ------------------------ global/static variables ------------------------ */
97
98int debug = 0; /* if > 0, print debug messages to COM1 */
99int thorough_scan = 1; /* if != 0, perform thorough PCI scan */
100int init_reset = 1; /* if != 0, reset ports during init */
101int force_write_cache; /* if != 0, force write cache */
102int verbosity = 0; /* default is quiet. 1=show sign-on banner, >1=show adapter info during boot */
103int use_lvm_info = 1;
104int wrap_trace_buffer = 0;
105long com_baud = 0;
106
107PFN Device_Help = 0; /* pointer to device helper entry point */
108ULONG RMFlags = 0; /* required by resource manager library */
109PFN RM_Help0 = NULL; /* required by resource manager library */
110PFN RM_Help3 = NULL; /* required by resource manager library */
111HDRIVER rm_drvh; /* resource manager driver handle */
112char rm_drvname[80]; /* driver name as returned by RM */
113USHORT add_handle; /* driver handle (RegisterDeviceClass) */
114UCHAR timer_pool[TIMER_POOL_SIZE]; /* timer pool */
115char drv_name[] = "OS2AHCI"; /* driver name as string */
116
117/* resource manager driver information structure */
118DRIVERSTRUCT rm_drvinfo = {
119 drv_name, /* driver name */
120 "AHCI SATA Driver", /* driver description */
121 DVENDOR, /* vendor name */
122 CMVERSION_MAJOR, /* RM interface version major */
123 CMVERSION_MINOR, /* RM interface version minor */
124 BLD_YEAR, BLD_MONTH, BLD_DAY, /* date */
125 0, /* driver flags */
126 DRT_ADDDM, /* driver type */
127 DRS_ADD, /* driver sub type */
128 NULL /* driver callback */
129};
130
131ULONG drv_lock; /* driver-level spinlock */
132IORB_QUEUE driver_queue; /* driver-level IORB queue */
133AD_INFO ad_infos[MAX_AD]; /* adapter information list */
134int ad_info_cnt; /* number of entries in ad_infos[] */
135u16 ad_ignore; /* bitmap with adapter indexes to ignore */
136int init_complete; /* if != 0, initialization has completed */
137int suspended;
138int resume_sleep_flag;
139
140/* adapter/port-specific options saved when parsing the command line */
141u8 emulate_scsi[MAX_AD][AHCI_MAX_PORTS];
142u8 enable_ncq[MAX_AD][AHCI_MAX_PORTS];
143u8 link_speed[MAX_AD][AHCI_MAX_PORTS];
144u8 link_power[MAX_AD][AHCI_MAX_PORTS];
145u8 track_size[MAX_AD][AHCI_MAX_PORTS];
146
147static char init_msg[] = "%s driver version %d.%02d\n";
148static char exit_msg[] = "%s driver *not* installed\n";
149char BldLevel[] = BLDLEVEL;
150
151/* ----------------------------- start of code ----------------------------- */
152
153/******************************************************************************
154 * OS/2 device driver main strategy function. This function is only used
155 * for initialization purposes; all other calls go directly to the adapter
156 * device driver's strategy function.
157 *
158 * NOTE: this is also used as the IDC entry point. We expect an IOCTL request
159 * packet for IDC calls, so they can be handled by gen_ioctl.
160 */
161USHORT _cdecl c_strat(RPH _far *req)
162{
163 u16 rc;
164
165 switch (req->Cmd) {
166
167 case CMDInitBase:
168 rc = init_drv((RPINITIN _far *) req);
169 break;
170
171 case CMDShutdown:
172 rc = exit_drv(((RPSAVERESTORE _far *) req)->FuncCode);
173 break;
174
175 case CMDGenIOCTL:
176 rc = gen_ioctl((RP_GENIOCTL _far *) req);
177 break;
178
179 case CMDINPUT:
180 rc = char_dev_input((RP_RWV _far *) req);
181 break;
182
183 case CMDSaveRestore:
184 rc = sr_drv(((RPSAVERESTORE _far *) req)->FuncCode);
185 break;
186
187 default:
188 rc = STDON | STATUS_ERR_UNKCMD;
189 break;
190 }
191
192 return(rc);
193}
194
195/******************************************************************************
196 * Initialize the os2ahci driver. This includes command line parsing, scanning
197 * the PCI bus for supported AHCI adapters, etc.
198 */
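/* Editor's example (not part of the original source): a hypothetical
 * CONFIG.SYS line exercising some of the switches parsed below. Option
 * letters are case-insensitive, "!" inverts a boolean switch, and /A and
 * /P select the adapter/port that subsequent port options apply to:
 *
 *     BASEDEV=OS2AHCI.ADD /V /A:0 /P:2 /!N /LS:2
 *
 * (show the sign-on banner; on adapter 0, port 2: disable NCQ and set
 * link speed option 2)
 */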
199USHORT init_drv(RPINITIN _far *req)
200{
201 static int init_drv_called;
202 static int init_drv_failed;
203 RPINITOUT _far *rsp = (RPINITOUT _far *) req;
204 DDD_PARM_LIST _far *ddd_pl = (DDD_PARM_LIST _far *) req->InitArgs;
205 APIRET rmrc;
206 char _far *cmd_line;
207 char _far *s;
208 int adapter_index = -1;
209 int port_index = -1;
210 int invert_option;
211 int optval;
212 u16 vendor;
213 u16 device;
214
215 if (init_drv_called) {
216 /* This is the init call for the second (legacy IBMS506$) character
217 * device driver. If the main driver failed initialization, fail this
218 * one as well.
219 */
220 rsp->CodeEnd = (u16) end_of_code;
221 rsp->DataEnd = (u16) &end_of_data;
222 return(STDON | ((init_drv_failed) ? ERROR_I24_QUIET_INIT_FAIL : 0));
223 }
224 init_drv_called = 1;
225 suspended = 0;
226 resume_sleep_flag = 0;
227 memset(ad_infos, 0, sizeof(ad_infos));
228 memset(emulate_scsi, 1, sizeof(emulate_scsi)); /* set default enabled */
229
230 /* set device helper entry point */
231 Device_Help = req->DevHlpEP;
232
233 /* create driver-level spinlock */
234 DevHelp_CreateSpinLock(&drv_lock);
235
236 /* initialize libc code */
237 init_libc();
238
239 /* register driver with resource manager */
240 if ((rmrc = RMCreateDriver(&rm_drvinfo, &rm_drvh)) != RMRC_SUCCESS) {
241 cprintf("%s: failed to register driver with resource manager (rc = %d)\n",
242 drv_name, rmrc);
243 goto init_fail;
244 }
245
246 /* parse command line parameters */
247 cmd_line = (char _far *) ((u32) ddd_pl & 0xffff0000l) + ddd_pl->cmd_line_args;
248
249 for (s = cmd_line; *s != 0; s++) {
250 if (*s == '/') {
251 if ((invert_option = (s[1] == '!')) != 0) {
252 s++;
253 }
254 s++;
255 switch (tolower(*s)) {
256
257 case '\0':
258 /* end of command line; can only happen if command line is incorrect */
259 cprintf("%s: incomplete command line option\n", drv_name);
260 goto init_fail;
261
262 case 'b':
263 drv_parm_int(s, com_baud, u32, 10);
264 break;
265
266 case 'c':
267 /* set COM port base address for debug messages */
268 drv_parm_int(s, com_base, u16, 16);
269 if (com_base == 1) com_base = 0x3f8;
270 if (com_base == 2) com_base = 0x2f8;
271 break;
272
273 case 'd':
274 /* increase debug level */
275 drv_parm_int_optional(s, debug, int, 10);
276 break;
277
278 case 'g':
279 /* add specified PCI ID as a supported generic AHCI adapter */
280 drv_parm_int(s, vendor, u16, 16);
281 s--;
282 drv_parm_int(s, device, u16, 16);
283 if (add_pci_id(vendor, device)) {
284 cprintf("%s: failed to add PCI ID %04x:%04x\n", drv_name, vendor, device);
285 goto init_fail;
286 }
287 thorough_scan = 1;
288 break;
289
290 case 't':
291 /* perform thorough PCI scan (i.e. look for individual supported PCI IDs) */
292 thorough_scan = !invert_option;
293 break;
294
295 case 'r':
296 /* reset ports during initialization */
297 init_reset = !invert_option;
298 break;
299
300 case 'f':
301 /* force write cache regardless of IORB flags */
302 force_write_cache = 1;
303 break;
304
305 case 'a':
306 /* set adapter index for adapter and port-related options */
307 drv_parm_int(s, adapter_index, int, 10);
308 if (adapter_index < 0 || adapter_index >= MAX_AD) {
309 cprintf("%s: invalid adapter index (%d)\n", drv_name, adapter_index);
310 goto init_fail;
311 }
312 break;
313
314 case 'p':
315 /* set port index for port-related options */
316 drv_parm_int(s, port_index, int, 10);
317 if (port_index < 0 || port_index >= AHCI_MAX_PORTS) {
318 cprintf("%s: invalid port index (%d)\n", drv_name, port_index);
319 goto init_fail;
320 }
321 break;
322
323 case 'i':
324 /* ignore current adapter index */
325 if (adapter_index >= 0) {
326 ad_ignore |= 1U << adapter_index;
327 }
328 break;
329
330 case 's':
331 /* enable SCSI emulation for ATAPI devices */
332 set_port_option(emulate_scsi, !invert_option);
333 break;
334
335 case 'n':
336 /* enable NCQ */
337 set_port_option(enable_ncq, !invert_option);
338 break;
339
340 case 'l':
341 /* set link speed or power savings */
342 s++;
343 switch (tolower(*s)) {
344 case 's':
345 /* set link speed */
346 drv_parm_int(s, optval, int, 10);
347 set_port_option(link_speed, optval);
348 break;
349 case 'p':
350 /* set power management */
351 drv_parm_int(s, optval, int, 10);
352 set_port_option(link_power, optval);
353 break;
354 default:
355 cprintf("%s: invalid link parameter (%c)\n", drv_name, *s);
356 goto init_fail;
357 }
358 /* need to reset the port in order to establish link settings */
359 init_reset = 1;
360 break;
361
362 case '4':
363 /* enable 4K sector geometry enhancement (track size = 56) */
364 if (!invert_option) {
365 set_port_option(track_size, 56);
366 }
367 break;
368
369 case 'z':
370 /* Specify to not use the LVM information. There is no reason why anyone would
371 * want to do this, but previous versions of this driver did not have LVM capability,
372 * so this switch is here temporarily just in case.
373 */
374 use_lvm_info = !invert_option;
375 break;
376
377 case 'v':
378 /* be verbose during boot */
379 drv_parm_int_optional(s, verbosity, int, 10);
380 break;
381
382 case 'w':
383 /* Specify to allow the trace buffer to wrap when full. */
384 wrap_trace_buffer = !invert_option;
385 break;
386
387 case 'q':
388 /* Temporarily output a non-fatal message to get anyone using this
389 * undocumented switch to stop using it. This will be removed soon
390 * and the error will become fatal.
391 */
392 cprintf("%s: unknown option: /%c\n", drv_name, *s);
393 break;
394
395 default:
396 cprintf("%s: unknown option: /%c\n", drv_name, *s);
397 goto init_fail;
398 }
399 }
400 }
401
402 /* print initialization message */
403 ciprintf(init_msg, drv_name, VERSION / 100, VERSION % 100);
404
405 if (com_baud) init_com(com_baud); /* initialize com port for debug output */
406
407 /* initialize trace buffer if applicable */
408 if (TRACE_ACTIVE) {
409 /* debug is on, but COM port is off -> use our trace buffer */
410 trace_init(AHCI_TRACE_BUF_SIZE);
411 } else {
412 trace_init(AHCI_INFO_BUF_SIZE);
413 }
414
415 ntprintf("BldLevel: %s\n", BldLevel);
416 ntprintf("CmdLine: %Fs\n", cmd_line);
417
418 /* scan PCI bus for supported devices */
419 scan_pci_bus();
420
421 if (ad_info_cnt > 0) {
422 /* initialization succeeded and we found at least one AHCI adapter */
423 ADD_InitTimer(timer_pool, sizeof(timer_pool));
424 //NOT_USED mdelay_cal();
425
426 if (DevHelp_RegisterDeviceClass(drv_name, (PFN) add_entry, 0, 1, &add_handle)) {
427 cprintf("%s: couldn't register device class\n", drv_name);
428 goto init_fail;
429 }
430
431 /* allocate context hooks */
432 if (DevHelp_AllocateCtxHook(mk_NPFN(restart_hook), &restart_ctxhook_h) != 0 ||
433 DevHelp_AllocateCtxHook(mk_NPFN(reset_hook), &reset_ctxhook_h) != 0 ||
434 DevHelp_AllocateCtxHook(mk_NPFN(engine_hook), &engine_ctxhook_h)) {
435 cprintf("%s: failed to allocate task-time context hooks\n", drv_name);
436 goto init_fail;
437 }
438
439 rsp->CodeEnd = (u16) end_of_code;
440 rsp->DataEnd = (u16) &end_of_data;
441
442 /* register kernel exit routine for trap dumps */
443 register_krnl_exit();
444
445 return(STDON);
446
447 } else {
448 /* no adapters found */
449 ciprintf(" No adapters found.\n");
450 }
451
452init_fail:
453 /* initialization failed; set segment sizes to 0 and return error */
454 rsp->CodeEnd = 0;
455 rsp->DataEnd = 0;
456 init_drv_failed = 1;
457
458 /* free context hooks */
459 if (engine_ctxhook_h != 0) DevHelp_FreeCtxHook(engine_ctxhook_h);
460 if (reset_ctxhook_h != 0) DevHelp_FreeCtxHook(reset_ctxhook_h);
461 if (restart_ctxhook_h != 0) DevHelp_FreeCtxHook(restart_ctxhook_h);
462
463 if (rm_drvh != 0) {
464 /* remove driver from resource manager */
465 RMDestroyDriver(rm_drvh);
466 }
467
468 ciprintf(exit_msg, drv_name);
469 return(STDON | ERROR_I24_QUIET_INIT_FAIL);
470}
471
472/******************************************************************************
473 * Generic IOCTL via character device driver. IOCTLs are used to control the
474 * driver operation and to execute native ATA and ATAPI (SCSI) commands from
475 * ring 3 applications. On top of that, some predefined IOCTLs (e.g. SMART
476 * commands for ATA disks) are implemented here.
477 */
478USHORT gen_ioctl(RP_GENIOCTL _far *ioctl)
479{
480 dprintf("IOCTL 0x%x/0x%x\n", (u16) ioctl->Category, (u16) ioctl->Function);
481
482 switch (ioctl->Category) {
483
484 case OS2AHCI_IOCTL_CATEGORY:
485 switch (ioctl->Function) {
486
487 case OS2AHCI_IOCTL_GET_DEVLIST:
488 return(ioctl_get_devlist(ioctl));
489
490 case OS2AHCI_IOCTL_PASSTHROUGH:
491 return(ioctl_passthrough(ioctl));
492
493 }
494 break;
495
496 case DSKSP_CAT_GENERIC:
497 return(ioctl_gen_dsk(ioctl));
498
499 case DSKSP_CAT_SMART:
500 return(ioctl_smart(ioctl));
501
502 }
503
504 return(STDON | STATUS_ERR_UNKCMD);
505}
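
/* Editor's ring-3 sketch (not part of the driver source) of how an
 * application might reach gen_ioctl() above. The character device name
 * "OS2AHCI$" and the devlist reply buffer handling are assumptions; the
 * real category/function constants and packet layouts come from ioctl.h.
 */
#if 0
#define INCL_DOSFILEMGR
#define INCL_DOSDEVICES
#define INCL_DOSERRORS
#include <os2.h>

static int query_devlist(void)
{
  HFILE hdev;
  ULONG action;
  UCHAR buf[1024];                  /* reply buffer; layout defined in ioctl.h */
  ULONG plen = 0;
  ULONG dlen = sizeof(buf);
  APIRET rc;

  if (DosOpen((PSZ) "OS2AHCI$", &hdev, &action, 0, FILE_NORMAL, FILE_OPEN,
              OPEN_ACCESS_READWRITE | OPEN_SHARE_DENYNONE, NULL) != NO_ERROR) {
    return(-1);
  }

  rc = DosDevIOCtl(hdev, OS2AHCI_IOCTL_CATEGORY, OS2AHCI_IOCTL_GET_DEVLIST,
                   NULL, 0, &plen,            /* no parameter packet */
                   buf, sizeof(buf), &dlen);  /* data packet: device list */

  DosClose(hdev);
  return((rc == NO_ERROR) ? 0 : -1);
}
#endif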
506
507/******************************************************************************
508 * Read from character device. If tracing is on (internal ring buffer trace),
509 * we return data from the trace buffer; if not, we might return a device
510 * dump similar to IBM1S506.ADD/DANIS506.ADD (TODO).
511 */
512USHORT char_dev_input(RP_RWV _far *rwrb)
513{
514 return(trace_char_dev(rwrb));
515}
516
517/******************************************************************************
518 * Device driver exit handler. This handler is called when OS/2 shuts down and
519 * flushes the write caches of all attached devices. Since this is effectively
520 * the same we do when suspending, we'll call out to the corresponding suspend
521 * function.
522 *
523 * NOTE: Errors are ignored because there's no way we could stop the shutdown
524 * or do something about the error, unless retrying endlessly is
525 * considered an option.
526 */
527USHORT exit_drv(int func)
528{
529 dprintf("exit_drv(%d) called\n", func);
530
531 if (func == 0) {
532 /* we're only interested in the second phase of the shutdown */
533 return(STDON);
534 }
535
536 suspend();
537 return(STDON);
538}
539
540/******************************************************************************
541 * Device driver suspend/resume handler. This handler is called when ACPI is
542 * executing a suspend or resume.
543 */
544USHORT sr_drv(int func)
545{
546 dprintf("sr_drv(%d) called\n", func);
547
548 if (func) resume();
549 else suspend();
550
551 return(STDON);
552}
553
554/******************************************************************************
555 * ADD entry point. This is the main entry point for all ADD requests. Due to
556 * the asynchronous nature of ADD drivers, this function primarily queues the
557 * IORB(s) to the corresponding adapter or port queues, then triggers the
558 * state machine to initiate processing queued IORBs.
559 *
560 * NOTE: In order to prevent race conditions or engine stalls, certain rules
561 * around locking, unlocking and IORB handling in general have been
562 * established. Refer to the comments in "trigger_engine()" for
563 * details.
564 */
565void _cdecl _far _loadds add_entry(IORBH _far *first_iorb)
566{
567 IORBH _far *iorb;
568 IORBH _far *next = NULL;
569
570 spin_lock(drv_lock);
571
572 for (iorb = first_iorb; iorb != NULL; iorb = next) {
573 /* Queue this IORB. Queues primarily exist on port level but there are
574 * some requests which affect the whole driver, most notably
575 * IOCC_CONFIGURATION. In either case, adding the IORB to the driver or
576 * port queue will change the links, thus we need to save the original
577 * link in 'next'.
578 */
579 next = (iorb->RequestControl & IORB_CHAIN) ? iorb->pNxtIORB : 0;
580
581 iorb->Status = 0;
582 iorb->ErrorCode = 0;
583 memset(&iorb->ADDWorkSpace, 0x00, sizeof(ADD_WORKSPACE));
584
585 if (iorb_driver_level(iorb)) {
586 /* driver-level IORB */
587 iorb->UnitHandle = 0;
588 iorb_queue_add(&driver_queue, iorb);
589
590 } else {
591 /* port-level IORB */
592 int a = iorb_unit_adapter(iorb);
593 int p = iorb_unit_port(iorb);
594 int d = iorb_unit_device(iorb);
595
596 if (a >= ad_info_cnt ||
597 p > ad_infos[a].port_max ||
598 d > ad_infos[a].ports[p].dev_max ||
599 (ad_infos[a].port_map & (1UL << p)) == 0) {
600
601 /* unit handle outside of the allowed range */
602 dprintf("warning: IORB for %d.%d.%d out of range\n", a, p, d);
603 iorb->Status = IORB_ERROR;
604 iorb->ErrorCode = IOERR_CMD_SYNTAX;
605 iorb_complete(iorb);
606 continue;
607 }
608
609 iorb_queue_add(&ad_infos[a].ports[p].iorb_queue, iorb);
610 }
611 }
612
613 /* trigger state machine */
614 trigger_engine();
615
616 spin_unlock(drv_lock);
617}
618
619/******************************************************************************
620 * Trigger IORB queue engine. This is a wrapper function for trigger_engine_1()
621 * which will try to get all IORBs sent on their way a couple of times. If
622 * there are still IORBs ready for processing after this, this function will
623 * hand off to a context hook which will continue to trigger the engine until
624 * all IORBs have been sent.
625 *
626 * NOTE: While initialization has not completed (or during suspend/resume
627 * operations), this function will loop indefinitely because we can't
628 * rely on interrupt handlers or context hooks and complex IORBs
629 * requiring multiple requeues would eventually hang and time out if
630 * we stopped triggering here.
631 */
632void trigger_engine(void)
633{
634 int i;
635
636 for (i = 0; i < 3 || !init_complete; i++) {
637 if (trigger_engine_1() == 0) {
638 /* done -- all IORBs have been sent on their way */
639 return;
640 }
641 }
642
643 /* Something keeps bouncing; hand off to the engine context hook which will
644 * keep trying in the background.
645 */
646 DevHelp_ArmCtxHook(0, engine_ctxhook_h);
647}
648
649/******************************************************************************
650 * Trigger IORB queue engine in order to send commands in the driver/port IORB
651 * queues to the AHCI hardware. This function will return the number of IORBs
652 * sent. Keep in mind that IORBs might "bounce" if the adapter/port is not in
653 * a state to accept the command, thus it might take quite a few calls to get
654 * all IORBs on their way. This is why there's a wrapper function which tries
655 * it a few times, then hands off to a context hook which will keep trying in
656 * the background.
657 *
658 * IORBs might complete before send_iorb() has returned, at any time during
659 * interrupt processing or on another CPU on SMP systems. IORB completion
660 * means modifications to the corresponding IORB queue (the completed IORB
661 * is removed from the queue) thus we need to protect the IORB queues from
662 * race conditions. The safest approach short of keeping the driver-level
663 * spinlock acquired permanently is to keep it throughout this function and
664 * release it temporarily in send_iorb().
665 *
666 * This implies that the handler functions are fully responsible for acquiring
667 * the driver-level spinlock when they need it, and for releasing it again.
668 *
669 * As a rule of thumb, get the driver-level spinlock whenever accessing
670 * volatile variables (IORB queues, values in ad_info[], ...).
671 *
672 * Additional Notes:
673 *
674 * - This function is expected to be called with the spinlock acquired
675 *
676 * - Adapters can be flagged as 'busy' which means no new IORBs are sent (they
677 * just remain in the queue). This can be used to release the driver-level
678 * spinlock while making sure no new IORBs are going to hit the hardware.
679 * In order to prevent engine stalls, all handlers using this functionality
680 * need to invoke trigger_engine() after resetting the busy flag.
681 *
682 * - Driver-level IORBs are not synchronized by adapter-level 'busy' flags.
683 * However, the driver-level queue is worked "one entry at a time" which
684 * means that no new IORBs will be queued on the driver-level queue until
685 * the head element has completed processing. This means that driver-
686 * level IORB handlers don't need to protect against each other. But they
687 * do need to keep in mind interference with port-level IORBs:
688 *
689 * - Driver-level IORB handlers must obtain the spinlock and/or flag all
690 * adapters as 'busy' which are affected by the driver-level IORB
691 *
692 * - Driver-level IORB handlers must not access the hardware of a
693 * particular adapter if it's flagged as 'busy' by another IORB.
694 */
695int trigger_engine_1(void)
696{
697 IORBH _far *iorb;
698 IORBH _far *next;
699 int iorbs_sent = 0;
700 int a;
701 int p;
702
703 iorbs_sent = 0;
704
705 /* process driver-level IORBs */
706 if ((iorb = driver_queue.root) != NULL && !add_workspace(iorb)->processing) {
707 send_iorb(iorb);
708 iorbs_sent++;
709 }
710
711 /* process port-level IORBs */
712 for (a = 0; a < ad_info_cnt; a++) {
713 AD_INFO *ai = ad_infos + a;
714 if (ai->busy) {
715 /* adapter is busy; don't process any IORBs */
716 continue;
717 }
718 for (p = 0; p <= ai->port_max; p++) {
719 /* send all queued IORBs on this port */
720 next = NULL;
721 for (iorb = ai->ports[p].iorb_queue.root; iorb != NULL; iorb = next) {
722 next = iorb->pNxtIORB;
723 if (!add_workspace(iorb)->processing) {
724 send_iorb(iorb);
725 iorbs_sent++;
726 }
727 }
728 }
729 }
730
731 return(iorbs_sent);
732}
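
/* Editor's sketch (not part of the original source) of the 'busy' protocol
 * described in the notes above trigger_engine_1(): a hypothetical driver-
 * level handler flags an adapter busy so no new port-level IORBs reach the
 * hardware, releases drv_lock around the lengthy hardware access, then
 * clears the flag and re-triggers the engine to prevent a stall.
 */
#if 0
static void example_busy_handler(AD_INFO *ai)
{
  spin_lock(drv_lock);
  ai->busy = 1;                /* no new IORBs will be sent to this adapter */
  spin_unlock(drv_lock);

  /* ... lengthy hardware access without holding drv_lock ... */

  spin_lock(drv_lock);
  ai->busy = 0;
  trigger_engine();            /* required after resetting the busy flag */
  spin_unlock(drv_lock);
}
#endif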
733
734/******************************************************************************
735 * Send a single IORB to the corresponding AHCI adapter/port. This is just a
736 * switch board for calling the corresponding iocc_*() handler function.
737 *
738 * NOTE: This function is expected to be called with the driver-level spinlock
739 * acquired. It will release it before calling any of the handler
740 * functions and re-acquire it when done.
741 */
742void send_iorb(IORBH _far *iorb)
743{
744 /* Mark IORB as "processing" before doing anything else. Once the IORB is
745 * marked as "processing", we can release the spinlock because subsequent
746 * invocations of trigger_engine() (e.g. at interrupt time) will ignore this
747 * IORB.
748 */
749 add_workspace(iorb)->processing = 1;
750 spin_unlock(drv_lock);
751
752 switch (iorb->CommandCode) {
753
754 case IOCC_CONFIGURATION:
755 iocc_configuration(iorb);
756 break;
757
758 case IOCC_DEVICE_CONTROL:
759 iocc_device_control(iorb);
760 break;
761
762 case IOCC_UNIT_CONTROL:
763 iocc_unit_control(iorb);
764 break;
765
766 case IOCC_GEOMETRY:
767 iocc_geometry(iorb);
768 break;
769
770 case IOCC_EXECUTE_IO:
771 iocc_execute_io(iorb);
772 break;
773
774 case IOCC_UNIT_STATUS:
775 iocc_unit_status(iorb);
776 break;
777
778 case IOCC_ADAPTER_PASSTHRU:
779 iocc_adapter_passthru(iorb);
780 break;
781
782 default:
783 /* unsupported call */
784 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
785 iorb_done(iorb);
786 break;
787 }
788
789 /* re-acquire spinlock before returning to trigger_engine() */
790 spin_lock(drv_lock);
791}
792
793/******************************************************************************
794 * Handle IOCC_CONFIGURATION requests.
795 */
796void iocc_configuration(IORBH _far *iorb)
797{
798 int a;
799
800 switch (iorb->CommandModifier) {
801
802 case IOCM_COMPLETE_INIT:
803 /* Complete initialization. From now on, we won't have to restore the BIOS
804 * configuration after each command and we're fully operational (i.e. will
805 * use interrupts, timers and context hooks instead of polling).
806 */
807 if (!init_complete) {
808 dprintf("leaving initialization mode\n");
809 for (a = 0; a < ad_info_cnt; a++) {
810 lock_adapter(ad_infos + a);
811 ahci_complete_init(ad_infos + a);
812 }
813 init_complete = 1;
814
815 /* DAZ turn off COM port output if on */
816 //com_base = 0;
817
818 /* release all adapters */
819 for (a = 0; a < ad_info_cnt; a++) {
820 unlock_adapter(ad_infos + a);
821 }
822
823 #ifdef LEGACY_APM
824 /* register APM hook */
825 apm_init();
826 #endif
827
828 build_user_info();
829 }
830 iorb_done(iorb);
831 break;
832
833 case IOCM_GET_DEVICE_TABLE:
834 /* construct a device table */
835 iocm_device_table(iorb);
836 break;
837
838 default:
839 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
840 iorb_done(iorb);
841 break;
842 }
843}
844
845/******************************************************************************
846 * Handle IOCC_DEVICE_CONTROL requests.
847 */
848void iocc_device_control(IORBH _far *iorb)
849{
850 AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb);
851 IORBH _far *ptr;
852 IORBH _far *next = NULL;
853 int p = iorb_unit_port(iorb);
854 int d = iorb_unit_device(iorb);
855
856 switch (iorb->CommandModifier) {
857
858 case IOCM_ABORT:
859 /* abort all pending commands on specified port and device */
860 spin_lock(drv_lock);
861 for (ptr = ai->ports[p].iorb_queue.root; ptr != NULL; ptr = next) {
862 next = ptr->pNxtIORB;
863 /* move all matching IORBs to the abort queue */
864 if (ptr != iorb && iorb_unit_device(ptr) == d) {
865 iorb_queue_del(&ai->ports[p].iorb_queue, ptr);
866 iorb_queue_add(&abort_queue, ptr);
867 ptr->ErrorCode = IOERR_CMD_ABORTED;
868 }
869 }
870 spin_unlock(drv_lock);
871
872 /* trigger reset context hook which will finish the abort processing */
873 DevHelp_ArmCtxHook(0, reset_ctxhook_h);
874 break;
875
876 case IOCM_SUSPEND:
877 case IOCM_RESUME:
878 case IOCM_GET_QUEUE_STATUS:
879 /* Suspend/resume operations allow access to the hardware for other
880 * entities such as IBMIDECD.FLT. Since os2ahci implements both ATA
881 * and ATAPI in the same driver, this won't be required.
882 */
883 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
884 break;
885
886 case IOCM_LOCK_MEDIA:
887 case IOCM_UNLOCK_MEDIA:
888 case IOCM_EJECT_MEDIA:
889 /* unit control commands to lock, unlock and eject media */
890 /* will be supported later... */
891 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
892 break;
893
894 default:
895 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
896 break;
897 }
898
899 iorb_done(iorb);
900}
901
902/******************************************************************************
903 * Handle IOCC_UNIT_CONTROL requests.
904 */
905void iocc_unit_control(IORBH _far *iorb)
906{
907 IORB_UNIT_CONTROL _far *iorb_uc = (IORB_UNIT_CONTROL _far *) iorb;
908 int a = iorb_unit_adapter(iorb);
909 int p = iorb_unit_port(iorb);
910 int d = iorb_unit_device(iorb);
911
912 spin_lock(drv_lock);
913 switch (iorb->CommandModifier) {
914
915 case IOCM_ALLOCATE_UNIT:
916 /* allocate unit for exclusive access */
917 if (ad_infos[a].ports[p].devs[d].allocated) {
918 iorb_seterr(iorb, IOERR_UNIT_ALLOCATED);
919 } else {
920 ad_infos[a].ports[p].devs[d].allocated = 1;
921 }
922 break;
923
924 case IOCM_DEALLOCATE_UNIT:
925 /* deallocate exclusive access to unit */
926 if (!ad_infos[a].ports[p].devs[d].allocated) {
927 iorb_seterr(iorb, IOERR_UNIT_NOT_ALLOCATED);
928 } else {
929 ad_infos[a].ports[p].devs[d].allocated = 0;
930 }
931 break;
932
933 case IOCM_CHANGE_UNITINFO:
934 /* Change unit (device) information. One reason for this IOCM is the
935 * interface for filter device drivers: a filter device driver can
936 * either change existing UNITINFOs or permanently allocate units
937 * and fabricate new [logical] units; the former is the reason why we
938 * must store the pointer to the updated UNITINFO for subsequent
939 * IOCC_CONFIGURATION/IOCM_GET_DEVICE_TABLE calls.
940 */
941 if (!ad_infos[a].ports[p].devs[d].allocated) {
942 iorb_seterr(iorb, IOERR_UNIT_NOT_ALLOCATED);
943 break;
944 }
945 ad_infos[a].ports[p].devs[d].unit_info = iorb_uc->pUnitInfo;
946 break;
947
948 default:
949 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
950 break;
951 }
952
953 spin_unlock(drv_lock);
954 iorb_done(iorb);
955}
956
957/******************************************************************************
958 * Scan all ports for AHCI devices and construct a DASD device table.
959 *
960 * NOTES: This function may be called multiple times. Only the first
961 * invocation will actually scan for devices; all subsequent calls will
962 * merely return the results of the initial scan, potentially augmented
963 * by modified unit infos after IOCC_CONFIGURATION/IOCM_CHANGE_UNITINFO
964 * requests.
965 *
966 * In order to support applications that can't deal with ATAPI devices
967 * (i.e. need a SCSI adapter) os2ahci will optionally report ATAPI
968 * devices as SCSI devices. The corresponding SCSI adapter doesn't
969 * really exist and is only reported here for the IOCM_GET_DEVICE_TABLE
970 * request. The units attached to this adapter will use the real HW
971 * unit IDs, thus we'll never receive a command specific to the
972 * emulated SCSI adapter and won't need to set up any sort of entity
973 * for it; the only purpose of the emulated SCSI adapter is to pass the
974 * bus type "AI_DEVBUS_SCSI_2" upstream, and the emulated units, of
975 * course. The emulated SCSI target IDs are allocated as follows:
976 *
977 * 0 the virtual adapter
978 * 1..n emulated devices; SCSI target ID increments sequentially
979 */
980void iocm_device_table(IORBH _far *iorb)
981{
982 IORB_CONFIGURATION _far *iorb_conf;
983 DEVICETABLE _far *dt;
984 char _far *pos;
985 int scsi_units = 0;
986 int scsi_id = 1;
987 int rc;
988 int dta;
989 int a;
990 int p;
991 int d;
992
993 iorb_conf = (IORB_CONFIGURATION _far *) iorb;
994 dt = iorb_conf->pDeviceTable;
995
996 spin_lock(drv_lock);
997
998 /* initialize device table header */
999 dt->ADDLevelMajor = ADD_LEVEL_MAJOR;
1000 dt->ADDLevelMinor = ADD_LEVEL_MINOR;
1001 dt->ADDHandle = add_handle;
1002 dt->TotalAdapters = ad_info_cnt + 1;
1003
1004 /* set start of adapter and device information tables */
1005 pos = (char _far *) (dt->pAdapter + dt->TotalAdapters);
1006
1007 /* go through all adapters, including the virtual SCSI adapter */
1008 for (dta = 0; dta < dt->TotalAdapters; dta++) {
1009 ADAPTERINFO _far *ptr = (ADAPTERINFO _far *) pos;
1010
1011 /* sanity check for sufficient space in device table */
1012 if ((u32) (ptr + 1) - (u32) dt > iorb_conf->DeviceTableLen) {
1013 dprintf("error: device table provided by DASD too small\n");
1014 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE);
1015 goto iocm_device_table_done;
1016 }
1017
1018 dt->pAdapter[dta] = (ADAPTERINFO _near *) ((u32) ptr & 0xffff);
1019 memset(ptr, 0x00, sizeof(*ptr));
1020
1021 ptr->AdapterIOAccess = AI_IOACCESS_BUS_MASTER;
1022 ptr->AdapterHostBus = AI_HOSTBUS_OTHER | AI_BUSWIDTH_32BIT;
1023 ptr->AdapterFlags = AF_16M | AF_HW_SCATGAT;
1024 ptr->MaxHWSGList = AHCI_MAX_SG / 2; /* AHCI S/G elements are 22 bits */
1025
1026 if (dta < ad_info_cnt) {
1027 /* this is a physical AHCI adapter */
1028 AD_INFO *ad_info = ad_infos + dta;
1029
1030 ptr->AdapterDevBus = AI_DEVBUS_ST506 | AI_DEVBUS_32BIT;
1031 sprintf(ptr->AdapterName, "AHCI_%d", dta);
1032
1033 if (!ad_info->port_scan_done) {
1034 /* first call; need to scan AHCI hardware for devices */
1035 if (ad_info->busy) {
1036 dprintf("error: port scan requested while adapter was busy\n");
1037 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE);
1038 goto iocm_device_table_done;
1039 }
1040 ad_info->busy = 1;
1041 spin_unlock(drv_lock);
1042 rc = ahci_scan_ports(ad_info);
1043 spin_lock(drv_lock);
1044 ad_info->busy = 0;
1045
1046 if (rc != 0) {
1047 dprintf("error: port scan failed on adapter #%d\n", dta);
1048 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE);
1049 goto iocm_device_table_done;
1050 }
1051 ad_info->port_scan_done = 1;
1052 }
1053
1054 /* insert physical (i.e. AHCI) devices into the device table */
1055 for (p = 0; p <= ad_info->port_max; p++) {
1056 for (d = 0; d <= ad_info->ports[p].dev_max; d++) {
1057 if (ad_info->ports[p].devs[d].present) {
1058 if (ad_info->ports[p].devs[d].atapi && emulate_scsi[dta][p]) {
1059 /* only report this unit as SCSI unit */
1060 scsi_units++;
1061 continue;
1062 }
1063 if (add_unit_info(iorb_conf, dta, dta, p, d, 0)) {
1064 goto iocm_device_table_done;
1065 }
1066 }
1067 }
1068 }
1069
1070 } else {
1071 /* this is the virtual SCSI adapter */
1072 if (scsi_units == 0) {
1073 /* not a single unit to be emulated via SCSI */
1074 dt->TotalAdapters--;
1075 break;
1076 }
1077
1078 /* set adapter name and bus type to mimic a SCSI controller */
1079 ptr->AdapterDevBus = AI_DEVBUS_SCSI_2 | AI_DEVBUS_16BIT;
1080 sprintf(ptr->AdapterName, "AHCI_SCSI_0");
1081
1082 /* add all ATAPI units to be emulated by this virtual adapter */
1083 for (a = 0; a < ad_info_cnt; a++) {
1084 AD_INFO *ad_info = ad_infos + a;
1085
1086 for (p = 0; p <= ad_info->port_max; p++) {
1087 for (d = 0; d <= ad_info->ports[p].dev_max; d++) {
1088 if (ad_info->ports[p].devs[d].present &&
1089 ad_info->ports[p].devs[d].atapi &&
1090 emulate_scsi[a][p]) {
1091 if (add_unit_info(iorb_conf, dta, a, p, d, scsi_id++)) {
1092 goto iocm_device_table_done;
1093 }
1094 }
1095 }
1096 }
1097 }
1098 }
1099
1100 /* calculate offset for next adapter */
1101 pos = (char _far *) (ptr->UnitInfo + ptr->AdapterUnits);
1102 }
1103
1104iocm_device_table_done:
1105 spin_unlock(drv_lock);
1106 iorb_done(iorb);
1107}
1108
1109/******************************************************************************
1110 * Handle IOCC_GEOMETRY requests.
1111 */
1112void iocc_geometry(IORBH _far *iorb)
1113{
1114 switch (iorb->CommandModifier) {
1115
1116 case IOCM_GET_MEDIA_GEOMETRY:
1117 case IOCM_GET_DEVICE_GEOMETRY:
1118 add_workspace(iorb)->idempotent = 1;
1119 ahci_get_geometry(iorb);
1120 break;
1121
1122 default:
1123 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1124 iorb_done(iorb);
1125 }
1126}
1127
1128/******************************************************************************
1129 * Handle IOCC_EXECUTE_IO requests.
1130 */
1131void iocc_execute_io(IORBH _far *iorb)
1132{
1133 switch (iorb->CommandModifier) {
1134
1135 case IOCM_READ:
1136 add_workspace(iorb)->idempotent = 1;
1137 ahci_read(iorb);
1138 break;
1139
1140 case IOCM_READ_VERIFY:
1141 add_workspace(iorb)->idempotent = 1;
1142 ahci_verify(iorb);
1143 break;
1144
1145 case IOCM_WRITE:
1146 add_workspace(iorb)->idempotent = 1;
1147 ahci_write(iorb);
1148 break;
1149
1150 case IOCM_WRITE_VERIFY:
1151 add_workspace(iorb)->idempotent = 1;
1152 ahci_write(iorb);
1153 break;
1154
1155 default:
1156 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1157 iorb_done(iorb);
1158 }
1159}
1160
1161/******************************************************************************
1162 * Handle IOCC_UNIT_STATUS requests.
1163 */
1164void iocc_unit_status(IORBH _far *iorb)
1165{
1166 switch (iorb->CommandModifier) {
1167
1168 case IOCM_GET_UNIT_STATUS:
1169 add_workspace(iorb)->idempotent = 1;
1170 ahci_unit_ready(iorb);
1171 break;
1172
1173 default:
1174 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1175 iorb_done(iorb);
1176 }
1177}
1178
1179/******************************************************************************
1180 * Handle IOCC_ADAPTER_PASSTHROUGH requests.
1181 */
1182void iocc_adapter_passthru(IORBH _far *iorb)
1183{
1184 switch (iorb->CommandModifier) {
1185
1186 case IOCM_EXECUTE_CDB:
1187 add_workspace(iorb)->idempotent = 0;
1188 ahci_execute_cdb(iorb);
1189 break;
1190
1191 case IOCM_EXECUTE_ATA:
1192 add_workspace(iorb)->idempotent = 0;
1193 ahci_execute_ata(iorb);
1194 break;
1195
1196 default:
1197 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1198 iorb_done(iorb);
1199 }
1200}
1201
1202/******************************************************************************
1203 * Add an IORB to the specified queue. This function must be called with the
1204 * adapter-level spinlock acquired.
1205 */
1206void iorb_queue_add(IORB_QUEUE _far *queue, IORBH _far *iorb)
1207{
1208 if (iorb_priority(iorb)) {
1209 /* priority IORB; insert at first position */
1210 iorb->pNxtIORB = queue->root;
1211 queue->root = iorb;
1212
1213 } else {
1214 /* append IORB to end of queue */
1215 iorb->pNxtIORB = NULL;
1216
1217 if (queue->root == NULL) {
1218 queue->root = iorb;
1219 } else {
1220 queue->tail->pNxtIORB = iorb;
1221 }
1222 queue->tail = iorb;
1223 }
1224
1225 if (debug) {
1226 /* determine queue type (local, driver, abort or port) and minimum debug
1227 * level; otherwise, queue debug prints can become really confusing.
1228 */
1229 char *queue_type;
1230 int min_debug = 1;
1231
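    /* The segment of 'queue' is compared with the segment of a local (stack)
     * variable; if they match, the queue object itself lives on the caller's
     * stack and is a temporary, local queue.
     */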
1232 if ((u32) queue >> 16 == (u32) (void _far *) &queue >> 16) {
1233 /* this queue is on the stack */
1234 queue_type = "local";
1235 min_debug = 2;
1236
1237 } else if (queue == &driver_queue) {
1238 queue_type = "driver";
1239
1240 } else if (queue == &abort_queue) {
1241 queue_type = "abort";
1242 min_debug = 2;
1243
1244 } else {
1245 queue_type = "port";
1246 }
1247
1248 if (debug > min_debug) {
1249 aprintf("IORB %Fp queued (cmd = %d/%d, queue = %Fp [%s], timeout = %ld)\n",
1250 iorb, iorb->CommandCode, iorb->CommandModifier, queue, queue_type,
1251 iorb->Timeout);
1252 }
1253 }
1254}
1255
1256/******************************************************************************
1257 * Remove an IORB from the specified queue. This function must be called with
1258 * the adapter-level spinlock acquired.
1259 */
1260int iorb_queue_del(IORB_QUEUE _far *queue, IORBH _far *iorb)
1261{
1262 IORBH _far *_iorb;
1263 IORBH _far *_prev = NULL;
1264 int found = 0;
1265
1266 for (_iorb = queue->root; _iorb != NULL; _iorb = _iorb->pNxtIORB) {
1267 if (_iorb == iorb) {
1268 /* found the IORB to be removed */
1269 if (_prev != NULL) {
1270 _prev->pNxtIORB = _iorb->pNxtIORB;
1271 } else {
1272 queue->root = _iorb->pNxtIORB;
1273 }
1274 if (_iorb == queue->tail) {
1275 queue->tail = _prev;
1276 }
1277 found = 1;
1278 break;
1279 }
1280 _prev = _iorb;
1281 }
1282
1283 if (found) {
1284 ddprintf("IORB %Fp removed (queue = %Fp)\n", iorb, queue);
1285 } else {
1286 dprintf("IORB %Fp not found in queue %Fp\n", iorb, queue);
1287 }
1288
1289 return(!found);
1290}
1291
1292/******************************************************************************
1293 * Set the error code in the specified IORB
1294 *
1295 * NOTE: This function does *not* call iorb_done(). It merely sets the IORB
1296 * status to the specified error code.
1297 */
1298void iorb_seterr(IORBH _far *iorb, USHORT error_code)
1299{
1300 iorb->ErrorCode = error_code;
1301 iorb->Status |= IORB_ERROR;
1302}
1303
1304/******************************************************************************
1305 * Mark the specified IORB as done and notify the asynchronous post function,
1306 * if any. The IORB is also removed from the corresponding IORB queue.
1307 *
1308 * NOTES: This function does not clear the Status field; it merely adds the
1309 * IORB_DONE flag.
1310 *
1311 * This function is expected to be called *without* the corresponding
1312 * driver-level drv_lock acquired. It will acquire the spinlock before
1313 * updating the IORB queue and release it before notifying the upstream
1314 * code in order to prevent deadlocks.
1315 *
1316 * Due to this logic, this function is only good for simple task-time
1317 * completions. Functions working on lists of IORBs (such as interrupt
1318 * handlers or context hooks) should call iorb_complete() directly and
1319 * implement their own logic for removing the IORB from the port queue.
1320 * See abort_ctxhook() for an example.
1321 */
1322void iorb_done(IORBH _far *iorb)
1323{
1324 int a = iorb_unit_adapter(iorb);
1325 int p = iorb_unit_port(iorb);
1326
1327 /* remove IORB from corresponding queue */
1328 spin_lock(drv_lock);
1329 if (iorb_driver_level(iorb)) {
1330 iorb_queue_del(&driver_queue, iorb);
1331 } else {
1332 iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, iorb);
1333 }
1334 aws_free(add_workspace(iorb));
1335 spin_unlock(drv_lock);
1336
1337 iorb_complete(iorb);
1338}
1339
1340/******************************************************************************
1341 * Complete an IORB. This should be called without the adapter-level spinlock
1342 * to allow the IORB completion routine to perform whatever processing it
1343 * requires. This implies that the IORB should no longer be in any global
1344 * queue because the IORB completion routine may well reuse the IORB and send
1345 * the next request to us before even returning from this function.
1346 */
1347void iorb_complete(IORBH _far *iorb)
1348{
1349 iorb->Status |= IORB_DONE;
1350
1351 ddprintf("IORB %Fp complete (status = 0x%04x, error = 0x%04x)\n",
1352 iorb, iorb->Status, iorb->ErrorCode);
1353
1354 if (iorb->RequestControl & IORB_ASYNC_POST) {
1355 iorb->NotifyAddress(iorb);
1356 }
1357}
1358
1359/******************************************************************************
1360 * Requeue the specified IORB such that it will be sent downstream for
1361 * processing again. This includes freeing all resources currently allocated
1362 * (timer, buffer, ...) and resetting the flags to 0. The driver-level
1363 * spinlock must be acquired when calling this function.
1364 *
1365 * The following flags are preserved:
1366 * - no_ncq, unaligned, retries
1367 */
1368void iorb_requeue(IORBH _far *iorb)
1369{
1370 ADD_WORKSPACE _far *aws = add_workspace(iorb);
1371 u16 no_ncq = aws->no_ncq;
1372 u16 unaligned = aws->unaligned;
1373 u16 retries = aws->retries;
1374
1375 aws_free(aws);
1376 memset(aws, 0x00, sizeof(*aws));
1377
1378 aws->no_ncq = no_ncq;
1379 aws->unaligned = unaligned;
1380 aws->retries = retries;
1381}
1382
1383/******************************************************************************
1384 * Free resources in ADD workspace (timer, buffer, ...). This function should
1385 * be called with the spinlock held to prevent race conditions.
1386 */
1387void aws_free(ADD_WORKSPACE _far *aws)
1388{
1389 if (aws->timer != 0) {
1390 ADD_CancelTimer(aws->timer);
1391 aws->timer = 0;
1392 }
1393
1394 if (aws->buf != NULL) {
1395 free(aws->buf);
1396 aws->buf = NULL;
1397 }
1398}
1399
1400/******************************************************************************
1401 * Lock the adapter, waiting for availability if necessary. This is expected
1402 * to be called at task/request time without the driver-level spinlock
1403 * acquired. Don't call at interrupt time.
1404 */
1405void lock_adapter(AD_INFO *ai)
1406{
1407 TIMER Timer;
1408
1409 spin_lock(drv_lock);
1410 while (ai->busy) {
1411 spin_unlock(drv_lock);
1412 timer_init(&Timer, 250);
1413 while (!timer_check_and_block(&Timer));
1414 spin_lock(drv_lock);
1415 }
1416 ai->busy = 1;
1417 spin_unlock(drv_lock);
1418}
1419
1420/******************************************************************************
1421 * Unlock adapter (i.e. reset busy flag)
1422 */
1423void unlock_adapter(AD_INFO *ai)
1424{
1425 ai->busy = 0;
1426}
1427
1428/******************************************************************************
1429 * Timeout handler for I/O commands. Since timeout handling can involve
1430 * lengthy operations like port resets, the main code is located in a
1431 * separate function which is invoked via a context hook.
1432 */
1433void _cdecl _far timeout_callback(ULONG timer_handle, ULONG p1,
1434 ULONG p2)
1435{
1436 IORBH _far *iorb = (IORBH _far *) p1;
1437 int a = iorb_unit_adapter(iorb);
1438 int p = iorb_unit_port(iorb);
1439
1440 ADD_CancelTimer(timer_handle);
1441 dprintf("timeout for IORB %Fp\n", iorb);
1442
1443 /* Move the timed-out IORB to the abort queue. Since it's possible that the
1444 * IORB has completed after the timeout has expired but before we got to
1445 * this line of code, we'll check the return code of iorb_queue_del(): If it
1446 * returns an error, the IORB must have completed a few microseconds ago and
1447 * there is no timeout.
1448 */
1449 spin_lock(drv_lock);
1450 if (iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, iorb) == 0) {
1451 iorb_queue_add(&abort_queue, iorb);
1452 iorb->ErrorCode = IOERR_ADAPTER_TIMEOUT;
1453 }
1454 spin_unlock(drv_lock);
1455
1456 /* Trigger abort processing function. We don't really care whether this
1457 * succeeds because the only reason why it would fail should be multiple
1458 * calls to DevHelp_ArmCtxHook() before the context hook had a chance to
1459 * start executing, which leaves two scenarios:
1460 *
1461 * - We succeeded in arming the context hook. Fine.
1462 *
1463 * - We armed the context hook a second time before it had a chance to
1464 * start executing. In this case, the already scheduled context hook
1465 * will process our IORB as well.
1466 */
1467 DevHelp_ArmCtxHook(0, reset_ctxhook_h);
1468
1469 /* Set up a watchdog timer which calls the context hook manually in case
1470 * some kernel thread is looping around the IORB_COMPLETE status bit
1471 * without yielding the CPU (kernel threads don't preempt). This shouldn't
1472 * happen per design because kernel threads are supposed to yield but it
1473 * does in the early boot phase.
1474 */
1475 ADD_StartTimerMS(&th_reset_watchdog, 5000, (PFN) reset_watchdog, 0, 0);
1476}
1477
1478/******************************************************************************
1479 * Reset handler watchdog. If a timeout occurs, a context hook is armed which
1480 * will execute as soon as a kernel thread yields the CPU. However, some
1481 * kernel components won't yield the CPU during the early boot phase and the
1482 * only way to kick some sense into those components is to run the context
1483 * hook right inside this timer callback. Not exactly pretty, especially
1484 * considering the fact that context hooks were implemented to prevent running
1485 * lengthy operations like a port reset at interrupt time, but without this
1486 * watchdog mechanism we run the risk of getting completely stalled by device
1487 * problems during the early boot phase.
1488 */
1489void _cdecl _far reset_watchdog(ULONG timer_handle, ULONG p1,
1490 ULONG p2)
1491{
1492 /* reset watchdog timer */
1493 ADD_CancelTimer(timer_handle);
1494 dprintf("reset watchdog invoked\n");
1495
1496 /* call context hook manually */
1497 reset_ctxhook(0);
1498}
1499
1500/******************************************************************************
1501 * small_code_ - this dummy func resolves the undefined reference linker
1502 * error that occurs when linking WATCOM objects with DDK's link.exe
1503 */
1504void _cdecl small_code_(void)
1505{
1506}
1507
1508/******************************************************************************
1509 * Add unit info to ADAPTERINFO array (IOCM_GET_DEVICE_TABLE requests). The
1510 * adapter info array in the device table, dt->pAdapter[], is expected to be
1511 * initialized for the specified index (dta).
1512 *
1513 * Please note that the device table adapter index, dta, is not always equal
1514 * to the physical adapter index, a: if SCSI emulation has been activated, the
1515 * last reported adapter is a virtual SCSI adapter and the physical adapter
1516 * indexes for those units are, of course, different from the device table
1517 * index of the virtual SCSI adapter.
1518 */
1519static int add_unit_info(IORB_CONFIGURATION _far *iorb_conf, int dta,
1520 int a, int p, int d, int scsi_id)
1521{
1522 DEVICETABLE _far *dt = iorb_conf->pDeviceTable;
1523 ADAPTERINFO _far *ptr = (ADAPTERINFO _far *) (((u32) dt & 0xffff0000U) +
1524 (u16) dt->pAdapter[dta]);
1525 UNITINFO _far *ui = ptr->UnitInfo + ptr->AdapterUnits;
1526 AD_INFO *ai = ad_infos + a;
1527
1528 if ((u32) (ui + 1) - (u32) dt > iorb_conf->DeviceTableLen) {
1529 dprintf("error: device table provided by DASD too small\n");
1530 iorb_seterr(&iorb_conf->iorbh, IOERR_CMD_SW_RESOURCE);
1531 return(-1);
1532 }
1533
1534 if (ai->ports[p].devs[d].unit_info == NULL) {
1535 /* provide original information about this device (unit) */
1536 memset(ui, 0x00, sizeof(*ui));
1537 ui->AdapterIndex = dta; /* device table adapter index */
1538 ui->UnitHandle = iorb_unit(a, p, d); /* physical adapter index */
1539 ui->UnitIndex = ptr->AdapterUnits;
1540 ui->UnitType = ai->ports[p].devs[d].dev_type;
1541 ui->QueuingCount = ai->ports[p].devs[d].ncq_max;
1542 if (ai->ports[p].devs[d].removable) {
1543 ui->UnitFlags |= UF_REMOVABLE;
1544 }
1545 if (scsi_id > 0) {
1546 /* set fake SCSI ID for this unit */
1547 ui->UnitSCSITargetID = scsi_id;
1548 }
1549 } else {
1550 /* copy updated device (unit) information (IOCM_CHANGE_UNITINFO) */
1551 memcpy(ui, ai->ports[p].devs[d].unit_info, sizeof(*ui));
1552 }
1553
1554 ptr->AdapterUnits++;
1555 return(0);
1556}
1557
1558/*******************************************************************************
1559 * Register kernel exit handler for trap dumps. Our exit handler will be called
1560 * right before the kernel starts a dump; that's where we reset the controller
1561 * so it supports BIOS int13 I/O calls.
1562 */
1563static void register_krnl_exit(void)
1564{
1565 _asm {
1566 push ds
1567 push es
1568 push bx
1569 push si
1570 push di
1571
1572 mov ax, FLAG_KRNL_EXIT_ADD
1573 mov cx, TYPE_KRNL_EXIT_INT13
1574 mov bx, SEG asm_krnl_exit
1575 mov si, OFFSET asm_krnl_exit
1576 mov dl, DevHlp_RegisterKrnlExit
1577
1578 call dword ptr [Device_Help]
1579
1580 pop di
1581 pop si
1582 pop bx
1583 pop es
1584 pop ds
1585 }
1586
1587 dprintf("Registered kernel exit routine for INT13 mode\n");
1588}
1589