source: trunk/src/os2ahci/os2ahci.c@163

Last change on this file since 163 was 163, checked in by David Azarewicz, 12 years ago

Changed scsi emulation to be on by default
This is version 1.29

File size: 51.9 KB
1/******************************************************************************
2 * os2ahci.c - main file for os2ahci driver
3 *
4 * Copyright (c) 2011 thi.guten Software Development
5 * Copyright (c) 2011 Mensys B.V.
6 *
7 * Authors: Christian Mueller, Markus Thielen
8 *
9 * Parts copied from/inspired by the Linux AHCI driver;
10 * those parts are (c) Linux AHCI/ATA maintainers
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include "os2ahci.h"
28#include "ioctl.h"
29#include "version.h"
30
31/* -------------------------- macros and constants ------------------------- */
32
33/* parse integer command line parameter */
34#define drv_parm_int(s, value, type, radix) \
35 { \
36 char _far *_ep; \
37 if ((s)[1] != ':') { \
38 cprintf("%s: missing colon (:) after /%c\n", drv_name, *(s)); \
39 goto init_fail; \
40 } \
41 value = (type) strtol((s) + 2, \
42 (const char _far* _far*) &_ep, \
43 radix); \
44 s = _ep; \
45 }
46
47#define drv_parm_int_optional(s, value, type, radix) \
48 { \
49 char _far *_ep; \
50 if ((s)[1] == ':') { \
51 value = (type) strtol((s) + 2, (const char _far* _far*) &_ep, radix); \
52 s = _ep; \
53 } else { \
54 value++; \
55 } \
56 }
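/* Example: for "/B:9600" the 'b' case in init_drv() below invokes
 * drv_parm_int(s, com_baud, u32, 10), which checks for the colon, converts
 * "9600" with strtol() and advances s past the number. The _optional variant
 * also accepts the bare form, e.g. "/D" just increments debug while "/D:3"
 * assigns 3.
 */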
57
58/* set two-dimensional array of port options */
59#define set_port_option(opt, val) \
60 if (adapter_index == -1) { \
61 /* set option for all adapters and ports */ \
62 memset(opt, val, sizeof(opt)); \
63 } else if (port_index == -1) { \
64 /* set option for all ports on current adapter */ \
65 memset(opt[adapter_index], val, sizeof(*opt)); \
66 } else { \
67 /* set option for specific port */ \
68 opt[adapter_index][port_index] = val; \
69 }
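/* Example: set_port_option(enable_ncq, 1) fills all of enable_ncq[][] when no
 * /A option has been seen yet (adapter_index == -1), fills only the row
 * enable_ncq[adapter_index][] when /A but no /P was given, and sets a single
 * element once both /A and /P are in effect.
 */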
70
71/* constants for the otherwise undefined RegisterKrnlExit DevHlp;
 72 * see register_krnl_exit() below */
73#define DevHlp_RegisterKrnlExit 0x006f
74
75#define FLAG_KRNL_EXIT_ADD 0x1000
76#define FLAG_KRNL_EXIT_REMOVE 0x2000
77
78#define TYPE_KRNL_EXIT_NMI 0x0000 /* non-maskable interrupts (NMI) */
79#define TYPE_KRNL_EXIT_SFF 0x0001 /* system fatal faults */
80#define TYPE_KRNL_EXIT_PROCDUMP 0x0002
81#define TYPE_KRNL_EXIT_DYN 0x0003
82#define TYPE_KRNL_EXIT_INT13 0x0004 /* enable int13 IO */
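/* register_krnl_exit() at the end of this file passes these values to the
 * DevHlp in registers: AX = FLAG_KRNL_EXIT_ADD, CX = TYPE_KRNL_EXIT_INT13,
 * DL = DevHlp_RegisterKrnlExit.
 */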
83
84/* ------------------------ typedefs and structures ------------------------ */
85
86/* -------------------------- function prototypes -------------------------- */
87
88void _cdecl small_code_ (void);
89
90static int add_unit_info (IORB_CONFIGURATION _far *iorb_conf, int dt_ai,
91 int a, int p, int d, int scsi_id);
92
93static void register_krnl_exit (void);
94
95/* ------------------------ global/static variables ------------------------ */
96
97int debug = 0; /* if > 0, print debug messages to COM1 */
98int thorough_scan = 1; /* if != 0, perform thorough PCI scan */
99int init_reset = 1; /* if != 0, reset ports during init */
100int force_write_cache; /* if != 0, force write cache */
101int verbosity = 0; /* default is quiet; 1 = show sign-on banner, >1 = show adapter info during boot */
102int use_lvm_info = 1;
103int wrap_trace_buffer = 0;
104long com_baud = 0;
105
106PFN Device_Help = 0; /* pointer to device helper entry point */
107ULONG RMFlags = 0; /* required by resource manager library */
108PFN RM_Help0 = NULL; /* required by resource manager library */
109PFN RM_Help3 = NULL; /* required by resource manager library */
110HDRIVER rm_drvh; /* resource manager driver handle */
111char rm_drvname[80]; /* driver name as returned by RM */
112USHORT add_handle; /* driver handle (RegisterDeviceClass) */
113UCHAR timer_pool[TIMER_POOL_SIZE]; /* timer pool */
114char drv_name[] = "OS2AHCI"; /* driver name as string */
115
116/* resource manager driver information structure */
117DRIVERSTRUCT rm_drvinfo = {
118 drv_name, /* driver name */
119 "AHCI SATA Driver", /* driver description */
120 DVENDOR, /* vendor name */
121 CMVERSION_MAJOR, /* RM interface version major */
122 CMVERSION_MINOR, /* RM interface version minor */
123 BLD_YEAR, BLD_MONTH, BLD_DAY, /* date */
124 0, /* driver flags */
125 DRT_ADDDM, /* driver type */
126 DRS_ADD, /* driver sub type */
127 NULL /* driver callback */
128};
129
130ULONG drv_lock; /* driver-level spinlock */
131IORB_QUEUE driver_queue; /* driver-level IORB queue */
132AD_INFO ad_infos[MAX_AD]; /* adapter information list */
133int ad_info_cnt; /* number of entries in ad_infos[] */
134u16 ad_ignore; /* bitmap with adapter indexes to ignore */
135int init_complete; /* if != 0, initialization has completed */
136int suspended;
137int resume_sleep_flag;
138
139/* adapter/port-specific options saved when parsing the command line */
140u8 emulate_scsi[MAX_AD][AHCI_MAX_PORTS];
141u8 enable_ncq[MAX_AD][AHCI_MAX_PORTS];
142u8 link_speed[MAX_AD][AHCI_MAX_PORTS];
143u8 link_power[MAX_AD][AHCI_MAX_PORTS];
144u8 track_size[MAX_AD][AHCI_MAX_PORTS];
145
146static char init_msg[] = "%s driver version %d.%02d\n";
147static char exit_msg[] = "%s driver *not* installed\n";
148char BldLevel[] = BLDLEVEL;
149
150/* ----------------------------- start of code ----------------------------- */
151
152/******************************************************************************
153 * OS/2 device driver main strategy function. This function is only used
154 * for initialization purposes; all other calls go directly to the adapter
155 * device driver's strategy function.
156 *
157 * NOTE: this is also used as the IDC entry point. We expect an IOCTL request
158 * packet for IDC calls, so they can be handled by gen_ioctl.
159 */
160USHORT _cdecl c_strat(RPH _far *req)
161{
162 u16 rc;
163
164 switch (req->Cmd) {
165
166 case CMDInitBase:
167 rc = init_drv((RPINITIN _far *) req);
168 break;
169
170 case CMDShutdown:
171 rc = exit_drv(((RPSAVERESTORE _far *) req)->FuncCode);
172 break;
173
174 case CMDGenIOCTL:
175 rc = gen_ioctl((RP_GENIOCTL _far *) req);
176 break;
177
178 case CMDINPUT:
179 rc = char_dev_input((RP_RWV _far *) req);
180 break;
181
182 case CMDSaveRestore:
183 rc = sr_drv(((RPSAVERESTORE _far *) req)->FuncCode);
184 break;
185
186 default:
187 rc = STDON | STATUS_ERR_UNKCMD;
188 break;
189 }
190
191 return(rc);
192}
193
194/******************************************************************************
194 * Initialize the os2ahci driver. This includes command line parsing, scanning
196 * the PCI bus for supported AHCI adapters, etc.
197 */
198USHORT init_drv(RPINITIN _far *req)
199{
200 static int init_drv_called;
201 static int init_drv_failed;
202 RPINITOUT _far *rsp = (RPINITOUT _far *) req;
203 DDD_PARM_LIST _far *ddd_pl = (DDD_PARM_LIST _far *) req->InitArgs;
204 APIRET rmrc;
205 char _far *cmd_line;
206 char _far *s;
207 int adapter_index = -1;
208 int port_index = -1;
209 int invert_option;
210 int optval;
211 u16 vendor;
212 u16 device;
213
214 if (init_drv_called) {
215 /* This is the init call for the second (legacy IBMS506$) character
216 * device driver. If the main driver failed initialization, fail this
217 * one as well.
218 */
219 rsp->CodeEnd = (u16) end_of_code;
220 rsp->DataEnd = (u16) &end_of_data;
221 return(STDON | ((init_drv_failed) ? ERROR_I24_QUIET_INIT_FAIL : 0));
222 }
223 init_drv_called = 1;
224 suspended = 0;
225 resume_sleep_flag = 0;
226 memset(ad_infos, 0, sizeof(ad_infos));
227 memset(emulate_scsi, 1, sizeof(emulate_scsi)); /* set default enabled */
228
229 /* set device helper entry point */
230 Device_Help = req->DevHlpEP;
231
232 /* create driver-level spinlock */
233 DevHelp_CreateSpinLock(&drv_lock);
234
235 /* initialize libc code */
236 init_libc();
237
238 /* register driver with resource manager */
239 if ((rmrc = RMCreateDriver(&rm_drvinfo, &rm_drvh)) != RMRC_SUCCESS) {
240 cprintf("%s: failed to register driver with resource manager (rc = %d)\n",
241 drv_name, rmrc);
242 goto init_fail;
243 }
244
245 /* parse command line parameters */
246 cmd_line = (char _far *) ((u32) ddd_pl & 0xffff0000l) + ddd_pl->cmd_line_args;
247
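  /* Hypothetical example of a command line parsed by the loop below (the
   * driver file name is illustrative):
   *
   *   BASEDEV=OS2AHCI.ADD /V /A:0 /P:2 /!N /LS:1
   *
   * enables verbose boot messages, selects adapter 0 / port 2, disables NCQ
   * on that port and requests link speed setting 1 for it.
   */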
248 for (s = cmd_line; *s != 0; s++) {
249 if (*s == '/') {
250 if ((invert_option = (s[1] == '!')) != 0) {
251 s++;
252 }
253 s++;
254 switch (tolower(*s)) {
255
256 case '\0':
257 /* end of command line; can only happen if command line is incorrect */
258 cprintf("%s: incomplete command line option\n", drv_name);
259 goto init_fail;
260
261 case 'b':
262 drv_parm_int(s, com_baud, u32, 10);
263 break;
264
265 case 'c':
266 /* set COM port base address for debug messages */
267 drv_parm_int(s, com_base, u16, 16);
268 if (com_base == 1) com_base = 0x3f8;
269 if (com_base == 2) com_base = 0x2f8;
270 break;
271
272 case 'd':
273 /* increase debug level */
274 drv_parm_int_optional(s, debug, int, 10);
275 break;
276
277 case 'g':
278 /* add specified PCI ID as a supported generic AHCI adapter */
279 drv_parm_int(s, vendor, u16, 16);
280 s--;
281 drv_parm_int(s, device, u16, 16);
282 if (add_pci_id(vendor, device)) {
283 cprintf("%s: failed to add PCI ID %04x:%04x\n", drv_name, vendor, device);
284 goto init_fail;
285 }
286 thorough_scan = 1;
287 break;
288
289 case 't':
290 /* perform thorough PCI scan (i.e. look for individual supported PCI IDs) */
291 thorough_scan = !invert_option;
292 break;
293
294 case 'r':
295 /* reset ports during initialization */
296 init_reset = !invert_option;
297 break;
298
299 case 'f':
300 /* force write cache regardless of IORB flags */
301 force_write_cache = 1;
302 break;
303
304 case 'a':
305 /* set adapter index for adapter and port-related options */
306 drv_parm_int(s, adapter_index, int, 10);
307 if (adapter_index < 0 || adapter_index >= MAX_AD) {
308 cprintf("%s: invalid adapter index (%d)\n", drv_name, adapter_index);
309 goto init_fail;
310 }
311 break;
312
313 case 'p':
314 /* set port index for port-related options */
315 drv_parm_int(s, port_index, int, 10);
316 if (port_index < 0 || port_index >= AHCI_MAX_PORTS) {
317 cprintf("%s: invalid port index (%d)\n", drv_name, port_index);
318 goto init_fail;
319 }
320 break;
321
322 case 'i':
323 /* ignore current adapter index */
324 if (adapter_index >= 0) {
325 ad_ignore |= 1U << adapter_index;
326 }
327 break;
328
329 case 's':
330 /* enable SCSI emulation for ATAPI devices */
331 set_port_option(emulate_scsi, !invert_option);
332 break;
333
334 case 'n':
335 /* enable NCQ */
336 set_port_option(enable_ncq, !invert_option);
337 break;
338
339 case 'l':
340 /* set link speed or power savings */
341 s++;
342 switch (tolower(*s)) {
343 case 's':
344 /* set link speed */
345 drv_parm_int(s, optval, int, 10);
346 set_port_option(link_speed, optval);
347 break;
348 case 'p':
349 /* set power management */
350 drv_parm_int(s, optval, int, 10);
351 set_port_option(link_power, optval);
352 break;
353 default:
354 cprintf("%s: invalid link parameter (%c)\n", drv_name, *s);
355 goto init_fail;
356 }
357 /* need to reset the port in order to establish link settings */
358 init_reset = 1;
359 break;
360
361 case '4':
362 /* enable 4K sector geometry enhancement (track size = 56) */
363 if (!invert_option) {
364 set_port_option(track_size, 56);
365 }
366 break;
367
368 case 'z':
369 /* Specify to not use the LVM information. There is no reason why anyone would
370 * want to do this, but previous versions of this driver did not have LVM capability,
371 * so this switch is here temporarily just in case.
372 */
373 use_lvm_info = !invert_option;
374 break;
375
376 case 'v':
377 /* be verbose during boot */
378 drv_parm_int_optional(s, verbosity, int, 10);
379 break;
380
381 case 'w':
382 /* Specify to allow the trace buffer to wrap when full. */
383 wrap_trace_buffer = !invert_option;
384 break;
385
386 case 'q':
387 /* Temporarily output a non-fatal message to get anyone using this
388 * undocumented switch to stop using it. This will be removed soon
389 * and the error will become fatal.
390 */
391 cprintf("%s: unknown option: /%c\n", drv_name, *s);
392 break;
393
394 default:
395 cprintf("%s: unknown option: /%c\n", drv_name, *s);
396 goto init_fail;
397 }
398 }
399 }
400
401 /* print initialization message */
402 ciprintf(init_msg, drv_name, VERSION / 100, VERSION % 100);
403
404 if (com_baud) init_com(com_baud); /* initialize com port for debug output */
405
406 /* initialize trace buffer if applicable */
407 if (TRACE_ACTIVE) {
408 /* debug is on, but COM port is off -> use our trace buffer */
409 trace_init(AHCI_TRACE_BUF_SIZE);
410 } else {
411 trace_init(AHCI_INFO_BUF_SIZE);
412 }
413
414 ntprintf("BldLevel: %s\n", BldLevel);
415 ntprintf("CmdLine: %Fs\n", cmd_line);
416
417 /* scan PCI bus for supported devices */
418 scan_pci_bus();
419
420 if (ad_info_cnt > 0) {
421 /* initialization succeeded and we found at least one AHCI adapter */
422 ADD_InitTimer(timer_pool, sizeof(timer_pool));
423 //NOT_USED mdelay_cal();
424
425 if (DevHelp_RegisterDeviceClass(drv_name, (PFN) add_entry, 0, 1, &add_handle)) {
426 cprintf("%s: couldn't register device class\n", drv_name);
427 goto init_fail;
428 }
429
430 /* allocate context hooks */
431 if (DevHelp_AllocateCtxHook(mk_NPFN(restart_hook), &restart_ctxhook_h) != 0 ||
432 DevHelp_AllocateCtxHook(mk_NPFN(reset_hook), &reset_ctxhook_h) != 0 ||
433 DevHelp_AllocateCtxHook(mk_NPFN(engine_hook), &engine_ctxhook_h)) {
434 cprintf("%s: failed to allocate task-time context hooks\n", drv_name);
435 goto init_fail;
436 }
437
438 rsp->CodeEnd = (u16) end_of_code;
439 rsp->DataEnd = (u16) &end_of_data;
440
441 /* register kernel exit routine for trap dumps */
442 register_krnl_exit();
443
444 return(STDON);
445
446 } else {
447 /* no adapters found */
448 ciprintf(" No adapters found.\n");
449 }
450
451init_fail:
452 /* initialization failed; set segment sizes to 0 and return error */
453 rsp->CodeEnd = 0;
454 rsp->DataEnd = 0;
455 init_drv_failed = 1;
456
457 /* free context hooks */
458 if (engine_ctxhook_h != 0) DevHelp_FreeCtxHook(engine_ctxhook_h);
459 if (reset_ctxhook_h != 0) DevHelp_FreeCtxHook(reset_ctxhook_h);
460 if (restart_ctxhook_h != 0) DevHelp_FreeCtxHook(restart_ctxhook_h);
461
462 if (rm_drvh != 0) {
463 /* remove driver from resource manager */
464 RMDestroyDriver(rm_drvh);
465 }
466
467 ciprintf(exit_msg, drv_name);
468 return(STDON | ERROR_I24_QUIET_INIT_FAIL);
469}
470
471/******************************************************************************
472 * Generic IOCTL via character device driver. IOCTLs are used to control the
473 * driver operation and to execute native ATA and ATAPI (SCSI) commands from
474 * ring 3 applications. On top of that, some predefined IOCTLs (e.g. SMART
475 * commands for ATA disks) are implemented here.
476 */
477USHORT gen_ioctl(RP_GENIOCTL _far *ioctl)
478{
479 dprintf("IOCTL 0x%x/0x%x\n", (u16) ioctl->Category, (u16) ioctl->Function);
480
481 switch (ioctl->Category) {
482
483 case OS2AHCI_IOCTL_CATEGORY:
484 switch (ioctl->Function) {
485
486 case OS2AHCI_IOCTL_GET_DEVLIST:
487 return(ioctl_get_devlist(ioctl));
488
489 case OS2AHCI_IOCTL_PASSTHROUGH:
490 return(ioctl_passthrough(ioctl));
491
492 }
493 break;
494
495 case DSKSP_CAT_GENERIC:
496 return(ioctl_gen_dsk(ioctl));
497
498 case DSKSP_CAT_SMART:
499 return(ioctl_smart(ioctl));
500
501 }
502
503 return(STDON | STATUS_ERR_UNKCMD);
504}
505
506/******************************************************************************
507 * Read from character device. If tracing is on (internal ring buffer trace),
508 * we return data from the trace buffer; if not, we might return a device
509 * dump similar to IBM1S506.ADD/DANIS506.ADD (TODO).
510 */
511USHORT char_dev_input(RP_RWV _far *rwrb)
512{
513 return(trace_char_dev(rwrb));
514}
515
516/******************************************************************************
517 * Device driver exit handler. This handler is called when OS/2 shuts down and
518 * flushes the write caches of all attached devices. Since this is effectively
519 * the same we do when suspending, we'll call out to the corresponding suspend
520 * function.
521 *
522 * NOTE: Errors are ignored because there's no way we could stop the shutdown
523 * or do something about the error, unless retrying endlessly is
524 * considered an option.
525 */
526USHORT exit_drv(int func)
527{
528 dprintf("exit_drv(%d) called\n", func);
529
530 if (func == 0) {
531 /* we're only interested in the second phase of the shutdown */
532 return(STDON);
533 }
534
535 suspend();
536 return(STDON);
537}
538
539/******************************************************************************
540 * Device driver suspend/resume handler. This handler is called when ACPI is
541 * executing a suspend or resume.
542 */
543USHORT sr_drv(int func)
544{
545 dprintf("sr_drv(%d) called\n", func);
546
547 if (func) resume();
548 else suspend();
549
550 return(STDON);
551}
552
553/******************************************************************************
554 * ADD entry point. This is the main entry point for all ADD requests. Due to
555 * the asynchronous nature of ADD drivers, this function primarily queues the
556 * IORB(s) to the corresponding adapter or port queues, then triggers the
557 * state machine to initiate processing queued IORBs.
558 *
559 * NOTE: In order to prevent race conditions or engine stalls, certain rules
560 * around locking, unlocking and IORB handling in general have been
561 * established. Refer to the comments in "trigger_engine()" for
562 * details.
563 */
564void _cdecl _far _loadds add_entry(IORBH _far *first_iorb)
565{
566 IORBH _far *iorb;
567 IORBH _far *next = NULL;
568
569 spin_lock(drv_lock);
570
571 for (iorb = first_iorb; iorb != NULL; iorb = next) {
572 /* Queue this IORB. Queues primarily exist on port level but there are
573 * some requests which affect the whole driver, most notably
574 * IOCC_CONFIGURATION. In either case, adding the IORB to the driver or
575 * port queue will change the links, thus we need to save the original
576 * link in 'next'.
577 */
578 next = (iorb->RequestControl & IORB_CHAIN) ? iorb->pNxtIORB : 0;
579
580 iorb->Status = 0;
581 iorb->ErrorCode = 0;
582 memset(&iorb->ADDWorkSpace, 0x00, sizeof(ADD_WORKSPACE));
583
584 if (iorb_driver_level(iorb)) {
585 /* driver-level IORB */
586 iorb->UnitHandle = 0;
587 iorb_queue_add(&driver_queue, iorb);
588
589 } else {
590 /* port-level IORB */
591 int a = iorb_unit_adapter(iorb);
592 int p = iorb_unit_port(iorb);
593 int d = iorb_unit_device(iorb);
594
595 if (a >= ad_info_cnt ||
596 p > ad_infos[a].port_max ||
597 d > ad_infos[a].ports[p].dev_max ||
598 (ad_infos[a].port_map & (1UL << p)) == 0) {
599
600 /* unit handle outside of the allowed range */
601 dprintf("warning: IORB for %d.%d.%d out of range\n", a, p, d);
602 iorb->Status = IORB_ERROR;
603 iorb->ErrorCode = IOERR_CMD_SYNTAX;
604 iorb_complete(iorb);
605 continue;
606 }
607
608 iorb_queue_add(&ad_infos[a].ports[p].iorb_queue, iorb);
609 }
610 }
611
612 /* trigger state machine */
613 trigger_engine();
614
615 spin_unlock(drv_lock);
616}
617
618/******************************************************************************
619 * Trigger IORB queue engine. This is a wrapper function for trigger_engine_1()
620 * which will try to get all IORBs sent on their way a couple of times. If
621 * there are still IORBs ready for processing after this, this function will
622 * hand off to a context hook which will continue to trigger the engine until
623 * all IORBs have been sent.
624 *
625 * NOTE: While initialization has not completed (or during suspend/resume
626 * operations), this function will loop indefinitely because we can't
627 * rely on interrupt handlers or context hooks and complex IORBs
628 * requiring multiple requeues would eventually hang and time out if
629 * we stopped triggering here.
630 */
631void trigger_engine(void)
632{
633 int i;
634
635 for (i = 0; i < 3 || !init_complete; i++) {
636 if (trigger_engine_1() == 0) {
637 /* done -- all IORBs have been sent on their way */
638 return;
639 }
640 }
641
642 /* Something keeps bouncing; hand off to the engine context hook which will
643 * keep trying in the background.
644 */
645 DevHelp_ArmCtxHook(0, engine_ctxhook_h);
646}
647
648/******************************************************************************
649 * Trigger IORB queue engine in order to send commands in the driver/port IORB
650 * queues to the AHCI hardware. This function will return the number of IORBs
651 * sent. Keep in mind that IORBs might "bounce" if the adapter/port is not in
652 * a state to accept the command, thus it might take quite a few calls to get
653 * all IORBs on their way. This is why there's a wrapper function which tries
654 * it a few times, then hands off to a context hook which will keep trying in
655 * the background.
656 *
657 * IORBs might complete before send_iorb() has returned, at any time during
658 * interrupt processing or on another CPU on SMP systems. IORB completion
659 * means modifications to the corresponding IORB queue (the completed IORB
660 * is removed from the queue) thus we need to protect the IORB queues from
661 * race conditions. The safest approach short of keeping the driver-level
662 * spinlock acquired permanently is to keep it throughout this function and
663 * release it temporarily in send_iorb().
664 *
665 * This implies that the handler functions are fully responsible for acquiring
666 * the driver-level spinlock when they need it, and for releasing it again.
667 *
668 * As a rule of thumb, get the driver-level spinlock whenever accessing
669 * volatile variables (IORB queues, values in ad_info[], ...).
670 *
671 * Additional Notes:
672 *
673 * - This function is expected to be called with the spinlock acquired
674 *
675 * - Adapters can be flagged as 'busy' which means no new IORBs are sent (they
676 * just remain in the queue). This can be used to release the driver-level
677 * spinlock while making sure no new IORBs are going to hit the hardware.
678 * In order to prevent engine stalls, all handlers using this functionality
679 * need to invoke trigger_engine() after resetting the busy flag.
680 *
681 * - Driver-level IORBs are not synchronized by adapter-level 'busy' flags.
682 * However, the driver-level queue is worked "one entry at a time" which
683 * means that no new IORBs will be queued on the driver-level queue until
684 * the head element has completed processing. This means that driver-
685 * level IORB handlers don't need to protect against each other. But they
 686 * do need to keep in mind interference with port-level IORBs:
687 *
688 * - Driver-level IORB handlers must obtain the spinlock and/or flag all
689 * adapters as 'busy' which are affected by the driver-level IORB
690 *
691 * - Driver-level IORB handlers must not access the hardware of a
692 * particular adapter if it's flagged as 'busy' by another IORB.
693 */
694int trigger_engine_1(void)
695{
696 IORBH _far *iorb;
697 IORBH _far *next;
698 int iorbs_sent = 0;
699 int a;
700 int p;
701
702 iorbs_sent = 0;
703
704 /* process driver-level IORBs */
705 if ((iorb = driver_queue.root) != NULL && !add_workspace(iorb)->processing) {
706 send_iorb(iorb);
707 iorbs_sent++;
708 }
709
710 /* process port-level IORBs */
711 for (a = 0; a < ad_info_cnt; a++) {
712 AD_INFO *ai = ad_infos + a;
713 if (ai->busy) {
714 /* adapter is busy; don't process any IORBs */
715 continue;
716 }
717 for (p = 0; p <= ai->port_max; p++) {
718 /* send all queued IORBs on this port */
719 next = NULL;
720 for (iorb = ai->ports[p].iorb_queue.root; iorb != NULL; iorb = next) {
721 next = iorb->pNxtIORB;
722 if (!add_workspace(iorb)->processing) {
723 send_iorb(iorb);
724 iorbs_sent++;
725 }
726 }
727 }
728 }
729
730 return(iorbs_sent);
731}
732
733/******************************************************************************
734 * Send a single IORB to the corresponding AHCI adapter/port. This is just a
735 * switch board for calling the corresponding iocc_*() handler function.
736 *
737 * NOTE: This function is expected to be called with the driver-level spinlock
738 * acquired. It will release it before calling any of the handler
 739 * functions and re-acquire it when done.
740 */
741void send_iorb(IORBH _far *iorb)
742{
743 /* Mark IORB as "processing" before doing anything else. Once the IORB is
744 * marked as "processing", we can release the spinlock because subsequent
745 * invocations of trigger_engine() (e.g. at interrupt time) will ignore this
746 * IORB.
747 */
748 add_workspace(iorb)->processing = 1;
749 spin_unlock(drv_lock);
750
751 switch (iorb->CommandCode) {
752
753 case IOCC_CONFIGURATION:
754 iocc_configuration(iorb);
755 break;
756
757 case IOCC_DEVICE_CONTROL:
758 iocc_device_control(iorb);
759 break;
760
761 case IOCC_UNIT_CONTROL:
762 iocc_unit_control(iorb);
763 break;
764
765 case IOCC_GEOMETRY:
766 iocc_geometry(iorb);
767 break;
768
769 case IOCC_EXECUTE_IO:
770 iocc_execute_io(iorb);
771 break;
772
773 case IOCC_UNIT_STATUS:
774 iocc_unit_status(iorb);
775 break;
776
777 case IOCC_ADAPTER_PASSTHRU:
778 iocc_adapter_passthru(iorb);
779 break;
780
781 default:
782 /* unsupported call */
783 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
784 iorb_done(iorb);
785 break;
786 }
787
788 /* re-acquire spinlock before returning to trigger_engine() */
789 spin_lock(drv_lock);
790}
791
792/******************************************************************************
793 * Handle IOCC_CONFIGURATION requests.
794 */
795void iocc_configuration(IORBH _far *iorb)
796{
797 int a;
798
799 switch (iorb->CommandModifier) {
800
801 case IOCM_COMPLETE_INIT:
802 /* Complete initialization. From now on, we won't have to restore the BIOS
803 * configuration after each command and we're fully operational (i.e. will
804 * use interrupts, timers and context hooks instead of polling).
805 */
806 if (!init_complete) {
807 dprintf("leaving initialization mode\n");
808 for (a = 0; a < ad_info_cnt; a++) {
809 lock_adapter(ad_infos + a);
810 ahci_complete_init(ad_infos + a);
811 }
812 init_complete = 1;
813
814 /* DAZ turn off COM port output if on */
815 //com_base = 0;
816
817 /* release all adapters */
818 for (a = 0; a < ad_info_cnt; a++) {
819 unlock_adapter(ad_infos + a);
820 }
821
822 #ifdef LEGACY_APM
823 /* register APM hook */
824 apm_init();
825 #endif
826
827 if (!TRACE_ACTIVE) build_user_info();
828 }
829 iorb_done(iorb);
830 break;
831
832 case IOCM_GET_DEVICE_TABLE:
833 /* construct a device table */
834 iocm_device_table(iorb);
835 break;
836
837 default:
838 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
839 iorb_done(iorb);
840 break;
841 }
842}
843
844/******************************************************************************
845 * Handle IOCC_DEVICE_CONTROL requests.
846 */
847void iocc_device_control(IORBH _far *iorb)
848{
849 AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb);
850 IORBH _far *ptr;
851 IORBH _far *next = NULL;
852 int p = iorb_unit_port(iorb);
853 int d = iorb_unit_device(iorb);
854
855 switch (iorb->CommandModifier) {
856
857 case IOCM_ABORT:
858 /* abort all pending commands on specified port and device */
859 spin_lock(drv_lock);
860 for (ptr = ai->ports[p].iorb_queue.root; ptr != NULL; ptr = next) {
861 next = ptr->pNxtIORB;
862 /* move all matching IORBs to the abort queue */
863 if (ptr != iorb && iorb_unit_device(ptr) == d) {
864 iorb_queue_del(&ai->ports[p].iorb_queue, ptr);
865 iorb_queue_add(&abort_queue, ptr);
866 ptr->ErrorCode = IOERR_CMD_ABORTED;
867 }
868 }
869 spin_unlock(drv_lock);
870
871 /* trigger reset context hook which will finish the abort processing */
872 DevHelp_ArmCtxHook(0, reset_ctxhook_h);
873 break;
874
875 case IOCM_SUSPEND:
876 case IOCM_RESUME:
877 case IOCM_GET_QUEUE_STATUS:
878 /* Suspend/resume operations allow access to the hardware for other
879 * entities such as IBMIDECD.FLT. Since os2ahci implements both ATA
880 * and ATAPI in the same driver, this won't be required.
881 */
882 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
883 break;
884
885 case IOCM_LOCK_MEDIA:
886 case IOCM_UNLOCK_MEDIA:
887 case IOCM_EJECT_MEDIA:
888 /* unit control commands to lock, unlock and eject media */
889 /* will be supported later... */
890 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
891 break;
892
893 default:
894 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
895 break;
896 }
897
898 iorb_done(iorb);
899}
900
901/******************************************************************************
902 * Handle IOCC_UNIT_CONTROL requests.
903 */
904void iocc_unit_control(IORBH _far *iorb)
905{
906 IORB_UNIT_CONTROL _far *iorb_uc = (IORB_UNIT_CONTROL _far *) iorb;
907 int a = iorb_unit_adapter(iorb);
908 int p = iorb_unit_port(iorb);
909 int d = iorb_unit_device(iorb);
910
911 spin_lock(drv_lock);
912 switch (iorb->CommandModifier) {
913
914 case IOCM_ALLOCATE_UNIT:
915 /* allocate unit for exclusive access */
916 if (ad_infos[a].ports[p].devs[d].allocated) {
917 iorb_seterr(iorb, IOERR_UNIT_ALLOCATED);
918 } else {
919 ad_infos[a].ports[p].devs[d].allocated = 1;
920 }
921 break;
922
923 case IOCM_DEALLOCATE_UNIT:
924 /* deallocate exclusive access to unit */
925 if (!ad_infos[a].ports[p].devs[d].allocated) {
926 iorb_seterr(iorb, IOERR_UNIT_NOT_ALLOCATED);
927 } else {
928 ad_infos[a].ports[p].devs[d].allocated = 0;
929 }
930 break;
931
932 case IOCM_CHANGE_UNITINFO:
933 /* Change unit (device) information. One reason for this IOCM is the
934 * interface for filter device drivers: a filter device driver can
935 * either change existing UNITINFOs or permanently allocate units
936 * and fabricate new [logical] units; the former is the reason why we
937 * must store the pointer to the updated UNITINFO for subsequent
938 * IOCC_CONFIGURATION/IOCM_GET_DEVICE_TABLE calls.
939 */
940 if (!ad_infos[a].ports[p].devs[d].allocated) {
941 iorb_seterr(iorb, IOERR_UNIT_NOT_ALLOCATED);
942 break;
943 }
944 ad_infos[a].ports[p].devs[d].unit_info = iorb_uc->pUnitInfo;
945 break;
946
947 default:
948 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
949 break;
950 }
951
952 spin_unlock(drv_lock);
953 iorb_done(iorb);
954}
955
956/******************************************************************************
957 * Scan all ports for AHCI devices and construct a DASD device table.
958 *
959 * NOTES: This function may be called multiple times. Only the first
960 * invocation will actually scan for devices; all subsequent calls will
961 * merely return the results of the initial scan, potentially augmented
962 * by modified unit infos after IOCC_CONFIGURATION/IOCM_CHANGE_UNITINFO
963 * requests.
964 *
965 * In order to support applications that can't deal with ATAPI devices
966 * (i.e. need a SCSI adapter) os2ahci will optionally report ATAPI
967 * devices as SCSI devices. The corresponding SCSI adapter doesn't
 968 * really exist and is only reported here for the IOCM_GET_DEVICE_TABLE
969 * request. The units attached to this adapter will use the real HW
970 * unit IDs, thus we'll never receive a command specific to the
971 * emulated SCSI adapter and won't need to set up any sort of entity
972 * for it; the only purpose of the emulated SCSI adapter is to pass the
973 * bus type "AI_DEVBUS_SCSI_2" upstream, and the emulated units, of
974 * course. The emulated SCSI target IDs are allocated as follows:
975 *
976 * 0 the virtual adapter
977 * 1..n emulated devices; SCSI target ID increments sequentially
978 */
979void iocm_device_table(IORBH _far *iorb)
980{
981 IORB_CONFIGURATION _far *iorb_conf;
982 DEVICETABLE _far *dt;
983 char _far *pos;
984 int scsi_units = 0;
985 int scsi_id = 1;
986 int rc;
987 int dta;
988 int a;
989 int p;
990 int d;
991
992 iorb_conf = (IORB_CONFIGURATION _far *) iorb;
993 dt = iorb_conf->pDeviceTable;
994
995 spin_lock(drv_lock);
996
997 /* initialize device table header */
998 dt->ADDLevelMajor = ADD_LEVEL_MAJOR;
999 dt->ADDLevelMinor = ADD_LEVEL_MINOR;
1000 dt->ADDHandle = add_handle;
1001 dt->TotalAdapters = ad_info_cnt + 1; /* +1 for the virtual SCSI adapter */
1002
1003 /* set start of adapter and device information tables */
1004 pos = (char _far *) (dt->pAdapter + dt->TotalAdapters);
1005
1006 /* go through all adapters, including the virtual SCSI adapter */
1007 for (dta = 0; dta < dt->TotalAdapters; dta++) {
1008 ADAPTERINFO _far *ptr = (ADAPTERINFO _far *) pos;
1009
1010 /* sanity check for sufficient space in device table */
1011 if ((u32) (ptr + 1) - (u32) dt > iorb_conf->DeviceTableLen) {
1012 dprintf("error: device table provided by DASD too small\n");
1013 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE);
1014 goto iocm_device_table_done;
1015 }
1016
1017 dt->pAdapter[dta] = (ADAPTERINFO _near *) ((u32) ptr & 0xffff);
1018 memset(ptr, 0x00, sizeof(*ptr));
1019
1020 ptr->AdapterIOAccess = AI_IOACCESS_BUS_MASTER;
1021 ptr->AdapterHostBus = AI_HOSTBUS_OTHER | AI_BUSWIDTH_32BIT;
1022 ptr->AdapterFlags = AF_16M | AF_HW_SCATGAT;
1023 ptr->MaxHWSGList = AHCI_MAX_SG / 2; /* AHCI S/G elements are 22 bits */
1024
1025 if (dta < ad_info_cnt) {
1026 /* this is a physical AHCI adapter */
1027 AD_INFO *ad_info = ad_infos + dta;
1028
1029 ptr->AdapterDevBus = AI_DEVBUS_ST506 | AI_DEVBUS_32BIT;
1030 sprintf(ptr->AdapterName, "AHCI_%d", dta);
1031
1032 if (!ad_info->port_scan_done) {
1033 /* first call; need to scan AHCI hardware for devices */
1034 if (ad_info->busy) {
1035 dprintf("error: port scan requested while adapter was busy\n");
1036 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE);
1037 goto iocm_device_table_done;
1038 }
1039 ad_info->busy = 1;
1040 spin_unlock(drv_lock);
1041 rc = ahci_scan_ports(ad_info);
1042 spin_lock(drv_lock);
1043 ad_info->busy = 0;
1044
1045 if (rc != 0) {
1046 dprintf("error: port scan failed on adapter #%d\n", dta);
1047 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE);
1048 goto iocm_device_table_done;
1049 }
1050 ad_info->port_scan_done = 1;
1051 }
1052
1053 /* insert physical (i.e. AHCI) devices into the device table */
1054 for (p = 0; p <= ad_info->port_max; p++) {
1055 for (d = 0; d <= ad_info->ports[p].dev_max; d++) {
1056 if (ad_info->ports[p].devs[d].present) {
1057 if (ad_info->ports[p].devs[d].atapi && emulate_scsi[dta][p]) {
1058 /* only report this unit as SCSI unit */
1059 scsi_units++;
1060 continue;
1061 }
1062 if (add_unit_info(iorb_conf, dta, dta, p, d, 0)) {
1063 goto iocm_device_table_done;
1064 }
1065 }
1066 }
1067 }
1068
1069 } else {
1070 /* this is the virtual SCSI adapter */
1071 if (scsi_units == 0) {
1072 /* not a single unit to be emulated via SCSI */
1073 dt->TotalAdapters--;
1074 break;
1075 }
1076
1077 /* set adapter name and bus type to mimic a SCSI controller */
1078 ptr->AdapterDevBus = AI_DEVBUS_SCSI_2 | AI_DEVBUS_16BIT;
1079 sprintf(ptr->AdapterName, "AHCI_SCSI_0");
1080
1081 /* add all ATAPI units to be emulated by this virtual adapter */
1082 for (a = 0; a < ad_info_cnt; a++) {
1083 AD_INFO *ad_info = ad_infos + a;
1084
1085 for (p = 0; p <= ad_info->port_max; p++) {
1086 for (d = 0; d <= ad_info->ports[p].dev_max; d++) {
1087 if (ad_info->ports[p].devs[d].present &&
1088 ad_info->ports[p].devs[d].atapi &&
1089 emulate_scsi[a][p]) {
1090 if (add_unit_info(iorb_conf, dta, a, p, d, scsi_id++)) {
1091 goto iocm_device_table_done;
1092 }
1093 }
1094 }
1095 }
1096 }
1097 }
1098
1099 /* calculate offset for next adapter */
1100 pos = (char _far *) (ptr->UnitInfo + ptr->AdapterUnits);
1101 }
1102
1103iocm_device_table_done:
1104 spin_unlock(drv_lock);
1105 iorb_done(iorb);
1106}
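/* Worked example (hypothetical configuration): with one AHCI adapter whose
 * port 0 holds a disk and port 1 an ATAPI unit, and SCSI emulation enabled,
 * the function above reports adapter "AHCI_0" with the disk unit plus a
 * second, virtual adapter "AHCI_SCSI_0" whose only unit is the ATAPI device
 * with emulated SCSI target ID 1.
 */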
1107
1108/******************************************************************************
1109 * Handle IOCC_GEOMETRY requests.
1110 */
1111void iocc_geometry(IORBH _far *iorb)
1112{
1113 switch (iorb->CommandModifier) {
1114
1115 case IOCM_GET_MEDIA_GEOMETRY:
1116 case IOCM_GET_DEVICE_GEOMETRY:
1117 add_workspace(iorb)->idempotent = 1;
1118 ahci_get_geometry(iorb);
1119 break;
1120
1121 default:
1122 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1123 iorb_done(iorb);
1124 }
1125}
1126
1127/******************************************************************************
1128 * Handle IOCC_EXECUTE_IO requests.
1129 */
1130void iocc_execute_io(IORBH _far *iorb)
1131{
1132 switch (iorb->CommandModifier) {
1133
1134 case IOCM_READ:
1135 add_workspace(iorb)->idempotent = 1;
1136 ahci_read(iorb);
1137 break;
1138
1139 case IOCM_READ_VERIFY:
1140 add_workspace(iorb)->idempotent = 1;
1141 ahci_verify(iorb);
1142 break;
1143
1144 case IOCM_WRITE:
1145 add_workspace(iorb)->idempotent = 1;
1146 ahci_write(iorb);
1147 break;
1148
1149 case IOCM_WRITE_VERIFY:
1150 add_workspace(iorb)->idempotent = 1;
1151 ahci_write(iorb);
1152 break;
1153
1154 default:
1155 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1156 iorb_done(iorb);
1157 }
1158}
1159
1160/******************************************************************************
1161 * Handle IOCC_UNIT_STATUS requests.
1162 */
1163void iocc_unit_status(IORBH _far *iorb)
1164{
1165 switch (iorb->CommandModifier) {
1166
1167 case IOCM_GET_UNIT_STATUS:
1168 add_workspace(iorb)->idempotent = 1;
1169 ahci_unit_ready(iorb);
1170 break;
1171
1172 default:
1173 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1174 iorb_done(iorb);
1175 }
1176}
1177
1178/******************************************************************************
1179 * Handle IOCC_ADAPTER_PASSTHROUGH requests.
1180 */
1181void iocc_adapter_passthru(IORBH _far *iorb)
1182{
1183 switch (iorb->CommandModifier) {
1184
1185 case IOCM_EXECUTE_CDB:
1186 add_workspace(iorb)->idempotent = 0;
1187 ahci_execute_cdb(iorb);
1188 break;
1189
1190 case IOCM_EXECUTE_ATA:
1191 add_workspace(iorb)->idempotent = 0;
1192 ahci_execute_ata(iorb);
1193 break;
1194
1195 default:
1196 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1197 iorb_done(iorb);
1198 }
1199}
1200
1201/******************************************************************************
1202 * Add an IORB to the specified queue. This function must be called with the
1203 * adapter-level spinlock acquired.
1204 */
1205void iorb_queue_add(IORB_QUEUE _far *queue, IORBH _far *iorb)
1206{
1207 if (iorb_priority(iorb)) {
1208 /* priority IORB; insert at first position */
1209 iorb->pNxtIORB = queue->root;
1210 queue->root = iorb;
1211
1212 } else {
1213 /* append IORB to end of queue */
1214 iorb->pNxtIORB = NULL;
1215
1216 if (queue->root == NULL) {
1217 queue->root = iorb;
1218 } else {
1219 queue->tail->pNxtIORB = iorb;
1220 }
1221 queue->tail = iorb;
1222 }
1223
1224 if (debug) {
1225 /* determine queue type (local, driver, abort or port) and minimum debug
1226 * level; otherwise, queue debug prints can become really confusing.
1227 */
1228 char *queue_type;
1229 int min_debug = 1;
1230
1231 if ((u32) queue >> 16 == (u32) (void _far *) &queue >> 16) {
1232 /* this queue is on the stack */
1233 queue_type = "local";
1234 min_debug = 2;
1235
1236 } else if (queue == &driver_queue) {
1237 queue_type = "driver";
1238
1239 } else if (queue == &abort_queue) {
1240 queue_type = "abort";
1241 min_debug = 2;
1242
1243 } else {
1244 queue_type = "port";
1245 }
1246
1247 if (debug > min_debug) {
1248 aprintf("IORB %Fp queued (cmd = %d/%d, queue = %Fp [%s], timeout = %ld)\n",
1249 iorb, iorb->CommandCode, iorb->CommandModifier, queue, queue_type,
1250 iorb->Timeout);
1251 }
1252 }
1253}
1254
1255/******************************************************************************
1256 * Remove an IORB from the specified queue. This function must be called with
1257 * the adapter-level spinlock acquired.
1258 */
1259int iorb_queue_del(IORB_QUEUE _far *queue, IORBH _far *iorb)
1260{
1261 IORBH _far *_iorb;
1262 IORBH _far *_prev = NULL;
1263 int found = 0;
1264
1265 for (_iorb = queue->root; _iorb != NULL; _iorb = _iorb->pNxtIORB) {
1266 if (_iorb == iorb) {
1267 /* found the IORB to be removed */
1268 if (_prev != NULL) {
1269 _prev->pNxtIORB = _iorb->pNxtIORB;
1270 } else {
1271 queue->root = _iorb->pNxtIORB;
1272 }
1273 if (_iorb == queue->tail) {
1274 queue->tail = _prev;
1275 }
1276 found = 1;
1277 break;
1278 }
1279 _prev = _iorb;
1280 }
1281
1282 if (found) {
1283 ddprintf("IORB %Fp removed (queue = %Fp)\n", iorb, queue);
1284 } else {
1285 dprintf("IORB %Fp not found in queue %Fp\n", iorb, queue);
1286 }
1287
1288 return(!found);
1289}
1290
1291/******************************************************************************
1292 * Set the error code in the specified IORB
1293 *
1294 * NOTE: This function does *not* call iorb_done(). It merely sets the IORB
1295 * status to the specified error code.
1296 */
1297void iorb_seterr(IORBH _far *iorb, USHORT error_code)
1298{
1299 iorb->ErrorCode = error_code;
1300 iorb->Status |= IORB_ERROR;
1301}
1302
1303/******************************************************************************
1304 * Mark the specified IORB as done and notify the asynchronous post function,
1305 * if any. The IORB is also removed from the corresponding IORB queue.
1306 *
1307 * NOTES: This function does not clear the Status field; it merely adds the
1308 * IORB_DONE flag.
1309 *
1310 * This function is expected to be called *without* the corresponding
1311 * driver-level drv_lock acquired. It will acquire the spinlock before
1312 * updating the IORB queue and release it before notifying the upstream
1313 * code in order to prevent deadlocks.
1314 *
1315 * Due to this logic, this function is only good for simple task-time
1316 * completions. Functions working on lists of IORBs (such as interrupt
1317 * handlers or context hooks) should call iorb_complete() directly and
1318 * implement their own logic for removing the IORB from the port queue.
1319 * See abort_ctxhook() for an example.
1320 */
1321void iorb_done(IORBH _far *iorb)
1322{
1323 int a = iorb_unit_adapter(iorb);
1324 int p = iorb_unit_port(iorb);
1325
1326 /* remove IORB from corresponding queue */
1327 spin_lock(drv_lock);
1328 if (iorb_driver_level(iorb)) {
1329 iorb_queue_del(&driver_queue, iorb);
1330 } else {
1331 iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, iorb);
1332 }
1333 aws_free(add_workspace(iorb));
1334 spin_unlock(drv_lock);
1335
1336 iorb_complete(iorb);
1337}
1338
1339/******************************************************************************
1340 * Complete an IORB. This should be called without the adapter-level spinlock
1341 * to allow the IORB completion routine to perform whatever processing it
1342 * requires. This implies that the IORB should no longer be in any global
1343 * queue because the IORB completion routine may well reuse the IORB and send
1344 * the next request to us before even returning from this function.
1345 */
1346void iorb_complete(IORBH _far *iorb)
1347{
1348 iorb->Status |= IORB_DONE;
1349
1350 ddprintf("IORB %Fp complete (status = 0x%04x, error = 0x%04x)\n",
1351 iorb, iorb->Status, iorb->ErrorCode);
1352
1353 if (iorb->RequestControl & IORB_ASYNC_POST) {
1354 iorb->NotifyAddress(iorb);
1355 }
1356}
1357
1358/******************************************************************************
1359 * Requeue the specified IORB such that it will be sent downstream for
1360 * processing again. This includes freeing all resources currently allocated
1361 * (timer, buffer, ...) and resetting the flags to 0. The driver-level
1362 * spinlock must be acquired when calling this function.
1363 *
1364 * The following flags are preserved:
1365 * - no_ncq
1366 */
1367void iorb_requeue(IORBH _far *iorb)
1368{
1369 ADD_WORKSPACE _far *aws = add_workspace(iorb);
1370 u16 no_ncq = aws->no_ncq;
1371 u16 unaligned = aws->unaligned;
1372 u16 retries = aws->retries;
1373
1374 aws_free(aws);
1375 memset(aws, 0x00, sizeof(*aws));
1376
1377 aws->no_ncq = no_ncq;
1378 aws->unaligned = unaligned;
1379 aws->retries = retries;
1380}
1381
1382/******************************************************************************
1383 * Free resources in ADD workspace (timer, buffer, ...). This function should
1384 * be called with the spinlock held to prevent race conditions.
1385 */
1386void aws_free(ADD_WORKSPACE _far *aws)
1387{
1388 if (aws->timer != 0) {
1389 ADD_CancelTimer(aws->timer);
1390 aws->timer = 0;
1391 }
1392
1393 if (aws->buf != NULL) {
1394 free(aws->buf);
1395 aws->buf = NULL;
1396 }
1397}
1398
1399/******************************************************************************
1400 * Lock the adapter, waiting for availability if necessary. This is expected
1401 * to be called at task/request time without the driver-level spinlock
1402 * acquired. Don't call at interrupt time.
1403 */
1404void lock_adapter(AD_INFO *ai)
1405{
1406 TIMER Timer;
1407
1408 spin_lock(drv_lock);
1409 while (ai->busy) {
1410 spin_unlock(drv_lock);
1411 timer_init(&Timer, 250);
1412 while (!timer_check_and_block(&Timer));
1413 spin_lock(drv_lock);
1414 }
1415 ai->busy = 1;
1416 spin_unlock(drv_lock);
1417}
1418
1419/******************************************************************************
1420 * Unlock adapter (i.e. reset busy flag)
1421 */
1422void unlock_adapter(AD_INFO *ai)
1423{
1424 ai->busy = 0;
1425}
1426
1427/******************************************************************************
1428 * Timeout handler for I/O commands. Since timeout handling can involve
1429 * lengthy operations like port resets, the main code is located in a
1430 * separate function which is invoked via a context hook.
1431 */
1432void _cdecl _far timeout_callback(ULONG timer_handle, ULONG p1,
1433 ULONG p2)
1434{
1435 IORBH _far *iorb = (IORBH _far *) p1;
1436 int a = iorb_unit_adapter(iorb);
1437 int p = iorb_unit_port(iorb);
1438
1439 ADD_CancelTimer(timer_handle);
1440 dprintf("timeout for IORB %Fp\n", iorb);
1441
1442 /* Move the timed-out IORB to the abort queue. Since it's possible that the
1443 * IORB has completed after the timeout has expired but before we got to
1444 * this line of code, we'll check the return code of iorb_queue_del(): If it
1445 * returns an error, the IORB must have completed a few microseconds ago and
1446 * there is no timeout.
1447 */
1448 spin_lock(drv_lock);
1449 if (iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, iorb) == 0) {
1450 iorb_queue_add(&abort_queue, iorb);
1451 iorb->ErrorCode = IOERR_ADAPTER_TIMEOUT;
1452 }
1453 spin_unlock(drv_lock);
1454
1455 /* Trigger abort processing function. We don't really care whether this
1456 * succeeds because the only reason why it would fail should be multiple
1457 * calls to DevHelp_ArmCtxHook() before the context hook had a chance to
1458 * start executing, which leaves two scenarios:
1459 *
1460 * - We succeeded in arming the context hook. Fine.
1461 *
1462 * - We armed the context hook a second time before it had a chance to
1463 * start executing. In this case, the already scheduled context hook
1464 * will process our IORB as well.
1465 */
1466 DevHelp_ArmCtxHook(0, reset_ctxhook_h);
1467
1468 /* Set up a watchdog timer which calls the context hook manually in case
1469 * some kernel thread is looping around the IORB_COMPLETE status bit
1470 * without yielding the CPU (kernel threads don't preempt). This shouldn't
1471 * happen per design because kernel threads are supposed to yield but it
1472 * does in the early boot phase.
1473 */
1474 ADD_StartTimerMS(&th_reset_watchdog, 5000, (PFN) reset_watchdog, 0, 0);
1475}
1476
1477/******************************************************************************
1478 * Reset handler watchdog. If a timeout occurs, a context hook is armed which
1479 * will execute as soon as a kernel thread yields the CPU. However, some
1480 * kernel components won't yield the CPU during the early boot phase and the
1481 * only way to kick some sense into those components is to run the context
1482 * hook right inside this timer callback. Not exactly pretty, especially
1483 * considering the fact that context hooks were implemented to prevent running
1484 * lengthy operations like a port reset at interrupt time, but without this
1485 * watchdog mechanism we run the risk of getting completely stalled by device
1486 * problems during the early boot phase.
1487 */
1488void _cdecl _far reset_watchdog(ULONG timer_handle, ULONG p1,
1489 ULONG p2)
1490{
1491 /* reset watchdog timer */
1492 ADD_CancelTimer(timer_handle);
1493 dprintf("reset watchdog invoked\n");
1494
1495 /* call context hook manually */
1496 reset_ctxhook(0);
1497}
1498
1499/******************************************************************************
1500 * small_code_ - this dummy func resolves the undefined reference linker
1501 * error that occurs when linking WATCOM objects with DDK's link.exe
1502 */
1503void _cdecl small_code_(void)
1504{
1505}
1506
1507/******************************************************************************
1508 * Add unit info to ADAPTERINFO array (IOCM_GET_DEVICE_TABLE requests). The
 1509 * adapter info array in the device table, dt->pAdapter[], is expected to be
 1510 * initialized for the specified index (dta).
1511 *
1512 * Please note that the device table adapter index, dta, is not always equal
1513 * to the physical adapter index, a: if SCSI emulation has been activated, the
1514 * last reported adapter is a virtual SCSI adapter and the physical adapter
1515 * indexes for those units are, of course, different from the device table
1516 * index of the virtual SCSI adapter.
1517 */
1518static int add_unit_info(IORB_CONFIGURATION _far *iorb_conf, int dta,
1519 int a, int p, int d, int scsi_id)
1520{
1521 DEVICETABLE _far *dt = iorb_conf->pDeviceTable;
1522 ADAPTERINFO _far *ptr = (ADAPTERINFO _far *) (((u32) dt & 0xffff0000U) +
1523 (u16) dt->pAdapter[dta]);
1524 UNITINFO _far *ui = ptr->UnitInfo + ptr->AdapterUnits;
1525 AD_INFO *ai = ad_infos + a;
1526
1527 if ((u32) (ui + 1) - (u32) dt > iorb_conf->DeviceTableLen) {
1528 dprintf("error: device table provided by DASD too small\n");
1529 iorb_seterr(&iorb_conf->iorbh, IOERR_CMD_SW_RESOURCE);
1530 return(-1);
1531 }
1532
1533 if (ai->ports[p].devs[d].unit_info == NULL) {
1534 /* provide original information about this device (unit) */
1535 memset(ui, 0x00, sizeof(*ui));
1536 ui->AdapterIndex = dta; /* device table adapter index */
1537 ui->UnitHandle = iorb_unit(a, p, d); /* encodes physical adapter/port/device */
1538 ui->UnitIndex = ptr->AdapterUnits;
1539 ui->UnitType = ai->ports[p].devs[d].dev_type;
1540 ui->QueuingCount = ai->ports[p].devs[d].ncq_max;
1541 if (ai->ports[p].devs[d].removable) {
1542 ui->UnitFlags |= UF_REMOVABLE;
1543 }
1544 if (scsi_id > 0) {
1545 /* set fake SCSI ID for this unit */
1546 ui->UnitSCSITargetID = scsi_id;
1547 }
1548 } else {
1549 /* copy updated device (unit) information (IOCM_CHANGE_UNITINFO) */
1550 memcpy(ui, ai->ports[p].devs[d].unit_info, sizeof(*ui));
1551 }
1552
1553 ptr->AdapterUnits++;
1554 return(0);
1555}
1556
1557/*******************************************************************************
1558 * Register kernel exit handler for trap dumps. Our exit handler will be called
1559 * right before the kernel starts a dump; that's where we reset the controller
1560 * so it supports BIOS int13 I/O calls.
1561 */
1562static void register_krnl_exit(void)
1563{
1564 _asm {
1565 push ds
1566 push es
1567 push bx
1568 push si
1569 push di
1570
1571 mov ax, FLAG_KRNL_EXIT_ADD
1572 mov cx, TYPE_KRNL_EXIT_INT13
1573 mov bx, SEG asm_krnl_exit
1574 mov si, OFFSET asm_krnl_exit
1575 mov dl, DevHlp_RegisterKrnlExit
1576
1577 call dword ptr [Device_Help]
1578
1579 pop di
1580 pop si
1581 pop bx
1582 pop es
1583 pop ds
1584 }
1585
1586 dprintf("Registered kernel exit routine for INT13 mode\n");
1587}
1588