source: trunk/src/os2ahci/os2ahci.c@162

Last change on this file since 162 was 162, checked in by David Azarewicz, 12 years ago

driver info updates, misc cleanup, add comments
This is version 1.28

File size: 51.8 KB
1/******************************************************************************
2 * os2ahci.c - main file for os2ahci driver
3 *
4 * Copyright (c) 2011 thi.guten Software Development
5 * Copyright (c) 2011 Mensys B.V.
6 *
7 * Authors: Christian Mueller, Markus Thielen
8 *
9 * Parts copied from/inspired by the Linux AHCI driver;
10 * those parts are (c) Linux AHCI/ATA maintainers
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include "os2ahci.h"
28#include "ioctl.h"
29#include "version.h"
30
31/* -------------------------- macros and constants ------------------------- */
32
33/* parse integer command line parameter */
34#define drv_parm_int(s, value, type, radix) \
35 { \
36 char _far *_ep; \
37 if ((s)[1] != ':') { \
38 cprintf("%s: missing colon (:) after /%c\n", drv_name, *(s)); \
39 goto init_fail; \
40 } \
41 value = (type) strtol((s) + 2, \
42 (const char _far* _far*) &_ep, \
43 radix); \
44 s = _ep; \
45 }
46
47#define drv_parm_int_optional(s, value, type, radix) \
48 { \
49 char _far *_ep; \
50 if ((s)[1] == ':') { \
51 value = (type) strtol((s) + 2, (const char _far* _far*) &_ep, radix); \
52 s = _ep; \
53 } else { \
54 value++; \
55 } \
56 }
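/* Editorial usage sketch (not part of the original source): with 's'
 * pointing at the option letter of a parameter such as "/c:3f8", the
 * parser in init_drv() below invokes
 *
 *   drv_parm_int(s, com_base, u16, 16);
 *
 * which verifies the ':' separator, converts "3f8" via strtol() using the
 * given radix, stores the result in com_base and leaves 's' at the first
 * character after the number; a missing colon prints an error message and
 * jumps to init_fail.
 */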
57
58/* set two-dimensional array of port options */
59#define set_port_option(opt, val) \
60 if (adapter_index == -1) { \
61 /* set option for all adapters and ports */ \
62 memset(opt, val, sizeof(opt)); \
63 } else if (port_index == -1) { \
64 /* set option for all ports on current adapter */ \
65 memset(opt[adapter_index], val, sizeof(*opt)); \
66 } else { \
67 /* set option for specific port */ \
68 opt[adapter_index][port_index] = val; \
69 }
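/* Editorial usage sketch (not part of the original source): the /a and /p
 * switches parsed in init_drv() below select adapter_index and port_index
 * for this macro, so for example:
 *
 *   "/s"            ->  memset(emulate_scsi, 1, sizeof(emulate_scsi))
 *   "/a:1 /s"       ->  memset(emulate_scsi[1], 1, sizeof(*emulate_scsi))
 *   "/a:1 /p:2 /s"  ->  emulate_scsi[1][2] = 1
 */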
70
71/* constants for the DevHlp_RegisterKrnlExit kernel exit interface, which is
72 * not defined in the DDK headers; see register_krnl_exit() func */
73#define DevHlp_RegisterKrnlExit 0x006f
74
75#define FLAG_KRNL_EXIT_ADD 0x1000
76#define FLAG_KRNL_EXIT_REMOVE 0x2000
77
78#define TYPE_KRNL_EXIT_NMI 0x0000 /* non-maskable interrupts (NMI) */
79#define TYPE_KRNL_EXIT_SFF 0x0001 /* system fatal faults */
80#define TYPE_KRNL_EXIT_PROCDUMP 0x0002
81#define TYPE_KRNL_EXIT_DYN 0x0003
82#define TYPE_KRNL_EXIT_INT13 0x0004 /* enable int13 IO */
83
84/* ------------------------ typedefs and structures ------------------------ */
85
86/* -------------------------- function prototypes -------------------------- */
87
88void _cdecl small_code_ (void);
89
90static int add_unit_info (IORB_CONFIGURATION _far *iorb_conf, int dt_ai,
91 int a, int p, int d, int scsi_id);
92
93static void register_krnl_exit (void);
94
95/* ------------------------ global/static variables ------------------------ */
96
97int debug = 0; /* if > 0, print debug messages to COM1 */
98int thorough_scan = 1; /* if != 0, perform thorough PCI scan */
99int init_reset = 1; /* if != 0, reset ports during init */
100int force_write_cache; /* if != 0, force write cache */
101int verbosity = 0; /* default is quiet. 1=show sign-on banner, >1=show adapter info during boot */
102int use_lvm_info = 1;
103int wrap_trace_buffer = 0;
104long com_baud = 0;
105
106PFN Device_Help = 0; /* pointer to device helper entry point */
107ULONG RMFlags = 0; /* required by resource manager library */
108PFN RM_Help0 = NULL; /* required by resource manager library */
109PFN RM_Help3 = NULL; /* required by resource manager library */
110HDRIVER rm_drvh; /* resource manager driver handle */
111char rm_drvname[80]; /* driver name as returned by RM */
112USHORT add_handle; /* driver handle (RegisterDeviceClass) */
113UCHAR timer_pool[TIMER_POOL_SIZE]; /* timer pool */
114char drv_name[] = "OS2AHCI"; /* driver name as string */
115
116/* resource manager driver information structure */
117DRIVERSTRUCT rm_drvinfo = {
118 drv_name, /* driver name */
119 "AHCI SATA Driver", /* driver description */
120 DVENDOR, /* vendor name */
121 CMVERSION_MAJOR, /* RM interface version major */
122 CMVERSION_MINOR, /* RM interface version minor */
123 BLD_YEAR, BLD_MONTH, BLD_DAY, /* date */
124 0, /* driver flags */
125 DRT_ADDDM, /* driver type */
126 DRS_ADD, /* driver sub type */
127 NULL /* driver callback */
128};
129
130ULONG drv_lock; /* driver-level spinlock */
131IORB_QUEUE driver_queue; /* driver-level IORB queue */
132AD_INFO ad_infos[MAX_AD]; /* adapter information list */
133int ad_info_cnt; /* number of entries in ad_infos[] */
134u16 ad_ignore; /* bitmap with adapter indexes to ignore */
135int init_complete; /* if != 0, initialization has completed */
136int suspended;
137int resume_sleep_flag;
138
139/* adapter/port-specific options saved when parsing the command line */
140u8 emulate_scsi[MAX_AD][AHCI_MAX_PORTS];
141u8 enable_ncq[MAX_AD][AHCI_MAX_PORTS];
142u8 link_speed[MAX_AD][AHCI_MAX_PORTS];
143u8 link_power[MAX_AD][AHCI_MAX_PORTS];
144u8 track_size[MAX_AD][AHCI_MAX_PORTS];
145
146static char init_msg[] = "%s driver version %d.%02d\n";
147static char exit_msg[] = "%s driver *not* installed\n";
148char BldLevel[] = BLDLEVEL;
149
150/* ----------------------------- start of code ----------------------------- */
151
152/******************************************************************************
153 * OS/2 device driver main strategy function. This function is only used
154 * for initialization purposes; all other calls go directly to the adapter
155 * device driver's strategy function.
156 *
157 * NOTE: this is also used as the IDC entry point. We expect an IOCTL request
158 * packet for IDC calls, so they can be handled by gen_ioctl.
159 */
160USHORT _cdecl c_strat(RPH _far *req)
161{
162 u16 rc;
163
164 switch (req->Cmd) {
165
166 case CMDInitBase:
167 rc = init_drv((RPINITIN _far *) req);
168 break;
169
170 case CMDShutdown:
171 rc = exit_drv(((RPSAVERESTORE _far *) req)->FuncCode);
172 break;
173
174 case CMDGenIOCTL:
175 rc = gen_ioctl((RP_GENIOCTL _far *) req);
176 break;
177
178 case CMDINPUT:
179 rc = char_dev_input((RP_RWV _far *) req);
180 break;
181
182 case CMDSaveRestore:
183 rc = sr_drv(((RPSAVERESTORE _far *) req)->FuncCode);
184 break;
185
186 default:
187 rc = STDON | STATUS_ERR_UNKCMD;
188 break;
189 }
190
191 return(rc);
192}
193
194/******************************************************************************
195 * Initialize the os2ahci driver. This includes command line parsing, scanning
196 * the PCI bus for supported AHCI adapters, etc.
197 */
198USHORT init_drv(RPINITIN _far *req)
199{
200 static int init_drv_called;
201 static int init_drv_failed;
202 RPINITOUT _far *rsp = (RPINITOUT _far *) req;
203 DDD_PARM_LIST _far *ddd_pl = (DDD_PARM_LIST _far *) req->InitArgs;
204 APIRET rmrc;
205 char _far *cmd_line;
206 char _far *s;
207 int adapter_index = -1;
208 int port_index = -1;
209 int invert_option;
210 int optval;
211 u16 vendor;
212 u16 device;
213
214 if (init_drv_called) {
215 /* This is the init call for the second (legacy IBMS506$) character
216 * device driver. If the main driver failed initialization, fail this
217 * one as well.
218 */
219 rsp->CodeEnd = (u16) end_of_code;
220 rsp->DataEnd = (u16) &end_of_data;
221 return(STDON | ((init_drv_failed) ? ERROR_I24_QUIET_INIT_FAIL : 0));
222 }
223 init_drv_called = 1;
224 suspended = 0;
225 resume_sleep_flag = 0;
226 memset(ad_infos, 0, sizeof(ad_infos));
227
228 /* set device helper entry point */
229 Device_Help = req->DevHlpEP;
230
231 /* create driver-level spinlock */
232 DevHelp_CreateSpinLock(&drv_lock);
233
234 /* initialize libc code */
235 init_libc();
236
237 /* register driver with resource manager */
238 if ((rmrc = RMCreateDriver(&rm_drvinfo, &rm_drvh)) != RMRC_SUCCESS) {
239 cprintf("%s: failed to register driver with resource manager (rc = %d)\n",
240 drv_name, rmrc);
241 goto init_fail;
242 }
243
244 /* parse command line parameters */
245 cmd_line = (char _far *) ((u32) ddd_pl & 0xffff0000l) + ddd_pl->cmd_line_args;
246
247 for (s = cmd_line; *s != 0; s++) {
248 if (*s == '/') {
249 if ((invert_option = (s[1] == '!')) != 0) {
250 s++;
251 }
252 s++;
253 switch (tolower(*s)) {
254
255 case '\0':
256 /* end of command line; can only happen if command line is incorrect */
257 cprintf("%s: incomplete command line option\n", drv_name);
258 goto init_fail;
259
260 case 'b':
261 drv_parm_int(s, com_baud, u32, 10);
262 break;
263
264 case 'c':
265 /* set COM port base address for debug messages */
266 drv_parm_int(s, com_base, u16, 16);
267 if (com_base == 1) com_base = 0x3f8;
268 if (com_base == 2) com_base = 0x2f8;
269 break;
270
271 case 'd':
272 /* increase debug level */
273 drv_parm_int_optional(s, debug, int, 10);
274 break;
275
276 case 'g':
277 /* add specified PCI ID as a supported generic AHCI adapter */
278 drv_parm_int(s, vendor, u16, 16);
279 s--;
280 drv_parm_int(s, device, u16, 16);
281 if (add_pci_id(vendor, device)) {
282 cprintf("%s: failed to add PCI ID %04x:%04x\n", drv_name, vendor, device);
283 goto init_fail;
284 }
285 thorough_scan = 1;
286 break;
287
288 case 't':
289 /* perform thorough PCI scan (i.e. look for individual supported PCI IDs) */
290 thorough_scan = !invert_option;
291 break;
292
293 case 'r':
294 /* reset ports during initialization */
295 init_reset = !invert_option;
296 break;
297
298 case 'f':
299 /* force write cache regardless of IORB flags */
300 force_write_cache = 1;
301 break;
302
303 case 'a':
304 /* set adapter index for adapter and port-related options */
305 drv_parm_int(s, adapter_index, int, 10);
306 if (adapter_index < 0 || adapter_index >= MAX_AD) {
307 cprintf("%s: invalid adapter index (%d)\n", drv_name, adapter_index);
308 goto init_fail;
309 }
310 break;
311
312 case 'p':
313 /* set port index for port-related options */
314 drv_parm_int(s, port_index, int, 10);
315 if (port_index < 0 || port_index >= AHCI_MAX_PORTS) {
316 cprintf("%s: invalid port index (%d)\n", drv_name, port_index);
317 goto init_fail;
318 }
319 break;
320
321 case 'i':
322 /* ignore current adapter index */
323 if (adapter_index >= 0) {
324 ad_ignore |= 1U << adapter_index;
325 }
326 break;
327
328 case 's':
329 /* enable SCSI emulation for ATAPI devices */
330 set_port_option(emulate_scsi, !invert_option);
331 break;
332
333 case 'n':
334 /* enable NCQ */
335 set_port_option(enable_ncq, !invert_option);
336 break;
337
338 case 'l':
339 /* set link speed or power savings */
340 s++;
341 switch (tolower(*s)) {
342 case 's':
343 /* set link speed */
344 drv_parm_int(s, optval, int, 10);
345 set_port_option(link_speed, optval);
346 break;
347 case 'p':
348 /* set power management */
349 drv_parm_int(s, optval, int, 10);
350 set_port_option(link_power, optval);
351 break;
352 default:
353 cprintf("%s: invalid link parameter (%c)\n", drv_name, *s);
354 goto init_fail;
355 }
356 /* need to reset the port in order to establish link settings */
357 init_reset = 1;
358 break;
359
360 case '4':
361 /* enable 4K sector geometry enhancement (track size = 56) */
362 if (!invert_option) {
363 set_port_option(track_size, 56);
364 }
365 break;
366
367 case 'z':
368 /* Specify to not use the LVM information. There is no reason why anyone would
369 * want to do this, but previous versions of this driver did not have LVM capability,
370 * so this switch is here temporarily just in case.
371 */
372 use_lvm_info = !invert_option;
373 break;
374
375 case 'v':
376 /* be verbose during boot */
377 drv_parm_int_optional(s, verbosity, int, 10);
378 break;
379
380 case 'w':
381 /* Specify to allow the trace buffer to wrap when full. */
382 wrap_trace_buffer = !invert_option;
383 break;
384
385 case 'q':
386 /* Temporarily output a non-fatal message to get anyone using this
387 * undocumented switch to stop using it. This will be removed soon
388 * and the error will become fatal.
389 */
390 cprintf("%s: unknown option: /%c\n", drv_name, *s);
391 break;
392
393 default:
394 cprintf("%s: unknown option: /%c\n", drv_name, *s);
395 goto init_fail;
396 }
397 }
398 }
399
400 /* print initialization message */
401 ciprintf(init_msg, drv_name, VERSION / 100, VERSION % 100);
402
403 if (com_baud) init_com(com_baud); /* initialize com port for debug output */
404
405 /* initialize trace buffer if applicable */
406 if (TRACE_ACTIVE) {
407 /* debug is on, but COM port is off -> use our trace buffer */
408 trace_init(AHCI_TRACE_BUF_SIZE);
409 } else {
410 trace_init(AHCI_INFO_BUF_SIZE);
411 }
412
413 ntprintf("BldLevel: %s\n", BldLevel);
414 ntprintf("CmdLine: %Fs\n", cmd_line);
415
416 /* scan PCI bus for supported devices */
417 scan_pci_bus();
418
419 if (ad_info_cnt > 0) {
420 /* initialization succeeded and we found at least one AHCI adapter */
421 ADD_InitTimer(timer_pool, sizeof(timer_pool));
422 //NOT_USED mdelay_cal();
423
424 if (DevHelp_RegisterDeviceClass(drv_name, (PFN) add_entry, 0, 1, &add_handle)) {
425 cprintf("%s: couldn't register device class\n", drv_name);
426 goto init_fail;
427 }
428
429 /* allocate context hooks */
430 if (DevHelp_AllocateCtxHook(mk_NPFN(restart_hook), &restart_ctxhook_h) != 0 ||
431 DevHelp_AllocateCtxHook(mk_NPFN(reset_hook), &reset_ctxhook_h) != 0 ||
432 DevHelp_AllocateCtxHook(mk_NPFN(engine_hook), &engine_ctxhook_h)) {
433 cprintf("%s: failed to allocate task-time context hooks\n", drv_name);
434 goto init_fail;
435 }
436
437 rsp->CodeEnd = (u16) end_of_code;
438 rsp->DataEnd = (u16) &end_of_data;
439
440 /* register kernel exit routine for trap dumps */
441 register_krnl_exit();
442
443 return(STDON);
444
445 } else {
446 /* no adapters found */
447 ciprintf(" No adapters found.\n");
448 }
449
450init_fail:
451 /* initialization failed; set segment sizes to 0 and return error */
452 rsp->CodeEnd = 0;
453 rsp->DataEnd = 0;
454 init_drv_failed = 1;
455
456 /* free context hooks */
457 if (engine_ctxhook_h != 0) DevHelp_FreeCtxHook(engine_ctxhook_h);
458 if (reset_ctxhook_h != 0) DevHelp_FreeCtxHook(reset_ctxhook_h);
459 if (restart_ctxhook_h != 0) DevHelp_FreeCtxHook(restart_ctxhook_h);
460
461 if (rm_drvh != 0) {
462 /* remove driver from resource manager */
463 RMDestroyDriver(rm_drvh);
464 }
465
466 ciprintf(exit_msg, drv_name);
467 return(STDON | ERROR_I24_QUIET_INIT_FAIL);
468}
469
470/******************************************************************************
471 * Generic IOCTL via character device driver. IOCTLs are used to control the
472 * driver operation and to execute native ATA and ATAPI (SCSI) commands from
473 * ring 3 applications. On top of that, some predefined IOCTLs (e.g. SMART
474 * commands for ATA disks) are implemented here.
475 */
476USHORT gen_ioctl(RP_GENIOCTL _far *ioctl)
477{
478 dprintf("IOCTL 0x%x/0x%x\n", (u16) ioctl->Category, (u16) ioctl->Function);
479
480 switch (ioctl->Category) {
481
482 case OS2AHCI_IOCTL_CATEGORY:
483 switch (ioctl->Function) {
484
485 case OS2AHCI_IOCTL_GET_DEVLIST:
486 return(ioctl_get_devlist(ioctl));
487
488 case OS2AHCI_IOCTL_PASSTHROUGH:
489 return(ioctl_passthrough(ioctl));
490
491 }
492 break;
493
494 case DSKSP_CAT_GENERIC:
495 return(ioctl_gen_dsk(ioctl));
496
497 case DSKSP_CAT_SMART:
498 return(ioctl_smart(ioctl));
499
500 }
501
502 return(STDON | STATUS_ERR_UNKCMD);
503}
504
505/******************************************************************************
506 * Read from character device. If tracing is on (internal ring buffer trace),
507 * we return data from the trace buffer; if not, we might return a device
508 * dump similar to IBM1S506.ADD/DANIS506.ADD (TODO).
509 */
510USHORT char_dev_input(RP_RWV _far *rwrb)
511{
512 return(trace_char_dev(rwrb));
513}
514
515/******************************************************************************
516 * Device driver exit handler. This handler is called when OS/2 shuts down and
517 * flushes the write caches of all attached devices. Since this is effectively
518 * the same thing we do when suspending, we'll call out to the corresponding suspend
519 * function.
520 *
521 * NOTE: Errors are ignored because there's no way we could stop the shutdown
522 * or do something about the error, unless retrying endlessly is
523 * considered an option.
524 */
525USHORT exit_drv(int func)
526{
527 dprintf("exit_drv(%d) called\n", func);
528
529 if (func == 0) {
530 /* we're only interested in the second phase of the shutdown */
531 return(STDON);
532 }
533
534 suspend();
535 return(STDON);
536}
537
538/******************************************************************************
539 * Device driver suspend/resume handler. This handler is called when ACPI is
540 * executing a suspend or resume.
541 */
542USHORT sr_drv(int func)
543{
544 dprintf("sr_drv(%d) called\n", func);
545
546 if (func) resume();
547 else suspend();
548
549 return(STDON);
550}
551
552/******************************************************************************
553 * ADD entry point. This is the main entry point for all ADD requests. Due to
554 * the asynchronous nature of ADD drivers, this function primarily queues the
555 * IORB(s) to the corresponding adapter or port queues, then triggers the
556 * state machine to initiate processing of the queued IORBs.
557 *
558 * NOTE: In order to prevent race conditions or engine stalls, certain rules
559 * around locking, unlocking and IORB handling in general have been
560 * established. Refer to the comments in "trigger_engine()" for
561 * details.
562 */
563void _cdecl _far _loadds add_entry(IORBH _far *first_iorb)
564{
565 IORBH _far *iorb;
566 IORBH _far *next = NULL;
567
568 spin_lock(drv_lock);
569
570 for (iorb = first_iorb; iorb != NULL; iorb = next) {
571 /* Queue this IORB. Queues primarily exist on port level but there are
572 * some requests which affect the whole driver, most notably
573 * IOCC_CONFIGURATION. In either case, adding the IORB to the driver or
574 * port queue will change the links, thus we need to save the original
575 * link in 'next'.
576 */
577 next = (iorb->RequestControl & IORB_CHAIN) ? iorb->pNxtIORB : 0;
578
579 iorb->Status = 0;
580 iorb->ErrorCode = 0;
581 memset(&iorb->ADDWorkSpace, 0x00, sizeof(ADD_WORKSPACE));
582
583 if (iorb_driver_level(iorb)) {
584 /* driver-level IORB */
585 iorb->UnitHandle = 0;
586 iorb_queue_add(&driver_queue, iorb);
587
588 } else {
589 /* port-level IORB */
590 int a = iorb_unit_adapter(iorb);
591 int p = iorb_unit_port(iorb);
592 int d = iorb_unit_device(iorb);
593
594 if (a >= ad_info_cnt ||
595 p > ad_infos[a].port_max ||
596 d > ad_infos[a].ports[p].dev_max ||
597 (ad_infos[a].port_map & (1UL << p)) == 0) {
598
599 /* unit handle outside of the allowed range */
600 dprintf("warning: IORB for %d.%d.%d out of range\n", a, p, d);
601 iorb->Status = IORB_ERROR;
602 iorb->ErrorCode = IOERR_CMD_SYNTAX;
603 iorb_complete(iorb);
604 continue;
605 }
606
607 iorb_queue_add(&ad_infos[a].ports[p].iorb_queue, iorb);
608 }
609 }
610
611 /* trigger state machine */
612 trigger_engine();
613
614 spin_unlock(drv_lock);
615}
616
617/******************************************************************************
618 * Trigger IORB queue engine. This is a wrapper function for trigger_engine_1()
619 * which will try to get all IORBs sent on their way a couple of times. If
620 * there are still IORBs ready for processing after this, this function will
621 * hand off to a context hook which will continue to trigger the engine until
622 * all IORBs have been sent.
623 *
624 * NOTE: While initialization has not completed (or during suspend/resume
625 * operations), this function will loop indefinitely because we can't
626 * rely on interrupt handlers or context hooks and complex IORBs
627 * requiring multiple requeues would eventually hang and time out if
628 * we stopped triggering here.
629 */
630void trigger_engine(void)
631{
632 int i;
633
634 for (i = 0; i < 3 || !init_complete; i++) {
635 if (trigger_engine_1() == 0) {
636 /* done -- all IORBs have been sent on their way */
637 return;
638 }
639 }
640
641 /* Something keeps bouncing; hand off to the engine context hook which will
642 * keep trying in the background.
643 */
644 DevHelp_ArmCtxHook(0, engine_ctxhook_h);
645}
646
647/******************************************************************************
648 * Trigger IORB queue engine in order to send commands in the driver/port IORB
649 * queues to the AHCI hardware. This function will return the number of IORBs
650 * sent. Keep in mind that IORBs might "bounce" if the adapter/port is not in
651 * a state to accept the command, thus it might take quite a few calls to get
652 * all IORBs on their way. This is why there's a wrapper function which tries
653 * it a few times, then hands off to a context hook which will keep trying in
654 * the background.
655 *
656 * IORBs might complete before send_iorb() has returned, at any time during
657 * interrupt processing or on another CPU on SMP systems. IORB completion
658 * means modifications to the corresponding IORB queue (the completed IORB
659 * is removed from the queue) thus we need to protect the IORB queues from
660 * race conditions. The safest approach short of keeping the driver-level
661 * spinlock acquired permanently is to keep it throughout this function and
662 * release it temporarily in send_iorb().
663 *
664 * This implies that the handler functions are fully responsible for acquiring
665 * the driver-level spinlock when they need it, and for releasing it again.
666 *
667 * As a rule of thumb, get the driver-level spinlock whenever accessing
668 * volatile variables (IORB queues, values in ad_infos[], ...).
669 *
670 * Additional Notes:
671 *
672 * - This function is expected to be called with the spinlock acquired
673 *
674 * - Adapters can be flagged as 'busy' which means no new IORBs are sent (they
675 * just remain in the queue). This can be used to release the driver-level
676 * spinlock while making sure no new IORBs are going to hit the hardware.
677 * In order to prevent engine stalls, all handlers using this functionality
678 * need to invoke trigger_engine() after resetting the busy flag.
679 *
680 * - Driver-level IORBs are not synchronized by adapter-level 'busy' flags.
681 * However, the driver-level queue is worked "one entry at a time" which
682 * means that no new IORBs will be queued on the driver-level queue until
683 * the head element has completed processing. This means that driver-
684 * level IORB handlers don't need to protect against each other. But they
685 * do need to keep in mind interference with port-level IORBs:
686 *
687 * - Driver-level IORB handlers must obtain the spinlock and/or flag all
688 * adapters as 'busy' which are affected by the driver-level IORB
689 *
690 * - Driver-level IORB handlers must not access the hardware of a
691 * particular adapter if it's flagged as 'busy' by another IORB.
692 */
693int trigger_engine_1(void)
694{
695 IORBH _far *iorb;
696 IORBH _far *next;
697 int iorbs_sent = 0;
698 int a;
699 int p;
700
701 iorbs_sent = 0;
702
703 /* process driver-level IORBs */
704 if ((iorb = driver_queue.root) != NULL && !add_workspace(iorb)->processing) {
705 send_iorb(iorb);
706 iorbs_sent++;
707 }
708
709 /* process port-level IORBs */
710 for (a = 0; a < ad_info_cnt; a++) {
711 AD_INFO *ai = ad_infos + a;
712 if (ai->busy) {
713 /* adapter is busy; don't process any IORBs */
714 continue;
715 }
716 for (p = 0; p <= ai->port_max; p++) {
717 /* send all queued IORBs on this port */
718 next = NULL;
719 for (iorb = ai->ports[p].iorb_queue.root; iorb != NULL; iorb = next) {
720 next = iorb->pNxtIORB;
721 if (!add_workspace(iorb)->processing) {
722 send_iorb(iorb);
723 iorbs_sent++;
724 }
725 }
726 }
727 }
728
729 return(iorbs_sent);
730}
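/* Editorial sketch (not part of the original source) of the adapter 'busy'
 * protocol described in the notes above, as used for lengthy operations
 * such as the port scan in iocm_device_table():
 *
 *   spin_lock(drv_lock);
 *   ai->busy = 1;                 // keep new IORBs away from this adapter
 *   spin_unlock(drv_lock);
 *   ...lengthy hardware access, e.g. ahci_scan_ports(ai)...
 *   spin_lock(drv_lock);
 *   ai->busy = 0;
 *   trigger_engine();             // prevent engine stalls (see above)
 *   spin_unlock(drv_lock);
 */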
731
732/******************************************************************************
733 * Send a single IORB to the corresponding AHCI adapter/port. This is just a
734 * switch board for calling the corresponding iocc_*() handler function.
735 *
736 * NOTE: This function is expected to be called with the driver-level spinlock
737 * acquired. It will release it before calling any of the handler
738 * functions and re-acquire it when done.
739 */
740void send_iorb(IORBH _far *iorb)
741{
742 /* Mark IORB as "processing" before doing anything else. Once the IORB is
743 * marked as "processing", we can release the spinlock because subsequent
744 * invocations of trigger_engine() (e.g. at interrupt time) will ignore this
745 * IORB.
746 */
747 add_workspace(iorb)->processing = 1;
748 spin_unlock(drv_lock);
749
750 switch (iorb->CommandCode) {
751
752 case IOCC_CONFIGURATION:
753 iocc_configuration(iorb);
754 break;
755
756 case IOCC_DEVICE_CONTROL:
757 iocc_device_control(iorb);
758 break;
759
760 case IOCC_UNIT_CONTROL:
761 iocc_unit_control(iorb);
762 break;
763
764 case IOCC_GEOMETRY:
765 iocc_geometry(iorb);
766 break;
767
768 case IOCC_EXECUTE_IO:
769 iocc_execute_io(iorb);
770 break;
771
772 case IOCC_UNIT_STATUS:
773 iocc_unit_status(iorb);
774 break;
775
776 case IOCC_ADAPTER_PASSTHRU:
777 iocc_adapter_passthru(iorb);
778 break;
779
780 default:
781 /* unsupported call */
782 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
783 iorb_done(iorb);
784 break;
785 }
786
787 /* re-acquire spinlock before returning to trigger_engine() */
788 spin_lock(drv_lock);
789}
790
791/******************************************************************************
792 * Handle IOCC_CONFIGURATION requests.
793 */
794void iocc_configuration(IORBH _far *iorb)
795{
796 int a;
797
798 switch (iorb->CommandModifier) {
799
800 case IOCM_COMPLETE_INIT:
801 /* Complete initialization. From now on, we won't have to restore the BIOS
802 * configuration after each command and we're fully operational (i.e. will
803 * use interrupts, timers and context hooks instead of polling).
804 */
805 if (!init_complete) {
806 dprintf("leaving initialization mode\n");
807 for (a = 0; a < ad_info_cnt; a++) {
808 lock_adapter(ad_infos + a);
809 ahci_complete_init(ad_infos + a);
810 }
811 init_complete = 1;
812
813 /* DAZ turn off COM port output if on */
814 //com_base = 0;
815
816 /* release all adapters */
817 for (a = 0; a < ad_info_cnt; a++) {
818 unlock_adapter(ad_infos + a);
819 }
820
821 #ifdef LEGACY_APM
822 /* register APM hook */
823 apm_init();
824 #endif
825
826 if (!TRACE_ACTIVE) build_user_info();
827 }
828 iorb_done(iorb);
829 break;
830
831 case IOCM_GET_DEVICE_TABLE:
832 /* construct a device table */
833 iocm_device_table(iorb);
834 break;
835
836 default:
837 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
838 iorb_done(iorb);
839 break;
840 }
841}
842
843/******************************************************************************
844 * Handle IOCC_DEVICE_CONTROL requests.
845 */
846void iocc_device_control(IORBH _far *iorb)
847{
848 AD_INFO *ai = ad_infos + iorb_unit_adapter(iorb);
849 IORBH _far *ptr;
850 IORBH _far *next = NULL;
851 int p = iorb_unit_port(iorb);
852 int d = iorb_unit_device(iorb);
853
854 switch (iorb->CommandModifier) {
855
856 case IOCM_ABORT:
857 /* abort all pending commands on specified port and device */
858 spin_lock(drv_lock);
859 for (ptr = ai->ports[p].iorb_queue.root; ptr != NULL; ptr = next) {
860 next = ptr->pNxtIORB;
861 /* move all matching IORBs to the abort queue */
862 if (ptr != iorb && iorb_unit_device(ptr) == d) {
863 iorb_queue_del(&ai->ports[p].iorb_queue, ptr);
864 iorb_queue_add(&abort_queue, ptr);
865 ptr->ErrorCode = IOERR_CMD_ABORTED;
866 }
867 }
868 spin_unlock(drv_lock);
869
870 /* trigger reset context hook which will finish the abort processing */
871 DevHelp_ArmCtxHook(0, reset_ctxhook_h);
872 break;
873
874 case IOCM_SUSPEND:
875 case IOCM_RESUME:
876 case IOCM_GET_QUEUE_STATUS:
877 /* Suspend/resume operations allow access to the hardware for other
878 * entities such as IBMIDECD.FLT. Since os2ahci implements both ATA
879 * and ATAPI in the same driver, this won't be required.
880 */
881 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
882 break;
883
884 case IOCM_LOCK_MEDIA:
885 case IOCM_UNLOCK_MEDIA:
886 case IOCM_EJECT_MEDIA:
887 /* unit control commands to lock, unlock and eject media */
888 /* will be supported later... */
889 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
890 break;
891
892 default:
893 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
894 break;
895 }
896
897 iorb_done(iorb);
898}
899
900/******************************************************************************
901 * Handle IOCC_UNIT_CONTROL requests.
902 */
903void iocc_unit_control(IORBH _far *iorb)
904{
905 IORB_UNIT_CONTROL _far *iorb_uc = (IORB_UNIT_CONTROL _far *) iorb;
906 int a = iorb_unit_adapter(iorb);
907 int p = iorb_unit_port(iorb);
908 int d = iorb_unit_device(iorb);
909
910 spin_lock(drv_lock);
911 switch (iorb->CommandModifier) {
912
913 case IOCM_ALLOCATE_UNIT:
914 /* allocate unit for exclusive access */
915 if (ad_infos[a].ports[p].devs[d].allocated) {
916 iorb_seterr(iorb, IOERR_UNIT_ALLOCATED);
917 } else {
918 ad_infos[a].ports[p].devs[d].allocated = 1;
919 }
920 break;
921
922 case IOCM_DEALLOCATE_UNIT:
923 /* deallocate exclusive access to unit */
924 if (!ad_infos[a].ports[p].devs[d].allocated) {
925 iorb_seterr(iorb, IOERR_UNIT_NOT_ALLOCATED);
926 } else {
927 ad_infos[a].ports[p].devs[d].allocated = 0;
928 }
929 break;
930
931 case IOCM_CHANGE_UNITINFO:
932 /* Change unit (device) information. One reason for this IOCM is the
933 * interface for filter device drivers: a filter device driver can
934 * either change existing UNITINFOs or permanently allocate units
935 * and fabricate new [logical] units; the former is the reason why we
936 * must store the pointer to the updated UNITINFO for subsequent
937 * IOCC_CONFIGURATION/IOCM_GET_DEVICE_TABLE calls.
938 */
939 if (!ad_infos[a].ports[p].devs[d].allocated) {
940 iorb_seterr(iorb, IOERR_UNIT_NOT_ALLOCATED);
941 break;
942 }
943 ad_infos[a].ports[p].devs[d].unit_info = iorb_uc->pUnitInfo;
944 break;
945
946 default:
947 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
948 break;
949 }
950
951 spin_unlock(drv_lock);
952 iorb_done(iorb);
953}
954
955/******************************************************************************
956 * Scan all ports for AHCI devices and construct a DASD device table.
957 *
958 * NOTES: This function may be called multiple times. Only the first
959 * invocation will actually scan for devices; all subsequent calls will
960 * merely return the results of the initial scan, potentially augmented
961 * by modified unit infos after IOCC_CONFIGURATION/IOCM_CHANGE_UNITINFO
962 * requests.
963 *
964 * In order to support applications that can't deal with ATAPI devices
965 * (i.e. need a SCSI adapter), os2ahci will optionally report ATAPI
966 * devices as SCSI devices. The corresponding SCSI adapter doesn't
967 * really exist and is only reported here for the IOCM_GET_DEVICE_TABLE
968 * request. The units attached to this adapter will use the real HW
969 * unit IDs, thus we'll never receive a command specific to the
970 * emulated SCSI adapter and won't need to set up any sort of entity
971 * for it; the only purpose of the emulated SCSI adapter is to pass the
972 * bus type "AI_DEVBUS_SCSI_2" upstream, and the emulated units, of
973 * course. The emulated SCSI target IDs are allocated as follows:
974 *
975 * 0 the virtual adapter
976 * 1..n emulated devices; SCSI target ID increments sequentially
977 */
978void iocm_device_table(IORBH _far *iorb)
979{
980 IORB_CONFIGURATION _far *iorb_conf;
981 DEVICETABLE _far *dt;
982 char _far *pos;
983 int scsi_units = 0;
984 int scsi_id = 1;
985 int rc;
986 int dta;
987 int a;
988 int p;
989 int d;
990
991 iorb_conf = (IORB_CONFIGURATION _far *) iorb;
992 dt = iorb_conf->pDeviceTable;
993
994 spin_lock(drv_lock);
995
996 /* initialize device table header */
997 dt->ADDLevelMajor = ADD_LEVEL_MAJOR;
998 dt->ADDLevelMinor = ADD_LEVEL_MINOR;
999 dt->ADDHandle = add_handle;
1000 dt->TotalAdapters = ad_info_cnt + 1;
1001
1002 /* set start of adapter and device information tables */
1003 pos = (char _far *) (dt->pAdapter + dt->TotalAdapters);
1004
1005 /* go through all adapters, including the virtual SCSI adapter */
1006 for (dta = 0; dta < dt->TotalAdapters; dta++) {
1007 ADAPTERINFO _far *ptr = (ADAPTERINFO _far *) pos;
1008
1009 /* sanity check for sufficient space in device table */
1010 if ((u32) (ptr + 1) - (u32) dt > iorb_conf->DeviceTableLen) {
1011 dprintf("error: device table provided by DASD too small\n");
1012 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE);
1013 goto iocm_device_table_done;
1014 }
1015
1016 dt->pAdapter[dta] = (ADAPTERINFO _near *) ((u32) ptr & 0xffff);
1017 memset(ptr, 0x00, sizeof(*ptr));
1018
1019 ptr->AdapterIOAccess = AI_IOACCESS_BUS_MASTER;
1020 ptr->AdapterHostBus = AI_HOSTBUS_OTHER | AI_BUSWIDTH_32BIT;
1021 ptr->AdapterFlags = AF_16M | AF_HW_SCATGAT;
1022 ptr->MaxHWSGList = AHCI_MAX_SG / 2; /* AHCI S/G elements are 22 bits */
1023
1024 if (dta < ad_info_cnt) {
1025 /* this is a physical AHCI adapter */
1026 AD_INFO *ad_info = ad_infos + dta;
1027
1028 ptr->AdapterDevBus = AI_DEVBUS_ST506 | AI_DEVBUS_32BIT;
1029 sprintf(ptr->AdapterName, "AHCI_%d", dta);
1030
1031 if (!ad_info->port_scan_done) {
1032 /* first call; need to scan AHCI hardware for devices */
1033 if (ad_info->busy) {
1034 dprintf("error: port scan requested while adapter was busy\n");
1035 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE);
1036 goto iocm_device_table_done;
1037 }
1038 ad_info->busy = 1;
1039 spin_unlock(drv_lock);
1040 rc = ahci_scan_ports(ad_info);
1041 spin_lock(drv_lock);
1042 ad_info->busy = 0;
1043
1044 if (rc != 0) {
1045 dprintf("error: port scan failed on adapter #%d\n", dta);
1046 iorb_seterr(iorb, IOERR_CMD_SW_RESOURCE);
1047 goto iocm_device_table_done;
1048 }
1049 ad_info->port_scan_done = 1;
1050 }
1051
1052 /* insert physical (i.e. AHCI) devices into the device table */
1053 for (p = 0; p <= ad_info->port_max; p++) {
1054 for (d = 0; d <= ad_info->ports[p].dev_max; d++) {
1055 if (ad_info->ports[p].devs[d].present) {
1056 if (ad_info->ports[p].devs[d].atapi && emulate_scsi[dta][p]) {
1057 /* only report this unit as SCSI unit */
1058 scsi_units++;
1059 continue;
1060 }
1061 if (add_unit_info(iorb_conf, dta, dta, p, d, 0)) {
1062 goto iocm_device_table_done;
1063 }
1064 }
1065 }
1066 }
1067
1068 } else {
1069 /* this is the virtual SCSI adapter */
1070 if (scsi_units == 0) {
1071 /* not a single unit to be emulated via SCSI */
1072 dt->TotalAdapters--;
1073 break;
1074 }
1075
1076 /* set adapter name and bus type to mimic a SCSI controller */
1077 ptr->AdapterDevBus = AI_DEVBUS_SCSI_2 | AI_DEVBUS_16BIT;
1078 sprintf(ptr->AdapterName, "AHCI_SCSI_0");
1079
1080 /* add all ATAPI units to be emulated by this virtual adapter */
1081 for (a = 0; a < ad_info_cnt; a++) {
1082 AD_INFO *ad_info = ad_infos + a;
1083
1084 for (p = 0; p <= ad_info->port_max; p++) {
1085 for (d = 0; d <= ad_info->ports[p].dev_max; d++) {
1086 if (ad_info->ports[p].devs[d].present &&
1087 ad_info->ports[p].devs[d].atapi &&
1088 emulate_scsi[a][p]) {
1089 if (add_unit_info(iorb_conf, dta, a, p, d, scsi_id++)) {
1090 goto iocm_device_table_done;
1091 }
1092 }
1093 }
1094 }
1095 }
1096 }
1097
1098 /* calculate offset for next adapter */
1099 pos = (char _far *) (ptr->UnitInfo + ptr->AdapterUnits);
1100 }
1101
1102iocm_device_table_done:
1103 spin_unlock(drv_lock);
1104 iorb_done(iorb);
1105}
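/* Editorial sketch (not part of the original source) of the device table
 * layout assumed by the pointer arithmetic above and in add_unit_info():
 *
 *   DEVICETABLE header  (ADDLevelMajor/Minor, ADDHandle, TotalAdapters)
 *   pAdapter[0 .. TotalAdapters-1]   near pointers within the same segment
 *   ADAPTERINFO #0  followed by its UNITINFO[0 .. AdapterUnits-1]
 *   ADAPTERINFO #1  followed by its UNITINFO entries
 *   ...
 *
 * i.e. each adapter's UNITINFO entries are packed directly behind its
 * ADAPTERINFO, and the next ADAPTERINFO starts right after them ('pos'
 * above advances past ptr->UnitInfo + ptr->AdapterUnits).
 */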
1106
1107/******************************************************************************
1108 * Handle IOCC_GEOMETRY requests.
1109 */
1110void iocc_geometry(IORBH _far *iorb)
1111{
1112 switch (iorb->CommandModifier) {
1113
1114 case IOCM_GET_MEDIA_GEOMETRY:
1115 case IOCM_GET_DEVICE_GEOMETRY:
1116 add_workspace(iorb)->idempotent = 1;
1117 ahci_get_geometry(iorb);
1118 break;
1119
1120 default:
1121 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1122 iorb_done(iorb);
1123 }
1124}
1125
1126/******************************************************************************
1127 * Handle IOCC_EXECUTE_IO requests.
1128 */
1129void iocc_execute_io(IORBH _far *iorb)
1130{
1131 switch (iorb->CommandModifier) {
1132
1133 case IOCM_READ:
1134 add_workspace(iorb)->idempotent = 1;
1135 ahci_read(iorb);
1136 break;
1137
1138 case IOCM_READ_VERIFY:
1139 add_workspace(iorb)->idempotent = 1;
1140 ahci_verify(iorb);
1141 break;
1142
1143 case IOCM_WRITE:
1144 add_workspace(iorb)->idempotent = 1;
1145 ahci_write(iorb);
1146 break;
1147
1148 case IOCM_WRITE_VERIFY:
1149 add_workspace(iorb)->idempotent = 1;
1150 ahci_write(iorb);
1151 break;
1152
1153 default:
1154 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1155 iorb_done(iorb);
1156 }
1157}
1158
1159/******************************************************************************
1160 * Handle IOCC_UNIT_STATUS requests.
1161 */
1162void iocc_unit_status(IORBH _far *iorb)
1163{
1164 switch (iorb->CommandModifier) {
1165
1166 case IOCM_GET_UNIT_STATUS:
1167 add_workspace(iorb)->idempotent = 1;
1168 ahci_unit_ready(iorb);
1169 break;
1170
1171 default:
1172 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1173 iorb_done(iorb);
1174 }
1175}
1176
1177/******************************************************************************
1178 * Handle IOCC_ADAPTER_PASSTHROUGH requests.
1179 */
1180void iocc_adapter_passthru(IORBH _far *iorb)
1181{
1182 switch (iorb->CommandModifier) {
1183
1184 case IOCM_EXECUTE_CDB:
1185 add_workspace(iorb)->idempotent = 0;
1186 ahci_execute_cdb(iorb);
1187 break;
1188
1189 case IOCM_EXECUTE_ATA:
1190 add_workspace(iorb)->idempotent = 0;
1191 ahci_execute_ata(iorb);
1192 break;
1193
1194 default:
1195 iorb_seterr(iorb, IOERR_CMD_NOT_SUPPORTED);
1196 iorb_done(iorb);
1197 }
1198}
1199
1200/******************************************************************************
1201 * Add an IORB to the specified queue. This function must be called with the
1202 * adapter-level spinlock acquired.
1203 */
1204void iorb_queue_add(IORB_QUEUE _far *queue, IORBH _far *iorb)
1205{
1206 if (iorb_priority(iorb)) {
1207 /* priority IORB; insert at first position */
1208 iorb->pNxtIORB = queue->root;
1209 queue->root = iorb;
1210
1211 } else {
1212 /* append IORB to end of queue */
1213 iorb->pNxtIORB = NULL;
1214
1215 if (queue->root == NULL) {
1216 queue->root = iorb;
1217 } else {
1218 queue->tail->pNxtIORB = iorb;
1219 }
1220 queue->tail = iorb;
1221 }
1222
1223 if (debug) {
1224 /* determine queue type (local, driver, abort or port) and minimum debug
1225 * level; otherwise, queue debug prints can become really confusing.
1226 */
1227 char *queue_type;
1228 int min_debug = 1;
1229
1230 if ((u32) queue >> 16 == (u32) (void _far *) &queue >> 16) {
1231 /* this queue is on the stack */
1232 queue_type = "local";
1233 min_debug = 2;
1234
1235 } else if (queue == &driver_queue) {
1236 queue_type = "driver";
1237
1238 } else if (queue == &abort_queue) {
1239 queue_type = "abort";
1240 min_debug = 2;
1241
1242 } else {
1243 queue_type = "port";
1244 }
1245
1246 if (debug > min_debug) {
1247 aprintf("IORB %Fp queued (cmd = %d/%d, queue = %Fp [%s], timeout = %ld)\n",
1248 iorb, iorb->CommandCode, iorb->CommandModifier, queue, queue_type,
1249 iorb->Timeout);
1250 }
1251 }
1252}
1253
1254/******************************************************************************
1255 * Remove an IORB from the specified queue. This function must be called with
1256 * the adapter-level spinlock acquired.
1257 */
1258int iorb_queue_del(IORB_QUEUE _far *queue, IORBH _far *iorb)
1259{
1260 IORBH _far *_iorb;
1261 IORBH _far *_prev = NULL;
1262 int found = 0;
1263
1264 for (_iorb = queue->root; _iorb != NULL; _iorb = _iorb->pNxtIORB) {
1265 if (_iorb == iorb) {
1266 /* found the IORB to be removed */
1267 if (_prev != NULL) {
1268 _prev->pNxtIORB = _iorb->pNxtIORB;
1269 } else {
1270 queue->root = _iorb->pNxtIORB;
1271 }
1272 if (_iorb == queue->tail) {
1273 queue->tail = _prev;
1274 }
1275 found = 1;
1276 break;
1277 }
1278 _prev = _iorb;
1279 }
1280
1281 if (found) {
1282 ddprintf("IORB %Fp removed (queue = %Fp)\n", iorb, queue);
1283 } else {
1284 dprintf("IORB %Fp not found in queue %Fp\n", iorb, queue);
1285 }
1286
1287 return(!found);
1288}
1289
1290/******************************************************************************
1291 * Set the error code in the specified IORB
1292 *
1293 * NOTE: This function does *not* call iorb_done(). It merely sets the IORB
1294 * status to the specified error code.
1295 */
1296void iorb_seterr(IORBH _far *iorb, USHORT error_code)
1297{
1298 iorb->ErrorCode = error_code;
1299 iorb->Status |= IORB_ERROR;
1300}
1301
1302/******************************************************************************
1303 * Mark the specified IORB as done and notify the asynchronous post function,
1304 * if any. The IORB is also removed from the corresponding IORB queue.
1305 *
1306 * NOTES: This function does not clear the Status field; it merely adds the
1307 * IORB_DONE flag.
1308 *
1309 * This function is expected to be called *without* the corresponding
1310 * driver-level drv_lock acquired. It will acquire the spinlock before
1311 * updating the IORB queue and release it before notifying the upstream
1312 * code in order to prevent deadlocks.
1313 *
1314 * Due to this logic, this function is only good for simple task-time
1315 * completions. Functions working on lists of IORBs (such as interrupt
1316 * handlers or context hooks) should call iorb_complete() directly and
1317 * implement their own logic for removing the IORB from the port queue.
1318 * See abort_ctxhook() for an example.
1319 */
1320void iorb_done(IORBH _far *iorb)
1321{
1322 int a = iorb_unit_adapter(iorb);
1323 int p = iorb_unit_port(iorb);
1324
1325 /* remove IORB from corresponding queue */
1326 spin_lock(drv_lock);
1327 if (iorb_driver_level(iorb)) {
1328 iorb_queue_del(&driver_queue, iorb);
1329 } else {
1330 iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, iorb);
1331 }
1332 aws_free(add_workspace(iorb));
1333 spin_unlock(drv_lock);
1334
1335 iorb_complete(iorb);
1336}
1337
1338/******************************************************************************
1339 * Complete an IORB. This should be called without the adapter-level spinlock
1340 * to allow the IORB completion routine to perform whatever processing it
1341 * requires. This implies that the IORB should no longer be in any global
1342 * queue because the IORB completion routine may well reuse the IORB and send
1343 * the next request to us before even returning from this function.
1344 */
1345void iorb_complete(IORBH _far *iorb)
1346{
1347 iorb->Status |= IORB_DONE;
1348
1349 ddprintf("IORB %Fp complete (status = 0x%04x, error = 0x%04x)\n",
1350 iorb, iorb->Status, iorb->ErrorCode);
1351
1352 if (iorb->RequestControl & IORB_ASYNC_POST) {
1353 iorb->NotifyAddress(iorb);
1354 }
1355}
1356
1357/******************************************************************************
1358 * Requeue the specified IORB such that it will be sent downstream for
1359 * processing again. This includes freeing all resources currently allocated
1360 * (timer, buffer, ...) and resetting the flags to 0. The driver-level
1361 * spinlock must be acquired when calling this function.
1362 *
1363 * The following flags are preserved:
1364 * - no_ncq
1365 */
1366void iorb_requeue(IORBH _far *iorb)
1367{
1368 ADD_WORKSPACE _far *aws = add_workspace(iorb);
1369 u16 no_ncq = aws->no_ncq;
1370 u16 unaligned = aws->unaligned;
1371 u16 retries = aws->retries;
1372
1373 aws_free(aws);
1374 memset(aws, 0x00, sizeof(*aws));
1375
1376 aws->no_ncq = no_ncq;
1377 aws->unaligned = unaligned;
1378 aws->retries = retries;
1379}
1380
1381/******************************************************************************
1382 * Free resources in ADD workspace (timer, buffer, ...). This function should
1383 * be called with the spinlock held to prevent race conditions.
1384 */
1385void aws_free(ADD_WORKSPACE _far *aws)
1386{
1387 if (aws->timer != 0) {
1388 ADD_CancelTimer(aws->timer);
1389 aws->timer = 0;
1390 }
1391
1392 if (aws->buf != NULL) {
1393 free(aws->buf);
1394 aws->buf = NULL;
1395 }
1396}
1397
1398/******************************************************************************
1399 * Lock the adapter, waiting for availability if necessary. This is expected
1400 * to be called at task/request time without the driver-level spinlock
1401 * acquired. Don't call at interrupt time.
1402 */
1403void lock_adapter(AD_INFO *ai)
1404{
1405 TIMER Timer;
1406
1407 spin_lock(drv_lock);
1408 while (ai->busy) {
1409 spin_unlock(drv_lock);
1410 timer_init(&Timer, 250);
1411 while (!timer_check_and_block(&Timer));
1412 spin_lock(drv_lock);
1413 }
1414 ai->busy = 1;
1415 spin_unlock(drv_lock);
1416}
1417
1418/******************************************************************************
1419 * Unlock adapter (i.e. reset busy flag)
1420 */
1421void unlock_adapter(AD_INFO *ai)
1422{
1423 ai->busy = 0;
1424}
1425
1426/******************************************************************************
1427 * Timeout handler for I/O commands. Since timeout handling can involve
1428 * lengthy operations like port resets, the main code is located in a
1429 * separate function which is invoked via a context hook.
1430 */
1431void _cdecl _far timeout_callback(ULONG timer_handle, ULONG p1,
1432 ULONG p2)
1433{
1434 IORBH _far *iorb = (IORBH _far *) p1;
1435 int a = iorb_unit_adapter(iorb);
1436 int p = iorb_unit_port(iorb);
1437
1438 ADD_CancelTimer(timer_handle);
1439 dprintf("timeout for IORB %Fp\n", iorb);
1440
1441 /* Move the timed-out IORB to the abort queue. Since it's possible that the
1442 * IORB has completed after the timeout has expired but before we got to
1443 * this line of code, we'll check the return code of iorb_queue_del(): If it
1444 * returns an error, the IORB must have completed a few microseconds ago and
1445 * there is no timeout.
1446 */
1447 spin_lock(drv_lock);
1448 if (iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, iorb) == 0) {
1449 iorb_queue_add(&abort_queue, iorb);
1450 iorb->ErrorCode = IOERR_ADAPTER_TIMEOUT;
1451 }
1452 spin_unlock(drv_lock);
1453
1454 /* Trigger abort processing function. We don't really care whether this
1455 * succeeds because the only reason why it would fail should be multiple
1456 * calls to DevHelp_ArmCtxHook() before the context hook had a chance to
1457 * start executing, which leaves two scenarios:
1458 *
1459 * - We succeeded in arming the context hook. Fine.
1460 *
1461 * - We armed the context hook a second time before it had a chance to
1462 * start executing. In this case, the already scheduled context hook
1463 * will process our IORB as well.
1464 */
1465 DevHelp_ArmCtxHook(0, reset_ctxhook_h);
1466
1467 /* Set up a watchdog timer which calls the context hook manually in case
1468 * some kernel thread is looping around the IORB_DONE status bit
1469 * without yielding the CPU (kernel threads don't preempt). This shouldn't
1470 * happen per design because kernel threads are supposed to yield but it
1471 * does in the early boot phase.
1472 */
1473 ADD_StartTimerMS(&th_reset_watchdog, 5000, (PFN) reset_watchdog, 0, 0);
1474}
1475
1476/******************************************************************************
1477 * Reset handler watchdog. If a timeout occurs, a context hook is armed which
1478 * will execute as soon as a kernel thread yields the CPU. However, some
1479 * kernel components won't yield the CPU during the early boot phase and the
1480 * only way to kick some sense into those components is to run the context
1481 * hook right inside this timer callback. Not exactly pretty, especially
1482 * considering the fact that context hooks were implemented to prevent running
1483 * lengthy operations like a port reset at interrupt time, but without this
1484 * watchdog mechanism we run the risk of getting completely stalled by device
1485 * problems during the early boot phase.
1486 */
1487void _cdecl _far reset_watchdog(ULONG timer_handle, ULONG p1,
1488 ULONG p2)
1489{
1490 /* reset watchdog timer */
1491 ADD_CancelTimer(timer_handle);
1492 dprintf("reset watchdog invoked\n");
1493
1494 /* call context hook manually */
1495 reset_ctxhook(0);
1496}
1497
1498/******************************************************************************
1499 * small_code_ - this dummy func resolves the undefined reference linker
1500 * error that occurs when linking WATCOM objects with DDK's link.exe
1501 */
1502void _cdecl small_code_(void)
1503{
1504}
1505
1506/******************************************************************************
1507 * Add unit info to ADAPTERINFO array (IOCM_GET_DEVICE_TABLE requests). The
1508 * adapter info array in the device table, dt->pAdapter[], is expected to be
1509 * initialized for the specified index (dta).
1510 *
1511 * Please note that the device table adapter index, dta, is not always equal
1512 * to the physical adapter index, a: if SCSI emulation has been activated, the
1513 * last reported adapter is a virtual SCSI adapter and the physical adapter
1514 * indexes for those units are, of course, different from the device table
1515 * index of the virtual SCSI adapter.
1516 */
1517static int add_unit_info(IORB_CONFIGURATION _far *iorb_conf, int dta,
1518 int a, int p, int d, int scsi_id)
1519{
1520 DEVICETABLE _far *dt = iorb_conf->pDeviceTable;
1521 ADAPTERINFO _far *ptr = (ADAPTERINFO _far *) (((u32) dt & 0xffff0000U) +
1522 (u16) dt->pAdapter[dta]);
1523 UNITINFO _far *ui = ptr->UnitInfo + ptr->AdapterUnits;
1524 AD_INFO *ai = ad_infos + a;
1525
1526 if ((u32) (ui + 1) - (u32) dt > iorb_conf->DeviceTableLen) {
1527 dprintf("error: device table provided by DASD too small\n");
1528 iorb_seterr(&iorb_conf->iorbh, IOERR_CMD_SW_RESOURCE);
1529 return(-1);
1530 }
1531
1532 if (ai->ports[p].devs[d].unit_info == NULL) {
1533 /* provide original information about this device (unit) */
1534 memset(ui, 0x00, sizeof(*ui));
1535 ui->AdapterIndex = dta; /* device table adapter index */
1536 ui->UnitHandle = iorb_unit(a, p, d); /* physical adapter index */
1537 ui->UnitIndex = ptr->AdapterUnits;
1538 ui->UnitType = ai->ports[p].devs[d].dev_type;
1539 ui->QueuingCount = ai->ports[p].devs[d].ncq_max;
1540 if (ai->ports[p].devs[d].removable) {
1541 ui->UnitFlags |= UF_REMOVABLE;
1542 }
1543 if (scsi_id > 0) {
1544 /* set fake SCSI ID for this unit */
1545 ui->UnitSCSITargetID = scsi_id;
1546 }
1547 } else {
1548 /* copy updated device (unit) information (IOCM_CHANGE_UNITINFO) */
1549 memcpy(ui, ai->ports[p].devs[d].unit_info, sizeof(*ui));
1550 }
1551
1552 ptr->AdapterUnits++;
1553 return(0);
1554}
1555
1556/*******************************************************************************
1557 * Register kernel exit handler for trap dumps. Our exit handler will be called
1558 * right before the kernel starts a dump; that's where we reset the controller
1559 * so it supports BIOS int13 I/O calls.
1560 */
1561static void register_krnl_exit(void)
1562{
1563 _asm {
1564 push ds
1565 push es
1566 push bx
1567 push si
1568 push di
1569
1570 mov ax, FLAG_KRNL_EXIT_ADD
1571 mov cx, TYPE_KRNL_EXIT_INT13
1572 mov bx, SEG asm_krnl_exit
1573 mov si, OFFSET asm_krnl_exit
1574 mov dl, DevHlp_RegisterKrnlExit
1575
1576 call dword ptr [Device_Help]
1577
1578 pop di
1579 pop si
1580 pop bx
1581 pop es
1582 pop ds
1583 }
1584
1585 dprintf("Registered kernel exit routine for INT13 mode\n");
1586}
1587