source: trunk/src/os2ahci/os2ahci.c@181

Last change on this file since 181 was 181, checked in by David Azarewicz, 9 years ago

Debugging changes

File size: 50.7 KB
1/******************************************************************************
2 * os2ahci.c - main file for os2ahci driver
3 *
4 * Copyright (c) 2011 thi.guten Software Development
5 * Copyright (c) 2011 Mensys B.V.
6 * Copyright (c) 2013-2016 David Azarewicz
7 *
8 * Authors: Christian Mueller, Markus Thielen
9 *
10 * Parts copied from/inspired by the Linux AHCI driver;
11 * those parts are (c) Linux AHCI/ATA maintainers
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include "os2ahci.h"
29#include "ioctl.h"
30#include "version.h"
31#include "devhdr.h"
32
33/* -------------------------- macros and constants ------------------------- */
34
35/* set a port option for all adapters, for one adapter, or for one specific port */
36#define set_port_option(opt, val) \
37 if (adapter_index == -1) { \
38 /* set option for all adapters and ports */ \
39 memset(opt, val, sizeof(opt)); \
40 } else if (port_index == -1) { \
41 /* set option for all ports on current adapter */ \
42 memset(opt[adapter_index], val, sizeof(*opt)); \
43 } else { \
44 /* set option for specific port */ \
45 opt[adapter_index][port_index] = val; \
46 }
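/*
 * Illustrative expansion (hypothetical switch values): with "/A:1 /P:2 /!N" on
 * the command line, init_drv() reaches the "/N" handler with adapter_index == 1,
 * port_index == 2 and iInvertOption != 0, so set_port_option(enable_ncq,
 * !iInvertOption) reduces to the single assignment:
 *
 *   enable_ncq[1][2] = 0;
 *
 * Without any "/A:" switch (adapter_index still -1), the same handler would
 * instead clear NCQ for every adapter and port:
 *
 *   memset(enable_ncq, 0, sizeof(enable_ncq));
 */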
47
48#define FLAG_KRNL_EXIT_ADD 0x1000
49#define FLAG_KRNL_EXIT_REMOVE 0x2000
50
51#define TYPE_KRNL_EXIT_NMI 0x0000 /* non-maskable interrupts */
52#define TYPE_KRNL_EXIT_SFF 0x0001 /* system fatal faults */
53#define TYPE_KRNL_EXIT_PROCDUMP 0x0002
54#define TYPE_KRNL_EXIT_DYN 0x0003
55#define TYPE_KRNL_EXIT_INT13 0x0004 /* enable int13 IO */
56
57/* ------------------------ typedefs and structures ------------------------ */
58
59/* -------------------------- function prototypes -------------------------- */
60
61extern int SetPsdPutc(void);
62static int add_unit_info(IORB_CONFIGURATION *iorb_conf, int dt_ai, int a, int p, int d, int scsi_id);
63
64/* ------------------------ global/static variables ------------------------ */
65int thorough_scan = 1; /* if != 0, perform thorough PCI scan */
66int init_reset = 1; /* if != 0, reset ports during init */
67int force_write_cache; /* if != 0, force write cache */
68int verbosity = 0; /* default is quiet. 1=show sign-on banner, >1=show adapter info during boot */
69int use_lvm_info = 1;
70long com_baud = 0;
71
72HDRIVER rm_drvh; /* resource manager driver handle */
73USHORT add_handle; /* driver handle (RegisterDeviceClass) */
74char drv_name[] = "OS2AHCI"; /* driver name as string */
75
76/* resource manager driver information structure */
77static DRIVERSTRUCT rm_drvinfo =
78{
79 NULL, /* We cannot do Flat to Far16 conversion at compile time */
80 NULL, /* so we put NULLs in all the Far16 fields and then fill */
81 NULL, /* them in at run time */
82 DMAJOR,
83 DMINOR,
84 BLD_YEAR, BLD_MONTH, BLD_DAY,
85 0,
86 DRT_ADDDM,
87 DRS_ADD,
88 NULL
89};
90
91SpinLock_t drv_lock; /* driver-level spinlock */
92IORB_QUEUE driver_queue; /* driver-level IORB queue */
93AD_INFO ad_infos[MAX_AD]; /* adapter information list */
94int ad_info_cnt; /* number of entries in ad_infos[] */
95u16 ad_ignore; /* bitmap with adapter indexes to ignore */
96int init_complete; /* if != 0, initialization has completed */
97int suspended;
98int resume_sleep_flag;
99
100/* adapter/port-specific options saved when parsing the command line */
101u8 emulate_scsi[MAX_AD][AHCI_MAX_PORTS];
102u8 enable_ncq[MAX_AD][AHCI_MAX_PORTS];
103u8 link_speed[MAX_AD][AHCI_MAX_PORTS];
104u8 link_power[MAX_AD][AHCI_MAX_PORTS];
105u8 track_size[MAX_AD][AHCI_MAX_PORTS];
106u8 port_ignore[MAX_AD][AHCI_MAX_PORTS];
107
108char BldLevel[] = BLDLEVEL;
109
110/* ----------------------------- start of code ----------------------------- */
111
112/******************************************************************************
113 * OS/2 device driver main strategy function.
114 *
115 * NOTE: this is also used as the IDC entry point. We expect an IOCTL request
116 * packet for IDC calls, so they can be handled by gen_ioctl.
117 */
118void StrategyHandler(REQPACKET *prp)
119{
120 u16 rc;
121
122 switch (prp->bCommand)
123 {
124 case STRATEGY_BASEDEVINIT:
125 rc = init_drv(prp);
126 break;
127
128 case STRATEGY_SHUTDOWN:
129 rc = exit_drv(prp->save_restore.Function);
130 break;
131
132 case STRATEGY_GENIOCTL:
133 rc = gen_ioctl(prp);
134 break;
135
136 case STRATEGY_OPEN:
137 build_user_info();
138 rc = RPDONE;
139 break;
140
141 case STRATEGY_READ:
142 rc = char_dev_input(prp);
143 break;
144
145 case STRATEGY_SAVERESTORE:
146 rc = sr_drv(prp->save_restore.Function);
147 break;
148
149 case STRATEGY_INITCOMPLETE:
150 case STRATEGY_CLOSE:
151 case STRATEGY_INPUTSTATUS:
152 case STRATEGY_FLUSHINPUT:
153 /* noop */
154 rc = RPDONE;
155 break;
156
157 default:
158 rc = RPDONE | RPERR_BADCOMMAND;
159 break;
160 }
161
162 prp->usStatus = rc;
163}
164
165void IdcHandler(REQPACKET *prp)
166{
167 StrategyHandler(prp);
168}
169
170/******************************************************************************
171 * Initialize the os2ahci driver. This includes command line parsing, scanning
172 * the PCI bus for supported AHCI adapters, etc.
173 */
174USHORT init_drv(REQPACKET *req)
175{
176 static int init_drv_called;
177 static int init_drv_failed;
178 APIRET rmrc;
179 const char *pszCmdLine, *cmd_line;
180 int adapter_index = -1;
181 int port_index = -1;
182 int iInvertOption;
183 int iStatus;
184
185 if (init_drv_called)
186 {
187 /* This is the init call for the second (IBMS506$) character
188 * device driver. If the main driver failed initialization, fail this
189 * one as well.
190 */
191 return(RPDONE | ((init_drv_failed) ? RPERR_INITFAIL : 0));
192 }
193 D32g_DbgLevel = 0;
194 init_drv_called = 1;
195 suspended = 0;
196 resume_sleep_flag = 0;
197 memset(ad_infos, 0, sizeof(ad_infos));
198 memset(emulate_scsi, 1, sizeof(emulate_scsi)); /* set default enabled */
199 UtSetDriverName("OS2AHCI$");
200 Header.ulCaps |= DEV_ADAPTER_DD; /* DAZ This flag is not really needed. */
201
202 /* create driver-level spinlock */
203 KernAllocSpinLock(&drv_lock);
204
205 /* register driver with resource manager */
206 rm_drvinfo.DrvrName = drv_name;
207 rm_drvinfo.DrvrDescript = "AHCI SATA Driver";
208 rm_drvinfo.VendorName = DVENDOR;
209 if ((rmrc = RMCreateDriver(&rm_drvinfo, &rm_drvh)) != RMRC_SUCCESS)
210 {
211 iprintf("%s: failed to register driver with resource manager (rc = %d)", drv_name, rmrc);
212 goto init_fail;
213 }
214
215 pszCmdLine = cmd_line = req->init_in.szArgs;
216 iStatus = 0;
217 while (*pszCmdLine)
218 {
219 if (*pszCmdLine++ != '/') continue; /* Ignore anything that doesn't start with '/' */
220 /* pszCmdLine now points to first char of argument */
221
222 if ((iInvertOption = (*pszCmdLine == '!')) != 0) pszCmdLine++;
223
224 if (ArgCmp(pszCmdLine, "B:"))
225 {
226 pszCmdLine += 2;
227 com_baud = strtol(pszCmdLine, &pszCmdLine, 0);
228 continue;
229 }
230
231 if (ArgCmp(pszCmdLine, "C:"))
232 {
233 pszCmdLine += 2;
234 /* set COM port base address for debug messages */
235 D32g_ComBase = strtol(pszCmdLine, &pszCmdLine, 0);
236 if (D32g_ComBase == 1) D32g_ComBase = 0x3f8;
237 if (D32g_ComBase == 2) D32g_ComBase = 0x2f8;
238 continue;
239 }
240
241 if (ArgCmp(pszCmdLine, "D"))
242 {
243 pszCmdLine++;
244 if (*pszCmdLine == ':')
245 {
246 pszCmdLine++;
247 D32g_DbgLevel = strtol(pszCmdLine, &pszCmdLine, 0);
248 }
249 else D32g_DbgLevel++; /* increase debug level */
250 continue;
251 }
252
253 if (ArgCmp(pszCmdLine, "G:"))
254 {
255 u16 usVendor;
256 u16 usDevice;
257
258 pszCmdLine += 2;
259 /* add specified PCI ID as a supported generic AHCI adapter */
260 usVendor = strtol(pszCmdLine, &pszCmdLine, 16);
261 if (*pszCmdLine != ':') break;
262 pszCmdLine++;
263 usDevice = strtol(pszCmdLine, &pszCmdLine, 16);
264 if (add_pci_id(usVendor, usDevice))
265 {
266 iprintf("%s: failed to add PCI ID %04x:%04x", drv_name, usVendor, usDevice);
267 iStatus = 1;
268 }
269 thorough_scan = 1;
270 continue;
271 }
272
273 if (ArgCmp(pszCmdLine, "T"))
274 {
275 pszCmdLine++;
276 /* perform thorough PCI scan (i.e. look for individual supported PCI IDs) */
277 thorough_scan = !iInvertOption;
278 continue;
279 }
280
281 if (ArgCmp(pszCmdLine, "R"))
282 {
283 pszCmdLine++;
284 /* reset ports during initialization */
285 init_reset = !iInvertOption;
286 continue;
287 }
288
289 if (ArgCmp(pszCmdLine, "F"))
290 {
291 pszCmdLine++;
292 /* force write cache regardless of IORB flags */
293 force_write_cache = 1;
294 continue;
295 }
296
297 if (ArgCmp(pszCmdLine, "A:"))
298 {
299 pszCmdLine += 2;
300 /* set adapter index for adapter and port-related options */
301 adapter_index = strtol(pszCmdLine, &pszCmdLine, 0);
302 if (adapter_index < 0 || adapter_index >= MAX_AD)
303 {
304 iprintf("%s: invalid adapter index (%d)", drv_name, adapter_index);
305 iStatus = 1;
306 }
307 continue;
308 }
309
310 if (ArgCmp(pszCmdLine, "P:"))
311 {
312 pszCmdLine += 2;
313 /* set port index for port-related options */
314 port_index = strtol(pszCmdLine, &pszCmdLine, 0);
315 if (port_index < 0 || port_index >= AHCI_MAX_PORTS)
316 {
317 iprintf("%s: invalid port index (%d)", drv_name, port_index);
318 iStatus = 1;
319 }
320 continue;
321 }
322
323 if (ArgCmp(pszCmdLine, "I"))
324 {
325 pszCmdLine++;
326 /* ignore the current adapter, or a specific port if one is selected */
327 if (adapter_index >= 0)
328 {
329 if (port_index >= 0) port_ignore[adapter_index][port_index] = !iInvertOption;
330 else ad_ignore |= 1U << adapter_index;
331 }
332 continue;
333 }
334
335 if (ArgCmp(pszCmdLine, "S"))
336 {
337 pszCmdLine++;
338 /* enable SCSI emulation for ATAPI devices */
339 set_port_option(emulate_scsi, !iInvertOption);
340 continue;
341 }
342
343 if (ArgCmp(pszCmdLine, "N"))
344 {
345 pszCmdLine++;
346 /* enable NCQ */
347 set_port_option(enable_ncq, !iInvertOption);
348 continue;
349 }
350
351 if (ArgCmp(pszCmdLine, "LS:"))
352 {
353 int optval;
354
355 pszCmdLine += 3;
356 /* set link speed */
357 optval = strtol(pszCmdLine, &pszCmdLine, 0);
358 set_port_option(link_speed, optval);
359 /* need to reset the port in order to establish link settings */
360 init_reset = 1;
361 continue;
362 }
363
364 if (ArgCmp(pszCmdLine, "LP:"))
365 {
366 int optval;
367
368 pszCmdLine += 3;
369 /* set power management */
370 optval = strtol(pszCmdLine, &pszCmdLine, 0);
371 set_port_option(link_power, optval);
372 /* need to reset the port in order to establish link settings */
373 init_reset = 1;
374 continue;
375 }
376
377 if (ArgCmp(pszCmdLine, "4"))
378 {
379 pszCmdLine++;
380 /* enable 4K sector geometry enhancement (track size = 56) */
381 if (!iInvertOption) set_port_option(track_size, 56);
382 continue;
383 }
384
385 if (ArgCmp(pszCmdLine, "Z"))
386 {
387 pszCmdLine++;
388 /* Specify to not use the LVM information. There is no reason why anyone would
389 * want to do this, but previous versions of this driver did not have LVM capability,
390 * so this switch is here temporarily just in case.
391 */
392 use_lvm_info = !iInvertOption;
393 continue;
394 }
395
396 if (ArgCmp(pszCmdLine, "V"))
397 {
398 pszCmdLine++;
399 if (*pszCmdLine == ':')
400 {
401 pszCmdLine++;
402 verbosity = strtol(pszCmdLine, &pszCmdLine, 0);
403 }
404 else verbosity++; /* increase verbosity level */
405 continue;
406 }
407
408 if (ArgCmp(pszCmdLine, "W"))
409 {
410 pszCmdLine++;
411 /* Specify to allow the trace buffer to wrap when full. */
412 D32g_DbgBufWrap = !iInvertOption;
413 continue;
414 }
415
416 iprintf("Unrecognized switch: %s", pszCmdLine-1);
417 iStatus = 1; /* unrecognized argument */
418 }
419
420 if (iStatus) goto init_fail;
421
422 if (com_baud) InitComPort(com_baud);
423
424 NTPRINTF("BldLevel: %s\n", BldLevel);
425 NTPRINTF("CmdLine: %s\n", cmd_line);
426 /*
427 if (sizeof(ADD_WORKSPACE) > ADD_WORKSPACE_SIZE)
428 {
429 dprintf(0,"ADD_WORKSPACE size is too big! %d>16\n", sizeof(ADD_WORKSPACE));
430 goto init_fail;
431 }
432 */
433
434 /* print initialization message */
435 ciprintf("%s driver version %d.%02d", drv_name, DMAJOR, DMINOR);
436
437 #ifdef TESTVER
438 #include "testver.c"
439 #endif
440
441 /* scan PCI bus for supported devices */
442 scan_pci_bus();
443
444 if (ad_info_cnt > 0)
445 {
446 /* initialization succeeded and we found at least one AHCI adapter */
447
448 if (Dev32Help_RegisterDeviceClass(drv_name, add_entry, 0, 1, &add_handle))
449 {
450 iprintf("%s: couldn't register device class", drv_name);
451 goto init_fail;
452 }
453
454 Timer_InitTimer(TIMER_COUNT);
455
456 /* allocate context hooks */
457 KernAllocateContextHook(restart_ctxhook, 0, &restart_ctxhook_h);
458 KernAllocateContextHook(reset_ctxhook, 0, &reset_ctxhook_h);
459 KernAllocateContextHook(engine_ctxhook, 0, &engine_ctxhook_h);
460
461 /* register kernel exit routine for trap dumps */
462 Dev32Help_RegisterKrnlExit(shutdown_driver, FLAG_KRNL_EXIT_ADD, TYPE_KRNL_EXIT_INT13);
463
464 return(RPDONE);
465
466 }
467 else
468 {
469 /* no adapters found */
470 ciprintf("%s: No adapters found.", drv_name);
471 }
472
473init_fail:
474 /* initialization failed; clean up and return error */
475 init_drv_failed = 1;
476
477 if (rm_drvh != 0)
478 {
479 /* remove driver from resource manager */
480 RMDestroyDriver(rm_drvh);
481 }
482
483 ciprintf("%s driver *not* installed", drv_name);
484 return(RPDONE | RPERR_INITFAIL);
485}
486
487/******************************************************************************
488 * Generic IOCTL via character device driver. IOCTLs are used to control the
489 * driver operation and to execute native ATA and ATAPI (SCSI) commands from
490 * ring 3 applications. On top of that, some predefined IOCTLs (e.g. SMART
491 * commands for ATA disks) are implemented here.
492 */
493USHORT gen_ioctl(REQPACKET *ioctl)
494{
495 DPRINTF(2,"IOCTL 0x%x/0x%x\n", ioctl->ioctl.bCategory, ioctl->ioctl.bFunction);
496
497 switch (ioctl->ioctl.bCategory)
498 {
499 case OS2AHCI_IOCTL_CATEGORY:
500 switch (ioctl->ioctl.bFunction)
501 {
502
503 case OS2AHCI_IOCTL_GET_DEVLIST:
504 return(ioctl_get_devlist(ioctl));
505
506 case OS2AHCI_IOCTL_PASSTHROUGH:
507 return(ioctl_passthrough(ioctl));
508
509 }
510 break;
511
512 case DSKSP_CAT_GENERIC:
513 return(ioctl_gen_dsk(ioctl));
514
515 case DSKSP_CAT_SMART:
516 return(ioctl_smart(ioctl));
517 }
518
519 return(RPDONE | RPERR_BADCOMMAND);
520}
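/*
 * Ring-3 usage sketch for the IOCTL path above. The category/function names
 * come from ioctl.h; the parameter and data packet layouts are not visible in
 * this file, so the buffers below are placeholders only, and the device name
 * "OS2AHCI$" is taken from the UtSetDriverName() call in init_drv().
 */
#define INCL_DOSFILEMGR
#define INCL_DOSDEVICES
#include <os2.h>
#include "ioctl.h"

static APIRET query_devlist(void)
{
  HFILE hDev;
  ULONG ulAction;
  char data[1024];                      /* placeholder output buffer */
  ULONG cbParm = 0, cbData = sizeof(data);
  APIRET rc;

  rc = DosOpen((PSZ)"OS2AHCI$", &hDev, &ulAction, 0, FILE_NORMAL,
               OPEN_ACTION_OPEN_IF_EXISTS,
               OPEN_SHARE_DENYNONE | OPEN_ACCESS_READWRITE, NULL);
  if (rc != NO_ERROR) return rc;

  /* ends up in gen_ioctl() -> ioctl_get_devlist() */
  rc = DosDevIOCtl(hDev, OS2AHCI_IOCTL_CATEGORY, OS2AHCI_IOCTL_GET_DEVLIST,
                   NULL, 0, &cbParm,    /* no parameter packet assumed */
                   data, sizeof(data), &cbData);
  DosClose(hDev);
  return rc;
}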
521
522/******************************************************************************
523 * Read from character device. If tracing is on (internal ring buffer trace),
524 * we return data from the trace buffer; if not, we might return a device
525 * dump similar to IBM1S506.ADD/DANIS506.ADD (TODO).
526 */
527USHORT char_dev_input(REQPACKET *pPacket)
528{
529 void *LinAdr;
530
531 if (Dev32Help_PhysToLin(pPacket->io.ulAddress, pPacket->io.usCount, &LinAdr))
532 {
533 pPacket->io.usCount = 0;
534 return RPDONE | RPERR_GENERAL;
535 }
536
537 pPacket->io.usCount = dCopyToUser(LinAdr, pPacket->io.usCount);
538
539 return RPDONE;
540}
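/*
 * Ring-3 sketch of the STRATEGY_READ path above: a single read from the
 * character device returns whatever char_dev_input() copies out (trace data,
 * if tracing is enabled). The buffer size is arbitrary and the device name
 * "OS2AHCI$" is taken from the UtSetDriverName() call in init_drv().
 */
#define INCL_DOSFILEMGR
#include <os2.h>
#include <stdio.h>

static void dump_trace(void)
{
  HFILE hDev;
  ULONG ulAction, cbRead;
  char buf[512];

  if (DosOpen((PSZ)"OS2AHCI$", &hDev, &ulAction, 0, FILE_NORMAL,
              OPEN_ACTION_OPEN_IF_EXISTS,
              OPEN_SHARE_DENYNONE | OPEN_ACCESS_READONLY, NULL) != NO_ERROR)
  {
    return;                             /* device driver not loaded */
  }
  if (DosRead(hDev, buf, sizeof(buf), &cbRead) == NO_ERROR && cbRead > 0)
  {
    fwrite(buf, 1, cbRead, stdout);     /* dump what the driver returned */
  }
  DosClose(hDev);
}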
541
542/******************************************************************************
543 * Device driver exit handler. This handler is called when OS/2 shuts down and
544 * flushes the write caches of all attached devices. Since this is effectively
545 * the same as what we do when suspending, we'll call out to the corresponding suspend
546 * function.
547 *
548 * NOTE: Errors are ignored because there's no way we could stop the shutdown
549 * or do something about the error, unless retrying endlessly is
550 * considered an option.
551 */
552USHORT exit_drv(int func)
553{
554 DPRINTF(2,"exit_drv(%d) called\n", func);
555
556 if (func == 0)
557 {
558 /* we're only interested in the second phase of the shutdown */
559 return(RPDONE);
560 }
561
562 suspend();
563 return(RPDONE);
564}
565
566/******************************************************************************
567 * Device driver suspend/resume handler. This handler is called when ACPI is
568 * executing a suspend or resume.
569 */
570USHORT sr_drv(int func)
571{
572 DPRINTF(2,"sr_drv(%d) called\n", func);
573
574 if (func) resume();
575 else suspend();
576
577 return(RPDONE);
578}
579
580/******************************************************************************
581 * ADD entry point. This is the main entry point for all ADD requests. Due to
582 * the asynchronous nature of ADD drivers, this function primarily queues the
583 * IORB(s) to the corresponding adapter or port queues, then triggers the
584 * state machine to initiate processing queued IORBs.
585 *
586 * NOTE: In order to prevent race conditions or engine stalls, certain rules
587 * around locking, unlocking and IORB handling in general have been
588 * established. Refer to the comments in "trigger_engine()" for
589 * details.
590 */
591void add_entry(IORBH FAR16DATA *vFirstIorb)
592{
593 IORBH FAR16DATA *vIorb;
594 IORBH FAR16DATA *vNext = NULL;
595
596 spin_lock(drv_lock);
597
598 for (vIorb=vFirstIorb; vIorb!=NULL; vIorb=vNext)
599 {
600 IORBH *pIorb = Far16ToFlat(vIorb);
601
602 /* Queue this IORB. Queues primarily exist on port level but there are
603 * some requests which affect the whole driver, most notably
604 * IOCC_CONFIGURATION. In either case, adding the IORB to the driver or
605 * port queue will change the links, thus we need to save the original
606 * link in 'vNext'.
607 */
608 if (pIorb->RequestControl & IORB_CHAIN) vNext = pIorb->pNxtIORB;
609 else vNext = (IORBH FAR16DATA *)0;
610
611 pIorb->Status = 0;
612 pIorb->ErrorCode = 0;
613 memset(&pIorb->ADDWorkSpace, 0x00, sizeof(ADD_WORKSPACE));
614
615 if (iorb_driver_level(pIorb))
616 {
617 /* driver-level IORB */
618 pIorb->UnitHandle = 0;
619 iorb_queue_add(&driver_queue, vIorb, pIorb);
620
621 }
622 else
623 {
624 /* port-level IORB */
625 int a = iorb_unit_adapter(pIorb);
626 int p = iorb_unit_port(pIorb);
627 int d = iorb_unit_device(pIorb);
628
629 if (a >= ad_info_cnt ||
630 p > ad_infos[a].port_max ||
631 d > ad_infos[a].ports[p].dev_max ||
632 (ad_infos[a].port_map & (1UL << p)) == 0)
633 {
634
635 /* unit handle outside of the allowed range */
636 DPRINTF(0,"warning: IORB for %d.%d.%d out of range\n", a, p, d);
637 pIorb->Status = IORB_ERROR;
638 pIorb->ErrorCode = IOERR_CMD_SYNTAX;
639 iorb_complete(vIorb, pIorb);
640 continue;
641 }
642
643 iorb_queue_add(&ad_infos[a].ports[p].iorb_queue, vIorb, pIorb);
644 }
645 }
646
647 /* trigger state machine */
648 trigger_engine();
649
650 spin_unlock(drv_lock);
651}
652
653/******************************************************************************
654 * Trigger IORB queue engine. This is a wrapper function for trigger_engine_1()
655 * which will try to get all IORBs sent on their way a couple of times. If
656 * there are still IORBs ready for processing after this, this function will
657 * hand off to a context hook which will continue to trigger the engine until
658 * all IORBs have been sent.
659 *
660 * NOTE: While initialization has not completed (or during suspend/resume
661 * operations), this function will loop indefinitely because we can't
662 * rely on interrupt handlers or context hooks and complex IORBs
663 * requiring multiple requeues would eventually hang and time out if
664 * we stopped triggering here.
665 */
666void trigger_engine(void)
667{
668 int i;
669
670 for (i = 0; i < 3 || !init_complete; i++)
671 {
672 if (trigger_engine_1() == 0)
673 {
674 /* done -- all IORBs have been sent on their way */
675 return;
676 }
677 }
678
679 /* Something keeps bouncing; hand off to the engine context hook which will
680 * keep trying in the background.
681 */
682 KernArmHook(engine_ctxhook_h, 0, 0);
683}
684
685/******************************************************************************
686 * Trigger IORB queue engine in order to send commands in the driver/port IORB
687 * queues to the AHCI hardware. This function will return the number of IORBs
688 * sent. Keep in mind that IORBs might "bounce" if the adapter/port is not in
689 * a state to accept the command, thus it might take quite a few calls to get
690 * all IORBs on their way. This is why there's a wrapper function which tries
691 * it a few times, then hands off to a context hook which will keep trying in
692 * the background.
693 *
694 * IORBs might complete before send_iorb() has returned, at any time during
695 * interrupt processing or on another CPU on SMP systems. IORB completion
696 * means modifications to the corresponding IORB queue (the completed IORB
697 * is removed from the queue) thus we need to protect the IORB queues from
698 * race conditions. The safest approach short of keeping the driver-level
699 * spinlock acquired permanently is to keep it throughout this function and
700 * release it temporarily in send_iorb().
701 *
702 * This implies that the handler functions are fully responsible for acquiring
703 * the driver-level spinlock when they need it, and for releasing it again.
704 *
705 * As a rule of thumb, get the driver-level spinlock whenever accessing
706 * volatile variables (IORB queues, values in ad_info[], ...).
707 *
708 * Additional Notes:
709 *
710 * - This function is expected to be called with the spinlock acquired
711 *
712 * - Adapters can be flagged as 'busy' which means no new IORBs are sent (they
713 * just remain in the queue). This can be used to release the driver-level
714 * spinlock while making sure no new IORBs are going to hit the hardware.
715 * In order to prevent engine stalls, all handlers using this functionality
716 * need to invoke trigger_engine() after resetting the busy flag.
717 *
718 * - Driver-level IORBs are not synchronized by adapter-level 'busy' flags.
719 * However, the driver-level queue is worked "one entry at a time" which
720 * means that no new IORBs will be queued on the driver-level queue until
721 * the head element has completed processing. This means that driver-
722 * level IORB handlers don't need to protect against each other. But they
723 * do need to keep in mind interference with port-level IORBs:
724 *
725 * - Driver-level IORB handlers must obtain the spinlock and/or flag all
726 * adapters as 'busy' which are affected by the driver-level IORB
727 *
728 * - Driver-level IORB handlers must not access the hardware of a
729 * particular adapter if it's flagged as 'busy' by another IORB.
730 */
731int trigger_engine_1(void)
732{
733 IORBH FAR16DATA *vIorb;
734 IORBH *pIorb;
735 IORBH FAR16DATA *vNext;
736 int iorbs_sent = 0;
737 int a;
738 int p;
739
740 iorbs_sent = 0;
741
742 /* process driver-level IORBs */
743 if ((vIorb = driver_queue.vRoot) != NULL)
744 {
745 pIorb = Far16ToFlat(vIorb);
746
747 if (!add_workspace(pIorb)->processing)
748 {
749 send_iorb(vIorb, pIorb);
750 iorbs_sent++;
751 }
752 }
753
754 /* process port-level IORBs */
755 for (a = 0; a < ad_info_cnt; a++)
756 {
757 AD_INFO *ai = ad_infos + a;
758 if (ai->busy)
759 {
760 /* adapter is busy; don't process any IORBs */
761 continue;
762 }
763 for (p = 0; p <= ai->port_max; p++)
764 {
765 /* send all queued IORBs on this port */
766 vNext = NULL;
767 for (vIorb = ai->ports[p].iorb_queue.vRoot; vIorb != NULL; vIorb = vNext)
768 {
769 pIorb = Far16ToFlat(vIorb);
770
771 vNext = pIorb->pNxtIORB;
772 if (!add_workspace(pIorb)->processing)
773 {
774 send_iorb(vIorb, pIorb);
775 iorbs_sent++;
776 }
777 }
778 }
779 }
780
781 return(iorbs_sent);
782}
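/*
 * Minimal sketch of the 'busy' protocol described in the notes above, for a
 * handler that must release drv_lock while touching the hardware (compare
 * iocm_device_table() below); the hardware-access body is a placeholder.
 */
static void sample_busy_handler(AD_INFO *ai)
{
  spin_lock(drv_lock);
  if (ai->busy)
  {
    /* another IORB currently owns this adapter's hardware; leave it alone */
    spin_unlock(drv_lock);
    return;
  }
  ai->busy = 1;                 /* no new IORBs will be sent to this adapter */
  spin_unlock(drv_lock);

  /* ... lengthy hardware access without holding the spinlock ... */

  spin_lock(drv_lock);
  ai->busy = 0;
  trigger_engine();             /* required after resetting 'busy' to avoid engine stalls */
  spin_unlock(drv_lock);
}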
783
784/******************************************************************************
785 * Send a single IORB to the corresponding AHCI adapter/port. This is just a
786 * switch board for calling the corresponding iocc_*() handler function.
787 *
788 * NOTE: This function is expected to be called with the driver-level spinlock
789 * acquired. It will release it before calling any of the handler
790 * functions and re-acquire it when done.
791 */
792void send_iorb(IORBH FAR16DATA *vIorb, IORBH *pIorb)
793{
794 /* Mark IORB as "processing" before doing anything else. Once the IORB is
795 * marked as "processing", we can release the spinlock because subsequent
796 * invocations of trigger_engine() (e.g. at interrupt time) will ignore this
797 * IORB.
798 */
799 add_workspace(pIorb)->processing = 1;
800 spin_unlock(drv_lock);
801
802 switch (pIorb->CommandCode)
803 {
804 case IOCC_CONFIGURATION:
805 iocc_configuration(vIorb, pIorb);
806 break;
807
808 case IOCC_DEVICE_CONTROL:
809 iocc_device_control(vIorb, pIorb);
810 break;
811
812 case IOCC_UNIT_CONTROL:
813 iocc_unit_control(vIorb, pIorb);
814 break;
815
816 case IOCC_GEOMETRY:
817 iocc_geometry(vIorb, pIorb);
818 break;
819
820 case IOCC_EXECUTE_IO:
821 iocc_execute_io(vIorb, pIorb);
822 break;
823
824 case IOCC_UNIT_STATUS:
825 iocc_unit_status(vIorb, pIorb);
826 break;
827
828 case IOCC_ADAPTER_PASSTHRU:
829 iocc_adapter_passthru(vIorb, pIorb);
830 break;
831
832 default:
833 /* unsupported call */
834 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
835 iorb_done(vIorb, pIorb);
836 break;
837 }
838
839 /* re-acquire spinlock before returning to trigger_engine() */
840 spin_lock(drv_lock);
841}
842
843/******************************************************************************
844 * Handle IOCC_CONFIGURATION requests.
845 */
846void iocc_configuration(IORBH FAR16DATA *vIorb, IORBH *pIorb)
847{
848 int a;
849
850 switch (pIorb->CommandModifier)
851 {
852
853 case IOCM_COMPLETE_INIT:
854 /* Complete initialization. From now on, we won't have to restore the BIOS
855 * configuration after each command and we're fully operational (i.e. will
856 * use interrupts, timers and context hooks instead of polling).
857 */
858 if (!init_complete)
859 {
860 DPRINTF(1,"leaving initialization mode\n");
861 for (a = 0; a < ad_info_cnt; a++)
862 {
863 lock_adapter(ad_infos + a);
864 ahci_complete_init(ad_infos + a);
865 }
866 init_complete = 1;
867
868 /* release all adapters */
869 for (a = 0; a < ad_info_cnt; a++)
870 {
871 unlock_adapter(ad_infos + a);
872 }
873 DPRINTF(1,"leaving initialization mode 2\n");
874
875 #ifdef LEGACY_APM
876 /* register APM hook */
877 apm_init();
878 #endif
879 }
880 iorb_done(vIorb, pIorb);
881 break;
882
883 case IOCM_GET_DEVICE_TABLE:
884 /* construct a device table */
885 iocm_device_table(vIorb, pIorb);
886 break;
887
888 default:
889 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
890 iorb_done(vIorb, pIorb);
891 break;
892 }
893}
894
895/******************************************************************************
896 * Handle IOCC_DEVICE_CONTROL requests.
897 */
898void iocc_device_control(IORBH FAR16DATA *vIorb, IORBH *pIorb)
899{
900 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
901 IORBH FAR16DATA *vPtr;
902 IORBH FAR16DATA *vNext = NULL;
903 int p = iorb_unit_port(pIorb);
904 int d = iorb_unit_device(pIorb);
905
906 switch (pIorb->CommandModifier)
907 {
908
909 case IOCM_ABORT:
910 /* abort all pending commands on specified port and device */
911 spin_lock(drv_lock);
912 for (vPtr = ai->ports[p].iorb_queue.vRoot; vPtr != NULL; vPtr = vNext)
913 {
914 IORBH *pPtr = Far16ToFlat(vPtr);
915
916 vNext = pPtr->pNxtIORB;
917 /* move all matching IORBs to the abort queue */
918 if (vPtr != vIorb && iorb_unit_device(pPtr) == d)
919 {
920 iorb_queue_del(&ai->ports[p].iorb_queue, vPtr);
921 iorb_queue_add(&abort_queue, vPtr, pPtr);
922 pPtr->ErrorCode = IOERR_CMD_ABORTED;
923 }
924 }
925 spin_unlock(drv_lock);
926
927 /* trigger reset context hook which will finish the abort processing */
928 KernArmHook(reset_ctxhook_h, 0, 0);
929 break;
930
931 case IOCM_SUSPEND:
932 case IOCM_RESUME:
933 case IOCM_GET_QUEUE_STATUS:
934 /* Suspend/resume operations allow access to the hardware for other
935 * entities such as IBMIDECD.FLT. Since os2ahci implements both ATA
936 * and ATAPI in the same driver, this won't be required.
937 */
938 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
939 break;
940
941 case IOCM_LOCK_MEDIA:
942 case IOCM_UNLOCK_MEDIA:
943 case IOCM_EJECT_MEDIA:
944 /* unit control commands to lock, unlock and eject media */
945 /* will be supported later... */
946 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
947 break;
948
949 default:
950 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
951 break;
952 }
953
954 iorb_done(vIorb, pIorb);
955}
956
957/******************************************************************************
958 * Handle IOCC_UNIT_CONTROL requests.
959 */
960void iocc_unit_control(IORBH FAR16DATA *vIorb, IORBH *pIorb)
961{
962 IORB_UNIT_CONTROL *pIorb_uc = (IORB_UNIT_CONTROL *)pIorb;
963 int a = iorb_unit_adapter(pIorb);
964 int p = iorb_unit_port(pIorb);
965 int d = iorb_unit_device(pIorb);
966
967 spin_lock(drv_lock);
968 switch (pIorb->CommandModifier)
969 {
970 case IOCM_ALLOCATE_UNIT:
971 /* allocate unit for exclusive access */
972 if (ad_infos[a].ports[p].devs[d].allocated)
973 {
974 iorb_seterr(pIorb, IOERR_UNIT_ALLOCATED);
975 }
976 else
977 {
978 ad_infos[a].ports[p].devs[d].allocated = 1;
979 }
980 break;
981
982 case IOCM_DEALLOCATE_UNIT:
983 /* deallocate exclusive access to unit */
984 if (!ad_infos[a].ports[p].devs[d].allocated)
985 {
986 iorb_seterr(pIorb, IOERR_UNIT_NOT_ALLOCATED);
987 }
988 else
989 {
990 ad_infos[a].ports[p].devs[d].allocated = 0;
991 }
992 break;
993
994 case IOCM_CHANGE_UNITINFO:
995 /* Change unit (device) information. One reason for this IOCM is the
996 * interface for filter device drivers: a filter device driver can
997 * either change existing UNITINFOs or permanently allocate units
998 * and fabricate new [logical] units; the former is the reason why we
999 * must store the pointer to the updated UNITINFO for subsequent
1000 * IOCC_CONFIGURATION/IOCM_GET_DEVICE_TABLE calls.
1001 */
1002 if (!ad_infos[a].ports[p].devs[d].allocated)
1003 {
1004 iorb_seterr(pIorb, IOERR_UNIT_NOT_ALLOCATED);
1005 break;
1006 }
1007 ad_infos[a].ports[p].devs[d].unit_info = pIorb_uc->pUnitInfo;
1008 break;
1009
1010 default:
1011 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1012 break;
1013 }
1014
1015 spin_unlock(drv_lock);
1016 iorb_done(vIorb, pIorb);
1017}
1018
1019/******************************************************************************
1020 * Scan all ports for AHCI devices and construct a DASD device table.
1021 *
1022 * NOTES: This function may be called multiple times. Only the first
1023 * invocation will actually scan for devices; all subsequent calls will
1024 * merely return the results of the initial scan, potentially augmented
1025 * by modified unit infos after IOCC_UNIT_CONTROL/IOCM_CHANGE_UNITINFO
1026 * requests.
1027 *
1028 * In order to support applications that can't deal with ATAPI devices
1029 * (i.e. need a SCSI adapter) os2ahci will optionally report ATAPI
1030 * devices as SCSI devices. The corresponding SCSI adapter doesn't
1031 * really exist and is only reported here for the IOCM_GET_DEVICE_TABLE
1032 * request. The units attached to this adapter will use the real HW
1033 * unit IDs, thus we'll never receive a command specific to the
1034 * emulated SCSI adapter and won't need to set up any sort of entity
1035 * for it; the only purpose of the emulated SCSI adapter is to pass the
1036 * bus type "AI_DEVBUS_SCSI_2" upstream, and the emulated units, of
1037 * course. The emulated SCSI target IDs are allocated as follows:
1038 *
1039 * 0 the virtual adapter
1040 * 1..n emulated devices; SCSI target ID increments sequentially
1041 */
1042void iocm_device_table(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1043{
1044 IORB_CONFIGURATION *pIorb_conf;
1045 DEVICETABLE FAR16DATA *vDt;
1046 DEVICETABLE *pDt;
1047 char *pPos;
1048 int scsi_units = 0;
1049 int scsi_id = 1;
1050 int rc;
1051 int dta;
1052 int a;
1053 int p;
1054 int d;
1055
1056 pIorb_conf = (IORB_CONFIGURATION *)pIorb;
1057 vDt = pIorb_conf->pDeviceTable;
1058 pDt = Far16ToFlat(vDt);
1059
1060 spin_lock(drv_lock);
1061
1062 /* initialize device table header */
1063 pDt->ADDLevelMajor = ADD_LEVEL_MAJOR;
1064 pDt->ADDLevelMinor = ADD_LEVEL_MINOR;
1065 pDt->ADDHandle = add_handle;
1066 pDt->TotalAdapters = ad_info_cnt + 1;
1067
1068 /* set start of adapter and device information tables */
1069 pPos = (char*)&pDt->pAdapter[pDt->TotalAdapters];
1070
1071 /* go through all adapters, including the virtual SCSI adapter */
1072 for (dta = 0; dta < pDt->TotalAdapters; dta++)
1073 {
1074 ADAPTERINFO *pPtr = (ADAPTERINFO *)pPos;
1075
1076 /* sanity check for sufficient space in device table */
1077 if ((u32)(pPtr + 1) - (u32)pDt > pIorb_conf->DeviceTableLen)
1078 {
1079 DPRINTF(0,"error: device table provided by DASD too small\n");
1080 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
1081 goto iocm_device_table_done;
1082 }
1083
1084 pDt->pAdapter[dta] = MakeNear16PtrFromDiff(pIorb_conf->pDeviceTable, pDt, pPtr);
1085
1086 //DPRINTF(2,"iocm_device_table: ptr=%x dta=%x pAdapter[dta]=%x pDeviceTable=%x\n",
1087 // ptr, dta, dt->pAdapter[dta], iorb_conf->pDeviceTable);
1088 memset(pPtr, 0x00, sizeof(*pPtr));
1089
1090 pPtr->AdapterIOAccess = AI_IOACCESS_BUS_MASTER;
1091 pPtr->AdapterHostBus = AI_HOSTBUS_OTHER | AI_BUSWIDTH_32BIT;
1092 pPtr->AdapterFlags = AF_16M | AF_HW_SCATGAT;
1093 pPtr->MaxHWSGList = AHCI_MAX_SG / 2; /* AHCI S/G elements are 22 bits */
1094
1095 if (dta < ad_info_cnt)
1096 {
1097 /* this is a physical AHCI adapter */
1098 AD_INFO *ad_info = ad_infos + dta;
1099
1100 pPtr->AdapterDevBus = AI_DEVBUS_ST506 | AI_DEVBUS_32BIT;
1101 sprintf(pPtr->AdapterName, "AHCI_%d", dta);
1102
1103 if (!ad_info->port_scan_done)
1104 {
1105 /* first call; need to scan AHCI hardware for devices */
1106 if (ad_info->busy)
1107 {
1108 DPRINTF(0,"error: port scan requested while adapter was busy\n");
1109 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
1110 goto iocm_device_table_done;
1111 }
1112 ad_info->busy = 1;
1113 spin_unlock(drv_lock);
1114 rc = ahci_scan_ports(ad_info);
1115 spin_lock(drv_lock);
1116 ad_info->busy = 0;
1117
1118 if (rc != 0)
1119 {
1120 DPRINTF(0,"error: port scan failed on adapter #%d\n", dta);
1121 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
1122 goto iocm_device_table_done;
1123 }
1124 ad_info->port_scan_done = 1;
1125 }
1126
1127 /* insert physical (i.e. AHCI) devices into the device table */
1128 for (p = 0; p <= ad_info->port_max; p++)
1129 {
1130 for (d = 0; d <= ad_info->ports[p].dev_max; d++)
1131 {
1132 if (ad_info->ports[p].devs[d].present)
1133 {
1134 if (ad_info->ports[p].devs[d].atapi && emulate_scsi[dta][p])
1135 {
1136 /* report this unit as SCSI unit */
1137 scsi_units++;
1138 //continue;
1139 }
1140 if (add_unit_info(pIorb_conf, dta, dta, p, d, 0))
1141 {
1142 goto iocm_device_table_done;
1143 }
1144 }
1145 }
1146 }
1147 }
1148 else
1149 {
1150 /* this is the virtual SCSI adapter */
1151 if (scsi_units == 0)
1152 {
1153 /* not a single unit to be emulated via SCSI */
1154 pDt->TotalAdapters--;
1155 break;
1156 }
1157
1158 /* set adapter name and bus type to mimic a SCSI controller */
1159 pPtr->AdapterDevBus = AI_DEVBUS_SCSI_2 | AI_DEVBUS_16BIT;
1160 sprintf(pPtr->AdapterName, "AHCI_SCSI_0");
1161
1162 /* add all ATAPI units to be emulated by this virtual adapter */
1163 for (a = 0; a < ad_info_cnt; a++)
1164 {
1165 AD_INFO *ad_info = ad_infos + a;
1166
1167 for (p = 0; p <= ad_info->port_max; p++)
1168 {
1169 for (d = 0; d <= ad_info->ports[p].dev_max; d++)
1170 {
1171 if (ad_info->ports[p].devs[d].present && ad_info->ports[p].devs[d].atapi && emulate_scsi[a][p])
1172 {
1173 if (add_unit_info(pIorb_conf, dta, a, p, d, scsi_id++))
1174 {
1175 goto iocm_device_table_done;
1176 }
1177 }
1178 }
1179 }
1180 }
1181 }
1182
1183 /* calculate offset for next adapter */
1184 pPos = (char *)(pPtr->UnitInfo + pPtr->AdapterUnits);
1185 }
1186
1187iocm_device_table_done:
1188 spin_unlock(drv_lock);
1189 iorb_done(vIorb, pIorb);
1190}
1191
1192/******************************************************************************
1193 * Handle IOCC_GEOMETRY requests.
1194 */
1195void iocc_geometry(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1196{
1197 switch (pIorb->CommandModifier)
1198 {
1199 case IOCM_GET_MEDIA_GEOMETRY:
1200 case IOCM_GET_DEVICE_GEOMETRY:
1201 add_workspace(pIorb)->idempotent = 1;
1202 ahci_get_geometry(vIorb, pIorb);
1203 break;
1204
1205 default:
1206 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1207 iorb_done(vIorb, pIorb);
1208 }
1209}
1210
1211/******************************************************************************
1212 * Handle IOCC_EXECUTE_IO requests.
1213 */
1214void iocc_execute_io(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1215{
1216 switch (pIorb->CommandModifier)
1217 {
1218 case IOCM_READ:
1219 add_workspace(pIorb)->idempotent = 1;
1220 ahci_read(vIorb, pIorb);
1221 break;
1222
1223 case IOCM_READ_VERIFY:
1224 add_workspace(pIorb)->idempotent = 1;
1225 ahci_verify(vIorb, pIorb);
1226 break;
1227
1228 case IOCM_WRITE:
1229 add_workspace(pIorb)->idempotent = 1;
1230 ahci_write(vIorb, pIorb);
1231 break;
1232
1233 case IOCM_WRITE_VERIFY:
1234 add_workspace(pIorb)->idempotent = 1;
1235 ahci_write(vIorb, pIorb);
1236 break;
1237
1238 default:
1239 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1240 iorb_done(vIorb, pIorb);
1241 }
1242}
1243
1244/******************************************************************************
1245 * Handle IOCC_UNIT_STATUS requests.
1246 */
1247void iocc_unit_status(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1248{
1249 switch (pIorb->CommandModifier)
1250 {
1251 case IOCM_GET_UNIT_STATUS:
1252 add_workspace(pIorb)->idempotent = 1;
1253 ahci_unit_ready(vIorb, pIorb);
1254 break;
1255
1256 default:
1257 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1258 iorb_done(vIorb, pIorb);
1259 }
1260}
1261
1262/******************************************************************************
1263 * Handle IOCC_ADAPTER_PASSTHROUGH requests.
1264 */
1265void iocc_adapter_passthru(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1266{
1267 switch (pIorb->CommandModifier)
1268 {
1269
1270 case IOCM_EXECUTE_CDB:
1271 add_workspace(pIorb)->idempotent = 0;
1272 ahci_execute_cdb(vIorb, pIorb);
1273 break;
1274
1275 case IOCM_EXECUTE_ATA:
1276 add_workspace(pIorb)->idempotent = 0;
1277 ahci_execute_ata(vIorb, pIorb);
1278 break;
1279
1280 default:
1281 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1282 iorb_done(vIorb, pIorb);
1283 }
1284}
1285
1286/******************************************************************************
1287 * Add an IORB to the specified queue. This function must be called with the
1288 * adapter-level spinlock acquired.
1289 */
1290void iorb_queue_add(IORB_QUEUE *queue, IORBH FAR16DATA *vIorb, IORBH *pIorb)
1291{
1292 if (iorb_priority(pIorb))
1293 {
1294 /* priority IORB; insert at first position */
1295 pIorb->pNxtIORB = queue->vRoot;
1296 queue->vRoot = vIorb;
1297 }
1298 else
1299 {
1300 /* append IORB to end of queue */
1301 pIorb->pNxtIORB = NULL;
1302
1303 if (queue->vRoot == NULL)
1304 {
1305 queue->vRoot = vIorb;
1306 }
1307 else
1308 {
1309 ((IORBH *)Far16ToFlat(queue->vTail))->pNxtIORB = vIorb;
1310 }
1311 queue->vTail = vIorb;
1312 }
1313
1314 if (D32g_DbgLevel)
1315 {
1316 /* determine queue type (local, driver, abort or port) and minimum debug
1317 * level; otherwise, queue debug prints can become really confusing.
1318 */
1319 char *queue_type;
1320 int min_debug = 7;
1321
1322 if ((u32)queue >> 16 == (u32)&queue >> 16) /* DAZ this is bogus */
1323 {
1324 /* this queue is on the stack */
1325 queue_type = "local";
1326 min_debug = 8;
1327
1328 }
1329 else if (queue == &driver_queue)
1330 {
1331 queue_type = "driver";
1332
1333 }
1334 else if (queue == &abort_queue)
1335 {
1336 queue_type = "abort";
1337 min_debug = 8;
1338
1339 }
1340 else
1341 {
1342 queue_type = "port";
1343 }
1344
1345 DPRINTF(min_debug,"IORB %x queued (cmd=%d/%d queue=%x [%s], timeout=%d)\n",
1346 vIorb, pIorb->CommandCode, pIorb->CommandModifier, queue, queue_type,
1347 pIorb->Timeout);
1348 }
1349}
1350
1351/******************************************************************************
1352 * Remove an IORB from the specified queue. This function must be called with
1353 * the adapter-level spinlock acquired.
1354 */
1355int iorb_queue_del(IORB_QUEUE *queue, IORBH FAR16DATA *vIorb)
1356{
1357 IORBH FAR16DATA *_vIorb;
1358 IORBH FAR16DATA *_vPrev = NULL;
1359 int found = 0;
1360
1361 for (_vIorb = queue->vRoot; _vIorb != NULL; )
1362 {
1363 IORBH *_pIorb = Far16ToFlat(_vIorb);
1364 if (_vIorb == vIorb)
1365 {
1366
1367 /* found the IORB to be removed */
1368 if (_vPrev != NULL)
1369 {
1370 ((IORBH*)Far16ToFlat(_vPrev))->pNxtIORB = _pIorb->pNxtIORB;
1371 }
1372 else
1373 {
1374 queue->vRoot = _pIorb->pNxtIORB;
1375 }
1376 if (_vIorb == queue->vTail)
1377 {
1378 queue->vTail = _vPrev;
1379 }
1380 found = 1;
1381 break;
1382 }
1383 _vPrev = _vIorb;
1384 _vIorb = _pIorb->pNxtIORB;
1385 }
1386
1387 if (found)
1388 {
1389 DPRINTF(8,"IORB %x removed (queue = %x)\n", vIorb, queue);
1390 }
1391 else
1392 {
1393 DPRINTF(2,"IORB %x not found in queue %x\n", vIorb, queue);
1394 }
1395
1396 return(!found);
1397}
1398
1399/******************************************************************************
1400 * Set the error code in the specified IORB
1401 *
1402 * NOTE: This function does *not* call iorb_done(). It merely sets the IORB
1403 * status to the specified error code.
1404 */
1405void iorb_seterr(IORBH *pIorb, USHORT error_code)
1406{
1407 pIorb->ErrorCode = error_code;
1408 pIorb->Status |= IORB_ERROR;
1409}
1410
1411/******************************************************************************
1412 * Mark the specified IORB as done and notify the asynchronous post function,
1413 * if any. The IORB is also removed from the corresponding IORB queue.
1414 *
1415 * NOTES: This function does not clear the Status field; it merely adds the
1416 * IORB_DONE flag.
1417 *
1418 * This function is expected to be called *without* the corresponding
1419 * driver-level drv_lock acquired. It will acquire the spinlock before
1420 * updating the IORB queue and release it before notifying the upstream
1421 * code in order to prevent deadlocks.
1422 *
1423 * Due to this logic, this function is only good for simple task-time
1424 * completions. Functions working on lists of IORBs (such as interrupt
1425 * handlers or context hooks) should call iorb_complete() directly and
1426 * implement their own logic for removing the IORB from the port queue.
1427 * See abort_ctxhook() for an example.
1428 */
1429void iorb_done(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1430{
1431 int a = iorb_unit_adapter(pIorb);
1432 int p = iorb_unit_port(pIorb);
1433
1434 /* remove IORB from corresponding queue */
1435 spin_lock(drv_lock);
1436 if (iorb_driver_level(pIorb))
1437 {
1438 iorb_queue_del(&driver_queue, vIorb);
1439 }
1440 else
1441 {
1442 iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, vIorb);
1443 }
1444 aws_free(add_workspace(pIorb));
1445 spin_unlock(drv_lock);
1446
1447 iorb_complete(vIorb, pIorb);
1448}
1449
1450/******************************************************************************
1451 * Complete an IORB. This should be called without the adapter-level spinlock
1452 * to allow the IORB completion routine to perform whatever processing it
1453 * requires. This implies that the IORB should no longer be in any global
1454 * queue because the IORB completion routine may well reuse the IORB and send
1455 * the next request to us before even returning from this function.
1456 */
1457void iorb_complete(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1458{
1459 pIorb->Status |= IORB_DONE;
1460
1461 DPRINTF(7,"IORB %x complete status=0x%04x error=0x%04x\n",
1462 vIorb, pIorb->Status, pIorb->ErrorCode);
1463
1464 if (pIorb->RequestControl & IORB_ASYNC_POST)
1465 {
1466 Dev32Help_CallFar16((PFNFAR16)pIorb->NotifyAddress, vIorb);
1467 }
1468}
1469
1470/******************************************************************************
1471 * Requeue the specified IORB such that it will be sent downstream for
1472 * processing again. This includes freeing all resources currently allocated
1473 * (timer, buffer, ...) and resetting the flags to 0. The driver-level
1474 * spinlock must be acquired when calling this function.
1475 *
1476 * The following flags are preserved:
1477 * - no_ncq
1478 */
1479void iorb_requeue(IORBH *pIorb)
1480{
1481 ADD_WORKSPACE *aws = add_workspace(pIorb);
1482 u16 no_ncq = aws->no_ncq;
1483 u16 unaligned = aws->unaligned;
1484 u16 retries = aws->retries;
1485
1486 aws_free(aws);
1487 memset(aws, 0x00, sizeof(*aws));
1488
1489 aws->no_ncq = no_ncq;
1490 aws->unaligned = unaligned;
1491 aws->retries = retries;
1492}
1493
1494/******************************************************************************
1495 * Free resources in ADD workspace (timer, buffer, ...). This function should
1496 * be called with the spinlock held to prevent race conditions.
1497 */
1498void aws_free(ADD_WORKSPACE *aws)
1499{
1500 if (aws->timer != 0)
1501 {
1502 Timer_CancelTimer(aws->timer);
1503 aws->timer = 0;
1504 }
1505
1506 if (aws->buf != NULL)
1507 {
1508 MemFree(aws->buf);
1509 aws->buf = NULL;
1510 }
1511}
1512
1513/******************************************************************************
1514 * Lock the adapter, waiting for availability if necessary. This is expected
1515 * to be called at task/request time without the driver-level spinlock
1516 * acquired. Don't call at interrupt time.
1517 */
1518void lock_adapter(AD_INFO *ai)
1519{
1520 TIMER Timer;
1521
1522 spin_lock(drv_lock);
1523 while (ai->busy)
1524 {
1525 spin_unlock(drv_lock);
1526 TimerInit(&Timer, 250);
1527 while (!TimerCheckAndBlock(&Timer));
1528 spin_lock(drv_lock);
1529 }
1530 ai->busy = 1;
1531 spin_unlock(drv_lock);
1532}
1533
1534/******************************************************************************
1535 * Unlock adapter (i.e. reset busy flag)
1536 */
1537void unlock_adapter(AD_INFO *ai)
1538{
1539 ai->busy = 0;
1540}
1541
1542/******************************************************************************
1543 * Timeout handler for I/O commands. Since timeout handling can involve
1544 * lengthy operations like port resets, the main code is located in a
1545 * separate function which is invoked via a context hook.
1546 */
1547void __syscall timeout_callback(ULONG timer_handle, ULONG p1)
1548{
1549 IORBH FAR16DATA *vIorb = (IORBH FAR16DATA *)CastULONGToFar16(p1);
1550 IORBH *pIorb = Far16ToFlat(vIorb);
1551 int a = iorb_unit_adapter(pIorb);
1552 int p = iorb_unit_port(pIorb);
1553
1554 Timer_CancelTimer(timer_handle);
1555 DPRINTF(0,"timeout for IORB %x\n", vIorb);
1556
1557 /* Move the timed-out IORB to the abort queue. Since it's possible that the
1558 * IORB has completed after the timeout has expired but before we got to
1559 * this line of code, we'll check the return code of iorb_queue_del(): If it
1560 * returns an error, the IORB must have completed a few microseconds ago and
1561 * there is no timeout.
1562 */
1563 spin_lock(drv_lock);
1564 if (iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, vIorb) == 0)
1565 {
1566 iorb_queue_add(&abort_queue, vIorb, pIorb);
1567 pIorb->ErrorCode = IOERR_ADAPTER_TIMEOUT;
1568 }
1569 spin_unlock(drv_lock);
1570
1571 /* Trigger abort processing function. We don't really care whether this
1572 * succeeds because the only reason why it would fail should be multiple
1573 * calls to DevHelp_ArmCtxHook() before the context hook had a chance to
1574 * start executing, which leaves two scenarios:
1575 *
1576 * - We succeeded in arming the context hook. Fine.
1577 *
1578 * - We armed the context hook a second time before it had a chance to
1579 * start executing. In this case, the already scheduled context hook
1580 * will process our IORB as well.
1581 */
1582 KernArmHook(reset_ctxhook_h, 0, 0);
1583
1584 /* Set up a watchdog timer which calls the context hook manually in case
1585 * some kernel thread is looping around the IORB_COMPLETE status bit
1586 * without yielding the CPU (kernel threads don't preempt). This shouldn't
1587 * happen per design because kernel threads are supposed to yield but it
1588 * does in the early boot phase.
1589 */
1590 Timer_StartTimerMS(&th_reset_watchdog, 5000, reset_watchdog, 0);
1591}
1592
1593/******************************************************************************
1594 * Reset handler watchdog. If a timeout occurs, a context hook is armed which
1595 * will execute as soon as a kernel thread yields the CPU. However, some
1596 * kernel components won't yield the CPU during the early boot phase and the
1597 * only way to kick some sense into those components is to run the context
1598 * hook right inside this timer callback. Not exactly pretty, especially
1599 * considering the fact that context hooks were implemented to prevent running
1600 * lengthy operations like a port reset at interrupt time, but without this
1601 * watchdog mechanism we run the risk of getting completely stalled by device
1602 * problems during the early boot phase.
1603 */
1604void __syscall reset_watchdog(ULONG timer_handle, ULONG p1)
1605{
1606 /* reset watchdog timer */
1607 Timer_CancelTimer(timer_handle);
1608 DPRINTF(0,"reset watchdog invoked\n");
1609
1610 /* call context hook manually */
1611 reset_ctxhook(0);
1612}
1613
1614/******************************************************************************
1615 * Add unit info to ADAPTERINFO array (IOCM_GET_DEVICE_TABLE requests). The
1616 * adapter info array in the device table, dt->pAdapter[], is expected to be
1617 * initialized for the specified index (dta).
1618 *
1619 * Please note that the device table adapter index, dta, is not always equal
1620 * to the physical adapter index, a: if SCSI emulation has been activated, the
1621 * last reported adapter is a virtual SCSI adapter and the physical adapter
1622 * indexes for those units are, of course, different from the device table
1623 * index of the virtual SCSI adapter.
1624 */
1625static int add_unit_info(IORB_CONFIGURATION *pIorb_conf, int dta,
1626 int a, int p, int d, int scsi_id)
1627{
1628 DEVICETABLE *pDt = Far16ToFlat(pIorb_conf->pDeviceTable);
1629 ADAPTERINFO *pPtr;
1630 UNITINFO *pUi;
1631 AD_INFO *ai = ad_infos + a;
1632
1633 pPtr = (ADAPTERINFO *)MakeFlatFromNear16(pIorb_conf->pDeviceTable, pDt->pAdapter[dta]);
1634 //DPRINTF(2,"add_unit_info: ptr=%x dta=%x pAdapter[dta]=%x pDeviceTable=%x\n",
1635 // ptr, dta, dt->pAdapter[dta], iorb_conf->pDeviceTable);
1636
1637 pUi = &pPtr->UnitInfo[pPtr->AdapterUnits];
1638
1639 if ((u32)(pUi + 1) - (u32)pDt > pIorb_conf->DeviceTableLen)
1640 {
1641 DPRINTF(0,"error: device table provided by DASD too small\n");
1642 iorb_seterr(&pIorb_conf->iorbh, IOERR_CMD_SW_RESOURCE);
1643 return(-1);
1644 }
1645
1646 if (ai->ports[p].devs[d].unit_info == NULL)
1647 {
1648 /* provide original information about this device (unit) */
1649 memset(pUi, 0x00, sizeof(*pUi));
1650 pUi->AdapterIndex = dta; /* device table adapter index */
1651 pUi->UnitHandle = iorb_unit(a, p, d); /* physical adapter index */
1652 pUi->UnitIndex = pPtr->AdapterUnits;
1653 pUi->UnitType = ai->ports[p].devs[d].dev_type;
1654 pUi->QueuingCount = ai->ports[p].devs[d].ncq_max;
1655 if (ai->ports[p].devs[d].removable)
1656 {
1657 pUi->UnitFlags |= UF_REMOVABLE;
1658 }
1659 if (scsi_id > 0) {
1660 /* set fake SCSI ID for this unit */
1661 pUi->UnitSCSITargetID = scsi_id;
1662 }
1663 }
1664 else
1665 {
1666 /* copy updated device (unit) information (IOCM_CHANGE_UNITINFO) */
1667 memcpy(pUi, ai->ports[p].devs[d].unit_info, sizeof(*pUi));
1668 }
1669
1670 pPtr->AdapterUnits++;
1671 return(0);
1672}
1673