source: trunk/src/os2ahci/os2ahci.c@205

Last change on this file since 205 was 205, checked in by David Azarewicz, 5 years ago

Fixed ADD RM id.

File size: 50.6 KB
1/**
2 * os2ahci.c - main file for os2ahci driver
3 *
4 * Copyright (c) 2011 thi.guten Software Development
5 * Copyright (c) 2011 Mensys B.V.
6 * Copyright (c) 2013-2021 David Azarewicz <david@88watts.net>
7 *
8 * Authors: Christian Mueller, Markus Thielen
9 *
10 * Parts copied from/inspired by the Linux AHCI driver;
11 * those parts are (c) Linux AHCI/ATA maintainers
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include "os2ahci.h"
29#include "ioctl.h"
30#include "version.h"
31#include "devhdr.h"
32
33/* set two-dimensional array of port options */
34#define set_port_option(opt, val) \
35 if (adapter_index == -1) { \
36 /* set option for all adapters and ports */ \
37 memset(opt, val, sizeof(opt)); \
38 } else if (port_index == -1) { \
39 /* set option for all ports on current adapter */ \
40 memset(opt[adapter_index], val, sizeof(*opt)); \
41 } else { \
42 /* set option for specific port */ \
43 opt[adapter_index][port_index] = val; \
44 }
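/* Illustrative example (added; not part of the original source): the macro
 * above is driven by the /A: and /P: command-line switches parsed in
 * init_drv() below. With "/A:1 /P:3" in effect, a call such as
 *
 *     set_port_option(enable_ncq, 1);
 *
 * reduces to enable_ncq[1][3] = 1. With only "/A:1" given (port_index == -1)
 * it becomes memset(enable_ncq[1], 1, sizeof(*enable_ncq)), i.e. all ports on
 * adapter 1; with neither switch set it applies to all adapters and ports.
 */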
45
46#define FLAG_KRNL_EXIT_ADD 0x1000
47#define FLAG_KRNL_EXIT_REMOVE 0x2000
48
 49#define TYPE_KRNL_EXIT_NMI 0x0000 /* non-maskable interrupts */
50#define TYPE_KRNL_EXIT_SFF 0x0001 /* system fatal faults */
51#define TYPE_KRNL_EXIT_PROCDUMP 0x0002
52#define TYPE_KRNL_EXIT_DYN 0x0003
53#define TYPE_KRNL_EXIT_INT13 0x0004 /* enable int13 IO */
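/* Note (added for clarity): FLAG_KRNL_EXIT_ADD and TYPE_KRNL_EXIT_INT13 are
 * the values this driver passes to Dev32Help_RegisterKrnlExit() near the end
 * of init_drv() to hook the kernel exit path for trap dumps.
 */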
54
55extern int SetPsdPutc(void);
56static int add_unit_info(IORB_CONFIGURATION *iorb_conf, int dt_ai, int a, int p, int d, int scsi_id);
57
58int thorough_scan = 1; /* if != 0, perform thorough PCI scan */
59int init_reset = 1; /* if != 0, reset ports during init */
60int force_write_cache; /* if != 0, force write cache */
61int verbosity = 0; /* default is quiet. 1=show sign on banner, >1=show adapter info during boot */
62int use_mbr_test = 1;
63
64HDRIVER rm_drvh; /* resource manager driver handle */
65USHORT add_handle; /* driver handle (RegisterDeviceClass) */
66char drv_name[] = "OS2AHCI"; /* driver name as string */
67
68/* resource manager driver information structure */
69static DRIVERSTRUCT rm_drvinfo =
70{
71 NULL, /* We cannot do Flat to Far16 conversion at compile time */
72 NULL, /* so we put NULLs in all the Far16 fields and then fill */
73 NULL, /* them in at run time */
74 DMAJOR,
75 DMINOR,
76 BLD_YEAR, BLD_MONTH, BLD_DAY,
77 0,
78 DRT_ADDDM,
79 DRS_ADD,
80 NULL
81};
82
83SpinLock_t drv_lock; /* driver-level spinlock */
84IORB_QUEUE driver_queue; /* driver-level IORB queue */
85AD_INFO ad_infos[MAX_AD]; /* adapter information list */
86int ad_info_cnt; /* number of entries in ad_infos[] */
87u16 ad_ignore; /* bitmap with adapter indexes to ignore */
88int init_complete; /* if != 0, initialization has completed */
89int suspended;
90int resume_sleep_flag;
91
 92/* adapter/port-specific options saved when parsing the command line */
93u8 emulate_scsi[MAX_AD][AHCI_MAX_PORTS];
94u8 enable_ncq[MAX_AD][AHCI_MAX_PORTS];
95u8 link_speed[MAX_AD][AHCI_MAX_PORTS];
96u8 link_power[MAX_AD][AHCI_MAX_PORTS];
97u8 track_size[MAX_AD][AHCI_MAX_PORTS];
98u8 port_ignore[MAX_AD][AHCI_MAX_PORTS];
99
100char BldLevel[] = BLDLEVEL;
101
102/******************************************************************************
103 * OS/2 device driver main strategy function.
104 *
105 * NOTE: this is also used as the IDC entry point. We expect an IOCTL request
106 * packet for IDC calls, so they can be handled by gen_ioctl.
107 */
108void StrategyHandler(REQPACKET *prp)
109{
110 u16 rc;
111
112 switch (prp->bCommand)
113 {
114 case STRATEGY_BASEDEVINIT:
115 rc = init_drv(prp);
116 break;
117
118 case STRATEGY_SHUTDOWN:
119 rc = exit_drv(prp->save_restore.Function);
120 break;
121
122 case STRATEGY_GENIOCTL:
123 rc = gen_ioctl(prp);
124 break;
125
126 case STRATEGY_OPEN:
127 build_user_info();
128 rc = RPDONE;
129 break;
130
131 case STRATEGY_READ:
132 rc = char_dev_input(prp);
133 break;
134
135 case STRATEGY_SAVERESTORE:
136 rc = sr_drv(prp->save_restore.Function);
137 break;
138
139 case STRATEGY_INITCOMPLETE:
140 case STRATEGY_CLOSE:
141 case STRATEGY_INPUTSTATUS:
142 case STRATEGY_FLUSHINPUT:
143 /* noop */
144 rc = RPDONE;
145 break;
146
147 default:
148 rc = RPDONE | RPERR_BADCOMMAND;
149 break;
150 }
151
152 prp->usStatus = rc;
153}
154
155void IdcHandler(REQPACKET *prp)
156{
157 StrategyHandler(prp);
158}
159
160/******************************************************************************
 161 * Initialize the os2ahci driver. This includes command line parsing, scanning
162 * the PCI bus for supported AHCI adapters, etc.
163 */
164USHORT init_drv(REQPACKET *req)
165{
166 static int init_drv_called;
167 static int init_drv_failed;
168 APIRET rmrc;
169 const char *pszCmdLine, *cmd_line;
170 int adapter_index = -1;
171 int port_index = -1;
172 int iInvertOption;
173 int iStatus;
174
175 if (init_drv_called)
176 {
177 /* This is the init call for the second (IBMS506$) character
178 * device driver. If the main driver failed initialization, fail this
179 * one as well.
180 */
181 return(RPDONE | ((init_drv_failed) ? RPERR_INITFAIL : 0));
182 }
183 D32g_DbgLevel = 0;
184 init_drv_called = 1;
185 suspended = 0;
186 resume_sleep_flag = 0;
187 memset(ad_infos, 0, sizeof(ad_infos));
188 memset(emulate_scsi, 1, sizeof(emulate_scsi)); /* set default enabled */
189 UtSetDriverName("OS2AHCI$");
190 Header.ulCaps |= DEV_ADAPTER_DD; /* DAZ This flag is not really needed. */
191
192 /* create driver-level spinlock */
193 KernAllocSpinLock(&drv_lock);
194
195 /* register driver with resource manager */
196 rm_drvinfo.DrvrName = drv_name;
197 rm_drvinfo.DrvrDescript = "AHCI SATA Driver";
198 rm_drvinfo.VendorName = DVENDOR;
199 if ((rmrc = RMCreateDriver(&rm_drvinfo, &rm_drvh)) != RMRC_SUCCESS)
200 {
201 iprintf("%s: failed to register driver with resource manager (rc = %d)", drv_name, rmrc);
202 goto init_fail;
203 }
204
205 pszCmdLine = cmd_line = req->init_in.szArgs;
206 iStatus = 0;
207 while (*pszCmdLine)
208 {
209 if (*pszCmdLine++ != '/') continue; /* Ignore anything that doesn't start with '/' */
210 /* pszCmdLine now points to first char of argument */
211
212 if ((iInvertOption = (*pszCmdLine == '!')) != 0) pszCmdLine++;
213
214 if (ArgCmp(pszCmdLine, "B:"))
215 {
216 pszCmdLine += 2;
217 InitComPort(strtol(pszCmdLine, &pszCmdLine, 0));
218 continue;
219 }
220
221 if (ArgCmp(pszCmdLine, "C:"))
222 {
223 pszCmdLine += 2;
224 /* set COM port base address for debug messages */
225 D32g_ComBase = strtol(pszCmdLine, &pszCmdLine, 0);
226 #ifdef TESTVER
227 if (D32g_ComBase == 0) SetPsdPutc();
228 #endif
229 if (D32g_ComBase == 1) D32g_ComBase = 0x3f8;
230 if (D32g_ComBase == 2) D32g_ComBase = 0x2f8;
231 continue;
232 }
233
234 if (ArgCmp(pszCmdLine, "D"))
235 {
236 pszCmdLine++;
237 if (*pszCmdLine == ':')
238 {
239 pszCmdLine++;
240 D32g_DbgLevel = strtol(pszCmdLine, &pszCmdLine, 0);
241 }
242 else D32g_DbgLevel++; /* increase debug level */
243 continue;
244 }
245
246 if (ArgCmp(pszCmdLine, "G:"))
247 {
248 u16 usVendor;
249 u16 usDevice;
250
251 pszCmdLine += 2;
 252 /* add specified PCI ID as a supported generic AHCI adapter */
253 usVendor = strtol(pszCmdLine, &pszCmdLine, 16);
254 if (*pszCmdLine != ':') break;
255 pszCmdLine++;
256 usDevice = strtol(pszCmdLine, &pszCmdLine, 16);
257 if (add_pci_id(usVendor, usDevice))
258 {
259 iprintf("%s: failed to add PCI ID %04x:%04x", drv_name, usVendor, usDevice);
260 iStatus = 1;
261 }
262 thorough_scan = 1;
263 continue;
264 }
265
266 if (ArgCmp(pszCmdLine, "T"))
267 {
268 pszCmdLine++;
269 /* perform thorough PCI scan (i.e. look for individual supported PCI IDs) */
270 thorough_scan = !iInvertOption;
271 continue;
272 }
273
274 if (ArgCmp(pszCmdLine, "R"))
275 {
276 pszCmdLine++;
277 /* reset ports during initialization */
278 init_reset = !iInvertOption;
279 continue;
280 }
281
282 if (ArgCmp(pszCmdLine, "F"))
283 {
284 pszCmdLine++;
285 /* force write cache regardless of IORB flags */
286 force_write_cache = 1;
287 continue;
288 }
289
290 if (ArgCmp(pszCmdLine, "A:"))
291 {
292 pszCmdLine += 2;
293 /* set adapter index for adapter and port-related options */
294 adapter_index = strtol(pszCmdLine, &pszCmdLine, 0);
295 if (adapter_index < 0 || adapter_index >= MAX_AD)
296 {
297 iprintf("%s: invalid adapter index (%d)", drv_name, adapter_index);
298 iStatus = 1;
299 }
300 continue;
301 }
302
303 if (ArgCmp(pszCmdLine, "P:"))
304 {
305 pszCmdLine += 2;
306 /* set port index for port-related options */
307 port_index = strtol(pszCmdLine, &pszCmdLine, 0);
308 if (port_index < 0 || port_index >= AHCI_MAX_PORTS)
309 {
310 iprintf("%s: invalid port index (%d)", drv_name, port_index);
311 iStatus = 1;
312 }
313 continue;
314 }
315
316 if (ArgCmp(pszCmdLine, "I"))
317 {
318 pszCmdLine++;
319 /* ignore current adapter index */
320 if (adapter_index >= 0)
321 {
322 if (port_index >= 0) port_ignore[adapter_index][port_index] = !iInvertOption;
323 else ad_ignore |= 1U << adapter_index;
324 }
325 continue;
326 }
327
328 if (ArgCmp(pszCmdLine, "S"))
329 {
330 pszCmdLine++;
331 /* enable SCSI emulation for ATAPI devices */
332 set_port_option(emulate_scsi, !iInvertOption);
333 continue;
334 }
335
336 if (ArgCmp(pszCmdLine, "N"))
337 {
338 pszCmdLine++;
339 /* enable NCQ */
340 set_port_option(enable_ncq, !iInvertOption);
341 continue;
342 }
343
344 if (ArgCmp(pszCmdLine, "LS:"))
345 {
346 int optval;
347
348 pszCmdLine += 3;
349 /* set link speed */
350 optval = strtol(pszCmdLine, &pszCmdLine, 0);
351 set_port_option(link_speed, optval);
352 /* need to reset the port in order to establish link settings */
353 init_reset = 1;
354 continue;
355 }
356
357 if (ArgCmp(pszCmdLine, "LP:"))
358 {
359 int optval;
360
361 pszCmdLine += 3;
362 /* set power management */
363 optval = strtol(pszCmdLine, &pszCmdLine, 0);
364 set_port_option(link_power, optval);
365 /* need to reset the port in order to establish link settings */
366 init_reset = 1;
367 continue;
368 }
369
370 if (ArgCmp(pszCmdLine, "4"))
371 {
372 pszCmdLine++;
373 /* enable 4K sector geometry enhancement (track size = 56) */
374 if (!iInvertOption) set_port_option(track_size, 56);
375 continue;
376 }
377
378 if (ArgCmp(pszCmdLine, "U"))
379 {
380 pszCmdLine++;
381 /* Specify to use the MBR test to ignore non-MBR disks.
382 * Default is on.
383 */
384 use_mbr_test = !iInvertOption;
385 continue;
386 }
387
388 if (ArgCmp(pszCmdLine, "V"))
389 {
390 pszCmdLine++;
391 if (*pszCmdLine == ':')
392 {
393 pszCmdLine++;
394 verbosity = strtol(pszCmdLine, &pszCmdLine, 0);
395 }
396 else verbosity++; /* increase verbosity level */
397 continue;
398 }
399
400 if (ArgCmp(pszCmdLine, "W"))
401 {
402 pszCmdLine++;
403 /* Specify to allow the trace buffer to wrap when full. */
404 D32g_DbgBufWrap = !iInvertOption;
405 continue;
406 }
407
408 iprintf("Unrecognized switch: %s", pszCmdLine-1);
409 iStatus = 1; /* unrecognized argument */
410 }
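/* Illustrative example (the .ADD file name is an assumption; the switches are
 * the ones handled in the loop above): a CONFIG.SYS line such as
 *
 *     BASEDEV=OS2AHCI.ADD /V /A:0 /P:2 /!N /LS:1
 *
 * raises the verbosity by one, then scopes the following options to adapter 0
 * port 2: /!N clears enable_ncq[0][2] and /LS:1 sets link_speed[0][2] = 1,
 * which also forces init_reset = 1 so the new link settings take effect.
 */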
411
412 if (iStatus) goto init_fail;
413
414 dprintf(0,"BldLevel: %s\n", BldLevel);
415 dprintf(0,"CmdLine: %s\n", cmd_line);
416 /*
417 if (sizeof(ADD_WORKSPACE) > ADD_WORKSPACE_SIZE)
418 {
419 dprintf(0,"ADD_WORKSPACE size is too big! %d>16\n", sizeof(ADD_WORKSPACE));
420 goto init_fail;
421 }
422 */
423
424 /* print initialization message */
425 ciprintf("%s driver version %d.%02d", drv_name, DMAJOR, DMINOR);
426
427 #ifdef TESTVER
428 #include "testver.c"
429 #endif
430
431 /* scan PCI bus for supported devices */
432 scan_pci_bus();
433
434 if (ad_info_cnt > 0)
435 {
436 /* initialization succeeded and we found at least one AHCI adapter */
437
438 if (Dev32Help_RegisterDeviceClass(drv_name, add_entry, 0, 1, &add_handle))
439 {
440 iprintf("%s: couldn't register device class", drv_name);
441 goto init_fail;
442 }
443
444 Timer_InitTimer(TIMER_COUNT);
445
446 /* allocate context hooks */
447 KernAllocateContextHook(restart_ctxhook, 0, &restart_ctxhook_h);
448 KernAllocateContextHook(reset_ctxhook, 0, &reset_ctxhook_h);
449 KernAllocateContextHook(engine_ctxhook, 0, &engine_ctxhook_h);
450
451 /* register kernel exit routine for trap dumps */
452 Dev32Help_RegisterKrnlExit(shutdown_driver, FLAG_KRNL_EXIT_ADD, TYPE_KRNL_EXIT_INT13);
453
454 return(RPDONE);
455 }
456 else
457 {
458 /* no adapters found */
459 ciprintf("%s: No adapters found.", drv_name);
460 }
461
462init_fail:
463 /* initialization failed; set segment sizes to 0 and return error */
464 init_drv_failed = 1;
465
466 if (rm_drvh != 0)
467 {
468 /* remove driver from resource manager */
469 RMDestroyDriver(rm_drvh);
470 }
471
472 ciprintf("%s driver *not* installed", drv_name);
473 return(RPDONE | RPERR_INITFAIL);
474}
475
476/******************************************************************************
477 * Generic IOCTL via character device driver. IOCTLs are used to control the
478 * driver operation and to execute native ATA and ATAPI (SCSI) commands from
479 * ring 3 applications. On top of that, some predefined IOCTLs (e.g. SMART
480 * commands for ATA disks) are implemented here.
481 */
482USHORT gen_ioctl(REQPACKET *ioctl)
483{
484 DPRINTF(2,"IOCTL 0x%x/0x%x\n", ioctl->ioctl.bCategory, ioctl->ioctl.bFunction);
485
486 switch (ioctl->ioctl.bCategory)
487 {
488 case OS2AHCI_IOCTL_CATEGORY:
489 switch (ioctl->ioctl.bFunction)
490 {
491 case OS2AHCI_IOCTL_GET_DEVLIST:
492 return(ioctl_get_devlist(ioctl));
493
494 case OS2AHCI_IOCTL_PASSTHROUGH:
495 return(ioctl_passthrough(ioctl));
496 }
497 break;
498
499 case DSKSP_CAT_GENERIC:
500 return(ioctl_gen_dsk(ioctl));
501
502 case DSKSP_CAT_SMART:
503 return(ioctl_smart(ioctl));
504 }
505
506 return(RPDONE | RPERR_BADCOMMAND);
507}
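/* Illustrative ring-3 usage sketch (added; not part of the original source).
 * The category/function values come from ioctl.h; whether GET_DEVLIST takes a
 * parameter packet and the layout of the returned data are not spelled out
 * here, so the buffer handling below is only a placeholder:
 *
 *     HFILE hDrv;
 *     ULONG ulAction, cbParm = 0, cbData;
 *     char buf[1024];
 *
 *     DosOpen("OS2AHCI$", &hDrv, &ulAction, 0, FILE_NORMAL,
 *             OPEN_ACTION_OPEN_IF_EXISTS,
 *             OPEN_ACCESS_READWRITE | OPEN_SHARE_DENYNONE, NULL);
 *     cbData = sizeof(buf);
 *     DosDevIOCtl(hDrv, OS2AHCI_IOCTL_CATEGORY, OS2AHCI_IOCTL_GET_DEVLIST,
 *                 NULL, 0, &cbParm, buf, sizeof(buf), &cbData);
 *     DosClose(hDrv);
 */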
508
509/******************************************************************************
510 * Read from character device. If tracing is on (internal ring buffer trace),
511 * we return data from the trace buffer; if not, we might return a device
512 * dump similar to IBM1S506.ADD/DANIS506.ADD (TODO).
513 */
514USHORT char_dev_input(REQPACKET *pPacket)
515{
516 void *LinAdr;
517
518 if (Dev32Help_PhysToLin(pPacket->io.ulAddress, pPacket->io.usCount, &LinAdr))
519 {
520 pPacket->io.usCount = 0;
521 return RPDONE | RPERR_GENERAL;
522 }
523
524 pPacket->io.usCount = dCopyToUser(LinAdr, pPacket->io.usCount);
525
526 return RPDONE;
527}
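/* Usage note (assumption, added for clarity): since this is the character
 * device read path, something as simple as "COPY OS2AHCI$ TRACE.LOG" from an
 * OS/2 command prompt should pull the contents of the internal trace buffer
 * out of the driver via dCopyToUser() above.
 */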
528
529/******************************************************************************
530 * Device driver exit handler. This handler is called when OS/2 shuts down and
531 * flushes the write caches of all attached devices. Since this is effectively
 532 * the same as what we do when suspending, we'll call out to the corresponding
 533 * suspend function.
534 *
535 * NOTE: Errors are ignored because there's no way we could stop the shutdown
536 * or do something about the error, unless retrying endlessly is
537 * considered an option.
538 */
539USHORT exit_drv(int func)
540{
541 DPRINTF(2,"exit_drv(%d) called\n", func);
542
543 if (func == 0)
544 {
545 /* we're only interested in the second phase of the shutdown */
546 return(RPDONE);
547 }
548
549 suspend();
550 return(RPDONE);
551}
552
553/******************************************************************************
554 * Device driver suspend/resume handler. This handler is called when ACPI is
555 * executing a suspend or resume.
556 */
557USHORT sr_drv(int func)
558{
559 DPRINTF(2,"sr_drv(%d) called\n", func);
560
561 if (func) resume();
562 else suspend();
563
564 return(RPDONE);
565}
566
567/******************************************************************************
568 * ADD entry point. This is the main entry point for all ADD requests. Due to
569 * the asynchronous nature of ADD drivers, this function primarily queues the
570 * IORB(s) to the corresponding adapter or port queues, then triggers the
571 * state machine to initiate processing queued IORBs.
572 *
573 * NOTE: In order to prevent race conditions or engine stalls, certain rules
574 * around locking, unlocking and IORB handling in general have been
575 * established. Refer to the comments in "trigger_engine()" for
576 * details.
577 */
578void add_entry(IORBH FAR16DATA *vFirstIorb)
579{
580 IORBH FAR16DATA *vIorb;
581 IORBH FAR16DATA *vNext = FAR16NULL;
582
583 spin_lock(drv_lock);
584
585 for (vIorb=vFirstIorb; vIorb!=FAR16NULL; vIorb=vNext)
586 {
587 IORBH *pIorb = Far16ToFlat(vIorb);
588
589 /* Queue this IORB. Queues primarily exist on port level but there are
590 * some requests which affect the whole driver, most notably
591 * IOCC_CONFIGURATION. In either case, adding the IORB to the driver or
592 * port queue will change the links, thus we need to save the original
593 * link in 'vNext'.
594 */
595 if (pIorb->RequestControl & IORB_CHAIN) vNext = pIorb->f16NxtIORB;
596 else vNext = (IORBH FAR16DATA *)0;
597
598 pIorb->Status = 0;
599 pIorb->ErrorCode = 0;
600 memset(&pIorb->ADDWorkSpace, 0x00, sizeof(ADD_WORKSPACE));
601
602 #ifdef DEBUG
603 DumpIorb(pIorb); /* DAZ TESTING */
604 #endif
605
606 if (iorb_driver_level(pIorb))
607 {
608 /* driver-level IORB */
609 pIorb->UnitHandle = 0;
610 iorb_queue_add(&driver_queue, vIorb, pIorb);
611 }
612 else
613 {
614 /* port-level IORB */
615 int a = iorb_unit_adapter(pIorb);
616 int p = iorb_unit_port(pIorb);
617 int d = iorb_unit_device(pIorb);
618
619 if (a >= ad_info_cnt ||
620 p > ad_infos[a].port_max ||
621 d > ad_infos[a].ports[p].dev_max ||
622 (ad_infos[a].port_map & (1UL << p)) == 0)
623 {
624 /* unit handle outside of the allowed range */
625 dprintf(0,"warning: IORB for %d.%d.%d out of range\n", a, p, d);
626 pIorb->Status = IORB_ERROR;
627 pIorb->ErrorCode = IOERR_CMD_SYNTAX;
628 iorb_complete(vIorb, pIorb);
629 continue;
630 }
631
632 iorb_queue_add(&ad_infos[a].ports[p].iorb_queue, vIorb, pIorb);
633 }
634 }
635
636 /* trigger state machine */
637 trigger_engine();
638
639 spin_unlock(drv_lock);
640}
641
642/******************************************************************************
643 * Trigger IORB queue engine. This is a wrapper function for trigger_engine_1()
644 * which will try to get all IORBs sent on their way a couple of times. If
645 * there are still IORBs ready for processing after this, this function will
646 * hand off to a context hook which will continue to trigger the engine until
647 * all IORBs have been sent.
648 *
649 * NOTE: While initialization has not completed (or during suspend/resume
650 * operations), this function will loop indefinitely because we can't
651 * rely on interrupt handlers or context hooks and complex IORBs
652 * requiring multiple requeues would eventually hang and time out if
653 * we stopped triggering here.
654 */
655void trigger_engine(void)
656{
657 int i;
658
659 for (i = 0; i < 3 || !init_complete; i++)
660 {
661 if (trigger_engine_1() == 0)
662 {
663 /* done -- all IORBs have been sent on their way */
664 return;
665 }
666 }
667
668 /* Something keeps bouncing; hand off to the engine context hook which will
669 * keep trying in the background.
670 */
671 KernArmHook(engine_ctxhook_h, 0, 0);
672}
673
674/******************************************************************************
675 * Trigger IORB queue engine in order to send commands in the driver/port IORB
676 * queues to the AHCI hardware. This function will return the number of IORBs
677 * sent. Keep in mind that IORBs might "bounce" if the adapter/port is not in
678 * a state to accept the command, thus it might take quite a few calls to get
679 * all IORBs on their way. This is why there's a wrapper function which tries
680 * it a few times, then hands off to a context hook which will keep trying in
681 * the background.
682 *
683 * IORBs might complete before send_iorb() has returned, at any time during
684 * interrupt processing or on another CPU on SMP systems. IORB completion
685 * means modifications to the corresponding IORB queue (the completed IORB
686 * is removed from the queue) thus we need to protect the IORB queues from
687 * race conditions. The safest approach short of keeping the driver-level
 688 * spinlock acquired permanently is to keep it throughout this function and
689 * release it temporarily in send_iorb().
690 *
 691 * This implies that the handler functions are fully responsible for acquiring
692 * the driver-level spinlock when they need it, and for releasing it again.
693 *
694 * As a rule of thumb, get the driver-level spinlock whenever accessing
695 * volatile variables (IORB queues, values in ad_info[], ...).
696 *
697 * Additional Notes:
698 *
 699 * - This function is expected to be called with the spinlock acquired
700 *
701 * - Adapters can be flagged as 'busy' which means no new IORBs are sent (they
702 * just remain in the queue). This can be used to release the driver-level
703 * spinlock while making sure no new IORBs are going to hit the hardware.
704 * In order to prevent engine stalls, all handlers using this functionality
705 * need to invoke trigger_engine() after resetting the busy flag.
706 *
707 * - Driver-level IORBs are not synchronized by adapter-level 'busy' flags.
708 * However, the driver-level queue is worked "one entry at a time" which
709 * means that no new IORBs will be queued on the driver-level queue until
710 * the head element has completed processing. This means that driver-
711 * level IORB handlers don't need to protect against each other. But they
 712 * do need to keep in mind interference with port-level IORBs:
713 *
714 * - Driver-level IORB handlers must obtain the spinlock and/or flag all
715 * adapters as 'busy' which are affected by the driver-level IORB
716 *
717 * - Driver-level IORB handlers must not access the hardware of a
718 * particular adapter if it's flagged as 'busy' by another IORB.
719 */
720int trigger_engine_1(void)
721{
722 IORBH FAR16DATA *vIorb;
723 IORBH *pIorb;
724 IORBH FAR16DATA *vNext;
725 int iorbs_sent = 0;
726 int a;
727 int p;
728
729 iorbs_sent = 0;
730
731 /* process driver-level IORBs */
732 if ((vIorb = driver_queue.vRoot) != FAR16NULL)
733 {
734 pIorb = Far16ToFlat(vIorb);
735
736 if (!add_workspace(pIorb)->processing)
737 {
738 send_iorb(vIorb, pIorb);
739 iorbs_sent++;
740 }
741 }
742
743 /* process port-level IORBs */
744 for (a = 0; a < ad_info_cnt; a++)
745 {
746 AD_INFO *ai = ad_infos + a;
747 if (ai->busy)
748 {
749 /* adapter is busy; don't process any IORBs */
750 continue;
751 }
752 for (p = 0; p <= ai->port_max; p++)
753 {
754 /* send all queued IORBs on this port */
755 vNext = FAR16NULL;
756 for (vIorb = ai->ports[p].iorb_queue.vRoot; vIorb != FAR16NULL; vIorb = vNext)
757 {
758 pIorb = Far16ToFlat(vIorb);
759
760 vNext = pIorb->f16NxtIORB;
761 if (!add_workspace(pIorb)->processing)
762 {
763 send_iorb(vIorb, pIorb);
764 iorbs_sent++;
765 }
766 }
767 }
768 }
769
770 return(iorbs_sent);
771}
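/* Sketch of the adapter 'busy' protocol described in the comment block above
 * (added; it mirrors what iocm_device_table() does further down rather than
 * quoting any one handler verbatim):
 *
 *     spin_lock(drv_lock);
 *     ai->busy = 1;             // stop trigger_engine_1() from sending IORBs
 *     spin_unlock(drv_lock);
 *
 *     ... lengthy hardware access without the spinlock held ...
 *
 *     spin_lock(drv_lock);
 *     ai->busy = 0;
 *     spin_unlock(drv_lock);
 *     trigger_engine();         // kick the engine to avoid a stall
 */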
772
773/******************************************************************************
774 * Send a single IORB to the corresponding AHCI adapter/port. This is just a
775 * switch board for calling the corresponding iocc_*() handler function.
776 *
777 * NOTE: This function is expected to be called with the driver-level spinlock
 778 * acquired. It will release it before calling any of the handler
 779 * functions and re-acquire it when done.
780 */
781void send_iorb(IORBH FAR16DATA *vIorb, IORBH *pIorb)
782{
783 /* Mark IORB as "processing" before doing anything else. Once the IORB is
784 * marked as "processing", we can release the spinlock because subsequent
785 * invocations of trigger_engine() (e.g. at interrupt time) will ignore this
786 * IORB.
787 */
788 add_workspace(pIorb)->processing = 1;
789 spin_unlock(drv_lock);
790
791 switch (pIorb->CommandCode)
792 {
793 case IOCC_CONFIGURATION:
794 iocc_configuration(vIorb, pIorb);
795 break;
796
797 case IOCC_DEVICE_CONTROL:
798 iocc_device_control(vIorb, pIorb);
799 break;
800
801 case IOCC_UNIT_CONTROL:
802 iocc_unit_control(vIorb, pIorb);
803 break;
804
805 case IOCC_GEOMETRY:
806 iocc_geometry(vIorb, pIorb);
807 break;
808
809 case IOCC_EXECUTE_IO:
810 iocc_execute_io(vIorb, pIorb);
811 break;
812
813 case IOCC_UNIT_STATUS:
814 iocc_unit_status(vIorb, pIorb);
815 break;
816
817 case IOCC_ADAPTER_PASSTHRU:
818 iocc_adapter_passthru(vIorb, pIorb);
819 break;
820
821 default:
822 /* unsupported call */
823 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
824 iorb_done(vIorb, pIorb);
825 break;
826 }
827
 828 /* re-acquire spinlock before returning to trigger_engine() */
829 spin_lock(drv_lock);
830}
831
832/******************************************************************************
833 * Handle IOCC_CONFIGURATION requests.
834 */
835void iocc_configuration(IORBH FAR16DATA *vIorb, IORBH *pIorb)
836{
837 int a;
838
839 switch (pIorb->CommandModifier)
840 {
841
842 case IOCM_COMPLETE_INIT:
843 /* Complete initialization. From now on, we won't have to restore the BIOS
844 * configuration after each command and we're fully operational (i.e. will
845 * use interrupts, timers and context hooks instead of polling).
846 */
847 if (!init_complete)
848 {
849 DPRINTF(1,"leaving initialization mode\n");
850 for (a = 0; a < ad_info_cnt; a++)
851 {
852 lock_adapter(ad_infos + a);
853 ahci_complete_init(ad_infos + a);
854 }
855 init_complete = 1;
856
857 /* release all adapters */
858 for (a = 0; a < ad_info_cnt; a++)
859 {
860 unlock_adapter(ad_infos + a);
861 }
862 DPRINTF(1,"leaving initialization mode 2\n");
863
864 #ifdef LEGACY_APM
865 /* register APM hook */
866 apm_init();
867 #endif
868 }
869 iorb_done(vIorb, pIorb);
870 break;
871
872 case IOCM_GET_DEVICE_TABLE:
873 /* construct a device table */
874 iocm_device_table(vIorb, pIorb);
875 break;
876
877 default:
878 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
879 iorb_done(vIorb, pIorb);
880 break;
881 }
882}
883
884/******************************************************************************
885 * Handle IOCC_DEVICE_CONTROL requests.
886 */
887void iocc_device_control(IORBH FAR16DATA *vIorb, IORBH *pIorb)
888{
889 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
890 IORBH FAR16DATA *vPtr;
891 IORBH FAR16DATA *vNext = FAR16NULL;
892 int p = iorb_unit_port(pIorb);
893 int d = iorb_unit_device(pIorb);
894
895 switch (pIorb->CommandModifier)
896 {
897 case IOCM_ABORT:
898 /* abort all pending commands on specified port and device */
899 spin_lock(drv_lock);
900 for (vPtr = ai->ports[p].iorb_queue.vRoot; vPtr != FAR16NULL; vPtr = vNext)
901 {
902 IORBH *pPtr = Far16ToFlat(vPtr);
903
904 vNext = pPtr->f16NxtIORB;
905 /* move all matching IORBs to the abort queue */
906 if (vPtr != vIorb && iorb_unit_device(pPtr) == d)
907 {
908 iorb_queue_del(&ai->ports[p].iorb_queue, vPtr);
909 iorb_queue_add(&abort_queue, vPtr, pPtr);
910 pPtr->ErrorCode = IOERR_CMD_ABORTED;
911 }
912 }
913 spin_unlock(drv_lock);
914
915 /* trigger reset context hook which will finish the abort processing */
916 KernArmHook(reset_ctxhook_h, 0, 0);
917 break;
918
919 case IOCM_SUSPEND:
920 case IOCM_RESUME:
921 case IOCM_GET_QUEUE_STATUS:
922 /* Suspend/resume operations allow access to the hardware for other
923 * entities such as IBMIDECD.FLT. Since os2ahci implements both ATA
924 * and ATAPI in the same driver, this won't be required.
925 */
926 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
927 break;
928
929 case IOCM_LOCK_MEDIA:
930 case IOCM_UNLOCK_MEDIA:
931 case IOCM_EJECT_MEDIA:
932 /* unit control commands to lock, unlock and eject media */
933 /* will be supported later... */
934 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
935 break;
936
937 default:
938 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
939 break;
940 }
941
942 iorb_done(vIorb, pIorb);
943}
944
945/******************************************************************************
946 * Handle IOCC_UNIT_CONTROL requests.
947 */
948void iocc_unit_control(IORBH FAR16DATA *vIorb, IORBH *pIorb)
949{
950 IORB_UNIT_CONTROL *pIorb_uc = (IORB_UNIT_CONTROL *)pIorb;
951 int a = iorb_unit_adapter(pIorb);
952 int p = iorb_unit_port(pIorb);
953 int d = iorb_unit_device(pIorb);
954
955 spin_lock(drv_lock);
956 switch (pIorb->CommandModifier)
957 {
958 case IOCM_ALLOCATE_UNIT:
959 /* allocate unit for exclusive access */
960 if (ad_infos[a].ports[p].devs[d].allocated)
961 {
962 iorb_seterr(pIorb, IOERR_UNIT_ALLOCATED);
963 }
964 else
965 {
966 ad_infos[a].ports[p].devs[d].allocated = 1;
967 }
968 break;
969
970 case IOCM_DEALLOCATE_UNIT:
971 /* deallocate exclusive access to unit */
972 if (!ad_infos[a].ports[p].devs[d].allocated)
973 {
974 iorb_seterr(pIorb, IOERR_UNIT_NOT_ALLOCATED);
975 }
976 else
977 {
978 ad_infos[a].ports[p].devs[d].allocated = 0;
979 }
980 break;
981
982 case IOCM_CHANGE_UNITINFO:
983 /* Change unit (device) information. One reason for this IOCM is the
984 * interface for filter device drivers: a filter device driver can
985 * either change existing UNITINFOs or permanently allocate units
986 * and fabricate new [logical] units; the former is the reason why we
 987 * must store the pointer to the updated UNITINFO for subsequent
988 * IOCC_CONFIGURATION/IOCM_GET_DEVICE_TABLE calls.
989 */
990 if (!ad_infos[a].ports[p].devs[d].allocated)
991 {
992 iorb_seterr(pIorb, IOERR_UNIT_NOT_ALLOCATED);
993 break;
994 }
995 ad_infos[a].ports[p].devs[d].unit_info = pIorb_uc->f16UnitInfo;
996 break;
997
998 default:
999 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1000 break;
1001 }
1002
1003 spin_unlock(drv_lock);
1004 iorb_done(vIorb, pIorb);
1005}
1006
1007/******************************************************************************
1008 * Scan all ports for AHCI devices and construct a DASD device table.
1009 *
1010 * NOTES: This function may be called multiple times. Only the first
1011 * invocation will actually scan for devices; all subsequent calls will
1012 * merely return the results of the initial scan, potentially augmented
1013 * by modified unit infos after IOCC_CONFIGURATION/IOCM_CHANGE_UNITINFO
1014 * requests.
1015 *
1016 * In order to support applications that can't deal with ATAPI devices
1017 * (i.e. need a SCSI adapter) os2ahci will optionally report ATAPI
 1018 * devices as SCSI devices. The corresponding SCSI adapter doesn't
 1019 * really exist and is only reported here for the IOCM_GET_DEVICE_TABLE
1020 * request. The units attached to this adapter will use the real HW
1021 * unit IDs, thus we'll never receive a command specific to the
1022 * emulated SCSI adapter and won't need to set up any sort of entity
1023 * for it; the only purpose of the emulated SCSI adapter is to pass the
1024 * bus type "AI_DEVBUS_SCSI_2" upstream, and the emulated units, of
1025 * course. The emulated SCSI target IDs are allocated as follows:
1026 *
1027 * 0 the virtual adapter
1028 * 1..n emulated devices; SCSI target ID increments sequentially
1029 */
1030void iocm_device_table(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1031{
1032 IORB_CONFIGURATION *pIorb_conf;
1033 DEVICETABLE FAR16DATA *vDt;
1034 DEVICETABLE *pDt;
1035 char *pPos;
1036 int scsi_units = 0;
1037 int scsi_id = 1;
1038 int rc;
1039 int dta;
1040 int a;
1041 int p;
1042 int d;
1043
1044 pIorb_conf = (IORB_CONFIGURATION *)pIorb;
1045 vDt = pIorb_conf->f16DeviceTable;
1046 pDt = Far16ToFlat(vDt);
1047
1048 spin_lock(drv_lock);
1049
1050 /* initialize device table header */
1051 pDt->ADDLevelMajor = ADD_LEVEL_MAJOR;
1052 pDt->ADDLevelMinor = ADD_LEVEL_MINOR;
1053 pDt->ADDHandle = add_handle;
1054 pDt->TotalAdapters = ad_info_cnt + 1;
1055
1056 /* set start of adapter and device information tables */
1057 pPos = (char*)&pDt->pAdapter[pDt->TotalAdapters];
1058
1059 /* go through all adapters, including the virtual SCSI adapter */
1060 for (dta = 0; dta < pDt->TotalAdapters; dta++)
1061 {
1062 ADAPTERINFO *pPtr = (ADAPTERINFO *)pPos;
1063
1064 /* sanity check for sufficient space in device table */
1065 if ((u32)(pPtr + 1) - (u32)pDt > pIorb_conf->DeviceTableLen)
1066 {
1067 dprintf(0,"error: device table provided by DASD too small\n");
1068 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
1069 goto iocm_device_table_done;
1070 }
1071
1072 pDt->pAdapter[dta] = MakeNear16PtrFromDiff(pIorb_conf->f16DeviceTable, pDt, pPtr);
1073
1074 //DPRINTF(2,"iocm_device_table: ptr=%x dta=%x pAdapter[dta]=%x pDeviceTable=%x\n",
1075 // ptr, dta, dt->pAdapter[dta], iorb_conf->pDeviceTable);
1076 memset(pPtr, 0x00, sizeof(*pPtr));
1077
1078 pPtr->AdapterIOAccess = AI_IOACCESS_BUS_MASTER;
1079 pPtr->AdapterHostBus = AI_HOSTBUS_OTHER | AI_BUSWIDTH_32BIT;
1080 pPtr->AdapterFlags = AF_16M | AF_HW_SCATGAT;
1081 pPtr->MaxHWSGList = AHCI_MAX_SG / 2; /* AHCI S/G elements are 22 bits */
1082
1083 if (dta < ad_info_cnt)
1084 {
1085 /* this is a physical AHCI adapter */
1086 AD_INFO *ad_info = ad_infos + dta;
1087
1088 pPtr->AdapterDevBus = AI_DEVBUS_ST506 | AI_DEVBUS_32BIT;
1089 snprintf(pPtr->AdapterName, sizeof(pPtr->AdapterName), "AHCI_%d", dta);
1090
1091 if (!ad_info->port_scan_done)
1092 {
1093 /* first call; need to scan AHCI hardware for devices */
1094 if (ad_info->busy)
1095 {
1096 dprintf(0,"error: port scan requested while adapter was busy\n");
1097 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
1098 goto iocm_device_table_done;
1099 }
1100 ad_info->busy = 1;
1101 spin_unlock(drv_lock);
1102 rc = ahci_scan_ports(ad_info);
1103 spin_lock(drv_lock);
1104 ad_info->busy = 0;
1105
1106 if (rc != 0)
1107 {
1108 dprintf(0,"error: port scan failed on adapter #%d\n", dta);
1109 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
1110 goto iocm_device_table_done;
1111 }
1112 ad_info->port_scan_done = 1;
1113 }
1114
1115 /* insert physical (i.e. AHCI) devices into the device table */
1116 for (p = 0; p <= ad_info->port_max; p++)
1117 {
1118 for (d = 0; d <= ad_info->ports[p].dev_max; d++)
1119 {
1120 if (ad_info->ports[p].devs[d].present && !ad_info->ports[p].devs[d].ignored)
1121 {
1122 if (ad_info->ports[p].devs[d].atapi && emulate_scsi[dta][p])
1123 {
1124 /* report this unit as SCSI unit */
1125 scsi_units++;
1126 //continue;
1127 }
1128 if (add_unit_info(pIorb_conf, dta, dta, p, d, 0))
1129 {
1130 goto iocm_device_table_done;
1131 }
1132 }
1133 }
1134 }
1135 }
1136 else
1137 {
1138 /* this is the virtual SCSI adapter */
1139 if (scsi_units == 0)
1140 {
1141 /* not a single unit to be emulated via SCSI */
1142 pDt->TotalAdapters--;
1143 break;
1144 }
1145
1146 /* set adapter name and bus type to mimic a SCSI controller */
1147 pPtr->AdapterDevBus = AI_DEVBUS_SCSI_2 | AI_DEVBUS_16BIT;
1148 snprintf(pPtr->AdapterName, sizeof(pPtr->AdapterName), "AHCI_SCSI_0");
1149
 1150 /* add all ATAPI units to be emulated by this virtual adapter */
1151 for (a = 0; a < ad_info_cnt; a++)
1152 {
1153 AD_INFO *ad_info = ad_infos + a;
1154
1155 for (p = 0; p <= ad_info->port_max; p++)
1156 {
1157 for (d = 0; d <= ad_info->ports[p].dev_max; d++)
1158 {
1159 if (ad_info->ports[p].devs[d].present && !ad_info->ports[p].devs[d].ignored
1160 && ad_info->ports[p].devs[d].atapi && emulate_scsi[a][p])
1161 {
1162 if (add_unit_info(pIorb_conf, dta, a, p, d, scsi_id++))
1163 {
1164 goto iocm_device_table_done;
1165 }
1166 }
1167 }
1168 }
1169 }
1170 }
1171
1172 /* calculate offset for next adapter */
1173 pPos = (char *)(pPtr->UnitInfo + pPtr->AdapterUnits);
1174 }
1175
1176iocm_device_table_done:
1177 spin_unlock(drv_lock);
1178 iorb_done(vIorb, pIorb);
1179}
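/* Illustrative result (added): on a system with a single AHCI adapter whose
 * port 0 holds a disk and port 1 an ATAPI drive with SCSI emulation enabled,
 * the table built above reports two adapters: "AHCI_0" containing both
 * physical units, and the virtual "AHCI_SCSI_0" repeating the ATAPI unit
 * with UnitSCSITargetID 1 (target 0 is reserved for the virtual adapter).
 */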
1180
1181/******************************************************************************
1182 * Handle IOCC_GEOMETRY requests.
1183 */
1184void iocc_geometry(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1185{
1186 switch (pIorb->CommandModifier)
1187 {
1188 case IOCM_GET_MEDIA_GEOMETRY:
1189 case IOCM_GET_DEVICE_GEOMETRY:
1190 add_workspace(pIorb)->idempotent = 1;
1191 ahci_get_geometry(vIorb, pIorb);
1192 break;
1193
1194 default:
1195 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1196 iorb_done(vIorb, pIorb);
1197 }
1198}
1199
1200/******************************************************************************
1201 * Handle IOCC_EXECUTE_IO requests.
1202 */
1203void iocc_execute_io(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1204{
1205 switch (pIorb->CommandModifier)
1206 {
1207 case IOCM_READ:
1208 add_workspace(pIorb)->idempotent = 1;
1209 ahci_read(vIorb, pIorb);
1210 break;
1211
1212 case IOCM_READ_VERIFY:
1213 add_workspace(pIorb)->idempotent = 1;
1214 ahci_verify(vIorb, pIorb);
1215 break;
1216
1217 case IOCM_WRITE:
1218 add_workspace(pIorb)->idempotent = 1;
1219 ahci_write(vIorb, pIorb);
1220 break;
1221
1222 case IOCM_WRITE_VERIFY:
1223 add_workspace(pIorb)->idempotent = 1;
1224 ahci_write(vIorb, pIorb);
1225 break;
1226
1227 default:
1228 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1229 iorb_done(vIorb, pIorb);
1230 }
1231}
1232
1233/******************************************************************************
1234 * Handle IOCC_UNIT_STATUS requests.
1235 */
1236void iocc_unit_status(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1237{
1238 switch (pIorb->CommandModifier)
1239 {
1240 case IOCM_GET_UNIT_STATUS:
1241 add_workspace(pIorb)->idempotent = 1;
1242 ahci_unit_ready(vIorb, pIorb);
1243 break;
1244
1245 default:
1246 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1247 iorb_done(vIorb, pIorb);
1248 }
1249}
1250
1251/******************************************************************************
1252 * Handle IOCC_ADAPTER_PASSTHROUGH requests.
1253 */
1254void iocc_adapter_passthru(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1255{
1256 switch (pIorb->CommandModifier)
1257 {
1258 case IOCM_EXECUTE_CDB:
1259 add_workspace(pIorb)->idempotent = 0;
1260 ahci_execute_cdb(vIorb, pIorb);
1261 break;
1262
1263 case IOCM_EXECUTE_ATA:
1264 add_workspace(pIorb)->idempotent = 0;
1265 ahci_execute_ata(vIorb, pIorb);
1266 break;
1267
1268 default:
1269 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1270 iorb_done(vIorb, pIorb);
1271 }
1272}
1273
1274/******************************************************************************
1275 * Add an IORB to the specified queue. This function must be called with the
 1276 * adapter-level spinlock acquired.
1277 */
1278void iorb_queue_add(IORB_QUEUE *queue, IORBH FAR16DATA *vIorb, IORBH *pIorb)
1279{
 1280 if (iorb_priority(pIorb))
1281 {
1282 /* priority IORB; insert at first position */
1283 pIorb->f16NxtIORB = queue->vRoot;
1284 queue->vRoot = vIorb;
1285 }
1286 else
1287 {
1288 /* append IORB to end of queue */
1289 pIorb->f16NxtIORB = FAR16NULL;
1290
1291 if (queue->vRoot == FAR16NULL)
1292 {
1293 queue->vRoot = vIorb;
1294 }
1295 else
1296 {
1297 ((IORBH *)Far16ToFlat(queue->vTail))->f16NxtIORB = vIorb;
1298 }
1299 queue->vTail = vIorb;
1300 }
1301
1302 #ifdef DEBUG
1303 if (D32g_DbgLevel)
1304 {
1305 /* determine queue type (local, driver, abort or port) and minimum debug
1306 * level; otherwise, queue debug prints can become really confusing.
1307 */
1308 char *queue_type;
1309 int min_debug = 7;
1310
1311 if ((u32)queue >> 16 == (u32)&queue >> 16) /* DAZ this is bogus */
1312 {
1313 /* this queue is on the stack */
1314 queue_type = "local";
1315 min_debug = 8;
1316 }
1317 else if (queue == &driver_queue)
1318 {
1319 queue_type = "driver";
1320 }
1321 else if (queue == &abort_queue)
1322 {
1323 queue_type = "abort";
1324 min_debug = 8;
1325 }
1326 else
1327 {
1328 queue_type = "port";
1329 }
1330
1331 DPRINTF(min_debug,"IORB %x queued (cmd=%d/%d queue=%x [%s], timeout=%d)\n",
1332 vIorb, pIorb->CommandCode, pIorb->CommandModifier, queue, queue_type,
1333 pIorb->Timeout);
1334 }
1335 #endif
1336}
1337
1338/******************************************************************************
1339 * Remove an IORB from the specified queue. This function must be called with
 1340 * the adapter-level spinlock acquired.
1341 */
1342int iorb_queue_del(IORB_QUEUE *queue, IORBH FAR16DATA *vIorb)
1343{
1344 IORBH FAR16DATA *_vIorb;
1345 IORBH FAR16DATA *_vPrev = FAR16NULL;
1346 int found = 0;
1347
1348 for (_vIorb = queue->vRoot; _vIorb != FAR16NULL; )
1349 {
1350 IORBH *_pIorb = Far16ToFlat(_vIorb);
1351 if (_vIorb == vIorb)
1352 {
1353 /* found the IORB to be removed */
1354 if (_vPrev != FAR16NULL)
1355 {
1356 ((IORBH*)Far16ToFlat(_vPrev))->f16NxtIORB = _pIorb->f16NxtIORB;
1357 }
1358 else
1359 {
1360 queue->vRoot = _pIorb->f16NxtIORB;
1361 }
1362 if (_vIorb == queue->vTail)
1363 {
1364 queue->vTail = _vPrev;
1365 }
1366 found = 1;
1367 break;
1368 }
1369 _vPrev = _vIorb;
1370 _vIorb = _pIorb->f16NxtIORB;
1371 }
1372
1373 #ifdef DEBUG
1374 if (found)
1375 {
1376 DPRINTF(8,"IORB %x removed (queue = %x)\n", vIorb, queue);
1377 }
1378 else
1379 {
1380 DPRINTF(2,"IORB %x not found in queue %x\n", vIorb, queue);
1381 }
1382 #endif
1383
1384 return(!found);
1385}
1386
1387/******************************************************************************
1388 * Set the error code in the specified IORB
1389 *
1390 * NOTE: This function does *not* call iorb_done(). It merely sets the IORB
1391 * status to the specified error code.
1392 */
1393void iorb_seterr(IORBH *pIorb, USHORT error_code)
1394{
1395 pIorb->ErrorCode = error_code;
1396 pIorb->Status |= IORB_ERROR;
1397}
1398
1399/******************************************************************************
1400 * Mark the specified IORB as done and notify the asynchronous post function,
1401 * if any. The IORB is also removed from the corresponding IORB queue.
1402 *
1403 * NOTES: This function does not clear the Status field; it merely adds the
1404 * IORB_DONE flag.
1405 *
1406 * This function is expected to be called *without* the corresponding
 1407 * driver-level drv_lock acquired. It will acquire the spinlock before
1408 * updating the IORB queue and release it before notifying the upstream
1409 * code in order to prevent deadlocks.
1410 *
1411 * Due to this logic, this function is only good for simple task-time
1412 * completions. Functions working on lists of IORBs (such as interrupt
1413 * handlers or context hooks) should call iorb_complete() directly and
1414 * implement their own logic for removing the IORB from the port queue.
1415 * See abort_ctxhook() for an example.
1416 */
1417void iorb_done(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1418{
1419 int a = iorb_unit_adapter(pIorb);
1420 int p = iorb_unit_port(pIorb);
1421
1422 /* remove IORB from corresponding queue */
1423 spin_lock(drv_lock);
1424 if (iorb_driver_level(pIorb))
1425 {
1426 iorb_queue_del(&driver_queue, vIorb);
1427 }
1428 else
1429 {
1430 iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, vIorb);
1431 }
1432 aws_free(add_workspace(pIorb));
1433 spin_unlock(drv_lock);
1434
1435 iorb_complete(vIorb, pIorb);
1436}
1437
1438/******************************************************************************
1439 * Complete an IORB. This should be called without the adapter-level spinlock
1440 * to allow the IORB completion routine to perform whatever processing it
1441 * requires. This implies that the IORB should no longer be in any global
1442 * queue because the IORB completion routine may well reuse the IORB and send
1443 * the next request to us before even returning from this function.
1444 */
1445void iorb_complete(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1446{
1447 pIorb->Status |= IORB_DONE;
1448
1449 DPRINTF(7,"IORB %x complete status=0x%04x error=0x%04x\n",
1450 vIorb, pIorb->Status, pIorb->ErrorCode);
1451
1452 if (pIorb->RequestControl & IORB_ASYNC_POST)
1453 {
1454 Dev32Help_CallFar16((PFNFAR16)pIorb->f16NotifyAddress, vIorb);
1455 }
1456}
1457
1458/******************************************************************************
1459 * Requeue the specified IORB such that it will be sent downstream for
1460 * processing again. This includes freeing all resources currently allocated
1461 * (timer, buffer, ...) and resetting the flags to 0. The driver-level
 1462 * spinlock must be acquired when calling this function.
1463 *
1464 * The following flags are preserved:
1465 * - no_ncq
1466 */
1467void iorb_requeue(IORBH *pIorb)
1468{
1469 ADD_WORKSPACE *aws = add_workspace(pIorb);
1470 u16 no_ncq = aws->no_ncq;
1471 u16 unaligned = aws->unaligned;
1472 u16 retries = aws->retries;
1473
1474 aws_free(aws);
1475 memset(aws, 0x00, sizeof(*aws));
1476
1477 aws->no_ncq = no_ncq;
1478 aws->unaligned = unaligned;
1479 aws->retries = retries;
1480}
1481
1482/******************************************************************************
1483 * Free resources in ADD workspace (timer, buffer, ...). This function should
1484 * be called with the spinlock held to prevent race conditions.
1485 */
1486void aws_free(ADD_WORKSPACE *aws)
1487{
1488 if (aws->timer != 0)
1489 {
1490 Timer_CancelTimer(aws->timer);
1491 aws->timer = 0;
1492 }
1493
1494 if (aws->buf != NULL)
1495 {
1496 MemFree(aws->buf);
1497 aws->buf = NULL;
1498 }
1499}
1500
1501/******************************************************************************
1502 * Lock the adapter, waiting for availability if necessary. This is expected
1503 * to be called at task/request time without the driver-level spinlock
 1504 * acquired. Don't call at interrupt time.
1505 */
1506void lock_adapter(AD_INFO *ai)
1507{
1508 TIMER Timer;
1509
1510 spin_lock(drv_lock);
1511 while (ai->busy)
1512 {
1513 spin_unlock(drv_lock);
1514 TimerInit(&Timer, 250);
1515 while (!TimerCheckAndBlock(&Timer));
1516 spin_lock(drv_lock);
1517 }
1518 ai->busy = 1;
1519 spin_unlock(drv_lock);
1520}
1521
1522/******************************************************************************
1523 * Unlock adapter (i.e. reset busy flag)
1524 */
1525void unlock_adapter(AD_INFO *ai)
1526{
1527 ai->busy = 0;
1528}
1529
1530/******************************************************************************
1531 * Timeout handler for I/O commands. Since timeout handling can involve
1532 * lengthy operations like port resets, the main code is located in a
1533 * separate function which is invoked via a context hook.
1534 */
1535void __syscall timeout_callback(ULONG timer_handle, ULONG p1)
1536{
1537 IORBH FAR16DATA *vIorb = (IORBH FAR16DATA *)CastULONGToFar16(p1);
1538 IORBH *pIorb = Far16ToFlat(vIorb);
1539 int a = iorb_unit_adapter(pIorb);
1540 int p = iorb_unit_port(pIorb);
1541
1542 Timer_CancelTimer(timer_handle);
1543 dprintf(0,"timeout for IORB %x port=%x\n", vIorb, p);
1544
1545 /* Move the timed-out IORB to the abort queue. Since it's possible that the
1546 * IORB has completed after the timeout has expired but before we got to
1547 * this line of code, we'll check the return code of iorb_queue_del(): If it
1548 * returns an error, the IORB must have completed a few microseconds ago and
1549 * there is no timeout.
1550 */
1551 spin_lock(drv_lock);
1552 if (iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, vIorb) == 0)
1553 {
1554 iorb_queue_add(&abort_queue, vIorb, pIorb);
1555 pIorb->ErrorCode = IOERR_ADAPTER_TIMEOUT;
1556 }
1557 spin_unlock(drv_lock);
1558
1559 /* Trigger abort processing function. We don't really care whether this
1560 * succeeds because the only reason why it would fail should be multiple
1561 * calls to DevHelp_ArmCtxHook() before the context hook had a chance to
1562 * start executing, which leaves two scenarios:
1563 *
 1564 * - We succeeded in arming the context hook. Fine.
1565 *
1566 * - We armed the context hook a second time before it had a chance to
1567 * start executing. In this case, the already scheduled context hook
1568 * will process our IORB as well.
1569 */
1570 KernArmHook(reset_ctxhook_h, 0, 0);
1571
1572 /* Set up a watchdog timer which calls the context hook manually in case
1573 * some kernel thread is looping around the IORB_COMPLETE status bit
1574 * without yielding the CPU (kernel threads don't preempt). This shouldn't
1575 * happen per design because kernel threads are supposed to yield but it
1576 * does in the early boot phase.
1577 */
1578 Timer_StartTimerMS(&th_reset_watchdog, 5000, reset_watchdog, 0);
1579}
1580
1581/******************************************************************************
1582 * Reset handler watchdog. If a timeout occurs, a context hook is armed which
1583 * will execute as soon as a kernel thread yields the CPU. However, some
1584 * kernel components won't yield the CPU during the early boot phase and the
1585 * only way to kick some sense into those components is to run the context
1586 * hook right inside this timer callback. Not exactly pretty, especially
1587 * considering the fact that context hooks were implemented to prevent running
1588 * lengthy operations like a port reset at interrupt time, but without this
1589 * watchdog mechanism we run the risk of getting completely stalled by device
1590 * problems during the early boot phase.
1591 */
1592void __syscall reset_watchdog(ULONG timer_handle, ULONG p1)
1593{
1594 /* reset watchdog timer */
1595 Timer_CancelTimer(timer_handle);
1596 th_reset_watchdog = 0;
1597 dprintf(0,"reset watchdog invoked\n");
1598
1599 /* You cannot call the reset_ctxhook directly because it does things
1600 * that are illegal in an interrupt handler.
1601 */
1602
1603 KernArmHook(reset_ctxhook_h, 0, 0);
1604
1605 /* call context hook manually */
1606 //reset_ctxhook(0);
1607}
1608
1609/******************************************************************************
 1610 * Add unit info to ADAPTERINFO array (IOCM_GET_DEVICE_TABLE requests). The
1611 * adapter info array in the device table, dt->pAdapter[], is expected to be
1612 * initialized for the specified index (dt_ai).
1613 *
1614 * Please note that the device table adapter index, dta, is not always equal
1615 * to the physical adapter index, a: if SCSI emulation has been activated, the
1616 * last reported adapter is a virtual SCSI adapter and the physical adapter
1617 * indexes for those units are, of course, different from the device table
1618 * index of the virtual SCSI adapter.
1619 */
1620static int add_unit_info(IORB_CONFIGURATION *pIorb_conf, int dta,
1621 int a, int p, int d, int scsi_id)
1622{
1623 DEVICETABLE *pDt = Far16ToFlat(pIorb_conf->f16DeviceTable);
1624 ADAPTERINFO *pPtr;
1625 UNITINFO *pUi;
1626 AD_INFO *ai = ad_infos + a;
1627
1628 pPtr = (ADAPTERINFO *)MakeFlatFromNear16(pIorb_conf->f16DeviceTable, pDt->pAdapter[dta]);
1629 //DPRINTF(2,"add_unit_info: ptr=%x dta=%x pAdapter[dta]=%x pDeviceTable=%x\n",
1630 // ptr, dta, dt->pAdapter[dta], iorb_conf->pDeviceTable);
1631
1632 pUi = &pPtr->UnitInfo[pPtr->AdapterUnits];
1633
1634 if ((u32)(pUi + 1) - (u32)pDt > pIorb_conf->DeviceTableLen)
1635 {
1636 dprintf(0,"error: device table provided by DASD too small\n");
1637 iorb_seterr(&pIorb_conf->iorbh, IOERR_CMD_SW_RESOURCE);
1638 return(-1);
1639 }
1640
1641 if (ai->ports[p].devs[d].unit_info == NULL)
1642 {
1643 /* provide original information about this device (unit) */
1644 memset(pUi, 0x00, sizeof(*pUi));
1645 pUi->AdapterIndex = dta; /* device table adapter index */
1646 pUi->UnitHandle = iorb_unit(a, p, d); /* physical adapter index */
1647 pUi->UnitIndex = pPtr->AdapterUnits;
1648 pUi->UnitType = ai->ports[p].devs[d].dev_type;
1649 pUi->QueuingCount = ai->ports[p].devs[d].ncq_max;
1650 if (ai->ports[p].devs[d].removable)
1651 {
1652 pUi->UnitFlags |= UF_REMOVABLE;
1653 }
1654 if (scsi_id > 0) {
1655 /* set fake SCSI ID for this unit */
1656 pUi->UnitSCSITargetID = scsi_id;
1657 }
1658 }
1659 else
1660 {
1661 /* copy updated device (unit) information (IOCM_CHANGE_UNITINFO) */
1662 memcpy(pUi, ai->ports[p].devs[d].unit_info, sizeof(*pUi));
1663 }
1664
1665 pPtr->AdapterUnits++;
1666 return(0);
1667}
1668