source: trunk/src/os2ahci/os2ahci.c@ 186

Last change on this file since 186 was 186, checked in by David Azarewicz, 8 years ago

Doc changes
Warpin package changes

File size: 50.8 KB
1/******************************************************************************
2 * os2ahci.c - main file for os2ahci driver
3 *
4 * Copyright (c) 2011 thi.guten Software Development
5 * Copyright (c) 2011 Mensys B.V.
6 * Copyright (c) 2013-2016 David Azarewicz
7 *
8 * Authors: Christian Mueller, Markus Thielen
9 *
10 * Parts copied from/inspired by the Linux AHCI driver;
11 * those parts are (c) Linux AHCI/ATA maintainers
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include "os2ahci.h"
29#include "ioctl.h"
30#include "version.h"
31#include "devhdr.h"
32
33/* -------------------------- macros and constants ------------------------- */
34
35/* set two-dimensional array of port options */
36#define set_port_option(opt, val) \
37 if (adapter_index == -1) { \
38 /* set option for all adapters and ports */ \
39 memset(opt, val, sizeof(opt)); \
40 } else if (port_index == -1) { \
41 /* set option for all ports on current adapter */ \
42 memset(opt[adapter_index], val, sizeof(*opt)); \
43 } else { \
44 /* set option for specific port */ \
45 opt[adapter_index][port_index] = val; \
46 }
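/* Usage sketch for the macro above (illustrative; the values are ones the
 * command line parser in init_drv() below could produce, not taken from a
 * real configuration): after "/A:1" has set adapter_index = 1 while
 * port_index is still -1, parsing "/!N" executes
 *
 *   set_port_option(enable_ncq, 0);
 *
 * which expands to memset(enable_ncq[1], 0, sizeof(*enable_ncq)), i.e. NCQ is
 * disabled on every port of adapter 1 only. With "/P:3" given as well, only
 * enable_ncq[1][3] would be cleared; with no "/A:" at all, the whole array
 * would be cleared.
 */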
47
48#define FLAG_KRNL_EXIT_ADD 0x1000
49#define FLAG_KRNL_EXIT_REMOVE 0x2000
50
51#define TYPE_KRNL_EXIT_NMI 0x0000 /* non masked interrupts */
52#define TYPE_KRNL_EXIT_SFF 0x0001 /* system fatal faults */
53#define TYPE_KRNL_EXIT_PROCDUMP 0x0002
54#define TYPE_KRNL_EXIT_DYN 0x0003
55#define TYPE_KRNL_EXIT_INT13 0x0004 /* enable int13 IO */
56
57/* ------------------------ typedefs and structures ------------------------ */
58
59/* -------------------------- function prototypes -------------------------- */
60
61extern int SetPsdPutc(void);
62static int add_unit_info(IORB_CONFIGURATION *iorb_conf, int dt_ai, int a, int p, int d, int scsi_id);
63
64/* ------------------------ global/static variables ------------------------ */
65int thorough_scan = 1; /* if != 0, perform thorough PCI scan */
66int init_reset = 1; /* if != 0, reset ports during init */
67int force_write_cache; /* if != 0, force write cache */
68int verbosity = 0; /* default is quiet. 1=show sign-on banner, >1=show adapter info during boot */
69int use_lvm_info = 1;
70long com_baud = 0;
71
72HDRIVER rm_drvh; /* resource manager driver handle */
73USHORT add_handle; /* driver handle (RegisterDeviceClass) */
74char drv_name[] = "OS2AHCI"; /* driver name as string */
75
76/* resource manager driver information structure */
77static DRIVERSTRUCT rm_drvinfo =
78{
79 NULL, /* We cannot do Flat to Far16 conversion at compile time */
80 NULL, /* so we put NULLs in all the Far16 fields and then fill */
81 NULL, /* them in at run time */
82 DMAJOR,
83 DMINOR,
84 BLD_YEAR, BLD_MONTH, BLD_DAY,
85 0,
86 DRT_ADDDM,
87 DRS_ADD,
88 NULL
89};
90
91SpinLock_t drv_lock; /* driver-level spinlock */
92IORB_QUEUE driver_queue; /* driver-level IORB queue */
93AD_INFO ad_infos[MAX_AD]; /* adapter information list */
94int ad_info_cnt; /* number of entries in ad_infos[] */
95u16 ad_ignore; /* bitmap with adapter indexes to ignore */
96int init_complete; /* if != 0, initialization has completed */
97int suspended;
98int resume_sleep_flag;
99
100/* adapter/port-specific options saved when parsing the command line */
101u8 emulate_scsi[MAX_AD][AHCI_MAX_PORTS];
102u8 enable_ncq[MAX_AD][AHCI_MAX_PORTS];
103u8 link_speed[MAX_AD][AHCI_MAX_PORTS];
104u8 link_power[MAX_AD][AHCI_MAX_PORTS];
105u8 track_size[MAX_AD][AHCI_MAX_PORTS];
106u8 port_ignore[MAX_AD][AHCI_MAX_PORTS];
107
108char BldLevel[] = BLDLEVEL;
109
110/* ----------------------------- start of code ----------------------------- */
111
112/******************************************************************************
113 * OS/2 device driver main strategy function.
114 *
115 * NOTE: this is also used as the IDC entry point. We expect an IOCTL request
116 * packet for IDC calls, so they can be handled by gen_ioctl.
117 */
118void StrategyHandler(REQPACKET *prp)
119{
120 u16 rc;
121
122 switch (prp->bCommand)
123 {
124 case STRATEGY_BASEDEVINIT:
125 rc = init_drv(prp);
126 break;
127
128 case STRATEGY_SHUTDOWN:
129 rc = exit_drv(prp->save_restore.Function);
130 break;
131
132 case STRATEGY_GENIOCTL:
133 rc = gen_ioctl(prp);
134 break;
135
136 case STRATEGY_OPEN:
137 build_user_info();
138 rc = RPDONE;
139 break;
140
141 case STRATEGY_READ:
142 rc = char_dev_input(prp);
143 break;
144
145 case STRATEGY_SAVERESTORE:
146 rc = sr_drv(prp->save_restore.Function);
147 break;
148
149 case STRATEGY_INITCOMPLETE:
150 case STRATEGY_CLOSE:
151 case STRATEGY_INPUTSTATUS:
152 case STRATEGY_FLUSHINPUT:
153 /* noop */
154 rc = RPDONE;
155 break;
156
157 default:
158 rc = RPDONE | RPERR_BADCOMMAND;
159 break;
160 }
161
162 prp->usStatus = rc;
163}
164
165void IdcHandler(REQPACKET *prp)
166{
167 StrategyHandler(prp);
168}
169
170/******************************************************************************
171 * Initialize the os2ahci driver. This includes command line parsing, scanning
172 * the PCI bus for supported AHCI adapters, etc.
173 */
174USHORT init_drv(REQPACKET *req)
175{
176 static int init_drv_called;
177 static int init_drv_failed;
178 APIRET rmrc;
179 const char *pszCmdLine, *cmd_line;
180 int adapter_index = -1;
181 int port_index = -1;
182 int iInvertOption;
183 int iStatus;
184
185 if (init_drv_called)
186 {
187 /* This is the init call for the second (IBMS506$) character
188 * device driver. If the main driver failed initialization, fail this
189 * one as well.
190 */
191 return(RPDONE | ((init_drv_failed) ? RPERR_INITFAIL : 0));
192 }
193 D32g_DbgLevel = 0;
194 init_drv_called = 1;
195 suspended = 0;
196 resume_sleep_flag = 0;
197 memset(ad_infos, 0, sizeof(ad_infos));
198 memset(emulate_scsi, 1, sizeof(emulate_scsi)); /* set default enabled */
199 UtSetDriverName("OS2AHCI$");
200 Header.ulCaps |= DEV_ADAPTER_DD; /* DAZ This flag is not really needed. */
201
202 /* create driver-level spinlock */
203 KernAllocSpinLock(&drv_lock);
204
205 /* register driver with resource manager */
206 rm_drvinfo.DrvrName = drv_name;
207 rm_drvinfo.DrvrDescript = "AHCI SATA Driver";
208 rm_drvinfo.VendorName = DVENDOR;
209 if ((rmrc = RMCreateDriver(&rm_drvinfo, &rm_drvh)) != RMRC_SUCCESS)
210 {
211 iprintf("%s: failed to register driver with resource manager (rc = %d)", drv_name, rmrc);
212 goto init_fail;
213 }
214
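 /* Illustrative example of a command line as parsed by the loop below (the
  * installed driver file name is an assumption, it is not defined in this
  * file):
  *
  *   BASEDEV=OS2AHCI.ADD /V /A:0 /P:2 /!N /LS:1
  *
  * enables the sign-on banner (/V), then selects adapter 0, port 2 and
  * disables NCQ (/!N) and limits the link speed to 1 (/LS:1) for that port;
  * /LS: also forces init_reset = 1 so the new link settings take effect.
  */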
215 pszCmdLine = cmd_line = req->init_in.szArgs;
216 iStatus = 0;
217 while (*pszCmdLine)
218 {
219 if (*pszCmdLine++ != '/') continue; /* Ignore anything that doesn't start with '/' */
220 /* pszCmdLine now points to first char of argument */
221
222 if ((iInvertOption = (*pszCmdLine == '!')) != 0) pszCmdLine++;
223
224 if (ArgCmp(pszCmdLine, "B:"))
225 {
226 pszCmdLine += 2;
227 com_baud = strtol(pszCmdLine, &pszCmdLine, 0);
228 continue;
229 }
230
231 if (ArgCmp(pszCmdLine, "C:"))
232 {
233 pszCmdLine += 2;
234 /* set COM port base address for debug messages */
235 D32g_ComBase = strtol(pszCmdLine, &pszCmdLine, 0);
236 if (D32g_ComBase == 1) D32g_ComBase = 0x3f8;
237 if (D32g_ComBase == 2) D32g_ComBase = 0x2f8;
238 continue;
239 }
240
241 if (ArgCmp(pszCmdLine, "D"))
242 {
243 pszCmdLine++;
244 if (*pszCmdLine == ':')
245 {
246 pszCmdLine++;
247 D32g_DbgLevel = strtol(pszCmdLine, &pszCmdLine, 0);
248 }
249 else D32g_DbgLevel++; /* increase debug level */
250 continue;
251 }
252
253 if (ArgCmp(pszCmdLine, "G:"))
254 {
255 u16 usVendor;
256 u16 usDevice;
257
258 pszCmdLine += 2;
259 /* add specified PCI ID as a supported generic AHCI adapter */
260 usVendor = strtol(pszCmdLine, &pszCmdLine, 16);
261 if (*pszCmdLine != ':') break;
262 pszCmdLine++;
263 usDevice = strtol(pszCmdLine, &pszCmdLine, 16);
264 if (add_pci_id(usVendor, usDevice))
265 {
266 iprintf("%s: failed to add PCI ID %04x:%04x", drv_name, usVendor, usDevice);
267 iStatus = 1;
268 }
269 thorough_scan = 1;
270 continue;
271 }
272
273 if (ArgCmp(pszCmdLine, "T"))
274 {
275 pszCmdLine++;
276 /* perform thorough PCI scan (i.e. look for individual supported PCI IDs) */
277 thorough_scan = !iInvertOption;
278 continue;
279 }
280
281 if (ArgCmp(pszCmdLine, "R"))
282 {
283 pszCmdLine++;
284 /* reset ports during initialization */
285 init_reset = !iInvertOption;
286 continue;
287 }
288
289 if (ArgCmp(pszCmdLine, "F"))
290 {
291 pszCmdLine++;
292 /* force write cache regardless of IORB flags */
293 force_write_cache = 1;
294 continue;
295 }
296
297 if (ArgCmp(pszCmdLine, "A:"))
298 {
299 pszCmdLine += 2;
300 /* set adapter index for adapter and port-related options */
301 adapter_index = strtol(pszCmdLine, &pszCmdLine, 0);
302 if (adapter_index < 0 || adapter_index >= MAX_AD)
303 {
304 iprintf("%s: invalid adapter index (%d)", drv_name, adapter_index);
305 iStatus = 1;
306 }
307 continue;
308 }
309
310 if (ArgCmp(pszCmdLine, "P:"))
311 {
312 pszCmdLine += 2;
313 /* set port index for port-related options */
314 port_index = strtol(pszCmdLine, &pszCmdLine, 0);
315 if (port_index < 0 || port_index >= AHCI_MAX_PORTS)
316 {
317 iprintf("%s: invalid port index (%d)", drv_name, port_index);
318 iStatus = 1;
319 }
320 continue;
321 }
322
323 if (ArgCmp(pszCmdLine, "I"))
324 {
325 pszCmdLine++;
326 /* ignore current adapter index */
327 if (adapter_index >= 0)
328 {
329 if (port_index >= 0) port_ignore[adapter_index][port_index] = !iInvertOption;
330 else ad_ignore |= 1U << adapter_index;
331 }
332 continue;
333 }
334
335 if (ArgCmp(pszCmdLine, "S"))
336 {
337 pszCmdLine++;
338 /* enable SCSI emulation for ATAPI devices */
339 set_port_option(emulate_scsi, !iInvertOption);
340 continue;
341 }
342
343 if (ArgCmp(pszCmdLine, "N"))
344 {
345 pszCmdLine++;
346 /* enable NCQ */
347 set_port_option(enable_ncq, !iInvertOption);
348 continue;
349 }
350
351 if (ArgCmp(pszCmdLine, "LS:"))
352 {
353 int optval;
354
355 pszCmdLine += 3;
356 /* set link speed */
357 optval = strtol(pszCmdLine, &pszCmdLine, 0);
358 set_port_option(link_speed, optval);
359 /* need to reset the port in order to establish link settings */
360 init_reset = 1;
361 continue;
362 }
363
364 if (ArgCmp(pszCmdLine, "LP:"))
365 {
366 int optval;
367
368 pszCmdLine += 3;
369 /* set power management */
370 optval = strtol(pszCmdLine, &pszCmdLine, 0);
371 set_port_option(link_power, optval);
372 /* need to reset the port in order to establish link settings */
373 init_reset = 1;
374 continue;
375 }
376
377 if (ArgCmp(pszCmdLine, "4"))
378 {
379 pszCmdLine++;
380 /* enable 4K sector geometry enhancement (track size = 56) */
381 if (!iInvertOption) set_port_option(track_size, 56);
382 continue;
383 }
384
385 if (ArgCmp(pszCmdLine, "Z"))
386 {
387 pszCmdLine++;
388 /* Specify to not use the LVM information. There is no reason why anyone would
389 * want to do this, but previous versions of this driver did not have LVM capability,
390 * so this switch is here temporarily just in case.
391 */
392 use_lvm_info = !iInvertOption;
393 continue;
394 }
395
396 if (ArgCmp(pszCmdLine, "V"))
397 {
398 pszCmdLine++;
399 if (*pszCmdLine == ':')
400 {
401 pszCmdLine++;
402 verbosity = strtol(pszCmdLine, &pszCmdLine, 0);
403 }
404 else verbosity++; /* increase verbosity level */
405 continue;
406 }
407
408 if (ArgCmp(pszCmdLine, "W"))
409 {
410 pszCmdLine++;
411 /* Specify to allow the trace buffer to wrap when full. */
412 D32g_DbgBufWrap = !iInvertOption;
413 continue;
414 }
415
416 iprintf("Unrecognized switch: %s", pszCmdLine-1);
417 iStatus = 1; /* unrecognized argument */
418 }
419
420 if (iStatus) goto init_fail;
421
422 if (com_baud) InitComPort(com_baud);
423
424 dprintf(0,"BldLevel: %s\n", BldLevel);
425 dprintf(0,"CmdLine: %s\n", cmd_line);
426 /*
427 if (sizeof(ADD_WORKSPACE) > ADD_WORKSPACE_SIZE)
428 {
429 dprintf(0,"ADD_WORKSPACE size is too big! %d>16\n", sizeof(ADD_WORKSPACE));
430 goto init_fail;
431 }
432 */
433
434 /* print initialization message */
435 ciprintf("%s driver version %d.%02d", drv_name, DMAJOR, DMINOR);
436
437 #ifdef TESTVER
438 #include "testver.c"
439 #endif
440
441 /* scan PCI bus for supported devices */
442 scan_pci_bus();
443
444 if (ad_info_cnt > 0)
445 {
446 /* initialization succeeded and we found at least one AHCI adapter */
447
448 if (Dev32Help_RegisterDeviceClass(drv_name, add_entry, 0, 1, &add_handle))
449 {
450 iprintf("%s: couldn't register device class", drv_name);
451 goto init_fail;
452 }
453
454 Timer_InitTimer(TIMER_COUNT);
455
456 /* allocate context hooks */
457 KernAllocateContextHook(restart_ctxhook, 0, &restart_ctxhook_h);
458 KernAllocateContextHook(reset_ctxhook, 0, &reset_ctxhook_h);
459 KernAllocateContextHook(engine_ctxhook, 0, &engine_ctxhook_h);
460
461 /* register kernel exit routine for trap dumps */
462 Dev32Help_RegisterKrnlExit(shutdown_driver, FLAG_KRNL_EXIT_ADD, TYPE_KRNL_EXIT_INT13);
463
464 return(RPDONE);
465
466 }
467 else
468 {
469 /* no adapters found */
470 ciprintf("%s: No adapters found.", drv_name);
471 }
472
473init_fail:
474 /* initialization failed; set segment sizes to 0 and return error */
475 init_drv_failed = 1;
476
477 if (rm_drvh != 0)
478 {
479 /* remove driver from resource manager */
480 RMDestroyDriver(rm_drvh);
481 }
482
483 ciprintf("%s driver *not* installed", drv_name);
484 return(RPDONE | RPERR_INITFAIL);
485}
486
487/******************************************************************************
488 * Generic IOCTL via character device driver. IOCTLs are used to control the
489 * driver operation and to execute native ATA and ATAPI (SCSI) commands from
490 * ring 3 applications. On top of that, some predefined IOCTLs (e.g. SMART
491 * commands for ATA disks) are implemented here.
492 */
493USHORT gen_ioctl(REQPACKET *ioctl)
494{
495 DPRINTF(2,"IOCTL 0x%x/0x%x\n", ioctl->ioctl.bCategory, ioctl->ioctl.bFunction);
496
497 switch (ioctl->ioctl.bCategory)
498 {
499 case OS2AHCI_IOCTL_CATEGORY:
500 switch (ioctl->ioctl.bFunction)
501 {
502
503 case OS2AHCI_IOCTL_GET_DEVLIST:
504 return(ioctl_get_devlist(ioctl));
505
506 case OS2AHCI_IOCTL_PASSTHROUGH:
507 return(ioctl_passthrough(ioctl));
508
509 }
510 break;
511
512 case DSKSP_CAT_GENERIC:
513 return(ioctl_gen_dsk(ioctl));
514
515 case DSKSP_CAT_SMART:
516 return(ioctl_smart(ioctl));
517 }
518
519 return(RPDONE | RPERR_BADCOMMAND);
520}
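/* Ring-3 usage sketch (illustrative; the character device name and the
 * devlist buffer are assumptions here, the real layouts are defined in
 * ioctl.h): an application would typically issue something like
 *
 *   HFILE hDrv;
 *   ULONG ulAction, cbData = sizeof(devlist);
 *
 *   DosOpen("OS2AHCI$", &hDrv, &ulAction, 0, FILE_NORMAL, FILE_OPEN,
 *           OPEN_ACCESS_READWRITE | OPEN_SHARE_DENYNONE, NULL);
 *   DosDevIOCtl(hDrv, OS2AHCI_IOCTL_CATEGORY, OS2AHCI_IOCTL_GET_DEVLIST,
 *               NULL, 0, NULL, &devlist, cbData, &cbData);
 *
 * which ends up in ioctl_get_devlist() via the switch above.
 */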
521
522/******************************************************************************
523 * Read from character device. If tracing is on (internal ring buffer trace),
524 * we return data from the trace buffer; if not, we might return a device
525 * dump similar to IBM1S506.ADD/DANIS506.ADD (TODO).
526 */
527USHORT char_dev_input(REQPACKET *pPacket)
528{
529 void *LinAdr;
530
531 if (Dev32Help_PhysToLin(pPacket->io.ulAddress, pPacket->io.usCount, &LinAdr))
532 {
533 pPacket->io.usCount = 0;
534 return RPDONE | RPERR_GENERAL;
535 }
536
537 pPacket->io.usCount = dCopyToUser(LinAdr, pPacket->io.usCount);
538
539 return RPDONE;
540}
541
542/******************************************************************************
543 * Device driver exit handler. This handler is called when OS/2 shuts down and
544 * flushes the write caches of all attached devices. Since this is effectively
545 * the same we do when suspending, we'll call out to the corresponding suspend
546 * function.
547 *
548 * NOTE: Errors are ignored because there's no way we could stop the shutdown
549 * or do something about the error, unless retrying endlessly is
550 * considered an option.
551 */
552USHORT exit_drv(int func)
553{
554 DPRINTF(2,"exit_drv(%d) called\n", func);
555
556 if (func == 0)
557 {
558 /* we're only interested in the second phase of the shutdown */
559 return(RPDONE);
560 }
561
562 suspend();
563 return(RPDONE);
564}
565
566/******************************************************************************
567 * Device driver suspend/resume handler. This handler is called when ACPI is
568 * executing a suspend or resume.
569 */
570USHORT sr_drv(int func)
571{
572 DPRINTF(2,"sr_drv(%d) called\n", func);
573
574 if (func) resume();
575 else suspend();
576
577 return(RPDONE);
578}
579
580/******************************************************************************
581 * ADD entry point. This is the main entry point for all ADD requests. Due to
582 * the asynchronous nature of ADD drivers, this function primarily queues the
583 * IORB(s) to the corresponding adapter or port queues, then triggers the
584 * state machine to initiate processing of the queued IORBs.
585 *
586 * NOTE: In order to prevent race conditions or engine stalls, certain rules
587 * around locking, unlocking and IORB handling in general have been
588 * established. Refer to the comments in "trigger_engine()" for
589 * details.
590 */
591void add_entry(IORBH FAR16DATA *vFirstIorb)
592{
593 IORBH FAR16DATA *vIorb;
594 IORBH FAR16DATA *vNext = NULL;
595
596 spin_lock(drv_lock);
597
598 for (vIorb=vFirstIorb; vIorb!=NULL; vIorb=vNext)
599 {
600 IORBH *pIorb = Far16ToFlat(vIorb);
601
602 /* Queue this IORB. Queues primarily exist on port level but there are
603 * some requests which affect the whole driver, most notably
604 * IOCC_CONFIGURATION. In either case, adding the IORB to the driver or
605 * port queue will change the links, thus we need to save the original
606 * link in 'vNext'.
607 */
608 if (pIorb->RequestControl & IORB_CHAIN) vNext = pIorb->pNxtIORB;
609 else vNext = (IORBH FAR16DATA *)0;
610
611 pIorb->Status = 0;
612 pIorb->ErrorCode = 0;
613 memset(&pIorb->ADDWorkSpace, 0x00, sizeof(ADD_WORKSPACE));
614
615 #ifdef DEBUG
616 DumpIorb(pIorb); /* DAZ TESTING */
617 #endif
618
619 if (iorb_driver_level(pIorb))
620 {
621 /* driver-level IORB */
622 pIorb->UnitHandle = 0;
623 iorb_queue_add(&driver_queue, vIorb, pIorb);
624
625 }
626 else
627 {
628 /* port-level IORB */
629 int a = iorb_unit_adapter(pIorb);
630 int p = iorb_unit_port(pIorb);
631 int d = iorb_unit_device(pIorb);
632
633 if (a >= ad_info_cnt ||
634 p > ad_infos[a].port_max ||
635 d > ad_infos[a].ports[p].dev_max ||
636 (ad_infos[a].port_map & (1UL << p)) == 0)
637 {
638
639 /* unit handle outside of the allowed range */
640 dprintf(0,"warning: IORB for %d.%d.%d out of range\n", a, p, d);
641 pIorb->Status = IORB_ERROR;
642 pIorb->ErrorCode = IOERR_CMD_SYNTAX;
643 iorb_complete(vIorb, pIorb);
644 continue;
645 }
646
647 iorb_queue_add(&ad_infos[a].ports[p].iorb_queue, vIorb, pIorb);
648 }
649 }
650
651 /* trigger state machine */
652 trigger_engine();
653
654 spin_unlock(drv_lock);
655}
656
657/******************************************************************************
658 * Trigger IORB queue engine. This is a wrapper function for trigger_engine_1()
659 * which will try to get all IORBs sent on their way a couple of times. If
660 * there are still IORBs ready for processing after this, this function will
661 * hand off to a context hook which will continue to trigger the engine until
662 * all IORBs have been sent.
663 *
664 * NOTE: While initialization has not completed (or during suspend/resume
665 * operations), this function will loop indefinitely because we can't
666 * rely on interrupt handlers or context hooks and complex IORBs
667 * requiring multiple requeues would eventually hang and time out if
668 * we stopped triggering here.
669 */
670void trigger_engine(void)
671{
672 int i;
673
674 for (i = 0; i < 3 || !init_complete; i++)
675 {
676 if (trigger_engine_1() == 0)
677 {
678 /* done -- all IORBs have been sent on their way */
679 return;
680 }
681 }
682
683 /* Something keeps bouncing; hand off to the engine context hook which will
684 * keep trying in the background.
685 */
686 KernArmHook(engine_ctxhook_h, 0, 0);
687}
688
689/******************************************************************************
690 * Trigger IORB queue engine in order to send commands in the driver/port IORB
691 * queues to the AHCI hardware. This function will return the number of IORBs
692 * sent. Keep in mind that IORBs might "bounce" if the adapter/port is not in
693 * a state to accept the command, thus it might take quite a few calls to get
694 * all IORBs on their way. This is why there's a wrapper function which tries
695 * it a few times, then hands off to a context hook which will keep trying in
696 * the background.
697 *
698 * IORBs might complete before send_iorb() has returned, at any time during
699 * interrupt processing or on another CPU on SMP systems. IORB completion
700 * means modifications to the corresponding IORB queue (the completed IORB
701 * is removed from the queue) thus we need to protect the IORB queues from
702 * race conditions. The safest approach short of keeping the driver-level
703 * spinlock acquired permanently is to keep it throughout this function and
704 * release it temporarily in send_iorb().
705 *
706 * This implies that the handler functions are fully responsible for acquiring
707 * the driver-level spinlock when they need it, and for releasing it again.
708 *
709 * As a rule of thumb, get the driver-level spinlock whenever accessing
710 * volatile variables (IORB queues, values in ad_infos[], ...).
711 *
712 * Additional Notes:
713 *
714 * - This function is expected to be called with the spinlock acquired
715 *
716 * - Adapters can be flagged as 'busy' which means no new IORBs are sent (they
717 * just remain in the queue). This can be used to release the driver-level
718 * spinlock while making sure no new IORBs are going to hit the hardware.
719 * In order to prevent engine stalls, all handlers using this functionality
720 * need to invoke trigger_engine() after resetting the busy flag.
721 *
722 * - Driver-level IORBs are not synchronized by adapter-level 'busy' flags.
723 * However, the driver-level queue is worked "one entry at a time" which
724 * means that no new IORBs will be queued on the driver-level queue until
725 * the head element has completed processing. This means that driver-
726 * level IORB handlers don't need to protect against each other. But they
727 * do need to keep in mind interference with port-level IORBs:
728 *
729 * - Driver-level IORB handlers must obtain the spinlock and/or flag all
730 * adapters as 'busy' which are affected by the driver-level IORB
731 *
732 * - Driver-level IORB handlers must not access the hardware of a
733 * particular adapter if it's flagged as 'busy' by another IORB.
734 */
735int trigger_engine_1(void)
736{
737 IORBH FAR16DATA *vIorb;
738 IORBH *pIorb;
739 IORBH FAR16DATA *vNext;
740 int iorbs_sent = 0;
741 int a;
742 int p;
743
744 iorbs_sent = 0;
745
746 /* process driver-level IORBs */
747 if ((vIorb = driver_queue.vRoot) != NULL)
748 {
749 pIorb = Far16ToFlat(vIorb);
750
751 if (!add_workspace(pIorb)->processing)
752 {
753 send_iorb(vIorb, pIorb);
754 iorbs_sent++;
755 }
756 }
757
758 /* process port-level IORBs */
759 for (a = 0; a < ad_info_cnt; a++)
760 {
761 AD_INFO *ai = ad_infos + a;
762 if (ai->busy)
763 {
764 /* adapter is busy; don't process any IORBs */
765 continue;
766 }
767 for (p = 0; p <= ai->port_max; p++)
768 {
769 /* send all queued IORBs on this port */
770 vNext = NULL;
771 for (vIorb = ai->ports[p].iorb_queue.vRoot; vIorb != NULL; vIorb = vNext)
772 {
773 pIorb = Far16ToFlat(vIorb);
774
775 vNext = pIorb->pNxtIORB;
776 if (!add_workspace(pIorb)->processing)
777 {
778 send_iorb(vIorb, pIorb);
779 iorbs_sent++;
780 }
781 }
782 }
783 }
784
785 return(iorbs_sent);
786}
787
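/* Sketch of the 'busy' protocol described in the comment above
 * trigger_engine_1() (hypothetical handler code, not part of this driver):
 * a handler that must touch the hardware without holding drv_lock would do
 * roughly
 *
 *   spin_lock(drv_lock);
 *   ai->busy = 1;              // no new IORBs will be sent to this adapter
 *   spin_unlock(drv_lock);
 *
 *   ...lengthy hardware access...
 *
 *   spin_lock(drv_lock);
 *   ai->busy = 0;
 *   trigger_engine();          // restart IORBs that bounced in the meantime
 *   spin_unlock(drv_lock);
 *
 * See lock_adapter()/unlock_adapter() and the port scan in
 * iocm_device_table() for in-file uses of the busy flag.
 */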
788/******************************************************************************
789 * Send a single IORB to the corresponding AHCI adapter/port. This is just a
790 * switch board for calling the corresponding iocc_*() handler function.
791 *
792 * NOTE: This function is expected to be called with the driver-level spinlock
793 * acquired. It will release it before calling any of the handler
794 * functions and re-acquire it when done.
795 */
796void send_iorb(IORBH FAR16DATA *vIorb, IORBH *pIorb)
797{
798 /* Mark IORB as "processing" before doing anything else. Once the IORB is
799 * marked as "processing", we can release the spinlock because subsequent
800 * invocations of trigger_engine() (e.g. at interrupt time) will ignore this
801 * IORB.
802 */
803 add_workspace(pIorb)->processing = 1;
804 spin_unlock(drv_lock);
805
806 switch (pIorb->CommandCode)
807 {
808 case IOCC_CONFIGURATION:
809 iocc_configuration(vIorb, pIorb);
810 break;
811
812 case IOCC_DEVICE_CONTROL:
813 iocc_device_control(vIorb, pIorb);
814 break;
815
816 case IOCC_UNIT_CONTROL:
817 iocc_unit_control(vIorb, pIorb);
818 break;
819
820 case IOCC_GEOMETRY:
821 iocc_geometry(vIorb, pIorb);
822 break;
823
824 case IOCC_EXECUTE_IO:
825 iocc_execute_io(vIorb, pIorb);
826 break;
827
828 case IOCC_UNIT_STATUS:
829 iocc_unit_status(vIorb, pIorb);
830 break;
831
832 case IOCC_ADAPTER_PASSTHRU:
833 iocc_adapter_passthru(vIorb, pIorb);
834 break;
835
836 default:
837 /* unsupported call */
838 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
839 iorb_done(vIorb, pIorb);
840 break;
841 }
842
843 /* re-acquire spinlock before returning to trigger_engine() */
844 spin_lock(drv_lock);
845}
846
847/******************************************************************************
848 * Handle IOCC_CONFIGURATION requests.
849 */
850void iocc_configuration(IORBH FAR16DATA *vIorb, IORBH *pIorb)
851{
852 int a;
853
854 switch (pIorb->CommandModifier)
855 {
856
857 case IOCM_COMPLETE_INIT:
858 /* Complete initialization. From now on, we won't have to restore the BIOS
859 * configuration after each command and we're fully operational (i.e. will
860 * use interrupts, timers and context hooks instead of polling).
861 */
862 if (!init_complete)
863 {
864 DPRINTF(1,"leaving initialization mode\n");
865 for (a = 0; a < ad_info_cnt; a++)
866 {
867 lock_adapter(ad_infos + a);
868 ahci_complete_init(ad_infos + a);
869 }
870 init_complete = 1;
871
872 /* release all adapters */
873 for (a = 0; a < ad_info_cnt; a++)
874 {
875 unlock_adapter(ad_infos + a);
876 }
877 DPRINTF(1,"leaving initialization mode 2\n");
878
879 #ifdef LEGACY_APM
880 /* register APM hook */
881 apm_init();
882 #endif
883 }
884 iorb_done(vIorb, pIorb);
885 break;
886
887 case IOCM_GET_DEVICE_TABLE:
888 /* construct a device table */
889 iocm_device_table(vIorb, pIorb);
890 break;
891
892 default:
893 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
894 iorb_done(vIorb, pIorb);
895 break;
896 }
897}
898
899/******************************************************************************
900 * Handle IOCC_DEVICE_CONTROL requests.
901 */
902void iocc_device_control(IORBH FAR16DATA *vIorb, IORBH *pIorb)
903{
904 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
905 IORBH FAR16DATA *vPtr;
906 IORBH FAR16DATA *vNext = NULL;
907 int p = iorb_unit_port(pIorb);
908 int d = iorb_unit_device(pIorb);
909
910 switch (pIorb->CommandModifier)
911 {
912
913 case IOCM_ABORT:
914 /* abort all pending commands on specified port and device */
915 spin_lock(drv_lock);
916 for (vPtr = ai->ports[p].iorb_queue.vRoot; vPtr != NULL; vPtr = vNext)
917 {
918 IORBH *pPtr = Far16ToFlat(vPtr);
919
920 vNext = pPtr->pNxtIORB;
921 /* move all matching IORBs to the abort queue */
922 if (vPtr != vIorb && iorb_unit_device(pPtr) == d)
923 {
924 iorb_queue_del(&ai->ports[p].iorb_queue, vPtr);
925 iorb_queue_add(&abort_queue, vPtr, pPtr);
926 pPtr->ErrorCode = IOERR_CMD_ABORTED;
927 }
928 }
929 spin_unlock(drv_lock);
930
931 /* trigger reset context hook which will finish the abort processing */
932 KernArmHook(reset_ctxhook_h, 0, 0);
933 break;
934
935 case IOCM_SUSPEND:
936 case IOCM_RESUME:
937 case IOCM_GET_QUEUE_STATUS:
938 /* Suspend/resume operations allow access to the hardware for other
939 * entities such as IBMIDECD.FLT. Since os2ahci implements both ATA
940 * and ATAPI in the same driver, this won't be required.
941 */
942 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
943 break;
944
945 case IOCM_LOCK_MEDIA:
946 case IOCM_UNLOCK_MEDIA:
947 case IOCM_EJECT_MEDIA:
948 /* unit control commands to lock, unlock and eject media */
949 /* will be supported later... */
950 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
951 break;
952
953 default:
954 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
955 break;
956 }
957
958 iorb_done(vIorb, pIorb);
959}
960
961/******************************************************************************
962 * Handle IOCC_UNIT_CONTROL requests.
963 */
964void iocc_unit_control(IORBH FAR16DATA *vIorb, IORBH *pIorb)
965{
966 IORB_UNIT_CONTROL *pIorb_uc = (IORB_UNIT_CONTROL *)pIorb;
967 int a = iorb_unit_adapter(pIorb);
968 int p = iorb_unit_port(pIorb);
969 int d = iorb_unit_device(pIorb);
970
971 spin_lock(drv_lock);
972 switch (pIorb->CommandModifier)
973 {
974 case IOCM_ALLOCATE_UNIT:
975 /* allocate unit for exclusive access */
976 if (ad_infos[a].ports[p].devs[d].allocated)
977 {
978 iorb_seterr(pIorb, IOERR_UNIT_ALLOCATED);
979 }
980 else
981 {
982 ad_infos[a].ports[p].devs[d].allocated = 1;
983 }
984 break;
985
986 case IOCM_DEALLOCATE_UNIT:
987 /* deallocate exclusive access to unit */
988 if (!ad_infos[a].ports[p].devs[d].allocated)
989 {
990 iorb_seterr(pIorb, IOERR_UNIT_NOT_ALLOCATED);
991 }
992 else
993 {
994 ad_infos[a].ports[p].devs[d].allocated = 0;
995 }
996 break;
997
998 case IOCM_CHANGE_UNITINFO:
999 /* Change unit (device) information. One reason for this IOCM is the
1000 * interface for filter device drivers: a filter device driver can
1001 * either change existing UNITINFOs or permanently allocate units
1002 * and fabricate new [logical] units; the former is the reason why we
1003 * must store the pointer to the updated UNITINFO for subsequent
1004 * IOCC_CONFIGURATION/IOCM_GET_DEVICE_TABLE calls.
1005 */
1006 if (!ad_infos[a].ports[p].devs[d].allocated)
1007 {
1008 iorb_seterr(pIorb, IOERR_UNIT_NOT_ALLOCATED);
1009 break;
1010 }
1011 ad_infos[a].ports[p].devs[d].unit_info = pIorb_uc->pUnitInfo;
1012 break;
1013
1014 default:
1015 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1016 break;
1017 }
1018
1019 spin_unlock(drv_lock);
1020 iorb_done(vIorb, pIorb);
1021}
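/* Filter driver sketch (hypothetical caller sequence): a filter device driver
 * typically issues
 *
 *   IOCC_UNIT_CONTROL / IOCM_ALLOCATE_UNIT      -> gain exclusive access
 *   IOCC_UNIT_CONTROL / IOCM_CHANGE_UNITINFO    -> point us at its UNITINFO
 *
 * after which IOCC_CONFIGURATION / IOCM_GET_DEVICE_TABLE reports the updated
 * UNITINFO (see add_unit_info() at the end of this file).
 */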
1022
1023/******************************************************************************
1024 * Scan all ports for AHCI devices and construct a DASD device table.
1025 *
1026 * NOTES: This function may be called multiple times. Only the first
1027 * invocation will actually scan for devices; all subsequent calls will
1028 * merely return the results of the initial scan, potentially augmented
1029 * by modified unit infos after IOCC_CONFIGURATION/IOCM_CHANGE_UNITINFO
1030 * requests.
1031 *
1032 * In order to support applications that can't deal with ATAPI devices
1033 * (i.e. need a SCSI adapter) os2ahci will optionally report ATAPI
1034 * devices as SCSI devices. The corresponding SCSI adapter doesn't
1035 * really exist and is only reported here for the IOCM_GET_DEVICE_TABLE
1036 * request. The units attached to this adapter will use the real HW
1037 * unit IDs, thus we'll never receive a command specific to the
1038 * emulated SCSI adapter and won't need to set up any sort of entity
1039 * for it; the only purpose of the emulated SCSI adapter is to pass the
1040 * bus type "AI_DEVBUS_SCSI_2" upstream, and the emulated units, of
1041 * course. The emulated SCSI target IDs are allocated as follows:
1042 *
1043 * 0 the virtual adapter
1044 * 1..n emulated devices; SCSI target ID increments sequentially
1045 */
1046void iocm_device_table(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1047{
1048 IORB_CONFIGURATION *pIorb_conf;
1049 DEVICETABLE FAR16DATA *vDt;
1050 DEVICETABLE *pDt;
1051 char *pPos;
1052 int scsi_units = 0;
1053 int scsi_id = 1;
1054 int rc;
1055 int dta;
1056 int a;
1057 int p;
1058 int d;
1059
1060 pIorb_conf = (IORB_CONFIGURATION *)pIorb;
1061 vDt = pIorb_conf->pDeviceTable;
1062 pDt = Far16ToFlat(vDt);
1063
1064 spin_lock(drv_lock);
1065
1066 /* initialize device table header */
1067 pDt->ADDLevelMajor = ADD_LEVEL_MAJOR;
1068 pDt->ADDLevelMinor = ADD_LEVEL_MINOR;
1069 pDt->ADDHandle = add_handle;
1070 pDt->TotalAdapters = ad_info_cnt + 1;
1071
1072 /* set start of adapter and device information tables */
1073 pPos = (char*)&pDt->pAdapter[pDt->TotalAdapters];
1074
1075 /* go through all adapters, including the virtual SCSI adapter */
1076 for (dta = 0; dta < pDt->TotalAdapters; dta++)
1077 {
1078 ADAPTERINFO *pPtr = (ADAPTERINFO *)pPos;
1079
1080 /* sanity check for sufficient space in device table */
1081 if ((u32)(pPtr + 1) - (u32)pDt > pIorb_conf->DeviceTableLen)
1082 {
1083 dprintf(0,"error: device table provided by DASD too small\n");
1084 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
1085 goto iocm_device_table_done;
1086 }
1087
1088 pDt->pAdapter[dta] = MakeNear16PtrFromDiff(pIorb_conf->pDeviceTable, pDt, pPtr);
1089
1090 //DPRINTF(2,"iocm_device_table: ptr=%x dta=%x pAdapter[dta]=%x pDeviceTable=%x\n",
1091 // ptr, dta, dt->pAdapter[dta], iorb_conf->pDeviceTable);
1092 memset(pPtr, 0x00, sizeof(*pPtr));
1093
1094 pPtr->AdapterIOAccess = AI_IOACCESS_BUS_MASTER;
1095 pPtr->AdapterHostBus = AI_HOSTBUS_OTHER | AI_BUSWIDTH_32BIT;
1096 pPtr->AdapterFlags = AF_16M | AF_HW_SCATGAT;
1097 pPtr->MaxHWSGList = AHCI_MAX_SG / 2; /* AHCI S/G elements are 22 bits */
1098
1099 if (dta < ad_info_cnt)
1100 {
1101 /* this is a physical AHCI adapter */
1102 AD_INFO *ad_info = ad_infos + dta;
1103
1104 pPtr->AdapterDevBus = AI_DEVBUS_ST506 | AI_DEVBUS_32BIT;
1105 sprintf(pPtr->AdapterName, "AHCI_%d", dta);
1106
1107 if (!ad_info->port_scan_done)
1108 {
1109 /* first call; need to scan AHCI hardware for devices */
1110 if (ad_info->busy)
1111 {
1112 dprintf(0,"error: port scan requested while adapter was busy\n");
1113 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
1114 goto iocm_device_table_done;
1115 }
1116 ad_info->busy = 1;
1117 spin_unlock(drv_lock);
1118 rc = ahci_scan_ports(ad_info);
1119 spin_lock(drv_lock);
1120 ad_info->busy = 0;
1121
1122 if (rc != 0)
1123 {
1124 dprintf(0,"error: port scan failed on adapter #%d\n", dta);
1125 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
1126 goto iocm_device_table_done;
1127 }
1128 ad_info->port_scan_done = 1;
1129 }
1130
1131 /* insert physical (i.e. AHCI) devices into the device table */
1132 for (p = 0; p <= ad_info->port_max; p++)
1133 {
1134 for (d = 0; d <= ad_info->ports[p].dev_max; d++)
1135 {
1136 if (ad_info->ports[p].devs[d].present)
1137 {
1138 if (ad_info->ports[p].devs[d].atapi && emulate_scsi[dta][p])
1139 {
1140 /* report this unit as SCSI unit */
1141 scsi_units++;
1142 //continue;
1143 }
1144 if (add_unit_info(pIorb_conf, dta, dta, p, d, 0))
1145 {
1146 goto iocm_device_table_done;
1147 }
1148 }
1149 }
1150 }
1151 }
1152 else
1153 {
1154 /* this is the virtual SCSI adapter */
1155 if (scsi_units == 0)
1156 {
1157 /* not a single unit to be emulated via SCSI */
1158 pDt->TotalAdapters--;
1159 break;
1160 }
1161
1162 /* set adapter name and bus type to mimic a SCSI controller */
1163 pPtr->AdapterDevBus = AI_DEVBUS_SCSI_2 | AI_DEVBUS_16BIT;
1164 sprintf(pPtr->AdapterName, "AHCI_SCSI_0");
1165
1166 /* add all ATAPI units to be emulated by this virtual adapter */
1167 for (a = 0; a < ad_info_cnt; a++)
1168 {
1169 AD_INFO *ad_info = ad_infos + a;
1170
1171 for (p = 0; p <= ad_info->port_max; p++)
1172 {
1173 for (d = 0; d <= ad_info->ports[p].dev_max; d++)
1174 {
1175 if (ad_info->ports[p].devs[d].present && ad_info->ports[p].devs[d].atapi && emulate_scsi[a][p])
1176 {
1177 if (add_unit_info(pIorb_conf, dta, a, p, d, scsi_id++))
1178 {
1179 goto iocm_device_table_done;
1180 }
1181 }
1182 }
1183 }
1184 }
1185 }
1186
1187 /* calculate offset for next adapter */
1188 pPos = (char *)(pPtr->UnitInfo + pPtr->AdapterUnits);
1189 }
1190
1191iocm_device_table_done:
1192 spin_unlock(drv_lock);
1193 iorb_done(vIorb, pIorb);
1194}
1195
1196/******************************************************************************
1197 * Handle IOCC_GEOMETRY requests.
1198 */
1199void iocc_geometry(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1200{
1201 switch (pIorb->CommandModifier)
1202 {
1203 case IOCM_GET_MEDIA_GEOMETRY:
1204 case IOCM_GET_DEVICE_GEOMETRY:
1205 add_workspace(pIorb)->idempotent = 1;
1206 ahci_get_geometry(vIorb, pIorb);
1207 break;
1208
1209 default:
1210 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1211 iorb_done(vIorb, pIorb);
1212 }
1213}
1214
1215/******************************************************************************
1216 * Handle IOCC_EXECUTE_IO requests.
1217 */
1218void iocc_execute_io(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1219{
1220 switch (pIorb->CommandModifier)
1221 {
1222 case IOCM_READ:
1223 add_workspace(pIorb)->idempotent = 1;
1224 ahci_read(vIorb, pIorb);
1225 break;
1226
1227 case IOCM_READ_VERIFY:
1228 add_workspace(pIorb)->idempotent = 1;
1229 ahci_verify(vIorb, pIorb);
1230 break;
1231
1232 case IOCM_WRITE:
1233 add_workspace(pIorb)->idempotent = 1;
1234 ahci_write(vIorb, pIorb);
1235 break;
1236
1237 case IOCM_WRITE_VERIFY:
1238 add_workspace(pIorb)->idempotent = 1;
1239 ahci_write(vIorb, pIorb);
1240 break;
1241
1242 default:
1243 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1244 iorb_done(vIorb, pIorb);
1245 }
1246}
1247
1248/******************************************************************************
1249 * Handle IOCC_UNIT_STATUS requests.
1250 */
1251void iocc_unit_status(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1252{
1253 switch (pIorb->CommandModifier)
1254 {
1255 case IOCM_GET_UNIT_STATUS:
1256 add_workspace(pIorb)->idempotent = 1;
1257 ahci_unit_ready(vIorb, pIorb);
1258 break;
1259
1260 default:
1261 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1262 iorb_done(vIorb, pIorb);
1263 }
1264}
1265
1266/******************************************************************************
1267 * Handle IOCC_ADAPTER_PASSTHROUGH requests.
1268 */
1269void iocc_adapter_passthru(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1270{
1271 switch (pIorb->CommandModifier)
1272 {
1273
1274 case IOCM_EXECUTE_CDB:
1275 add_workspace(pIorb)->idempotent = 0;
1276 ahci_execute_cdb(vIorb, pIorb);
1277 break;
1278
1279 case IOCM_EXECUTE_ATA:
1280 add_workspace(pIorb)->idempotent = 0;
1281 ahci_execute_ata(vIorb, pIorb);
1282 break;
1283
1284 default:
1285 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1286 iorb_done(vIorb, pIorb);
1287 }
1288}
1289
1290/******************************************************************************
1291 * Add an IORB to the specified queue. This function must be called with the
1292 * adapter-level spinlock acquired.
1293 */
1294void iorb_queue_add(IORB_QUEUE *queue, IORBH FAR16DATA *vIorb, IORBH *pIorb)
1295{
1296 if (iorb_priority(pIorb))
1297 {
1298 /* priority IORB; insert at first position */
1299 pIorb->pNxtIORB = queue->vRoot;
1300 queue->vRoot = vIorb;
1301 }
1302 else
1303 {
1304 /* append IORB to end of queue */
1305 pIorb->pNxtIORB = NULL;
1306
1307 if (queue->vRoot == NULL)
1308 {
1309 queue->vRoot = vIorb;
1310 }
1311 else
1312 {
1313 ((IORBH *)Far16ToFlat(queue->vTail))->pNxtIORB = vIorb;
1314 }
1315 queue->vTail = vIorb;
1316 }
1317
1318 #ifdef DEBUG
1319 if (D32g_DbgLevel)
1320 {
1321 /* determine queue type (local, driver, abort or port) and minimum debug
1322 * level; otherwise, queue debug prints can become really confusing.
1323 */
1324 char *queue_type;
1325 int min_debug = 7;
1326
1327 if ((u32)queue >> 16 == (u32)&queue >> 16) /* DAZ this is bogus */
1328 {
1329 /* this queue is on the stack */
1330 queue_type = "local";
1331 min_debug = 8;
1332
1333 }
1334 else if (queue == &driver_queue)
1335 {
1336 queue_type = "driver";
1337
1338 }
1339 else if (queue == &abort_queue)
1340 {
1341 queue_type = "abort";
1342 min_debug = 8;
1343
1344 }
1345 else
1346 {
1347 queue_type = "port";
1348 }
1349
1350 DPRINTF(min_debug,"IORB %x queued (cmd=%d/%d queue=%x [%s], timeout=%d)\n",
1351 vIorb, pIorb->CommandCode, pIorb->CommandModifier, queue, queue_type,
1352 pIorb->Timeout);
1353 }
1354 #endif
1355}
1356
1357/******************************************************************************
1358 * Remove an IORB from the specified queue. This function must be called with
1359 * the adapter-level spinlock acquired.
1360 */
1361int iorb_queue_del(IORB_QUEUE *queue, IORBH FAR16DATA *vIorb)
1362{
1363 IORBH FAR16DATA *_vIorb;
1364 IORBH FAR16DATA *_vPrev = NULL;
1365 int found = 0;
1366
1367 for (_vIorb = queue->vRoot; _vIorb != NULL; )
1368 {
1369 IORBH *_pIorb = Far16ToFlat(_vIorb);
1370 if (_vIorb == vIorb)
1371 {
1372 /* found the IORB to be removed */
1373 if (_vPrev != NULL)
1374 {
1375 ((IORBH*)Far16ToFlat(_vPrev))->pNxtIORB = _pIorb->pNxtIORB;
1376 }
1377 else
1378 {
1379 queue->vRoot = _pIorb->pNxtIORB;
1380 }
1381 if (_vIorb == queue->vTail)
1382 {
1383 queue->vTail = _vPrev;
1384 }
1385 found = 1;
1386 break;
1387 }
1388 _vPrev = _vIorb;
1389 _vIorb = _pIorb->pNxtIORB;
1390 }
1391
1392 #ifdef DEBUG
1393 if (found)
1394 {
1395 DPRINTF(8,"IORB %x removed (queue = %x)\n", vIorb, queue);
1396 }
1397 else
1398 {
1399 DPRINTF(2,"IORB %x not found in queue %x\n", vIorb, queue);
1400 }
1401 #endif
1402
1403 return(!found);
1404}
1405
1406/******************************************************************************
1407 * Set the error code in the specified IORB
1408 *
1409 * NOTE: This function does *not* call iorb_done(). It merely sets the IORB
1410 * status to the specified error code.
1411 */
1412void iorb_seterr(IORBH *pIorb, USHORT error_code)
1413{
1414 pIorb->ErrorCode = error_code;
1415 pIorb->Status |= IORB_ERROR;
1416}
1417
1418/******************************************************************************
1419 * Mark the specified IORB as done and notify the asynchronous post function,
1420 * if any. The IORB is also removed from the corresponding IORB queue.
1421 *
1422 * NOTES: This function does not clear the Status field; it merely adds the
1423 * IORB_DONE flag.
1424 *
1425 * This function is expected to be called *without* the corresponding
1426 * driver-level drv_lock acquired. It will acquire the spinlock before
1427 * updating the IORB queue and release it before notifying the upstream
1428 * code in order to prevent deadlocks.
1429 *
1430 * Due to this logic, this function is only good for simple task-time
1431 * completions. Functions working on lists of IORBs (such as interrupt
1432 * handlers or context hooks) should call iorb_complete() directly and
1433 * implement their own logic for removing the IORB from the port queue.
1434 * See abort_ctxhook() for an example.
1435 */
1436void iorb_done(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1437{
1438 int a = iorb_unit_adapter(pIorb);
1439 int p = iorb_unit_port(pIorb);
1440
1441 /* remove IORB from corresponding queue */
1442 spin_lock(drv_lock);
1443 if (iorb_driver_level(pIorb))
1444 {
1445 iorb_queue_del(&driver_queue, vIorb);
1446 }
1447 else
1448 {
1449 iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, vIorb);
1450 }
1451 aws_free(add_workspace(pIorb));
1452 spin_unlock(drv_lock);
1453
1454 iorb_complete(vIorb, pIorb);
1455}
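/* Minimal completion sketch (hypothetical handler code, following the rules
 * in the comment above): a simple task-time handler that cannot service a
 * request finishes its IORB with
 *
 *   iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);   // failure case only
 *   iorb_done(vIorb, pIorb);                       // dequeue + notify caller
 *
 * while interrupt handlers and context hooks remove the IORB from its queue
 * themselves (under drv_lock) and then call iorb_complete() directly.
 */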
1456
1457/******************************************************************************
1458 * Complete an IORB. This should be called without the adapter-level spinlock
1459 * to allow the IORB completion routine to perform whatever processing it
1460 * requires. This implies that the IORB should no longer be in any global
1461 * queue because the IORB completion routine may well reuse the IORB and send
1462 * the next request to us before even returning from this function.
1463 */
1464void iorb_complete(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1465{
1466 pIorb->Status |= IORB_DONE;
1467
1468 DPRINTF(7,"IORB %x complete status=0x%04x error=0x%04x\n",
1469 vIorb, pIorb->Status, pIorb->ErrorCode);
1470
1471 if (pIorb->RequestControl & IORB_ASYNC_POST)
1472 {
1473 Dev32Help_CallFar16((PFNFAR16)pIorb->NotifyAddress, vIorb);
1474 }
1475}
1476
1477/******************************************************************************
1478 * Requeue the specified IORB such that it will be sent downstream for
1479 * processing again. This includes freeing all resources currently allocated
1480 * (timer, buffer, ...) and resetting the flags to 0. The driver-level
1481 * spinlock must be acquired when calling this function.
1482 *
1483 * The following flags are preserved:
1484 * - no_ncq
1485 */
1486void iorb_requeue(IORBH *pIorb)
1487{
1488 ADD_WORKSPACE *aws = add_workspace(pIorb);
1489 u16 no_ncq = aws->no_ncq;
1490 u16 unaligned = aws->unaligned;
1491 u16 retries = aws->retries;
1492
1493 aws_free(aws);
1494 memset(aws, 0x00, sizeof(*aws));
1495
1496 aws->no_ncq = no_ncq;
1497 aws->unaligned = unaligned;
1498 aws->retries = retries;
1499}
1500
1501/******************************************************************************
1502 * Free resources in ADD workspace (timer, buffer, ...). This function should
1503 * be called with the spinlock held to prevent race conditions.
1504 */
1505void aws_free(ADD_WORKSPACE *aws)
1506{
1507 if (aws->timer != 0)
1508 {
1509 Timer_CancelTimer(aws->timer);
1510 aws->timer = 0;
1511 }
1512
1513 if (aws->buf != NULL)
1514 {
1515 MemFree(aws->buf);
1516 aws->buf = NULL;
1517 }
1518}
1519
1520/******************************************************************************
1521 * Lock the adapter, waiting for availability if necessary. This is expected
1522 * to be called at task/request time without the driver-level spinlock
1523 * acquired. Don't call at interrupt time.
1524 */
1525void lock_adapter(AD_INFO *ai)
1526{
1527 TIMER Timer;
1528
1529 spin_lock(drv_lock);
1530 while (ai->busy)
1531 {
1532 spin_unlock(drv_lock);
1533 TimerInit(&Timer, 250);
1534 while (!TimerCheckAndBlock(&Timer));
1535 spin_lock(drv_lock);
1536 }
1537 ai->busy = 1;
1538 spin_unlock(drv_lock);
1539}
1540
1541/******************************************************************************
1542 * Unlock adapter (i.e. reset busy flag)
1543 */
1544void unlock_adapter(AD_INFO *ai)
1545{
1546 ai->busy = 0;
1547}
1548
1549/******************************************************************************
1550 * Timeout handler for I/O commands. Since timeout handling can involve
1551 * lengthy operations like port resets, the main code is located in a
1552 * separate function which is invoked via a context hook.
1553 */
1554void __syscall timeout_callback(ULONG timer_handle, ULONG p1)
1555{
1556 IORBH FAR16DATA *vIorb = (IORBH FAR16DATA *)CastULONGToFar16(p1);
1557 IORBH *pIorb = Far16ToFlat(vIorb);
1558 int a = iorb_unit_adapter(pIorb);
1559 int p = iorb_unit_port(pIorb);
1560
1561 Timer_CancelTimer(timer_handle);
1562 dprintf(0,"timeout for IORB %x\n", vIorb);
1563
1564 /* Move the timed-out IORB to the abort queue. Since it's possible that the
1565 * IORB has completed after the timeout has expired but before we got to
1566 * this line of code, we'll check the return code of iorb_queue_del(): If it
1567 * returns an error, the IORB must have completed a few microseconds ago and
1568 * there is no timeout.
1569 */
1570 spin_lock(drv_lock);
1571 if (iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, vIorb) == 0)
1572 {
1573 iorb_queue_add(&abort_queue, vIorb, pIorb);
1574 pIorb->ErrorCode = IOERR_ADAPTER_TIMEOUT;
1575 }
1576 spin_unlock(drv_lock);
1577
1578 /* Trigger abort processing function. We don't really care whether this
1579 * succeeds because the only reason why it would fail should be multiple
1580 * calls to DevHelp_ArmCtxHook() before the context hook had a chance to
1581 * start executing, which leaves two scenarios:
1582 *
1583 * - We succeeded in arming the context hook. Fine.
1584 *
1585 * - We armed the context hook a second time before it had a chance to
1586 * start executing. In this case, the already scheduled context hook
1587 * will process our IORB as well.
1588 */
1589 KernArmHook(reset_ctxhook_h, 0, 0);
1590
1591 /* Set up a watchdog timer which calls the context hook manually in case
1592 * some kernel thread is looping around the IORB_COMPLETE status bit
1593 * without yielding the CPU (kernel threads don't preempt). This shouldn't
1594 * happen per design because kernel threads are supposed to yield but it
1595 * does in the early boot phase.
1596 */
1597 Timer_StartTimerMS(&th_reset_watchdog, 5000, reset_watchdog, 0);
1598}
1599
1600/******************************************************************************
1601 * Reset handler watchdog. If a timeout occurs, a context hook is armed which
1602 * will execute as soon as a kernel thread yields the CPU. However, some
1603 * kernel components won't yield the CPU during the early boot phase and the
1604 * only way to kick some sense into those components is to run the context
1605 * hook right inside this timer callback. Not exactly pretty, especially
1606 * considering the fact that context hooks were implemented to prevent running
1607 * lengthy operations like a port reset at interrupt time, but without this
1608 * watchdog mechanism we run the risk of getting completely stalled by device
1609 * problems during the early boot phase.
1610 */
1611void __syscall reset_watchdog(ULONG timer_handle, ULONG p1)
1612{
1613 /* reset watchdog timer */
1614 Timer_CancelTimer(timer_handle);
1615 dprintf(0,"reset watchdog invoked\n");
1616
1617 /* call context hook manually */
1618 reset_ctxhook(0);
1619}
1620
1621/******************************************************************************
1622 * Add unit info to ADAPTERINFO array (IOCM_GET_DEVICE_TABLE requests). The
1623 * adapter info array in the device table, dt->pAdapter[], is expected to be
1624 * initialized for the specified index (dt_ai).
1625 *
1626 * Please note that the device table adapter index, dta, is not always equal
1627 * to the physical adapter index, a: if SCSI emulation has been activated, the
1628 * last reported adapter is a virtual SCSI adapter and the physical adapter
1629 * indexes for those units are, of course, different from the device table
1630 * index of the virtual SCSI adapter.
1631 */
1632static int add_unit_info(IORB_CONFIGURATION *pIorb_conf, int dta,
1633 int a, int p, int d, int scsi_id)
1634{
1635 DEVICETABLE *pDt = Far16ToFlat(pIorb_conf->pDeviceTable);
1636 ADAPTERINFO *pPtr;
1637 UNITINFO *pUi;
1638 AD_INFO *ai = ad_infos + a;
1639
1640 pPtr = (ADAPTERINFO *)MakeFlatFromNear16(pIorb_conf->pDeviceTable, pDt->pAdapter[dta]);
1641 //DPRINTF(2,"add_unit_info: ptr=%x dta=%x pAdapter[dta]=%x pDeviceTable=%x\n",
1642 // ptr, dta, dt->pAdapter[dta], iorb_conf->pDeviceTable);
1643
1644 pUi = &pPtr->UnitInfo[pPtr->AdapterUnits];
1645
1646 if ((u32)(pUi + 1) - (u32)pDt > pIorb_conf->DeviceTableLen)
1647 {
1648 dprintf(0,"error: device table provided by DASD too small\n");
1649 iorb_seterr(&pIorb_conf->iorbh, IOERR_CMD_SW_RESOURCE);
1650 return(-1);
1651 }
1652
1653 if (ai->ports[p].devs[d].unit_info == NULL)
1654 {
1655 /* provide original information about this device (unit) */
1656 memset(pUi, 0x00, sizeof(*pUi));
1657 pUi->AdapterIndex = dta; /* device table adapter index */
1658 pUi->UnitHandle = iorb_unit(a, p, d); /* physical adapter index */
1659 pUi->UnitIndex = pPtr->AdapterUnits;
1660 pUi->UnitType = ai->ports[p].devs[d].dev_type;
1661 pUi->QueuingCount = ai->ports[p].devs[d].ncq_max;
1662 if (ai->ports[p].devs[d].removable)
1663 {
1664 pUi->UnitFlags |= UF_REMOVABLE;
1665 }
1666 if (scsi_id > 0) {
1667 /* set fake SCSI ID for this unit */
1668 pUi->UnitSCSITargetID = scsi_id;
1669 }
1670 }
1671 else
1672 {
1673 /* copy updated device (unit) information (IOCM_CHANGE_UNITINFO) */
1674 memcpy(pUi, ai->ports[p].devs[d].unit_info, sizeof(*pUi));
1675 }
1676
1677 pPtr->AdapterUnits++;
1678 return(0);
1679}
1680