source: trunk/src/os2ahci/os2ahci.c@ 178

Last change on this file since 178 was 178, checked in by David Azarewicz, 9 years ago

Major reorganization

File size: 50.6 KB
1/******************************************************************************
2 * os2ahci.c - main file for os2ahci driver
3 *
4 * Copyright (c) 2011 thi.guten Software Development
5 * Copyright (c) 2011 Mensys B.V.
6 * Copyright (c) 2013-2016 David Azarewicz
7 *
8 * Authors: Christian Mueller, Markus Thielen
9 *
10 * Parts copied from/inspired by the Linux AHCI driver;
11 * those parts are (c) Linux AHCI/ATA maintainers
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include "os2ahci.h"
29#include "ioctl.h"
30#include "version.h"
31#include "devhdr.h"
32
33/* -------------------------- macros and constants ------------------------- */
34
35/* set two-dimensional array of port options */
36#define set_port_option(opt, val) \
37 if (adapter_index == -1) { \
38 /* set option for all adapters and ports */ \
39 memset(opt, val, sizeof(opt)); \
40 } else if (port_index == -1) { \
41 /* set option for all ports on current adapter */ \
42 memset(opt[adapter_index], val, sizeof(*opt)); \
43 } else { \
44 /* set option for specific port */ \
45 opt[adapter_index][port_index] = val; \
46 }
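47
48/* Usage sketch (not a new option, just illustrating the macro): the command
49 * line parser below tracks the most recent /A: and /P: values in
50 * adapter_index and port_index, so a per-port switch handler only needs a
51 * single call such as
52 *
53 *   set_port_option(enable_ncq, !iInvertOption);
54 *
55 * which covers all adapters (adapter_index == -1), all ports of one adapter
56 * (port_index == -1), or a single port, depending on what was parsed so far.
57 */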
47
48#define FLAG_KRNL_EXIT_ADD 0x1000
49#define FLAG_KRNL_EXIT_REMOVE 0x2000
50
51#define TYPE_KRNL_EXIT_NMI 0x0000 /* non-maskable interrupts */
52#define TYPE_KRNL_EXIT_SFF 0x0001 /* system fatal faults */
53#define TYPE_KRNL_EXIT_PROCDUMP 0x0002
54#define TYPE_KRNL_EXIT_DYN 0x0003
55#define TYPE_KRNL_EXIT_INT13 0x0004 /* enable int13 IO */
56
57/* ------------------------ typedefs and structures ------------------------ */
58
59/* -------------------------- function prototypes -------------------------- */
60
61extern int SetPsdPutc(void);
62static int add_unit_info(IORB_CONFIGURATION *iorb_conf, int dt_ai, int a, int p, int d, int scsi_id);
63
64/* ------------------------ global/static variables ------------------------ */
65int thorough_scan = 1; /* if != 0, perform thorough PCI scan */
66int init_reset = 1; /* if != 0, reset ports during init */
67int force_write_cache; /* if != 0, force write cache */
68int verbosity = 0; /* default is quiet. 1=show sign-on banner, >1=show adapter info during boot */
69int use_lvm_info = 1;
70long com_baud = 0;
71
72HDRIVER rm_drvh; /* resource manager driver handle */
73USHORT add_handle; /* driver handle (RegisterDeviceClass) */
74char drv_name[] = "OS2AHCI"; /* driver name as string */
75
76/* resource manager driver information structure */
77static DRIVERSTRUCT rm_drvinfo =
78{
79 NULL, /* We cannot do Flat to Far16 conversion at compile time */
80 NULL, /* so we put NULLs in all the Far16 fields and then fill */
81 NULL, /* them in at run time */
82 DMAJOR,
83 DMINOR,
84 BLD_YEAR, BLD_MONTH, BLD_DAY,
85 0,
86 DRT_ADDDM,
87 DRS_ADD,
88 NULL
89};
90
91SpinLock_t drv_lock; /* driver-level spinlock */
92IORB_QUEUE driver_queue; /* driver-level IORB queue */
93AD_INFO ad_infos[MAX_AD]; /* adapter information list */
94int ad_info_cnt; /* number of entries in ad_infos[] */
95u16 ad_ignore; /* bitmap with adapter indexes to ignore */
96int init_complete; /* if != 0, initialization has completed */
97int suspended;
98int resume_sleep_flag;
99
100/* adapter/port-specific options saved when parsing the command line */
101u8 emulate_scsi[MAX_AD][AHCI_MAX_PORTS];
102u8 enable_ncq[MAX_AD][AHCI_MAX_PORTS];
103u8 link_speed[MAX_AD][AHCI_MAX_PORTS];
104u8 link_power[MAX_AD][AHCI_MAX_PORTS];
105u8 track_size[MAX_AD][AHCI_MAX_PORTS];
106u8 port_ignore[MAX_AD][AHCI_MAX_PORTS];
107
108char BldLevel[] = BLDLEVEL;
109
110/* ----------------------------- start of code ----------------------------- */
111
112/******************************************************************************
113 * OS/2 device driver main strategy function.
114 *
115 * NOTE: this is also used as the IDC entry point. We expect an IOCTL request
116 * packet for IDC calls, so they can be handled by gen_ioctl.
117 */
118void StrategyHandler(REQPACKET *prp)
119{
120 u16 rc;
121
122 switch (prp->bCommand)
123 {
124 case STRATEGY_BASEDEVINIT:
125 rc = init_drv(prp);
126 break;
127
128 case STRATEGY_SHUTDOWN:
129 rc = exit_drv(prp->save_restore.Function);
130 break;
131
132 case STRATEGY_GENIOCTL:
133 rc = gen_ioctl(prp);
134 break;
135
136 case STRATEGY_OPEN:
137 build_user_info();
138 rc = RPDONE;
139 break;
140
141 case STRATEGY_READ:
142 rc = char_dev_input(prp);
143 break;
144
145 case STRATEGY_SAVERESTORE:
146 rc = sr_drv(prp->save_restore.Function);
147 break;
148
149 case STRATEGY_INITCOMPLETE:
150 case STRATEGY_CLOSE:
151 case STRATEGY_INPUTSTATUS:
152 case STRATEGY_FLUSHINPUT:
153 /* noop */
154 rc = RPDONE;
155 break;
156
157 default:
158 rc = RPDONE | RPERR_BADCOMMAND;
159 break;
160 }
161
162 prp->usStatus = rc;
163}
164
165void IdcHandler(REQPACKET *prp)
166{
167 StrategyHandler(prp);
168}
169
170/******************************************************************************
171 * Initialize the os2ahci driver. This includes command line parsing, scanning
172 * the PCI bus for supported AHCI adapters, etc.
173 */
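/* Illustrative CONFIG.SYS line (assuming the usual OS2AHCI.ADD file name;
 * switch meanings are taken from the parser below, not from external docs):
 *
 *   BASEDEV=OS2AHCI.ADD /V /A:0 /P:2 /!N /LS:1
 *
 * i.e. raise the verbosity, then for port 2 on adapter 0 disable NCQ and
 * set the link speed option to 1.
 */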
174USHORT init_drv(REQPACKET *req)
175{
176 static int init_drv_called;
177 static int init_drv_failed;
178 APIRET rmrc;
179 const char *pszCmdLine, *cmd_line;
180 int adapter_index = -1;
181 int port_index = -1;
182 int iInvertOption;
183 int iStatus;
184
185 if (init_drv_called)
186 {
187 /* This is the init call for the second (IBMS506$) character
188 * device driver. If the main driver failed initialization, fail this
189 * one as well.
190 */
191 return(RPDONE | ((init_drv_failed) ? RPERR_INITFAIL : 0));
192 }
193 D32g_DbgLevel = 0;
194 init_drv_called = 1;
195 suspended = 0;
196 resume_sleep_flag = 0;
197 memset(ad_infos, 0, sizeof(ad_infos));
198 memset(emulate_scsi, 1, sizeof(emulate_scsi)); /* set default enabled */
199 UtSetDriverName("OS2AHCI$");
200 Header.ulCaps |= DEV_ADAPTER_DD; /* DAZ This flag is not really needed. */
201
202 /* create driver-level spinlock */
203 KernAllocSpinLock(&drv_lock);
204
205 /* register driver with resource manager */
206 rm_drvinfo.DrvrName = drv_name;
207 rm_drvinfo.DrvrDescript = "AHCI SATA Driver";
208 rm_drvinfo.VendorName = DVENDOR;
209 if ((rmrc = RMCreateDriver(&rm_drvinfo, &rm_drvh)) != RMRC_SUCCESS)
210 {
211 iprintf("%s: failed to register driver with resource manager (rc = %d)", drv_name, rmrc);
212 goto init_fail;
213 }
214
215 pszCmdLine = cmd_line = req->init_in.szArgs;
216 iStatus = 0;
217 while (*pszCmdLine)
218 {
219 if (*pszCmdLine++ != '/') continue; /* Ignore anything that doesn't start with '/' */
220 /* pszCmdLine now points to first char of argument */
221
222 if ((iInvertOption = (*pszCmdLine == '!')) != 0) pszCmdLine++;
223
224 if (ArgCmp(pszCmdLine, "B:"))
225 {
226 pszCmdLine += 2;
227 com_baud = strtol(pszCmdLine, &pszCmdLine, 0);
228 continue;
229 }
230
231 if (ArgCmp(pszCmdLine, "C:"))
232 {
233 pszCmdLine += 2;
234 /* set COM port base address for debug messages */
235 D32g_ComBase = strtol(pszCmdLine, &pszCmdLine, 0);
236 if (D32g_ComBase == 1) D32g_ComBase = 0x3f8;
237 if (D32g_ComBase == 2) D32g_ComBase = 0x2f8;
238 continue;
239 }
240
241 if (ArgCmp(pszCmdLine, "D"))
242 {
243 pszCmdLine++;
244 if (*pszCmdLine == ':')
245 {
246 pszCmdLine++;
247 D32g_DbgLevel = strtol(pszCmdLine, &pszCmdLine, 0);
248 }
249 else D32g_DbgLevel++; /* increase debug level */
250 continue;
251 }
252
253 if (ArgCmp(pszCmdLine, "G:"))
254 {
255 u16 usVendor;
256 u16 usDevice;
257
258 pszCmdLine += 2;
259 /* add specified PCI ID as a supported generic AHCI adapter */
260 usVendor = strtol(pszCmdLine, &pszCmdLine, 16);
261 if (*pszCmdLine != ':') break;
262 pszCmdLine++;
263 usDevice = strtol(pszCmdLine, &pszCmdLine, 16);
264 if (add_pci_id(usVendor, usDevice))
265 {
266 iprintf("%s: failed to add PCI ID %04x:%04x", drv_name, usVendor, usDevice);
267 iStatus = 1;
268 }
269 thorough_scan = 1;
270 continue;
271 }
272
273 if (ArgCmp(pszCmdLine, "T"))
274 {
275 pszCmdLine++;
276 /* perform thorough PCI scan (i.e. look for individual supported PCI IDs) */
277 thorough_scan = !iInvertOption;
278 continue;
279 }
280
281 if (ArgCmp(pszCmdLine, "R"))
282 {
283 pszCmdLine++;
284 /* reset ports during initialization */
285 init_reset = !iInvertOption;
286 continue;
287 }
288
289 if (ArgCmp(pszCmdLine, "F"))
290 {
291 pszCmdLine++;
292 /* force write cache regardless of IORB flags */
293 force_write_cache = 1;
294 continue;
295 }
296
297 if (ArgCmp(pszCmdLine, "A:"))
298 {
299 pszCmdLine += 2;
300 /* set adapter index for adapter and port-related options */
301 adapter_index = strtol(pszCmdLine, &pszCmdLine, 0);
302 if (adapter_index < 0 || adapter_index >= MAX_AD)
303 {
304 iprintf("%s: invalid adapter index (%d)", drv_name, adapter_index);
305 iStatus = 1;
306 }
307 continue;
308 }
309
310 if (ArgCmp(pszCmdLine, "P:"))
311 {
312 pszCmdLine += 2;
313 /* set port index for port-related options */
314 port_index = strtol(pszCmdLine, &pszCmdLine, 0);
315 if (port_index < 0 || port_index >= AHCI_MAX_PORTS)
316 {
317 iprintf("%s: invalid port index (%d)", drv_name, port_index);
318 iStatus = 1;
319 }
320 continue;
321 }
322
323 if (ArgCmp(pszCmdLine, "I"))
324 {
325 pszCmdLine++;
326 /* ignore current adapter index */
327 if (adapter_index >= 0)
328 {
329 if (port_index >= 0) port_ignore[adapter_index][port_index] = !iInvertOption;
330 else ad_ignore |= 1U << adapter_index;
331 }
332 continue;
333 }
334
335 if (ArgCmp(pszCmdLine, "S"))
336 {
337 pszCmdLine++;
338 /* enable SCSI emulation for ATAPI devices */
339 set_port_option(emulate_scsi, !iInvertOption);
340 continue;
341 }
342
343 if (ArgCmp(pszCmdLine, "N"))
344 {
345 pszCmdLine++;
346 /* enable NCQ */
347 set_port_option(enable_ncq, !iInvertOption);
348 continue;
349 }
350
351 if (ArgCmp(pszCmdLine, "LS:"))
352 {
353 int optval;
354
355 pszCmdLine += 3;
356 /* set link speed */
357 optval = strtol(pszCmdLine, &pszCmdLine, 0);
358 set_port_option(link_speed, optval);
359 /* need to reset the port in order to establish link settings */
360 init_reset = 1;
361 continue;
362 }
363
364 if (ArgCmp(pszCmdLine, "LP:"))
365 {
366 int optval;
367
368 pszCmdLine += 3;
369 /* set power management */
370 optval = strtol(pszCmdLine, &pszCmdLine, 0);
371 set_port_option(link_power, optval);
372 /* need to reset the port in order to establish link settings */
373 init_reset = 1;
374 continue;
375 }
376
377 if (ArgCmp(pszCmdLine, "4"))
378 {
379 pszCmdLine++;
380 /* enable 4K sector geometry enhancement (track size = 56) */
381 if (!iInvertOption) set_port_option(track_size, 56);
382 continue;
383 }
384
385 if (ArgCmp(pszCmdLine, "Z"))
386 {
387 pszCmdLine++;
388 /* Specify to not use the LVM information. There is no reason why anyone would
389 * want to do this, but previous versions of this driver did not have LVM capability,
390 * so this switch is here temporarily just in case.
391 */
392 use_lvm_info = !iInvertOption;
393 continue;
394 }
395
396 if (ArgCmp(pszCmdLine, "V"))
397 {
398 pszCmdLine++;
399 if (*pszCmdLine == ':')
400 {
401 pszCmdLine++;
402 verbosity = strtol(pszCmdLine, &pszCmdLine, 0);
403 }
404 else verbosity++; /* increase verbosity level */
405 continue;
406 }
407
408 if (ArgCmp(pszCmdLine, "W"))
409 {
410 pszCmdLine++;
411 /* Specify to allow the trace buffer to wrap when full. */
412 D32g_DbgBufWrap = !iInvertOption;
413 continue;
414 }
415
416 iprintf("Unrecognized switch: %s", pszCmdLine-1);
417 iStatus = 1; /* unrecognized argument */
418 }
419
420 if (iStatus) goto init_fail;
421
422 if (com_baud) InitComPort(com_baud);
423
424 NTPRINTF("BldLevel: %s\n", BldLevel);
425 NTPRINTF("CmdLine: %s\n", cmd_line);
426 /*
427 if (sizeof(ADD_WORKSPACE) > ADD_WORKSPACE_SIZE)
428 {
429 dprintf(0,"ADD_WORKSPACE size is too big! %d>16\n", sizeof(ADD_WORKSPACE));
430 goto init_fail;
431 }
432 */
433
434 /* print initialization message */
435 ciprintf("%s driver version %d.%02d", drv_name, DMAJOR, DMINOR);
436
437 #ifdef TESTVER
438 #include "testver.c"
439 #endif
440
441 /* scan PCI bus for supported devices */
442 scan_pci_bus();
443
444 if (ad_info_cnt > 0)
445 {
446 /* initialization succeeded and we found at least one AHCI adapter */
447
448 if (Dev32Help_RegisterDeviceClass(drv_name, add_entry, 0, 1, &add_handle))
449 {
450 iprintf("%s: couldn't register device class", drv_name);
451 goto init_fail;
452 }
453
454 Timer_InitTimer(TIMER_COUNT);
455
456 /* allocate context hooks */
457 KernAllocateContextHook(restart_ctxhook, 0, &restart_ctxhook_h);
458 KernAllocateContextHook(reset_ctxhook, 0, &reset_ctxhook_h);
459 KernAllocateContextHook(engine_ctxhook, 0, &engine_ctxhook_h);
460
461 /* register kernel exit routine for trap dumps */
462 Dev32Help_RegisterKrnlExit(shutdown_driver, FLAG_KRNL_EXIT_ADD, TYPE_KRNL_EXIT_INT13);
463
464 return(RPDONE);
465
466 }
467 else
468 {
469 /* no adapters found */
470 ciprintf("%s: No adapters found.", drv_name);
471 }
472
473init_fail:
474 /* initialization failed; clean up and return error */
475 init_drv_failed = 1;
476
477 if (rm_drvh != 0)
478 {
479 /* remove driver from resource manager */
480 RMDestroyDriver(rm_drvh);
481 }
482
483 ciprintf("%s driver *not* installed", drv_name);
484 return(RPDONE | RPERR_INITFAIL);
485}
486
487/******************************************************************************
488 * Generic IOCTL via character device driver. IOCTLs are used to control the
489 * driver operation and to execute native ATA and ATAPI (SCSI) commands from
490 * ring 3 applications. On top of that, some predefined IOCTLs (e.g. SMART
491 * commands for ATA disks) are implemented here.
492 */
493USHORT gen_ioctl(REQPACKET *ioctl)
494{
495 DPRINTF(2,"IOCTL 0x%x/0x%x\n", ioctl->ioctl.bCategory, ioctl->ioctl.bFunction);
496
497 switch (ioctl->ioctl.bCategory)
498 {
499 case OS2AHCI_IOCTL_CATEGORY:
500 switch (ioctl->ioctl.bFunction)
501 {
502
503 case OS2AHCI_IOCTL_GET_DEVLIST:
504 return(ioctl_get_devlist(ioctl));
505
506 case OS2AHCI_IOCTL_PASSTHROUGH:
507 return(ioctl_passthrough(ioctl));
508
509 }
510 break;
511
512 case DSKSP_CAT_GENERIC:
513 return(ioctl_gen_dsk(ioctl));
514
515 case DSKSP_CAT_SMART:
516 return(ioctl_smart(ioctl));
517 }
518
519 return(RPDONE | RPERR_BADCOMMAND);
520}
521
522/******************************************************************************
523 * Read from character device. If tracing is on (internal ring buffer trace),
524 * we return data from the trace buffer; if not, we might return a device
525 * dump similar to IBM1S506.ADD/DANIS506.ADD (TODO).
526 */
527USHORT char_dev_input(REQPACKET *pPacket)
528{
529 void *LinAdr;
530
531 if (Dev32Help_PhysToLin(pPacket->io.ulAddress, pPacket->io.usCount, &LinAdr))
532 {
533 pPacket->io.usCount = 0;
534 return RPDONE | RPERR_GENERAL;
535 }
536
537 pPacket->io.usCount = dCopyToUser(LinAdr, pPacket->io.usCount);
538
539 return RPDONE;
540}
541
542/******************************************************************************
543 * Device driver exit handler. This handler is called when OS/2 shuts down and
544 * flushes the write caches of all attached devices. Since this is effectively
545 * the same as what we do when suspending, we'll call out to the corresponding suspend
546 * function.
547 *
548 * NOTE: Errors are ignored because there's no way we could stop the shutdown
549 * or do something about the error, unless retrying endlessly is
550 * considered an option.
551 */
552USHORT exit_drv(int func)
553{
554 DPRINTF(2,"exit_drv(%d) called\n", func);
555
556 if (func == 0)
557 {
558 /* we're only interested in the second phase of the shutdown */
559 return(RPDONE);
560 }
561
562 suspend();
563 return(RPDONE);
564}
565
566/******************************************************************************
567 * Device driver suspend/resume handler. This handler is called when ACPI is
568 * executing a suspend or resume.
569 */
570USHORT sr_drv(int func)
571{
572 DPRINTF(2,"sr_drv(%d) called\n", func);
573
574 if (func) resume();
575 else suspend();
576
577 return(RPDONE);
578}
579
580/******************************************************************************
581 * ADD entry point. This is the main entry point for all ADD requests. Due to
582 * the asynchronous nature of ADD drivers, this function primarily queues the
583 * IORB(s) to the corresponding adapter or port queues, then triggers the
584 * state machine to initiate processing queued IORBs.
585 *
586 * NOTE: In order to prevent race conditions or engine stalls, certain rules
587 * around locking, unlocking and IORB handling in general have been
588 * established. Refer to the comments in "trigger_engine()" for
589 * details.
590 */
591void add_entry(IORBH FAR16DATA *vFirstIorb)
592{
593 IORBH FAR16DATA *vIorb;
594 IORBH FAR16DATA *vNext = NULL;
595
596 spin_lock(drv_lock);
597
598 for (vIorb=vFirstIorb; vIorb!=NULL; vIorb=vNext)
599 {
600 IORBH *pIorb = Far16ToFlat(vIorb);
601
602 /* Queue this IORB. Queues primarily exist on port level but there are
603 * some requests which affect the whole driver, most notably
604 * IOCC_CONFIGURATION. In either case, adding the IORB to the driver or
605 * port queue will change the links, thus we need to save the original
606 * link in 'vNext'.
607 */
608 vNext = (pIorb->RequestControl & IORB_CHAIN) ? pIorb->pNxtIORB : NULL;
609
610 pIorb->Status = 0;
611 pIorb->ErrorCode = 0;
612 memset(&pIorb->ADDWorkSpace, 0x00, sizeof(ADD_WORKSPACE));
613
614 if (iorb_driver_level(pIorb))
615 {
616 /* driver-level IORB */
617 pIorb->UnitHandle = 0;
618 iorb_queue_add(&driver_queue, vIorb, pIorb);
619
620 }
621 else
622 {
623 /* port-level IORB */
624 int a = iorb_unit_adapter(pIorb);
625 int p = iorb_unit_port(pIorb);
626 int d = iorb_unit_device(pIorb);
627
628 if (a >= ad_info_cnt ||
629 p > ad_infos[a].port_max ||
630 d > ad_infos[a].ports[p].dev_max ||
631 (ad_infos[a].port_map & (1UL << p)) == 0)
632 {
633
634 /* unit handle outside of the allowed range */
635 DPRINTF(0,"warning: IORB for %d.%d.%d out of range\n", a, p, d);
636 pIorb->Status = IORB_ERROR;
637 pIorb->ErrorCode = IOERR_CMD_SYNTAX;
638 iorb_complete(vIorb, pIorb);
639 continue;
640 }
641
642 iorb_queue_add(&ad_infos[a].ports[p].iorb_queue, vIorb, pIorb);
643 }
644 }
645
646 /* trigger state machine */
647 trigger_engine();
648
649 spin_unlock(drv_lock);
650}
651
652/******************************************************************************
653 * Trigger IORB queue engine. This is a wrapper function for trigger_engine_1()
654 * which will try to get all IORBs sent on their way a couple of times. If
655 * there are still IORBs ready for processing after this, this function will
656 * hand off to a context hook which will continue to trigger the engine until
657 * all IORBs have been sent.
658 *
659 * NOTE: While initialization has not completed (or during suspend/resume
660 * operations), this function will loop indefinitely because we can't
661 * rely on interrupt handlers or context hooks and complex IORBs
662 * requiring multiple requeues would eventually hang and time out if
663 * we stopped triggering here.
664 */
665void trigger_engine(void)
666{
667 int i;
668
669 for (i = 0; i < 3 || !init_complete; i++)
670 {
671 if (trigger_engine_1() == 0)
672 {
673 /* done -- all IORBs have been sent on their way */
674 return;
675 }
676 }
677
678 /* Something keeps bouncing; hand off to the engine context hook which will
679 * keep trying in the background.
680 */
681 KernArmHook(engine_ctxhook_h, 0, 0);
682}
683
684/******************************************************************************
685 * Trigger IORB queue engine in order to send commands in the driver/port IORB
686 * queues to the AHCI hardware. This function will return the number of IORBs
687 * sent. Keep in mind that IORBs might "bounce" if the adapter/port is not in
688 * a state to accept the command, thus it might take quite a few calls to get
689 * all IORBs on their way. This is why there's a wrapper function which tries
690 * it a few times, then hands off to a context hook which will keep trying in
691 * the background.
692 *
693 * IORBs might complete before send_iorb() has returned, at any time during
694 * interrupt processing or on another CPU on SMP systems. IORB completion
695 * means modifications to the corresponding IORB queue (the completed IORB
696 * is removed from the queue) thus we need to protect the IORB queues from
697 * race conditions. The safest approach short of keeping the driver-level
698 * spinlock acquired permanently is to keep it throughout this function and
699 * release it temporarily in send_iorb().
700 *
701 * This implies that the handler functions are fully responsible for acquiring
702 * the driver-level spinlock when they need it, and for releasing it again.
703 *
704 * As a rule of thumb, get the driver-level spinlock whenever accessing
705 * volatile variables (IORB queues, values in ad_info[], ...).
706 *
707 * Additional Notes:
708 *
709 * - This function is expected to be called with the spinlock acquired
710 *
711 * - Adapters can be flagged as 'busy' which means no new IORBs are sent (they
712 * just remain in the queue). This can be used to release the driver-level
713 * spinlock while making sure no new IORBs are going to hit the hardware.
714 * In order to prevent engine stalls, all handlers using this functionality
715 * need to invoke trigger_engine() after resetting the busy flag.
716 *
717 * - Driver-level IORBs are not synchronized by adapter-level 'busy' flags.
718 * However, the driver-level queue is worked "one entry at a time" which
719 * means that no new IORBs will be queued on the driver-level queue until
720 * the head element has completed processing. This means that driver-
721 * level IORB handlers don't need to protect against each other. But they
722 * do need to keep in mind interference with port-level IORBs:
723 *
724 * - Driver-level IORB handlers must obtain the spinlock and/or flag all
725 * adapters as 'busy' which are affected by the driver-level IORB
726 *
727 * - Driver-level IORB handlers must not access the hardware of a
728 * particular adapter if it's flagged as 'busy' by another IORB.
729 */
730int trigger_engine_1(void)
731{
732 IORBH FAR16DATA *vIorb;
733 IORBH *pIorb;
734 IORBH FAR16DATA *vNext;
735 int iorbs_sent = 0;
736 int a;
737 int p;
738
739 iorbs_sent = 0;
740
741 /* process driver-level IORBs */
742 if ((vIorb = driver_queue.vRoot) != NULL)
743 {
744 pIorb = Far16ToFlat(vIorb);
745
746 if (!add_workspace(pIorb)->processing)
747 {
748 send_iorb(vIorb, pIorb);
749 iorbs_sent++;
750 }
751 }
752
753 /* process port-level IORBs */
754 for (a = 0; a < ad_info_cnt; a++)
755 {
756 AD_INFO *ai = ad_infos + a;
757 if (ai->busy)
758 {
759 /* adapter is busy; don't process any IORBs */
760 continue;
761 }
762 for (p = 0; p <= ai->port_max; p++)
763 {
764 /* send all queued IORBs on this port */
765 vNext = NULL;
766 for (vIorb = ai->ports[p].iorb_queue.vRoot; vIorb != NULL; vIorb = vNext)
767 {
768 pIorb = Far16ToFlat(vIorb);
769
770 vNext = pIorb->pNxtIORB;
771 if (!add_workspace(pIorb)->processing)
772 {
773 send_iorb(vIorb, pIorb);
774 iorbs_sent++;
775 }
776 }
777 }
778 }
779
780 return(iorbs_sent);
781}
782
783/******************************************************************************
784 * Send a single IORB to the corresponding AHCI adapter/port. This is just a
785 * switch board for calling the corresponding iocc_*() handler function.
786 *
787 * NOTE: This function is expected to be called with the driver-level spinlock
788 * acquired. It will release it before calling any of the handler
789 * functions and re-acquire it when done.
790 */
791void send_iorb(IORBH FAR16DATA *vIorb, IORBH *pIorb)
792{
793 /* Mark IORB as "processing" before doing anything else. Once the IORB is
794 * marked as "processing", we can release the spinlock because subsequent
795 * invocations of trigger_engine() (e.g. at interrupt time) will ignore this
796 * IORB.
797 */
798 add_workspace(pIorb)->processing = 1;
799 spin_unlock(drv_lock);
800
801 switch (pIorb->CommandCode)
802 {
803 case IOCC_CONFIGURATION:
804 iocc_configuration(vIorb, pIorb);
805 break;
806
807 case IOCC_DEVICE_CONTROL:
808 iocc_device_control(vIorb, pIorb);
809 break;
810
811 case IOCC_UNIT_CONTROL:
812 iocc_unit_control(vIorb, pIorb);
813 break;
814
815 case IOCC_GEOMETRY:
816 iocc_geometry(vIorb, pIorb);
817 break;
818
819 case IOCC_EXECUTE_IO:
820 iocc_execute_io(vIorb, pIorb);
821 break;
822
823 case IOCC_UNIT_STATUS:
824 iocc_unit_status(vIorb, pIorb);
825 break;
826
827 case IOCC_ADAPTER_PASSTHRU:
828 iocc_adapter_passthru(vIorb, pIorb);
829 break;
830
831 default:
832 /* unsupported call */
833 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
834 iorb_done(vIorb, pIorb);
835 break;
836 }
837
838 /* re-acquire spinlock before returning to trigger_engine() */
839 spin_lock(drv_lock);
840}
841
842/******************************************************************************
843 * Handle IOCC_CONFIGURATION requests.
844 */
845void iocc_configuration(IORBH FAR16DATA *vIorb, IORBH *pIorb)
846{
847 int a;
848
849 switch (pIorb->CommandModifier)
850 {
851
852 case IOCM_COMPLETE_INIT:
853 /* Complete initialization. From now on, we won't have to restore the BIOS
854 * configuration after each command and we're fully operational (i.e. will
855 * use interrupts, timers and context hooks instead of polling).
856 */
857 if (!init_complete)
858 {
859 DPRINTF(1,"leaving initialization mode\n");
860 for (a = 0; a < ad_info_cnt; a++)
861 {
862 lock_adapter(ad_infos + a);
863 ahci_complete_init(ad_infos + a);
864 }
865 init_complete = 1;
866
867 /* release all adapters */
868 for (a = 0; a < ad_info_cnt; a++)
869 {
870 unlock_adapter(ad_infos + a);
871 }
872 DPRINTF(1,"leaving initialization mode 2\n");
873
874 #ifdef LEGACY_APM
875 /* register APM hook */
876 apm_init();
877 #endif
878 }
879 iorb_done(vIorb, pIorb);
880 break;
881
882 case IOCM_GET_DEVICE_TABLE:
883 /* construct a device table */
884 iocm_device_table(vIorb, pIorb);
885 break;
886
887 default:
888 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
889 iorb_done(vIorb, pIorb);
890 break;
891 }
892}
893
894/******************************************************************************
895 * Handle IOCC_DEVICE_CONTROL requests.
896 */
897void iocc_device_control(IORBH FAR16DATA *vIorb, IORBH *pIorb)
898{
899 AD_INFO *ai = ad_infos + iorb_unit_adapter(pIorb);
900 IORBH FAR16DATA *vPtr;
901 IORBH FAR16DATA *vNext = NULL;
902 int p = iorb_unit_port(pIorb);
903 int d = iorb_unit_device(pIorb);
904
905 switch (pIorb->CommandModifier)
906 {
907
908 case IOCM_ABORT:
909 /* abort all pending commands on specified port and device */
910 spin_lock(drv_lock);
911 for (vPtr = ai->ports[p].iorb_queue.vRoot; vPtr != NULL; vPtr = vNext)
912 {
913 IORBH *pPtr = Far16ToFlat(vPtr);
914
915 vNext = pPtr->pNxtIORB;
916 /* move all matching IORBs to the abort queue */
917 if (vPtr != vIorb && iorb_unit_device(pPtr) == d)
918 {
919 iorb_queue_del(&ai->ports[p].iorb_queue, vPtr);
920 iorb_queue_add(&abort_queue, vPtr, pPtr);
921 pPtr->ErrorCode = IOERR_CMD_ABORTED;
922 }
923 }
924 spin_unlock(drv_lock);
925
926 /* trigger reset context hook which will finish the abort processing */
927 KernArmHook(reset_ctxhook_h, 0, 0);
928 break;
929
930 case IOCM_SUSPEND:
931 case IOCM_RESUME:
932 case IOCM_GET_QUEUE_STATUS:
933 /* Suspend/resume operations allow access to the hardware for other
934 * entities such as IBMIDECD.FLT. Since os2ahci implements both ATA
935 * and ATAPI in the same driver, this won't be required.
936 */
937 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
938 break;
939
940 case IOCM_LOCK_MEDIA:
941 case IOCM_UNLOCK_MEDIA:
942 case IOCM_EJECT_MEDIA:
943 /* unit control commands to lock, unlock and eject media */
944 /* will be supported later... */
945 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
946 break;
947
948 default:
949 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
950 break;
951 }
952
953 iorb_done(vIorb, pIorb);
954}
955
956/******************************************************************************
957 * Handle IOCC_UNIT_CONTROL requests.
958 */
959void iocc_unit_control(IORBH FAR16DATA *vIorb, IORBH *pIorb)
960{
961 IORB_UNIT_CONTROL *pIorb_uc = (IORB_UNIT_CONTROL *)pIorb;
962 int a = iorb_unit_adapter(pIorb);
963 int p = iorb_unit_port(pIorb);
964 int d = iorb_unit_device(pIorb);
965
966 spin_lock(drv_lock);
967 switch (pIorb->CommandModifier)
968 {
969 case IOCM_ALLOCATE_UNIT:
970 /* allocate unit for exclusive access */
971 if (ad_infos[a].ports[p].devs[d].allocated)
972 {
973 iorb_seterr(pIorb, IOERR_UNIT_ALLOCATED);
974 }
975 else
976 {
977 ad_infos[a].ports[p].devs[d].allocated = 1;
978 }
979 break;
980
981 case IOCM_DEALLOCATE_UNIT:
982 /* deallocate exclusive access to unit */
983 if (!ad_infos[a].ports[p].devs[d].allocated)
984 {
985 iorb_seterr(pIorb, IOERR_UNIT_NOT_ALLOCATED);
986 }
987 else
988 {
989 ad_infos[a].ports[p].devs[d].allocated = 0;
990 }
991 break;
992
993 case IOCM_CHANGE_UNITINFO:
994 /* Change unit (device) information. One reason for this IOCM is the
995 * interface for filter device drivers: a filter device driver can
996 * either change existing UNITINFOs or permanently allocate units
997 * and fabricate new [logical] units; the former is the reason why we
998 * must store the pointer to the updated UNITINFO for subsequent
999 * IOCC_CONFIGURATION/IOCM_GET_DEVICE_TABLE calls.
1000 */
1001 if (!ad_infos[a].ports[p].devs[d].allocated)
1002 {
1003 iorb_seterr(pIorb, IOERR_UNIT_NOT_ALLOCATED);
1004 break;
1005 }
1006 ad_infos[a].ports[p].devs[d].unit_info = pIorb_uc->pUnitInfo;
1007 break;
1008
1009 default:
1010 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1011 break;
1012 }
1013
1014 spin_unlock(drv_lock);
1015 iorb_done(vIorb, pIorb);
1016}
1017
1018/******************************************************************************
1019 * Scan all ports for AHCI devices and construct a DASD device table.
1020 *
1021 * NOTES: This function may be called multiple times. Only the first
1022 * invocation will actually scan for devices; all subsequent calls will
1023 * merely return the results of the initial scan, potentially augmented
1024 * by modified unit infos after IOCC_CONFIGURATION/IOCM_CHANGE_UNITINFO
1025 * requests.
1026 *
1027 * In order to support applications that can't deal with ATAPI devices
1028 * (i.e. need a SCSI adapter) os2ahci will optionally report ATAPI
1029 * devices as SCSI devices. The corresponding SCSI adapter doesn't
1030 * really exist and is only reported here for the IOCM_GET_DEVICE_TABLE
1031 * request. The units attached to this adapter will use the real HW
1032 * unit IDs, thus we'll never receive a command specific to the
1033 * emulated SCSI adapter and won't need to set up any sort of entity
1034 * for it; the only purpose of the emulated SCSI adapter is to pass the
1035 * bus type "AI_DEVBUS_SCSI_2" upstream, and the emulated units, of
1036 * course. The emulated SCSI target IDs are allocated as follows:
1037 *
1038 * 0 the virtual adapter
1039 * 1..n emulated devices; SCSI target ID increments sequentially
1040 */
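/* Rough sketch (not to scale) of the device table layout produced below: the
 * DEVICETABLE header ends with the pAdapter[] array of near pointers, one per
 * reported adapter, and each pointer addresses an ADAPTERINFO that is packed
 * right behind the header together with its UnitInfo[] entries:
 *
 *   header ... pAdapter[0..TotalAdapters-1] | ADAPTERINFO 0 + UnitInfo[] |
 *                                             ADAPTERINFO 1 + UnitInfo[] | ...
 */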
1041void iocm_device_table(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1042{
1043 IORB_CONFIGURATION *pIorb_conf;
1044 DEVICETABLE FAR16DATA *vDt;
1045 DEVICETABLE *pDt;
1046 char *pPos;
1047 int scsi_units = 0;
1048 int scsi_id = 1;
1049 int rc;
1050 int dta;
1051 int a;
1052 int p;
1053 int d;
1054
1055 pIorb_conf = (IORB_CONFIGURATION *)pIorb;
1056 vDt = pIorb_conf->pDeviceTable;
1057 pDt = Far16ToFlat(vDt);
1058
1059 spin_lock(drv_lock);
1060
1061 /* initialize device table header */
1062 pDt->ADDLevelMajor = ADD_LEVEL_MAJOR;
1063 pDt->ADDLevelMinor = ADD_LEVEL_MINOR;
1064 pDt->ADDHandle = add_handle;
1065 pDt->TotalAdapters = ad_info_cnt + 1;
1066
1067 /* set start of adapter and device information tables */
1068 pPos = (char*)&pDt->pAdapter[pDt->TotalAdapters];
1069
1070 /* go through all adapters, including the virtual SCSI adapter */
1071 for (dta = 0; dta < pDt->TotalAdapters; dta++)
1072 {
1073 ADAPTERINFO *pPtr = (ADAPTERINFO *)pPos;
1074
1075 /* sanity check for sufficient space in device table */
1076 if ((u32)(pPtr + 1) - (u32)pDt > pIorb_conf->DeviceTableLen)
1077 {
1078 DPRINTF(0,"error: device table provided by DASD too small\n");
1079 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
1080 goto iocm_device_table_done;
1081 }
1082
1083 pDt->pAdapter[dta] = MakeNear16PtrFromDiff(pIorb_conf->pDeviceTable, pDt, pPtr);
1084
1085 //DPRINTF(2,"iocm_device_table: ptr=%x dta=%x pAdapter[dta]=%x pDeviceTable=%x\n",
1086 // ptr, dta, dt->pAdapter[dta], iorb_conf->pDeviceTable);
1087 memset(pPtr, 0x00, sizeof(*pPtr));
1088
1089 pPtr->AdapterIOAccess = AI_IOACCESS_BUS_MASTER;
1090 pPtr->AdapterHostBus = AI_HOSTBUS_OTHER | AI_BUSWIDTH_32BIT;
1091 pPtr->AdapterFlags = AF_16M | AF_HW_SCATGAT;
1092 pPtr->MaxHWSGList = AHCI_MAX_SG / 2; /* AHCI S/G elements are 22 bits */
1093
1094 if (dta < ad_info_cnt)
1095 {
1096 /* this is a physical AHCI adapter */
1097 AD_INFO *ad_info = ad_infos + dta;
1098
1099 pPtr->AdapterDevBus = AI_DEVBUS_ST506 | AI_DEVBUS_32BIT;
1100 sprintf(pPtr->AdapterName, "AHCI_%d", dta);
1101
1102 if (!ad_info->port_scan_done)
1103 {
1104 /* first call; need to scan AHCI hardware for devices */
1105 if (ad_info->busy)
1106 {
1107 DPRINTF(0,"error: port scan requested while adapter was busy\n");
1108 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
1109 goto iocm_device_table_done;
1110 }
1111 ad_info->busy = 1;
1112 spin_unlock(drv_lock);
1113 rc = ahci_scan_ports(ad_info);
1114 spin_lock(drv_lock);
1115 ad_info->busy = 0;
1116
1117 if (rc != 0)
1118 {
1119 DPRINTF(0,"error: port scan failed on adapter #%d\n", dta);
1120 iorb_seterr(pIorb, IOERR_CMD_SW_RESOURCE);
1121 goto iocm_device_table_done;
1122 }
1123 ad_info->port_scan_done = 1;
1124 }
1125
1126 /* insert physical (i.e. AHCI) devices into the device table */
1127 for (p = 0; p <= ad_info->port_max; p++)
1128 {
1129 for (d = 0; d <= ad_info->ports[p].dev_max; d++)
1130 {
1131 if (ad_info->ports[p].devs[d].present)
1132 {
1133 if (ad_info->ports[p].devs[d].atapi && emulate_scsi[dta][p])
1134 {
1135 /* report this unit as SCSI unit */
1136 scsi_units++;
1137 //continue;
1138 }
1139 if (add_unit_info(pIorb_conf, dta, dta, p, d, 0))
1140 {
1141 goto iocm_device_table_done;
1142 }
1143 }
1144 }
1145 }
1146 }
1147 else
1148 {
1149 /* this is the virtual SCSI adapter */
1150 if (scsi_units == 0)
1151 {
1152 /* not a single unit to be emulated via SCSI */
1153 pDt->TotalAdapters--;
1154 break;
1155 }
1156
1157 /* set adapter name and bus type to mimic a SCSI controller */
1158 pPtr->AdapterDevBus = AI_DEVBUS_SCSI_2 | AI_DEVBUS_16BIT;
1159 sprintf(pPtr->AdapterName, "AHCI_SCSI_0");
1160
1161 /* add all ATAPI units to be emulated by this virtual adapter */
1162 for (a = 0; a < ad_info_cnt; a++)
1163 {
1164 AD_INFO *ad_info = ad_infos + a;
1165
1166 for (p = 0; p <= ad_info->port_max; p++)
1167 {
1168 for (d = 0; d <= ad_info->ports[p].dev_max; d++)
1169 {
1170 if (ad_info->ports[p].devs[d].present && ad_info->ports[p].devs[d].atapi && emulate_scsi[a][p])
1171 {
1172 if (add_unit_info(pIorb_conf, dta, a, p, d, scsi_id++))
1173 {
1174 goto iocm_device_table_done;
1175 }
1176 }
1177 }
1178 }
1179 }
1180 }
1181
1182 /* calculate offset for next adapter */
1183 pPos = (char *)(pPtr->UnitInfo + pPtr->AdapterUnits);
1184 }
1185
1186iocm_device_table_done:
1187 spin_unlock(drv_lock);
1188 iorb_done(vIorb, pIorb);
1189}
1190
1191/******************************************************************************
1192 * Handle IOCC_GEOMETRY requests.
1193 */
1194void iocc_geometry(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1195{
1196 switch (pIorb->CommandModifier)
1197 {
1198 case IOCM_GET_MEDIA_GEOMETRY:
1199 case IOCM_GET_DEVICE_GEOMETRY:
1200 add_workspace(pIorb)->idempotent = 1;
1201 ahci_get_geometry(vIorb, pIorb);
1202 break;
1203
1204 default:
1205 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1206 iorb_done(vIorb, pIorb);
1207 }
1208}
1209
1210/******************************************************************************
1211 * Handle IOCC_EXECUTE_IO requests.
1212 */
1213void iocc_execute_io(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1214{
1215 switch (pIorb->CommandModifier)
1216 {
1217 case IOCM_READ:
1218 add_workspace(pIorb)->idempotent = 1;
1219 ahci_read(vIorb, pIorb);
1220 break;
1221
1222 case IOCM_READ_VERIFY:
1223 add_workspace(pIorb)->idempotent = 1;
1224 ahci_verify(vIorb, pIorb);
1225 break;
1226
1227 case IOCM_WRITE:
1228 add_workspace(pIorb)->idempotent = 1;
1229 ahci_write(vIorb, pIorb);
1230 break;
1231
1232 case IOCM_WRITE_VERIFY:
1233 add_workspace(pIorb)->idempotent = 1;
1234 ahci_write(vIorb, pIorb);
1235 break;
1236
1237 default:
1238 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1239 iorb_done(vIorb, pIorb);
1240 }
1241}
1242
1243/******************************************************************************
1244 * Handle IOCC_UNIT_STATUS requests.
1245 */
1246void iocc_unit_status(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1247{
1248 switch (pIorb->CommandModifier)
1249 {
1250 case IOCM_GET_UNIT_STATUS:
1251 add_workspace(pIorb)->idempotent = 1;
1252 ahci_unit_ready(vIorb, pIorb);
1253 break;
1254
1255 default:
1256 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1257 iorb_done(vIorb, pIorb);
1258 }
1259}
1260
1261/******************************************************************************
1262 * Handle IOCC_ADAPTER_PASSTHROUGH requests.
1263 */
1264void iocc_adapter_passthru(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1265{
1266 switch (pIorb->CommandModifier)
1267 {
1268
1269 case IOCM_EXECUTE_CDB:
1270 add_workspace(pIorb)->idempotent = 0;
1271 ahci_execute_cdb(vIorb, pIorb);
1272 break;
1273
1274 case IOCM_EXECUTE_ATA:
1275 add_workspace(pIorb)->idempotent = 0;
1276 ahci_execute_ata(vIorb, pIorb);
1277 break;
1278
1279 default:
1280 iorb_seterr(pIorb, IOERR_CMD_NOT_SUPPORTED);
1281 iorb_done(vIorb, pIorb);
1282 }
1283}
1284
1285/******************************************************************************
1286 * Add an IORB to the specified queue. This function must be called with the
1287 * adapter-level spinlock acquired.
1288 */
1289void iorb_queue_add(IORB_QUEUE *queue, IORBH FAR16DATA *vIorb, IORBH *pIorb)
1290{
1291 if (iorb_priority(pIorb))
1292 {
1293 /* priority IORB; insert at first position */
1294 pIorb->pNxtIORB = queue->vRoot;
1295 queue->vRoot = vIorb;
1296 }
1297 else
1298 {
1299 /* append IORB to end of queue */
1300 pIorb->pNxtIORB = NULL;
1301
1302 if (queue->vRoot == NULL)
1303 {
1304 queue->vRoot = vIorb;
1305 }
1306 else
1307 {
1308 ((IORBH *)Far16ToFlat(queue->vTail))->pNxtIORB = vIorb;
1309 }
1310 queue->vTail = vIorb;
1311 }
1312
1313 if (D32g_DbgLevel)
1314 {
1315 /* determine queue type (local, driver, abort or port) and minimum debug
1316 * level; otherwise, queue debug prints can become really confusing.
1317 */
1318 char *queue_type;
1319 int min_debug = 1;
1320
1321 if ((u32)queue >> 16 == (u32)&queue >> 16) /* DAZ this is bogus */
1322 {
1323 /* this queue is on the stack */
1324 queue_type = "local";
1325 min_debug = 2;
1326
1327 }
1328 else if (queue == &driver_queue)
1329 {
1330 queue_type = "driver";
1331
1332 }
1333 else if (queue == &abort_queue)
1334 {
1335 queue_type = "abort";
1336 min_debug = 2;
1337
1338 }
1339 else
1340 {
1341 queue_type = "port";
1342 }
1343
1344 DPRINTF(min_debug,"IORB %x queued (cmd=%d/%d queue=%x [%s], timeout=%d)\n",
1345 vIorb, pIorb->CommandCode, pIorb->CommandModifier, queue, queue_type,
1346 pIorb->Timeout);
1347 }
1348}
1349
1350/******************************************************************************
1351 * Remove an IORB from the specified queue. This function must be called with
1352 * the adapter-level spinlock acquired.
1353 */
1354int iorb_queue_del(IORB_QUEUE *queue, IORBH FAR16DATA *vIorb)
1355{
1356 IORBH FAR16DATA *_vIorb;
1357 IORBH FAR16DATA *_vPrev = NULL;
1358 int found = 0;
1359
1360 for (_vIorb = queue->vRoot; _vIorb != NULL; )
1361 {
1362 IORBH *_pIorb = Far16ToFlat(_vIorb);
1363 if (_vIorb == vIorb)
1364 {
1365
1366 /* found the IORB to be removed */
1367 if (_vPrev != NULL)
1368 {
1369 ((IORBH*)Far16ToFlat(_vPrev))->pNxtIORB = _pIorb->pNxtIORB;
1370 }
1371 else
1372 {
1373 queue->vRoot = _pIorb->pNxtIORB;
1374 }
1375 if (_vIorb == queue->vTail)
1376 {
1377 queue->vTail = _vPrev;
1378 }
1379 found = 1;
1380 break;
1381 }
1382 _vPrev = _vIorb;
1383 _vIorb = _pIorb->pNxtIORB;
1384 }
1385
1386 if (found)
1387 {
1388 DPRINTF(3,"IORB %x removed (queue = %x)\n", vIorb, queue);
1389 }
1390 else
1391 {
1392 DPRINTF(2,"IORB %x not found in queue %x\n", vIorb, queue);
1393 }
1394
1395 return(!found);
1396}
1397
1398/******************************************************************************
1399 * Set the error code in the specified IORB
1400 *
1401 * NOTE: This function does *not* call iorb_done(). It merely sets the IORB
1402 * status to the specified error code.
1403 */
1404void iorb_seterr(IORBH *pIorb, USHORT error_code)
1405{
1406 pIorb->ErrorCode = error_code;
1407 pIorb->Status |= IORB_ERROR;
1408}
1409
1410/******************************************************************************
1411 * Mark the specified IORB as done and notify the asynchronous post function,
1412 * if any. The IORB is also removed from the corresponding IORB queue.
1413 *
1414 * NOTES: This function does not clear the Status field; it merely adds the
1415 * IORB_DONE flag.
1416 *
1417 * This function is expected to be called *without* the corresponding
1418 * driver-level drv_lock acquired. It will acquire the spinlock before
1419 * updating the IORB queue and release it before notifying the upstream
1420 * code in order to prevent deadlocks.
1421 *
1422 * Due to this logic, this function is only good for simple task-time
1423 * completions. Functions working on lists of IORBs (such as interrupt
1424 * handlers or context hooks) should call iorb_complete() directly and
1425 * implement their own logic for removing the IORB from the port queue.
1426 * See abort_ctxhook() for an example.
1427 */
1428void iorb_done(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1429{
1430 int a = iorb_unit_adapter(pIorb);
1431 int p = iorb_unit_port(pIorb);
1432
1433 /* remove IORB from corresponding queue */
1434 spin_lock(drv_lock);
1435 if (iorb_driver_level(pIorb))
1436 {
1437 iorb_queue_del(&driver_queue, vIorb);
1438 }
1439 else
1440 {
1441 iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, vIorb);
1442 }
1443 aws_free(add_workspace(pIorb));
1444 spin_unlock(drv_lock);
1445
1446 iorb_complete(vIorb, pIorb);
1447}
1448
1449/******************************************************************************
1450 * Complete an IORB. This should be called without the adapter-level spinlock
1451 * to allow the IORB completion routine to perform whatever processing it
1452 * requires. This implies that the IORB should no longer be in any global
1453 * queue because the IORB completion routine may well reuse the IORB and send
1454 * the next request to us before even returning from this function.
1455 */
1456void iorb_complete(IORBH FAR16DATA *vIorb, IORBH *pIorb)
1457{
1458 pIorb->Status |= IORB_DONE;
1459
1460 DPRINTF(1,"IORB %x complete status=0x%04x error=0x%04x\n",
1461 vIorb, pIorb->Status, pIorb->ErrorCode);
1462
1463 if (pIorb->RequestControl & IORB_ASYNC_POST)
1464 {
1465 Dev32Help_CallFar16((PFNFAR16)pIorb->NotifyAddress, vIorb);
1466 }
1467}
1468
1469/******************************************************************************
1470 * Requeue the specified IORB such that it will be sent downstream for
1471 * processing again. This includes freeing all resources currently allocated
1472 * (timer, buffer, ...) and resetting the flags to 0. The driver-level
1473 * spinlock must be acquired when calling this function.
1474 *
1475 * The following flags are preserved:
1476 * - no_ncq
1477 */
1478void iorb_requeue(IORBH *pIorb)
1479{
1480 ADD_WORKSPACE *aws = add_workspace(pIorb);
1481 u16 no_ncq = aws->no_ncq;
1482 u16 unaligned = aws->unaligned;
1483 u16 retries = aws->retries;
1484
1485 aws_free(aws);
1486 memset(aws, 0x00, sizeof(*aws));
1487
1488 aws->no_ncq = no_ncq;
1489 aws->unaligned = unaligned;
1490 aws->retries = retries;
1491}
1492
1493/******************************************************************************
1494 * Free resources in ADD workspace (timer, buffer, ...). This function should
1495 * be called with the spinlock held to prevent race conditions.
1496 */
1497void aws_free(ADD_WORKSPACE *aws)
1498{
1499 if (aws->timer != 0)
1500 {
1501 Timer_CancelTimer(aws->timer);
1502 aws->timer = 0;
1503 }
1504
1505 if (aws->buf != NULL)
1506 {
1507 MemFree(aws->buf);
1508 aws->buf = NULL;
1509 }
1510}
1511
1512/******************************************************************************
1513 * Lock the adapter, waiting for availability if necessary. This is expected
1514 * to be called at task/request time without the driver-level spinlock
1515 * acquired. Don't call at interrupt time.
1516 */
1517void lock_adapter(AD_INFO *ai)
1518{
1519 TIMER Timer;
1520
1521 spin_lock(drv_lock);
1522 while (ai->busy)
1523 {
1524 spin_unlock(drv_lock);
1525 TimerInit(&Timer, 250);
1526 while (!TimerCheckAndBlock(&Timer));
1527 spin_lock(drv_lock);
1528 }
1529 ai->busy = 1;
1530 spin_unlock(drv_lock);
1531}
1532
1533/******************************************************************************
1534 * Unlock adapter (i.e. reset busy flag)
1535 */
1536void unlock_adapter(AD_INFO *ai)
1537{
1538 ai->busy = 0;
1539}
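1540
1541/* Hypothetical usage sketch (not a real handler in this driver): the busy
1542 * flag protocol documented above trigger_engine_1() boils down to roughly
1543 * this pattern for task-time code that needs exclusive hardware access:
1544 *
1545 *   lock_adapter(ai);          // sets ai->busy, blocks until available
1546 *   ... access the adapter hardware without holding drv_lock ...
1547 *   unlock_adapter(ai);        // clears ai->busy
1548 *
1549 *   spin_lock(drv_lock);
1550 *   trigger_engine();          // restart IORBs that bounced while busy
1551 *   spin_unlock(drv_lock);
1552 */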
1540
1541/******************************************************************************
1542 * Timeout handler for I/O commands. Since timeout handling can involve
1543 * lengthy operations like port resets, the main code is located in a
1544 * separate function which is invoked via a context hook.
1545 */
1546void __syscall timeout_callback(ULONG timer_handle, ULONG p1)
1547{
1548 IORBH FAR16DATA *vIorb = (IORBH FAR16DATA *)CastULONGToFar16(p1);
1549 IORBH *pIorb = Far16ToFlat(vIorb);
1550 int a = iorb_unit_adapter(pIorb);
1551 int p = iorb_unit_port(pIorb);
1552
1553 Timer_CancelTimer(timer_handle);
1554 DPRINTF(0,"timeout for IORB %x\n", vIorb);
1555
1556 /* Move the timed-out IORB to the abort queue. Since it's possible that the
1557 * IORB has completed after the timeout has expired but before we got to
1558 * this line of code, we'll check the return code of iorb_queue_del(): If it
1559 * returns an error, the IORB must have completed a few microseconds ago and
1560 * there is no timeout.
1561 */
1562 spin_lock(drv_lock);
1563 if (iorb_queue_del(&ad_infos[a].ports[p].iorb_queue, vIorb) == 0)
1564 {
1565 iorb_queue_add(&abort_queue, vIorb, pIorb);
1566 pIorb->ErrorCode = IOERR_ADAPTER_TIMEOUT;
1567 }
1568 spin_unlock(drv_lock);
1569
1570 /* Trigger abort processing function. We don't really care whether this
1571 * succeeds because the only reason why it would fail should be multiple
1572 * calls to DevHelp_ArmCtxHook() before the context hook had a chance to
1573 * start executing, which leaves two scenarios:
1574 *
1575 * - We succeeded in arming the context hook. Fine.
1576 *
1577 * - We armed the context hook a second time before it had a chance to
1578 * start executing. In this case, the already scheduled context hook
1579 * will process our IORB as well.
1580 */
1581 KernArmHook(reset_ctxhook_h, 0, 0);
1582
1583 /* Set up a watchdog timer which calls the context hook manually in case
1584 * some kernel thread is looping around the IORB_COMPLETE status bit
1585 * without yielding the CPU (kernel threads don't preempt). This shouldn't
1586 * happen per design because kernel threads are supposed to yield but it
1587 * does in the early boot phase.
1588 */
1589 Timer_StartTimerMS(&th_reset_watchdog, 5000, reset_watchdog, 0);
1590}
1591
1592/******************************************************************************
1593 * Reset handler watchdog. If a timeout occurs, a context hook is armed which
1594 * will execute as soon as a kernel thread yields the CPU. However, some
1595 * kernel components won't yield the CPU during the early boot phase and the
1596 * only way to kick some sense into those components is to run the context
1597 * hook right inside this timer callback. Not exactly pretty, especially
1598 * considering the fact that context hooks were implemented to prevent running
1599 * lengthy operations like a port reset at interrupt time, but without this
1600 * watchdog mechanism we run the risk of getting completely stalled by device
1601 * problems during the early boot phase.
1602 */
1603void __syscall reset_watchdog(ULONG timer_handle, ULONG p1)
1604{
1605 /* reset watchdog timer */
1606 Timer_CancelTimer(timer_handle);
1607 DPRINTF(0,"reset watchdog invoked\n");
1608
1609 /* call context hook manually */
1610 reset_ctxhook(0);
1611}
1612
1613/******************************************************************************
1614 * Add unit info to ADAPTERINFO array (IOCC_GET_DEVICE_TABLE requests). The
1615 * adapter info array in the device table, dt->pAdapter[], is expected to be
1616 * initialized for the specified index (dt_ai).
1617 *
1618 * Please note that the device table adapter index, dta, is not always equal
1619 * to the physical adapter index, a: if SCSI emulation has been activated, the
1620 * last reported adapter is a virtual SCSI adapter and the physical adapter
1621 * indexes for those units are, of course, different from the device table
1622 * index of the virtual SCSI adapter.
1623 */
1624static int add_unit_info(IORB_CONFIGURATION *pIorb_conf, int dta,
1625 int a, int p, int d, int scsi_id)
1626{
1627 DEVICETABLE *pDt = Far16ToFlat(pIorb_conf->pDeviceTable);
1628 ADAPTERINFO *pPtr;
1629 UNITINFO *pUi;
1630 AD_INFO *ai = ad_infos + a;
1631
1632 pPtr = (ADAPTERINFO *)MakeFlatFromNear16(pIorb_conf->pDeviceTable, pDt->pAdapter[dta]);
1633 //DPRINTF(2,"add_unit_info: ptr=%x dta=%x pAdapter[dta]=%x pDeviceTable=%x\n",
1634 // ptr, dta, dt->pAdapter[dta], iorb_conf->pDeviceTable);
1635
1636 pUi = &pPtr->UnitInfo[pPtr->AdapterUnits];
1637
1638 if ((u32)(pUi + 1) - (u32)pDt > pIorb_conf->DeviceTableLen)
1639 {
1640 DPRINTF(0,"error: device table provided by DASD too small\n");
1641 iorb_seterr(&pIorb_conf->iorbh, IOERR_CMD_SW_RESOURCE);
1642 return(-1);
1643 }
1644
1645 if (ai->ports[p].devs[d].unit_info == NULL)
1646 {
1647 /* provide original information about this device (unit) */
1648 memset(pUi, 0x00, sizeof(*pUi));
1649 pUi->AdapterIndex = dta; /* device table adapter index */
1650 pUi->UnitHandle = iorb_unit(a, p, d); /* physical adapter index */
1651 pUi->UnitIndex = pPtr->AdapterUnits;
1652 pUi->UnitType = ai->ports[p].devs[d].dev_type;
1653 pUi->QueuingCount = ai->ports[p].devs[d].ncq_max;
1654 if (ai->ports[p].devs[d].removable)
1655 {
1656 pUi->UnitFlags |= UF_REMOVABLE;
1657 }
1658 if (scsi_id > 0) {
1659 /* set fake SCSI ID for this unit */
1660 pUi->UnitSCSITargetID = scsi_id;
1661 }
1662 }
1663 else
1664 {
1665 /* copy updated device (unit) information (IOCM_CHANGE_UNITINFO) */
1666 memcpy(pUi, ai->ports[p].devs[d].unit_info, sizeof(*pUi));
1667 }
1668
1669 pPtr->AdapterUnits++;
1670 return(0);
1671}
1672