source: trunk/kLdr/kLdrModLX.c@ 114

Last change on this file since 114 was 114, checked in by bird, 7 years ago

kLdrModLX.c: Some fixes.

1/* $Id: kLdrModLX.c 114 2018-10-28 13:36:48Z bird $ */
2/** @file
3 * kLdr - The Module Interpreter for the Linear eXecutable (LX) Format.
4 */
5
6/*
7 * Copyright (c) 2006-2007 Knut St. Osmundsen <bird-kStuff-spamix@anduin.net>
8 *
9 * Permission is hereby granted, free of charge, to any person
10 * obtaining a copy of this software and associated documentation
11 * files (the "Software"), to deal in the Software without
12 * restriction, including without limitation the rights to use,
13 * copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following
16 * conditions:
17 *
18 * The above copyright notice and this permission notice shall be
19 * included in all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
23 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
24 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
25 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
26 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
29 */
30
31/*******************************************************************************
32* Header Files *
33*******************************************************************************/
34#include <k/kLdr.h>
35#include "kLdrInternal.h"
36#include <k/kLdrFmts/lx.h>
37
38
39/*******************************************************************************
40* Defined Constants And Macros *
41*******************************************************************************/
42/** @def KLDRMODLX_STRICT
43 * Define KLDRMODLX_STRICT to enable strict checks in KLDRMODLX. */
44#define KLDRMODLX_STRICT 1
45
46/** @def KLDRMODLX_ASSERT
47 * Assert that an expression is true when KLDRMODLX_STRICT is defined.
48 */
49#ifdef KLDRMODLX_STRICT
50# define KLDRMODLX_ASSERT(expr) kHlpAssert(expr)
51#else
52# define KLDRMODLX_ASSERT(expr) do {} while (0)
53#endif
54
55
56/*******************************************************************************
57* Structures and Typedefs *
58*******************************************************************************/
59/**
60 * Instance data for the LX module interpreter.
61 */
62typedef struct KLDRMODLX
63{
64 /** Pointer to the module. (Follows the section table.) */
65 PKLDRMOD pMod;
66 /** Pointer to the user mapping. */
67 const void *pvMapping;
68 /** The size of the mapped LX image. */
69 KSIZE cbMapped;
70 /** Reserved flags. */
71 KU32 f32Reserved;
72
73 /** The offset of the LX header. */
74 KLDRFOFF offHdr;
75 /** Copy of the LX header. */
76 struct e32_exe Hdr;
77
78 /** Pointer to the loader section.
79 * Allocated together with this structure. */
80 const KU8 *pbLoaderSection;
81 /** Pointer to the last byte in the loader section. */
82 const KU8 *pbLoaderSectionLast;
83 /** Pointer to the object table in the loader section. */
84 const struct o32_obj *paObjs;
85 /** Pointer to the object page map table in the loader section. */
86 const struct o32_map *paPageMappings;
87 /** Pointer to the resource table in the loader section. */
88 const struct rsrc32 *paRsrcs;
89 /** Pointer to the resident name table in the loader section. */
90 const KU8 *pbResNameTab;
91 /** Pointer to the entry table in the loader section. */
92 const KU8 *pbEntryTab;
93
94 /** Pointer to the non-resident name table. */
95 KU8 *pbNonResNameTab;
96 /** Pointer to the last byte in the non-resident name table. */
97 const KU8 *pbNonResNameTabLast;
98
99 /** Pointer to the fixup section. */
100 KU8 *pbFixupSection;
101 /** Pointer to the last byte in the fixup section. */
102 const KU8 *pbFixupSectionLast;
103 /** Pointer to the fixup page table within pvFixupSection. */
104 const KU32 *paoffPageFixups;
105 /** Pointer to the fixup record table within pvFixupSection. */
106 const KU8 *pbFixupRecs;
107 /** Pointer to the import module name table within pvFixupSection. */
108 const KU8 *pbImportMods;
109 /** Pointer to the import procedure name table within pvFixupSection. */
110 const KU8 *pbImportProcs;
111} KLDRMODLX, *PKLDRMODLX;
112
113
114/*******************************************************************************
115* Internal Functions *
116*******************************************************************************/
117static int kldrModLXHasDbgInfo(PKLDRMOD pMod, const void *pvBits);
118static int kldrModLXRelocateBits(PKLDRMOD pMod, void *pvBits, KLDRADDR NewBaseAddress, KLDRADDR OldBaseAddress,
119 PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser);
120static int kldrModLXDoCreate(PKRDR pRdr, KLDRFOFF offNewHdr, PKLDRMODLX *ppModLX);
121static const KU8 *kldrModLXDoNameTableLookupByOrdinal(const KU8 *pbNameTable, KSSIZE cbNameTable, KU32 iOrdinal);
122static int kldrModLXDoNameLookup(PKLDRMODLX pModLX, const char *pchSymbol, KSIZE cchSymbol, KU32 *piSymbol);
123static const KU8 *kldrModLXDoNameTableLookupByName(const KU8 *pbNameTable, KSSIZE cbNameTable,
124 const char *pchSymbol, KSIZE cchSymbol);
125static int kldrModLXDoLoadBits(PKLDRMODLX pModLX, void *pvBits);
126static int kldrModLXDoIterDataUnpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc);
127static int kldrModLXDoIterData2Unpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc);
128static void kLdrModLXMemCopyW(KU8 *pbDst, const KU8 *pbSrc, int cb);
129static int kldrModLXDoProtect(PKLDRMODLX pModLX, void *pvBits, unsigned fUnprotectOrProtect);
130static int kldrModLXDoCallDLL(PKLDRMODLX pModLX, void *pvMapping, unsigned uOp, KUPTR uHandle);
131static int kldrModLXDoForwarderQuery(PKLDRMODLX pModLX, const struct e32_entry *pEntry,
132 PFNKLDRMODGETIMPORT pfnGetForwarder, void *pvUser, PKLDRADDR puValue, KU32 *pfKind);
133static int kldrModLXDoLoadFixupSection(PKLDRMODLX pModLX);
134static KI32 kldrModLXDoCall(KUPTR uEntrypoint, KUPTR uHandle, KU32 uOp, void *pvReserved);
135static int kldrModLXDoReloc(KU8 *pbPage, int off, KLDRADDR PageAddress, const struct r32_rlc *prlc,
136 int iSelector, KLDRADDR uValue, KU32 fKind);
137
138
139/**
140 * Create a loader module instance interpreting the executable image found
141 * in the specified file provider instance.
142 *
143 * @returns 0 on success and *ppMod pointing to a module instance.
144 * On failure, a non-zero OS specific error code is returned.
145 * @param pOps Pointer to the registered method table.
146 * @param pRdr The file provider instance to use.
147 * @param fFlags Flags, MBZ.
148 * @param enmCpuArch The desired CPU architecture. KCPUARCH_UNKNOWN means
149 * anything goes, but with a preference for the current
150 * host architecture.
151 * @param offNewHdr The offset of the new header in MZ files. -1 if not found.
152 * @param ppMod Where to store the module instance pointer.
153 */
154static int kldrModLXCreate(PCKLDRMODOPS pOps, PKRDR pRdr, KU32 fFlags, KCPUARCH enmCpuArch, KLDRFOFF offNewHdr, PPKLDRMOD ppMod)
155{
156 PKLDRMODLX pModLX;
157 int rc;
158 K_NOREF(fFlags);
159
160 /*
161 * Create the instance data and do a minimal header validation.
162 */
163 rc = kldrModLXDoCreate(pRdr, offNewHdr, &pModLX);
164 if (!rc)
165 {
166 /*
167 * Match up against the requested CPU architecture.
168 */
169 if ( enmCpuArch == KCPUARCH_UNKNOWN
170 || pModLX->pMod->enmArch == enmCpuArch)
171 {
172 pModLX->pMod->pOps = pOps;
173 pModLX->pMod->u32Magic = KLDRMOD_MAGIC;
174 *ppMod = pModLX->pMod;
175 return 0;
176 }
177 rc = KLDR_ERR_CPU_ARCH_MISMATCH;
178 }
179 kHlpFree(pModLX);
180 return rc;
181}
182
183
184/**
185 * Separate function for reading and creating the LX module instance to
186 * simplify cleanup on failure.
187 */
188static int kldrModLXDoCreate(PKRDR pRdr, KLDRFOFF offNewHdr, PKLDRMODLX *ppModLX)
189{
190 struct e32_exe Hdr;
191 PKLDRMODLX pModLX;
192 PKLDRMOD pMod;
193 KSIZE cb;
194 KSIZE cchFilename;
195 KSIZE offLdrStuff;
196 KU32 off, offEnd;
197 KU32 i;
198 int rc;
199 int fCanOptimizeMapping;
200 KU32 NextRVA;
201 *ppModLX = NULL;
202
203 /*
204 * Read the signature and file header.
205 */
206 rc = kRdrRead(pRdr, &Hdr, sizeof(Hdr), offNewHdr > 0 ? offNewHdr : 0);
207 if (rc)
208 return rc;
209 if ( Hdr.e32_magic[0] != E32MAGIC1
210 || Hdr.e32_magic[1] != E32MAGIC2)
211 return KLDR_ERR_UNKNOWN_FORMAT;
212
213 /* We're not interested in anything but x86 images. */
214 if ( Hdr.e32_level != E32LEVEL
215 || Hdr.e32_border != E32LEBO
216 || Hdr.e32_worder != E32LEWO
217 || Hdr.e32_cpu < E32CPU286
218 || Hdr.e32_cpu > E32CPU486
219 || Hdr.e32_pagesize != OBJPAGELEN
220 )
221 return KLDR_ERR_LX_BAD_HEADER;
222
223 /* Some rough sanity checks. */
224 offEnd = kRdrSize(pRdr) >= (KLDRFOFF)~(KU32)16 ? ~(KU32)16 : (KU32)kRdrSize(pRdr);
225 if ( Hdr.e32_itermap > offEnd
226 || Hdr.e32_datapage > offEnd
227 || Hdr.e32_nrestab > offEnd
228 || Hdr.e32_nrestab + Hdr.e32_cbnrestab > offEnd
229 || Hdr.e32_ldrsize > offEnd - offNewHdr - sizeof(Hdr)
230 || Hdr.e32_fixupsize > offEnd - offNewHdr - sizeof(Hdr)
231 || Hdr.e32_fixupsize + Hdr.e32_ldrsize > offEnd - offNewHdr - sizeof(Hdr))
232 return KLDR_ERR_LX_BAD_HEADER;
233
234 /* Verify the loader section. */
235 offEnd = Hdr.e32_objtab + Hdr.e32_ldrsize;
236 if (Hdr.e32_objtab < sizeof(Hdr))
237 return KLDR_ERR_LX_BAD_LOADER_SECTION;
238 off = Hdr.e32_objtab + sizeof(struct o32_obj) * Hdr.e32_objcnt;
239 if (off > offEnd)
240 return KLDR_ERR_LX_BAD_LOADER_SECTION;
241 if ( Hdr.e32_objmap
242 && (Hdr.e32_objmap < off || Hdr.e32_objmap > offEnd))
243 return KLDR_ERR_LX_BAD_LOADER_SECTION;
244 if ( Hdr.e32_rsrccnt
245 && ( Hdr.e32_rsrctab < off
246 || Hdr.e32_rsrctab > offEnd
247 || Hdr.e32_rsrctab + sizeof(struct rsrc32) * Hdr.e32_rsrccnt > offEnd))
248 return KLDR_ERR_LX_BAD_LOADER_SECTION;
249 if ( Hdr.e32_restab
250 && (Hdr.e32_restab < off || Hdr.e32_restab > offEnd - 2))
251 return KLDR_ERR_LX_BAD_LOADER_SECTION;
252 if ( Hdr.e32_enttab
253 && (Hdr.e32_enttab < off || Hdr.e32_enttab >= offEnd))
254 return KLDR_ERR_LX_BAD_LOADER_SECTION;
255 if ( Hdr.e32_dircnt
256 && (Hdr.e32_dirtab < off || Hdr.e32_dirtab > offEnd - 2))
257 return KLDR_ERR_LX_BAD_LOADER_SECTION;
258
259 /* Verify the fixup section. */
260 off = offEnd;
261 offEnd = off + Hdr.e32_fixupsize;
262 if ( Hdr.e32_fpagetab
263 && (Hdr.e32_fpagetab < off || Hdr.e32_fpagetab > offEnd))
264 {
265 /*
266 * wlink mixes the fixup section and the loader section.
267 */
268 off = Hdr.e32_fpagetab;
269 offEnd = off + Hdr.e32_fixupsize;
270 Hdr.e32_ldrsize = off - Hdr.e32_objtab;
271 }
272 if ( Hdr.e32_frectab
273 && (Hdr.e32_frectab < off || Hdr.e32_frectab > offEnd))
274 return KLDR_ERR_LX_BAD_FIXUP_SECTION;
275 if ( Hdr.e32_impmod
276 && (Hdr.e32_impmod < off || Hdr.e32_impmod > offEnd || Hdr.e32_impmod + Hdr.e32_impmodcnt > offEnd))
277 return KLDR_ERR_LX_BAD_FIXUP_SECTION;
278 if ( Hdr.e32_impproc
279 && (Hdr.e32_impproc < off || Hdr.e32_impproc > offEnd))
280 return KLDR_ERR_LX_BAD_FIXUP_SECTION;
281
282 /*
283 * Calc the instance size, allocate and initialize it.
284 */
285 cchFilename = kHlpStrLen(kRdrName(pRdr));
286 cb = K_ALIGN_Z(sizeof(KLDRMODLX), 8)
287 + K_ALIGN_Z(K_OFFSETOF(KLDRMOD, aSegments[Hdr.e32_objcnt + 1]), 8)
288 + K_ALIGN_Z(cchFilename + 1, 8);
289 offLdrStuff = cb;
290 cb += Hdr.e32_ldrsize + 2; /* +2 for two extra zeros. */
291 pModLX = (PKLDRMODLX)kHlpAlloc(cb);
292 if (!pModLX)
293 return KERR_NO_MEMORY;
294 *ppModLX = pModLX;
295
296 /* KLDRMOD */
297 pMod = (PKLDRMOD)((KU8 *)pModLX + K_ALIGN_Z(sizeof(KLDRMODLX), 8));
298 pMod->pvData = pModLX;
299 pMod->pRdr = pRdr;
300 pMod->pOps = NULL; /* set upon success. */
301 pMod->cSegments = Hdr.e32_objcnt;
302 pMod->cchFilename = (KU32)cchFilename;
303 pMod->pszFilename = (char *)K_ALIGN_P(&pMod->aSegments[pMod->cSegments], 8);
304 kHlpMemCopy((char *)pMod->pszFilename, kRdrName(pRdr), cchFilename + 1);
305 pMod->pszName = NULL; /* finalized further down */
306 pMod->cchName = 0;
307 pMod->fFlags = 0;
308 switch (Hdr.e32_cpu)
309 {
310 case E32CPU286:
311 pMod->enmCpu = KCPU_I80286;
312 pMod->enmArch = KCPUARCH_X86_16;
313 break;
314 case E32CPU386:
315 pMod->enmCpu = KCPU_I386;
316 pMod->enmArch = KCPUARCH_X86_32;
317 break;
318 case E32CPU486:
319 pMod->enmCpu = KCPU_I486;
320 pMod->enmArch = KCPUARCH_X86_32;
321 break;
322 }
323 pMod->enmEndian = KLDRENDIAN_LITTLE;
324 pMod->enmFmt = KLDRFMT_LX;
325 switch (Hdr.e32_mflags & E32MODMASK)
326 {
327 case E32MODEXE:
328 pMod->enmType = !(Hdr.e32_mflags & E32NOINTFIX)
329 ? KLDRTYPE_EXECUTABLE_RELOCATABLE
330 : KLDRTYPE_EXECUTABLE_FIXED;
331 break;
332
333 case E32MODDLL:
334 case E32PROTDLL:
335 case E32MODPROTDLL:
336 pMod->enmType = !(Hdr.e32_mflags & E32SYSDLL)
337 ? KLDRTYPE_SHARED_LIBRARY_RELOCATABLE
338 : KLDRTYPE_SHARED_LIBRARY_FIXED;
339 break;
340
341 case E32MODPDEV:
342 case E32MODVDEV:
343 pMod->enmType = KLDRTYPE_SHARED_LIBRARY_RELOCATABLE;
344 break;
345 }
346 pMod->u32Magic = 0; /* set upon success. */
347
348 /* KLDRMODLX */
349 pModLX->pMod = pMod;
350 pModLX->pvMapping = 0;
351 pModLX->cbMapped = 0;
352 pModLX->f32Reserved = 0;
353
354 pModLX->offHdr = offNewHdr >= 0 ? offNewHdr : 0;
355 kHlpMemCopy(&pModLX->Hdr, &Hdr, sizeof(Hdr));
356
357 pModLX->pbLoaderSection = (KU8 *)pModLX + offLdrStuff;
358 pModLX->pbLoaderSectionLast = pModLX->pbLoaderSection + pModLX->Hdr.e32_ldrsize - 1;
359 pModLX->paObjs = NULL;
360 pModLX->paPageMappings = NULL;
361 pModLX->paRsrcs = NULL;
362 pModLX->pbResNameTab = NULL;
363 pModLX->pbEntryTab = NULL;
364
365 pModLX->pbNonResNameTab = NULL;
366 pModLX->pbNonResNameTabLast = NULL;
367
368 pModLX->pbFixupSection = NULL;
369 pModLX->pbFixupSectionLast = NULL;
370 pModLX->paoffPageFixups = NULL;
371 pModLX->pbFixupRecs = NULL;
372 pModLX->pbImportMods = NULL;
373 pModLX->pbImportProcs = NULL;
374
375 /*
376 * Read the loader data.
377 */
378 rc = kRdrRead(pRdr, (void *)pModLX->pbLoaderSection, pModLX->Hdr.e32_ldrsize, pModLX->Hdr.e32_objtab + pModLX->offHdr);
379 if (rc)
380 return rc;
381 ((KU8 *)pModLX->pbLoaderSectionLast)[1] = 0;
382 ((KU8 *)pModLX->pbLoaderSectionLast)[2] = 0;
383 if (pModLX->Hdr.e32_objcnt)
384 pModLX->paObjs = (const struct o32_obj *)pModLX->pbLoaderSection;
385 if (pModLX->Hdr.e32_objmap)
386 pModLX->paPageMappings = (const struct o32_map *)(pModLX->pbLoaderSection + pModLX->Hdr.e32_objmap - pModLX->Hdr.e32_objtab);
387 if (pModLX->Hdr.e32_rsrccnt)
388 pModLX->paRsrcs = (const struct rsrc32 *)(pModLX->pbLoaderSection + pModLX->Hdr.e32_rsrctab - pModLX->Hdr.e32_objtab);
389 if (pModLX->Hdr.e32_restab)
390 pModLX->pbResNameTab = pModLX->pbLoaderSection + pModLX->Hdr.e32_restab - pModLX->Hdr.e32_objtab;
391 if (pModLX->Hdr.e32_enttab)
392 pModLX->pbEntryTab = pModLX->pbLoaderSection + pModLX->Hdr.e32_enttab - pModLX->Hdr.e32_objtab;
393
394 /*
395 * Get the soname from the resident name table.
396 * Very convenient that it's the 0 ordinal, because then we get a
397 * free string terminator.
398 * (The table entry consists of a pascal string followed by a 16-bit ordinal.)
399 */
400 if (pModLX->pbResNameTab)
401 pMod->pszName = (const char *)kldrModLXDoNameTableLookupByOrdinal(pModLX->pbResNameTab,
402 pModLX->pbLoaderSectionLast - pModLX->pbResNameTab + 1,
403 0);
404 if (!pMod->pszName)
405 return KLDR_ERR_LX_NO_SONAME;
406 pMod->cchName = *(const KU8 *)pMod->pszName++;
407 if (pMod->cchName != kHlpStrLen(pMod->pszName))
408 return KLDR_ERR_LX_BAD_SONAME;
409
410 /*
411 * Quick validation of the object table.
412 */
413 cb = 0;
414 for (i = 0; i < pMod->cSegments; i++)
415 {
416 if (pModLX->paObjs[i].o32_base & (OBJPAGELEN - 1))
417 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
418 if (pModLX->paObjs[i].o32_base + pModLX->paObjs[i].o32_size <= pModLX->paObjs[i].o32_base)
419 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
420 if (pModLX->paObjs[i].o32_mapsize > (pModLX->paObjs[i].o32_size + (OBJPAGELEN - 1)))
421 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
422 if ( pModLX->paObjs[i].o32_mapsize
423 && ( (KU8 *)&pModLX->paPageMappings[pModLX->paObjs[i].o32_pagemap] > pModLX->pbLoaderSectionLast
424 || (KU8 *)&pModLX->paPageMappings[pModLX->paObjs[i].o32_pagemap + pModLX->paObjs[i].o32_mapsize]
425 > pModLX->pbLoaderSectionLast))
426 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
427 if (i > 0 && !(pModLX->paObjs[i].o32_flags & OBJRSRC))
428 {
429 if (pModLX->paObjs[i].o32_base <= pModLX->paObjs[i - 1].o32_base)
430 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
431 if (pModLX->paObjs[i].o32_base < pModLX->paObjs[i - 1].o32_base + pModLX->paObjs[i - 1].o32_mapsize)
432 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
433 }
434 }
435
436 /*
437 * Check if we can optimize the mapping by using a different
438 * object alignment. The linker typically uses 64KB alignment, but
439 * we can easily get away with page alignment in most cases.
440 */
441 fCanOptimizeMapping = !(Hdr.e32_mflags & (E32NOINTFIX | E32SYSDLL));
442 NextRVA = 0;
443
444 /*
445 * Setup the KLDRMOD segment array.
446 */
447 for (i = 0; i < pMod->cSegments; i++)
448 {
449 /* unused */
450 pMod->aSegments[i].pvUser = NULL;
451 pMod->aSegments[i].MapAddress = 0;
452 pMod->aSegments[i].pchName = NULL;
453 pMod->aSegments[i].cchName = 0;
454 pMod->aSegments[i].offFile = -1;
455 pMod->aSegments[i].cbFile = -1;
456 pMod->aSegments[i].SelFlat = 0;
457 pMod->aSegments[i].Sel16bit = 0;
458
459 /* flags */
460 pMod->aSegments[i].fFlags = 0;
461 if (!(pModLX->paObjs[i].o32_flags & OBJBIGDEF))
462 pMod->aSegments[i].fFlags |= KLDRSEG_FLAG_16BIT;
463 if (pModLX->paObjs[i].o32_flags & OBJALIAS16)
464 pMod->aSegments[i].fFlags |= KLDRSEG_FLAG_OS2_ALIAS16;
465 if (pModLX->paObjs[i].o32_flags & OBJCONFORM)
466 pMod->aSegments[i].fFlags |= KLDRSEG_FLAG_OS2_CONFORM;
467 if (pModLX->paObjs[i].o32_flags & OBJIOPL)
468 pMod->aSegments[i].fFlags |= KLDRSEG_FLAG_OS2_IOPL;
469
470 /* size and addresses */
471 pMod->aSegments[i].Alignment = OBJPAGELEN;
472 pMod->aSegments[i].cb = pModLX->paObjs[i].o32_size;
473 pMod->aSegments[i].LinkAddress = pModLX->paObjs[i].o32_base;
474 pMod->aSegments[i].RVA = NextRVA;
475 if ( fCanOptimizeMapping
476 || i + 1 >= pMod->cSegments
477 || (pModLX->paObjs[i].o32_flags & OBJRSRC)
478 || (pModLX->paObjs[i + 1].o32_flags & OBJRSRC))
479 pMod->aSegments[i].cbMapped = K_ALIGN_Z(pModLX->paObjs[i].o32_size, OBJPAGELEN);
480 else
481 pMod->aSegments[i].cbMapped = pModLX->paObjs[i + 1].o32_base - pModLX->paObjs[i].o32_base;
482 NextRVA += (KU32)pMod->aSegments[i].cbMapped;
483
484 /* protection */
485 switch ( pModLX->paObjs[i].o32_flags
486 & (OBJSHARED | OBJREAD | OBJWRITE | OBJEXEC))
487 {
488 case 0:
489 case OBJSHARED:
490 pMod->aSegments[i].enmProt = KPROT_NOACCESS;
491 break;
492 case OBJREAD:
493 case OBJREAD | OBJSHARED:
494 pMod->aSegments[i].enmProt = KPROT_READONLY;
495 break;
496 case OBJWRITE:
497 case OBJWRITE | OBJREAD:
498 pMod->aSegments[i].enmProt = KPROT_WRITECOPY;
499 break;
500 case OBJWRITE | OBJSHARED:
501 case OBJWRITE | OBJSHARED | OBJREAD:
502 pMod->aSegments[i].enmProt = KPROT_READWRITE;
503 break;
504 case OBJEXEC:
505 case OBJEXEC | OBJSHARED:
506 pMod->aSegments[i].enmProt = KPROT_EXECUTE;
507 break;
508 case OBJEXEC | OBJREAD:
509 case OBJEXEC | OBJREAD | OBJSHARED:
510 pMod->aSegments[i].enmProt = KPROT_EXECUTE_READ;
511 break;
512 case OBJEXEC | OBJWRITE:
513 case OBJEXEC | OBJWRITE | OBJREAD:
514 pMod->aSegments[i].enmProt = KPROT_EXECUTE_WRITECOPY;
515 break;
516 case OBJEXEC | OBJWRITE | OBJSHARED:
517 case OBJEXEC | OBJWRITE | OBJSHARED | OBJREAD:
518 pMod->aSegments[i].enmProt = KPROT_EXECUTE_READWRITE;
519 break;
520 }
521 if ((pModLX->paObjs[i].o32_flags & (OBJREAD | OBJWRITE | OBJEXEC | OBJRSRC)) == OBJRSRC)
522 pMod->aSegments[i].enmProt = KPROT_READONLY;
523 /*pMod->aSegments[i].f16bit = !(pModLX->paObjs[i].o32_flags & OBJBIGDEF)
524 pMod->aSegments[i].fIOPL = !(pModLX->paObjs[i].o32_flags & OBJIOPL)
525 pMod->aSegments[i].fConforming = !(pModLX->paObjs[i].o32_flags & OBJCONFORM) */
526 }
527
528 /* set the mapping size */
529 pModLX->cbMapped = NextRVA;
530
531 /*
532 * We're done.
533 */
534 *ppModLX = pModLX;
535 return 0;
536}
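
/*
 * Illustration of the cbMapped packing above (example values only, not taken
 * from any real LX image): with the typical 64KB link-time object alignment,
 * an image with a 0x5234 byte code object at 0x10000 and a 0x0100 byte data
 * object at 0x20000 needs 0x10000 + 0x1000 = 0x11000 bytes of mapping when
 * the link layout is preserved (next o32_base minus this o32_base, last
 * object page aligned).  When fCanOptimizeMapping is set, each object only
 * gets its page aligned size, i.e. 0x6000 + 0x1000 = 0x7000 bytes in this
 * hypothetical case.
 */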
537
538
539/** @copydoc KLDRMODOPS::pfnDestroy */
540static int kldrModLXDestroy(PKLDRMOD pMod)
541{
542 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
543 int rc = 0;
544 KLDRMODLX_ASSERT(!pModLX->pvMapping);
545
546 if (pMod->pRdr)
547 {
548 rc = kRdrClose(pMod->pRdr);
549 pMod->pRdr = NULL;
550 }
551 if (pModLX->pbNonResNameTab)
552 {
553 kHlpFree(pModLX->pbNonResNameTab);
554 pModLX->pbNonResNameTab = NULL;
555 }
556 if (pModLX->pbFixupSection)
557 {
558 kHlpFree(pModLX->pbFixupSection);
559 pModLX->pbFixupSection = NULL;
560 }
561 pMod->u32Magic = 0;
562 pMod->pOps = NULL;
563 kHlpFree(pModLX);
564 return rc;
565}
566
567
568/**
569 * Resolves base address aliases.
570 *
571 * @param pModLX The interpreter module instance
572 * @param pBaseAddress The base address, IN & OUT.
573 */
574static void kldrModLXResolveBaseAddress(PKLDRMODLX pModLX, PKLDRADDR pBaseAddress)
575{
576 if (*pBaseAddress == KLDRMOD_BASEADDRESS_MAP)
577 *pBaseAddress = pModLX->pMod->aSegments[0].MapAddress;
578 else if (*pBaseAddress == KLDRMOD_BASEADDRESS_LINK)
579 *pBaseAddress = pModLX->pMod->aSegments[0].LinkAddress;
580}
581
582
583/** @copydoc kLdrModQuerySymbol */
584static int kldrModLXQuerySymbol(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress, KU32 iSymbol,
585 const char *pchSymbol, KSIZE cchSymbol, const char *pszVersion,
586 PFNKLDRMODGETIMPORT pfnGetForwarder, void *pvUser, PKLDRADDR puValue, KU32 *pfKind)
587{
588 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
589 KU32 iOrdinal;
590 int rc;
591 const struct b32_bundle *pBundle;
592 K_NOREF(pvBits);
593 K_NOREF(pszVersion);
594
595 /*
596 * Give up at once if there is no entry table.
597 */
598 if (!pModLX->Hdr.e32_enttab)
599 return KLDR_ERR_SYMBOL_NOT_FOUND;
600
601 /*
602 * Translate the symbol name into an ordinal.
603 */
604 if (pchSymbol)
605 {
606 rc = kldrModLXDoNameLookup(pModLX, pchSymbol, cchSymbol, &iSymbol);
607 if (rc)
608 return rc;
609 }
610
611 /*
612 * Iterate the entry table.
613 * (The entry table is made up of bundles of similar exports.)
614 */
615 iOrdinal = 1;
616 pBundle = (const struct b32_bundle *)pModLX->pbEntryTab;
617 while (pBundle->b32_cnt && iOrdinal <= iSymbol)
618 {
619 static const KSIZE s_cbEntry[] = { 0, 3, 5, 5, 7 };
620
621 /*
622 * Check for a hit first.
623 */
624 iOrdinal += pBundle->b32_cnt;
625 if (iSymbol < iOrdinal)
626 {
627 KU32 offObject;
628 const struct e32_entry *pEntry = (const struct e32_entry *)((KUPTR)(pBundle + 1)
629 + (iSymbol - (iOrdinal - pBundle->b32_cnt))
630 * s_cbEntry[pBundle->b32_type]);
631
632 /*
633 * Calculate the return address.
634 */
635 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
636 switch (pBundle->b32_type)
637 {
638 /* empty bundles are place holders for unused ordinal ranges. */
639 case EMPTY:
640 return KLDR_ERR_SYMBOL_NOT_FOUND;
641
642 /* e32_flags + a 16-bit offset. */
643 case ENTRY16:
644 offObject = pEntry->e32_variant.e32_offset.offset16;
645 if (pfKind)
646 *pfKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_NO_TYPE;
647 break;
648
649 /* e32_flags + a 16-bit offset + a 16-bit callgate selector. */
650 case GATE16:
651 offObject = pEntry->e32_variant.e32_callgate.offset;
652 if (pfKind)
653 *pfKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_CODE;
654 break;
655
656 /* e32_flags + a 32-bit offset. */
657 case ENTRY32:
658 offObject = pEntry->e32_variant.e32_offset.offset32;
659 if (pfKind)
660 *pfKind = KLDRSYMKIND_32BIT;
661 break;
662
663 /* e32_flags + 16-bit import module ordinal + a 32-bit procname or ordinal. */
664 case ENTRYFWD:
665 return kldrModLXDoForwarderQuery(pModLX, pEntry, pfnGetForwarder, pvUser, puValue, pfKind);
666
667 default:
668 /* anyone actually using TYPEINFO will end up here. */
669 KLDRMODLX_ASSERT(!"Bad bundle type");
670 return KLDR_ERR_LX_BAD_BUNDLE;
671 }
672
673 /*
674 * Validate the object number and calc the return address.
675 */
676 if ( pBundle->b32_obj <= 0
677 || pBundle->b32_obj > pMod->cSegments)
678 return KLDR_ERR_LX_BAD_BUNDLE;
679 if (puValue)
680 *puValue = BaseAddress
681 + offObject
682 + pMod->aSegments[pBundle->b32_obj - 1].RVA;
683 return 0;
684 }
685
686 /*
687 * Skip the bundle.
688 */
689 if (pBundle->b32_type > ENTRYFWD)
690 {
691 KLDRMODLX_ASSERT(!"Bad type"); /** @todo figure out TYPEINFO. */
692 return KLDR_ERR_LX_BAD_BUNDLE;
693 }
694 if (pBundle->b32_type == 0)
695 pBundle = (const struct b32_bundle *)((const KU8 *)pBundle + 2);
696 else
697 pBundle = (const struct b32_bundle *)((const KU8 *)(pBundle + 1) + s_cbEntry[pBundle->b32_type] * pBundle->b32_cnt);
698 }
699
700 return KLDR_ERR_SYMBOL_NOT_FOUND;
701}
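
/*
 * Illustrative sketch (not compiled): the byte layout of the entry table that
 * the bundle walk above and in kldrModLXEnumSymbols iterates.  The values are
 * made-up example data; only the b32_bundle / e32_entry layout (count, type,
 * 16-bit object number, then b32_cnt entries of 3/5/5/7 bytes for
 * ENTRY16/GATE16/ENTRY32/ENTRYFWD) is taken from the format.
 */
#if 0
static const KU8 g_abExampleEntryTab[] =
{
    /* Bundle 1: two ENTRY32 exports (ordinals 1 and 2) in object 1. */
    2, ENTRY32, 1, 0,                   /* b32_cnt, b32_type, b32_obj */
    0x01, 0x00, 0x10, 0x00, 0x00,       /* e32_flags, offset32 = 0x00001000 */
    0x01, 0x34, 0x12, 0x00, 0x00,       /* e32_flags, offset32 = 0x00001234 */
    /* Bundle 2: an EMPTY bundle reserving the unused ordinals 3..7 (2 bytes only). */
    5, EMPTY,
    /* A zero count byte terminates the table. */
    0
};
#endif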
702
703
704/**
705 * Do name lookup.
706 *
707 * @returns See kLdrModQuerySymbol.
708 * @param pModLX The module to lookup the symbol in.
709 * @param pchSymbol The symbol to lookup.
710 * @param cchSymbol The symbol name length.
711 * @param piSymbol Where to store the symbol ordinal.
712 */
713static int kldrModLXDoNameLookup(PKLDRMODLX pModLX, const char *pchSymbol, KSIZE cchSymbol, KU32 *piSymbol)
714{
715
716 /*
717 * First do a hash table lookup.
718 */
719 /** @todo hash name table for speed. */
720
721 /*
722 * Search the name tables.
723 */
724 const KU8 *pbName = kldrModLXDoNameTableLookupByName(pModLX->pbResNameTab,
725 pModLX->pbLoaderSectionLast - pModLX->pbResNameTab + 1,
726 pchSymbol, cchSymbol);
727 if (!pbName)
728 {
729 if (!pModLX->pbNonResNameTab)
730 {
731 /* lazy load it */
732 /** @todo non-resident name table. */
733 }
734 if (pModLX->pbNonResNameTab)
735 pbName = kldrModLXDoNameTableLookupByName(pModLX->pbNonResNameTab,
736 pModLX->pbNonResNameTabLast - pModLX->pbNonResNameTab + 1,
737 pchSymbol, cchSymbol);
738 }
739 if (!pbName)
740 return KLDR_ERR_SYMBOL_NOT_FOUND;
741
742 *piSymbol = *(const KU16 *)(pbName + 1 + *pbName);
743 return 0;
744}
745
746
747#if 0
748/**
749 * Hash a symbol using the algorithm from sdbm.
750 *
751 * The following is the documentation of the original sdbm functions:
752 *
753 * This algorithm was created for sdbm (a public-domain reimplementation of
754 * ndbm) database library. it was found to do well in scrambling bits,
755 * causing better distribution of the keys and fewer splits. it also happens
756 * to be a good general hashing function with good distribution. the actual
757 * function is hash(i) = hash(i - 1) * 65599 + str[i]; what is included below
758 * is the faster version used in gawk. [there is even a faster, duff-device
759 * version] the magic constant 65599 was picked out of thin air while
760 * experimenting with different constants, and turns out to be a prime.
761 * this is one of the algorithms used in berkeley db (see sleepycat) and
762 * elsewhere.
763 */
764static KU32 kldrModLXDoHash(const char *pchSymbol, KU8 cchSymbol)
765{
766 KU32 hash = 0;
767 int ch;
768
769 while ( cchSymbol-- > 0
770 && (ch = *(unsigned const char *)pchSymbol++))
771 hash = ch + (hash << 6) + (hash << 16) - hash;
772
773 return hash;
774}
775#endif
776
777
778/**
779 * Lookup a name table entry by name.
780 *
781 * @returns Pointer to the name table entry if found.
782 * @returns NULL if not found.
783 * @param pbNameTable Pointer to the name table that should be searched.
784 * @param cbNameTable The size of the name table.
785 * @param pchSymbol The name of the symbol we're looking for.
786 * @param cchSymbol The length of the symbol name.
787 */
788static const KU8 *kldrModLXDoNameTableLookupByName(const KU8 *pbNameTable, KSSIZE cbNameTable,
789 const char *pchSymbol, KSIZE cchSymbol)
790{
791 /*
792 * Determine the name length up front so we can skip anything which doesn't match the length.
793 */
794 KU8 cbSymbol8Bit = (KU8)cchSymbol;
795 if (cbSymbol8Bit != cchSymbol)
796 return NULL; /* too long. */
797
798 /*
799 * Walk the name table.
800 */
801 while (*pbNameTable != 0 && cbNameTable > 0)
802 {
803 const KU8 cbName = *pbNameTable;
804
805 cbNameTable -= cbName + 1 + 2;
806 if (cbNameTable < 0)
807 break;
808
809 if ( cbName == cbSymbol8Bit
810 && !kHlpMemComp(pbNameTable + 1, pchSymbol, cbName))
811 return pbNameTable;
812
813 /* next entry */
814 pbNameTable += cbName + 1 + 2;
815 }
816
817 return NULL;
818}
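
/*
 * Illustrative sketch (not compiled): a tiny resident name table and how the
 * lookup helpers are used.  The bytes are made-up example data; each entry is
 * a pascal string followed by a 16-bit little-endian ordinal, and a zero
 * length byte ends the table.
 */
#if 0
static void kldrModLXNameTableExample(void)
{
    static const KU8 s_abNameTab[] =
    {
        3, 'F','O','O',              0x01, 0x00,    /* "FOO"    -> ordinal 1 */
        6, 'B','A','R','B','A','Z',  0x02, 0x00,    /* "BARBAZ" -> ordinal 2 */
        0
    };
    const KU8 *pbEntry = kldrModLXDoNameTableLookupByName(s_abNameTab, sizeof(s_abNameTab), "BARBAZ", 6);
    if (pbEntry)
    {
        /* The ordinal follows the string, just like kldrModLXDoNameLookup reads it. */
        KU32 iOrdinal = *(const KU16 *)(pbEntry + 1 + *pbEntry);    /* == 2 */
        (void)iOrdinal;
    }
}
#endif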
819
820
821/**
822 * Deal with a forwarder entry.
823 *
824 * @returns See kLdrModQuerySymbol.
825 * @param pModLX The LX module interpreter instance.
826 * @param pEntry The forwarder entry.
827 * @param pfnGetForwarder The callback for resolving forwarder symbols. (optional)
828 * @param pvUser The user argument for the callback.
829 * @param puValue Where to put the value. (optional)
830 * @param pfKind Where to put the symbol kind. (optional)
831 */
832static int kldrModLXDoForwarderQuery(PKLDRMODLX pModLX, const struct e32_entry *pEntry,
833 PFNKLDRMODGETIMPORT pfnGetForwarder, void *pvUser, PKLDRADDR puValue, KU32 *pfKind)
834{
835 int rc;
836 KU32 iSymbol;
837 const char *pchSymbol;
838 KU8 cchSymbol;
839
840 if (!pfnGetForwarder)
841 return KLDR_ERR_FORWARDER_SYMBOL;
842
843 /*
844 * Validate the entry import module ordinal.
845 */
846 if ( !pEntry->e32_variant.e32_fwd.modord
847 || pEntry->e32_variant.e32_fwd.modord > pModLX->Hdr.e32_impmodcnt)
848 return KLDR_ERR_LX_BAD_FORWARDER;
849
850 /*
851 * Figure out the parameters.
852 */
853 if (pEntry->e32_flags & FWD_ORDINAL)
854 {
855 iSymbol = pEntry->e32_variant.e32_fwd.value;
856 pchSymbol = NULL; /* no symbol name. */
857 cchSymbol = 0;
858 }
859 else
860 {
861 const KU8 *pbName;
862
863 /* load the fixup section if necessary. */
864 if (!pModLX->pbImportProcs)
865 {
866 rc = kldrModLXDoLoadFixupSection(pModLX);
867 if (rc)
868 return rc;
869 }
870
871 /* Make name pointer. */
872 pbName = pModLX->pbImportProcs + pEntry->e32_variant.e32_fwd.value;
873 if ( pbName >= pModLX->pbFixupSectionLast
874 || pbName < pModLX->pbFixupSection
875 || !*pbName)
876 return KLDR_ERR_LX_BAD_FORWARDER;
877
878
879 /* check for '#' name. */
880 if (pbName[1] == '#')
881 {
882 KU8 cbLeft = *pbName - 1;
883 const KU8 *pb = pbName + 2; /* skip the length byte and the '#'. */
884 unsigned uBase;
885
886 /* base detection */
887 uBase = 10;
888 if ( cbLeft > 2
889 && pb[0] == '0'
890 && (pb[1] == 'x' || pb[1] == 'X'))
891 {
892 uBase = 16;
893 pb += 2;
894 cbLeft -= 2;
895 }
896
897 /* ascii to integer */
898 iSymbol = 0;
899 while (cbLeft-- > 0)
900 {
901 /* convert char to digit. */
902 unsigned uDigit = *pb++;
903 if (uDigit >= '0' && uDigit <= '9')
904 uDigit -= '0';
905 else if (uDigit >= 'a' && uDigit <= 'z')
906 uDigit -= 'a' - 10;
907 else if (uDigit >= 'A' && uDigit <= 'Z')
908 uDigit -= 'A' - 10;
909 else if (!uDigit)
910 break;
911 else
912 return KLDR_ERR_LX_BAD_FORWARDER;
913 if (uDigit >= uBase)
914 return KLDR_ERR_LX_BAD_FORWARDER;
915
916 /* insert the digit */
917 iSymbol *= uBase;
918 iSymbol += uDigit;
919 }
920 if (!iSymbol)
921 return KLDR_ERR_LX_BAD_FORWARDER;
922
923 pchSymbol = NULL; /* no symbol name. */
924 cchSymbol = 0;
925 }
926 else
927 {
928 pchSymbol = (char *)pbName + 1;
929 cchSymbol = *pbName;
930 iSymbol = NIL_KLDRMOD_SYM_ORDINAL;
931 }
932 }
933
934 /*
935 * Resolve the forwarder.
936 */
937 rc = pfnGetForwarder(pModLX->pMod, pEntry->e32_variant.e32_fwd.modord - 1, iSymbol, pchSymbol, cchSymbol, NULL, puValue, pfKind, pvUser);
938 if (!rc && pfKind)
939 *pfKind |= KLDRSYMKIND_FORWARDER;
940 return rc;
941}
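
/*
 * Example (for illustration only): a forwarder whose import procedure name is
 * the pascal string "#123" resolves to ordinal 123 (decimal) of the referenced
 * import module, while a hypothetical "#0x7B" form is parsed as hexadecimal;
 * any other name, say "SomeProc", is handed to pfnGetForwarder as a named
 * lookup with iSymbol = NIL_KLDRMOD_SYM_ORDINAL.
 */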
942
943
944/**
945 * Loads the fixup section from the executable image.
946 *
947 * The fixup section isn't loaded until it's accessed. It's also freed by kLdrModDone().
948 *
949 * @returns 0 on success, non-zero kLdr or native status code on failure.
950 * @param pModLX The LX module interpreter instance.
951 */
952static int kldrModLXDoLoadFixupSection(PKLDRMODLX pModLX)
953{
954 int rc;
955 KU32 off;
956 void *pv;
957
958 pv = kHlpAlloc(pModLX->Hdr.e32_fixupsize);
959 if (!pv)
960 return KERR_NO_MEMORY;
961
962 off = pModLX->Hdr.e32_objtab + pModLX->Hdr.e32_ldrsize;
963 rc = kRdrRead(pModLX->pMod->pRdr, pv, pModLX->Hdr.e32_fixupsize,
964 off + pModLX->offHdr);
965 if (!rc)
966 {
967 pModLX->pbFixupSection = pv;
968 pModLX->pbFixupSectionLast = pModLX->pbFixupSection + pModLX->Hdr.e32_fixupsize;
969 KLDRMODLX_ASSERT(!pModLX->paoffPageFixups);
970 if (pModLX->Hdr.e32_fpagetab)
971 pModLX->paoffPageFixups = (const KU32 *)(pModLX->pbFixupSection + pModLX->Hdr.e32_fpagetab - off);
972 KLDRMODLX_ASSERT(!pModLX->pbFixupRecs);
973 if (pModLX->Hdr.e32_frectab)
974 pModLX->pbFixupRecs = pModLX->pbFixupSection + pModLX->Hdr.e32_frectab - off;
975 KLDRMODLX_ASSERT(!pModLX->pbImportMods);
976 if (pModLX->Hdr.e32_impmod)
977 pModLX->pbImportMods = pModLX->pbFixupSection + pModLX->Hdr.e32_impmod - off;
978 KLDRMODLX_ASSERT(!pModLX->pbImportProcs);
979 if (pModLX->Hdr.e32_impproc)
980 pModLX->pbImportProcs = pModLX->pbFixupSection + pModLX->Hdr.e32_impproc - off;
981 }
982 else
983 kHlpFree(pv);
984 return rc;
985}
986
987
988/** @copydoc kLdrModEnumSymbols */
989static int kldrModLXEnumSymbols(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress,
990 KU32 fFlags, PFNKLDRMODENUMSYMS pfnCallback, void *pvUser)
991{
992 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
993 const struct b32_bundle *pBundle;
994 KU32 iOrdinal;
995 int rc = 0;
996 K_NOREF(pvBits);
997 K_NOREF(fFlags);
998
999 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
1000
1001 /*
1002 * Enumerate the entry table.
1003 * (The entry table is made up of bundles of similar exports.)
1004 */
1005 iOrdinal = 1;
1006 pBundle = (const struct b32_bundle *)pModLX->pbEntryTab;
1007 while (pBundle->b32_cnt && iOrdinal)
1008 {
1009 static const KSIZE s_cbEntry[] = { 0, 3, 5, 5, 7 };
1010
1011 /*
1012 * Enum the entries in the bundle.
1013 */
1014 if (pBundle->b32_type != EMPTY)
1015 {
1016 const struct e32_entry *pEntry;
1017 KSIZE cbEntry;
1018 KLDRADDR BundleRVA;
1019 unsigned cLeft;
1020
1021
1022 /* Validate the bundle. */
1023 switch (pBundle->b32_type)
1024 {
1025 case ENTRY16:
1026 case GATE16:
1027 case ENTRY32:
1028 if ( pBundle->b32_obj <= 0
1029 || pBundle->b32_obj > pMod->cSegments)
1030 return KLDR_ERR_LX_BAD_BUNDLE;
1031 BundleRVA = pMod->aSegments[pBundle->b32_obj - 1].RVA;
1032 break;
1033
1034 case ENTRYFWD:
1035 BundleRVA = 0;
1036 break;
1037
1038 default:
1039 /* anyone actually using TYPEINFO will end up here. */
1040 KLDRMODLX_ASSERT(!"Bad bundle type");
1041 return KLDR_ERR_LX_BAD_BUNDLE;
1042 }
1043
1044 /* iterate the bundle entries. */
1045 cbEntry = s_cbEntry[pBundle->b32_type];
1046 pEntry = (const struct e32_entry *)(pBundle + 1);
1047 cLeft = pBundle->b32_cnt;
1048 while (cLeft-- > 0)
1049 {
1050 KLDRADDR uValue;
1051 KU32 fKind;
1052 int fFoundName;
1053 const KU8 *pbName;
1054
1055 /*
1056 * Calc the symbol value and kind.
1057 */
1058 switch (pBundle->b32_type)
1059 {
1060 /* e32_flags + a 16-bit offset. */
1061 case ENTRY16:
1062 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_offset.offset16;
1063 fKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_NO_TYPE;
1064 break;
1065
1066 /* e32_flags + a 16-bit offset + a 16-bit callgate selector. */
1067 case GATE16:
1068 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_callgate.offset;
1069 fKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_CODE;
1070 break;
1071
1072 /* e32_flags + a 32-bit offset. */
1073 case ENTRY32:
1074 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_offset.offset32;
1075 fKind = KLDRSYMKIND_32BIT;
1076 break;
1077
1078 /* e32_flags + 16-bit import module ordinal + a 32-bit procname or ordinal. */
1079 case ENTRYFWD:
1080 uValue = 0; /** @todo implement enumeration of forwarders properly. */
1081 fKind = KLDRSYMKIND_FORWARDER;
1082 break;
1083
1084 default: /* shut up gcc. */
1085 uValue = 0;
1086 fKind = KLDRSYMKIND_NO_BIT | KLDRSYMKIND_NO_TYPE;
1087 break;
1088 }
1089
1090 /*
1091 * Any symbol names?
1092 */
1093 fFoundName = 0;
1094
1095 /* resident name table. */
1096 pbName = pModLX->pbResNameTab;
1097 if (pbName)
1098 {
1099 do
1100 {
1101 pbName = kldrModLXDoNameTableLookupByOrdinal(pbName, pModLX->pbLoaderSectionLast - pbName + 1, iOrdinal);
1102 if (!pbName)
1103 break;
1104 fFoundName = 1;
1105 rc = pfnCallback(pMod, iOrdinal, (const char *)pbName + 1, *pbName, NULL, uValue, fKind, pvUser);
1106 if (rc)
1107 return rc;
1108
1109 /* skip to the next entry */
1110 pbName += 1 + *pbName + 2;
1111 } while (pbName < pModLX->pbLoaderSectionLast);
1112 }
1113
1114 /* non-resident name table. */
1115 pbName = pModLX->pbNonResNameTab;
1116 /** @todo lazy load the non-resident name table. */
1117 if (pbName)
1118 {
1119 do
1120 {
1121 pbName = kldrModLXDoNameTableLookupByOrdinal(pbName, pModLX->pbNonResNameTabLast - pbName + 1, iOrdinal);
1122 if (!pbName)
1123 break;
1124 fFoundName = 1;
1125 rc = pfnCallback(pMod, iOrdinal, (const char *)pbName + 1, *pbName, NULL, uValue, fKind, pvUser);
1126 if (rc)
1127 return rc;
1128
1129 /* skip to the next entry */
1130 pbName += 1 + *pbName + 2;
1131 } while (pbName < pModLX->pbNonResNameTabLast);
1132 }
1133
1134 /*
1135 * If no names, call once with the ordinal only.
1136 */
1137 if (!fFoundName)
1138 {
1139 rc = pfnCallback(pMod, iOrdinal, NULL, 0, NULL, uValue, fKind, pvUser);
1140 if (rc)
1141 return rc;
1142 }
1143
1144 /* next */
1145 iOrdinal++;
1146 pEntry = (const struct e32_entry *)((KUPTR)pEntry + cbEntry);
1147 }
1148 }
1149
1150 /*
1151 * The next bundle.
1152 */
1153 if (pBundle->b32_type > ENTRYFWD)
1154 {
1155 KLDRMODLX_ASSERT(!"Bad type"); /** @todo figure out TYPEINFO. */
1156 return KLDR_ERR_LX_BAD_BUNDLE;
1157 }
1158 if (pBundle->b32_type == 0)
1159 pBundle = (const struct b32_bundle *)((const KU8 *)pBundle + 2);
1160 else
1161 pBundle = (const struct b32_bundle *)((const KU8 *)(pBundle + 1) + s_cbEntry[pBundle->b32_type] * pBundle->b32_cnt);
1162 }
1163
1164 return 0;
1165}
1166
1167
1168/**
1169 * Lookup a name table entry by ordinal.
1170 *
1171 * @returns Pointer to the name table entry if found.
1172 * @returns NULL if not found.
1173 * @param pbNameTable Pointer to the name table that should be searched.
1174 * @param cbNameTable The size of the name table.
1175 * @param iOrdinal The ordinal to search for.
1176 */
1177static const KU8 *kldrModLXDoNameTableLookupByOrdinal(const KU8 *pbNameTable, KSSIZE cbNameTable, KU32 iOrdinal)
1178{
1179 while (*pbNameTable != 0 && cbNameTable > 0)
1180 {
1181 const KU8 cbName = *pbNameTable;
1182 KU32 iName;
1183
1184 cbNameTable -= cbName + 1 + 2;
1185 if (cbNameTable < 0)
1186 break;
1187
1188 iName = *(pbNameTable + cbName + 1)
1189 | ((unsigned)*(pbNameTable + cbName + 2) << 8);
1190 if (iName == iOrdinal)
1191 return pbNameTable;
1192
1193 /* next entry */
1194 pbNameTable += cbName + 1 + 2;
1195 }
1196
1197 return NULL;
1198}
1199
1200
1201/** @copydoc kLdrModGetImport */
1202static int kldrModLXGetImport(PKLDRMOD pMod, const void *pvBits, KU32 iImport, char *pszName, KSIZE cchName)
1203{
1204 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1205 const KU8 *pb;
1206 int rc;
1207 K_NOREF(pvBits);
1208
1209 /*
1210 * Validate
1211 */
1212 if (iImport >= pModLX->Hdr.e32_impmodcnt)
1213 return KLDR_ERR_IMPORT_ORDINAL_OUT_OF_BOUNDS;
1214
1215 /*
1216 * Lazy loading the fixup section.
1217 */
1218 if (!pModLX->pbImportMods)
1219 {
1220 rc = kldrModLXDoLoadFixupSection(pModLX);
1221 if (rc)
1222 return rc;
1223 }
1224
1225 /*
1226 * Iterate the module import table until we reach the requested import ordinal.
1227 */
1228 pb = pModLX->pbImportMods;
1229 while (iImport-- > 0)
1230 pb += *pb + 1;
1231
1232 /*
1233 * Copy out the result.
1234 */
1235 if (*pb < cchName)
1236 {
1237 kHlpMemCopy(pszName, pb + 1, *pb);
1238 pszName[*pb] = '\0';
1239 rc = 0;
1240 }
1241 else
1242 {
1243 kHlpMemCopy(pszName, pb + 1, cchName);
1244 if (cchName)
1245 pszName[cchName - 1] = '\0';
1246 rc = KERR_BUFFER_OVERFLOW;
1247 }
1248
1249 return rc;
1250}
1251
1252
1253/** @copydoc kLdrModNumberOfImports */
1254static KI32 kldrModLXNumberOfImports(PKLDRMOD pMod, const void *pvBits)
1255{
1256 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1257 K_NOREF(pvBits);
1258 return pModLX->Hdr.e32_impmodcnt;
1259}
1260
1261
1262/** @copydoc kLdrModGetStackInfo */
1263static int kldrModLXGetStackInfo(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress, PKLDRSTACKINFO pStackInfo)
1264{
1265 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1266 const KU32 i = pModLX->Hdr.e32_stackobj;
1267 K_NOREF(pvBits);
1268
1269 if ( i
1270 && i <= pMod->cSegments
1271 && pModLX->Hdr.e32_esp <= pMod->aSegments[i - 1].LinkAddress + pMod->aSegments[i - 1].cb
1272 && pModLX->Hdr.e32_stacksize
1273 && pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize >= pMod->aSegments[i - 1].LinkAddress)
1274 {
1275
1276 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
1277 pStackInfo->LinkAddress = pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize;
1278 pStackInfo->Address = BaseAddress
1279 + pMod->aSegments[i - 1].RVA
1280 + pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize - pMod->aSegments[i - 1].LinkAddress;
1281 }
1282 else
1283 {
1284 pStackInfo->Address = NIL_KLDRADDR;
1285 pStackInfo->LinkAddress = NIL_KLDRADDR;
1286 }
1287 pStackInfo->cbStack = pModLX->Hdr.e32_stacksize;
1288 pStackInfo->cbStackThread = 0;
1289
1290 return 0;
1291}
1292
1293
1294/** @copydoc kLdrModQueryMainEntrypoint */
1295static int kldrModLXQueryMainEntrypoint(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress, PKLDRADDR pMainEPAddress)
1296{
1297 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1298 K_NOREF(pvBits);
1299
1300 /*
1301 * Convert the address from the header.
1302 */
1303 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
1304 *pMainEPAddress = pModLX->Hdr.e32_startobj
1305 && pModLX->Hdr.e32_startobj <= pMod->cSegments
1306 && pModLX->Hdr.e32_eip < pMod->aSegments[pModLX->Hdr.e32_startobj - 1].cb
1307 ? BaseAddress + pMod->aSegments[pModLX->Hdr.e32_startobj - 1].RVA + pModLX->Hdr.e32_eip
1308 : NIL_KLDRADDR;
1309 return 0;
1310}
1311
1312
1313/** @copydoc kLdrModEnumDbgInfo */
1314static int kldrModLXEnumDbgInfo(PKLDRMOD pMod, const void *pvBits, PFNKLDRENUMDBG pfnCallback, void *pvUser)
1315{
1316 /*PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;*/
1317 K_NOREF(pfnCallback);
1318 K_NOREF(pvUser);
1319
1320 /*
1321 * Quit immediately if no debug info.
1322 */
1323 if (kldrModLXHasDbgInfo(pMod, pvBits))
1324 return 0;
1325#if 0
1326 /*
1327 * Read the debug info and look for familiar magics and structures.
1328 */
1329 /** @todo */
1330#endif
1331
1332 return 0;
1333}
1334
1335
1336/** @copydoc kLdrModHasDbgInfo */
1337static int kldrModLXHasDbgInfo(PKLDRMOD pMod, const void *pvBits)
1338{
1339 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1340 K_NOREF(pvBits);
1341
1342 /*
1343 * Don't currently bother with linkers which don't advertise it in the header.
1344 */
1345 if ( !pModLX->Hdr.e32_debuginfo
1346 || !pModLX->Hdr.e32_debuglen)
1347 return KLDR_ERR_NO_DEBUG_INFO;
1348 return 0;
1349}
1350
1351
1352/** @copydoc kLdrModMap */
1353static int kldrModLXMap(PKLDRMOD pMod)
1354{
1355 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1356 unsigned fFixed;
1357 void *pvBase;
1358 int rc;
1359
1360 /*
1361 * Already mapped?
1362 */
1363 if (pModLX->pvMapping)
1364 return KLDR_ERR_ALREADY_MAPPED;
1365
1366 /*
1367 * Allocate memory for it.
1368 */
1369 /* fixed image? */
1370 fFixed = pMod->enmType == KLDRTYPE_EXECUTABLE_FIXED
1371 || pMod->enmType == KLDRTYPE_SHARED_LIBRARY_FIXED;
1372 if (!fFixed)
1373 pvBase = NULL;
1374 else
1375 {
1376 pvBase = (void *)(KUPTR)pMod->aSegments[0].LinkAddress;
1377 if ((KUPTR)pvBase != pMod->aSegments[0].LinkAddress)
1378 return KLDR_ERR_ADDRESS_OVERFLOW;
1379 }
1380 rc = kHlpPageAlloc(&pvBase, pModLX->cbMapped, KPROT_EXECUTE_READWRITE, fFixed);
1381 if (rc)
1382 return rc;
1383
1384 /*
1385 * Load the bits, apply page protection, and update the segment table.
1386 */
1387 rc = kldrModLXDoLoadBits(pModLX, pvBase);
1388 if (!rc)
1389 rc = kldrModLXDoProtect(pModLX, pvBase, 0 /* protect */);
1390 if (!rc)
1391 {
1392 KU32 i;
1393 for (i = 0; i < pMod->cSegments; i++)
1394 {
1395 if (pMod->aSegments[i].RVA != NIL_KLDRADDR)
1396 pMod->aSegments[i].MapAddress = (KUPTR)pvBase + (KUPTR)pMod->aSegments[i].RVA;
1397 }
1398 pModLX->pvMapping = pvBase;
1399 }
1400 else
1401 kHlpPageFree(pvBase, pModLX->cbMapped);
1402 return rc;
1403}
1404
1405
1406/**
1407 * Loads the LX pages into the specified memory mapping.
1408 *
1409 * @returns 0 on success.
1410 * @returns non-zero kLdr or OS status code on failure.
1411 *
1412 * @param pModLX The LX module interpreter instance.
1413 * @param pvBits Where to load the bits.
1414 */
1415static int kldrModLXDoLoadBits(PKLDRMODLX pModLX, void *pvBits)
1416{
1417 const PKRDR pRdr = pModLX->pMod->pRdr;
1418 KU8 *pbTmpPage = NULL;
1419 int rc = 0;
1420 KU32 i;
1421
1422 /*
1423 * Iterate the segments.
1424 */
1425 for (i = 0; i < pModLX->Hdr.e32_objcnt; i++)
1426 {
1427 const struct o32_obj * const pObj = &pModLX->paObjs[i];
1428 const KU32 cPages = (KU32)(pModLX->pMod->aSegments[i].cbMapped / OBJPAGELEN);
1429 KU32 iPage;
1430 KU8 *pbPage = (KU8 *)pvBits + (KUPTR)pModLX->pMod->aSegments[i].RVA;
1431
1432 /*
1433 * Iterate the page map pages.
1434 */
1435 for (iPage = 0; !rc && iPage < pObj->o32_mapsize; iPage++, pbPage += OBJPAGELEN)
1436 {
1437 const struct o32_map *pMap = &pModLX->paPageMappings[iPage + pObj->o32_pagemap - 1];
1438 switch (pMap->o32_pageflags)
1439 {
1440 case VALID:
1441 if (pMap->o32_pagesize == OBJPAGELEN)
1442 rc = kRdrRead(pRdr, pbPage, OBJPAGELEN,
1443 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1444 else if (pMap->o32_pagesize < OBJPAGELEN)
1445 {
1446 rc = kRdrRead(pRdr, pbPage, pMap->o32_pagesize,
1447 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1448 kHlpMemSet(pbPage + pMap->o32_pagesize, 0, OBJPAGELEN - pMap->o32_pagesize);
1449 }
1450 else
1451 rc = KLDR_ERR_LX_BAD_PAGE_MAP;
1452 break;
1453
1454 case ITERDATA:
1455 case ITERDATA2:
1456 /* make sure we've got a temp page. */
1457 if (!pbTmpPage)
1458 {
1459 pbTmpPage = kHlpAlloc(OBJPAGELEN + 256);
1460 if (!pbTmpPage)
1461 { rc = KERR_NO_MEMORY; break; }
1462 }
1463 /* validate the size. */
1464 if (pMap->o32_pagesize > OBJPAGELEN + 252)
1465 {
1466 rc = KLDR_ERR_LX_BAD_PAGE_MAP;
1467 break;
1468 }
1469
1470 /* read it and ensure 4 extra zero bytes. */
1471 rc = kRdrRead(pRdr, pbTmpPage, pMap->o32_pagesize,
1472 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1473 if (rc)
1474 break;
1475 kHlpMemSet(pbTmpPage + pMap->o32_pagesize, 0, 4);
1476
1477 /* unpack it into the image page. */
1478 if (pMap->o32_pageflags == ITERDATA2)
1479 rc = kldrModLXDoIterData2Unpacking(pbPage, pbTmpPage, pMap->o32_pagesize);
1480 else
1481 rc = kldrModLXDoIterDataUnpacking(pbPage, pbTmpPage, pMap->o32_pagesize);
1482 break;
1483
1484 case INVALID: /* we're probably not dealing correctly with INVALID pages... */
1485 case ZEROED:
1486 kHlpMemSet(pbPage, 0, OBJPAGELEN);
1487 break;
1488
1489 case RANGE:
1490 KLDRMODLX_ASSERT(!"RANGE");
1491 /* Falls through. */
1492 default:
1493 rc = KLDR_ERR_LX_BAD_PAGE_MAP;
1494 break;
1495 }
1496 }
1497 if (rc)
1498 break;
1499
1500 /*
1501 * Zero the remaining pages.
1502 */
1503 if (iPage < cPages)
1504 kHlpMemSet(pbPage, 0, (cPages - iPage) * OBJPAGELEN);
1505 }
1506
1507 if (pbTmpPage)
1508 kHlpFree(pbTmpPage);
1509 return rc;
1510}
1511
1512
1513/**
1514 * Unpacks iterdata (aka EXEPACK).
1515 *
1516 * @returns 0 on success, non-zero kLdr status code on failure.
1517 * @param pbDst Where to put the uncompressed data. (Assumes OBJPAGELEN size.)
1518 * @param pbSrc The compressed source data.
1519 * @param cbSrc The file size of the compressed data. The source buffer
1520 * contains 4 additional zero bytes.
1521 */
1522static int kldrModLXDoIterDataUnpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc)
1523{
1524 const struct LX_Iter *pIter = (const struct LX_Iter *)pbSrc;
1525 int cbDst = OBJPAGELEN;
1526
1527 /* Validate size of data. */
1528 if (cbSrc >= (int)OBJPAGELEN - 2)
1529 return KLDR_ERR_LX_BAD_ITERDATA;
1530
1531 /*
1532 * Expand the page.
1533 */
1534 while (cbSrc > 0 && pIter->LX_nIter)
1535 {
1536 if (pIter->LX_nBytes == 1)
1537 {
1538 /*
1539 * Special case - one databyte.
1540 */
1541 cbDst -= pIter->LX_nIter;
1542 if (cbDst < 0)
1543 return KLDR_ERR_LX_BAD_ITERDATA;
1544
1545 cbSrc -= 4 + 1;
1546 if (cbSrc < -4)
1547 return KLDR_ERR_LX_BAD_ITERDATA;
1548
1549 kHlpMemSet(pbDst, pIter->LX_Iterdata, pIter->LX_nIter);
1550 pbDst += pIter->LX_nIter;
1551 pIter++;
1552 }
1553 else
1554 {
1555 /*
1556 * General.
1557 */
1558 int i;
1559
1560 cbDst -= pIter->LX_nIter * pIter->LX_nBytes;
1561 if (cbDst < 0)
1562 return KLDR_ERR_LX_BAD_ITERDATA;
1563
1564 cbSrc -= 4 + pIter->LX_nBytes;
1565 if (cbSrc < -4)
1566 return KLDR_ERR_LX_BAD_ITERDATA;
1567
1568 for (i = pIter->LX_nIter; i > 0; i--, pbDst += pIter->LX_nBytes)
1569 kHlpMemCopy(pbDst, &pIter->LX_Iterdata, pIter->LX_nBytes);
1570 pIter = (struct LX_Iter *)((char*)pIter + 4 + pIter->LX_nBytes);
1571 }
1572 }
1573
1574 /*
1575 * Zero remainder of the page.
1576 */
1577 if (cbDst > 0)
1578 kHlpMemSet(pbDst, 0, cbDst);
1579
1580 return 0;
1581}
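
/*
 * Illustrative sketch (not compiled): a minimal iterdata page and what the
 * unpacker above produces from it.  The byte layout assumes the usual
 * struct LX_Iter shape relied on by the code above (16-bit LX_nIter, 16-bit
 * LX_nBytes, then LX_nBytes of data); the values are made up.
 */
#if 0
static void kldrModLXIterDataExample(void)
{
    static const KU8 s_abPacked[] =
    {
        0x03, 0x00,             /* LX_nIter  = 3                 */
        0x02, 0x00,             /* LX_nBytes = 2                 */
        'A', 'B',               /* LX_Iterdata                   */
        0x00, 0x00,             /* terminating record (nIter = 0) */
        0x00, 0x00, 0x00, 0x00  /* the 4 extra zero bytes the caller guarantees */
    };
    KU8 abPage[OBJPAGELEN];
    /* abPage becomes "ABABAB" followed by zeros up to OBJPAGELEN. */
    int rc = kldrModLXDoIterDataUnpacking(abPage, s_abPacked, 8);
    (void)rc;
}
#endif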
1582
1583
1584/**
1585 * Unpacks iterdata (aka EXEPACK).
1586 *
1587 * @returns 0 on success, non-zero kLdr status code on failure.
1588 * @param pbDst Where to put the uncompressed data. (Assumes OBJPAGELEN size.)
1589 * @param pbSrc The compressed source data.
1590 * @param cbSrc The file size of the compressed data. The source buffer
1591 * contains 4 additional zero bytes.
1592 */
1593static int kldrModLXDoIterData2Unpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc)
1594{
1595 int cbDst = OBJPAGELEN;
1596
1597 while (cbSrc > 0)
1598 {
1599 /*
1600 * Bit 0 and 1 is the encoding type.
1601 */
1602 switch (*pbSrc & 0x03)
1603 {
1604 /*
1605 *
1606 * 0 1 2 3 4 5 6 7
1607 * type | |
1608 * ----------------
1609 * cb <cb bytes of data>
1610 *
1611 * Bits 2-7 are, if not zero, the length of an uncompressed run
1612 * starting at the following byte.
1613 *
1614 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
1615 * type | | | | | |
1616 * ---------------- ---------------------- -----------------------
1617 * zero cb char to multiply
1618 *
1619 * If the bits are zero, the following two bytes describe a 1 byte iteration
1620 * run. First byte is count, second is the byte to copy. A count of zero
1621 * means end of data, and we simply stop. In that case the rest of the data
1622 * should be zero.
1623 */
1624 case 0:
1625 {
1626 if (*pbSrc)
1627 {
1628 const int cb = *pbSrc >> 2;
1629 cbDst -= cb;
1630 if (cbDst < 0)
1631 return KLDR_ERR_LX_BAD_ITERDATA2;
1632 cbSrc -= cb + 1;
1633 if (cbSrc < 0)
1634 return KLDR_ERR_LX_BAD_ITERDATA2;
1635 kHlpMemCopy(pbDst, ++pbSrc, cb);
1636 pbDst += cb;
1637 pbSrc += cb;
1638 }
1639 else if (cbSrc < 2)
1640 return KLDR_ERR_LX_BAD_ITERDATA2;
1641 else
1642 {
1643 const int cb = pbSrc[1];
1644 if (!cb)
1645 goto l_endloop;
1646 cbDst -= cb;
1647 if (cbDst < 0)
1648 return KLDR_ERR_LX_BAD_ITERDATA2;
1649 cbSrc -= 3;
1650 if (cbSrc < 0)
1651 return KLDR_ERR_LX_BAD_ITERDATA2;
1652 kHlpMemSet(pbDst, pbSrc[2], cb);
1653 pbDst += cb;
1654 pbSrc += 3;
1655 }
1656 break;
1657 }
1658
1659
1660 /*
1661 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1662 * type | | | | | |
1663 * ---- ------- -------------------------
1664 * cb1 cb2 - 3 offset <cb1 bytes of data>
1665 *
1666 * Two bytes laid out as described above, followed by cb1 bytes of data to be copied.
1667 * The cb2(+3) and offset describe an amount of data to be copied from the expanded
1668 * data relative to the current position. The data is copied as you would expect it to be.
1669 */
1670 case 1:
1671 {
1672 cbSrc -= 2;
1673 if (cbSrc < 0)
1674 return KLDR_ERR_LX_BAD_ITERDATA2;
1675 else
1676 {
1677 const unsigned off = ((unsigned)pbSrc[1] << 1) | (*pbSrc >> 7);
1678 const int cb1 = (*pbSrc >> 2) & 3;
1679 const int cb2 = ((*pbSrc >> 4) & 7) + 3;
1680
1681 pbSrc += 2;
1682 cbSrc -= cb1;
1683 if (cbSrc < 0)
1684 return KLDR_ERR_LX_BAD_ITERDATA2;
1685 cbDst -= cb1;
1686 if (cbDst < 0)
1687 return KLDR_ERR_LX_BAD_ITERDATA2;
1688 kHlpMemCopy(pbDst, pbSrc, cb1);
1689 pbDst += cb1;
1690 pbSrc += cb1;
1691
1692 if (off > OBJPAGELEN - (unsigned)cbDst)
1693 return KLDR_ERR_LX_BAD_ITERDATA2;
1694 cbDst -= cb2;
1695 if (cbDst < 0)
1696 return KLDR_ERR_LX_BAD_ITERDATA2;
1697 kHlpMemMove(pbDst, pbDst - off, cb2);
1698 pbDst += cb2;
1699 }
1700 break;
1701 }
1702
1703
1704 /*
1705 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1706 * type | | | |
1707 * ---- ----------------------------------
1708 * cb-3 offset
1709 *
1710 * Two bytes laid out as described above.
1711 * The cb(+3) and offset describe an amount of data to be copied from the expanded
1712 * data relative to the current position.
1713 *
1714 * If offset == 1 the data is not copied as expected, but in the memcpyw manner.
1715 */
1716 case 2:
1717 {
1718 cbSrc -= 2;
1719 if (cbSrc < 0)
1720 return KLDR_ERR_LX_BAD_ITERDATA2;
1721 else
1722 {
1723 const unsigned off = ((unsigned)pbSrc[1] << 4) | (*pbSrc >> 4);
1724 const int cb = ((*pbSrc >> 2) & 3) + 3;
1725
1726 pbSrc += 2;
1727 if (off > OBJPAGELEN - (unsigned)cbDst)
1728 return KLDR_ERR_LX_BAD_ITERDATA2;
1729 cbDst -= cb;
1730 if (cbDst < 0)
1731 return KLDR_ERR_LX_BAD_ITERDATA2;
1732 kLdrModLXMemCopyW(pbDst, pbDst - off, cb);
1733 pbDst += cb;
1734 }
1735 break;
1736 }
1737
1738
1739 /*
1740 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
1741 * type | | | | | |
1742 * ---------- ---------------- ----------------------------------
1743 * cb1 cb2 offset <cb1 bytes of data>
1744 *
1745 * Three bytes laid out as described above, followed by cb1 bytes of data to be copied.
1746 * The cb2 and offset describe an amount of data to be copied from the expanded
1747 * data relative to the current position.
1748 *
1749 * If offset == 1 the data is not copied as expected, but in the memcpyw manner.
1750 */
1751 case 3:
1752 {
1753 cbSrc -= 3;
1754 if (cbSrc < 0)
1755 return KLDR_ERR_LX_BAD_ITERDATA2;
1756 else
1757 {
1758 const int cb1 = (*pbSrc >> 2) & 0xf;
1759 const int cb2 = ((pbSrc[1] & 0xf) << 2) | (*pbSrc >> 6);
1760 const unsigned off = ((unsigned)pbSrc[2] << 4) | (pbSrc[1] >> 4);
1761
1762 pbSrc += 3;
1763 cbSrc -= cb1;
1764 if (cbSrc < 0)
1765 return KLDR_ERR_LX_BAD_ITERDATA2;
1766 cbDst -= cb1;
1767 if (cbDst < 0)
1768 return KLDR_ERR_LX_BAD_ITERDATA2;
1769 kHlpMemCopy(pbDst, pbSrc, cb1);
1770 pbDst += cb1;
1771 pbSrc += cb1;
1772
1773 if (off > OBJPAGELEN - (unsigned)cbDst)
1774 return KLDR_ERR_LX_BAD_ITERDATA2;
1775 cbDst -= cb2;
1776 if (cbDst < 0)
1777 return KLDR_ERR_LX_BAD_ITERDATA2;
1778 kLdrModLXMemCopyW(pbDst, pbDst - off, cb2);
1779 pbDst += cb2;
1780 }
1781 break;
1782 }
1783 } /* type switch. */
1784 } /* unpack loop */
1785
1786l_endloop:
1787
1788
1789 /*
1790 * Zero remainder of the page.
1791 */
1792 if (cbDst > 0)
1793 kHlpMemSet(pbDst, 0, cbDst);
1794
1795 return 0;
1796}
1797
1798
1799/**
1800 * Special memcpy employed by the iterdata2 algorithm.
1801 *
1802 * Emulates a 16-bit memcpy (copying 16 bits at a time) and the effect this
1803 * has when the source is very close to the destination.
1804 *
1805 * @param pbDst Destination pointer.
1806 * @param pbSrc Source pointer. Will always be <= pbDst.
1807 * @param cb Amount of data to be copied.
1808 * @remark This assumes that unaligned word and dword access is fine.
1809 */
1810static void kLdrModLXMemCopyW(KU8 *pbDst, const KU8 *pbSrc, int cb)
1811{
1812 switch (pbDst - pbSrc)
1813 {
1814 case 0:
1815 case 1:
1816 case 2:
1817 case 3:
1818 /* 16-bit copy (unaligned) */
1819 if (cb & 1)
1820 *pbDst++ = *pbSrc++;
1821 for (cb >>= 1; cb > 0; cb--, pbDst += 2, pbSrc += 2)
1822 *(KU16 *)pbDst = *(const KU16 *)pbSrc;
1823 break;
1824
1825 default:
1826 /* 32-bit copy (unaligned) */
1827 if (cb & 1)
1828 *pbDst++ = *pbSrc++;
1829 if (cb & 2)
1830 {
1831 *(KU16 *)pbDst = *(const KU16 *)pbSrc;
1832 pbDst += 2;
1833 pbSrc += 2;
1834 }
1835 for (cb >>= 2; cb > 0; cb--, pbDst += 4, pbSrc += 4)
1836 *(KU32 *)pbDst = *(const KU32 *)pbSrc;
1837 break;
1838 }
1839}
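
/*
 * Illustrative sketch (not compiled): why kLdrModLXMemCopyW differs from a
 * plain memmove.  The buffer contents are made-up example data.
 */
#if 0
static void kLdrModLXMemCopyWExample(void)
{
    KU8 ab[16] = { 'X', 'Y' };          /* remaining bytes are zero */

    /* Distance pbDst - pbSrc is 2, so the copy is done 16 bits at a time and
       each word written is read back by the next iteration: the buffer becomes
       "XYXYXYXY", whereas an ordinary memmove would give "XYXY" followed by
       zeros.  The iterdata2 type 2/3 records with a small offset depend on
       exactly this replication behaviour. */
    kLdrModLXMemCopyW(&ab[2], &ab[0], 6);
}
#endif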
1840
1841
1842/**
1843 * Unprotects or protects the specified image mapping.
1844 *
1845 * @returns 0 on success.
1846 * @returns non-zero kLdr or OS status code on failure.
1847 *
1848 * @param pModLX The LX module interpreter instance.
1849 * @param pvBits The mapping to protect.
1850 * @param fUnprotectOrProtect If 1 unprotect (i.e. make all writable), otherwise
1851 * protect according to the object table.
1852 */
1853static int kldrModLXDoProtect(PKLDRMODLX pModLX, void *pvBits, unsigned fUnprotectOrProtect)
1854{
1855 KU32 i;
1856 PKLDRMOD pMod = pModLX->pMod;
1857
1858 /*
1859 * Change object protection.
1860 */
1861 for (i = 0; i < pMod->cSegments; i++)
1862 {
1863 int rc;
1864 void *pv;
1865 KPROT enmProt;
1866
1867 /* calc new protection. */
1868 enmProt = pMod->aSegments[i].enmProt;
1869 if (fUnprotectOrProtect)
1870 {
1871 switch (enmProt)
1872 {
1873 case KPROT_NOACCESS:
1874 case KPROT_READONLY:
1875 case KPROT_READWRITE:
1876 case KPROT_WRITECOPY:
1877 enmProt = KPROT_READWRITE;
1878 break;
1879 case KPROT_EXECUTE:
1880 case KPROT_EXECUTE_READ:
1881 case KPROT_EXECUTE_READWRITE:
1882 case KPROT_EXECUTE_WRITECOPY:
1883 enmProt = KPROT_EXECUTE_READWRITE;
1884 break;
1885 default:
1886 KLDRMODLX_ASSERT(!"bad enmProt");
1887 return -1;
1888 }
1889 }
1890 else
1891 {
1892 /* copy on write -> normal write. */
1893 if (enmProt == KPROT_EXECUTE_WRITECOPY)
1894 enmProt = KPROT_EXECUTE_READWRITE;
1895 else if (enmProt == KPROT_WRITECOPY)
1896 enmProt = KPROT_READWRITE;
1897 }
1898
1899
1900 /* calc the address and set page protection. */
1901 pv = (KU8 *)pvBits + pMod->aSegments[i].RVA;
1902
1903 rc = kHlpPageProtect(pv, pMod->aSegments[i].cbMapped, enmProt);
1904 if (rc)
1905 break;
1906
1907 /** @todo the gap page should be marked NOACCESS! */
1908 }
1909
1910 return 0;
1911}
1912
1913
1914/** @copydoc kLdrModUnmap */
1915static int kldrModLXUnmap(PKLDRMOD pMod)
1916{
1917 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1918 KU32 i;
1919 int rc;
1920
1921 /*
1922 * Mapped?
1923 */
1924 if (!pModLX->pvMapping)
1925 return KLDR_ERR_NOT_MAPPED;
1926
1927 /*
1928 * Free the mapping and update the segments.
1929 */
1930 rc = kHlpPageFree((void *)pModLX->pvMapping, pModLX->cbMapped);
1931 KLDRMODLX_ASSERT(!rc);
1932 pModLX->pvMapping = NULL;
1933
1934 for (i = 0; i < pMod->cSegments; i++)
1935 pMod->aSegments[i].MapAddress = 0;
1936
1937 return rc;
1938}
1939
1940
1941/** @copydoc kLdrModAllocTLS */
1942static int kldrModLXAllocTLS(PKLDRMOD pMod, void *pvMapping)
1943{
1944 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1945
1946 /* no tls, just do the error checking. */
1947 if ( pvMapping == KLDRMOD_INT_MAP
1948 && pModLX->pvMapping)
1949 return KLDR_ERR_NOT_MAPPED;
1950 return 0;
1951}
1952
1953
1954/** @copydoc kLdrModFreeTLS */
1955static void kldrModLXFreeTLS(PKLDRMOD pMod, void *pvMapping)
1956{
1957 /* no tls. */
1958 K_NOREF(pMod);
1959 K_NOREF(pvMapping);
1960
1961}
1962
1963
1964/** @copydoc kLdrModReload */
1965static int kldrModLXReload(PKLDRMOD pMod)
1966{
1967 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1968 int rc, rc2;
1969
1970 /*
1971 * Mapped?
1972 */
1973 if (!pModLX->pvMapping)
1974 return KLDR_ERR_NOT_MAPPED;
1975
1976 /*
1977 * Before doing anything we'll have to make all pages writable.
1978 */
1979 rc = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 1 /* unprotect */);
1980 if (rc)
1981 return rc;
1982
1983 /*
1984 * Load the bits again.
1985 */
1986 rc = kldrModLXDoLoadBits(pModLX, (void *)pModLX->pvMapping);
1987
1988 /*
1989 * Restore protection.
1990 */
1991 rc2 = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 0 /* protect */);
1992 if (!rc && rc2)
1993 rc = rc2;
1994 return rc;
1995}
1996
1997
1998/** @copydoc kLdrModFixupMapping */
1999static int kldrModLXFixupMapping(PKLDRMOD pMod, PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser)
2000{
2001 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2002 int rc, rc2;
2003
2004 /*
2005 * Mapped?
2006 */
2007 if (!pModLX->pvMapping)
2008 return KLDR_ERR_NOT_MAPPED;
2009
2010 /*
2011 * Before doing anything we'll have to make all pages writable.
2012 */
2013 rc = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 1 /* unprotect */);
2014 if (rc)
2015 return rc;
2016
2017 /*
2018 * Apply fixups and resolve imports.
2019 */
2020 rc = kldrModLXRelocateBits(pMod, (void *)pModLX->pvMapping, (KUPTR)pModLX->pvMapping,
2021 pMod->aSegments[0].LinkAddress, pfnGetImport, pvUser);
2022
2023 /*
2024 * Restore protection.
2025 */
2026 rc2 = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 0 /* protect */);
2027 if (!rc && rc2)
2028 rc = rc2;
2029 return rc;
2030}
2031
2032
2033/** @copydoc kLdrModCallInit */
2034static int kldrModLXCallInit(PKLDRMOD pMod, void *pvMapping, KUPTR uHandle)
2035{
2036 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2037 int rc;
2038
2039 /*
2040 * Mapped?
2041 */
2042 if (pvMapping == KLDRMOD_INT_MAP)
2043 {
2044 pvMapping = (void *)pModLX->pvMapping;
2045 if (!pvMapping)
2046 return KLDR_ERR_NOT_MAPPED;
2047 }
2048
2049 /*
2050 * There are no TLS callbacks for LX; just call the init entrypoint if it's a DLL.
2051 */
2052 if ((pModLX->Hdr.e32_mflags & E32MODMASK) == E32MODDLL)
2053 rc = kldrModLXDoCallDLL(pModLX, pvMapping, 0 /* attach */, uHandle);
2054 else
2055 rc = 0;
2056 return rc;
2057}
2058
2059
2060/**
2061 * Call the DLL entrypoint.
2062 *
2063 * @returns 0 on success.
2064 * @returns KLDR_ERR_MODULE_INIT_FAILED on attach failure; detach failures are ignored.
2065 * @param pModLX The LX module interpreter instance.
2066 * @param pvMapping The module mapping to use (resolved).
2067 * @param uOp The operation (DLL_*).
2068 * @param uHandle The module handle to present.
2069 */
2070static int kldrModLXDoCallDLL(PKLDRMODLX pModLX, void *pvMapping, unsigned uOp, KUPTR uHandle)
2071{
2072 int rc;
2073
2074 /*
2075 * If no entrypoint there isn't anything to be done.
2076 */
2077 if ( !pModLX->Hdr.e32_startobj
2078 || pModLX->Hdr.e32_startobj > pModLX->Hdr.e32_objcnt)
2079 return 0;
2080
2081 /*
2082 * Invoke the entrypoint and convert the boolean result to a kLdr status code.
2083 */
2084 rc = kldrModLXDoCall((KUPTR)pvMapping
2085 + (KUPTR)pModLX->pMod->aSegments[pModLX->Hdr.e32_startobj - 1].RVA
2086 + pModLX->Hdr.e32_eip,
2087 uHandle, uOp, NULL);
2088 if (rc)
2089 rc = 0;
2090 else if (uOp == 0 /* attach */)
2091 rc = KLDR_ERR_MODULE_INIT_FAILED;
2092 else /* detach: ignore failures */
2093 rc = 0;
2094 return rc;
2095}
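/*
 * For illustration, with made-up numbers: if e32_startobj is 1, the first segment has
 * RVA 0x10000 and e32_eip is 0x1234, the expression above invokes the code at
 * (KUPTR)pvMapping + 0x10000 + 0x1234.
 */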
2096
2097
2098/**
2099 * Do a 3 parameter callback.
2100 *
2101 * @returns 32-bit callback return.
2102 * @param uEntrypoint The address of the function to be called.
2103 * @param uHandle The first argument, the module handle.
2104 * @param uOp The second argument, the reason we're calling.
2105 * @param pvReserved The third argument, reserved argument. (figure this one out)
2106 */
2107static KI32 kldrModLXDoCall(KUPTR uEntrypoint, KUPTR uHandle, KU32 uOp, void *pvReserved)
2108{
2109#if defined(__X86__) || defined(__i386__) || defined(_M_IX86)
2110 KI32 rc;
2111/** @todo try/except */
2112
2113 /*
2114 * Paranoia.
2115 */
2116# ifdef __GNUC__
2117 __asm__ __volatile__(
2118 "pushl %2\n\t"
2119 "pushl %1\n\t"
2120 "pushl %0\n\t"
2121 "lea 12(%%esp), %2\n\t"
2122 "call *%3\n\t"
2123 "movl %2, %%esp\n\t"
2124 : "=a" (rc)
2125 : "d" (uOp),
2126 "S" (0),
2127 "c" (uEntrypoint),
2128 "0" (uHandle));
2129# elif defined(_MSC_VER)
2130 __asm {
2131 mov eax, [uHandle]
2132 mov edx, [uOp]
2133 mov ecx, 0
2134 mov ebx, [uEntrypoint]
2135 push edi
2136 mov edi, esp
2137 push ecx
2138 push edx
2139 push eax
2140 call ebx
2141 mov esp, edi
2142 pop edi
2143 mov [rc], eax
2144 }
2145# else
2146# error "port me!"
2147# endif
2148 K_NOREF(pvReserved);
2149 return rc;
2150
2151#else
2152 K_NOREF(uEntrypoint);
2153 K_NOREF(uHandle);
2154 K_NOREF(uOp);
2155 K_NOREF(pvReserved);
2156 return KCPU_ERR_ARCH_CPU_NOT_COMPATIBLE;
2157#endif
2158}
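/*
 * Roughly speaking, the assembly above amounts to the call sketched below, except that it
 * also restores ESP afterwards so it works whether or not the callee pops its arguments.
 * (This C rendering is only a sketch for the reader, not a drop-in replacement.)
 *
 * rc = ((KI32 (*)(KUPTR, KU32, void *))uEntrypoint)(uHandle, uOp, NULL);
 */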
2159
2160
2161/** @copydoc kLdrModCallTerm */
2162static int kldrModLXCallTerm(PKLDRMOD pMod, void *pvMapping, KUPTR uHandle)
2163{
2164 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2165
2166 /*
2167 * Mapped?
2168 */
2169 if (pvMapping == KLDRMOD_INT_MAP)
2170 {
2171 pvMapping = (void *)pModLX->pvMapping;
2172 if (!pvMapping)
2173 return KLDR_ERR_NOT_MAPPED;
2174 }
2175
2176 /*
2177 * Do the call.
2178 */
2179 if ((pModLX->Hdr.e32_mflags & E32MODMASK) == E32MODDLL)
2180 kldrModLXDoCallDLL(pModLX, pvMapping, 1 /* detach */, uHandle);
2181
2182 return 0;
2183}
2184
2185
2186/** @copydoc kLdrModCallThread */
2187static int kldrModLXCallThread(PKLDRMOD pMod, void *pvMapping, KUPTR uHandle, unsigned fAttachingOrDetaching)
2188{
2189 /* no thread attach/detach callout. */
2190 K_NOREF(pMod);
2191 K_NOREF(pvMapping);
2192 K_NOREF(uHandle);
2193 K_NOREF(fAttachingOrDetaching);
2194 return 0;
2195}
2196
2197
2198/** @copydoc kLdrModSize */
2199static KLDRADDR kldrModLXSize(PKLDRMOD pMod)
2200{
2201 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2202 return pModLX->cbMapped;
2203}
2204
2205
2206/** @copydoc kLdrModGetBits */
2207static int kldrModLXGetBits(PKLDRMOD pMod, void *pvBits, KLDRADDR BaseAddress, PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser)
2208{
2209 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2210 int rc;
2211
2212 /*
2213 * Load the image bits.
2214 */
2215 rc = kldrModLXDoLoadBits(pModLX, pvBits);
2216 if (rc)
2217 return rc;
2218
2219 /*
2220 * Perform relocations.
2221 */
2222 return kldrModLXRelocateBits(pMod, pvBits, BaseAddress, pMod->aSegments[0].LinkAddress, pfnGetImport, pvUser);
2223
2224}
2225
2226
2227/** @copydoc kLdrModRelocateBits */
2228static int kldrModLXRelocateBits(PKLDRMOD pMod, void *pvBits, KLDRADDR NewBaseAddress, KLDRADDR OldBaseAddress,
2229 PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser)
2230{
2231 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2232 KU32 iSeg;
2233 int rc;
2234
2235 /*
2236 * Do we need to do *anything*?
2237 */
2238 if ( NewBaseAddress == OldBaseAddress
2239 && NewBaseAddress == pModLX->paObjs[0].o32_base
2240 && !pModLX->Hdr.e32_impmodcnt)
2241 return 0;
2242
2243 /*
2244 * Load the fixup section.
2245 */
2246 if (!pModLX->pbFixupSection)
2247 {
2248 rc = kldrModLXDoLoadFixupSection(pModLX);
2249 if (rc)
2250 return rc;
2251 }
2252
2253 /*
2254 * Iterate the segments.
2255 */
2256 for (iSeg = 0; iSeg < pModLX->Hdr.e32_objcnt; iSeg++)
2257 {
2258 const struct o32_obj * const pObj = &pModLX->paObjs[iSeg];
2259 KLDRADDR PageAddress = NewBaseAddress + pModLX->pMod->aSegments[iSeg].RVA;
2260 KU32 iPage;
2261 KU8 *pbPage = (KU8 *)pvBits + (KUPTR)pModLX->pMod->aSegments[iSeg].RVA;
2262
2263 /*
2264 * Iterate the page map pages.
2265 */
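 /*
 * Note on the lookups below (as far as can be told from this code): paoffPageFixups
 * holds one offset into pbFixupRecs per page map entry, so the fixup records for a
 * page are the bytes between its entry and the next one; o32_pagemap is a 1-based
 * page map index, hence the -1 when computing the start of the range.
 */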
2266 for (iPage = 0, rc = 0; !rc && iPage < pObj->o32_mapsize; iPage++, pbPage += OBJPAGELEN, PageAddress += OBJPAGELEN)
2267 {
2268 const KU8 * const pbFixupRecEnd = pModLX->pbFixupRecs + pModLX->paoffPageFixups[iPage + pObj->o32_pagemap];
2269 const KU8 *pb = pModLX->pbFixupRecs + pModLX->paoffPageFixups[iPage + pObj->o32_pagemap - 1];
2270 KLDRADDR uValue = NIL_KLDRADDR;
2271 KU32 fKind = 0;
2272 int iSelector;
2273
2274 /* sanity */
2275 if (pbFixupRecEnd < pb)
2276 return KLDR_ERR_BAD_FIXUP;
2277 if (pbFixupRecEnd - 1 > pModLX->pbFixupSectionLast)
2278 return KLDR_ERR_BAD_FIXUP;
2279 if (pb < pModLX->pbFixupSection)
2280 return KLDR_ERR_BAD_FIXUP;
2281
2282 /*
2283 * Iterate the fixup record.
2284 */
2285 while (pb < pbFixupRecEnd)
2286 {
2287 union _rel
2288 {
2289 const KU8 * pb;
2290 const struct r32_rlc *prlc;
2291 } u;
2292
2293 u.pb = pb;
2294 pb += 3 + (u.prlc->nr_stype & NRCHAIN ? 0 : 1); /* place pb at the 4th member. */
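 /*
 * In other words: the fixed part of a record is nr_stype, nr_flags and the source
 * offset; when NRCHAIN is set the 16-bit r32_soff field is replaced by an 8-bit
 * count of source offsets (read as u.pb[2] further down), so the target data starts
 * 3 rather than 4 bytes into the record.
 */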
2295
2296 /*
2297 * Figure out the target.
2298 */
2299 switch (u.prlc->nr_flags & NRRTYP)
2300 {
2301 /*
2302 * Internal fixup.
2303 */
2304 case NRRINT:
2305 {
2306 KU16 iTrgObject;
2307 KU32 offTrgObject;
2308
2309 /* the object */
2310 if (u.prlc->nr_flags & NR16OBJMOD)
2311 {
2312 iTrgObject = *(const KU16 *)pb;
2313 pb += 2;
2314 }
2315 else
2316 iTrgObject = *pb++;
2317 iTrgObject--;
2318 if (iTrgObject >= pModLX->Hdr.e32_objcnt)
2319 return KLDR_ERR_BAD_FIXUP;
2320
2321 /* the target */
2322 if ((u.prlc->nr_stype & NRSRCMASK) != NRSSEG)
2323 {
2324 if (u.prlc->nr_flags & NR32BITOFF)
2325 {
2326 offTrgObject = *(const KU32 *)pb;
2327 pb += 4;
2328 }
2329 else
2330 {
2331 offTrgObject = *(const KU16 *)pb;
2332 pb += 2;
2333 }
2334
2335 /* calculate the symbol info. */
2336 uValue = offTrgObject + NewBaseAddress + pMod->aSegments[iTrgObject].RVA;
2337 }
2338 else
2339 uValue = NewBaseAddress + pMod->aSegments[iTrgObject].RVA;
2340 if ( (u.prlc->nr_stype & NRALIAS)
2341 || (pMod->aSegments[iTrgObject].fFlags & KLDRSEG_FLAG_16BIT))
2342 iSelector = pMod->aSegments[iTrgObject].Sel16bit;
2343 else
2344 iSelector = pMod->aSegments[iTrgObject].SelFlat;
2345 fKind = 0;
2346 break;
2347 }
2348
2349 /*
2350 * Import by symbol ordinal.
2351 */
2352 case NRRORD:
2353 {
2354 KU16 iModule;
2355 KU32 iSymbol;
2356
2357 /* the module ordinal */
2358 if (u.prlc->nr_flags & NR16OBJMOD)
2359 {
2360 iModule = *(const KU16 *)pb;
2361 pb += 2;
2362 }
2363 else
2364 iModule = *pb++;
2365 iModule--;
2366 if (iModule >= pModLX->Hdr.e32_impmodcnt)
2367 return KLDR_ERR_BAD_FIXUP;
2368#if 1
2369 if (u.prlc->nr_flags & NRICHAIN)
2370 return KLDR_ERR_BAD_FIXUP;
2371#endif
2372
2373 /* the symbol ordinal */
2374 if (u.prlc->nr_flags & NR32BITOFF)
2375 {
2376 iSymbol = *(const KU32 *)pb;
2377 pb += 4;
2378 }
2379 else if (!(u.prlc->nr_flags & NR8BITORD))
2380 {
2381 iSymbol = *(const KU16 *)pb;
2382 pb += 2;
2383 }
2384 else
2385 iSymbol = *pb++;
2386
2387 /* resolve it. */
2388 rc = pfnGetImport(pMod, iModule, iSymbol, NULL, 0, NULL, &uValue, &fKind, pvUser);
2389 if (rc)
2390 return rc;
2391 iSelector = -1;
2392 break;
2393 }
2394
2395 /*
2396 * Import by symbol name.
2397 */
2398 case NRRNAM:
2399 {
2400 KU32 iModule;
2401 KU32 offSymbol;
2402 const KU8 *pbSymbol;
2403
2404 /* the module ordinal */
2405 if (u.prlc->nr_flags & NR16OBJMOD)
2406 {
2407 iModule = *(const KU16 *)pb;
2408 pb += 2;
2409 }
2410 else
2411 iModule = *pb++;
2412 iModule--;
2413 if (iModule >= pModLX->Hdr.e32_impmodcnt)
2414 return KLDR_ERR_BAD_FIXUP;
2415#if 1
2416 if (u.prlc->nr_flags & NRICHAIN)
2417 return KLDR_ERR_BAD_FIXUP;
2418#endif
2419
2420 /* the symbol name, given as an offset into the import procedure name table */
2421 if (u.prlc->nr_flags & NR32BITOFF)
2422 {
2423 offSymbol = *(const KU32 *)pb;
2424 pb += 4;
2425 }
2426 else if (!(u.prlc->nr_flags & NR8BITORD))
2427 {
2428 offSymbol = *(const KU16 *)pb;
2429 pb += 2;
2430 }
2431 else
2432 offSymbol = *pb++;
2433 pbSymbol = pModLX->pbImportProcs + offSymbol;
2434 if ( pbSymbol < pModLX->pbImportProcs
2435 || pbSymbol > pModLX->pbFixupSectionLast)
2436 return KLDR_ERR_BAD_FIXUP;
2437
2438 /* resolve it. */
2439 rc = pfnGetImport(pMod, iModule, NIL_KLDRMOD_SYM_ORDINAL, (const char *)pbSymbol + 1, *pbSymbol, NULL,
2440 &uValue, &fKind, pvUser);
2441 if (rc)
2442 return rc;
2443 iSelector = -1;
2444 break;
2445 }
2446
2447 case NRRENT:
2448 KLDRMODLX_ASSERT(!"NRRENT");
2449 /* Falls through. */
2450 default:
2451 iSelector = -1;
2452 break;
2453 }
2454
2455 /* addend */
2456 if (u.prlc->nr_flags & NRADD)
2457 {
2458 if (u.prlc->nr_flags & NR32BITADD)
2459 {
2460 uValue += *(const KU32 *)pb;
2461 pb += 4;
2462 }
2463 else
2464 {
2465 uValue += *(const KU16 *)pb;
2466 pb += 2;
2467 }
2468 }
2469
2470
2471 /*
2472 * Deal with the 'source' (i.e. the place that should be modified - very logical).
2473 */
2474 if (!(u.prlc->nr_stype & NRCHAIN))
2475 {
2476 int off = u.prlc->r32_soff;
2477
2478 /* common / simple */
2479 if ( (u.prlc->nr_stype & NRSRCMASK) == NROFF32
2480 && off >= 0
2481 && off <= (int)OBJPAGELEN - 4)
2482 *(KU32 *)&pbPage[off] = (KU32)uValue;
2483 else if ( (u.prlc->nr_stype & NRSRCMASK) == NRSOFF32
2484 && off >= 0
2485 && off <= (int)OBJPAGELEN - 4)
2486 *(KU32 *)&pbPage[off] = (KU32)(uValue - (PageAddress + off + 4));
2487 else
2488 {
2489 /* generic */
2490 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2491 if (rc)
2492 return rc;
2493 }
2494 }
2495 else if (!(u.prlc->nr_flags & NRICHAIN))
2496 {
2497 const KI16 *poffSrc = (const KI16 *)pb;
2498 KU8 c = u.pb[2];
2499
2500 /* common / simple */
2501 if ((u.prlc->nr_stype & NRSRCMASK) == NROFF32)
2502 {
2503 while (c-- > 0)
2504 {
2505 int off = *poffSrc++;
2506 if (off >= 0 && off <= (int)OBJPAGELEN - 4)
2507 *(KU32 *)&pbPage[off] = (KU32)uValue;
2508 else
2509 {
2510 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2511 if (rc)
2512 return rc;
2513 }
2514 }
2515 }
2516 else if ((u.prlc->nr_stype & NRSRCMASK) == NRSOFF32)
2517 {
2518 while (c-- > 0)
2519 {
2520 int off = *poffSrc++;
2521 if (off >= 0 && off <= (int)OBJPAGELEN - 4)
2522 *(KU32 *)&pbPage[off] = (KU32)(uValue - (PageAddress + off + 4));
2523 else
2524 {
2525 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2526 if (rc)
2527 return rc;
2528 }
2529 }
2530 }
2531 else
2532 {
2533 while (c-- > 0)
2534 {
2535 rc = kldrModLXDoReloc(pbPage, *poffSrc++, PageAddress, u.prlc, iSelector, uValue, fKind);
2536 if (rc)
2537 return rc;
2538 }
2539 }
2540 pb = (const KU8 *)poffSrc;
2541 }
2542 else
2543 {
2544 /* This is a pain because it will require virgin pages on a relocation. */
2545 KLDRMODLX_ASSERT(!"NRICHAIN");
2546 return KLDR_ERR_LX_NRICHAIN_NOT_SUPPORTED;
2547 }
2548 }
2549 }
2550 }
2551
2552 return 0;
2553}
2554
2555
2556/**
2557 * Applies the relocation to one 'source' in a page.
2558 *
2559 * This takes care of the more esoteric cases while the common cases
2560 * are dealt with separately.
2561 *
2562 * @returns 0 on success, non-zero kLdr status code on failure.
2563 * @param pbPage The page in which to apply the fixup.
2564 * @param off Page relative offset of where to apply the fixup.
 * @param PageAddress The address the page is mapped (or will be mapped) at; used for self-relative fixups.
 * @param prlc The relocation record.
 * @param iSelector The selector to use for segment and far pointer fixups, -1 if not applicable.
2565 * @param uValue The target value.
2566 * @param fKind The target kind.
2567 */
2568static int kldrModLXDoReloc(KU8 *pbPage, int off, KLDRADDR PageAddress, const struct r32_rlc *prlc,
2569 int iSelector, KLDRADDR uValue, KU32 fKind)
2570{
2571#pragma pack(1) /* just to be sure */
2572 union
2573 {
2574 KU8 ab[6];
2575 KU32 off32;
2576 KU16 off16;
2577 KU8 off8;
2578 struct
2579 {
2580 KU16 off;
2581 KU16 Sel;
2582 } Far16;
2583 struct
2584 {
2585 KU32 off;
2586 KU16 Sel;
2587 } Far32;
2588 } uData;
2589#pragma pack()
2590 const KU8 *pbSrc;
2591 KU8 *pbDst;
2592 KU8 cb;
2593
2594 K_NOREF(fKind);
2595
2596 /*
2597 * Compose the fixup data.
2598 */
2599 switch (prlc->nr_stype & NRSRCMASK)
2600 {
2601 case NRSBYT:
2602 uData.off8 = (KU8)uValue;
2603 cb = 1;
2604 break;
2605 case NRSSEG:
2606 if (iSelector == -1)
2607 {
2608 /* fixme */
2609 }
2610 uData.off16 = iSelector;
2611 cb = 2;
2612 break;
2613 case NRSPTR:
2614 if (iSelector == -1)
2615 {
2616 /* fixme */
2617 }
2618 uData.Far16.off = (KU16)uValue;
2619 uData.Far16.Sel = iSelector;
2620 cb = 4;
2621 break;
2622 case NRSOFF:
2623 uData.off16 = (KU16)uValue;
2624 cb = 2;
2625 break;
2626 case NRPTR48:
2627 if (iSelector == -1)
2628 {
2629 /* fixme */
2630 }
2631 uData.Far32.off = (KU32)uValue;
2632 uData.Far32.Sel = iSelector;
2633 cb = 6;
2634 break;
2635 case NROFF32:
2636 uData.off32 = (KU32)uValue;
2637 cb = 4;
2638 break;
2639 case NRSOFF32:
2640 uData.off32 = (KU32)(uValue - (PageAddress + off + 4));
2641 cb = 4;
2642 break;
2643 default:
2644 return KLDR_ERR_LX_BAD_FIXUP_SECTION; /** @todo fix error, add more checks! */
2645 }
2646
2647 /*
2648 * Apply it. This is sloooow...
2649 */
2650 pbSrc = &uData.ab[0];
2651 pbDst = pbPage + off;
2652 while (cb-- > 0)
2653 {
2654 if (off > (int)OBJPAGELEN)
2655 break;
2656 if (off >= 0)
2657 *pbDst = *pbSrc;
2658 pbSrc++;
2659 pbDst++;
 off++; /* keep the page offset in step with pbDst so the clipping checks above work. */
2660 }
2661
2662 return 0;
2663}
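/*
 * Worked example with made-up numbers: a self-relative 32-bit fixup (NRSOFF32) at page
 * offset 0x10 in a page mapped at 0x20000, targeting uValue = 0x23456, stores
 * 0x23456 - (0x20000 + 0x10 + 4) = 0x3442, i.e. the displacement from the end of the
 * patched dword to the target.
 */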
2664
2665
2666/**
2667 * The LX module interpreter method table.
2668 */
2669KLDRMODOPS g_kLdrModLXOps =
2670{
2671 "LX",
2672 NULL,
2673 kldrModLXCreate,
2674 kldrModLXDestroy,
2675 kldrModLXQuerySymbol,
2676 kldrModLXEnumSymbols,
2677 kldrModLXGetImport,
2678 kldrModLXNumberOfImports,
2679 NULL /* pfnCanExecuteOn is optional */,
2680 kldrModLXGetStackInfo,
2681 kldrModLXQueryMainEntrypoint,
2682 NULL /* pfnQueryImageUuid */,
2683 NULL /* fixme */,
2684 NULL /* fixme */,
2685 kldrModLXEnumDbgInfo,
2686 kldrModLXHasDbgInfo,
2687 kldrModLXMap,
2688 kldrModLXUnmap,
2689 kldrModLXAllocTLS,
2690 kldrModLXFreeTLS,
2691 kldrModLXReload,
2692 kldrModLXFixupMapping,
2693 kldrModLXCallInit,
2694 kldrModLXCallTerm,
2695 kldrModLXCallThread,
2696 kldrModLXSize,
2697 kldrModLXGetBits,
2698 kldrModLXRelocateBits,
2699 NULL /* fixme: pfnMostlyDone */,
2700 42 /* the end */
2701};
2702