source: trunk/kLdr/kLdrModLX.c@55

Last change on this file since 55 was 54, checked in by bird, 12 years ago

Added kLdrModQueryImageUuid for Mach-O.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
1/* $Id: kLdrModLX.c 54 2013-10-09 19:52:48Z bird $ */
2/** @file
3 * kLdr - The Module Interpreter for the Linear eXecutable (LX) Format.
4 */
5
6/*
7 * Copyright (c) 2006-2007 Knut St. Osmundsen <bird-kStuff-spamix@anduin.net>
8 *
9 * Permission is hereby granted, free of charge, to any person
10 * obtaining a copy of this software and associated documentation
11 * files (the "Software"), to deal in the Software without
12 * restriction, including without limitation the rights to use,
13 * copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following
16 * conditions:
17 *
18 * The above copyright notice and this permission notice shall be
19 * included in all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
23 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
24 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
25 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
26 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
29 */
30
31/*******************************************************************************
32* Header Files *
33*******************************************************************************/
34#include <k/kLdr.h>
35#include "kLdrInternal.h"
36#include <k/kLdrFmts/lx.h>
37
38
39/*******************************************************************************
40* Defined Constants And Macros *
41*******************************************************************************/
42/** @def KLDRMODLX_STRICT
43 * Define KLDRMODLX_STRICT to enable strict checks in KLDRMODLX. */
44#define KLDRMODLX_STRICT 1
45
46/** @def KLDRMODLX_ASSERT
47 * Assert that an expression is true when KLDRMODLX_STRICT is defined.
48 */
49#ifdef KLDRMODLX_STRICT
50# define KLDRMODLX_ASSERT(expr) kHlpAssert(expr)
51#else
52# define KLDRMODLX_ASSERT(expr) do {} while (0)
53#endif
54
55
56/*******************************************************************************
57* Structures and Typedefs *
58*******************************************************************************/
59/**
60 * Instance data for the LX module interpreter.
61 */
62typedef struct KLDRMODLX
63{
64 /** Pointer to the module. (Follows the section table.) */
65 PKLDRMOD pMod;
66 /** Pointer to the user mapping. */
67 const void *pvMapping;
68 /** The size of the mapped LX image. */
69 KSIZE cbMapped;
70 /** Reserved flags. */
71 KU32 f32Reserved;
72
73 /** The offset of the LX header. */
74 KLDRFOFF offHdr;
75 /** Copy of the LX header. */
76 struct e32_exe Hdr;
77
78 /** Pointer to the loader section.
79 * Allocated together with this structure. */
80 const KU8 *pbLoaderSection;
81 /** Pointer to the last byte in the loader section. */
82 const KU8 *pbLoaderSectionLast;
83 /** Pointer to the object table in the loader section. */
84 const struct o32_obj *paObjs;
85 /** Pointer to the object page map table in the loader section. */
86 const struct o32_map *paPageMappings;
87 /** Pointer to the resource table in the loader section. */
88 const struct rsrc32 *paRsrcs;
89 /** Pointer to the resident name table in the loader section. */
90 const KU8 *pbResNameTab;
91 /** Pointer to the entry table in the loader section. */
92 const KU8 *pbEntryTab;
93
94 /** Pointer to the non-resident name table. */
95 KU8 *pbNonResNameTab;
96 /** Pointer to the last byte in the non-resident name table. */
97 const KU8 *pbNonResNameTabLast;
98
99 /** Pointer to the fixup section. */
100 KU8 *pbFixupSection;
101 /** Pointer to the last byte in the fixup section. */
102 const KU8 *pbFixupSectionLast;
103 /** Pointer to the fixup page table within pvFixupSection. */
104 const KU32 *paoffPageFixups;
105 /** Pointer to the fixup record table within pvFixupSection. */
106 const KU8 *pbFixupRecs;
107 /** Pointer to the import module name table within pvFixupSection. */
108 const KU8 *pbImportMods;
109 /** Pointer to the import procedure name table within pvFixupSection. */
110 const KU8 *pbImportProcs;
111} KLDRMODLX, *PKLDRMODLX;
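/* Illustrative sketch (not from the original sources) of how kldrModLXDoCreate()
 * below lays out the single allocation holding an instance:
 *
 *     +--------------------------------------+ <- pModLX
 *     | KLDRMODLX                            |
 *     +--------------------------------------+ <- pModLX->pMod (8 byte aligned)
 *     | KLDRMOD and its aSegments[] array    |
 *     +--------------------------------------+ <- pMod->pszFilename (8 byte aligned)
 *     | copy of the filename + '\0'          |
 *     +--------------------------------------+ <- pModLX->pbLoaderSection (16 byte aligned)
 *     | the loader section + two zero bytes  |
 *     +--------------------------------------+
 */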
112
113
114/*******************************************************************************
115* Internal Functions *
116*******************************************************************************/
117static int kldrModLXHasDbgInfo(PKLDRMOD pMod, const void *pvBits);
118static int kldrModLXRelocateBits(PKLDRMOD pMod, void *pvBits, KLDRADDR NewBaseAddress, KLDRADDR OldBaseAddress,
119 PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser);
120static int kldrModLXDoCreate(PKRDR pRdr, KLDRFOFF offNewHdr, PKLDRMODLX *ppModLX);
121static const KU8 *kldrModLXDoNameTableLookupByOrdinal(const KU8 *pbNameTable, KI32 cbNameTable, KU32 iOrdinal);
122static int kldrModLXDoNameLookup(PKLDRMODLX pModLX, const char *pchSymbol, KU32 cchSymbol, KU32 *piSymbol);
123static const KU8 *kldrModLXDoNameTableLookupByName(const KU8 *pbNameTable, KI32 cbNameTable,
124 const char *pchSymbol, KSIZE cchSymbol);
125static int kldrModLXDoLoadBits(PKLDRMODLX pModLX, void *pvBits);
126static int kldrModLXDoIterDataUnpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc);
127static int kldrModLXDoIterData2Unpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc);
128static void kLdrModLXMemCopyW(KU8 *pbDst, const KU8 *pbSrc, int cb);
129static int kldrModLXDoProtect(PKLDRMODLX pModLX, void *pvBits, unsigned fUnprotectOrProtect);
130static int kldrModLXDoCallDLL(PKLDRMODLX pModLX, unsigned uOp, KUPTR uHandle);
131static int kldrModLXDoForwarderQuery(PKLDRMODLX pModLX, const struct e32_entry *pEntry,
132 PFNKLDRMODGETIMPORT pfnGetForwarder, void *pvUser, PKLDRADDR puValue, KU32 *pfKind);
133static int kldrModLXDoLoadFixupSection(PKLDRMODLX pModLX);
134static KI32 kldrModLXDoCall(KUPTR uEntrypoint, KUPTR uHandle, KU32 uOp, void *pvReserved);
135static int kldrModLXDoReloc(KU8 *pbPage, int off, KLDRADDR PageAddress, const struct r32_rlc *prlc,
136 int iSelector, KLDRADDR uValue, KU32 fKind);
137
138
139/**
140 * Create a loader module instance interpreting the executable image found
141 * in the specified file provider instance.
142 *
143 * @returns 0 on success and *ppMod pointing to a module instance.
144 * On failure, a non-zero OS specific error code is returned.
145 * @param pOps Pointer to the registered method table.
146 * @param pRdr The file provider instance to use.
147 * @param fFlags Flags, MBZ.
148 * @param enmCpuArch The desired CPU architecture. KCPUARCH_UNKNOWN means
149 * anything goes, but with a preference for the current
150 * host architecture.
151 * @param offNewHdr The offset of the new header in MZ files. -1 if not found.
152 * @param ppMod Where to store the module instance pointer.
153 */
154static int kldrModLXCreate(PCKLDRMODOPS pOps, PKRDR pRdr, KU32 fFlags, KCPUARCH enmCpuArch, KLDRFOFF offNewHdr, PPKLDRMOD ppMod)
155{
156 PKLDRMODLX pModLX;
157 int rc;
158
159 /*
160 * Create the instance data and do a minimal header validation.
161 */
162 rc = kldrModLXDoCreate(pRdr, offNewHdr, &pModLX);
163 if (!rc)
164 {
165 /*
166 * Match up against the requested CPU architecture.
167 */
168 if ( enmCpuArch == KCPUARCH_UNKNOWN
169 || pModLX->pMod->enmArch == enmCpuArch)
170 {
171 pModLX->pMod->pOps = pOps;
172 pModLX->pMod->u32Magic = KLDRMOD_MAGIC;
173 *ppMod = pModLX->pMod;
174 return 0;
175 }
176 rc = KLDR_ERR_CPU_ARCH_MISMATCH;
177 }
178 kHlpFree(pModLX);
179 return rc;
180}
181
182
183/**
184 * Separate function for reading and creating the LX module instance to
185 * simplify cleanup on failure.
186 */
187static int kldrModLXDoCreate(PKRDR pRdr, KLDRFOFF offNewHdr, PKLDRMODLX *ppModLX)
188{
189 struct e32_exe Hdr;
190 PKLDRMODLX pModLX;
191 PKLDRMOD pMod;
192 KSIZE cb;
193 KSIZE cchFilename;
194 KU32 off, offEnd;
195 KU32 i;
196 int rc;
197 int fCanOptimizeMapping;
198 KU32 NextRVA;
199 *ppModLX = NULL;
200
201 /*
202 * Read the signature and file header.
203 */
204 rc = kRdrRead(pRdr, &Hdr, sizeof(Hdr), offNewHdr > 0 ? offNewHdr : 0);
205 if (rc)
206 return rc;
207 if ( Hdr.e32_magic[0] != E32MAGIC1
208 || Hdr.e32_magic[1] != E32MAGIC2)
209 return KLDR_ERR_UNKNOWN_FORMAT;
210
211 /* We're not interested in anything but x86 images. */
212 if ( Hdr.e32_level != E32LEVEL
213 || Hdr.e32_border != E32LEBO
214 || Hdr.e32_worder != E32LEWO
215 || Hdr.e32_cpu < E32CPU286
216 || Hdr.e32_cpu > E32CPU486
217 || Hdr.e32_pagesize != OBJPAGELEN
218 )
219 return KLDR_ERR_LX_BAD_HEADER;
220
221 /* Some rough sanity checks. */
222 offEnd = kRdrSize(pRdr) >= (KLDRFOFF)~(KU32)16 ? ~(KU32)16 : (KU32)kRdrSize(pRdr);
223 if ( Hdr.e32_itermap > offEnd
224 || Hdr.e32_datapage > offEnd
225 || Hdr.e32_nrestab > offEnd
226 || Hdr.e32_nrestab + Hdr.e32_cbnrestab > offEnd
227 || Hdr.e32_ldrsize > offEnd - offNewHdr - sizeof(Hdr)
228 || Hdr.e32_fixupsize > offEnd - offNewHdr - sizeof(Hdr)
229 || Hdr.e32_fixupsize + Hdr.e32_ldrsize > offEnd - offNewHdr - sizeof(Hdr))
230 return KLDR_ERR_LX_BAD_HEADER;
231
232 /* Verify the loader section. */
233 offEnd = Hdr.e32_objtab + Hdr.e32_ldrsize;
234 if (Hdr.e32_objtab < sizeof(Hdr))
235 return KLDR_ERR_LX_BAD_LOADER_SECTION;
236 off = Hdr.e32_objtab + sizeof(struct o32_obj) * Hdr.e32_objcnt;
237 if (off > offEnd)
238 return KLDR_ERR_LX_BAD_LOADER_SECTION;
239 if ( Hdr.e32_objmap
240 && (Hdr.e32_objmap < off || Hdr.e32_objmap > offEnd))
241 return KLDR_ERR_LX_BAD_LOADER_SECTION;
242 if ( Hdr.e32_rsrccnt
243 && ( Hdr.e32_rsrctab < off
244 || Hdr.e32_rsrctab > offEnd
245 || Hdr.e32_rsrctab + sizeof(struct rsrc32) * Hdr.e32_rsrccnt > offEnd))
246 return KLDR_ERR_LX_BAD_LOADER_SECTION;
247 if ( Hdr.e32_restab
248 && (Hdr.e32_restab < off || Hdr.e32_restab > offEnd - 2))
249 return KLDR_ERR_LX_BAD_LOADER_SECTION;
250 if ( Hdr.e32_enttab
251 && (Hdr.e32_enttab < off || Hdr.e32_enttab >= offEnd))
252 return KLDR_ERR_LX_BAD_LOADER_SECTION;
253 if ( Hdr.e32_dircnt
254 && (Hdr.e32_dirtab < off || Hdr.e32_dirtab > offEnd - 2))
255 return KLDR_ERR_LX_BAD_LOADER_SECTION;
256
257 /* Verify the fixup section. */
258 off = offEnd;
259 offEnd = off + Hdr.e32_fixupsize;
260 if ( Hdr.e32_fpagetab
261 && (Hdr.e32_fpagetab < off || Hdr.e32_fpagetab > offEnd))
262 {
263 /*
264 * wlink mixes the fixup section and the loader section.
265 */
266 off = Hdr.e32_fpagetab;
267 offEnd = off + Hdr.e32_fixupsize;
268 Hdr.e32_ldrsize = off - Hdr.e32_objtab;
269 }
270 if ( Hdr.e32_frectab
271 && (Hdr.e32_frectab < off || Hdr.e32_frectab > offEnd))
272 return KLDR_ERR_LX_BAD_FIXUP_SECTION;
273 if ( Hdr.e32_impmod
274 && (Hdr.e32_impmod < off || Hdr.e32_impmod > offEnd || Hdr.e32_impmod + Hdr.e32_impmodcnt > offEnd))
275 return KLDR_ERR_LX_BAD_FIXUP_SECTION;
276 if ( Hdr.e32_impproc
277 && (Hdr.e32_impproc < off || Hdr.e32_impproc > offEnd))
278 return KLDR_ERR_LX_BAD_FIXUP_SECTION;
279
280 /*
281 * Calc the instance size, allocate and initialize it.
282 */
283 cchFilename = kHlpStrLen(kRdrName(pRdr));
284 cb = K_ALIGN_Z(sizeof(KLDRMODLX), 8)
285 + K_ALIGN_Z(K_OFFSETOF(KLDRMOD, aSegments[Hdr.e32_objcnt + 1]), 8)
286 + K_ALIGN_Z(cchFilename + 1, 8)
287 + Hdr.e32_ldrsize + 2; /* +2 for two extra zeros. */
288 pModLX = (PKLDRMODLX)kHlpAlloc(cb);
289 if (!pModLX)
290 return KERR_NO_MEMORY;
291 *ppModLX = pModLX;
292
293 /* KLDRMOD */
294 pMod = (PKLDRMOD)((KU8 *)pModLX + K_ALIGN_Z(sizeof(KLDRMODLX), 8));
295 pMod->pvData = pModLX;
296 pMod->pRdr = pRdr;
297 pMod->pOps = NULL; /* set upon success. */
298 pMod->cSegments = Hdr.e32_objcnt;
299 pMod->cchFilename = cchFilename;
300 pMod->pszFilename = (char *)K_ALIGN_P(&pMod->aSegments[pMod->cSegments], 8);
301 kHlpMemCopy((char *)pMod->pszFilename, kRdrName(pRdr), cchFilename + 1);
302 pMod->pszName = NULL; /* finalized further down */
303 pMod->cchName = 0;
304 switch (Hdr.e32_cpu)
305 {
306 case E32CPU286:
307 pMod->enmCpu = KCPU_I80286;
308 pMod->enmArch = KCPUARCH_X86_16;
309 break;
310 case E32CPU386:
311 pMod->enmCpu = KCPU_I386;
312 pMod->enmArch = KCPUARCH_X86_32;
313 break;
314 case E32CPU486:
315 pMod->enmCpu = KCPU_I486;
316 pMod->enmArch = KCPUARCH_X86_32;
317 break;
318 }
319 pMod->enmEndian = KLDRENDIAN_LITTLE;
320 pMod->enmFmt = KLDRFMT_LX;
321 switch (Hdr.e32_mflags & E32MODMASK)
322 {
323 case E32MODEXE:
324 pMod->enmType = !(Hdr.e32_mflags & E32NOINTFIX)
325 ? KLDRTYPE_EXECUTABLE_RELOCATABLE
326 : KLDRTYPE_EXECUTABLE_FIXED;
327 break;
328
329 case E32MODDLL:
330 case E32PROTDLL:
331 case E32MODPROTDLL:
332 pMod->enmType = !(Hdr.e32_mflags & E32SYSDLL)
333 ? KLDRTYPE_SHARED_LIBRARY_RELOCATABLE
334 : KLDRTYPE_SHARED_LIBRARY_FIXED;
335 break;
336
337 case E32MODPDEV:
338 case E32MODVDEV:
339 pMod->enmType = KLDRTYPE_SHARED_LIBRARY_RELOCATABLE;
340 break;
341 }
342 pMod->u32Magic = 0; /* set upon success. */
343
344 /* KLDRMODLX */
345 pModLX->pMod = pMod;
346 pModLX->pvMapping = 0;
347 pModLX->cbMapped = 0;
348 pModLX->f32Reserved = 0;
349
350 pModLX->offHdr = offNewHdr >= 0 ? offNewHdr : 0;
351 kHlpMemCopy(&pModLX->Hdr, &Hdr, sizeof(Hdr));
352
353 pModLX->pbLoaderSection = K_ALIGN_P(pMod->pszFilename + pMod->cchFilename + 1, 16);
354 pModLX->pbLoaderSectionLast = pModLX->pbLoaderSection + pModLX->Hdr.e32_ldrsize - 1;
355 pModLX->paObjs = NULL;
356 pModLX->paPageMappings = NULL;
357 pModLX->paRsrcs = NULL;
358 pModLX->pbResNameTab = NULL;
359 pModLX->pbEntryTab = NULL;
360
361 pModLX->pbNonResNameTab = NULL;
362 pModLX->pbNonResNameTabLast = NULL;
363
364 pModLX->pbFixupSection = NULL;
365 pModLX->pbFixupSectionLast = NULL;
366 pModLX->paoffPageFixups = NULL;
367 pModLX->pbFixupRecs = NULL;
368 pModLX->pbImportMods = NULL;
369 pModLX->pbImportProcs = NULL;
370
371 /*
372 * Read the loader data.
373 */
374 rc = kRdrRead(pRdr, (void *)pModLX->pbLoaderSection, pModLX->Hdr.e32_ldrsize, pModLX->Hdr.e32_objtab + pModLX->offHdr);
375 if (rc)
376 return rc;
377 ((KU8 *)pModLX->pbLoaderSectionLast)[1] = 0;
378 ((KU8 *)pModLX->pbLoaderSectionLast)[2] = 0;
379 if (pModLX->Hdr.e32_objcnt)
380 pModLX->paObjs = (const struct o32_obj *)pModLX->pbLoaderSection;
381 if (pModLX->Hdr.e32_objmap)
382 pModLX->paPageMappings = (const struct o32_map *)(pModLX->pbLoaderSection + pModLX->Hdr.e32_objmap - pModLX->Hdr.e32_objtab);
383 if (pModLX->Hdr.e32_rsrccnt)
384 pModLX->paRsrcs = (const struct rsrc32 *)(pModLX->pbLoaderSection + pModLX->Hdr.e32_rsrctab - pModLX->Hdr.e32_objtab);
385 if (pModLX->Hdr.e32_restab)
386 pModLX->pbResNameTab = pModLX->pbLoaderSection + pModLX->Hdr.e32_restab - pModLX->Hdr.e32_objtab;
387 if (pModLX->Hdr.e32_enttab)
388 pModLX->pbEntryTab = pModLX->pbLoaderSection + pModLX->Hdr.e32_enttab - pModLX->Hdr.e32_objtab;
389
390 /*
391 * Get the soname from the resident name table.
392 * Very convenient that it's the 0 ordinal, because then we get a
393 * free string terminator.
394 * (The table entry consists of a pascal string followed by a 16-bit ordinal.)
395 */
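    /* Illustrative example of such an entry (hypothetical bytes, not from any
     * particular file): 0x07, 'E','X','A','M','P','L','E', 0x00, 0x00 - a
     * 7 character name followed by the 16-bit ordinal 0 (the module name). */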
396 if (pModLX->pbResNameTab)
397 pMod->pszName = (const char *)kldrModLXDoNameTableLookupByOrdinal(pModLX->pbResNameTab,
398 pModLX->pbLoaderSectionLast - pModLX->pbResNameTab + 1,
399 0);
400 if (!pMod->pszName)
401 return KLDR_ERR_LX_NO_SONAME;
402 pMod->cchName = *(const KU8 *)pMod->pszName++;
403 if (pMod->cchName != kHlpStrLen(pMod->pszName))
404 return KLDR_ERR_LX_BAD_SONAME;
405
406 /*
407 * Quick validation of the object table.
408 */
409 cb = 0;
410 for (i = 0; i < pMod->cSegments; i++)
411 {
412 if (pModLX->paObjs[i].o32_base & (OBJPAGELEN - 1))
413 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
414 if (pModLX->paObjs[i].o32_base + pModLX->paObjs[i].o32_size <= pModLX->paObjs[i].o32_base)
415 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
416 if (pModLX->paObjs[i].o32_mapsize > (pModLX->paObjs[i].o32_size + (OBJPAGELEN - 1)))
417 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
418 if ( pModLX->paObjs[i].o32_mapsize
419 && ( (KU8 *)&pModLX->paPageMappings[pModLX->paObjs[i].o32_pagemap] > pModLX->pbLoaderSectionLast
420 || (KU8 *)&pModLX->paPageMappings[pModLX->paObjs[i].o32_pagemap + pModLX->paObjs[i].o32_mapsize]
421 > pModLX->pbLoaderSectionLast))
422 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
423 if (i > 0 && !(pModLX->paObjs[i].o32_flags & OBJRSRC))
424 {
425 if (pModLX->paObjs[i].o32_base <= pModLX->paObjs[i - 1].o32_base)
426 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
427 if (pModLX->paObjs[i].o32_base < pModLX->paObjs[i - 1].o32_base + pModLX->paObjs[i - 1].o32_mapsize)
428 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
429 }
430 }
431
432 /*
433 * Check if we can optimize the mapping by using a different
434 * object alignment. The linker typically uses 64KB alignment,
435 * we can easily get away with page alignment in most cases.
436 */
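    /* Illustrative numbers only: two objects linked at 0x10000 and 0x20000,
     * each 0x1200 bytes, can be mapped at RVA 0x0000 and 0x2000 (page aligned)
     * when internal fixups are available; otherwise the original 64KB spacing
     * must be kept so the link-time layout stays valid. */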
437 fCanOptimizeMapping = !(Hdr.e32_mflags & (E32NOINTFIX | E32SYSDLL));
438 NextRVA = 0;
439
440 /*
441 * Setup the KLDRMOD segment array.
442 */
443 for (i = 0; i < pMod->cSegments; i++)
444 {
445 /* unused */
446 pMod->aSegments[i].pvUser = NULL;
447 pMod->aSegments[i].MapAddress = 0;
448 pMod->aSegments[i].pchName = NULL;
449 pMod->aSegments[i].cchName = 0;
450 pMod->aSegments[i].offFile = -1;
451 pMod->aSegments[i].cbFile = -1;
452 pMod->aSegments[i].SelFlat = 0;
453 pMod->aSegments[i].Sel16bit = 0;
454
455 /* flags */
456 pMod->aSegments[i].fFlags = 0;
457 if (!(pModLX->paObjs[i].o32_flags & OBJBIGDEF))
458 pMod->aSegments[i].fFlags |= KLDRSEG_FLAG_16BIT;
459 if (pModLX->paObjs[i].o32_flags & OBJALIAS16)
460 pMod->aSegments[i].fFlags |= KLDRSEG_FLAG_OS2_ALIAS16;
461 if (pModLX->paObjs[i].o32_flags & OBJCONFORM)
462 pMod->aSegments[i].fFlags |= KLDRSEG_FLAG_OS2_CONFORM;
463 if (pModLX->paObjs[i].o32_flags & OBJIOPL)
464 pMod->aSegments[i].fFlags |= KLDRSEG_FLAG_OS2_IOPL;
465
466 /* size and addresses */
467 pMod->aSegments[i].Alignment = OBJPAGELEN;
468 pMod->aSegments[i].cb = pModLX->paObjs[i].o32_size;
469 pMod->aSegments[i].LinkAddress = pModLX->paObjs[i].o32_base;
470 pMod->aSegments[i].RVA = NextRVA;
471 if ( fCanOptimizeMapping
472 || i + 1 >= pMod->cSegments
473 || (pModLX->paObjs[i].o32_flags & OBJRSRC)
474 || (pModLX->paObjs[i + 1].o32_flags & OBJRSRC))
475 pMod->aSegments[i].cbMapped = K_ALIGN_Z(pModLX->paObjs[i].o32_size, OBJPAGELEN);
476 else
477 pMod->aSegments[i].cbMapped = pModLX->paObjs[i + 1].o32_base - pModLX->paObjs[i].o32_base;
478 NextRVA += pMod->aSegments[i].cbMapped;
479
480 /* protection */
481 switch ( pModLX->paObjs[i].o32_flags
482 & (OBJSHARED | OBJREAD | OBJWRITE | OBJEXEC))
483 {
484 case 0:
485 case OBJSHARED:
486 pMod->aSegments[i].enmProt = KPROT_NOACCESS;
487 break;
488 case OBJREAD:
489 case OBJREAD | OBJSHARED:
490 pMod->aSegments[i].enmProt = KPROT_READONLY;
491 break;
492 case OBJWRITE:
493 case OBJWRITE | OBJREAD:
494 pMod->aSegments[i].enmProt = KPROT_WRITECOPY;
495 break;
496 case OBJWRITE | OBJSHARED:
497 case OBJWRITE | OBJSHARED | OBJREAD:
498 pMod->aSegments[i].enmProt = KPROT_READWRITE;
499 break;
500 case OBJEXEC:
501 case OBJEXEC | OBJSHARED:
502 pMod->aSegments[i].enmProt = KPROT_EXECUTE;
503 break;
504 case OBJEXEC | OBJREAD:
505 case OBJEXEC | OBJREAD | OBJSHARED:
506 pMod->aSegments[i].enmProt = KPROT_EXECUTE_READ;
507 break;
508 case OBJEXEC | OBJWRITE:
509 case OBJEXEC | OBJWRITE | OBJREAD:
510 pMod->aSegments[i].enmProt = KPROT_EXECUTE_WRITECOPY;
511 break;
512 case OBJEXEC | OBJWRITE | OBJSHARED:
513 case OBJEXEC | OBJWRITE | OBJSHARED | OBJREAD:
514 pMod->aSegments[i].enmProt = KPROT_EXECUTE_READWRITE;
515 break;
516 }
517 if ((pModLX->paObjs[i].o32_flags & (OBJREAD | OBJWRITE | OBJEXEC | OBJRSRC)) == OBJRSRC)
518 pMod->aSegments[i].enmProt = KPROT_READONLY;
519 /*pMod->aSegments[i].f16bit = !(pModLX->paObjs[i].o32_flags & OBJBIGDEF)
520 pMod->aSegments[i].fIOPL = !(pModLX->paObjs[i].o32_flags & OBJIOPL)
521 pMod->aSegments[i].fConforming = !(pModLX->paObjs[i].o32_flags & OBJCONFORM) */
522 }
523
524 /* set the mapping size */
525 pModLX->cbMapped = NextRVA;
526
527 /*
528 * We're done.
529 */
530 *ppModLX = pModLX;
531 return 0;
532}
533
534
535/** @copydoc KLDRMODOPS::pfnDestroy */
536static int kldrModLXDestroy(PKLDRMOD pMod)
537{
538 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
539 int rc = 0;
540 KLDRMODLX_ASSERT(!pModLX->pvMapping);
541
542 if (pMod->pRdr)
543 {
544 rc = kRdrClose(pMod->pRdr);
545 pMod->pRdr = NULL;
546 }
547 if (pModLX->pbNonResNameTab)
548 {
549 kHlpFree(pModLX->pbNonResNameTab);
550 pModLX->pbNonResNameTab = NULL;
551 }
552 if (pModLX->pbFixupSection)
553 {
554 kHlpFree(pModLX->pbFixupSection);
555 pModLX->pbFixupSection = NULL;
556 }
557 pMod->u32Magic = 0;
558 pMod->pOps = NULL;
559 kHlpFree(pModLX);
560 return rc;
561}
562
563
564/**
565 * Resolves base address aliases.
566 *
567 * @param pModLX The interpreter module instance
568 * @param pBaseAddress The base address, IN & OUT.
569 */
570static void kldrModLXResolveBaseAddress(PKLDRMODLX pModLX, PKLDRADDR pBaseAddress)
571{
572 if (*pBaseAddress == KLDRMOD_BASEADDRESS_MAP)
573 *pBaseAddress = pModLX->pMod->aSegments[0].MapAddress;
574 else if (*pBaseAddress == KLDRMOD_BASEADDRESS_LINK)
575 *pBaseAddress = pModLX->pMod->aSegments[0].LinkAddress;
576}
577
578
579/** @copydoc kLdrModQuerySymbol */
580static int kldrModLXQuerySymbol(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress, KU32 iSymbol,
581 const char *pchSymbol, KSIZE cchSymbol, const char *pszVersion,
582 PFNKLDRMODGETIMPORT pfnGetForwarder, void *pvUser, PKLDRADDR puValue, KU32 *pfKind)
583{
584 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
585 KU32 iOrdinal;
586 int rc;
587 const struct b32_bundle *pBundle;
588
589
590 /*
591 * Give up at once if there is no entry table.
592 */
593 if (!pModLX->Hdr.e32_enttab)
594 return KLDR_ERR_SYMBOL_NOT_FOUND;
595
596 /*
597 * Translate the symbol name into an ordinal.
598 */
599 if (pchSymbol)
600 {
601 rc = kldrModLXDoNameLookup(pModLX, pchSymbol, cchSymbol, &iSymbol);
602 if (rc)
603 return rc;
604 }
605
606 /*
607 * Iterate the entry table.
608 * (The entry table is made up of bundles of similar exports.)
609 */
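    /*
     * Rough sketch of the entry table layout (see lx.h for the real definitions):
     * each bundle starts with a count byte (b32_cnt) and a type byte (b32_type);
     * non-empty bundles also carry a 16-bit object number (b32_obj) and are
     * followed by b32_cnt packed e32_entry records whose size depends on the
     * type (see s_cbEntry below). A zero count terminates the table.
     */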
610 iOrdinal = 1;
611 pBundle = (const struct b32_bundle *)pModLX->pbEntryTab;
612 while (pBundle->b32_cnt && iOrdinal <= iSymbol)
613 {
614 static const KSIZE s_cbEntry[] = { 0, 3, 5, 5, 7 };
615
616 /*
617 * Check for a hit first.
618 */
619 iOrdinal += pBundle->b32_cnt;
620 if (iSymbol < iOrdinal)
621 {
622 KU32 offObject;
623 const struct e32_entry *pEntry = (const struct e32_entry *)((KUPTR)(pBundle + 1)
624 + (iSymbol - (iOrdinal - pBundle->b32_cnt))
625 * s_cbEntry[pBundle->b32_type]);
626
627 /*
628 * Calculate the return address.
629 */
630 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
631 switch (pBundle->b32_type)
632 {
633 /* empty bundles are placeholders for unused ordinal ranges. */
634 case EMPTY:
635 return KLDR_ERR_SYMBOL_NOT_FOUND;
636
637 /* e32_flags + a 16-bit offset. */
638 case ENTRY16:
639 offObject = pEntry->e32_variant.e32_offset.offset16;
640 if (pfKind)
641 *pfKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_NO_TYPE;
642 break;
643
644 /* e32_flags + a 16-bit offset + a 16-bit callgate selector. */
645 case GATE16:
646 offObject = pEntry->e32_variant.e32_callgate.offset;
647 if (pfKind)
648 *pfKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_CODE;
649 break;
650
651 /* e32_flags + a 32-bit offset. */
652 case ENTRY32:
653 offObject = pEntry->e32_variant.e32_offset.offset32;
654 if (pfKind)
655 *pfKind = KLDRSYMKIND_32BIT;
656 break;
657
658 /* e32_flags + 16-bit import module ordinal + a 32-bit procname or ordinal. */
659 case ENTRYFWD:
660 return kldrModLXDoForwarderQuery(pModLX, pEntry, pfnGetForwarder, pvUser, puValue, pfKind);
661
662 default:
663 /* anyone actually using TYPEINFO will end up here. */
664 KLDRMODLX_ASSERT(!"Bad bundle type");
665 return KLDR_ERR_LX_BAD_BUNDLE;
666 }
667
668 /*
669 * Validate the object number and calc the return address.
670 */
671 if ( pBundle->b32_obj <= 0
672 || pBundle->b32_obj > pMod->cSegments)
673 return KLDR_ERR_LX_BAD_BUNDLE;
674 if (puValue)
675 *puValue = BaseAddress
676 + offObject
677 + pMod->aSegments[pBundle->b32_obj - 1].RVA;
678 return 0;
679 }
680
681 /*
682 * Skip the bundle.
683 */
684 if (pBundle->b32_type > ENTRYFWD)
685 {
686 KLDRMODLX_ASSERT(!"Bad type"); /** @todo figure out TYPEINFO. */
687 return KLDR_ERR_LX_BAD_BUNDLE;
688 }
689 if (pBundle->b32_type == 0)
690 pBundle = (const struct b32_bundle *)((const KU8 *)pBundle + 2);
691 else
692 pBundle = (const struct b32_bundle *)((const KU8 *)(pBundle + 1) + s_cbEntry[pBundle->b32_type] * pBundle->b32_cnt);
693 }
694
695 return KLDR_ERR_SYMBOL_NOT_FOUND;
696}
697
698
699/**
700 * Do name lookup.
701 *
702 * @returns See kLdrModQuerySymbol.
703 * @param pModLX The module to lookup the symbol in.
704 * @param pchSymbol The symbol to lookup.
705 * @param cchSymbol The symbol name length.
706 * @param piSymbol Where to store the symbol ordinal.
707 */
708static int kldrModLXDoNameLookup(PKLDRMODLX pModLX, const char *pchSymbol, KU32 cchSymbol, KU32 *piSymbol)
709{
710
711 /*
712 * First do a hash table lookup.
713 */
714 /** @todo hash name table for speed. */
715
716 /*
717 * Search the name tables.
718 */
719 const KU8 *pbName = kldrModLXDoNameTableLookupByName(pModLX->pbResNameTab,
720 pModLX->pbLoaderSectionLast - pModLX->pbResNameTab + 1,
721 pchSymbol, cchSymbol);
722 if (!pbName)
723 {
724 if (!pModLX->pbNonResNameTab)
725 {
726 /* lazy load it */
727 /** @todo non-resident name table. */
728 }
729 if (pModLX->pbNonResNameTab)
730 pbName = kldrModLXDoNameTableLookupByName(pModLX->pbNonResNameTab,
731 pModLX->pbNonResNameTabLast - pModLX->pbNonResNameTab + 1,
732 pchSymbol, cchSymbol);
733 }
734 if (!pbName)
735 return KLDR_ERR_SYMBOL_NOT_FOUND;
736
737 *piSymbol = *(const KU16 *)(pbName + 1 + *pbName);
738 return 0;
739}
740
741
742#if 0
743/**
744 * Hash a symbol using the algorithm from sdbm.
745 *
746 * The following is the documentation of the original sdbm functions:
747 *
748 * This algorithm was created for sdbm (a public-domain reimplementation of
749 * ndbm) database library. it was found to do well in scrambling bits,
750 * causing better distribution of the keys and fewer splits. it also happens
751 * to be a good general hashing function with good distribution. the actual
752 * function is hash(i) = hash(i - 1) * 65599 + str[i]; what is included below
753 * is the faster version used in gawk. [there is even a faster, duff-device
754 * version] the magic constant 65599 was picked out of thin air while
755 * experimenting with different constants, and turns out to be a prime.
756 * this is one of the algorithms used in berkeley db (see sleepycat) and
757 * elsewhere.
758 */
759static KU32 kldrModLXDoHash(const char *pchSymbol, KU8 cchSymbol)
760{
761 KU32 hash = 0;
762 int ch;
763
764 while ( cchSymbol-- > 0
765 && (ch = *(unsigned const char *)pchSymbol++))
766 hash = ch + (hash << 6) + (hash << 16) - hash;
767
768 return hash;
769}
770#endif
771
772
773/**
774 * Lookup a name table entry by name.
775 *
776 * @returns Pointer to the name table entry if found.
777 * @returns NULL if not found.
778 * @param pbNameTable Pointer to the name table that should be searched.
779 * @param cbNameTable The size of the name table.
780 * @param pchSymbol The name of the symbol we're looking for.
781 * @param cchSymbol The length of the symbol name.
782 */
783static const KU8 *kldrModLXDoNameTableLookupByName(const KU8 *pbNameTable, KI32 cbNameTable,
784 const char *pchSymbol, KSIZE cchSymbol)
785{
786 /*
787 * Determine the name length up front so we can skip anything which doesn't match the length.
788 */
789 KU8 cbSymbol8Bit = (KU8)cchSymbol;
790 if (cbSymbol8Bit != cchSymbol)
791 return NULL; /* too long. */
792
793 /*
794 * Walk the name table.
795 */
796 while (*pbNameTable != 0 && cbNameTable > 0)
797 {
798 const KU8 cbName = *pbNameTable;
799
800 cbNameTable -= cbName + 1 + 2;
801 if (cbNameTable < 0)
802 break;
803
804 if ( cbName == cbSymbol8Bit
805 && !kHlpMemComp(pbNameTable + 1, pchSymbol, cbName))
806 return pbNameTable;
807
808 /* next entry */
809 pbNameTable += cbName + 1 + 2;
810 }
811
812 return NULL;
813}
814
815
816/**
817 * Deal with a forwarder entry.
818 *
819 * @returns See kLdrModQuerySymbol.
820 * @param pModLX The LX module interpreter instance.
821 * @param pEntry The forwarder entry.
822 * @param pfnGetForwarder The callback for resolving forwarder symbols. (optional)
823 * @param pvUser The user argument for the callback.
824 * @param puValue Where to put the value. (optional)
825 * @param pfKind Where to put the symbol kind. (optional)
826 */
827static int kldrModLXDoForwarderQuery(PKLDRMODLX pModLX, const struct e32_entry *pEntry,
828 PFNKLDRMODGETIMPORT pfnGetForwarder, void *pvUser, PKLDRADDR puValue, KU32 *pfKind)
829{
830 int rc;
831 KU32 iSymbol;
832 const char *pchSymbol;
833 KU8 cchSymbol;
834
835 if (!pfnGetForwarder)
836 return KLDR_ERR_FORWARDER_SYMBOL;
837
838 /*
839 * Validate the entry import module ordinal.
840 */
841 if ( !pEntry->e32_variant.e32_fwd.modord
842 || pEntry->e32_variant.e32_fwd.modord > pModLX->Hdr.e32_impmodcnt)
843 return KLDR_ERR_LX_BAD_FORWARDER;
844
845 /*
846 * Figure out the parameters.
847 */
848 if (pEntry->e32_flags & FWD_ORDINAL)
849 {
850 iSymbol = pEntry->e32_variant.e32_fwd.value;
851 pchSymbol = NULL; /* no symbol name. */
852 cchSymbol = 0;
853 }
854 else
855 {
856 const KU8 *pbName;
857
858 /* load the fixup section if necessary. */
859 if (!pModLX->pbImportProcs)
860 {
861 rc = kldrModLXDoLoadFixupSection(pModLX);
862 if (rc)
863 return rc;
864 }
865
866 /* Make name pointer. */
867 pbName = pModLX->pbImportProcs + pEntry->e32_variant.e32_fwd.value;
868 if ( pbName >= pModLX->pbFixupSectionLast
869 || pbName < pModLX->pbFixupSection
870 || !*pbName)
871 return KLDR_ERR_LX_BAD_FORWARDER;
872
873
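            /* A forwarder can name its target either by symbol name or by
             * ordinal; the latter is a pascal string starting with '#', e.g.
             * "#42" or "#0x2A" (hypothetical values), parsed below. */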
874 /* check for '#' name. */
875 if (pbName[1] == '#')
876 {
877 KU8 cbLeft = *pbName - 1; /* exclude the leading '#'. */
878 const KU8 *pb = pbName + 2; /* skip the length byte and the '#'. */
879 unsigned uBase;
880
881 /* base detection */
882 uBase = 10;
883 if ( cbLeft > 2
884 && pb[0] == '0'
885 && (pb[1] == 'x' || pb[1] == 'X'))
886 {
887 uBase = 16;
888 pb += 2;
889 cbLeft -= 2;
890 }
891
892 /* ascii to integer */
893 iSymbol = 0;
894 while (cbLeft-- > 0)
895 {
896 /* convert char to digit. */
897 unsigned uDigit = *pb++;
898 if (uDigit >= '0' && uDigit <= '9')
899 uDigit -= '0';
900 else if (uDigit >= 'a' && uDigit <= 'z')
901 uDigit = uDigit - 'a' + 10;
902 else if (uDigit >= 'A' && uDigit <= 'Z')
903 uDigit = uDigit - 'A' + 10;
904 else if (!uDigit)
905 break;
906 else
907 return KLDR_ERR_LX_BAD_FORWARDER;
908 if (uDigit >= uBase)
909 return KLDR_ERR_LX_BAD_FORWARDER;
910
911 /* insert the digit */
912 iSymbol *= uBase;
913 iSymbol += uDigit;
914 }
915 if (!iSymbol)
916 return KLDR_ERR_LX_BAD_FORWARDER;
917
918 pchSymbol = NULL; /* no symbol name. */
919 cchSymbol = 0;
920 }
921 else
922 {
923 pchSymbol = (char *)pbName + 1;
924 cchSymbol = *pbName;
925 iSymbol = NIL_KLDRMOD_SYM_ORDINAL;
926 }
927 }
928
929 /*
930 * Resolve the forwarder.
931 */
932 rc = pfnGetForwarder(pModLX->pMod, pEntry->e32_variant.e32_fwd.modord - 1, iSymbol, pchSymbol, cchSymbol, NULL, puValue, pfKind, pvUser);
933 if (!rc && pfKind)
934 *pfKind |= KLDRSYMKIND_FORWARDER;
935 return rc;
936}
937
938
939/**
940 * Loads the fixup section from the executable image.
941 *
942 * The fixup section isn't loaded until it's accessed. It's also freed by kLdrModDone().
943 *
944 * @returns 0 on success, non-zero kLdr or native status code on failure.
945 * @param pModLX The LX module interpreter instance.
946 */
947static int kldrModLXDoLoadFixupSection(PKLDRMODLX pModLX)
948{
949 int rc;
950 KU32 off;
951 void *pv;
952
953 pv = kHlpAlloc(pModLX->Hdr.e32_fixupsize);
954 if (!pv)
955 return KERR_NO_MEMORY;
956
957 off = pModLX->Hdr.e32_objtab + pModLX->Hdr.e32_ldrsize;
958 rc = kRdrRead(pModLX->pMod->pRdr, pv, pModLX->Hdr.e32_fixupsize,
959 off + pModLX->offHdr);
960 if (!rc)
961 {
962 pModLX->pbFixupSection = pv;
963 pModLX->pbFixupSectionLast = pModLX->pbFixupSection + pModLX->Hdr.e32_fixupsize;
964 KLDRMODLX_ASSERT(!pModLX->paoffPageFixups);
965 if (pModLX->Hdr.e32_fpagetab)
966 pModLX->paoffPageFixups = (const KU32 *)(pModLX->pbFixupSection + pModLX->Hdr.e32_fpagetab - off);
967 KLDRMODLX_ASSERT(!pModLX->pbFixupRecs);
968 if (pModLX->Hdr.e32_frectab)
969 pModLX->pbFixupRecs = pModLX->pbFixupSection + pModLX->Hdr.e32_frectab - off;
970 KLDRMODLX_ASSERT(!pModLX->pbImportMods);
971 if (pModLX->Hdr.e32_impmod)
972 pModLX->pbImportMods = pModLX->pbFixupSection + pModLX->Hdr.e32_impmod - off;
973 KLDRMODLX_ASSERT(!pModLX->pbImportProcs);
974 if (pModLX->Hdr.e32_impproc)
975 pModLX->pbImportProcs = pModLX->pbFixupSection + pModLX->Hdr.e32_impproc - off;
976 }
977 else
978 kHlpFree(pv);
979 return rc;
980}
981
982
983/** @copydoc kLdrModEnumSymbols */
984static int kldrModLXEnumSymbols(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress,
985 KU32 fFlags, PFNKLDRMODENUMSYMS pfnCallback, void *pvUser)
986{
987 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
988 const struct b32_bundle *pBundle;
989 KU32 iOrdinal;
990 int rc = 0;
991
992 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
993
994 /*
995 * Enumerate the entry table.
996 * (The entry table is made up of bundles of similar exports.)
997 */
998 iOrdinal = 1;
999 pBundle = (const struct b32_bundle *)pModLX->pbEntryTab;
1000 while (pBundle->b32_cnt && iOrdinal)
1001 {
1002 static const KSIZE s_cbEntry[] = { 0, 3, 5, 5, 7 };
1003
1004 /*
1005 * Enum the entries in the bundle.
1006 */
1007 if (pBundle->b32_type != EMPTY)
1008 {
1009 const struct e32_entry *pEntry;
1010 KSIZE cbEntry;
1011 KLDRADDR BundleRVA;
1012 unsigned cLeft;
1013
1014
1015 /* Validate the bundle. */
1016 switch (pBundle->b32_type)
1017 {
1018 case ENTRY16:
1019 case GATE16:
1020 case ENTRY32:
1021 if ( pBundle->b32_obj <= 0
1022 || pBundle->b32_obj > pMod->cSegments)
1023 return KLDR_ERR_LX_BAD_BUNDLE;
1024 BundleRVA = pMod->aSegments[pBundle->b32_obj - 1].RVA;
1025 break;
1026
1027 case ENTRYFWD:
1028 BundleRVA = 0;
1029 break;
1030
1031 default:
1032 /* anyone actually using TYPEINFO will end up here. */
1033 KLDRMODLX_ASSERT(!"Bad bundle type");
1034 return KLDR_ERR_LX_BAD_BUNDLE;
1035 }
1036
1037 /* iterate the bundle entries. */
1038 cbEntry = s_cbEntry[pBundle->b32_type];
1039 pEntry = (const struct e32_entry *)(pBundle + 1);
1040 cLeft = pBundle->b32_cnt;
1041 while (cLeft-- > 0)
1042 {
1043 KLDRADDR uValue;
1044 KU32 fKind;
1045 int fFoundName;
1046 const KU8 *pbName;
1047
1048 /*
1049 * Calc the symbol value and kind.
1050 */
1051 switch (pBundle->b32_type)
1052 {
1053 /* e32_flags + a 16-bit offset. */
1054 case ENTRY16:
1055 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_offset.offset16;
1056 fKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_NO_TYPE;
1057 break;
1058
1059 /* e32_flags + a 16-bit offset + a 16-bit callgate selector. */
1060 case GATE16:
1061 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_callgate.offset;
1062 fKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_CODE;
1063 break;
1064
1065 /* e32_flags + a 32-bit offset. */
1066 case ENTRY32:
1067 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_offset.offset32;
1068 fKind = KLDRSYMKIND_32BIT;
1069 break;
1070
1071 /* e32_flags + 16-bit import module ordinal + a 32-bit procname or ordinal. */
1072 case ENTRYFWD:
1073 uValue = 0; /** @todo implement enumeration of forwarders properly. */
1074 fKind = KLDRSYMKIND_FORWARDER;
1075 break;
1076
1077 default: /* shut up gcc. */
1078 uValue = 0;
1079 fKind = KLDRSYMKIND_NO_BIT | KLDRSYMKIND_NO_TYPE;
1080 break;
1081 }
1082
1083 /*
1084 * Any symbol names?
1085 */
1086 fFoundName = 0;
1087
1088 /* resident name table. */
1089 pbName = pModLX->pbResNameTab;
1090 if (pbName)
1091 {
1092 do
1093 {
1094 pbName = kldrModLXDoNameTableLookupByOrdinal(pbName, pModLX->pbLoaderSectionLast - pbName + 1, iOrdinal);
1095 if (!pbName)
1096 break;
1097 fFoundName = 1;
1098 rc = pfnCallback(pMod, iOrdinal, (const char *)pbName + 1, *pbName, NULL, uValue, fKind, pvUser);
1099 if (rc)
1100 return rc;
1101
1102 /* skip to the next entry */
1103 pbName += 1 + *pbName + 2;
1104 } while (pbName < pModLX->pbLoaderSectionLast);
1105 }
1106
1107 /* non-resident name table. */
1108 pbName = pModLX->pbNonResNameTab;
1109 /** @todo lazy load the non-resident name table. */
1110 if (pbName)
1111 {
1112 do
1113 {
1114 pbName = kldrModLXDoNameTableLookupByOrdinal(pbName, pModLX->pbNonResNameTabLast - pbName + 1, iOrdinal);
1115 if (!pbName)
1116 break;
1117 fFoundName = 1;
1118 rc = pfnCallback(pMod, iOrdinal, (const char *)pbName + 1, *pbName, NULL, uValue, fKind, pvUser);
1119 if (rc)
1120 return rc;
1121
1122 /* skip to the next entry */
1123 pbName += 1 + *pbName + 2;
1124 } while (pbName < pModLX->pbNonResNameTabLast);
1125 }
1126
1127 /*
1128 * If no names, call once with the ordinal only.
1129 */
1130 if (!fFoundName)
1131 {
1132 rc = pfnCallback(pMod, iOrdinal, NULL, 0, NULL, uValue, fKind, pvUser);
1133 if (rc)
1134 return rc;
1135 }
1136
1137 /* next */
1138 iOrdinal++;
1139 pEntry = (const struct e32_entry *)((KUPTR)pEntry + cbEntry);
1140 }
1141 }
1142
1143 /*
1144 * The next bundle.
1145 */
1146 if (pBundle->b32_type > ENTRYFWD)
1147 {
1148 KLDRMODLX_ASSERT(!"Bad type"); /** @todo figure out TYPEINFO. */
1149 return KLDR_ERR_LX_BAD_BUNDLE;
1150 }
1151 if (pBundle->b32_type == 0)
1152 pBundle = (const struct b32_bundle *)((const KU8 *)pBundle + 2);
1153 else
1154 pBundle = (const struct b32_bundle *)((const KU8 *)(pBundle + 1) + s_cbEntry[pBundle->b32_type] * pBundle->b32_cnt);
1155 }
1156
1157 return 0;
1158}
1159
1160
1161/**
1162 * Lookup a name table entry by ordinal.
1163 *
1164 * @returns Pointer to the name table entry if found.
1165 * @returns NULL if not found.
1166 * @param pbNameTable Pointer to the name table that should be searched.
1167 * @param cbNameTable The size of the name table.
1168 * @param iOrdinal The ordinal to search for.
1169 */
1170static const KU8 *kldrModLXDoNameTableLookupByOrdinal(const KU8 *pbNameTable, KI32 cbNameTable, KU32 iOrdinal)
1171{
1172 while (*pbNameTable != 0 && cbNameTable > 0)
1173 {
1174 const KU8 cbName = *pbNameTable;
1175 KU32 iName;
1176
1177 cbNameTable -= cbName + 1 + 2;
1178 if (cbNameTable < 0)
1179 break;
1180
1181 iName = *(pbNameTable + cbName + 1)
1182 | ((unsigned)*(pbNameTable + cbName + 2) << 8);
1183 if (iName == iOrdinal)
1184 return pbNameTable;
1185
1186 /* next entry */
1187 pbNameTable += cbName + 1 + 2;
1188 }
1189
1190 return NULL;
1191}
1192
1193
1194/** @copydoc kLdrModGetImport */
1195static int kldrModLXGetImport(PKLDRMOD pMod, const void *pvBits, KU32 iImport, char *pszName, KSIZE cchName)
1196{
1197 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1198 const KU8 *pb;
1199 int rc;
1200
1201 /*
1202 * Validate
1203 */
1204 if (iImport >= pModLX->Hdr.e32_impmodcnt)
1205 return KLDR_ERR_IMPORT_ORDINAL_OUT_OF_BOUNDS;
1206
1207 /*
1208 * Lazy loading the fixup section.
1209 */
1210 if (!pModLX->pbImportMods)
1211 {
1212 rc = kldrModLXDoLoadFixupSection(pModLX);
1213 if (rc)
1214 return rc;
1215 }
1216
1217 /*
1218 * Iterate the module import table until we reach the requested import ordinal.
1219 */
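    /* The table is a packed sequence of pascal strings (length byte followed by
     * that many characters), so an entry is skipped with pb += *pb + 1.
     * Illustrative entry: 0x08, 'D','O','S','C','A','L','L','S'. */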
1220 pb = pModLX->pbImportMods;
1221 while (iImport-- > 0)
1222 pb += *pb + 1;
1223
1224 /*
1225 * Copy out the result.
1226 */
1227 if (*pb < cchName)
1228 {
1229 kHlpMemCopy(pszName, pb + 1, *pb);
1230 pszName[*pb] = '\0';
1231 rc = 0;
1232 }
1233 else
1234 {
1235 kHlpMemCopy(pszName, pb + 1, cchName);
1236 if (cchName)
1237 pszName[cchName - 1] = '\0';
1238 rc = KERR_BUFFER_OVERFLOW;
1239 }
1240
1241 return rc;
1242}
1243
1244
1245/** @copydoc kLdrModNumberOfImports */
1246static KI32 kldrModLXNumberOfImports(PKLDRMOD pMod, const void *pvBits)
1247{
1248 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1249 return pModLX->Hdr.e32_impmodcnt;
1250}
1251
1252
1253/** @copydoc kLdrModGetStackInfo */
1254static int kldrModLXGetStackInfo(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress, PKLDRSTACKINFO pStackInfo)
1255{
1256 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1257 const KU32 i = pModLX->Hdr.e32_stackobj;
1258
1259 if ( i
1260 && i <= pMod->cSegments
1261 && pModLX->Hdr.e32_esp <= pMod->aSegments[i - 1].LinkAddress + pMod->aSegments[i - 1].cb
1262 && pModLX->Hdr.e32_stacksize
1263 && pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize >= pMod->aSegments[i - 1].LinkAddress)
1264 {
1265
1266 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
1267 pStackInfo->LinkAddress = pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize;
1268 pStackInfo->Address = BaseAddress
1269 + pMod->aSegments[i - 1].RVA
1270 + pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize - pMod->aSegments[i - 1].LinkAddress;
1271 }
1272 else
1273 {
1274 pStackInfo->Address = NIL_KLDRADDR;
1275 pStackInfo->LinkAddress = NIL_KLDRADDR;
1276 }
1277 pStackInfo->cbStack = pModLX->Hdr.e32_stacksize;
1278 pStackInfo->cbStackThread = 0;
1279
1280 return 0;
1281}
1282
1283
1284/** @copydoc kLdrModQueryMainEntrypoint */
1285static int kldrModLXQueryMainEntrypoint(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress, PKLDRADDR pMainEPAddress)
1286{
1287 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1288
1289 /*
1290 * Convert the address from the header.
1291 */
1292 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
1293 *pMainEPAddress = pModLX->Hdr.e32_startobj
1294 && pModLX->Hdr.e32_startobj <= pMod->cSegments
1295 && pModLX->Hdr.e32_eip < pMod->aSegments[pModLX->Hdr.e32_startobj - 1].cb
1296 ? BaseAddress + pMod->aSegments[pModLX->Hdr.e32_startobj - 1].RVA + pModLX->Hdr.e32_eip
1297 : NIL_KLDRADDR;
1298 return 0;
1299}
1300
1301
1302/** @copydoc kLdrModEnumDbgInfo */
1303static int kldrModLXEnumDbgInfo(PKLDRMOD pMod, const void *pvBits, PFNKLDRENUMDBG pfnCallback, void *pvUser)
1304{
1305 /*PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;*/
1306
1307 /*
1308 * Quit immediately if no debug info.
1309 */
1310 if (kldrModLXHasDbgInfo(pMod, pvBits))
1311 return 0;
1312#if 0
1313 /*
1314 * Read the debug info and look for familiar magics and structures.
1315 */
1316 /** @todo */
1317#endif
1318
1319 return 0;
1320}
1321
1322
1323/** @copydoc kLdrModHasDbgInfo */
1324static int kldrModLXHasDbgInfo(PKLDRMOD pMod, const void *pvBits)
1325{
1326 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1327
1328 /*
1329 * Don't currently bother with linkers which don't advertise it in the header.
1330 */
1331 if ( !pModLX->Hdr.e32_debuginfo
1332 || !pModLX->Hdr.e32_debuglen)
1333 return KLDR_ERR_NO_DEBUG_INFO;
1334 return 0;
1335}
1336
1337
1338/** @copydoc kLdrModMap */
1339static int kldrModLXMap(PKLDRMOD pMod)
1340{
1341 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1342 unsigned fFixed;
1343 void *pvBase;
1344 int rc;
1345
1346 /*
1347 * Already mapped?
1348 */
1349 if (pModLX->pvMapping)
1350 return KLDR_ERR_ALREADY_MAPPED;
1351
1352 /*
1353 * Allocate memory for it.
1354 */
1355 /* fixed image? */
1356 fFixed = pMod->enmType == KLDRTYPE_EXECUTABLE_FIXED
1357 || pMod->enmType == KLDRTYPE_SHARED_LIBRARY_FIXED;
1358 if (!fFixed)
1359 pvBase = NULL;
1360 else
1361 {
1362 pvBase = (void *)(KUPTR)pMod->aSegments[0].LinkAddress;
1363 if ((KUPTR)pvBase != pMod->aSegments[0].LinkAddress)
1364 return KLDR_ERR_ADDRESS_OVERFLOW;
1365 }
1366 rc = kHlpPageAlloc(&pvBase, pModLX->cbMapped, KPROT_EXECUTE_READWRITE, fFixed);
1367 if (rc)
1368 return rc;
1369
1370 /*
1371 * Load the bits, apply page protection, and update the segment table.
1372 */
1373 rc = kldrModLXDoLoadBits(pModLX, pvBase);
1374 if (!rc)
1375 rc = kldrModLXDoProtect(pModLX, pvBase, 0 /* protect */);
1376 if (!rc)
1377 {
1378 KU32 i;
1379 for (i = 0; i < pMod->cSegments; i++)
1380 {
1381 if (pMod->aSegments[i].RVA != NIL_KLDRADDR)
1382 pMod->aSegments[i].MapAddress = (KUPTR)pvBase + (KUPTR)pMod->aSegments[i].RVA;
1383 }
1384 pModLX->pvMapping = pvBase;
1385 }
1386 else
1387 kHlpPageFree(pvBase, pModLX->cbMapped);
1388 return rc;
1389}
1390
1391
1392/**
1393 * Loads the LX pages into the specified memory mapping.
1394 *
1395 * @returns 0 on success.
1396 * @returns non-zero kLdr or OS status code on failure.
1397 *
1398 * @param pModLX The LX module interpreter instance.
1399 * @param pvBits Where to load the bits.
1400 */
1401static int kldrModLXDoLoadBits(PKLDRMODLX pModLX, void *pvBits)
1402{
1403 const PKRDR pRdr = pModLX->pMod->pRdr;
1404 KU8 *pbTmpPage = NULL;
1405 int rc = 0;
1406 KU32 i;
1407
1408 /*
1409 * Iterate the segments.
1410 */
1411 for (i = 0; i < pModLX->Hdr.e32_objcnt; i++)
1412 {
1413 const struct o32_obj * const pObj = &pModLX->paObjs[i];
1414 const KU32 cPages = pModLX->pMod->aSegments[i].cbMapped / OBJPAGELEN;
1415 KU32 iPage;
1416 KU8 *pbPage = (KU8 *)pvBits + (KUPTR)pModLX->pMod->aSegments[i].RVA;
1417
1418 /*
1419 * Iterate the page map pages.
1420 */
1421 for (iPage = 0; !rc && iPage < pObj->o32_mapsize; iPage++, pbPage += OBJPAGELEN)
1422 {
1423 const struct o32_map *pMap = &pModLX->paPageMappings[iPage + pObj->o32_pagemap - 1];
1424 switch (pMap->o32_pageflags)
1425 {
1426 case VALID:
1427 if (pMap->o32_pagesize == OBJPAGELEN)
1428 rc = kRdrRead(pRdr, pbPage, OBJPAGELEN,
1429 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1430 else if (pMap->o32_pagesize < OBJPAGELEN)
1431 {
1432 rc = kRdrRead(pRdr, pbPage, pMap->o32_pagesize,
1433 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1434 kHlpMemSet(pbPage + pMap->o32_pagesize, 0, OBJPAGELEN - pMap->o32_pagesize);
1435 }
1436 else
1437 rc = KLDR_ERR_LX_BAD_PAGE_MAP;
1438 break;
1439
1440 case ITERDATA:
1441 case ITERDATA2:
1442 /* make sure we've got a temp page. */
1443 if (!pbTmpPage)
1444 {
1445 pbTmpPage = kHlpAlloc(OBJPAGELEN + 256);
1446 if (!pbTmpPage)
1447 break;
1448 }
1449 /* validate the size. */
1450 if (pMap->o32_pagesize > OBJPAGELEN + 252)
1451 {
1452 rc = KLDR_ERR_LX_BAD_PAGE_MAP;
1453 break;
1454 }
1455
1456 /* read it and ensure 4 extra zero bytes. */
1457 rc = kRdrRead(pRdr, pbTmpPage, pMap->o32_pagesize,
1458 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1459 if (rc)
1460 break;
1461 kHlpMemSet(pbTmpPage + pMap->o32_pagesize, 0, 4);
1462
1463 /* unpack it into the image page. */
1464 if (pMap->o32_pageflags == ITERDATA2)
1465 rc = kldrModLXDoIterData2Unpacking(pbPage, pbTmpPage, pMap->o32_pagesize);
1466 else
1467 rc = kldrModLXDoIterDataUnpacking(pbPage, pbTmpPage, pMap->o32_pagesize);
1468 break;
1469
1470 case INVALID: /* we're probably not dealing correctly with INVALID pages... */
1471 case ZEROED:
1472 kHlpMemSet(pbPage, 0, OBJPAGELEN);
1473 break;
1474
1475 case RANGE:
1476 KLDRMODLX_ASSERT(!"RANGE");
1477 default:
1478 rc = KLDR_ERR_LX_BAD_PAGE_MAP;
1479 break;
1480 }
1481 }
1482 if (rc)
1483 break;
1484
1485 /*
1486 * Zero the remaining pages.
1487 */
1488 if (iPage < cPages)
1489 kHlpMemSet(pbPage, 0, (cPages - iPage) * OBJPAGELEN);
1490 }
1491
1492 if (pbTmpPage)
1493 kHlpFree(pbTmpPage);
1494 return rc;
1495}
1496
1497
1498/**
1499 * Unpacks iterdata (aka EXEPACK).
1500 *
1501 * @returns 0 on success, non-zero kLdr status code on failure.
1502 * @param pbDst Where to put the uncompressed data. (Assumes OBJPAGELEN size.)
1503 * @param pbSrc The compressed source data.
1504 * @param cbSrc The file size of the compressed data. The source buffer
1505 * contains 4 additional zero bytes.
1506 */
1507static int kldrModLXDoIterDataUnpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc)
1508{
1509 const struct LX_Iter *pIter = (const struct LX_Iter *)pbSrc;
1510 int cbDst = OBJPAGELEN;
1511
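    /* Each iteration record (struct LX_Iter, see lx.h) is roughly a 16-bit
     * repeat count (LX_nIter), a 16-bit block size (LX_nBytes) and the data
     * block itself (LX_Iterdata). Illustrative example: nIter=4, nBytes=2,
     * data {0xCC,0xAA} expands to CC AA CC AA CC AA CC AA in the page. */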
1512 /* Validate size of data. */
1513 if (cbSrc >= OBJPAGELEN - 2)
1514 return KLDR_ERR_LX_BAD_ITERDATA;
1515
1516 /*
1517 * Expand the page.
1518 */
1519 while (cbSrc > 0 && pIter->LX_nIter)
1520 {
1521 if (pIter->LX_nBytes == 1)
1522 {
1523 /*
1524 * Special case - one databyte.
1525 */
1526 cbDst -= pIter->LX_nIter;
1527 if (cbDst < 0)
1528 return KLDR_ERR_LX_BAD_ITERDATA;
1529
1530 cbSrc -= 4 + 1;
1531 if (cbSrc < -4)
1532 return KLDR_ERR_LX_BAD_ITERDATA;
1533
1534 kHlpMemSet(pbDst, pIter->LX_Iterdata, pIter->LX_nIter);
1535 pbDst += pIter->LX_nIter;
1536 pIter++;
1537 }
1538 else
1539 {
1540 /*
1541 * General.
1542 */
1543 int i;
1544
1545 cbDst -= pIter->LX_nIter * pIter->LX_nBytes;
1546 if (cbDst < 0)
1547 return KLDR_ERR_LX_BAD_ITERDATA;
1548
1549 cbSrc -= 4 + pIter->LX_nBytes;
1550 if (cbSrc < -4)
1551 return KLDR_ERR_LX_BAD_ITERDATA;
1552
1553 for (i = pIter->LX_nIter; i > 0; i--, pbDst += pIter->LX_nBytes)
1554 kHlpMemCopy(pbDst, &pIter->LX_Iterdata, pIter->LX_nBytes);
1555 pIter = (struct LX_Iter *)((char*)pIter + 4 + pIter->LX_nBytes);
1556 }
1557 }
1558
1559 /*
1560 * Zero remainder of the page.
1561 */
1562 if (cbDst > 0)
1563 kHlpMemSet(pbDst, 0, cbDst);
1564
1565 return 0;
1566}
1567
1568
1569/**
1570 * Unpacks iterdata2 (aka EXEPACK2).
1571 *
1572 * @returns 0 on success, non-zero kLdr status code on failure.
1573 * @param pbDst Where to put the uncompressed data. (Assumes OBJPAGELEN size.)
1574 * @param pbSrc The compressed source data.
1575 * @param cbSrc The file size of the compressed data. The source buffer
1576 * contains 4 additional zero bytes.
1577 */
1578static int kldrModLXDoIterData2Unpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc)
1579{
1580 int cbDst = OBJPAGELEN;
1581
1582 while (cbSrc > 0)
1583 {
1584 /*
1585 * Bits 0 and 1 are the encoding type.
1586 */
1587 switch (*pbSrc & 0x03)
1588 {
1589 /*
1590 *
1591 * 0 1 2 3 4 5 6 7
1592 * type | |
1593 * ----------------
1594 * cb <cb bytes of data>
1595 *
1596 * Bits 2-7 are, if not zero, the length of an uncompressed run
1597 * starting at the following byte.
1598 *
1599 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
1600 * type | | | | | |
1601 * ---------------- ---------------------- -----------------------
1602 * zero cb char to multiply
1603 *
1604 * If the bits are zero, the following two bytes describe a 1 byte iteration
1605 * run. First byte is count, second is the byte to copy. A count of zero
1606 * means end of data, and we simply stop. In that case the rest of the data
1607 * should be zero.
1608 */
1609 case 0:
1610 {
1611 if (*pbSrc)
1612 {
1613 const int cb = *pbSrc >> 2;
1614 cbDst -= cb;
1615 if (cbDst < 0)
1616 return KLDR_ERR_LX_BAD_ITERDATA2;
1617 cbSrc -= cb + 1;
1618 if (cbSrc < 0)
1619 return KLDR_ERR_LX_BAD_ITERDATA2;
1620 kHlpMemCopy(pbDst, ++pbSrc, cb);
1621 pbDst += cb;
1622 pbSrc += cb;
1623 }
1624 else if (cbSrc < 2)
1625 return KLDR_ERR_LX_BAD_ITERDATA2;
1626 else
1627 {
1628 const int cb = pbSrc[1];
1629 if (!cb)
1630 goto l_endloop;
1631 cbDst -= cb;
1632 if (cbDst < 0)
1633 return KLDR_ERR_LX_BAD_ITERDATA2;
1634 cbSrc -= 3;
1635 if (cbSrc < 0)
1636 return KLDR_ERR_LX_BAD_ITERDATA2;
1637 kHlpMemSet(pbDst, pbSrc[2], cb);
1638 pbDst += cb;
1639 pbSrc += 3;
1640 }
1641 break;
1642 }
1643
1644
1645 /*
1646 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1647 * type | | | | | |
1648 * ---- ------- -------------------------
1649 * cb1 cb2 - 3 offset <cb1 bytes of data>
1650 *
1651 * Two bytes laid out as described above, followed by cb1 bytes of data to be copied.
1652 * The cb2(+3) and offset describe an amount of data to be copied from the expanded
1653 * data relative to the current position. The data is copied as you would expect it to be.
1654 */
1655 case 1:
1656 {
1657 cbSrc -= 2;
1658 if (cbSrc < 0)
1659 return KLDR_ERR_LX_BAD_ITERDATA2;
1660 else
1661 {
1662 const unsigned off = ((unsigned)pbSrc[1] << 1) | (*pbSrc >> 7);
1663 const int cb1 = (*pbSrc >> 2) & 3;
1664 const int cb2 = ((*pbSrc >> 4) & 7) + 3;
1665
1666 pbSrc += 2;
1667 cbSrc -= cb1;
1668 if (cbSrc < 0)
1669 return KLDR_ERR_LX_BAD_ITERDATA2;
1670 cbDst -= cb1;
1671 if (cbDst < 0)
1672 return KLDR_ERR_LX_BAD_ITERDATA2;
1673 kHlpMemCopy(pbDst, pbSrc, cb1);
1674 pbDst += cb1;
1675 pbSrc += cb1;
1676
1677 if (off > OBJPAGELEN - (unsigned)cbDst)
1678 return KLDR_ERR_LX_BAD_ITERDATA2;
1679 cbDst -= cb2;
1680 if (cbDst < 0)
1681 return KLDR_ERR_LX_BAD_ITERDATA2;
1682 kHlpMemMove(pbDst, pbDst - off, cb2);
1683 pbDst += cb2;
1684 }
1685 break;
1686 }
1687
1688
1689 /*
1690 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1691 * type | | | |
1692 * ---- ----------------------------------
1693 * cb-3 offset
1694 *
1695 * Two bytes laid out as described above.
1696 * The cb(+3) and offset describe an amount of data to be copied from the expanded
1697 * data relative to the current position.
1698 *
1699 * If offset == 1 the data is not copied as expected, but in the memcpyw manner.
1700 */
1701 case 2:
1702 {
1703 cbSrc -= 2;
1704 if (cbSrc < 0)
1705 return KLDR_ERR_LX_BAD_ITERDATA2;
1706 else
1707 {
1708 const unsigned off = ((unsigned)pbSrc[1] << 4) | (*pbSrc >> 4);
1709 const int cb = ((*pbSrc >> 2) & 3) + 3;
1710
1711 pbSrc += 2;
1712 if (off > OBJPAGELEN - (unsigned)cbDst)
1713 return KLDR_ERR_LX_BAD_ITERDATA2;
1714 cbDst -= cb;
1715 if (cbDst < 0)
1716 return KLDR_ERR_LX_BAD_ITERDATA2;
1717 kLdrModLXMemCopyW(pbDst, pbDst - off, cb);
1718 pbDst += cb;
1719 }
1720 break;
1721 }
1722
1723
1724 /*
1725 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
1726 * type | | | | | |
1727 * ---------- ---------------- ----------------------------------
1728 * cb1 cb2 offset <cb1 bytes of data>
1729 *
1730 * Three bytes laid out as described above, followed by cb1 bytes of data to be copied.
1731 * The cb2 and offset describe an amount of data to be copied from the expanded
1732 * data relative to the current position.
1733 *
1734 * If offset == 1 the data is not copied as expected, but in the memcpyw manner.
1735 */
1736 case 3:
1737 {
1738 cbSrc -= 3;
1739 if (cbSrc < 0)
1740 return KLDR_ERR_LX_BAD_ITERDATA2;
1741 else
1742 {
1743 const int cb1 = (*pbSrc >> 2) & 0xf;
1744 const int cb2 = ((pbSrc[1] & 0xf) << 2) | (*pbSrc >> 6);
1745 const unsigned off = ((unsigned)pbSrc[2] << 4) | (pbSrc[1] >> 4);
1746
1747 pbSrc += 3;
1748 cbSrc -= cb1;
1749 if (cbSrc < 0)
1750 return KLDR_ERR_LX_BAD_ITERDATA2;
1751 cbDst -= cb1;
1752 if (cbDst < 0)
1753 return KLDR_ERR_LX_BAD_ITERDATA2;
1754 kHlpMemCopy(pbDst, pbSrc, cb1);
1755 pbDst += cb1;
1756 pbSrc += cb1;
1757
1758 if (off > OBJPAGELEN - (unsigned)cbDst)
1759 return KLDR_ERR_LX_BAD_ITERDATA2;
1760 cbDst -= cb2;
1761 if (cbDst < 0)
1762 return KLDR_ERR_LX_BAD_ITERDATA2;
1763 kLdrModLXMemCopyW(pbDst, pbDst - off, cb2);
1764 pbDst += cb2;
1765 }
1766 break;
1767 }
1768 } /* type switch. */
1769 } /* unpack loop */
1770
1771l_endloop:
1772
1773
1774 /*
1775 * Zero remainder of the page.
1776 */
1777 if (cbDst > 0)
1778 kHlpMemSet(pbDst, 0, cbDst);
1779
1780 return 0;
1781}
1782
1783
1784/**
1785 * Special memcpy employed by the iterdata2 algorithm.
1786 *
1787 * Emulate a 16-bit memcpy (copying 16 bits at a time) and the effects this
1788 * has if src is very close to the destination.
1789 *
1790 * @param pbDst Destination pointer.
1791 * @param pbSrc Source pointer. Will always be <= pbDst.
1792 * @param cb Amount of data to be copied.
1793 * @remark This assumes that unaligned word and dword access is fine.
1794 */
1795static void kLdrModLXMemCopyW(KU8 *pbDst, const KU8 *pbSrc, int cb)
1796{
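    /* Illustrative note: for small overlaps (pbSrc only a byte or three below
     * pbDst) each 16-bit store may read bytes written by an earlier store, so
     * the result differs from both memcpy and a byte-wise memmove. The packed
     * data relies on exactly this behaviour, hence the dedicated routine. */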
1797 switch (pbDst - pbSrc)
1798 {
1799 case 0:
1800 case 1:
1801 case 2:
1802 case 3:
1803 /* 16-bit copy (unaligned) */
1804 if (cb & 1)
1805 *pbDst++ = *pbSrc++;
1806 for (cb >>= 1; cb > 0; cb--, pbDst += 2, pbSrc += 2)
1807 *(KU16 *)pbDst = *(const KU16 *)pbSrc;
1808 break;
1809
1810 default:
1811 /* 32-bit copy (unaligned) */
1812 if (cb & 1)
1813 *pbDst++ = *pbSrc++;
1814 if (cb & 2)
1815 {
1816 *(KU16 *)pbDst = *(const KU16 *)pbSrc;
1817 pbDst += 2;
1818 pbSrc += 2;
1819 }
1820 for (cb >>= 2; cb > 0; cb--, pbDst += 4, pbSrc += 4)
1821 *(KU32 *)pbDst = *(const KU32 *)pbSrc;
1822 break;
1823 }
1824}
1825
1826
1827/**
1828 * Unprotects or protects the specified image mapping.
1829 *
1830 * @returns 0 on success.
1831 * @returns non-zero kLdr or OS status code on failure.
1832 *
1833 * @param pModLX The LX module interpreter instance.
1834 * @param pvBits The mapping to protect.
1835 * @param fUnprotectOrProtect If 1 unprotect (i.e. make all writable), otherwise
1836 * protect according to the object table.
1837 */
1838static int kldrModLXDoProtect(PKLDRMODLX pModLX, void *pvBits, unsigned fUnprotectOrProtect)
1839{
1840 KU32 i;
1841 PKLDRMOD pMod = pModLX->pMod;
1842
1843 /*
1844 * Change object protection.
1845 */
1846 for (i = 0; i < pMod->cSegments; i++)
1847 {
1848 int rc;
1849 void *pv;
1850 KPROT enmProt;
1851
1852 /* calc new protection. */
1853 enmProt = pMod->aSegments[i].enmProt;
1854 if (fUnprotectOrProtect)
1855 {
1856 switch (enmProt)
1857 {
1858 case KPROT_NOACCESS:
1859 case KPROT_READONLY:
1860 case KPROT_READWRITE:
1861 case KPROT_WRITECOPY:
1862 enmProt = KPROT_READWRITE;
1863 break;
1864 case KPROT_EXECUTE:
1865 case KPROT_EXECUTE_READ:
1866 case KPROT_EXECUTE_READWRITE:
1867 case KPROT_EXECUTE_WRITECOPY:
1868 enmProt = KPROT_EXECUTE_READWRITE;
1869 break;
1870 default:
1871 KLDRMODLX_ASSERT(!"bad enmProt");
1872 return -1;
1873 }
1874 }
1875 else
1876 {
1877 /* copy on write -> normal write. */
1878 if (enmProt == KPROT_EXECUTE_WRITECOPY)
1879 enmProt = KPROT_EXECUTE_READWRITE;
1880 else if (enmProt == KPROT_WRITECOPY)
1881 enmProt = KPROT_READWRITE;
1882 }
1883
1884
1885 /* calc the address and set page protection. */
1886 pv = (KU8 *)pvBits + pMod->aSegments[i].RVA;
1887
1888 rc = kHlpPageProtect(pv, pMod->aSegments[i].cbMapped, enmProt);
1889 if (rc)
1890            return rc;
1891
1892 /** @todo the gap page should be marked NOACCESS! */
1893 }
1894
1895 return 0;
1896}
1897
1898
1899/** @copydoc kLdrModUnmap */
1900static int kldrModLXUnmap(PKLDRMOD pMod)
1901{
1902 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1903 KU32 i;
1904 int rc;
1905
1906 /*
1907 * Mapped?
1908 */
1909 if (!pModLX->pvMapping)
1910 return KLDR_ERR_NOT_MAPPED;
1911
1912 /*
1913 * Free the mapping and update the segments.
1914 */
1915 rc = kHlpPageFree((void *)pModLX->pvMapping, pModLX->cbMapped);
1916 KLDRMODLX_ASSERT(!rc);
1917 pModLX->pvMapping = NULL;
1918
1919 for (i = 0; i < pMod->cSegments; i++)
1920 pMod->aSegments[i].MapAddress = 0;
1921
1922 return rc;
1923}
1924
1925
1926/** @copydoc kLdrModAllocTLS */
1927static int kldrModLXAllocTLS(PKLDRMOD pMod)
1928{
1929 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1930
1931 /* no tls, just do the error checking. */
1932 if (!pModLX->pvMapping)
1933 return KLDR_ERR_NOT_MAPPED;
1934 return 0;
1935}
1936
1937
1938/** @copydoc kLdrModFreeTLS */
1939static void kldrModLXFreeTLS(PKLDRMOD pMod)
1940{
1941 /* no tls. */
1942}
1943
1944
1945/** @copydoc kLdrModReload */
1946static int kldrModLXReload(PKLDRMOD pMod)
1947{
1948 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1949 int rc, rc2;
1950
1951 /*
1952 * Mapped?
1953 */
1954 if (!pModLX->pvMapping)
1955 return KLDR_ERR_NOT_MAPPED;
1956
1957 /*
1958 * Before doing anything we'll have to make all pages writable.
1959 */
1960 rc = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 1 /* unprotect */);
1961 if (rc)
1962 return rc;
1963
1964 /*
1965 * Load the bits again.
1966 */
1967 rc = kldrModLXDoLoadBits(pModLX, (void *)pModLX->pvMapping);
1968
1969 /*
1970 * Restore protection.
1971 */
1972 rc2 = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 0 /* protect */);
1973 if (!rc && rc2)
1974 rc = rc2;
1975 return rc;
1976}
1977
1978
1979/** @copydoc kLdrModFixupMapping */
1980static int kldrModLXFixupMapping(PKLDRMOD pMod, PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser)
1981{
1982 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1983 int rc, rc2;
1984
1985 /*
1986 * Mapped?
1987 */
1988 if (!pModLX->pvMapping)
1989 return KLDR_ERR_NOT_MAPPED;
1990
1991 /*
1992 * Before doing anything we'll have to make all pages writable.
1993 */
1994 rc = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 1 /* unprotect */);
1995 if (rc)
1996 return rc;
1997
1998 /*
1999 * Apply fixups and resolve imports.
2000 */
2001 rc = kldrModLXRelocateBits(pMod, (void *)pModLX->pvMapping, (KUPTR)pModLX->pvMapping,
2002 pMod->aSegments[0].LinkAddress, pfnGetImport, pvUser);
2003
2004 /*
2005 * Restore protection.
2006 */
2007 rc2 = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 0 /* protect */);
2008 if (!rc && rc2)
2009 rc = rc2;
2010 return rc;
2011}
2012
2013
2014/** @copydoc kLdrModCallInit */
2015static int kldrModLXCallInit(PKLDRMOD pMod, KUPTR uHandle)
2016{
2017 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2018 int rc;
2019
2020 /*
2021 * Mapped?
2022 */
2023 if (!pModLX->pvMapping)
2024 return KLDR_ERR_NOT_MAPPED;
2025
2026 /*
2027     * LX has no TLS callbacks, so just call the init entrypoint if this is a DLL.
2028 */
2029 if ((pModLX->Hdr.e32_mflags & E32MODMASK) == E32MODDLL)
2030 rc = kldrModLXDoCallDLL(pModLX, 0 /* attach */, uHandle);
2031 else
2032 rc = 0;
2033 return rc;
2034}
2035
2036
2037/**
2038 * Call the DLL entrypoint.
2039 *
2040 * @returns 0 on success.
2041 * @returns KLDR_ERR_MODULE_INIT_FAILED on attach failure; detach failures are ignored.
2042 * @param pModLX The LX module interpreter instance.
2043 * @param uOp The operation (DLL_*).
2044 * @param uHandle The module handle to present.
2045 */
2046static int kldrModLXDoCallDLL(PKLDRMODLX pModLX, unsigned uOp, KUPTR uHandle)
2047{
2048 int rc;
2049
2050 /*
2051     * If there is no entrypoint there is nothing to do.
2052 */
2053 if ( !pModLX->Hdr.e32_startobj
2054 || pModLX->Hdr.e32_startobj > pModLX->Hdr.e32_objcnt)
2055 return 0;
2056
2057 /*
2058 * Invoke the entrypoint and convert the boolean result to a kLdr status code.
2059 */
2060 rc = kldrModLXDoCall((KUPTR)pModLX->pvMapping
2061 + (KUPTR)pModLX->pMod->aSegments[pModLX->Hdr.e32_startobj - 1].RVA
2062 + pModLX->Hdr.e32_eip,
2063 uHandle, uOp, NULL);
2064 if (rc)
2065 rc = 0;
2066 else if (uOp == 0 /* attach */)
2067 rc = KLDR_ERR_MODULE_INIT_FAILED;
2068 else /* detach: ignore failures */
2069 rc = 0;
2070 return rc;
2071}
2072
2073
2074/**
2075 * Do a 3 parameter callback.
2076 *
2077 * @returns 32-bit callback return.
2078 * @param uEntrypoint The address of the function to be called.
2079 * @param uHandle The first argument, the module handle.
2080 * @param uOp The second argument, the reason we're calling.
2081 * @param pvReserved The third argument, reserved argument. (figure this one out)
2082 */
2083static KI32 kldrModLXDoCall(KUPTR uEntrypoint, KUPTR uHandle, KU32 uOp, void *pvReserved)
2084{
2085#if defined(__X86__) || defined(__i386__) || defined(_M_IX86)
2086 KI32 rc;
2087/** @todo try/except */
2088
2089 /*
2090 * Paranoia.
2091 */
2092# ifdef __GNUC__
2093 __asm__ __volatile__(
2094 "pushl %2\n\t"
2095 "pushl %1\n\t"
2096 "pushl %0\n\t"
2097 "lea 12(%%esp), %2\n\t"
2098 "call *%3\n\t"
2099 "movl %2, %%esp\n\t"
2100 : "=a" (rc)
2101 : "d" (uOp),
2102 "S" (0),
2103 "c" (uEntrypoint),
2104 "0" (uHandle));
2105# elif defined(_MSC_VER)
2106 __asm {
2107 mov eax, [uHandle]
2108 mov edx, [uOp]
2109 mov ecx, 0
2110 mov ebx, [uEntrypoint]
2111 push edi
2112 mov edi, esp
2113 push ecx
2114 push edx
2115 push eax
2116 call ebx
2117 mov esp, edi
2118 pop edi
2119 mov [rc], eax
2120 }
2121# else
2122# error "port me!"
2123# endif
2124 return rc;
2125
2126#else
2127 return KCPU_ERR_ARCH_CPU_NOT_COMPATIBLE;
2128#endif
2129}
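

/*
 * Illustrative sketch of what the inline assembly above amounts to at the C
 * level. PFNLXENTRY is a hypothetical typedef; the real entrypoint uses the
 * system calling convention, which is why the assembly saves and restores
 * ESP around the call instead of trusting the callee to clean up. Note that
 * the assembly always passes NULL/0 as the third argument.
 */
#if 0 /* example only */
typedef KU32 (* PFNLXENTRY)(KUPTR uHandle, KU32 uOp, void *pvReserved);

static KI32 kldrModLXDoCallSketch(KUPTR uEntrypoint, KUPTR uHandle, KU32 uOp)
{
    PFNLXENTRY pfnEntry = (PFNLXENTRY)uEntrypoint;
    return (KI32)pfnEntry(uHandle, uOp, NULL);  /* result is returned in EAX. */
}
#endif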
2130
2131
2132/** @copydoc kLdrModCallTerm */
2133static int kldrModLXCallTerm(PKLDRMOD pMod, KUPTR uHandle)
2134{
2135 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2136
2137 /*
2138 * Mapped?
2139 */
2140 if (!pModLX->pvMapping)
2141 return KLDR_ERR_NOT_MAPPED;
2142
2143 /*
2144 * Do the call.
2145 */
2146 if ((pModLX->Hdr.e32_mflags & E32MODMASK) == E32MODDLL)
2147 kldrModLXDoCallDLL(pModLX, 1 /* detach */, uHandle);
2148
2149 return 0;
2150}
2151
2152
2153/** @copydoc kLdrModCallThread */
2154static int kldrModLXCallThread(PKLDRMOD pMod, KUPTR uHandle, unsigned fAttachingOrDetaching)
2155{
2156 /* no thread attach/detach callout. */
2157 return 0;
2158}
2159
2160
2161/** @copydoc kLdrModSize */
2162static KLDRADDR kldrModLXSize(PKLDRMOD pMod)
2163{
2164 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2165 return pModLX->cbMapped;
2166}
2167
2168
2169/** @copydoc kLdrModGetBits */
2170static int kldrModLXGetBits(PKLDRMOD pMod, void *pvBits, KLDRADDR BaseAddress, PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser)
2171{
2172 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2173 int rc;
2174
2175 /*
2176 * Load the image bits.
2177 */
2178 rc = kldrModLXDoLoadBits(pModLX, pvBits);
2179 if (rc)
2180 return rc;
2181
2182 /*
2183 * Perform relocations.
2184 */
2185 return kldrModLXRelocateBits(pMod, pvBits, BaseAddress, pMod->aSegments[0].LinkAddress, pfnGetImport, pvUser);
2186
2187}
2188
2189
2190/** @copydoc kLdrModRelocateBits */
2191static int kldrModLXRelocateBits(PKLDRMOD pMod, void *pvBits, KLDRADDR NewBaseAddress, KLDRADDR OldBaseAddress,
2192 PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser)
2193{
2194 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2195 KU32 iSeg;
2196 int rc;
2197
2198 /*
2199     * Do we need to do *anything*?
2200 */
2201 if ( NewBaseAddress == OldBaseAddress
2202 && NewBaseAddress == pModLX->paObjs[0].o32_base
2203 && !pModLX->Hdr.e32_impmodcnt)
2204 return 0;
2205
2206 /*
2207 * Load the fixup section.
2208 */
2209 if (!pModLX->pbFixupSection)
2210 {
2211 rc = kldrModLXDoLoadFixupSection(pModLX);
2212 if (rc)
2213 return rc;
2214 }
2215
2216 /*
2217 * Iterate the segments.
2218 */
2219 for (iSeg = 0; iSeg < pModLX->Hdr.e32_objcnt; iSeg++)
2220 {
2221 const struct o32_obj * const pObj = &pModLX->paObjs[iSeg];
2222 KLDRADDR PageAddress = NewBaseAddress + pModLX->pMod->aSegments[iSeg].RVA;
2223 KU32 iPage;
2224 KU8 *pbPage = (KU8 *)pvBits + (KUPTR)pModLX->pMod->aSegments[iSeg].RVA;
2225
2226 /*
2227 * Iterate the page map pages.
2228 */
2229 for (iPage = 0, rc = 0; !rc && iPage < pObj->o32_mapsize; iPage++, pbPage += OBJPAGELEN, PageAddress += OBJPAGELEN)
2230 {
2231 const KU8 * const pbFixupRecEnd = pModLX->pbFixupRecs + pModLX->paoffPageFixups[iPage + pObj->o32_pagemap];
2232 const KU8 *pb = pModLX->pbFixupRecs + pModLX->paoffPageFixups[iPage + pObj->o32_pagemap - 1];
2233 KLDRADDR uValue;
2234 int iSelector;
2235 KU32 fKind;
2236
2237 /* sanity */
2238 if (pbFixupRecEnd < pb)
2239 return KLDR_ERR_BAD_FIXUP;
2240 if (pbFixupRecEnd - 1 > pModLX->pbFixupSectionLast)
2241 return KLDR_ERR_BAD_FIXUP;
2242 if (pb < pModLX->pbFixupSection)
2243 return KLDR_ERR_BAD_FIXUP;
2244
2245 /*
2246 * Iterate the fixup record.
2247 */
2248 while (pb < pbFixupRecEnd)
2249 {
2250 union _rel
2251 {
2252 const KU8 * pb;
2253 const struct r32_rlc *prlc;
2254 } u;
2255
2256 u.pb = pb;
2257 pb += 3 + (u.prlc->nr_stype & NRCHAIN ? 0 : 1); /* place pch at the 4th member. */
2258
2259 /*
2260 * Figure out the target.
2261 */
2262 switch (u.prlc->nr_flags & NRRTYP)
2263 {
2264 /*
2265 * Internal fixup.
2266 */
2267 case NRRINT:
2268 {
2269 KU16 iTrgObject;
2270 KU32 offTrgObject;
2271
2272 /* the object */
2273 if (u.prlc->nr_flags & NR16OBJMOD)
2274 {
2275 iTrgObject = *(const KU16 *)pb;
2276 pb += 2;
2277 }
2278 else
2279 iTrgObject = *pb++;
2280 iTrgObject--;
2281 if (iTrgObject >= pModLX->Hdr.e32_objcnt)
2282 return KLDR_ERR_BAD_FIXUP;
2283
2284 /* the target */
2285 if ((u.prlc->nr_stype & NRSRCMASK) != NRSSEG)
2286 {
2287 if (u.prlc->nr_flags & NR32BITOFF)
2288 {
2289 offTrgObject = *(const KU32 *)pb;
2290 pb += 4;
2291 }
2292 else
2293 {
2294 offTrgObject = *(const KU16 *)pb;
2295 pb += 2;
2296 }
2297
2298 /* calculate the symbol info. */
2299 uValue = offTrgObject + NewBaseAddress + pMod->aSegments[iTrgObject].RVA;
2300 }
2301 else
2302 uValue = NewBaseAddress + pMod->aSegments[iTrgObject].RVA;
2303 if ( (u.prlc->nr_stype & NRALIAS)
2304 || (pMod->aSegments[iTrgObject].fFlags & KLDRSEG_FLAG_16BIT))
2305 iSelector = pMod->aSegments[iTrgObject].Sel16bit;
2306 else
2307 iSelector = pMod->aSegments[iTrgObject].SelFlat;
2308 fKind = 0;
2309 break;
2310 }
2311
2312 /*
2313 * Import by symbol ordinal.
2314 */
2315 case NRRORD:
2316 {
2317 KU16 iModule;
2318 KU32 iSymbol;
2319
2320 /* the module ordinal */
2321 if (u.prlc->nr_flags & NR16OBJMOD)
2322 {
2323 iModule = *(const KU16 *)pb;
2324 pb += 2;
2325 }
2326 else
2327 iModule = *pb++;
2328 iModule--;
2329 if (iModule >= pModLX->Hdr.e32_impmodcnt)
2330 return KLDR_ERR_BAD_FIXUP;
2331#if 1
2332 if (u.prlc->nr_flags & NRICHAIN)
2333 return KLDR_ERR_BAD_FIXUP;
2334#endif
2335
2336                    /* the symbol ordinal. */
2337 if (u.prlc->nr_flags & NR32BITOFF)
2338 {
2339 iSymbol = *(const KU32 *)pb;
2340 pb += 4;
2341 }
2342 else if (!(u.prlc->nr_flags & NR8BITORD))
2343 {
2344 iSymbol = *(const KU16 *)pb;
2345 pb += 2;
2346 }
2347 else
2348 iSymbol = *pb++;
2349
2350 /* resolve it. */
2351 rc = pfnGetImport(pMod, iModule, iSymbol, NULL, 0, NULL, &uValue, &fKind, pvUser);
2352 if (rc)
2353 return rc;
2354 iSelector = -1;
2355 break;
2356 }
2357
2358 /*
2359 * Import by symbol name.
2360 */
2361 case NRRNAM:
2362 {
2363 KU32 iModule;
2364                    KU32        offSymbol;
2365 const KU8 *pbSymbol;
2366
2367 /* the module ordinal */
2368 if (u.prlc->nr_flags & NR16OBJMOD)
2369 {
2370 iModule = *(const KU16 *)pb;
2371 pb += 2;
2372 }
2373 else
2374 iModule = *pb++;
2375 iModule--;
2376 if (iModule >= pModLX->Hdr.e32_impmodcnt)
2377 return KLDR_ERR_BAD_FIXUP;
2378#if 1
2379 if (u.prlc->nr_flags & NRICHAIN)
2380 return KLDR_ERR_BAD_FIXUP;
2381#endif
2382
2383                    /* the offset into the import procedure name table. */
2384 if (u.prlc->nr_flags & NR32BITOFF)
2385 {
2386 offSymbol = *(const KU32 *)pb;
2387 pb += 4;
2388 }
2389 else if (!(u.prlc->nr_flags & NR8BITORD))
2390 {
2391 offSymbol = *(const KU16 *)pb;
2392 pb += 2;
2393 }
2394 else
2395 offSymbol = *pb++;
2396 pbSymbol = pModLX->pbImportProcs + offSymbol;
2397 if ( pbSymbol < pModLX->pbImportProcs
2398 || pbSymbol > pModLX->pbFixupSectionLast)
2399 return KLDR_ERR_BAD_FIXUP;
2400
2401 /* resolve it. */
2402 rc = pfnGetImport(pMod, iModule, NIL_KLDRMOD_SYM_ORDINAL, (const char *)pbSymbol + 1, *pbSymbol, NULL,
2403 &uValue, &fKind, pvUser);
2404 if (rc)
2405 return rc;
2406 iSelector = -1;
2407 break;
2408 }
2409
2410 case NRRENT:
2411 KLDRMODLX_ASSERT(!"NRRENT");
2412 default:
2413 iSelector = -1;
2414 break;
2415 }
2416
2417 /* addend */
2418 if (u.prlc->nr_flags & NRADD)
2419 {
2420 if (u.prlc->nr_flags & NR32BITADD)
2421 {
2422 uValue += *(const KU32 *)pb;
2423 pb += 4;
2424 }
2425 else
2426 {
2427 uValue += *(const KU16 *)pb;
2428 pb += 2;
2429 }
2430 }
2431
2432
2433 /*
2434 * Deal with the 'source' (i.e. the place that should be modified - very logical).
2435 */
2436 if (!(u.prlc->nr_stype & NRCHAIN))
2437 {
2438 int off = u.prlc->r32_soff;
2439
2440 /* common / simple */
2441 if ( (u.prlc->nr_stype & NRSRCMASK) == NROFF32
2442 && off >= 0
2443 && off <= OBJPAGELEN - 4)
2444 *(KU32 *)&pbPage[off] = uValue;
2445 else if ( (u.prlc->nr_stype & NRSRCMASK) == NRSOFF32
2446 && off >= 0
2447 && off <= OBJPAGELEN - 4)
2448 *(KU32 *)&pbPage[off] = uValue - (PageAddress + off + 4);
2449 else
2450 {
2451 /* generic */
2452 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2453 if (rc)
2454 return rc;
2455 }
2456 }
2457 else if (!(u.prlc->nr_flags & NRICHAIN))
2458 {
2459 const KI16 *poffSrc = (const KI16 *)pb;
2460 KU8 c = u.pb[2];
2461
2462 /* common / simple */
2463 if ((u.prlc->nr_stype & NRSRCMASK) == NROFF32)
2464 {
2465 while (c-- > 0)
2466 {
2467 int off = *poffSrc++;
2468 if (off >= 0 && off <= OBJPAGELEN - 4)
2469 *(KU32 *)&pbPage[off] = uValue;
2470 else
2471 {
2472 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2473 if (rc)
2474 return rc;
2475 }
2476 }
2477 }
2478 else if ((u.prlc->nr_stype & NRSRCMASK) == NRSOFF32)
2479 {
2480 while (c-- > 0)
2481 {
2482 int off = *poffSrc++;
2483 if (off >= 0 && off <= OBJPAGELEN - 4)
2484 *(KU32 *)&pbPage[off] = uValue - (PageAddress + off + 4);
2485 else
2486 {
2487 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2488 if (rc)
2489 return rc;
2490 }
2491 }
2492 }
2493 else
2494 {
2495 while (c-- > 0)
2496 {
2497 rc = kldrModLXDoReloc(pbPage, *poffSrc++, PageAddress, u.prlc, iSelector, uValue, fKind);
2498 if (rc)
2499 return rc;
2500 }
2501 }
2502 pb = (const KU8 *)poffSrc;
2503 }
2504 else
2505 {
2506 /* This is a pain because it will require virgin pages on a relocation. */
2507 KLDRMODLX_ASSERT(!"NRICHAIN");
2508 return KLDR_ERR_LX_NRICHAIN_NOT_SUPPORTED;
2509 }
2510 }
2511 }
2512 }
2513
2514 return 0;
2515}
2516
2517
2518/**
2519 * Applies the relocation to one 'source' in a page.
2520 *
2521 * This takes care of the more esoteric cases, while the common cases
2522 * are dealt with separately by the caller.
2523 *
2524 * @returns 0 on success, non-zero kLdr status code on failure.
2525 * @param pbPage The page in which to apply the fixup.
2526 * @param off Page relative offset of where to apply the fixup.
2527 * @param uValue The target value.
2528 * @param fKind The target kind.
2529 */
2530static int kldrModLXDoReloc(KU8 *pbPage, int off, KLDRADDR PageAddress, const struct r32_rlc *prlc,
2531 int iSelector, KLDRADDR uValue, KU32 fKind)
2532{
2533#pragma pack(1) /* just to be sure */
2534 union
2535 {
2536 KU8 ab[6];
2537 KU32 off32;
2538 KU16 off16;
2539 KU8 off8;
2540 struct
2541 {
2542 KU16 off;
2543 KU16 Sel;
2544 } Far16;
2545 struct
2546 {
2547 KU32 off;
2548 KU16 Sel;
2549 } Far32;
2550 } uData;
2551#pragma pack()
2552 const KU8 *pbSrc;
2553 KU8 *pbDst;
2554 KU8 cb;
2555
2556 /*
2557 * Compose the fixup data.
2558 */
2559 switch (prlc->nr_stype & NRSRCMASK)
2560 {
2561 case NRSBYT:
2562 uData.off8 = (KU8)uValue;
2563 cb = 1;
2564 break;
2565 case NRSSEG:
2566 if (iSelector == -1)
2567 {
2568 /* fixme */
2569 }
2570 uData.off16 = iSelector;
2571 cb = 2;
2572 break;
2573 case NRSPTR:
2574 if (iSelector == -1)
2575 {
2576 /* fixme */
2577 }
2578 uData.Far16.off = (KU16)uValue;
2579 uData.Far16.Sel = iSelector;
2580 cb = 4;
2581 break;
2582 case NRSOFF:
2583 uData.off16 = (KU16)uValue;
2584 cb = 2;
2585 break;
2586 case NRPTR48:
2587 if (iSelector == -1)
2588 {
2589 /* fixme */
2590 }
2591 uData.Far32.off = (KU32)uValue;
2592 uData.Far32.Sel = iSelector;
2593 cb = 6;
2594 break;
2595 case NROFF32:
2596 uData.off32 = (KU32)uValue;
2597 cb = 4;
2598 break;
2599 case NRSOFF32:
2600 uData.off32 = (KU32)uValue - (PageAddress + off + 4);
2601 cb = 4;
2602 break;
2603 default:
2604 return KLDR_ERR_LX_BAD_FIXUP_SECTION; /** @todo fix error, add more checks! */
2605 }
2606
2607 /*
2608 * Apply it. This is sloooow...
2609 */
2610 pbSrc = &uData.ab[0];
2611 pbDst = pbPage + off;
2612 while (cb-- > 0)
2613 {
2614        if (off >= OBJPAGELEN)
2615 break;
2616 if (off >= 0)
2617 *pbDst = *pbSrc;
2618 pbSrc++;
2619        pbDst++;
            off++;
2620 }
2621
2622 return 0;
2623}
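

/*
 * Illustrative note on the NRSOFF32 formula above (made-up numbers): the
 * stored value is the distance from the end of the 32-bit fixup site to the
 * target, e.g. a page mapped at 0x00020000 with a fixup at page offset 0x10
 * and a target of 0x00031000 stores
 *      0x00031000 - (0x00020000 + 0x10 + 4) = 0x00010fec
 * which is exactly what a relative CALL/JMP at that location needs.
 */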
2624
2625
2626/**
2627 * The LX module interpreter method table.
2628 */
2629KLDRMODOPS g_kLdrModLXOps =
2630{
2631 "LX",
2632 NULL,
2633 kldrModLXCreate,
2634 kldrModLXDestroy,
2635 kldrModLXQuerySymbol,
2636 kldrModLXEnumSymbols,
2637 kldrModLXGetImport,
2638 kldrModLXNumberOfImports,
2639    NULL /* pfnCanExecuteOn is optional */,
2640 kldrModLXGetStackInfo,
2641 kldrModLXQueryMainEntrypoint,
2642 NULL /* pfnQueryImageUuid */,
2643 NULL /* fixme */,
2644 NULL /* fixme */,
2645 kldrModLXEnumDbgInfo,
2646 kldrModLXHasDbgInfo,
2647 kldrModLXMap,
2648 kldrModLXUnmap,
2649 kldrModLXAllocTLS,
2650 kldrModLXFreeTLS,
2651 kldrModLXReload,
2652 kldrModLXFixupMapping,
2653 kldrModLXCallInit,
2654 kldrModLXCallTerm,
2655 kldrModLXCallThread,
2656 kldrModLXSize,
2657 kldrModLXGetBits,
2658 kldrModLXRelocateBits,
2659 NULL /* fixme: pfnMostlyDone */,
2660 42 /* the end */
2661};
2662