source: trunk/kLdr/kLdrModLX.c@ 2890

Last change on this file since 2890 was 2890, checked in by bird, 19 years ago

LX bugfixing. the unit test succeeds, but there are still things todo.

1/* $Id: kLdrModLX.c 2890 2006-11-21 16:33:25Z bird $ */
2/** @file
3 *
4 * kLdr - The Module Interpreter for the Linear eXecutable (LX) Format.
5 *
6 * Copyright (c) 2006 knut st. osmundsen <bird@anduin.net>
7 *
8 *
9 * This file is part of kLdr.
10 *
11 * kLdr is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * kLdr is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with kLdr; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 *
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#include <kLdr.h>
32#include "kLdrHlp.h"
33#include "kLdrInternal.h"
34#include "kLdrModLX.h"
35
36
37/*******************************************************************************
38* Defined Constants And Macros *
39*******************************************************************************/
40/** @def KLDRMODLX_STRICT
41 * Define KLDRMODLX_STRICT to enable strict checks in KLDRMODLX. */
42#define KLDRMODLX_STRICT 1
43
44/** @def KLDRMODLX_ASSERT
45 * Assert that an expression is true when KLDRMODLX_STRICT is defined.
46 */
47#ifdef KLDRMODLX_STRICT
48# define KLDRMODLX_ASSERT(expr) kldrHlpAssert(expr)
49#else
50# define KLDRMODLX_ASSERT(expr) do {} while (0)
51#endif
52
53
54/*******************************************************************************
55* Structures and Typedefs *
56*******************************************************************************/
57/**
58 * Instance data for the LX module interpreter.
59 */
60typedef struct KLDRMODLX
61{
62 /** Pointer to the module. (Follows the section table.) */
63 PKLDRMOD pMod;
64 /** Pointer to the user mapping. */
65 const void *pvMapping;
66 /** The size of the mapped LX image. */
67 size_t cbMapped;
68 /** Reserved flags. */
69 uint32_t f32Reserved;
70
71 /** The offset of the LX header. */
72 off_t offHdr;
73 /** Copy of the LX header. */
74 struct e32_exe Hdr;
75
76 /** Pointer to the loader section.
77 * Allocated together with this structure. */
78 const uint8_t *pbLoaderSection;
79 /** Pointer to the last byte in the loader section. */
80 const uint8_t *pbLoaderSectionLast;
81 /** Pointer to the object table in the loader section. */
82 const struct o32_obj *paObjs;
83 /** Pointer to the object page map table in the loader section. */
84 const struct o32_map *paPageMappings;
85 /** Pointer to the resource table in the loader section. */
86 const struct rsrc32 *paRsrcs;
87 /** Pointer to the resident name table in the loader section. */
88 const uint8_t *pbResNameTab;
89 /** Pointer to the entry table in the loader section. */
90 const uint8_t *pbEntryTab;
91
92 /** Pointer to the non-resident name table. */
93 uint8_t *pbNonResNameTab;
94 /** Pointer to the last byte in the non-resident name table. */
95 const uint8_t *pbNonResNameTabLast;
96
97 /** Pointer to the fixup section. */
98 uint8_t *pbFixupSection;
99 /** Pointer to the last byte in the fixup section. */
100 const uint8_t *pbFixupSectionLast;
101 /** Pointer to the fixup page table within pvFixupSection. */
102 const uint32_t *paoffPageFixups;
103 /** Pointer to the fixup record table within pvFixupSection. */
104 const uint8_t *pbFixupRecs;
105 /** Pointer to the import module name table within pvFixupSection. */
106 const uint8_t *pbImportMods;
107 /** Pointer to the import procedure name table within pvFixupSection. */
108 const uint8_t *pbImportProcs;
109} KLDRMODLX, *PKLDRMODLX;
110
111
112/*******************************************************************************
113* Internal Functions *
114*******************************************************************************/
115static int kldrModLXHasDbgInfo(PKLDRMOD pMod, const void *pvBits);
116static int kldrModLXRelocateBits(PKLDRMOD pMod, void *pvBits, KLDRADDR NewBaseAddress, KLDRADDR OldBaseAddress,
117 PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser);
118static int kldrModLXDoCreate(PKLDRRDR pRdr, off_t offNewHdr, PKLDRMODLX *ppModLX);
119static const uint8_t *kldrModLXDoNameTableLookupByOrdinal(const uint8_t *pbNameTable, int32_t cbNameTable, uint32_t iOrdinal);
120static int kldrModLXDoNameLookup(PKLDRMODLX pModLX, const char *pszSymbol, uint32_t *piSymbol);
121#if 0
122static const uint8_t *kldrModLXDoNameTableLookupByName(const uint8_t *pbNameTable, int32_t cbNameTable,
123 char *pchSymbol, size_t cchSymbol);
124#endif
125static int kldrModLXDoLoadBits(PKLDRMODLX pModLX, void *pvBits);
126static int kldrModLXDoIterDataUnpacking(uint8_t *pbDst, const uint8_t *pbSrc, int cbSrc);
127static int kldrModLXDoIterData2Unpacking(uint8_t *pbDst, const uint8_t *pbSrc, int cbSrc);
128static void kLdrModLXMemCopyW(uint8_t *pbDst, const uint8_t *pbSrc, int cb);
129static int kldrModLXDoProtect(PKLDRMODLX pModLX, void *pvBits, unsigned fUnprotectOrProtect);
130static int kldrModLXDoCallDLL(PKLDRMODLX pModLX, unsigned uOp, uintptr_t uHandle);
131static int kldrModLXDoForwarderQuery(PKLDRMODLX pModLX, const struct e32_entry *pEntry,
132 PFNKLDRMODGETIMPORT pfnGetForwarder, void *pvUser, PKLDRADDR puValue, uint32_t *pfKind);
133static int kldrModLXDoLoadFixupSection(PKLDRMODLX pModLX);
134static int32_t kldrModLXDoCall(uintptr_t uEntrypoint, uintptr_t uHandle, uint32_t uOp, void *pvReserved);
135static int kldrModLXDoReloc(uint8_t *pbPage, int off, KLDRADDR uValue, uint32_t fKind);
136
137
138/**
139 * Create a loader module instance interpreting the executable image found
140 * in the specified file provider instance.
141 *
142 * @returns 0 on success and *ppMod pointing to a module instance.
143 * On failure, a non-zero OS specific error code is returned.
144 * @param pOps Pointer to the registered method table.
145 * @param pRdr The file provider instance to use.
146 * @param offNewHdr The offset of the new header in MZ files. -1 if not found.
147 * @param ppMod Where to store the module instance pointer.
148 */
149static int kldrModLXCreate(PCKLDRMODOPS pOps, PKLDRRDR pRdr, off_t offNewHdr, PPKLDRMOD ppMod)
150{
151 PKLDRMODLX pModLX;
152 int rc;
153
154 /*
155 * Create the instance data and do a minimal header validation.
156 */
157 rc = kldrModLXDoCreate(pRdr, offNewHdr, &pModLX);
158 if (!rc)
159 {
160 pModLX->pMod->pOps = pOps;
161 pModLX->pMod->u32Magic = KLDRMOD_MAGIC;
162 *ppMod = pModLX->pMod;
163 return 0;
164 }
165 kldrHlpFree(pModLX);
166 return rc;
167}
168
169
170/**
171 * Separate function for reading and creating the LX module instance to
172 * simplify cleanup on failure.
173 */
174static int kldrModLXDoCreate(PKLDRRDR pRdr, off_t offNewHdr, PKLDRMODLX *ppModLX)
175{
176 struct e32_exe Hdr;
177 PKLDRMODLX pModLX;
178 PKLDRMOD pMod;
179 size_t cb;
180 size_t cchFilename;
181 uint32_t off, offEnd;
182 uint32_t i;
183 int rc;
184 int fCanOptimizeMapping;
185 uint32_t NextRVA;
186 *ppModLX = NULL;
187
188 /*
189 * Read the signature and file header.
190 */
191 rc = kLdrRdrRead(pRdr, &Hdr, sizeof(Hdr), offNewHdr > 0 ? offNewHdr : 0);
192 if (rc)
193 return rc;
194 if ( Hdr.e32_magic[0] != E32MAGIC1
195 || Hdr.e32_magic[1] != E32MAGIC2)
196 return KLDR_ERR_UNKNOWN_FORMAT;
197
198 /* We're not interested in anything but x86 images. */
199 if ( Hdr.e32_level != E32LEVEL
200 || Hdr.e32_border != E32LEBO
201 || Hdr.e32_worder != E32LEWO
202 || Hdr.e32_cpu < E32CPU286
203 || Hdr.e32_cpu > E32CPU486
204 || Hdr.e32_pagesize != OBJPAGELEN
205 )
206 return KLDR_ERR_LX_BAD_HEADER;
207
208 /* Some rough sanity checks. */
209 offEnd = kLdrRdrSize(pRdr) >= (off_t)~(uint32_t)16 ? ~(uint32_t)16 : (uint32_t)kLdrRdrSize(pRdr);
210 if ( Hdr.e32_itermap > offEnd
211 || Hdr.e32_datapage > offEnd
212 || Hdr.e32_nrestab > offEnd
213 || Hdr.e32_nrestab + Hdr.e32_cbnrestab > offEnd
214 || Hdr.e32_ldrsize > offEnd - offNewHdr - sizeof(Hdr)
215 || Hdr.e32_fixupsize > offEnd - offNewHdr - sizeof(Hdr)
216 || Hdr.e32_fixupsize + Hdr.e32_ldrsize > offEnd - offNewHdr - sizeof(Hdr))
217 return KLDR_ERR_LX_BAD_HEADER;
218
219 /* Verify the loader section. */
220 offEnd = Hdr.e32_objtab + Hdr.e32_ldrsize;
221 if (Hdr.e32_objtab < sizeof(Hdr))
222 return KLDR_ERR_LX_BAD_LOADER_SECTION;
223 off = Hdr.e32_objtab + sizeof(struct o32_obj) * Hdr.e32_objcnt;
224 if (off > offEnd)
225 return KLDR_ERR_LX_BAD_LOADER_SECTION;
226 if ( Hdr.e32_objmap
227 && (Hdr.e32_objmap < off || Hdr.e32_objmap > offEnd))
228 return KLDR_ERR_LX_BAD_LOADER_SECTION;
229 if ( Hdr.e32_rsrccnt
230 && ( Hdr.e32_rsrctab < off
231 || Hdr.e32_rsrctab > offEnd
232 || Hdr.e32_rsrctab + sizeof(struct rsrc32) * Hdr.e32_rsrccnt > offEnd))
233 return KLDR_ERR_LX_BAD_LOADER_SECTION;
234 if ( Hdr.e32_restab
235 && (Hdr.e32_restab < off || Hdr.e32_restab > offEnd - 2))
236 return KLDR_ERR_LX_BAD_LOADER_SECTION;
237 if ( Hdr.e32_enttab
238 && (Hdr.e32_enttab < off || Hdr.e32_enttab > offEnd - 2))
239 return KLDR_ERR_LX_BAD_LOADER_SECTION;
240 if ( Hdr.e32_dircnt
241 && (Hdr.e32_dirtab < off || Hdr.e32_dirtab > offEnd))
242 return KLDR_ERR_LX_BAD_LOADER_SECTION;
243
244 /* Verify the fixup section. */
245 off = offEnd;
246 offEnd = off + Hdr.e32_fixupsize;
247 if ( Hdr.e32_fpagetab
248 && (Hdr.e32_fpagetab < off || Hdr.e32_fpagetab > offEnd))
249 return KLDR_ERR_LX_BAD_FIXUP_SECTION;
250 if ( Hdr.e32_frectab
251 && (Hdr.e32_frectab < off || Hdr.e32_frectab > offEnd))
252 return KLDR_ERR_LX_BAD_FIXUP_SECTION;
253 if ( Hdr.e32_impmod
254 && (Hdr.e32_impmod < off || Hdr.e32_impmod > offEnd || Hdr.e32_impmod + Hdr.e32_impmodcnt > offEnd))
255 return KLDR_ERR_LX_BAD_FIXUP_SECTION;
256 if ( Hdr.e32_impproc
257 && (Hdr.e32_impproc < off || Hdr.e32_impproc > offEnd))
258 return KLDR_ERR_LX_BAD_FIXUP_SECTION;
259
260 /*
261 * Calc the instance size, allocate and initialize it.
262 */
263 cchFilename = kLdrHlpStrLen(kLdrRdrName(pRdr));
264 cb = KLDR_ALIGN_Z(sizeof(KLDRMODLX), 8)
265 + KLDR_ALIGN_Z(KLDR_OFFSETOF(KLDRMOD, aSegments[Hdr.e32_objcnt + 1]), 8)
266 + KLDR_ALIGN_Z(cchFilename + 1, 8)
267 + Hdr.e32_ldrsize;
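 /* Rough picture of the single allocation (editor's sketch, not authoritative):
  *
  *      +-----------------------------+  <- 8 byte aligned
  *      | KLDRMODLX                   |
  *      +-----------------------------+  <- 8 byte aligned
  *      | KLDRMOD + aSegments[cnt+1]  |
  *      +-----------------------------+  <- 8 byte aligned
  *      | filename + '\0'             |
  *      +-----------------------------+  <- pbLoaderSection (16 byte aligned below)
  *      | loader section              |
  *      +-----------------------------+
  */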
268 pModLX = (PKLDRMODLX)kldrHlpAlloc(cb);
269 if (!pModLX)
270 return KLDR_ERR_NO_MEMORY;
271 *ppModLX = pModLX;
272
273 /* KLDRMOD */
274 pMod = (PKLDRMOD)((uint8_t *)pModLX + KLDR_ALIGN_Z(sizeof(KLDRMODLX), 8));
275 pMod->pvData = pModLX;
276 pMod->pRdr = pRdr;
277 pMod->pOps = NULL; /* set upon success. */
278 pMod->cSegments = Hdr.e32_objcnt;
279 pMod->cchFilename = cchFilename;
280 pMod->pszFilename = (char *)KLDR_ALIGN_P(&pMod->aSegments[pMod->cSegments], 8);
281 kLdrHlpMemCopy((char *)pMod->pszFilename, kLdrRdrName(pRdr), cchFilename + 1);
282 pMod->pszName = NULL; /* finalized further down */
283 pMod->cchName = 0;
284 switch (Hdr.e32_cpu)
285 {
286 case E32CPU286:
287 pMod->enmCpu = KLDRCPU_I80286;
288 pMod->enmArch = KLDRARCH_X86_16;
289 break;
290 case E32CPU386:
291 pMod->enmCpu = KLDRCPU_I386;
292 pMod->enmArch = KLDRARCH_X86_32;
293 break;
294 case E32CPU486:
295 pMod->enmCpu = KLDRCPU_I486;
296 pMod->enmArch = KLDRARCH_X86_32;
297 break;
298 }
299 pMod->enmEndian = KLDRENDIAN_LITTLE;
300 pMod->enmFmt = KLDRFMT_LX;
301 switch (Hdr.e32_mflags & E32MODMASK)
302 {
303 case E32MODEXE:
304 pMod->enmType = !(Hdr.e32_mflags & E32NOINTFIX)
305 ? KLDRTYPE_EXECUTABLE_RELOCATABLE
306 : KLDRTYPE_EXECUTABLE_FIXED;
307 break;
308
309 case E32MODDLL:
310 case E32PROTDLL:
311 case E32MODPROTDLL:
312 pMod->enmType = !(Hdr.e32_mflags & E32SYSDLL)
313 ? KLDRTYPE_SHARED_LIBRARY_RELOCATABLE
314 : KLDRTYPE_SHARED_LIBRARY_FIXED;
315 break;
316
317 case E32MODPDEV:
318 case E32MODVDEV:
319 pMod->enmType = KLDRTYPE_SHARED_LIBRARY_RELOCATABLE;
320 break;
321 }
322 pMod->u32Magic = 0; /* set upon success. */
323
324 /* KLDRMODLX */
325 pModLX->pMod = pMod;
326 pModLX->pvMapping = 0;
327 pModLX->cbMapped = 0;
328 pModLX->f32Reserved = 0;
329
330 pModLX->offHdr = offNewHdr >= 0 ? offNewHdr : 0;
331 pModLX->Hdr = Hdr;
332
333 pModLX->pbLoaderSection = KLDR_ALIGN_P(pMod->pszFilename + pMod->cchFilename + 1, 16);
334 pModLX->pbLoaderSectionLast = pModLX->pbLoaderSection + pModLX->Hdr.e32_ldrsize;
335 pModLX->paObjs = NULL;
336 pModLX->paPageMappings = NULL;
337 pModLX->paRsrcs = NULL;
338 pModLX->pbResNameTab = NULL;
339 pModLX->pbEntryTab = NULL;
340
341 pModLX->pbNonResNameTab = NULL;
342 pModLX->pbNonResNameTabLast = NULL;
343
344 pModLX->pbFixupSection = NULL;
345 pModLX->pbFixupSectionLast = NULL;
346 pModLX->paoffPageFixups = NULL;
347 pModLX->pbFixupRecs = NULL;
348 pModLX->pbImportMods = NULL;
349 pModLX->pbImportProcs = NULL;
350
351 /*
352 * Read the loader data.
353 */
354 rc = kLdrRdrRead(pRdr, (void *)pModLX->pbLoaderSection, pModLX->Hdr.e32_ldrsize, pModLX->Hdr.e32_objtab + pModLX->offHdr);
355 if (rc)
356 return rc;
357 if (pModLX->Hdr.e32_objcnt)
358 pModLX->paObjs = (const struct o32_obj *)pModLX->pbLoaderSection;
359 if (pModLX->Hdr.e32_objmap)
360 pModLX->paPageMappings = (const struct o32_map *)(pModLX->pbLoaderSection + pModLX->Hdr.e32_objmap - pModLX->Hdr.e32_objtab);
361 if (pModLX->Hdr.e32_rsrccnt)
362 pModLX->paRsrcs = (const struct rsrc32 *)(pModLX->pbLoaderSection + pModLX->Hdr.e32_rsrctab - pModLX->Hdr.e32_objtab);
363 if (pModLX->Hdr.e32_restab)
364 pModLX->pbResNameTab = pModLX->pbLoaderSection + pModLX->Hdr.e32_restab - pModLX->Hdr.e32_objtab;
365 if (pModLX->Hdr.e32_enttab)
366 pModLX->pbEntryTab = pModLX->pbLoaderSection + pModLX->Hdr.e32_enttab - pModLX->Hdr.e32_objtab;
367
368 /*
369 * Get the soname from the resident name table.
370 * Very convenient that it's the 0 ordinal, because then we get a
371 * free string terminator.
372 * (The table entry consists of a pascal string followed by a 16-bit ordinal.)
373 */
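 /* Illustrative example (editor's note, not from the original source): the entry
  * for a module named "FOO" exported at ordinal 0 is stored as
  *      03 'F' 'O' 'O' 00 00
  * so pszName can point straight at 'F' and the low byte of the little endian
  * ordinal 0 doubles as the string terminator. */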
374 pMod->pszName = (const char *)kldrModLXDoNameTableLookupByOrdinal(pModLX->pbResNameTab,
375 pModLX->pbLoaderSectionLast - pModLX->pbResNameTab + 1,
376 0);
377 if (!pMod->pszName)
378 return KLDR_ERR_LX_NO_SONAME;
379 pMod->cchName = *(const uint8_t *)pMod->pszName++;
380 if (pMod->cchName != kLdrHlpStrLen(pMod->pszName))
381 return KLDR_ERR_LX_BAD_SONAME;
382
383 /*
384 * Quick validation of the object table.
385 */
386 cb = 0;
387 for (i = 0; i < pMod->cSegments; i++)
388 {
389 if (pModLX->paObjs[i].o32_base & (OBJPAGELEN - 1))
390 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
391 if (pModLX->paObjs[i].o32_base + pModLX->paObjs[i].o32_size <= pModLX->paObjs[i].o32_base)
392 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
393 if (pModLX->paObjs[i].o32_mapsize > (pModLX->paObjs[i].o32_size + (OBJPAGELEN - 1)))
394 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
395 if ( pModLX->paObjs[i].o32_mapsize
396 && ( (uint8_t *)&pModLX->paPageMappings[pModLX->paObjs[i].o32_pagemap] > pModLX->pbLoaderSectionLast
397 || (uint8_t *)&pModLX->paPageMappings[pModLX->paObjs[i].o32_pagemap + pModLX->paObjs[i].o32_mapsize]
398 > pModLX->pbLoaderSectionLast))
399 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
400 if (i > 0)
401 {
402 if (pModLX->paObjs[i].o32_base <= pModLX->paObjs[i - 1].o32_base)
403 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
404 if (pModLX->paObjs[i].o32_base < pModLX->paObjs[i - 1].o32_base + pModLX->paObjs[i - 1].o32_mapsize)
405 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
406 }
407 }
408
409 /*
410 * Check if we can optimize the mapping by using a different
411 * object alignment. The linker typically uses 64KB alignment;
412 * we can easily get away with page alignment in most cases.
413 */
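 /* Worked example (editor's sketch, assuming the usual 4KB OBJPAGELEN): two
  * objects linked at 0x10000 and 0x20000 with sizes 0x1800 and 0x0800. With
  * fCanOptimizeMapping set they are packed at RVA 0x0000 and 0x2000, giving
  * cbMapped = 0x3000; without it the 64KB spacing is kept, i.e. RVA 0x0000 and
  * 0x10000, cbMapped = 0x11000. */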
414 fCanOptimizeMapping = !(Hdr.e32_mflags & (E32NOINTFIX | E32SYSDLL));
415 NextRVA = 0;
416
417 /*
418 * Setup the KLDRMOD segment array.
419 */
420 for (i = 0; i < pMod->cSegments; i++)
421 {
422 /* unused */
423 pMod->aSegments[i].pvUser = NULL;
424 pMod->aSegments[i].MapAddress = 0;
425 pMod->aSegments[i].pchName = NULL;
426 pMod->aSegments[i].cchName = 0;
427 pMod->aSegments[i].offFile = -1;
428 pMod->aSegments[i].cbFile = -1;
429
430 /* size and addresses */
431 pMod->aSegments[i].Alignment = OBJPAGELEN;
432 pMod->aSegments[i].cb = pModLX->paObjs[i].o32_size;
433 pMod->aSegments[i].LinkAddress = pModLX->paObjs[i].o32_base;
434 pMod->aSegments[i].RVA = NextRVA;
435 if (fCanOptimizeMapping)
436 pMod->aSegments[i].cbMapped = KLDR_ALIGN_Z(pModLX->paObjs[i].o32_size, OBJPAGELEN);
437 else
438 pMod->aSegments[i].cbMapped = i + 1 < pMod->cSegments
439 ? pModLX->paObjs[i + 1].o32_base - pModLX->paObjs[i].o32_base
440 : KLDR_ALIGN_Z(pModLX->paObjs[i].o32_size, OBJPAGELEN);
441 NextRVA += pMod->aSegments[i].cbMapped;
442
443 /* protection */
444 switch ( pModLX->paObjs[i].o32_flags
445 & (OBJSHARED | OBJREAD | OBJWRITE | OBJEXEC))
446 {
447 case 0:
448 case OBJSHARED:
449 pMod->aSegments[i].enmProt = KLDRPROT_NOACCESS;
450 break;
451 case OBJREAD:
452 case OBJREAD | OBJSHARED:
453 pMod->aSegments[i].enmProt = KLDRPROT_READONLY;
454 break;
455 case OBJWRITE:
456 case OBJWRITE | OBJREAD:
457 pMod->aSegments[i].enmProt = KLDRPROT_WRITECOPY;
458 break;
459 case OBJWRITE | OBJSHARED:
460 case OBJWRITE | OBJSHARED | OBJREAD:
461 pMod->aSegments[i].enmProt = KLDRPROT_READWRITE;
462 break;
463 case OBJEXEC:
464 case OBJEXEC | OBJSHARED:
465 pMod->aSegments[i].enmProt = KLDRPROT_EXECUTE;
466 break;
467 case OBJEXEC | OBJREAD:
468 case OBJEXEC | OBJREAD | OBJSHARED:
469 pMod->aSegments[i].enmProt = KLDRPROT_EXECUTE_READ;
470 break;
471 case OBJEXEC | OBJWRITE:
472 case OBJEXEC | OBJWRITE | OBJREAD:
473 pMod->aSegments[i].enmProt = KLDRPROT_EXECUTE_WRITECOPY;
474 break;
475 case OBJEXEC | OBJWRITE | OBJSHARED:
476 case OBJEXEC | OBJWRITE | OBJSHARED | OBJREAD:
477 pMod->aSegments[i].enmProt = KLDRPROT_EXECUTE_READWRITE;
478 break;
479 }
480 if ((pModLX->paObjs[i].o32_flags & (OBJREAD | OBJWRITE | OBJEXEC | OBJRSRC)) == OBJRSRC)
481 pMod->aSegments[i].enmProt = KLDRPROT_READONLY;
482 /*pMod->aSegments[i].f16bit = !(pModLX->paObjs[i].o32_flags & OBJBIGDEF)
483 pMod->aSegments[i].fIOPL = !(pModLX->paObjs[i].o32_flags & OBJIOPL)
484 pMod->aSegments[i].fConforming = !(pModLX->paObjs[i].o32_flags & OBJCONFORM) */
485 }
486
487 /* set the mapping size */
488 pModLX->cbMapped = NextRVA;
489
490 /*
491 * We're done.
492 */
493 *ppModLX = pModLX;
494 return 0;
495}
496
497
498/** @copydoc KLDRMODOPS::pfnDestroy */
499static int kldrModLXDestroy(PKLDRMOD pMod)
500{
501 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
502 int rc = 0;
503 KLDRMODLX_ASSERT(!pModLX->pvMapping);
504
505 if (pMod->pRdr)
506 {
507 rc = kLdrRdrClose(pMod->pRdr);
508 pMod->pRdr = NULL;
509 }
510 if (pModLX->pbNonResNameTab)
511 {
512 kldrHlpFree(pModLX->pbNonResNameTab);
513 pModLX->pbNonResNameTab = NULL;
514 }
515 if (pModLX->pbFixupSection)
516 {
517 kldrHlpFree(pModLX->pbFixupSection);
518 pModLX->pbFixupSection = NULL;
519 }
520 pMod->u32Magic = 0;
521 pMod->pOps = NULL;
522 kldrHlpFree(pModLX);
523 return rc;
524}
525
526
527/**
528 * Resolves base address aliases.
529 *
530 * @param pModLX The interpreter module instance
531 * @param pBaseAddress The base address, IN & OUT.
532 */
533static void kldrModLXResolveBaseAddress(PKLDRMODLX pModLX, PKLDRADDR pBaseAddress)
534{
535 if (*pBaseAddress == KLDRMOD_BASEADDRESS_MAP)
536 *pBaseAddress = pModLX->pMod->aSegments[0].MapAddress;
537 else if (*pBaseAddress == KLDRMOD_BASEADDRESS_LINK)
538 *pBaseAddress = pModLX->pMod->aSegments[0].LinkAddress;
539}
540
541
542/** @copydoc kLdrModQuerySymbol */
543static int kldrModLXQuerySymbol(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress, uint32_t iSymbol,
544 const char *pszSymbol, PFNKLDRMODGETIMPORT pfnGetForwarder, void *pvUser,
545 PKLDRADDR puValue, uint32_t *pfKind)
546{
547 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
548 uint32_t iOrdinal;
549 int rc;
550 const struct b32_bundle *pBundle;
551
552
553 /*
554 * Give up at once if there is no entry table.
555 */
556 if (!pModLX->Hdr.e32_enttab)
557 return KLDR_ERR_SYMBOL_NOT_FOUND;
558
559 /*
560 * Translate the symbol name into an ordinal.
561 */
562 if (pszSymbol)
563 {
564 rc = kldrModLXDoNameLookup(pModLX, pszSymbol, &iSymbol);
565 if (rc)
566 return rc;
567 }
568
569 /*
570 * Iterate the entry table.
571 * (The entry table is made up of bundles of similar exports.)
572 */
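 /* Editor's note on the layout: a bundle starts with a count byte (b32_cnt), a
  * type byte (b32_type) and, for non-empty bundle types, a 16-bit object number
  * (b32_obj), followed by b32_cnt packed e32_entry records whose size depends on
  * the type (see s_cbEntry below). A bundle with a zero count terminates the
  * table. */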
573 iOrdinal = 0;
574 pBundle = (const struct b32_bundle *)pModLX->pbEntryTab;
575 while (pBundle->b32_cnt && iOrdinal <= iSymbol)
576 {
577 static const size_t s_cbEntry[] = { 0, 3, 5, 5, 7 };
578
579 /*
580 * Check for a hit first.
581 */
582 iOrdinal += pBundle->b32_cnt;
583 if (iSymbol < iOrdinal)
584 {
585 uint32_t offObject;
586 const struct e32_entry *pEntry = (const struct e32_entry *)((uintptr_t)(pBundle + 1)
587 + iSymbol - (iOrdinal - pBundle->b32_cnt));
588
589 /*
590 * Calculate the return address.
591 */
592 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
593 switch (pBundle->b32_type)
594 {
595 /* empty bundles are place holders for unused ordinal ranges. */
596 case EMPTY:
597 return KLDR_ERR_SYMBOL_NOT_FOUND;
598
599 /* e32_flags + a 16-bit offset. */
600 case ENTRY16:
601 offObject = pEntry->e32_variant.e32_offset.offset16;
602 if (pfKind)
603 *pfKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_NO_TYPE;
604 break;
605
606 /* e32_flags + a 16-bit offset + a 16-bit callgate selector. */
607 case GATE16:
608 offObject = pEntry->e32_variant.e32_callgate.offset;
609 if (pfKind)
610 *pfKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_CODE;
611 break;
612
613 /* e32_flags + a 32-bit offset. */
614 case ENTRY32:
615 offObject = pEntry->e32_variant.e32_offset.offset32;
616 if (pfKind)
617 *pfKind = KLDRSYMKIND_32BIT;
618 break;
619
620 /* e32_flags + 16-bit import module ordinal + a 32-bit procname or ordinal. */
621 case ENTRYFWD:
622 return kldrModLXDoForwarderQuery(pModLX, pEntry, pfnGetForwarder, pvUser, puValue, pfKind);
623
624 default:
625 /* anyone actually using TYPEINFO will end up here. */
626 KLDRMODLX_ASSERT(!"Bad bundle type");
627 break;
628 }
629
630 /*
631 * Validate the object number and calc the return address.
632 */
633 if ( pBundle->b32_obj <= 0
634 || pBundle->b32_obj > pMod->cSegments)
635 return KLDR_ERR_LX_BAD_BUNDLE;
636 if (puValue)
637 *puValue = BaseAddress
638 + offObject
639 + pMod->aSegments[pBundle->b32_obj - 1].RVA;
640 return 0;
641 }
642
643 /*
644 * Skip the bundle.
645 */
646 if (pBundle->b32_type > ENTRYFWD)
647 {
648 KLDRMODLX_ASSERT(!"Bad type"); /** @todo figure out TYPEINFO. */
649 return KLDR_ERR_LX_BAD_BUNDLE;
650 }
651 if (pBundle->b32_type == 0)
652 pBundle = (const struct b32_bundle *)((const uint8_t *)pBundle + 2);
653 else
654 pBundle = (const struct b32_bundle *)((const uint8_t *)pBundle + s_cbEntry[pBundle->b32_type] * pBundle->b32_cnt);
655 }
656
657 return KLDR_ERR_SYMBOL_NOT_FOUND;
658}
659
660
661/**
662 * Do name lookup.
663 *
664 * @returns See kLdrModQuerySymbol.
665 * @param pModLX The module to lookup the symbol in.
666 * @param pszSymbol The symbol to lookup.
667 * @param piSymbol Where to store the symbol ordinal.
668 */
669static int kldrModLXDoNameLookup(PKLDRMODLX pModLX, const char *pszSymbol, uint32_t *piSymbol)
670{
671
672 /*
673 * First do a hash table lookup.
674 */
675
676
677
678 /*
679
680 */
681
682 return -1;
683}
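
/**
 * Editor's sketch (not part of the original source): one possible shape of the
 * missing name lookup, using only the resident name table and the disabled
 * kldrModLXDoNameTableLookupByName() helper below. The real implementation
 * presumably also wants the hash table hinted at above and the non-resident
 * name table.
 */
#if 0
static int kldrModLXDoNameLookupSketch(PKLDRMODLX pModLX, const char *pszSymbol, uint32_t *piSymbol)
{
    const uint8_t *pb = kldrModLXDoNameTableLookupByName(pModLX->pbResNameTab,
                                                         pModLX->pbLoaderSectionLast - pModLX->pbResNameTab + 1,
                                                         (char *)pszSymbol, kLdrHlpStrLen(pszSymbol));
    if (!pb)
        return KLDR_ERR_SYMBOL_NOT_FOUND;
    /* the 16-bit ordinal follows the length prefixed name. */
    *piSymbol = pb[*pb + 1] | ((uint32_t)pb[*pb + 2] << 8);
    return 0;
}
#endif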
684
685
686#if 0
687/**
688 * Hash a symbol using the algorithm from sdbm.
689 *
690 * The following is the documentation of the original sdbm functions:
691 *
692 * This algorithm was created for sdbm (a public-domain reimplementation of
693 * ndbm) database library. it was found to do well in scrambling bits,
694 * causing better distribution of the keys and fewer splits. it also happens
695 * to be a good general hashing function with good distribution. the actual
696 * function is hash(i) = hash(i - 1) * 65599 + str[i]; what is included below
697 * is the faster version used in gawk. [there is even a faster, duff-device
698 * version] the magic constant 65599 was picked out of thin air while
699 * experimenting with different constants, and turns out to be a prime.
700 * this is one of the algorithms used in berkeley db (see sleepycat) and
701 * elsewhere.
702 */
703static uint32_t kldrModLXDoHash(const char *pchSymbol, uint8_t cchSymbol)
704{
705 uint32_t hash = 0;
706 int ch;
707
708 while ( cchSymbol-- > 0
709 && (ch = *(unsigned const char *)pchSymbol++))
710 hash = ch + (hash << 6) + (hash << 16) - hash;
711
712 return hash;
713}
714
715
716/**
717 * Lookup a name table entry by name.
718 *
719 * @returns Pointer to the name table entry if found.
720 * @returns NULL if not found.
721 * @param pbNameTable Pointer to the name table that should be searched.
722 * @param cbNameTable The size of the name table.
723 * @param pchSymbol The name of the symbol we're looking for.
724 * @param cchSymbol The length of the symbol name.
725 */
726static const uint8_t *kldrModLXDoNameTableLookupByName(const uint8_t *pbNameTable, int32_t cbNameTable,
727 char *pchSymbol, size_t cchSymbol)
728{
729 /*
730 * Determine the name length up front so we can skip anything that doesn't match the length.
731 */
732 uint8_t cbSymbol8Bit = (uint8_t)cchSymbol;
733 if (cbSymbol8Bit != cchSymbol)
734 return NULL; /* too long. */
735
736 /*
737 * Walk the name table.
738 */
739 while (*pbNameTable != 0 && cbNameTable > 0)
740 {
741 const uint8_t cbName = *pbNameTable;
742
743 cbNameTable -= cbName + 1 + 2;
744 if (cbNameTable < 0)
745 break;
746
747 if ( cbName == cbSymbol8Bit
748 && !kLdrHlpMemComp(pbNameTable + 1, pchSymbol, cbName))
749 return pbNameTable;
750
751 /* next entry */
752 pbNameTable += cbName + 1 + 2;
753 }
754
755 return NULL;
756}
757#endif
758
759
760/**
761 * Deal with a forwarder entry.
762 *
763 * @returns See kLdrModQuerySymbol.
764 * @param pModLX The LX module interpreter instance.
765 * @param pEntry The forwarder entry.
766 * @param pfnGetForwarder The callback for resolving forwarder symbols. (optional)
767 * @param pvUser The user argument for the callback.
768 * @param puValue Where to put the value. (optional)
769 * @param pfKind Where to put the symbol kind. (optional)
770 */
771static int kldrModLXDoForwarderQuery(PKLDRMODLX pModLX, const struct e32_entry *pEntry,
772 PFNKLDRMODGETIMPORT pfnGetForwarder, void *pvUser, PKLDRADDR puValue, uint32_t *pfKind)
773{
774 int rc;
775 uint32_t iSymbol;
776 const char *pszSymbol;
777
778 if (!pfnGetForwarder)
779 return KLDR_ERR_FORWARDER_SYMBOL;
780
781 /*
782 * Validate the entry import module ordinal.
783 */
784 if ( !pEntry->e32_variant.e32_fwd.modord
785 || pEntry->e32_variant.e32_fwd.modord > pModLX->Hdr.e32_impmodcnt)
786 return KLDR_ERR_LX_BAD_FORWARDER;
787
788 /*
789 * Figure out the parameters.
790 */
791 if (pEntry->e32_flags & FWD_ORDINAL)
792 {
793 iSymbol = pEntry->e32_variant.e32_fwd.value;
794 pszSymbol = NULL; /* no symbol name. */
795 }
796 else
797 {
798 const uint8_t *pbName;
799
800 /* load the fixup section if necessary. */
801 if (!pModLX->pbImportProcs)
802 {
803 rc = kldrModLXDoLoadFixupSection(pModLX);
804 if (rc)
805 return rc;
806 }
807
808 /* Make name pointer. */
809 pbName = pModLX->pbImportProcs + pEntry->e32_variant.e32_fwd.value;
810 if ( pbName >= pModLX->pbFixupSectionLast
811 || pbName < pModLX->pbFixupSection
812 || !*pbName)
813 return KLDR_ERR_LX_BAD_FORWARDER;
814
815
816 /* check for '#' name. */
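 /* Editor's note: a forwarder name of "#42" (stored as the pascal string
  * 03 '#' '4' '2') means "import by ordinal 42" rather than by name. */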
817 if (pbName[1] == '#')
818 {
819 uint8_t cbLeft = *pbName;
820 const uint8_t *pb = pbName + 1;
821 unsigned uBase;
822
823 /* base detection */
824 uBase = 10;
825 if ( cbLeft > 1
826 && pb[1] == '0'
827 && (pb[2] == 'x' || pb[2] == 'X'))
828 {
829 uBase = 16;
830 pb += 2;
831 cbLeft -= 2;
832 }
833
834 /* ascii to integer */
835 iSymbol = 0;
836 while (cbLeft-- > 0)
837 {
838 /* convert char to digit. */
839 unsigned uDigit = *pb++;
840 if (uDigit >= '0' && uDigit <= '9')
841 uDigit -= '0';
842 else if (uDigit >= 'a' && uDigit <= 'z')
843 uDigit -= 'a' - 10;
844 else if (uDigit >= 'A' && uDigit <= 'Z')
845 uDigit -= 'A' - 10;
846 else if (!uDigit)
847 break;
848 else
849 return KLDR_ERR_LX_BAD_FORWARDER;
850 if (uDigit >= uBase)
851 return KLDR_ERR_LX_BAD_FORWARDER;
852
853 /* insert the digit */
854 iSymbol *= uBase;
855 iSymbol += uDigit;
856 }
857 if (!iSymbol)
858 return KLDR_ERR_LX_BAD_FORWARDER;
859
860 pszSymbol = NULL; /* no symbol name. */
861 }
862 else
863 {
864 /* Make a stack copy of the name that's zero terminated. */
865 char *pszCopy = kLdrHlpAllocA(*pbName + 1);
866 kLdrHlpMemCopy(pszCopy, pbName + 1, *pbName);
867 pszCopy[*pbName] = '\0';
868
869 pszSymbol = pszCopy;
870 iSymbol = NIL_KLDRMOD_SYM_ORDINAL;
871 }
872 }
873
874 /*
875 * Resolve the forwarder.
876 */
877 rc = pfnGetForwarder(pModLX->pMod, pEntry->e32_variant.e32_fwd.modord - 1, iSymbol, pszSymbol, puValue, pfKind, pvUser);
878 if (!rc && pfKind)
879 *pfKind |= KLDRSYMKIND_FORWARDER;
880 return rc;
881}
882
883
884/**
885 * Loads the fixup section from the executable image.
886 *
887 * The fixup section isn't loaded until it's accessed. It's also freed by kLdrModDone().
888 *
889 * @returns 0 on success, non-zero kLdr or native status code on failure.
890 * @param pModLX The LX module interpreter instance.
891 */
892static int kldrModLXDoLoadFixupSection(PKLDRMODLX pModLX)
893{
894 int rc;
895 uint32_t off;
896 void *pv;
897
898 pv = kldrHlpAlloc(pModLX->Hdr.e32_fixupsize);
899 if (!pv)
900 return KLDR_ERR_NO_MEMORY;
901
902 off = pModLX->Hdr.e32_objtab + pModLX->Hdr.e32_ldrsize;
903 rc = kLdrRdrRead(pModLX->pMod->pRdr, pv, pModLX->Hdr.e32_fixupsize,
904 off + pModLX->offHdr);
905 if (!rc)
906 {
907 pModLX->pbFixupSection = pv;
908 pModLX->pbFixupSectionLast = pModLX->pbFixupSection + pModLX->Hdr.e32_fixupsize;
909 KLDRMODLX_ASSERT(!pModLX->paoffPageFixups);
910 if (pModLX->Hdr.e32_fpagetab)
911 pModLX->paoffPageFixups = (const uint32_t *)(pModLX->pbFixupSection + pModLX->Hdr.e32_fpagetab - off);
912 KLDRMODLX_ASSERT(!pModLX->pbFixupRecs);
913 if (pModLX->Hdr.e32_frectab)
914 pModLX->pbFixupRecs = pModLX->pbFixupSection + pModLX->Hdr.e32_frectab - off;
915 KLDRMODLX_ASSERT(!pModLX->pbImportMods);
916 if (pModLX->Hdr.e32_impmod)
917 pModLX->pbImportMods = pModLX->pbFixupSection + pModLX->Hdr.e32_impmod - off;
918 KLDRMODLX_ASSERT(!pModLX->pbImportProcs);
919 if (pModLX->Hdr.e32_impproc)
920 pModLX->pbImportProcs = pModLX->pbFixupSection + pModLX->Hdr.e32_impproc - off;
921 }
922 else
923 kldrHlpFree(pv);
924 return rc;
925}
926
927
928/** @copydoc kLdrModEnumSymbols */
929static int kldrModLXEnumSymbols(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress,
930 uint32_t fFlags, PFNKLDRMODENUMSYMS pfnCallback, void *pvUser)
931{
932 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
933/* int rc; */
934
935 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
936
937 return 0;
938}
939
940
941/**
942 * Lookup a name table entry by ordinal.
943 *
944 * @returns Pointer to the name table entry if found.
945 * @returns NULL if not found.
946 * @param pbNameTable Pointer to the name table that should be searched.
947 * @param cbNameTable The size of the name table.
948 * @param iOrdinal The ordinal to search for.
949 */
950static const uint8_t *kldrModLXDoNameTableLookupByOrdinal(const uint8_t *pbNameTable, int32_t cbNameTable, uint32_t iOrdinal)
951{
952 while (*pbNameTable != 0 && cbNameTable > 0)
953 {
954 const uint8_t cbName = *pbNameTable;
955 uint32_t iName;
956
957 cbNameTable -= cbName + 1 + 2;
958 if (cbNameTable < 0)
959 break;
960
961 iName = *(pbNameTable + cbName + 1)
962 | ((unsigned)*(pbNameTable + cbName + 2) << 8);
963 if (iName == iOrdinal)
964 return pbNameTable;
965
966 /* next entry */
967 pbNameTable += cbName + 1 + 2;
968 }
969
970 return NULL;
971}
972
973
974/** @copydoc kLdrModGetImport */
975static int kldrModLXGetImport(PKLDRMOD pMod, const void *pvBits, uint32_t iImport, char *pszName, size_t cchName)
976{
977 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
978 const uint8_t * pb;
979 int rc;
980
981 /*
982 * Validate
983 */
984 if (iImport >= pModLX->Hdr.e32_impmodcnt)
985 return KLDR_ERR_IMPORT_ORDINAL_OUT_OF_BOUNDS;
986
987 /*
988 * Lazy loading the fixup section.
989 */
990 if (!pModLX->pbImportMods)
991 {
992 rc = kldrModLXDoLoadFixupSection(pModLX);
993 if (rc)
994 return rc;
995 }
996
997 /*
998 * Iterate the module import table until we reach the requested import ordinal.
999 */
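 /* Editor's note: the table is a sequence of length prefixed strings, e.g.
  *      03 'D' 'O' 'S'  07 'K' 'E' 'R' 'N' 'E' 'L' '3'   (hypothetical names)
  * so iImport 0 yields "DOS" and iImport 1 yields "KERNEL3". */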
1000 pb = pModLX->pbImportMods;
1001 while (iImport-- > 0)
1002 pb += *pb + 1;
1003
1004 /*
1005 * Copy out the result.
1006 */
1007 if (*pb < cchName)
1008 {
1009 kLdrHlpMemCopy(pszName, pb + 1, *pb);
1010 pszName[*pb] = '\0';
1011 rc = 0;
1012 }
1013 else
1014 {
1015 kLdrHlpMemCopy(pszName, pb + 1, cchName);
1016 if (cchName)
1017 pszName[cchName - 1] = '\0';
1018 rc = KLDR_ERR_BUFFER_OVERFLOW;
1019 }
1020
1021 return rc;
1022}
1023
1024
1025/** @copydoc kLdrModNumberOfImports */
1026static int32_t kldrModLXNumberOfImports(PKLDRMOD pMod, const void *pvBits)
1027{
1028 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1029 return pModLX->Hdr.e32_impmodcnt;
1030}
1031
1032
1033/** @copydoc kLdrModGetStackInfo */
1034static int kldrModLXGetStackInfo(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress, PKLDRSTACKINFO pStackInfo)
1035{
1036 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1037 const uint32_t i = pModLX->Hdr.e32_stackobj;
1038
1039 if ( i
1040 && i <= pMod->cSegments
1041 && pModLX->Hdr.e32_esp <= pMod->aSegments[i - 1].LinkAddress + pMod->aSegments[i - 1].cb
1042 && pModLX->Hdr.e32_stacksize
1043 && pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize >= pMod->aSegments[i - 1].LinkAddress)
1044 {
1045
1046 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
1047 pStackInfo->LinkAddress = pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize;
1048 pStackInfo->Address = BaseAddress
1049 + pMod->aSegments[i - 1].RVA
1050 + pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize - pMod->aSegments[i - 1].LinkAddress;
1051 }
1052 else
1053 {
1054 pStackInfo->Address = NIL_KLDRADDR;
1055 pStackInfo->LinkAddress = NIL_KLDRADDR;
1056 }
1057 pStackInfo->cbStack = pModLX->Hdr.e32_stacksize;
1058 pStackInfo->cbStackThread = 0;
1059
1060 return 0;
1061}
1062
1063
1064/** @copydoc kLdrModQueryMainEntrypoint */
1065static int kldrModLXQueryMainEntrypoint(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress, PKLDRADDR pMainEPAddress)
1066{
1067 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1068
1069 /*
1070 * Convert the address from the header.
1071 */
1072 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
1073 *pMainEPAddress = pModLX->Hdr.e32_startobj
1074 && pModLX->Hdr.e32_startobj <= pMod->cSegments
1075 && pModLX->Hdr.e32_eip < pMod->aSegments[pModLX->Hdr.e32_startobj - 1].cb
1076 ? BaseAddress + pMod->aSegments[pModLX->Hdr.e32_startobj - 1].RVA + pModLX->Hdr.e32_eip
1077 : NIL_KLDRADDR;
1078 return 0;
1079}
1080
1081
1082/** @copydoc kLdrModEnumDbgInfo */
1083static int kldrModLXEnumDbgInfo(PKLDRMOD pMod, const void *pvBits, PFNKLDRENUMDBG pfnCallback, void *pvUser)
1084{
1085 /*PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;*/
1086
1087 /*
1088 * Quit immediately if no debug info.
1089 */
1090 if (kldrModLXHasDbgInfo(pMod, pvBits))
1091 return 0;
1092#if 0
1093 /*
1094 * Read the debug info and look for familiar magics and structures.
1095 */
1096 /** @todo */
1097#endif
1098
1099 return 0;
1100}
1101
1102
1103/** @copydoc kLdrModHasDbgInfo */
1104static int kldrModLXHasDbgInfo(PKLDRMOD pMod, const void *pvBits)
1105{
1106 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1107
1108 /*
1109 * Don't currently bother with linkers which don't advertise it in the header.
1110 */
1111 if ( !pModLX->Hdr.e32_debuginfo
1112 || !pModLX->Hdr.e32_debuglen)
1113 return KLDR_ERR_NO_DEBUG_INFO;
1114 return 0;
1115}
1116
1117
1118/** @copydoc kLdrModMap */
1119static int kldrModLXMap(PKLDRMOD pMod)
1120{
1121 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1122 unsigned fFixed;
1123 void *pvBase;
1124 int rc;
1125
1126 /*
1127 * Already mapped?
1128 */
1129 if (pModLX->pvMapping)
1130 return KLDR_ERR_ALREADY_MAPPED;
1131
1132 /*
1133 * Allocate memory for it.
1134 */
1135 /* fixed image? */
1136 fFixed = pMod->enmType == KLDRTYPE_EXECUTABLE_FIXED
1137 || pMod->enmType == KLDRTYPE_SHARED_LIBRARY_FIXED;
1138 if (!fFixed)
1139 pvBase = NULL;
1140 else
1141 {
1142 pvBase = (void *)(uintptr_t)pMod->aSegments[0].LinkAddress;
1143 if ((uintptr_t)pvBase != pMod->aSegments[0].LinkAddress)
1144 return KLDR_ERR_ADDRESS_OVERFLOW;
1145 }
1146 rc = kldrHlpPageAlloc(&pvBase, pModLX->cbMapped, KLDRPROT_EXECUTE_READWRITE, fFixed);
1147 if (rc)
1148 return rc;
1149
1150 /*
1151 * Load the bits, apply page protection, and update the segment table.
1152 */
1153 rc = kldrModLXDoLoadBits(pModLX, pvBase);
1154 if (!rc)
1155 rc = kldrModLXDoProtect(pModLX, pvBase, 0 /* protect */);
1156 if (!rc)
1157 {
1158 uint32_t i;
1159 for (i = 0; i < pMod->cSegments; i++)
1160 {
1161 if (pMod->aSegments[i].RVA != NIL_KLDRADDR)
1162 pMod->aSegments[i].MapAddress = (uintptr_t)pvBase + (uintptr_t)pMod->aSegments[i].RVA;
1163 }
1164 pModLX->pvMapping = pvBase;
1165 }
1166 else
1167 kldrHlpPageFree(pvBase, pModLX->cbMapped);
1168 return rc;
1169}
1170
1171
1172/**
1173 * Loads the LX pages into the specified memory mapping.
1174 *
1175 * @returns 0 on success.
1176 * @returns non-zero kLdr or OS status code on failure.
1177 *
1178 * @param pModLX The LX module interpreter instance.
1179 * @param pvBits Where to load the bits.
1180 */
1181static int kldrModLXDoLoadBits(PKLDRMODLX pModLX, void *pvBits)
1182{
1183 const PKLDRRDR pRdr = pModLX->pMod->pRdr;
1184 uint8_t *pbTmpPage = NULL;
1185 int rc = 0;
1186 uint32_t i;
1187
1188 /*
1189 * Iterate the segments.
1190 */
1191 for (i = 0; i < pModLX->Hdr.e32_objcnt; i++)
1192 {
1193 const struct o32_obj * const pObj = &pModLX->paObjs[i];
1194 const uint32_t cPages = pModLX->pMod->aSegments[i].cbMapped / OBJPAGELEN;
1195 uint32_t iPage;
1196 uint8_t *pbPage = (uint8_t *)pvBits + (uintptr_t)pModLX->pMod->aSegments[i].RVA;
1197
1198 /*
1199 * Iterate the page map pages.
1200 */
1201 for (iPage = 0; !rc && iPage < pObj->o32_mapsize; iPage++, pbPage += OBJPAGELEN)
1202 {
1203 const struct o32_map *pMap = &pModLX->paPageMappings[iPage + pObj->o32_pagemap - 1];
1204 switch (pMap->o32_pageflags)
1205 {
1206 case VALID:
1207 if (pMap->o32_pagesize == OBJPAGELEN)
1208 rc = kLdrRdrRead(pRdr, pbPage, OBJPAGELEN,
1209 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1210 else if (pMap->o32_pagesize < OBJPAGELEN)
1211 {
1212 rc = kLdrRdrRead(pRdr, pbPage, pMap->o32_pagesize,
1213 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1214 kLdrHlpMemSet(pbPage + pMap->o32_pagesize, 0, OBJPAGELEN - pMap->o32_pagesize);
1215 }
1216 else
1217 rc = KLDR_ERR_LX_BAD_PAGE_MAP;
1218 break;
1219
1220 case ITERDATA:
1221 case ITERDATA2:
1222 /* make sure we've got a temp page. */
1223 if (!pbTmpPage)
1224 {
1225 pbTmpPage = kldrHlpAlloc(OBJPAGELEN + 256);
1226 if (!pbTmpPage)
1227 break;
1228 }
1229 /* validate the size. */
1230 if (pMap->o32_pagesize > OBJPAGELEN + 252)
1231 {
1232 rc = KLDR_ERR_LX_BAD_PAGE_MAP;
1233 break;
1234 }
1235
1236 /* read it and ensure 4 extra zero bytes. */
1237 rc = kLdrRdrRead(pRdr, pbTmpPage, pMap->o32_pagesize,
1238 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1239 if (rc)
1240 break;
1241 kLdrHlpMemSet(pbTmpPage + pMap->o32_pagesize, 0, 4);
1242
1243 /* unpack it into the image page. */
1244 if (pMap->o32_pageflags == ITERDATA2)
1245 rc = kldrModLXDoIterData2Unpacking(pbPage, pbTmpPage, pMap->o32_pagesize);
1246 else
1247 rc = kldrModLXDoIterDataUnpacking(pbPage, pbTmpPage, pMap->o32_pagesize);
1248 break;
1249
1250 case INVALID: /* we're probably not dealing correctly with INVALID pages... */
1251 case ZEROED:
1252 kLdrHlpMemSet(pbPage, 0, OBJPAGELEN);
1253 break;
1254
1255 case RANGE:
1256 KLDRMODLX_ASSERT(!"RANGE");
1257 default:
1258 rc = KLDR_ERR_LX_BAD_PAGE_MAP;
1259 break;
1260 }
1261 }
1262 if (rc)
1263 break;
1264
1265 /*
1266 * Zero the remaining pages.
1267 */
1268 if (iPage < cPages)
1269 kLdrHlpMemSet(pbPage, 0, (cPages - iPage) * OBJPAGELEN);
1270 }
1271
1272 if (pbTmpPage)
1273 kldrHlpFree(pbTmpPage);
1274 return rc;
1275}
1276
1277
1278/**
1279 * Unpacks iterdata (aka EXEPACK).
1280 *
1281 * @returns 0 on success, non-zero kLdr status code on failure.
1282 * @param pbDst Where to put the uncompressed data. (Assumes OBJPAGELEN size.)
1283 * @param pbSrc The compressed source data.
1284 * @param cbSrc The file size of the compressed data. The source buffer
1285 * contains 4 additional zero bytes.
1286 */
1287static int kldrModLXDoIterDataUnpacking(uint8_t *pbDst, const uint8_t *pbSrc, int cbSrc)
1288{
1289 const struct LX_Iter *pIter = (const struct LX_Iter *)pbSrc;
1290 int cbDst = OBJPAGELEN;
1291
1292 /* Validate size of data. */
1293 if (cbSrc >= OBJPAGELEN - 2)
1294 return KLDR_ERR_LX_BAD_ITERDATA;
1295
1296 /*
1297 * Expand the page.
1298 */
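 /* Editor's note: an iteration record is a 4 byte header (LX_nIter, LX_nBytes)
  * followed by LX_nBytes of data which is written out LX_nIter times, e.g.
  * nIter=8, nBytes=2, data "AB" expands to the 16 bytes "ABABABABABABABAB". */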
1299 while (cbSrc > 0 && pIter->LX_nIter)
1300 {
1301 if (pIter->LX_nBytes == 1)
1302 {
1303 /*
1304 * Special case - one databyte.
1305 */
1306 cbDst -= pIter->LX_nIter;
1307 if (cbDst < 0)
1308 return KLDR_ERR_LX_BAD_ITERDATA;
1309
1310 cbSrc -= 4 + 1;
1311 if (cbSrc < -4)
1312 return KLDR_ERR_LX_BAD_ITERDATA;
1313
1314 kLdrHlpMemSet(pbDst, pIter->LX_Iterdata, pIter->LX_nIter);
1315 pbDst += pIter->LX_nIter;
1316 pIter++;
1317 }
1318 else
1319 {
1320 /*
1321 * General.
1322 */
1323 int i;
1324
1325 cbDst -= pIter->LX_nIter * pIter->LX_nBytes;
1326 if (cbDst < 0)
1327 return KLDR_ERR_LX_BAD_ITERDATA;
1328
1329 cbSrc -= 4 + pIter->LX_nBytes;
1330 if (cbSrc < -4)
1331 return KLDR_ERR_LX_BAD_ITERDATA;
1332
1333 for (i = pIter->LX_nIter; i > 0; i--, pbDst += pIter->LX_nBytes)
1334 kLdrHlpMemCopy(pbDst, &pIter->LX_Iterdata, pIter->LX_nBytes);
1335 pIter = (struct LX_Iter *)((char*)pIter + 4 + pIter->LX_nBytes);
1336 }
1337 }
1338
1339 /*
1340 * Zero remainder of the page.
1341 */
1342 if (cbDst > 0)
1343 kLdrHlpMemSet(pbDst, 0, cbDst);
1344
1345 return 0;
1346}
1347
1348
1349/**
1350 * Unpacks iterdata (aka EXEPACK).
1351 *
1352 * @returns 0 on success, non-zero kLdr status code on failure.
1353 * @param pbDst Where to put the uncompressed data. (Assumes OBJPAGELEN size.)
1354 * @param pbSrc The compressed source data.
1355 * @param cbSrc The file size of the compressed data. The source buffer
1356 * contains 4 additional zero bytes.
1357 */
1358static int kldrModLXDoIterData2Unpacking(uint8_t *pbDst, const uint8_t *pbSrc, int cbSrc)
1359{
1360 int cbDst = OBJPAGELEN;
1361
1362 while (cbSrc > 0)
1363 {
1364 /*
1365 * Bits 0 and 1 are the encoding type.
1366 */
1367 switch (*pbSrc & 0x03)
1368 {
1369 /*
1370 *
1371 * 0 1 2 3 4 5 6 7
1372 * type | |
1373 * ----------------
1374 * cb <cb bytes of data>
1375 *
1376 * Bits 2-7 are, if not zero, the length of an uncompressed run
1377 * starting at the following byte.
1378 *
1379 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
1380 * type | | | | | |
1381 * ---------------- ---------------------- -----------------------
1382 * zero cb char to multiply
1383 *
1384 * If the bits are zero, the following two bytes describe a 1-byte iteration
1385 * run. The first byte is the count, the second is the byte to copy. A count
1386 * of zero means end of data, and we simply stop. In that case the rest of
1387 * the data should be zero.
1388 */
1389 case 0:
1390 {
1391 if (*pbSrc)
1392 {
1393 const int cb = *pbSrc >> 2;
1394 cbDst -= cb;
1395 if (cbDst < 0)
1396 return KLDR_ERR_LX_BAD_ITERDATA2;
1397 cbSrc -= cb;
1398 if (cbSrc < 0)
1399 return KLDR_ERR_LX_BAD_ITERDATA2;
1400 kLdrHlpMemCopy(pbDst, ++pbSrc, cb);
1401 pbDst += cb;
1402 pbSrc += cb;
1403 }
1404 else if (cbSrc < 2)
1405 return KLDR_ERR_LX_BAD_ITERDATA2;
1406 else
1407 {
1408 const int cb = pbSrc[1];
1409 if (!cb)
1410 goto l_endloop;
1411 cbDst -= cb;
1412 if (cbDst < 0)
1413 return KLDR_ERR_LX_BAD_ITERDATA2;
1414 cbSrc -= 3;
1415 if (cbSrc < 0)
1416 return KLDR_ERR_LX_BAD_ITERDATA2;
1417 kLdrHlpMemSet(pbDst, pbSrc[2], cb);
1418 pbDst += cb;
1419 pbSrc += 3;
1420 }
1421 break;
1422 }
1423
1424
1425 /*
1426 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1427 * type | | | | | |
1428 * ---- ------- -------------------------
1429 * cb1 cb2 - 3 offset <cb1 bytes of data>
1430 *
1431 * Two bytes laid out as described above, followed by cb1 bytes of data to be copied.
1432 * The cb2(+3) and offset describe an amount of data to be copied from the expanded
1433 * data relative to the current position. The data is copied as you would expect.
1434 */
1435 case 1:
1436 {
1437 cbSrc -= 2;
1438 if (cbSrc < 0)
1439 return KLDR_ERR_LX_BAD_ITERDATA2;
1440 else
1441 {
1442 const unsigned off = ((unsigned)pbSrc[1] << 1) | (*pbSrc >> 7);
1443 const int cb1 = (*pbSrc >> 2) & 3;
1444 const int cb2 = ((*pbSrc >> 4) & 7) + 3;
1445
1446 pbSrc += 2;
1447 cbSrc -= cb1;
1448 if (cbSrc < 0)
1449 return KLDR_ERR_LX_BAD_ITERDATA2;
1450 cbDst -= cb1;
1451 if (cbDst < 0)
1452 return KLDR_ERR_LX_BAD_ITERDATA2;
1453 kLdrHlpMemCopy(pbDst, pbSrc, cb1);
1454 pbDst += cb1;
1455
1456 if (off > OBJPAGELEN - cbDst)
1457 return KLDR_ERR_LX_BAD_ITERDATA2;
1458 cbDst -= cb2;
1459 if (cbDst < 0)
1460 return KLDR_ERR_LX_BAD_ITERDATA2;
1461 kLdrHlpMemMove(pbDst, pbDst - off, cb2);
1462 pbDst += cb2;
1463 }
1464 break;
1465 }
1466
1467
1468 /*
1469 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1470 * type | | | |
1471 * ---- ----------------------------------
1472 * cb-3 offset
1473 *
1474 * Two bytes laid out as described above.
1475 * The cb(+3) and offset describe an amount of data to be copied from the expanded
1476 * data relative to the current position.
1477 *
1478 * If offset == 1 the data is not copied as expected, but in the memcpyw manner.
1479 */
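 /* Worked example (editor's sketch): the two bytes 06 01 decode to type 2,
  * cb = ((0x06 >> 2) & 3) + 3 = 4 and offset = (0x01 << 4) | (0x06 >> 4) = 16,
  * i.e. copy 4 bytes starting 16 bytes back in the already expanded output. */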
1480 case 2:
1481 {
1482 cbSrc -= 2;
1483 if (cbSrc < 0)
1484 return KLDR_ERR_LX_BAD_ITERDATA2;
1485 else
1486 {
1487 const unsigned off = ((unsigned)pbSrc[1] << 4) | (*pbSrc >> 4);
1488 const int cb = ((*pbSrc >> 2) & 3) + 3;
1489
1490 pbSrc += 2;
1491 if (off > OBJPAGELEN - cbDst)
1492 return KLDR_ERR_LX_BAD_ITERDATA2;
1493 cbDst -= cb;
1494 if (cbDst < 0)
1495 return KLDR_ERR_LX_BAD_ITERDATA2;
1496 kLdrModLXMemCopyW(pbDst, pbDst - off, cb);
1497 pbDst += cb;
1498 }
1499 break;
1500 }
1501
1502
1503 /*
1504 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
1505 * type | | | | | |
1506 * ---------- ---------------- ----------------------------------
1507 * cb1 cb2 offset <cb1 bytes of data>
1508 *
1509 * Three bytes laid out as described above, followed by cb1 bytes of data to be copied.
1510 * The cb2 and offset describe an amount of data to be copied from the expanded
1511 * data relative to the current position.
1512 *
1513 * If offset == 1 the data is not copied as expected, but in the memcpyw manner.
1514 */
1515 case 3:
1516 {
1517 cbSrc -= 3;
1518 if (cbSrc < 0)
1519 return KLDR_ERR_LX_BAD_ITERDATA2;
1520 else
1521 {
1522 const int cb1 = (*pbSrc >> 2) & 0xf;
1523 const int cb2 = ((pbSrc[1] & 0xf) << 2) | (*pbSrc >> 6);
1524 const unsigned off = ((unsigned)pbSrc[2] << 4) | (pbSrc[1] >> 4);
1525
1526 pbSrc += 3;
1527 cbSrc -= cb1;
1528 if (cbSrc < 0)
1529 return KLDR_ERR_LX_BAD_ITERDATA2;
1530 cbDst -= cb1;
1531 if (cbDst < 0)
1532 return KLDR_ERR_LX_BAD_ITERDATA2;
1533 kLdrHlpMemCopy(pbDst, pbSrc, cb1);
1534 pbDst += cb1;
1535
1536 if (off > OBJPAGELEN - cbDst)
1537 return KLDR_ERR_LX_BAD_ITERDATA2;
1538 cbDst -= cb2;
1539 if (cbDst < 0)
1540 return KLDR_ERR_LX_BAD_ITERDATA2;
1541 kLdrModLXMemCopyW(pbDst, pbDst - off, cb2);
1542 pbDst += cb2;
1543 }
1544 break;
1545 }
1546 } /* type switch. */
1547 } /* unpack loop */
1548
1549l_endloop:
1550
1551
1552 /*
1553 * Zero remainder of the page.
1554 */
1555 if (cbDst > 0)
1556 kLdrHlpMemSet(pbDst, 0, cbDst);
1557
1558 return 0;
1559}
1560
1561
1562/**
1563 * Special memcpy employed by the iterdata2 algorithm.
1564 *
1565 * Emulate a 16-bit memcpy (copying 16 bits at a time) and the effects this
1566 * has if src is very close to the destination.
1567 *
1568 * @param pbDst Destination pointer.
1569 * @param pbSrc Source pointer. Will always be <= pbDst.
1570 * @param cb Amount of data to be copied.
1571 * @remark This assumes that unaligned word and dword access is fine.
1572 */
1573static void kLdrModLXMemCopyW(uint8_t *pbDst, const uint8_t *pbSrc, int cb)
1574{
1575 switch (pbDst - pbSrc)
1576 {
1577 case 0:
1578 case 1:
1579 case 2:
1580 case 3:
1581 /* 16-bit copy (unaligned) */
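 /* Editor's note: with the source overlapping the destination this closely,
  * each 16-bit store re-reads bytes written by an earlier one; an offset of 2,
  * for instance, replicates a two byte pattern across the destination, just as
  * a word-wise rep movsw would have done. */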
1582 if (cb & 1)
1583 *pbDst++ = *pbSrc++;
1584 for (cb >>= 1; cb > 0; cb--, pbDst += 2, pbSrc += 2)
1585 *(uint16_t *)pbDst = *(const uint16_t *)pbSrc;
1586 break;
1587
1588 default:
1589 /* 32-bit copy (unaligned) */
1590 if (cb & 1)
1591 *pbDst++ = *pbSrc++;
1592 if (cb & 2)
1593 {
1594 *(uint16_t *)pbDst = *(const uint16_t *)pbSrc;
1595 pbDst += 2;
1596 pbSrc += 2;
1597 }
1598 for (cb >>= 2; cb > 0; cb--, pbDst += 4, pbSrc += 4)
1599 *(uint32_t *)pbDst = *(const uint32_t *)pbSrc;
1600 break;
1601 }
1602}
1603
1604
1605/**
1606 * Unprotects or protects the specified image mapping.
1607 *
1608 * @returns 0 on success.
1609 * @returns non-zero kLdr or OS status code on failure.
1610 *
1611 * @param pModLX The LX module interpreter instance.
1612 * @param pvBits The mapping to protect.
1613 * @param fUnprotectOrProtect If 1 unprotect (i.e. make all writable), otherwise
1614 * protect according to the object table.
1615 */
1616static int kldrModLXDoProtect(PKLDRMODLX pModLX, void *pvBits, unsigned fUnprotectOrProtect)
1617{
1618 uint32_t i;
1619 PKLDRMOD pMod = pModLX->pMod;
1620
1621 /*
1622 * Change object protection.
1623 */
1624 for (i = 0; i < pMod->cSegments; i++)
1625 {
1626 int rc;
1627 void *pv;
1628 KLDRPROT enmProt;
1629
1630 /* calc new protection. */
1631 enmProt = pMod->aSegments[i].enmProt;
1632 if (fUnprotectOrProtect)
1633 {
1634 switch (enmProt)
1635 {
1636 case KLDRPROT_NOACCESS:
1637 case KLDRPROT_READONLY:
1638 case KLDRPROT_READWRITE:
1639 case KLDRPROT_WRITECOPY:
1640 enmProt = KLDRPROT_READWRITE;
1641 break;
1642 case KLDRPROT_EXECUTE:
1643 case KLDRPROT_EXECUTE_READ:
1644 case KLDRPROT_EXECUTE_READWRITE:
1645 case KLDRPROT_EXECUTE_WRITECOPY:
1646 enmProt = KLDRPROT_EXECUTE_READWRITE;
1647 break;
1648 default:
1649 KLDRMODLX_ASSERT(!"bad enmProt");
1650 return -1;
1651 }
1652 }
1653 else
1654 {
1655 /* copy on write -> normal write. */
1656 if (enmProt == KLDRPROT_EXECUTE_WRITECOPY)
1657 enmProt = KLDRPROT_EXECUTE_READWRITE;
1658 else if (enmProt == KLDRPROT_WRITECOPY)
1659 enmProt = KLDRPROT_READWRITE;
1660 }
1661
1662
1663 /* calc the address and set page protection. */
1664 pv = (uint8_t *)pvBits + pMod->aSegments[i].RVA;
1665
1666 rc = kldrHlpPageProtect(pv, pMod->aSegments[i].cbMapped, enmProt);
1667 if (rc)
1668 break;
1669
1670 /** @todo the gap page should be marked NOACCESS! */
1671 }
1672
1673 return 0;
1674}
1675
1676
1677/** @copydoc kLdrModUnmap */
1678static int kldrModLXUnmap(PKLDRMOD pMod)
1679{
1680 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1681 uint32_t i;
1682 int rc;
1683
1684 /*
1685 * Mapped?
1686 */
1687 if (!pModLX->pvMapping)
1688 return KLDR_ERR_NOT_MAPPED;
1689
1690 /*
1691 * Free the mapping and update the segments.
1692 */
1693 rc = kldrHlpPageFree((void *)pModLX->pvMapping, pModLX->cbMapped);
1694 KLDRMODLX_ASSERT(!rc);
1695 pModLX->pvMapping = NULL;
1696
1697 for (i = 0; i < pMod->cSegments; i++)
1698 pMod->aSegments[i].MapAddress = 0;
1699
1700 return rc;
1701}
1702
1703
1704/** @copydoc kLdrModAllocTLS */
1705static int kldrModLXAllocTLS(PKLDRMOD pMod)
1706{
1707 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1708
1709 /* no tls, just do the error checking. */
1710 if (!pModLX->pvMapping)
1711 return KLDR_ERR_NOT_MAPPED;
1712 return 0;
1713}
1714
1715
1716/** @copydoc kLdrModFreeTLS */
1717static void kldrModLXFreeTLS(PKLDRMOD pMod)
1718{
1719 /* no tls. */
1720}
1721
1722
1723/** @copydoc kLdrModReload */
1724static int kldrModLXReload(PKLDRMOD pMod)
1725{
1726 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1727 int rc, rc2;
1728
1729 /*
1730 * Mapped?
1731 */
1732 if (!pModLX->pvMapping)
1733 return KLDR_ERR_NOT_MAPPED;
1734
1735 /*
1736 * Before doing anything we'll have to make all pages writable.
1737 */
1738 rc = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 1 /* unprotect */);
1739 if (rc)
1740 return rc;
1741
1742 /*
1743 * Load the bits again.
1744 */
1745 rc = kldrModLXDoLoadBits(pModLX, (void *)pModLX->pvMapping);
1746
1747 /*
1748 * Restore protection.
1749 */
1750 rc2 = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 0 /* protect */);
1751 if (!rc && rc2)
1752 rc = rc2;
1753 return rc;
1754}
1755
1756
1757/** @copydoc kLdrModFixupMapping */
1758static int kldrModLXFixupMapping(PKLDRMOD pMod, PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser)
1759{
1760 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1761 int rc, rc2;
1762
1763 /*
1764 * Mapped?
1765 */
1766 if (!pModLX->pvMapping)
1767 return KLDR_ERR_NOT_MAPPED;
1768
1769 /*
1770 * Before doing anything we'll have to make all pages writable.
1771 */
1772 rc = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 1 /* unprotect */);
1773 if (rc)
1774 return rc;
1775
1776 /*
1777 * Apply fixups and resolve imports.
1778 */
1779 rc = kldrModLXRelocateBits(pMod, (void *)pModLX->pvMapping, (uintptr_t)pModLX->pvMapping,
1780 pMod->aSegments[0].LinkAddress, pfnGetImport, pvUser);
1781
1782 /*
1783 * Restore protection.
1784 */
1785 rc2 = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 0 /* protect */);
1786 if (!rc && rc2)
1787 rc = rc2;
1788 return rc;
1789}
1790
1791
1792/** @copydoc kLdrModCallInit */
1793static int kldrModLXCallInit(PKLDRMOD pMod, uintptr_t uHandle)
1794{
1795 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1796 int rc;
1797
1798 /*
1799 * Mapped?
1800 */
1801 if (!pModLX->pvMapping)
1802 return KLDR_ERR_NOT_MAPPED;
1803
1804 /*
1805 * Do TLS callbacks first and then call the init/term function if it's a DLL.
1806 */
1807 if ((pModLX->Hdr.e32_mflags & E32MODMASK) == E32MODDLL)
1808 rc = kldrModLXDoCallDLL(pModLX, 0 /* attach */, uHandle);
1809 else
1810 rc = 0;
1811 return rc;
1812}
1813
1814
1815/**
1816 * Call the DLL entrypoint.
1817 *
1818 * @returns 0 on success.
1819 * @returns KLDR_ERR_MODULE_INIT_FAILED or KLDR_ERR_THREAD_ATTACH_FAILED on failure.
1820 * @param pModLX The LX module interpreter instance.
1821 * @param uOp The operation (DLL_*).
1822 * @param uHandle The module handle to present.
1823 */
1824static int kldrModLXDoCallDLL(PKLDRMODLX pModLX, unsigned uOp, uintptr_t uHandle)
1825{
1826 int rc;
1827
1828 /*
1829 * If no entrypoint there isn't anything to be done.
1830 */
1831 if ( !pModLX->Hdr.e32_startobj
1832 || pModLX->Hdr.e32_startobj > pModLX->Hdr.e32_objcnt)
1833 return 0;
1834
1835 /*
1836 * Invoke the entrypoint and convert the boolean result to a kLdr status code.
1837 */
1838 rc = kldrModLXDoCall((uintptr_t)pModLX->pvMapping
1839 + (uintptr_t)pModLX->pMod->aSegments[pModLX->Hdr.e32_startobj - 1].RVA
1840 + pModLX->Hdr.e32_eip,
1841 uHandle, uOp, NULL);
1842 if (rc)
1843 rc = 0;
1844 else if (uOp == 0 /* attach */)
1845 rc = KLDR_ERR_MODULE_INIT_FAILED;
1846 else /* detach: ignore failures */
1847 rc = 0;
1848 return rc;
1849}
1850
1851
1852/**
1853 * Do a 3 parameter callback.
1854 *
1855 * @returns 32-bit callback return.
1856 * @param uEntrypoint The address of the function to be called.
1857 * @param uHandle The first argument, the module handle.
1858 * @param uOp The second argument, the reason we're calling.
1859 * @param pvReserved The third argument, reserved argument. (figure this one out)
1860 */
1861static int32_t kldrModLXDoCall(uintptr_t uEntrypoint, uintptr_t uHandle, uint32_t uOp, void *pvReserved)
1862{
1863#if defined(__X86__) || defined(__i386__) || defined(_M_IX86)
1864 int32_t rc;
1865/** @todo try/except */
1866
1867 /*
1868 * Paranoia.
1869 */
1870# ifdef __GNUC__
1871 __asm__ __volatile__(
1872 "pushl %2\n\t"
1873 "pushl %1\n\t"
1874 "pushl %0\n\t"
1875 "lea 12(%%esp), %2\n\t"
1876 "call *%3\n\t"
1877 "movl %2, %%esp\n\t"
1878 : "=a" (rc)
1879 : "d" (uOp),
1880 "S" (0),
1881 "c" (uEntrypoint),
1882 "0" (uHandle));
1883# elif defined(_MSC_VER)
1884 __asm {
1885 mov eax, [uHandle]
1886 mov edx, [uOp]
1887 mov ecx, 0
1888 mov ebx, [uEntrypoint]
1889 push edi
1890 mov edi, esp
1891 push ecx
1892 push edx
1893 push eax
1894 call ebx
1895 mov esp, edi
1896 pop edi
1897 mov [rc], eax
1898 }
1899# else
1900# error "port me!"
1901# endif
1902 return rc;
1903
1904#else
1905 return KLDR_ERR_ARCH_CPU_NOT_COMPATIBLE;
1906#endif
1907}
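
/*
 * Editor's sketch (not part of the original source): seen from C, the entrypoint
 * invoked by kldrModLXDoCall() above is assumed to behave like the typedef below
 * (the typedef name is made up for illustration): the three arguments are pushed
 * right to left, the result comes back in eax, and a zero return from the attach
 * call makes kldrModLXDoCallDLL() fail with KLDR_ERR_MODULE_INIT_FAILED.
 */
#if 0
typedef int32_t (*PFNLXENTRY)(uintptr_t uHandle, uint32_t uOp, void *pvReserved);
#endif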
1908
1909
1910/** @copydoc kLdrModCallTerm */
1911static int kldrModLXCallTerm(PKLDRMOD pMod, uintptr_t uHandle)
1912{
1913 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1914
1915 /*
1916 * Mapped?
1917 */
1918 if (!pModLX->pvMapping)
1919 return KLDR_ERR_NOT_MAPPED;
1920
1921 /*
1922 * Do the call.
1923 */
1924 if ((pModLX->Hdr.e32_mflags & E32MODMASK) == E32MODDLL)
1925 kldrModLXDoCallDLL(pModLX, 1 /* detach */, uHandle);
1926
1927 return 0;
1928}
1929
1930
1931/** @copydoc kLdrModCallThread */
1932static int kldrModLXCallThread(PKLDRMOD pMod, uintptr_t uHandle, unsigned fAttachingOrDetaching)
1933{
1934 /* no thread attach/detach callout. */
1935 return 0;
1936}
1937
1938
1939/** @copydoc kLdrModSize */
1940static KLDRADDR kldrModLXSize(PKLDRMOD pMod)
1941{
1942 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1943 return pModLX->cbMapped;
1944}
1945
1946
1947/** @copydoc kLdrModGetBits */
1948static int kldrModLXGetBits(PKLDRMOD pMod, void *pvBits, KLDRADDR BaseAddress, PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser)
1949{
1950 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1951 int rc;
1952
1953 /*
1954 * Load the image bits.
1955 */
1956 rc = kldrModLXDoLoadBits(pModLX, pvBits);
1957 if (rc)
1958 return rc;
1959
1960 /*
1961 * Perform relocations.
1962 */
1963 return kldrModLXRelocateBits(pMod, pvBits, BaseAddress, pMod->aSegments[0].LinkAddress, pfnGetImport, pvUser);
1964
1965}
1966
1967
1968/** @copydoc kLdrModRelocateBits */
1969static int kldrModLXRelocateBits(PKLDRMOD pMod, void *pvBits, KLDRADDR NewBaseAddress, KLDRADDR OldBaseAddress,
1970 PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser)
1971{
1972 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1973 uint32_t i;
1974     int rc = 0;
1975
1976 /*
1977      * Do we actually need to do anything?
1978 */
1979 if ( NewBaseAddress == OldBaseAddress
1980 && NewBaseAddress == pModLX->paObjs[0].o32_base
1981 && !pModLX->Hdr.e32_impmodcnt)
1982 return 0;
1983
1984 /*
1985 * Load the fixup section.
1986 */
1987 if (!pModLX->pbFixupSection)
1988 {
1989 rc = kldrModLXDoLoadFixupSection(pModLX);
1990 if (rc)
1991 return rc;
1992 }
1993
1994 /*
1995 * Iterate the segments.
1996 */
1997 for (i = 0; i < pModLX->Hdr.e32_objcnt; i++)
1998 {
1999 const struct o32_obj * const pObj = &pModLX->paObjs[i];
2000 uint32_t iPage;
2001 uint8_t *pbPage = (uint8_t *)pvBits + (uintptr_t)pModLX->pMod->aSegments[i].RVA;
2002
2003 /*
2004 * Iterate the page map pages.
2005 */
2006 for (iPage = 0; !rc && iPage < pObj->o32_mapsize; iPage++, pbPage += OBJPAGELEN)
2007 {
2008 const uint8_t * const pbFixupRecEnd = pModLX->pbFixupRecs + pModLX->paoffPageFixups[iPage + pObj->o32_pagemap];
2009 const uint8_t *pb = pModLX->pbFixupRecs + pModLX->paoffPageFixups[iPage + pObj->o32_pagemap - 1];
2010 KLDRADDR uValue;
2011 uint32_t fKind;
2012
2013 /* sanity */
2014 if (pbFixupRecEnd < pb)
2015 return KLDR_ERR_BAD_FIXUP;
2016 if (pbFixupRecEnd - 1 > pModLX->pbFixupSectionLast)
2017 return KLDR_ERR_BAD_FIXUP;
2018 if (pb < pModLX->pbFixupSection)
2019 return KLDR_ERR_BAD_FIXUP;
2020
2021 /*
2022 * Iterate the fixup record.
2023 */
2024 while (pb < pbFixupRecEnd)
2025 {
2026 union _rel
2027 {
2028 const uint8_t * pb;
2029 const struct r32_rlc *prlc;
2030 } u;
2031
2032 u.pb = pb;
2033                 pb += 3 + (u.prlc->nr_stype & NRCHAIN ? 0 : 1); /* skip the fixed part: stype, flags, then srcoff (word) or cnt (byte). */
2034
2035 /*
2036 * Figure out the target.
2037 */
2038 switch (u.prlc->nr_flags & NRRTYP)
2039 {
2040 /*
2041 * Internal fixup.
2042 */
2043 case NRRINT:
2044 {
2045 uint16_t iTrgObject;
2046 uint32_t offTrgObject;
2047
2048 /* the object */
2049 if (u.prlc->nr_flags & NR16OBJMOD)
2050 {
2051 iTrgObject = *(const uint16_t *)pb;
2052 pb += 2;
2053 }
2054 else
2055 iTrgObject = *pb++;
2056 iTrgObject--;
2057 if (iTrgObject >= pModLX->Hdr.e32_objcnt)
2058 return KLDR_ERR_BAD_FIXUP;
2059
2060 /* the target */
2061 if (u.prlc->nr_flags & NR32BITOFF)
2062 {
2063 offTrgObject = *(const uint32_t *)pb;
2064 pb += 4;
2065 }
2066 else
2067 {
2068 offTrgObject = *(const uint16_t *)pb;
2069 pb += 2;
2070 }
2071
2072                         /* calculate the target address relative to the new base address. */
2073                         uValue = NewBaseAddress + pMod->aSegments[iTrgObject].RVA + offTrgObject;
2074 fKind = 0;
2075 break;
2076 }
2077
2078 /*
2079 * Import by symbol ordinal.
2080 */
2081 case NRRORD:
2082 {
2083 uint16_t iModule;
2084 uint32_t iSymbol;
2085
2086 /* the module ordinal */
2087 if (u.prlc->nr_flags & NR16OBJMOD)
2088 {
2089 iModule = *(const uint16_t *)pb;
2090 pb += 2;
2091 }
2092 else
2093 iModule = *pb++;
2094 iModule--;
2095 if (iModule >= pModLX->Hdr.e32_impmodcnt)
2096 return KLDR_ERR_BAD_FIXUP;
2097#if 1
2098 if (u.prlc->nr_flags & NRICHAIN)
2099 return KLDR_ERR_BAD_FIXUP;
2100#endif
2101
2102                     /* the symbol ordinal. */
2103 if (u.prlc->nr_flags & NR32BITOFF)
2104 {
2105 iSymbol = *(const uint32_t *)pb;
2106 pb += 4;
2107 }
2108 else if (!(u.prlc->nr_flags & NR8BITORD))
2109 {
2110 iSymbol = *(const uint16_t *)pb;
2111 pb += 2;
2112 }
2113 else
2114 iSymbol = *pb++;
2115
2116 /* resolve it. */
2117 rc = pfnGetImport(pMod, iModule, iSymbol, NULL, &uValue, &fKind, pvUser);
2118 if (rc)
2119 return rc;
2120 break;
2121 }
2122
2123 /*
2124 * Import by symbol name.
2125 */
2126 case NRRNAM:
2127 {
2128 uint32_t iModule;
2129                     uint32_t offSymbol;
2130 const uint8_t *pbSymbol;
2131 char szSymbol[260];
2132
2133 /* the module ordinal */
2134 if (u.prlc->nr_flags & NR16OBJMOD)
2135 {
2136 iModule = *(const uint16_t *)pb;
2137 pb += 2;
2138 }
2139 else
2140 iModule = *pb++;
2141 iModule--;
2142 if (iModule >= pModLX->Hdr.e32_impmodcnt)
2143 return KLDR_ERR_BAD_FIXUP;
2144#if 1
2145 if (u.prlc->nr_flags & NRICHAIN)
2146 return KLDR_ERR_BAD_FIXUP;
2147#endif
2148
2149                     /* the symbol name offset (into the import procedure name table). */
2150 if (u.prlc->nr_flags & NR32BITOFF)
2151 {
2152 offSymbol = *(const uint32_t *)pb;
2153 pb += 4;
2154 }
2155 else if (!(u.prlc->nr_flags & NR8BITORD))
2156 {
2157 offSymbol = *(const uint16_t *)pb;
2158 pb += 2;
2159 }
2160 else
2161 offSymbol = *pb++;
2162 pbSymbol = pModLX->pbImportProcs + offSymbol;
2163 if ( pbSymbol < pModLX->pbImportProcs
2164 || pbSymbol > pModLX->pbFixupSectionLast)
2165 return KLDR_ERR_BAD_FIXUP;
2166
2167 /* resolve it. */
2168 kLdrHlpMemCopy(szSymbol, pbSymbol + 1, *pbSymbol);
2169 szSymbol[*pbSymbol] = '\0';
2170 rc = pfnGetImport(pMod, iModule, NIL_KLDRMOD_SYM_ORDINAL, szSymbol, &uValue, &fKind, pvUser);
2171 if (rc)
2172 return rc;
2173 break;
2174 }
2175
2176                 case NRRENT: /* entry table fixups aren't supported. */
2177                     KLDRMODLX_ASSERT(!"NRRENT");
2178                 default:
2179                     return KLDR_ERR_BAD_FIXUP; /* don't apply a fixup with uValue/fKind left undefined. */
2180 }
2181
2182 /* addend */
2183 if (u.prlc->nr_flags & NRADD)
2184 {
2185 if (u.prlc->nr_flags & NR32BITADD)
2186 {
2187 uValue += *(const uint32_t *)pb;
2188 pb += 4;
2189 }
2190 else
2191 {
2192 uValue += *(const uint16_t *)pb;
2193 pb += 2;
2194 }
2195 }
2196
2197 /*
2198              * Deal with the 'source', i.e. the place in the page that gets modified.
2199 */
2200 if (!(u.prlc->nr_stype & NRCHAIN))
2201 {
2202 rc = kldrModLXDoReloc(pbPage, u.prlc->r32_soff, uValue, fKind);
2203 if (rc)
2204 return rc;
2205 }
2206 else if (!(u.prlc->nr_flags & NRICHAIN))
2207 {
2208 const uint16_t *poffSrc = (const uint16_t *)pb;
2209 uint8_t c = u.pb[2];
2210 while (c-- > 0)
2211 {
2212 rc = kldrModLXDoReloc(pbPage, *poffSrc++, uValue, fKind);
2213 if (rc)
2214 return rc;
2215 }
2216 pb = (const uint8_t *)poffSrc;
2217 }
2218 else
2219 {
2220 KLDRMODLX_ASSERT(!"NRICHAIN");
2221 return KLDR_ERR_LX_NRICHAIN_NOT_SUPPORTED;
2222 }
2223 }
2224 }
2225 }
2226
2227 return 0;
2228}
2229
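/*
 * Decoding illustration (hypothetical bytes, not used by the interpreter): a
 * minimal internal fixup record with a non-chained 32-bit offset source and a
 * 32-bit target offset, laid out the way the NRRINT case above consumes it.
 * NROFF32 is the 32-bit offset source type constant from exe386.h.
 */
#if 0
static const uint8_t g_abExampleFixupRec[] =
{
    NROFF32,                /* nr_stype: 32-bit offset source, no source list (NRCHAIN clear). */
    NRRINT | NR32BITOFF,    /* nr_flags: internal target with a 32-bit target offset. */
    0x23, 0x01,             /* r32_soff: page relative source offset 0x0123 (little endian). */
    0x01,                   /* target object ordinal, 8-bit since NR16OBJMOD is clear (1 based). */
    0x67, 0x45, 0x00, 0x00  /* 32-bit offset into the target object: 0x00004567. */
};
#endif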
2230
2231/**
2232 * Applies the relocation to one 'source' in a page.
2233 *
2234 * @returns 0 on success, non-zero kLdr status code on failure.
2235 * @param pbPage The page in which to apply the fixup.
2236  * @param off Page relative offset of where to apply the fixup.
2237 * @param uValue The target value.
2238 * @param fKind The target kind.
2239 * @todo inline this.
2240 */
2241static int kldrModLXDoReloc(uint8_t *pbPage, int off, KLDRADDR uValue, uint32_t fKind)
2242{
2243     /** @todo Write uValue to the page at 'off' according to fKind; the write-back
2244      *        is still missing, so fixup records are parsed but not applied. */
2245     return 0;
2246}
2247
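/*
 * Illustrative sketch only (not wired up): for the common case of a flat
 * 32-bit offset fixup, applying it amounts to writing the target value at the
 * page relative source offset. A real implementation must additionally
 * dispatch on the fixup source type, use fKind to pick 16-bit vs. 32-bit
 * targets, and handle fixups that cross the page boundary (off can be
 * negative or reach past OBJPAGELEN).
 */
#if 0
static int kldrModLXDoRelocSketch(uint8_t *pbPage, int off, KLDRADDR uValue)
{
    uint32_t u32 = (uint32_t)uValue;

    /* Only the fully contained case is handled in this sketch. */
    if (off < 0 || off + (int)sizeof(u32) > OBJPAGELEN)
        return KLDR_ERR_BAD_FIXUP;

    /* Unaligned little endian store of the 32-bit flat address. */
    kLdrHlpMemCopy(pbPage + off, &u32, sizeof(u32));
    return 0;
}
#endif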
2248
2249/**
2250 * The LX module interpreter method table.
2251 */
2252KLDRMODOPS g_kLdrModLXOps =
2253{
2254 "LX",
2255 NULL,
2256 kldrModLXCreate,
2257 kldrModLXDestroy,
2258 kldrModLXQuerySymbol,
2259 kldrModLXEnumSymbols,
2260 kldrModLXGetImport,
2261 kldrModLXNumberOfImports,
2262     NULL /* canExecuteOn is optional */,
2263 kldrModLXGetStackInfo,
2264 kldrModLXQueryMainEntrypoint,
2265 kldrModLXEnumDbgInfo,
2266 kldrModLXHasDbgInfo,
2267 kldrModLXMap,
2268 kldrModLXUnmap,
2269 kldrModLXAllocTLS,
2270 kldrModLXFreeTLS,
2271 kldrModLXReload,
2272 kldrModLXFixupMapping,
2273 kldrModLXCallInit,
2274 kldrModLXCallTerm,
2275 kldrModLXCallThread,
2276 kldrModLXSize,
2277 kldrModLXGetBits,
2278 kldrModLXRelocateBits,
2279 42 /* the end */
2280};
2281