source: trunk/kLdr/kLdrModLX.c@ 28

Last change on this file since 28 was 28, checked in by bird, 16 years ago

shut up gcc warnings.

1/* $Id: kLdrModLX.c 28 2009-03-28 18:37:20Z bird $ */
2/** @file
3 * kLdr - The Module Interpreter for the Linear eXecutable (LX) Format.
4 */
5
6/*
7 * Copyright (c) 2006-2007 knut st. osmundsen <bird-kStuff-spam@anduin.net>
8 *
9 * This file is part of kStuff.
10 *
11 * kStuff is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
15 *
16 * In addition to the permissions in the GNU Lesser General Public
17 * License, you are granted unlimited permission to link the compiled
18 * version of this file into combinations with other programs, and to
19 * distribute those combinations without any restriction coming from
20 * the use of this file.
21 *
22 * kStuff is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
25 * Lesser General Public License for more details.
26 *
27 * You should have received a copy of the GNU Lesser General Public
28 * License along with kStuff; if not, write to the Free Software
29 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
30 * 02110-1301, USA
31 */
32
33/*******************************************************************************
34* Header Files *
35*******************************************************************************/
36#include <k/kLdr.h>
37#include "kLdrInternal.h"
38#include <k/kLdrFmts/lx.h>
39
40
41/*******************************************************************************
42* Defined Constants And Macros *
43*******************************************************************************/
44/** @def KLDRMODLX_STRICT
45 * Define KLDRMODLX_STRICT to enable strict checks in KLDRMODLX. */
46#define KLDRMODLX_STRICT 1
47
48/** @def KLDRMODLX_ASSERT
49 * Assert that an expression is true when KLDRMODLX_STRICT is defined.
50 */
51#ifdef KLDRMODLX_STRICT
52# define KLDRMODLX_ASSERT(expr) kHlpAssert(expr)
53#else
54# define KLDRMODLX_ASSERT(expr) do {} while (0)
55#endif
56
57
58/*******************************************************************************
59* Structures and Typedefs *
60*******************************************************************************/
61/**
62 * Instance data for the LX module interpreter.
63 */
64typedef struct KLDRMODLX
65{
66 /** Pointer to the module. (Follows the section table.) */
67 PKLDRMOD pMod;
68 /** Pointer to the user mapping. */
69 const void *pvMapping;
70 /** The size of the mapped LX image. */
71 KSIZE cbMapped;
72 /** Reserved flags. */
73 KU32 f32Reserved;
74
75 /** The offset of the LX header. */
76 KLDRFOFF offHdr;
77 /** Copy of the LX header. */
78 struct e32_exe Hdr;
79
80 /** Pointer to the loader section.
81 * Allocated together with this structure. */
82 const KU8 *pbLoaderSection;
83 /** Pointer to the last byte in the loader section. */
84 const KU8 *pbLoaderSectionLast;
85 /** Pointer to the object table in the loader section. */
86 const struct o32_obj *paObjs;
87 /** Pointer to the object page map table in the loader section. */
88 const struct o32_map *paPageMappings;
89 /** Pointer to the resource table in the loader section. */
90 const struct rsrc32 *paRsrcs;
91 /** Pointer to the resident name table in the loader section. */
92 const KU8 *pbResNameTab;
93 /** Pointer to the entry table in the loader section. */
94 const KU8 *pbEntryTab;
95
96 /** Pointer to the non-resident name table. */
97 KU8 *pbNonResNameTab;
98 /** Pointer to the last byte in the non-resident name table. */
99 const KU8 *pbNonResNameTabLast;
100
101 /** Pointer to the fixup section. */
102 KU8 *pbFixupSection;
103 /** Pointer to the last byte in the fixup section. */
104 const KU8 *pbFixupSectionLast;
105 /** Pointer to the fixup page table within pvFixupSection. */
106 const KU32 *paoffPageFixups;
107 /** Pointer to the fixup record table within pvFixupSection. */
108 const KU8 *pbFixupRecs;
109 /** Pointer to the import module name table within pvFixupSection. */
110 const KU8 *pbImportMods;
111 /** Pointer to the import procedure name table within pvFixupSection. */
112 const KU8 *pbImportProcs;
113} KLDRMODLX, *PKLDRMODLX;
114
115
116/*******************************************************************************
117* Internal Functions *
118*******************************************************************************/
119static int kldrModLXHasDbgInfo(PKLDRMOD pMod, const void *pvBits);
120static int kldrModLXRelocateBits(PKLDRMOD pMod, void *pvBits, KLDRADDR NewBaseAddress, KLDRADDR OldBaseAddress,
121 PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser);
122static int kldrModLXDoCreate(PKRDR pRdr, KLDRFOFF offNewHdr, PKLDRMODLX *ppModLX);
123static const KU8 *kldrModLXDoNameTableLookupByOrdinal(const KU8 *pbNameTable, KI32 cbNameTable, KU32 iOrdinal);
124static int kldrModLXDoNameLookup(PKLDRMODLX pModLX, const char *pchSymbol, KU32 cchSymbol, KU32 *piSymbol);
125static const KU8 *kldrModLXDoNameTableLookupByName(const KU8 *pbNameTable, KI32 cbNameTable,
126 const char *pchSymbol, KSIZE cchSymbol);
127static int kldrModLXDoLoadBits(PKLDRMODLX pModLX, void *pvBits);
128static int kldrModLXDoIterDataUnpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc);
129static int kldrModLXDoIterData2Unpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc);
130static void kLdrModLXMemCopyW(KU8 *pbDst, const KU8 *pbSrc, int cb);
131static int kldrModLXDoProtect(PKLDRMODLX pModLX, void *pvBits, unsigned fUnprotectOrProtect);
132static int kldrModLXDoCallDLL(PKLDRMODLX pModLX, unsigned uOp, KUPTR uHandle);
133static int kldrModLXDoForwarderQuery(PKLDRMODLX pModLX, const struct e32_entry *pEntry,
134 PFNKLDRMODGETIMPORT pfnGetForwarder, void *pvUser, PKLDRADDR puValue, KU32 *pfKind);
135static int kldrModLXDoLoadFixupSection(PKLDRMODLX pModLX);
136static KI32 kldrModLXDoCall(KUPTR uEntrypoint, KUPTR uHandle, KU32 uOp, void *pvReserved);
137static int kldrModLXDoReloc(KU8 *pbPage, int off, KLDRADDR PageAddress, const struct r32_rlc *prlc,
138 int iSelector, KLDRADDR uValue, KU32 fKind);
139
140
141/**
142 * Create a loader module instance interpreting the executable image found
143 * in the specified file provider instance.
144 *
145 * @returns 0 on success and *ppMod pointing to a module instance.
146 * On failure, a non-zero OS specific error code is returned.
147 * @param pOps Pointer to the registered method table.
148 * @param pRdr The file provider instance to use.
149 * @param fFlags Flags, MBZ.
150 * @param enmCpuArch The desired CPU architecture. KCPUARCH_UNKNOWN means
151 * anything goes, but with a preference for the current
152 * host architecture.
153 * @param offNewHdr The offset of the new header in MZ files. -1 if not found.
154 * @param ppMod Where to store the module instance pointer.
155 */
156static int kldrModLXCreate(PCKLDRMODOPS pOps, PKRDR pRdr, KU32 fFlags, KCPUARCH enmCpuArch, KLDRFOFF offNewHdr, PPKLDRMOD ppMod)
157{
158 PKLDRMODLX pModLX;
159 int rc;
160
161 /*
162 * Create the instance data and do a minimal header validation.
163 */
164 rc = kldrModLXDoCreate(pRdr, offNewHdr, &pModLX);
165 if (!rc)
166 {
167 /*
168 * Match up against the requested CPU architecture.
169 */
170 if ( enmCpuArch == KCPUARCH_UNKNOWN
171 || pModLX->pMod->enmArch == enmCpuArch)
172 {
173 pModLX->pMod->pOps = pOps;
174 pModLX->pMod->u32Magic = KLDRMOD_MAGIC;
175 *ppMod = pModLX->pMod;
176 return 0;
177 }
178 rc = KLDR_ERR_CPU_ARCH_MISMATCH;
179 }
180 kHlpFree(pModLX);
181 return rc;
182}
183
184
185/**
186 * Separate function for reading the headers and creating the LX module
187 * instance, to simplify cleanup on failure.
188 */
189static int kldrModLXDoCreate(PKRDR pRdr, KLDRFOFF offNewHdr, PKLDRMODLX *ppModLX)
190{
191 struct e32_exe Hdr;
192 PKLDRMODLX pModLX;
193 PKLDRMOD pMod;
194 KSIZE cb;
195 KSIZE cchFilename;
196 KU32 off, offEnd;
197 KU32 i;
198 int rc;
199 int fCanOptimizeMapping;
200 KU32 NextRVA;
201 *ppModLX = NULL;
202
203 /*
204 * Read the signature and file header.
205 */
206 rc = kRdrRead(pRdr, &Hdr, sizeof(Hdr), offNewHdr > 0 ? offNewHdr : 0);
207 if (rc)
208 return rc;
209 if ( Hdr.e32_magic[0] != E32MAGIC1
210 || Hdr.e32_magic[1] != E32MAGIC2)
211 return KLDR_ERR_UNKNOWN_FORMAT;
212
213 /* We're not interested in anything but x86 images. */
214 if ( Hdr.e32_level != E32LEVEL
215 || Hdr.e32_border != E32LEBO
216 || Hdr.e32_worder != E32LEWO
217 || Hdr.e32_cpu < E32CPU286
218 || Hdr.e32_cpu > E32CPU486
219 || Hdr.e32_pagesize != OBJPAGELEN
220 )
221 return KLDR_ERR_LX_BAD_HEADER;
222
223 /* Some rough sanity checks. */
224 offEnd = kRdrSize(pRdr) >= (KLDRFOFF)~(KU32)16 ? ~(KU32)16 : (KU32)kRdrSize(pRdr);
225 if ( Hdr.e32_itermap > offEnd
226 || Hdr.e32_datapage > offEnd
227 || Hdr.e32_nrestab > offEnd
228 || Hdr.e32_nrestab + Hdr.e32_cbnrestab > offEnd
229 || Hdr.e32_ldrsize > offEnd - offNewHdr - sizeof(Hdr)
230 || Hdr.e32_fixupsize > offEnd - offNewHdr - sizeof(Hdr)
231 || Hdr.e32_fixupsize + Hdr.e32_ldrsize > offEnd - offNewHdr - sizeof(Hdr))
232 return KLDR_ERR_LX_BAD_HEADER;
233
234 /* Verify the loader section. */
235 offEnd = Hdr.e32_objtab + Hdr.e32_ldrsize;
236 if (Hdr.e32_objtab < sizeof(Hdr))
237 return KLDR_ERR_LX_BAD_LOADER_SECTION;
238 off = Hdr.e32_objtab + sizeof(struct o32_obj) * Hdr.e32_objcnt;
239 if (off > offEnd)
240 return KLDR_ERR_LX_BAD_LOADER_SECTION;
241 if ( Hdr.e32_objmap
242 && (Hdr.e32_objmap < off || Hdr.e32_objmap > offEnd))
243 return KLDR_ERR_LX_BAD_LOADER_SECTION;
244 if ( Hdr.e32_rsrccnt
245 && ( Hdr.e32_rsrctab < off
246 || Hdr.e32_rsrctab > offEnd
247 || Hdr.e32_rsrctab + sizeof(struct rsrc32) * Hdr.e32_rsrccnt > offEnd))
248 return KLDR_ERR_LX_BAD_LOADER_SECTION;
249 if ( Hdr.e32_restab
250 && (Hdr.e32_restab < off || Hdr.e32_restab > offEnd - 2))
251 return KLDR_ERR_LX_BAD_LOADER_SECTION;
252 if ( Hdr.e32_enttab
253 && (Hdr.e32_enttab < off || Hdr.e32_enttab >= offEnd))
254 return KLDR_ERR_LX_BAD_LOADER_SECTION;
255 if ( Hdr.e32_dircnt
256 && (Hdr.e32_dirtab < off || Hdr.e32_dirtab > offEnd - 2))
257 return KLDR_ERR_LX_BAD_LOADER_SECTION;
258
259 /* Verify the fixup section. */
260 off = offEnd;
261 offEnd = off + Hdr.e32_fixupsize;
262 if ( Hdr.e32_fpagetab
263 && (Hdr.e32_fpagetab < off || Hdr.e32_fpagetab > offEnd))
264 {
265 /*
266 * wlink mixes the fixup section and the loader section.
267 */
268 off = Hdr.e32_fpagetab;
269 offEnd = off + Hdr.e32_fixupsize;
270 Hdr.e32_ldrsize = off - Hdr.e32_objtab;
271 }
272 if ( Hdr.e32_frectab
273 && (Hdr.e32_frectab < off || Hdr.e32_frectab > offEnd))
274 return KLDR_ERR_LX_BAD_FIXUP_SECTION;
275 if ( Hdr.e32_impmod
276 && (Hdr.e32_impmod < off || Hdr.e32_impmod > offEnd || Hdr.e32_impmod + Hdr.e32_impmodcnt > offEnd))
277 return KLDR_ERR_LX_BAD_FIXUP_SECTION;
278 if ( Hdr.e32_impproc
279 && (Hdr.e32_impproc < off || Hdr.e32_impproc > offEnd))
280 return KLDR_ERR_LX_BAD_FIXUP_SECTION;
281
282 /*
283 * Calc the instance size, allocate and initialize it.
284 */
285 cchFilename = kHlpStrLen(kRdrName(pRdr));
286 cb = K_ALIGN_Z(sizeof(KLDRMODLX), 8)
287 + K_ALIGN_Z(K_OFFSETOF(KLDRMOD, aSegments[Hdr.e32_objcnt + 1]), 8)
288 + K_ALIGN_Z(cchFilename + 1, 8)
289 + Hdr.e32_ldrsize + 2; /* +2 for two extra zeros. */
290 pModLX = (PKLDRMODLX)kHlpAlloc(cb);
291 if (!pModLX)
292 return KERR_NO_MEMORY;
293 *ppModLX = pModLX;
294
295 /* KLDRMOD */
296 pMod = (PKLDRMOD)((KU8 *)pModLX + K_ALIGN_Z(sizeof(KLDRMODLX), 8));
297 pMod->pvData = pModLX;
298 pMod->pRdr = pRdr;
299 pMod->pOps = NULL; /* set upon success. */
300 pMod->cSegments = Hdr.e32_objcnt;
301 pMod->cchFilename = cchFilename;
302 pMod->pszFilename = (char *)K_ALIGN_P(&pMod->aSegments[pMod->cSegments], 8);
303 kHlpMemCopy((char *)pMod->pszFilename, kRdrName(pRdr), cchFilename + 1);
304 pMod->pszName = NULL; /* finalized further down */
305 pMod->cchName = 0;
306 switch (Hdr.e32_cpu)
307 {
308 case E32CPU286:
309 pMod->enmCpu = KCPU_I80286;
310 pMod->enmArch = KCPUARCH_X86_16;
311 break;
312 case E32CPU386:
313 pMod->enmCpu = KCPU_I386;
314 pMod->enmArch = KCPUARCH_X86_32;
315 break;
316 case E32CPU486:
317 pMod->enmCpu = KCPU_I486;
318 pMod->enmArch = KCPUARCH_X86_32;
319 break;
320 }
321 pMod->enmEndian = KLDRENDIAN_LITTLE;
322 pMod->enmFmt = KLDRFMT_LX;
323 switch (Hdr.e32_mflags & E32MODMASK)
324 {
325 case E32MODEXE:
326 pMod->enmType = !(Hdr.e32_mflags & E32NOINTFIX)
327 ? KLDRTYPE_EXECUTABLE_RELOCATABLE
328 : KLDRTYPE_EXECUTABLE_FIXED;
329 break;
330
331 case E32MODDLL:
332 case E32PROTDLL:
333 case E32MODPROTDLL:
334 pMod->enmType = !(Hdr.e32_mflags & E32SYSDLL)
335 ? KLDRTYPE_SHARED_LIBRARY_RELOCATABLE
336 : KLDRTYPE_SHARED_LIBRARY_FIXED;
337 break;
338
339 case E32MODPDEV:
340 case E32MODVDEV:
341 pMod->enmType = KLDRTYPE_SHARED_LIBRARY_RELOCATABLE;
342 break;
343 }
344 pMod->u32Magic = 0; /* set upon success. */
345
346 /* KLDRMODLX */
347 pModLX->pMod = pMod;
348 pModLX->pvMapping = 0;
349 pModLX->cbMapped = 0;
350 pModLX->f32Reserved = 0;
351
352 pModLX->offHdr = offNewHdr >= 0 ? offNewHdr : 0;
353 kHlpMemCopy(&pModLX->Hdr, &Hdr, sizeof(Hdr));
354
355 pModLX->pbLoaderSection = K_ALIGN_P(pMod->pszFilename + pMod->cchFilename + 1, 16);
356 pModLX->pbLoaderSectionLast = pModLX->pbLoaderSection + pModLX->Hdr.e32_ldrsize - 1;
357 pModLX->paObjs = NULL;
358 pModLX->paPageMappings = NULL;
359 pModLX->paRsrcs = NULL;
360 pModLX->pbResNameTab = NULL;
361 pModLX->pbEntryTab = NULL;
362
363 pModLX->pbNonResNameTab = NULL;
364 pModLX->pbNonResNameTabLast = NULL;
365
366 pModLX->pbFixupSection = NULL;
367 pModLX->pbFixupSectionLast = NULL;
368 pModLX->paoffPageFixups = NULL;
369 pModLX->pbFixupRecs = NULL;
370 pModLX->pbImportMods = NULL;
371 pModLX->pbImportProcs = NULL;
372
373 /*
374 * Read the loader data.
375 */
376 rc = kRdrRead(pRdr, (void *)pModLX->pbLoaderSection, pModLX->Hdr.e32_ldrsize, pModLX->Hdr.e32_objtab + pModLX->offHdr);
377 if (rc)
378 return rc;
379 ((KU8 *)pModLX->pbLoaderSectionLast)[1] = 0;
380 ((KU8 *)pModLX->pbLoaderSectionLast)[2] = 0;
381 if (pModLX->Hdr.e32_objcnt)
382 pModLX->paObjs = (const struct o32_obj *)pModLX->pbLoaderSection;
383 if (pModLX->Hdr.e32_objmap)
384 pModLX->paPageMappings = (const struct o32_map *)(pModLX->pbLoaderSection + pModLX->Hdr.e32_objmap - pModLX->Hdr.e32_objtab);
385 if (pModLX->Hdr.e32_rsrccnt)
386 pModLX->paRsrcs = (const struct rsrc32 *)(pModLX->pbLoaderSection + pModLX->Hdr.e32_rsrctab - pModLX->Hdr.e32_objtab);
387 if (pModLX->Hdr.e32_restab)
388 pModLX->pbResNameTab = pModLX->pbLoaderSection + pModLX->Hdr.e32_restab - pModLX->Hdr.e32_objtab;
389 if (pModLX->Hdr.e32_enttab)
390 pModLX->pbEntryTab = pModLX->pbLoaderSection + pModLX->Hdr.e32_enttab - pModLX->Hdr.e32_objtab;
391
392 /*
393 * Get the soname from the resident name table.
394 * Very convenient that it's the 0 ordinal, because then we get a
395 * free string terminator.
396 * (The table entry consists of a pascal string followed by a 16-bit ordinal.)
397 */
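    /* For example (hypothetical image, added commentary): a resident name table
       starting with the bytes 05 'M' 'Y' 'D' 'L' 'L' 00 00 yields the soname
       "MYDLL" -- the zero ordinal word following the pascal string doubles as
       the C string terminator relied upon below. */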
398 if (pModLX->pbResNameTab)
399 pMod->pszName = (const char *)kldrModLXDoNameTableLookupByOrdinal(pModLX->pbResNameTab,
400 pModLX->pbLoaderSectionLast - pModLX->pbResNameTab + 1,
401 0);
402 if (!pMod->pszName)
403 return KLDR_ERR_LX_NO_SONAME;
404 pMod->cchName = *(const KU8 *)pMod->pszName++;
405 if (pMod->cchName != kHlpStrLen(pMod->pszName))
406 return KLDR_ERR_LX_BAD_SONAME;
407
408 /*
409 * Quick validation of the object table.
410 */
411 cb = 0;
412 for (i = 0; i < pMod->cSegments; i++)
413 {
414 if (pModLX->paObjs[i].o32_base & (OBJPAGELEN - 1))
415 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
416 if (pModLX->paObjs[i].o32_base + pModLX->paObjs[i].o32_size <= pModLX->paObjs[i].o32_base)
417 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
418 if (pModLX->paObjs[i].o32_mapsize > (pModLX->paObjs[i].o32_size + (OBJPAGELEN - 1)))
419 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
420 if ( pModLX->paObjs[i].o32_mapsize
421 && ( (KU8 *)&pModLX->paPageMappings[pModLX->paObjs[i].o32_pagemap] > pModLX->pbLoaderSectionLast
422 || (KU8 *)&pModLX->paPageMappings[pModLX->paObjs[i].o32_pagemap + pModLX->paObjs[i].o32_mapsize]
423 > pModLX->pbLoaderSectionLast))
424 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
425 if (i > 0 && !(pModLX->paObjs[i].o32_flags & OBJRSRC))
426 {
427 if (pModLX->paObjs[i].o32_base <= pModLX->paObjs[i - 1].o32_base)
428 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
429 if (pModLX->paObjs[i].o32_base < pModLX->paObjs[i - 1].o32_base + pModLX->paObjs[i - 1].o32_mapsize)
430 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
431 }
432 }
433
434 /*
435 * Check if we can optimize the mapping by using a different
436 * object alignment. The linker typically uses 64KB alignment,
437 * we can easily get away with page alignment in most cases.
438 */
439 fCanOptimizeMapping = !(Hdr.e32_mflags & (E32NOINTFIX | E32SYSDLL));
440 NextRVA = 0;
441
442 /*
443 * Setup the KLDRMOD segment array.
444 */
445 for (i = 0; i < pMod->cSegments; i++)
446 {
447 /* unused */
448 pMod->aSegments[i].pvUser = NULL;
449 pMod->aSegments[i].MapAddress = 0;
450 pMod->aSegments[i].pchName = NULL;
451 pMod->aSegments[i].cchName = 0;
452 pMod->aSegments[i].offFile = -1;
453 pMod->aSegments[i].cbFile = -1;
454 pMod->aSegments[i].SelFlat = 0;
455 pMod->aSegments[i].Sel16bit = 0;
456
457 /* flags */
458 pMod->aSegments[i].fFlags = 0;
459 if (pModLX->paObjs[i].o32_flags & OBJBIGDEF)
460 pMod->aSegments[i].fFlags = KLDRSEG_FLAG_16BIT;
461 if (pModLX->paObjs[i].o32_flags & OBJALIAS16)
462 pMod->aSegments[i].fFlags = KLDRSEG_FLAG_OS2_ALIAS16;
463 if (pModLX->paObjs[i].o32_flags & OBJCONFORM)
464 pMod->aSegments[i].fFlags = KLDRSEG_FLAG_OS2_CONFORM;
465 if (pModLX->paObjs[i].o32_flags & OBJIOPL)
466 pMod->aSegments[i].fFlags = KLDRSEG_FLAG_OS2_IOPL;
467
468 /* size and addresses */
469 pMod->aSegments[i].Alignment = OBJPAGELEN;
470 pMod->aSegments[i].cb = pModLX->paObjs[i].o32_size;
471 pMod->aSegments[i].LinkAddress = pModLX->paObjs[i].o32_base;
472 pMod->aSegments[i].RVA = NextRVA;
473 if ( fCanOptimizeMapping
474 || i + 1 >= pMod->cSegments
475 || (pModLX->paObjs[i].o32_flags & OBJRSRC)
476 || (pModLX->paObjs[i + 1].o32_flags & OBJRSRC))
477 pMod->aSegments[i].cbMapped = K_ALIGN_Z(pModLX->paObjs[i].o32_size, OBJPAGELEN);
478 else
479 pMod->aSegments[i].cbMapped = pModLX->paObjs[i + 1].o32_base - pModLX->paObjs[i].o32_base;
480 NextRVA += pMod->aSegments[i].cbMapped;
481
482 /* protection */
483 switch ( pModLX->paObjs[i].o32_flags
484 & (OBJSHARED | OBJREAD | OBJWRITE | OBJEXEC))
485 {
486 case 0:
487 case OBJSHARED:
488 pMod->aSegments[i].enmProt = KPROT_NOACCESS;
489 break;
490 case OBJREAD:
491 case OBJREAD | OBJSHARED:
492 pMod->aSegments[i].enmProt = KPROT_READONLY;
493 break;
494 case OBJWRITE:
495 case OBJWRITE | OBJREAD:
496 pMod->aSegments[i].enmProt = KPROT_WRITECOPY;
497 break;
498 case OBJWRITE | OBJSHARED:
499 case OBJWRITE | OBJSHARED | OBJREAD:
500 pMod->aSegments[i].enmProt = KPROT_READWRITE;
501 break;
502 case OBJEXEC:
503 case OBJEXEC | OBJSHARED:
504 pMod->aSegments[i].enmProt = KPROT_EXECUTE;
505 break;
506 case OBJEXEC | OBJREAD:
507 case OBJEXEC | OBJREAD | OBJSHARED:
508 pMod->aSegments[i].enmProt = KPROT_EXECUTE_READ;
509 break;
510 case OBJEXEC | OBJWRITE:
511 case OBJEXEC | OBJWRITE | OBJREAD:
512 pMod->aSegments[i].enmProt = KPROT_EXECUTE_WRITECOPY;
513 break;
514 case OBJEXEC | OBJWRITE | OBJSHARED:
515 case OBJEXEC | OBJWRITE | OBJSHARED | OBJREAD:
516 pMod->aSegments[i].enmProt = KPROT_EXECUTE_READWRITE;
517 break;
518 }
519 if ((pModLX->paObjs[i].o32_flags & (OBJREAD | OBJWRITE | OBJEXEC | OBJRSRC)) == OBJRSRC)
520 pMod->aSegments[i].enmProt = KPROT_READONLY;
521 /*pMod->aSegments[i].f16bit = !(pModLX->paObjs[i].o32_flags & OBJBIGDEF)
522 pMod->aSegments[i].fIOPL = !(pModLX->paObjs[i].o32_flags & OBJIOPL)
523 pMod->aSegments[i].fConforming = !(pModLX->paObjs[i].o32_flags & OBJCONFORM) */
524 }
525
526 /* set the mapping size */
527 pModLX->cbMapped = NextRVA;
528
529 /*
530 * We're done.
531 */
532 *ppModLX = pModLX;
533 return 0;
534}
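/*
 * Illustrative note (added commentary, not part of the original source): the
 * single kHlpAlloc() in kldrModLXDoCreate() packs everything into one block,
 * roughly laid out as:
 *
 *      KLDRMODLX instance                      (pModLX)
 *      KLDRMOD + segment array                 (pModLX->pMod, 8 byte aligned)
 *      filename copy                           (pMod->pszFilename)
 *      loader section + 2 trailing zero bytes  (pModLX->pbLoaderSection, 16 byte aligned)
 *
 * which is why kldrModLXDestroy() only has to free the non-resident name
 * table, the fixup section and the instance itself.
 */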
535
536
537/** @copydoc KLDRMODOPS::pfnDestroy */
538static int kldrModLXDestroy(PKLDRMOD pMod)
539{
540 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
541 int rc = 0;
542 KLDRMODLX_ASSERT(!pModLX->pvMapping);
543
544 if (pMod->pRdr)
545 {
546 rc = kRdrClose(pMod->pRdr);
547 pMod->pRdr = NULL;
548 }
549 if (pModLX->pbNonResNameTab)
550 {
551 kHlpFree(pModLX->pbNonResNameTab);
552 pModLX->pbNonResNameTab = NULL;
553 }
554 if (pModLX->pbFixupSection)
555 {
556 kHlpFree(pModLX->pbFixupSection);
557 pModLX->pbFixupSection = NULL;
558 }
559 pMod->u32Magic = 0;
560 pMod->pOps = NULL;
561 kHlpFree(pModLX);
562 return rc;
563}
564
565
566/**
567 * Resolves base address aliases.
568 *
569 * @param pModLX The interpreter module instance
570 * @param pBaseAddress The base address, IN & OUT.
571 */
572static void kldrModLXResolveBaseAddress(PKLDRMODLX pModLX, PKLDRADDR pBaseAddress)
573{
574 if (*pBaseAddress == KLDRMOD_BASEADDRESS_MAP)
575 *pBaseAddress = pModLX->pMod->aSegments[0].MapAddress;
576 else if (*pBaseAddress == KLDRMOD_BASEADDRESS_LINK)
577 *pBaseAddress = pModLX->pMod->aSegments[0].LinkAddress;
578}
579
580
581/** @copydoc kLdrModQuerySymbol */
582static int kldrModLXQuerySymbol(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress, KU32 iSymbol,
583 const char *pchSymbol, KSIZE cchSymbol, const char *pszVersion,
584 PFNKLDRMODGETIMPORT pfnGetForwarder, void *pvUser, PKLDRADDR puValue, KU32 *pfKind)
585{
586 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
587 KU32 iOrdinal;
588 int rc;
589 const struct b32_bundle *pBundle;
590
591
592 /*
593 * Give up at once if there is no entry table.
594 */
595 if (!pModLX->Hdr.e32_enttab)
596 return KLDR_ERR_SYMBOL_NOT_FOUND;
597
598 /*
599 * Translate the symbol name into an ordinal.
600 */
601 if (pchSymbol)
602 {
603 rc = kldrModLXDoNameLookup(pModLX, pchSymbol, cchSymbol, &iSymbol);
604 if (rc)
605 return rc;
606 }
607
608 /*
609 * Iterate the entry table.
610 * (The entry table is made up of bundles of similar exports.)
611 */
612 iOrdinal = 1;
613 pBundle = (const struct b32_bundle *)pModLX->pbEntryTab;
614 while (pBundle->b32_cnt && iOrdinal <= iSymbol)
615 {
616 static const KSIZE s_cbEntry[] = { 0, 3, 5, 5, 7 };
617
618 /*
619 * Check for a hit first.
620 */
621 iOrdinal += pBundle->b32_cnt;
622 if (iSymbol < iOrdinal)
623 {
624 KU32 offObject;
625 const struct e32_entry *pEntry = (const struct e32_entry *)((KUPTR)(pBundle + 1)
626 + (iSymbol - (iOrdinal - pBundle->b32_cnt))
627 * s_cbEntry[pBundle->b32_type]);
628
629 /*
630 * Calculate the return address.
631 */
632 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
633 switch (pBundle->b32_type)
634 {
635 /* empty bundles are placeholders for unused ordinal ranges. */
636 case EMPTY:
637 return KLDR_ERR_SYMBOL_NOT_FOUND;
638
639 /* e32_flags + a 16-bit offset. */
640 case ENTRY16:
641 offObject = pEntry->e32_variant.e32_offset.offset16;
642 if (pfKind)
643 *pfKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_NO_TYPE;
644 break;
645
646 /* e32_flags + a 16-bit offset + a 16-bit callgate selector. */
647 case GATE16:
648 offObject = pEntry->e32_variant.e32_callgate.offset;
649 if (pfKind)
650 *pfKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_CODE;
651 break;
652
653 /* e32_flags + a 32-bit offset. */
654 case ENTRY32:
655 offObject = pEntry->e32_variant.e32_offset.offset32;
656 if (pfKind)
657 *pfKind = KLDRSYMKIND_32BIT;
658 break;
659
660 /* e32_flags + 16-bit import module ordinal + a 32-bit procname or ordinal. */
661 case ENTRYFWD:
662 return kldrModLXDoForwarderQuery(pModLX, pEntry, pfnGetForwarder, pvUser, puValue, pfKind);
663
664 default:
665 /* anyone actually using TYPEINFO will end up here. */
666 KLDRMODLX_ASSERT(!"Bad bundle type");
667 return KLDR_ERR_LX_BAD_BUNDLE;
668 }
669
670 /*
671 * Validate the object number and calc the return address.
672 */
673 if ( pBundle->b32_obj <= 0
674 || pBundle->b32_obj > pMod->cSegments)
675 return KLDR_ERR_LX_BAD_BUNDLE;
676 if (puValue)
677 *puValue = BaseAddress
678 + offObject
679 + pMod->aSegments[pBundle->b32_obj - 1].RVA;
680 return 0;
681 }
682
683 /*
684 * Skip the bundle.
685 */
686 if (pBundle->b32_type > ENTRYFWD)
687 {
688 KLDRMODLX_ASSERT(!"Bad type"); /** @todo figure out TYPEINFO. */
689 return KLDR_ERR_LX_BAD_BUNDLE;
690 }
691 if (pBundle->b32_type == 0)
692 pBundle = (const struct b32_bundle *)((const KU8 *)pBundle + 2);
693 else
694 pBundle = (const struct b32_bundle *)((const KU8 *)(pBundle + 1) + s_cbEntry[pBundle->b32_type] * pBundle->b32_cnt);
695 }
696
697 return KLDR_ERR_SYMBOL_NOT_FOUND;
698}
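/*
 * Illustrative note (added commentary, not part of the original source): as
 * consumed above, the entry table is a sequence of bundles, each starting
 * with a count byte and a type byte; non-empty bundles additionally carry a
 * 16-bit object number and are followed by b32_cnt entries of
 * s_cbEntry[b32_type] bytes (3 for ENTRY16, 5 for GATE16/ENTRY32, 7 for
 * ENTRYFWD). An EMPTY bundle is just the two header bytes and reserves
 * b32_cnt unused ordinals. A zero count byte terminates the table.
 */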
699
700
701/**
702 * Do name lookup.
703 *
704 * @returns See kLdrModQuerySymbol.
705 * @param pModLX The module to lookup the symbol in.
706 * @param pchSymbol The symbol to lookup.
707 * @param cchSymbol The symbol name length.
708 * @param piSymbol Where to store the symbol ordinal.
709 */
710static int kldrModLXDoNameLookup(PKLDRMODLX pModLX, const char *pchSymbol, KU32 cchSymbol, KU32 *piSymbol)
711{
712
713 /*
714 * First do a hash table lookup.
715 */
716 /** @todo hash name table for speed. */
717
718 /*
719 * Search the name tables.
720 */
721 const KU8 *pbName = kldrModLXDoNameTableLookupByName(pModLX->pbResNameTab,
722 pModLX->pbLoaderSectionLast - pModLX->pbResNameTab + 1,
723 pchSymbol, cchSymbol);
724 if (!pbName)
725 {
726 if (!pModLX->pbNonResNameTab)
727 {
728 /* lazy load it */
729 /** @todo non-resident name table. */
730 }
731 if (pModLX->pbNonResNameTab)
732 pbName = kldrModLXDoNameTableLookupByName(pModLX->pbNonResNameTab,
733 pModLX->pbNonResNameTabLast - pModLX->pbNonResNameTab + 1,
734 pchSymbol, cchSymbol);
735 }
736 if (!pbName)
737 return KLDR_ERR_SYMBOL_NOT_FOUND;
738
739 *piSymbol = *(const KU16 *)(pbName + 1 + *pbName);
740 return 0;
741}
742
743
744#if 0
745/**
746 * Hash a symbol using the algorithm from sdbm.
747 *
748 * The following is the documentation of the original sdbm functions:
749 *
750 * This algorithm was created for sdbm (a public-domain reimplementation of
751 * ndbm) database library. it was found to do well in scrambling bits,
752 * causing better distribution of the keys and fewer splits. it also happens
753 * to be a good general hashing function with good distribution. the actual
754 * function is hash(i) = hash(i - 1) * 65599 + str[i]; what is included below
755 * is the faster version used in gawk. [there is even a faster, duff-device
756 * version] the magic constant 65599 was picked out of thin air while
757 * experimenting with different constants, and turns out to be a prime.
758 * this is one of the algorithms used in berkeley db (see sleepycat) and
759 * elsewhere.
760 */
761static KU32 kldrModLXDoHash(const char *pchSymbol, KU8 cchSymbol)
762{
763 KU32 hash = 0;
764 int ch;
765
766 while ( cchSymbol-- > 0
767 && (ch = *(unsigned const char *)pchSymbol++))
768 hash = ch + (hash << 6) + (hash << 16) - hash;
769
770 return hash;
771}
772#endif
773
774
775/**
776 * Lookup a name table entry by name.
777 *
778 * @returns Pointer to the name table entry if found.
779 * @returns NULL if not found.
780 * @param pbNameTable Pointer to the name table that should be searched.
781 * @param cbNameTable The size of the name table.
782 * @param pchSymbol The name of the symbol we're looking for.
783 * @param cchSymbol The length of the symbol name.
784 */
785static const KU8 *kldrModLXDoNameTableLookupByName(const KU8 *pbNameTable, KI32 cbNameTable,
786 const char *pchSymbol, KSIZE cchSymbol)
787{
788 /*
789 * Determine the name length up front so we can skip anything which doesn't match the length.
790 */
791 KU8 cbSymbol8Bit = (KU8)cchSymbol;
792 if (cbSymbol8Bit != cchSymbol)
793 return NULL; /* too long. */
794
795 /*
796 * Walk the name table.
797 */
798 while (*pbNameTable != 0 && cbNameTable > 0)
799 {
800 const KU8 cbName = *pbNameTable;
801
802 cbNameTable -= cbName + 1 + 2;
803 if (cbNameTable < 0)
804 break;
805
806 if ( cbName == cbSymbol8Bit
807 && !kHlpMemComp(pbNameTable + 1, pchSymbol, cbName))
808 return pbNameTable;
809
810 /* next entry */
811 pbNameTable += cbName + 1 + 2;
812 }
813
814 return NULL;
815}
816
817
818/**
819 * Deal with a forwarder entry.
820 *
821 * @returns See kLdrModQuerySymbol.
822 * @param pModLX The LX module interpreter instance.
823 * @param pEntry The forwarder entry.
824 * @param pfnGetForwarder The callback for resolving forwarder symbols. (optional)
825 * @param pvUser The user argument for the callback.
826 * @param puValue Where to put the value. (optional)
827 * @param pfKind Where to put the symbol kind. (optional)
828 */
829static int kldrModLXDoForwarderQuery(PKLDRMODLX pModLX, const struct e32_entry *pEntry,
830 PFNKLDRMODGETIMPORT pfnGetForwarder, void *pvUser, PKLDRADDR puValue, KU32 *pfKind)
831{
832 int rc;
833 KU32 iSymbol;
834 const char *pchSymbol;
835 KU8 cchSymbol;
836
837 if (!pfnGetForwarder)
838 return KLDR_ERR_FORWARDER_SYMBOL;
839
840 /*
841 * Validate the entry import module ordinal.
842 */
843 if ( !pEntry->e32_variant.e32_fwd.modord
844 || pEntry->e32_variant.e32_fwd.modord > pModLX->Hdr.e32_impmodcnt)
845 return KLDR_ERR_LX_BAD_FORWARDER;
846
847 /*
848 * Figure out the parameters.
849 */
850 if (pEntry->e32_flags & FWD_ORDINAL)
851 {
852 iSymbol = pEntry->e32_variant.e32_fwd.value;
853 pchSymbol = NULL; /* no symbol name. */
854 cchSymbol = 0;
855 }
856 else
857 {
858 const KU8 *pbName;
859
860 /* load the fixup section if necessary. */
861 if (!pModLX->pbImportProcs)
862 {
863 rc = kldrModLXDoLoadFixupSection(pModLX);
864 if (rc)
865 return rc;
866 }
867
868 /* Make name pointer. */
869 pbName = pModLX->pbImportProcs + pEntry->e32_variant.e32_fwd.value;
870 if ( pbName >= pModLX->pbFixupSectionLast
871 || pbName < pModLX->pbFixupSection
872 || !*pbName)
873 return KLDR_ERR_LX_BAD_FORWARDER;
874
875
876 /* check for '#' name. */
877 if (pbName[1] == '#')
878 {
879 KU8 cbLeft = *pbName;
880 const KU8 *pb = pbName + 1;
881 unsigned uBase;
882
883 /* base detection */
884 uBase = 10;
885 if ( cbLeft > 1
886 && pb[1] == '0'
887 && (pb[2] == 'x' || pb[2] == 'X'))
888 {
889 uBase = 16;
890 pb += 2;
891 cbLeft -= 2;
892 }
893
894 /* ascii to integer */
895 iSymbol = 0;
896 while (cbLeft-- > 0)
897 {
898 /* convert char to digit. */
899 unsigned uDigit = *pb++;
900 if (uDigit >= '0' && uDigit <= '9')
901 uDigit -= '0';
902 else if (uDigit >= 'a' && uDigit <= 'z')
903 uDigit = uDigit - 'a' + 10;
904 else if (uDigit >= 'A' && uDigit <= 'Z')
905 uDigit = uDigit - 'A' + 10;
906 else if (!uDigit)
907 break;
908 else
909 return KLDR_ERR_LX_BAD_FORWARDER;
910 if (uDigit >= uBase)
911 return KLDR_ERR_LX_BAD_FORWARDER;
912
913 /* insert the digit */
914 iSymbol *= uBase;
915 iSymbol += uDigit;
916 }
917 if (!iSymbol)
918 return KLDR_ERR_LX_BAD_FORWARDER;
919
920 pchSymbol = NULL; /* no symbol name. */
921 cchSymbol = 0;
922 }
923 else
924 {
925 pchSymbol = (char *)pbName + 1;
926 cchSymbol = *pbName;
927 iSymbol = NIL_KLDRMOD_SYM_ORDINAL;
928 }
929 }
930
931 /*
932 * Resolve the forwarder.
933 */
934 rc = pfnGetForwarder(pModLX->pMod, pEntry->e32_variant.e32_fwd.modord - 1, iSymbol, pchSymbol, cchSymbol, NULL, puValue, pfKind, pvUser);
935 if (!rc && pfKind)
936 *pfKind |= KLDRSYMKIND_FORWARDER;
937 return rc;
938}
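/*
 * Illustrative note (added commentary, not part of the original source): a
 * forwarder thus resolves in one of three ways: with FWD_ORDINAL set the
 * 32-bit value is the target ordinal; otherwise the value is an offset into
 * the import procedure name table, where a pascal string like "#42" (or
 * "#0x2A") is parsed as an ordinal and anything else, e.g. "SomeExport", is
 * passed to pfnGetForwarder as a symbol name. (The example names are made up.)
 */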
939
940
941/**
942 * Loads the fixup section from the executable image.
943 *
944 * The fixup section isn't loaded until it's accessed. It's also freed by kLdrModDone().
945 *
946 * @returns 0 on success, non-zero kLdr or native status code on failure.
947 * @param pModLX The LX module interpreter instance.
948 */
949static int kldrModLXDoLoadFixupSection(PKLDRMODLX pModLX)
950{
951 int rc;
952 KU32 off;
953 void *pv;
954
955 pv = kHlpAlloc(pModLX->Hdr.e32_fixupsize);
956 if (!pv)
957 return KERR_NO_MEMORY;
958
959 off = pModLX->Hdr.e32_objtab + pModLX->Hdr.e32_ldrsize;
960 rc = kRdrRead(pModLX->pMod->pRdr, pv, pModLX->Hdr.e32_fixupsize,
961 off + pModLX->offHdr);
962 if (!rc)
963 {
964 pModLX->pbFixupSection = pv;
965 pModLX->pbFixupSectionLast = pModLX->pbFixupSection + pModLX->Hdr.e32_fixupsize;
966 KLDRMODLX_ASSERT(!pModLX->paoffPageFixups);
967 if (pModLX->Hdr.e32_fpagetab)
968 pModLX->paoffPageFixups = (const KU32 *)(pModLX->pbFixupSection + pModLX->Hdr.e32_fpagetab - off);
969 KLDRMODLX_ASSERT(!pModLX->pbFixupRecs);
970 if (pModLX->Hdr.e32_frectab)
971 pModLX->pbFixupRecs = pModLX->pbFixupSection + pModLX->Hdr.e32_frectab - off;
972 KLDRMODLX_ASSERT(!pModLX->pbImportMods);
973 if (pModLX->Hdr.e32_impmod)
974 pModLX->pbImportMods = pModLX->pbFixupSection + pModLX->Hdr.e32_impmod - off;
975 KLDRMODLX_ASSERT(!pModLX->pbImportProcs);
976 if (pModLX->Hdr.e32_impproc)
977 pModLX->pbImportProcs = pModLX->pbFixupSection + pModLX->Hdr.e32_impproc - off;
978 }
979 else
980 kHlpFree(pv);
981 return rc;
982}
983
984
985/** @copydoc kLdrModEnumSymbols */
986static int kldrModLXEnumSymbols(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress,
987 KU32 fFlags, PFNKLDRMODENUMSYMS pfnCallback, void *pvUser)
988{
989 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
990 const struct b32_bundle *pBundle;
991 KU32 iOrdinal;
992 int rc = 0;
993
994 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
995
996 /*
997 * Enumerate the entry table.
998 * (The entry table is made up of bundles of similar exports.)
999 */
1000 iOrdinal = 1;
1001 pBundle = (const struct b32_bundle *)pModLX->pbEntryTab;
1002 while (pBundle->b32_cnt && iOrdinal)
1003 {
1004 static const KSIZE s_cbEntry[] = { 0, 3, 5, 5, 7 };
1005
1006 /*
1007 * Enum the entries in the bundle.
1008 */
1009 if (pBundle->b32_type != EMPTY)
1010 {
1011 const struct e32_entry *pEntry;
1012 KSIZE cbEntry;
1013 KLDRADDR BundleRVA;
1014 unsigned cLeft;
1015
1016
1017 /* Validate the bundle. */
1018 switch (pBundle->b32_type)
1019 {
1020 case ENTRY16:
1021 case GATE16:
1022 case ENTRY32:
1023 if ( pBundle->b32_obj <= 0
1024 || pBundle->b32_obj > pMod->cSegments)
1025 return KLDR_ERR_LX_BAD_BUNDLE;
1026 BundleRVA = pMod->aSegments[pBundle->b32_obj - 1].RVA;
1027 break;
1028
1029 case ENTRYFWD:
1030 BundleRVA = 0;
1031 break;
1032
1033 default:
1034 /* anyone actually using TYPEINFO will end up here. */
1035 KLDRMODLX_ASSERT(!"Bad bundle type");
1036 return KLDR_ERR_LX_BAD_BUNDLE;
1037 }
1038
1039 /* iterate the bundle entries. */
1040 cbEntry = s_cbEntry[pBundle->b32_type];
1041 pEntry = (const struct e32_entry *)(pBundle + 1);
1042 cLeft = pBundle->b32_cnt;
1043 while (cLeft-- > 0)
1044 {
1045 KLDRADDR uValue;
1046 KU32 fKind;
1047 int fFoundName;
1048 const KU8 *pbName;
1049
1050 /*
1051 * Calc the symbol value and kind.
1052 */
1053 switch (pBundle->b32_type)
1054 {
1055 /* e32_flags + a 16-bit offset. */
1056 case ENTRY16:
1057 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_offset.offset16;
1058 fKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_NO_TYPE;
1059 break;
1060
1061 /* e32_flags + a 16-bit offset + a 16-bit callgate selector. */
1062 case GATE16:
1063 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_callgate.offset;
1064 fKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_CODE;
1065 break;
1066
1067 /* e32_flags + a 32-bit offset. */
1068 case ENTRY32:
1069 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_offset.offset32;
1070 fKind = KLDRSYMKIND_32BIT;
1071 break;
1072
1073 /* e32_flags + 16-bit import module ordinal + a 32-bit procname or ordinal. */
1074 case ENTRYFWD:
1075 uValue = 0; /** @todo implement enumeration of forwarders properly. */
1076 fKind = KLDRSYMKIND_FORWARDER;
1077 break;
1078
1079 default: /* shut up gcc. */
1080 uValue = 0;
1081 fKind = KLDRSYMKIND_NO_BIT | KLDRSYMKIND_NO_TYPE;
1082 break;
1083 }
1084
1085 /*
1086 * Any symbol names?
1087 */
1088 fFoundName = 0;
1089
1090 /* resident name table. */
1091 pbName = pModLX->pbResNameTab;
1092 if (pbName)
1093 {
1094 do
1095 {
1096 pbName = kldrModLXDoNameTableLookupByOrdinal(pbName, pModLX->pbLoaderSectionLast - pbName + 1, iOrdinal);
1097 if (!pbName)
1098 break;
1099 fFoundName = 1;
1100 rc = pfnCallback(pMod, iOrdinal, (const char *)pbName + 1, *pbName, NULL, uValue, fKind, pvUser);
1101 if (rc)
1102 return rc;
1103
1104 /* skip to the next entry */
1105 pbName += 1 + *pbName + 2;
1106 } while (pbName < pModLX->pbLoaderSectionLast);
1107 }
1108
1109 /* non-resident name table. */
1110 pbName = pModLX->pbNonResNameTab;
1111 /** @todo lazy load the non-resident name table. */
1112 if (pbName)
1113 {
1114 do
1115 {
1116 pbName = kldrModLXDoNameTableLookupByOrdinal(pbName, pModLX->pbNonResNameTabLast - pbName + 1, iOrdinal);
1117 if (!pbName)
1118 break;
1119 fFoundName = 1;
1120 rc = pfnCallback(pMod, iOrdinal, (const char *)pbName + 1, *pbName, NULL, uValue, fKind, pvUser);
1121 if (rc)
1122 return rc;
1123
1124 /* skip to the next entry */
1125 pbName += 1 + *pbName + 2;
1126 } while (pbName < pModLX->pbNonResNameTabLast);
1127 }
1128
1129 /*
1130 * If no names, call once with the ordinal only.
1131 */
1132 if (!fFoundName)
1133 {
1134 rc = pfnCallback(pMod, iOrdinal, NULL, 0, NULL, uValue, fKind, pvUser);
1135 if (rc)
1136 return rc;
1137 }
1138
1139 /* next */
1140 iOrdinal++;
1141 pEntry = (const struct e32_entry *)((KUPTR)pEntry + cbEntry);
1142 }
1143 }
1144
1145 /*
1146 * The next bundle.
1147 */
1148 if (pBundle->b32_type > ENTRYFWD)
1149 {
1150 KLDRMODLX_ASSERT(!"Bad type"); /** @todo figure out TYPEINFO. */
1151 return KLDR_ERR_LX_BAD_BUNDLE;
1152 }
1153 if (pBundle->b32_type == 0)
1154 pBundle = (const struct b32_bundle *)((const KU8 *)pBundle + 2);
1155 else
1156 pBundle = (const struct b32_bundle *)((const KU8 *)(pBundle + 1) + s_cbEntry[pBundle->b32_type] * pBundle->b32_cnt);
1157 }
1158
1159 return 0;
1160}
1161
1162
1163/**
1164 * Lookup a name table entry by ordinal.
1165 *
1166 * @returns Pointer to the name table entry if found.
1167 * @returns NULL if not found.
1168 * @param pbNameTable Pointer to the name table that should be searched.
1169 * @param cbNameTable The size of the name table.
1170 * @param iOrdinal The ordinal to search for.
1171 */
1172static const KU8 *kldrModLXDoNameTableLookupByOrdinal(const KU8 *pbNameTable, KI32 cbNameTable, KU32 iOrdinal)
1173{
1174 while (*pbNameTable != 0 && cbNameTable > 0)
1175 {
1176 const KU8 cbName = *pbNameTable;
1177 KU32 iName;
1178
1179 cbNameTable -= cbName + 1 + 2;
1180 if (cbNameTable < 0)
1181 break;
1182
1183 iName = *(pbNameTable + cbName + 1)
1184 | ((unsigned)*(pbNameTable + cbName + 2) << 8);
1185 if (iName == iOrdinal)
1186 return pbNameTable;
1187
1188 /* next entry */
1189 pbNameTable += cbName + 1 + 2;
1190 }
1191
1192 return NULL;
1193}
1194
1195
1196/** @copydoc kLdrModGetImport */
1197static int kldrModLXGetImport(PKLDRMOD pMod, const void *pvBits, KU32 iImport, char *pszName, KSIZE cchName)
1198{
1199 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1200 const KU8 *pb;
1201 int rc;
1202
1203 /*
1204 * Validate
1205 */
1206 if (iImport >= pModLX->Hdr.e32_impmodcnt)
1207 return KLDR_ERR_IMPORT_ORDINAL_OUT_OF_BOUNDS;
1208
1209 /*
1210 * Lazy loading the fixup section.
1211 */
1212 if (!pModLX->pbImportMods)
1213 {
1214 rc = kldrModLXDoLoadFixupSection(pModLX);
1215 if (rc)
1216 return rc;
1217 }
1218
1219 /*
1220 * Iterate the module import table until we reach the requested import ordinal.
1221 */
1222 pb = pModLX->pbImportMods;
1223 while (iImport-- > 0)
1224 pb += *pb + 1;
1225
1226 /*
1227 * Copy out the result.
1228 */
1229 if (*pb < cchName)
1230 {
1231 kHlpMemCopy(pszName, pb + 1, *pb);
1232 pszName[*pb] = '\0';
1233 rc = 0;
1234 }
1235 else
1236 {
1237 kHlpMemCopy(pszName, pb + 1, cchName);
1238 if (cchName)
1239 pszName[cchName - 1] = '\0';
1240 rc = KERR_BUFFER_OVERFLOW;
1241 }
1242
1243 return rc;
1244}
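/*
 * Illustrative note (added commentary, not part of the original source): the
 * import module name table is just a run of length prefixed strings, e.g.
 *      08 "DOSCALLS"  03 "MSG"  08 "VIOCALLS" ...
 * so import 0 above is the first string, import 1 the second, and so on.
 * (The module names are examples only.)
 */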
1245
1246
1247/** @copydoc kLdrModNumberOfImports */
1248static KI32 kldrModLXNumberOfImports(PKLDRMOD pMod, const void *pvBits)
1249{
1250 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1251 return pModLX->Hdr.e32_impmodcnt;
1252}
1253
1254
1255/** @copydoc kLdrModGetStackInfo */
1256static int kldrModLXGetStackInfo(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress, PKLDRSTACKINFO pStackInfo)
1257{
1258 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1259 const KU32 i = pModLX->Hdr.e32_stackobj;
1260
1261 if ( i
1262 && i <= pMod->cSegments
1263 && pModLX->Hdr.e32_esp <= pMod->aSegments[i - 1].LinkAddress + pMod->aSegments[i - 1].cb
1264 && pModLX->Hdr.e32_stacksize
1265 && pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize >= pMod->aSegments[i - 1].LinkAddress)
1266 {
1267
1268 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
1269 pStackInfo->LinkAddress = pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize;
1270 pStackInfo->Address = BaseAddress
1271 + pMod->aSegments[i - 1].RVA
1272 + pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize - pMod->aSegments[i - 1].LinkAddress;
1273 }
1274 else
1275 {
1276 pStackInfo->Address = NIL_KLDRADDR;
1277 pStackInfo->LinkAddress = NIL_KLDRADDR;
1278 }
1279 pStackInfo->cbStack = pModLX->Hdr.e32_stacksize;
1280 pStackInfo->cbStackThread = 0;
1281
1282 return 0;
1283}
1284
1285
1286/** @copydoc kLdrModQueryMainEntrypoint */
1287static int kldrModLXQueryMainEntrypoint(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress, PKLDRADDR pMainEPAddress)
1288{
1289 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1290
1291 /*
1292 * Convert the address from the header.
1293 */
1294 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
1295 *pMainEPAddress = pModLX->Hdr.e32_startobj
1296 && pModLX->Hdr.e32_startobj <= pMod->cSegments
1297 && pModLX->Hdr.e32_eip < pMod->aSegments[pModLX->Hdr.e32_startobj - 1].cb
1298 ? BaseAddress + pMod->aSegments[pModLX->Hdr.e32_startobj - 1].RVA + pModLX->Hdr.e32_eip
1299 : NIL_KLDRADDR;
1300 return 0;
1301}
1302
1303
1304/** @copydoc kLdrModEnumDbgInfo */
1305static int kldrModLXEnumDbgInfo(PKLDRMOD pMod, const void *pvBits, PFNKLDRENUMDBG pfnCallback, void *pvUser)
1306{
1307 /*PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;*/
1308
1309 /*
1310 * Quit immediately if no debug info.
1311 */
1312 if (kldrModLXHasDbgInfo(pMod, pvBits))
1313 return 0;
1314#if 0
1315 /*
1316 * Read the debug info and look for familiar magics and structures.
1317 */
1318 /** @todo */
1319#endif
1320
1321 return 0;
1322}
1323
1324
1325/** @copydoc kLdrModHasDbgInfo */
1326static int kldrModLXHasDbgInfo(PKLDRMOD pMod, const void *pvBits)
1327{
1328 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1329
1330 /*
1331 * Don't currently bother with linkers which don't advertise it in the header.
1332 */
1333 if ( !pModLX->Hdr.e32_debuginfo
1334 || !pModLX->Hdr.e32_debuglen)
1335 return KLDR_ERR_NO_DEBUG_INFO;
1336 return 0;
1337}
1338
1339
1340/** @copydoc kLdrModMap */
1341static int kldrModLXMap(PKLDRMOD pMod)
1342{
1343 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1344 unsigned fFixed;
1345 void *pvBase;
1346 int rc;
1347
1348 /*
1349 * Already mapped?
1350 */
1351 if (pModLX->pvMapping)
1352 return KLDR_ERR_ALREADY_MAPPED;
1353
1354 /*
1355 * Allocate memory for it.
1356 */
1357 /* fixed image? */
1358 fFixed = pMod->enmType == KLDRTYPE_EXECUTABLE_FIXED
1359 || pMod->enmType == KLDRTYPE_SHARED_LIBRARY_FIXED;
1360 if (!fFixed)
1361 pvBase = NULL;
1362 else
1363 {
1364 pvBase = (void *)(KUPTR)pMod->aSegments[0].LinkAddress;
1365 if ((KUPTR)pvBase != pMod->aSegments[0].LinkAddress)
1366 return KLDR_ERR_ADDRESS_OVERFLOW;
1367 }
1368 rc = kHlpPageAlloc(&pvBase, pModLX->cbMapped, KPROT_EXECUTE_READWRITE, fFixed);
1369 if (rc)
1370 return rc;
1371
1372 /*
1373 * Load the bits, apply page protection, and update the segment table.
1374 */
1375 rc = kldrModLXDoLoadBits(pModLX, pvBase);
1376 if (!rc)
1377 rc = kldrModLXDoProtect(pModLX, pvBase, 0 /* protect */);
1378 if (!rc)
1379 {
1380 KU32 i;
1381 for (i = 0; i < pMod->cSegments; i++)
1382 {
1383 if (pMod->aSegments[i].RVA != NIL_KLDRADDR)
1384 pMod->aSegments[i].MapAddress = (KUPTR)pvBase + (KUPTR)pMod->aSegments[i].RVA;
1385 }
1386 pModLX->pvMapping = pvBase;
1387 }
1388 else
1389 kHlpPageFree(pvBase, pModLX->cbMapped);
1390 return rc;
1391}
1392
1393
1394/**
1395 * Loads the LX pages into the specified memory mapping.
1396 *
1397 * @returns 0 on success.
1398 * @returns non-zero kLdr or OS status code on failure.
1399 *
1400 * @param pModLX The LX module interpreter instance.
1401 * @param pvBits Where to load the bits.
1402 */
1403static int kldrModLXDoLoadBits(PKLDRMODLX pModLX, void *pvBits)
1404{
1405 const PKRDR pRdr = pModLX->pMod->pRdr;
1406 KU8 *pbTmpPage = NULL;
1407 int rc = 0;
1408 KU32 i;
1409
1410 /*
1411 * Iterate the segments.
1412 */
1413 for (i = 0; i < pModLX->Hdr.e32_objcnt; i++)
1414 {
1415 const struct o32_obj * const pObj = &pModLX->paObjs[i];
1416 const KU32 cPages = pModLX->pMod->aSegments[i].cbMapped / OBJPAGELEN;
1417 KU32 iPage;
1418 KU8 *pbPage = (KU8 *)pvBits + (KUPTR)pModLX->pMod->aSegments[i].RVA;
1419
1420 /*
1421 * Iterate the page map pages.
1422 */
1423 for (iPage = 0; !rc && iPage < pObj->o32_mapsize; iPage++, pbPage += OBJPAGELEN)
1424 {
1425 const struct o32_map *pMap = &pModLX->paPageMappings[iPage + pObj->o32_pagemap - 1];
1426 switch (pMap->o32_pageflags)
1427 {
1428 case VALID:
1429 if (pMap->o32_pagesize == OBJPAGELEN)
1430 rc = kRdrRead(pRdr, pbPage, OBJPAGELEN,
1431 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1432 else if (pMap->o32_pagesize < OBJPAGELEN)
1433 {
1434 rc = kRdrRead(pRdr, pbPage, pMap->o32_pagesize,
1435 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1436 kHlpMemSet(pbPage + pMap->o32_pagesize, 0, OBJPAGELEN - pMap->o32_pagesize);
1437 }
1438 else
1439 rc = KLDR_ERR_LX_BAD_PAGE_MAP;
1440 break;
1441
1442 case ITERDATA:
1443 case ITERDATA2:
1444 /* make sure we've got a temp page. */
1445 if (!pbTmpPage)
1446 {
1447 pbTmpPage = kHlpAlloc(OBJPAGELEN + 256);
1448 if (!pbTmpPage)
1449 break;
1450 }
1451 /* validate the size. */
1452 if (pMap->o32_pagesize > OBJPAGELEN + 252)
1453 {
1454 rc = KLDR_ERR_LX_BAD_PAGE_MAP;
1455 break;
1456 }
1457
1458 /* read it and ensure 4 extra zero bytes. */
1459 rc = kRdrRead(pRdr, pbTmpPage, pMap->o32_pagesize,
1460 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1461 if (rc)
1462 break;
1463 kHlpMemSet(pbTmpPage + pMap->o32_pagesize, 0, 4);
1464
1465 /* unpack it into the image page. */
1466 if (pMap->o32_pageflags == ITERDATA2)
1467 rc = kldrModLXDoIterData2Unpacking(pbPage, pbTmpPage, pMap->o32_pagesize);
1468 else
1469 rc = kldrModLXDoIterDataUnpacking(pbPage, pbTmpPage, pMap->o32_pagesize);
1470 break;
1471
1472 case INVALID: /* we're probably not dealing correctly with INVALID pages... */
1473 case ZEROED:
1474 kHlpMemSet(pbPage, 0, OBJPAGELEN);
1475 break;
1476
1477 case RANGE:
1478 KLDRMODLX_ASSERT(!"RANGE");
1479 default:
1480 rc = KLDR_ERR_LX_BAD_PAGE_MAP;
1481 break;
1482 }
1483 }
1484 if (rc)
1485 break;
1486
1487 /*
1488 * Zero the remaining pages.
1489 */
1490 if (iPage < cPages)
1491 kHlpMemSet(pbPage, 0, (cPages - iPage) * OBJPAGELEN);
1492 }
1493
1494 if (pbTmpPage)
1495 kHlpFree(pbTmpPage);
1496 return rc;
1497}
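/*
 * Illustrative note (added commentary, not part of the original source): for
 * a VALID page the file position of its raw data is
 *      e32_datapage + (o32_pagedataoffset << e32_pageshift)
 * i.e. the page map stores data offsets in units of 2^e32_pageshift bytes
 * relative to the start of the data pages area, exactly as read above.
 */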
1498
1499
1500/**
1501 * Unpacks iterdata (aka EXEPACK).
1502 *
1503 * @returns 0 on success, non-zero kLdr status code on failure.
1504 * @param pbDst Where to put the uncompressed data. (Assumes OBJPAGELEN size.)
1505 * @param pbSrc The compressed source data.
1506 * @param cbSrc The file size of the compressed data. The source buffer
1507 * contains 4 additional zero bytes.
1508 */
1509static int kldrModLXDoIterDataUnpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc)
1510{
1511 const struct LX_Iter *pIter = (const struct LX_Iter *)pbSrc;
1512 int cbDst = OBJPAGELEN;
1513
1514 /* Validate size of data. */
1515 if (cbSrc >= OBJPAGELEN - 2)
1516 return KLDR_ERR_LX_BAD_ITERDATA;
1517
1518 /*
1519 * Expand the page.
1520 */
1521 while (cbSrc > 0 && pIter->LX_nIter)
1522 {
1523 if (pIter->LX_nBytes == 1)
1524 {
1525 /*
1526 * Special case - one databyte.
1527 */
1528 cbDst -= pIter->LX_nIter;
1529 if (cbDst < 0)
1530 return KLDR_ERR_LX_BAD_ITERDATA;
1531
1532 cbSrc -= 4 + 1;
1533 if (cbSrc < -4)
1534 return KLDR_ERR_LX_BAD_ITERDATA;
1535
1536 kHlpMemSet(pbDst, pIter->LX_Iterdata, pIter->LX_nIter);
1537 pbDst += pIter->LX_nIter;
1538 pIter++;
1539 }
1540 else
1541 {
1542 /*
1543 * General.
1544 */
1545 int i;
1546
1547 cbDst -= pIter->LX_nIter * pIter->LX_nBytes;
1548 if (cbDst < 0)
1549 return KLDR_ERR_LX_BAD_ITERDATA;
1550
1551 cbSrc -= 4 + pIter->LX_nBytes;
1552 if (cbSrc < -4)
1553 return KLDR_ERR_LX_BAD_ITERDATA;
1554
1555 for (i = pIter->LX_nIter; i > 0; i--, pbDst += pIter->LX_nBytes)
1556 kHlpMemCopy(pbDst, &pIter->LX_Iterdata, pIter->LX_nBytes);
1557 pIter = (struct LX_Iter *)((char*)pIter + 4 + pIter->LX_nBytes);
1558 }
1559 }
1560
1561 /*
1562 * Zero remainder of the page.
1563 */
1564 if (cbDst > 0)
1565 kHlpMemSet(pbDst, 0, cbDst);
1566
1567 return 0;
1568}
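/*
 * Worked example (added commentary, not part of the original source, assuming
 * the usual LX_Iter layout of a 16-bit iteration count followed by a 16-bit
 * byte count and the data bytes): the six file bytes
 *      03 00  02 00  'A' 'B'
 * describe 3 iterations of the 2 byte pattern "AB" and expand to "ABABAB"
 * in the output page; the remainder of the page is zero filled.
 */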
1569
1570
1571/**
1572 * Unpacks iterdata (aka EXEPACK).
1573 *
1574 * @returns 0 on success, non-zero kLdr status code on failure.
1575 * @param pbDst Where to put the uncompressed data. (Assumes OBJPAGELEN size.)
1576 * @param pbSrc The compressed source data.
1577 * @param cbSrc The file size of the compressed data. The source buffer
1578 * contains 4 additional zero bytes.
1579 */
1580static int kldrModLXDoIterData2Unpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc)
1581{
1582 int cbDst = OBJPAGELEN;
1583
1584 while (cbSrc > 0)
1585 {
1586 /*
1587 * Bits 0 and 1 are the encoding type.
1588 */
1589 switch (*pbSrc & 0x03)
1590 {
1591 /*
1592 *
1593 * 0 1 2 3 4 5 6 7
1594 * type | |
1595 * ----------------
1596 * cb <cb bytes of data>
1597 *
1598 * Bits 2-7 are, if not zero, the length of an uncompressed run
1599 * starting at the following byte.
1600 *
1601 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
1602 * type | | | | | |
1603 * ---------------- ---------------------- -----------------------
1604 * zero cb char to multiply
1605 *
1606 * If the bits are zero, the following two bytes describe a 1 byte iteration
1607 * run. The first byte is the count, the second is the byte to copy. A count of
1608 * zero means end of data, and we simply stop. In that case the rest of the data
1609 * should be zero.
1610 */
1611 case 0:
1612 {
1613 if (*pbSrc)
1614 {
1615 const int cb = *pbSrc >> 2;
1616 cbDst -= cb;
1617 if (cbDst < 0)
1618 return KLDR_ERR_LX_BAD_ITERDATA2;
1619 cbSrc -= cb + 1;
1620 if (cbSrc < 0)
1621 return KLDR_ERR_LX_BAD_ITERDATA2;
1622 kHlpMemCopy(pbDst, ++pbSrc, cb);
1623 pbDst += cb;
1624 pbSrc += cb;
1625 }
1626 else if (cbSrc < 2)
1627 return KLDR_ERR_LX_BAD_ITERDATA2;
1628 else
1629 {
1630 const int cb = pbSrc[1];
1631 if (!cb)
1632 goto l_endloop;
1633 cbDst -= cb;
1634 if (cbDst < 0)
1635 return KLDR_ERR_LX_BAD_ITERDATA2;
1636 cbSrc -= 3;
1637 if (cbSrc < 0)
1638 return KLDR_ERR_LX_BAD_ITERDATA2;
1639 kHlpMemSet(pbDst, pbSrc[2], cb);
1640 pbDst += cb;
1641 pbSrc += 3;
1642 }
1643 break;
1644 }
1645
1646
1647 /*
1648 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1649 * type | | | | | |
1650 * ---- ------- -------------------------
1651 * cb1 cb2 - 3 offset <cb1 bytes of data>
1652 *
1653 * Two bytes laid out as described above, followed by cb1 bytes of data to be copied.
1654 * The cb2(+3) and offset describe an amount of data to be copied from the expanded
1655 * data relative to the current position. The data is copied as you would expect.
1656 */
1657 case 1:
1658 {
1659 cbSrc -= 2;
1660 if (cbSrc < 0)
1661 return KLDR_ERR_LX_BAD_ITERDATA2;
1662 else
1663 {
1664 const unsigned off = ((unsigned)pbSrc[1] << 1) | (*pbSrc >> 7);
1665 const int cb1 = (*pbSrc >> 2) & 3;
1666 const int cb2 = ((*pbSrc >> 4) & 7) + 3;
1667
1668 pbSrc += 2;
1669 cbSrc -= cb1;
1670 if (cbSrc < 0)
1671 return KLDR_ERR_LX_BAD_ITERDATA2;
1672 cbDst -= cb1;
1673 if (cbDst < 0)
1674 return KLDR_ERR_LX_BAD_ITERDATA2;
1675 kHlpMemCopy(pbDst, pbSrc, cb1);
1676 pbDst += cb1;
1677 pbSrc += cb1;
1678
1679 if (off > OBJPAGELEN - (unsigned)cbDst)
1680 return KLDR_ERR_LX_BAD_ITERDATA2;
1681 cbDst -= cb2;
1682 if (cbDst < 0)
1683 return KLDR_ERR_LX_BAD_ITERDATA2;
1684 kHlpMemMove(pbDst, pbDst - off, cb2);
1685 pbDst += cb2;
1686 }
1687 break;
1688 }
1689
1690
1691 /*
1692 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1693 * type | | | |
1694 * ---- ----------------------------------
1695 * cb-3 offset
1696 *
1697 * Two bytes laid out as described above.
1698 * The cb(+3) and offset describe an amount of data to be copied from the expanded
1699 * data relative to the current position.
1700 *
1701 * If offset == 1 the data is not copied as expected, but in the memcpyw manner.
1702 */
1703 case 2:
1704 {
1705 cbSrc -= 2;
1706 if (cbSrc < 0)
1707 return KLDR_ERR_LX_BAD_ITERDATA2;
1708 else
1709 {
1710 const unsigned off = ((unsigned)pbSrc[1] << 4) | (*pbSrc >> 4);
1711 const int cb = ((*pbSrc >> 2) & 3) + 3;
1712
1713 pbSrc += 2;
1714 if (off > OBJPAGELEN - (unsigned)cbDst)
1715 return KLDR_ERR_LX_BAD_ITERDATA2;
1716 cbDst -= cb;
1717 if (cbDst < 0)
1718 return KLDR_ERR_LX_BAD_ITERDATA2;
1719 kLdrModLXMemCopyW(pbDst, pbDst - off, cb);
1720 pbDst += cb;
1721 }
1722 break;
1723 }
1724
1725
1726 /*
1727 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
1728 * type | | | | | |
1729 * ---------- ---------------- ----------------------------------
1730 * cb1 cb2 offset <cb1 bytes of data>
1731 *
1732 * Three bytes laid out as described above, followed by cb1 bytes of data to be copied.
1733 * The cb2 and offset describe an amount of data to be copied from the expanded
1734 * data relative to the current position.
1735 *
1736 * If offset == 1 the data is not copied as expected, but in the memcpyw manner.
1737 */
1738 case 3:
1739 {
1740 cbSrc -= 3;
1741 if (cbSrc < 0)
1742 return KLDR_ERR_LX_BAD_ITERDATA2;
1743 else
1744 {
1745 const int cb1 = (*pbSrc >> 2) & 0xf;
1746 const int cb2 = ((pbSrc[1] & 0xf) << 2) | (*pbSrc >> 6);
1747 const unsigned off = ((unsigned)pbSrc[2] << 4) | (pbSrc[1] >> 4);
1748
1749 pbSrc += 3;
1750 cbSrc -= cb1;
1751 if (cbSrc < 0)
1752 return KLDR_ERR_LX_BAD_ITERDATA2;
1753 cbDst -= cb1;
1754 if (cbDst < 0)
1755 return KLDR_ERR_LX_BAD_ITERDATA2;
1756 kHlpMemCopy(pbDst, pbSrc, cb1);
1757 pbDst += cb1;
1758 pbSrc += cb1;
1759
1760 if (off > OBJPAGELEN - (unsigned)cbDst)
1761 return KLDR_ERR_LX_BAD_ITERDATA2;
1762 cbDst -= cb2;
1763 if (cbDst < 0)
1764 return KLDR_ERR_LX_BAD_ITERDATA2;
1765 kLdrModLXMemCopyW(pbDst, pbDst - off, cb2);
1766 pbDst += cb2;
1767 }
1768 break;
1769 }
1770 } /* type switch. */
1771 } /* unpack loop */
1772
1773l_endloop:
1774
1775
1776 /*
1777 * Zero remainder of the page.
1778 */
1779 if (cbDst > 0)
1780 kHlpMemSet(pbDst, 0, cbDst);
1781
1782 return 0;
1783}
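/*
 * Worked example (added commentary, not part of the original source): the
 * type 2 pair 0x26 0x00 decodes as cb = ((0x26 >> 2) & 3) + 3 = 4 and
 * off = (0x00 << 4) | (0x26 >> 4) = 2, i.e. "copy 4 bytes from 2 bytes back";
 * if the expanded page currently ends in "AB" this appends "ABAB", which is
 * how the format encodes short repeating runs cheaply.
 */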
1784
1785
1786/**
1787 * Special memcpy employed by the iterdata2 algorithm.
1788 *
1789 * Emulates a 16-bit memcpy (copying 16 bits at a time) and the effects this
1790 * has when the source is very close to the destination.
1791 *
1792 * @param pbDst Destination pointer.
1793 * @param pbSrc Source pointer. Will always be <= pbDst.
1794 * @param cb Amount of data to be copied.
1795 * @remark This assumes that unaligned word and dword access is fine.
1796 */
1797static void kLdrModLXMemCopyW(KU8 *pbDst, const KU8 *pbSrc, int cb)
1798{
1799 switch (pbDst - pbSrc)
1800 {
1801 case 0:
1802 case 1:
1803 case 2:
1804 case 3:
1805 /* 16-bit copy (unaligned) */
1806 if (cb & 1)
1807 *pbDst++ = *pbSrc++;
1808 for (cb >>= 1; cb > 0; cb--, pbDst += 2, pbSrc += 2)
1809 *(KU16 *)pbDst = *(const KU16 *)pbSrc;
1810 break;
1811
1812 default:
1813 /* 32-bit copy (unaligned) */
1814 if (cb & 1)
1815 *pbDst++ = *pbSrc++;
1816 if (cb & 2)
1817 {
1818 *(KU16 *)pbDst = *(const KU16 *)pbSrc;
1819 pbDst += 2;
1820 pbSrc += 2;
1821 }
1822 for (cb >>= 2; cb > 0; cb--, pbDst += 4, pbSrc += 4)
1823 *(KU32 *)pbDst = *(const KU32 *)pbSrc;
1824 break;
1825 }
1826}
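#if 0
/* Quick illustration (added sketch, not part of the original source) of the
 * propagating overlapped copy above: with the source two bytes behind the
 * destination each 16-bit store is re-read by the next one, so a two byte
 * seed is replicated across the destination. */
static void kLdrModLXMemCopyWExample(void)
{
    KU8 ab[8] = { 'A', 'B', 0, 0, 0, 0, 0, 0 };
    kLdrModLXMemCopyW(&ab[2], &ab[0], 6);
    /* ab now holds 'A','B','A','B','A','B','A','B'; a plain memmove()
       would have produced 'A','B','A','B',0,0,0,0 instead. */
}
#endif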
1827
1828
1829/**
1830 * Unprotects or protects the specified image mapping.
1831 *
1832 * @returns 0 on success.
1833 * @returns non-zero kLdr or OS status code on failure.
1834 *
1835 * @param pModLX The LX module interpreter instance.
1836 * @param pvBits The mapping to protect.
1837 * @param fUnprotectOrProtect If 1, unprotect (i.e. make all writable); otherwise
1838 * protect according to the object table.
1839 */
1840static int kldrModLXDoProtect(PKLDRMODLX pModLX, void *pvBits, unsigned fUnprotectOrProtect)
1841{
1842 KU32 i;
1843 PKLDRMOD pMod = pModLX->pMod;
1844
1845 /*
1846 * Change object protection.
1847 */
1848 for (i = 0; i < pMod->cSegments; i++)
1849 {
1850 int rc;
1851 void *pv;
1852 KPROT enmProt;
1853
1854 /* calc new protection. */
1855 enmProt = pMod->aSegments[i].enmProt;
1856 if (fUnprotectOrProtect)
1857 {
1858 switch (enmProt)
1859 {
1860 case KPROT_NOACCESS:
1861 case KPROT_READONLY:
1862 case KPROT_READWRITE:
1863 case KPROT_WRITECOPY:
1864 enmProt = KPROT_READWRITE;
1865 break;
1866 case KPROT_EXECUTE:
1867 case KPROT_EXECUTE_READ:
1868 case KPROT_EXECUTE_READWRITE:
1869 case KPROT_EXECUTE_WRITECOPY:
1870 enmProt = KPROT_EXECUTE_READWRITE;
1871 break;
1872 default:
1873 KLDRMODLX_ASSERT(!"bad enmProt");
1874 return -1;
1875 }
1876 }
1877 else
1878 {
1879 /* copy on write -> normal write. */
1880 if (enmProt == KPROT_EXECUTE_WRITECOPY)
1881 enmProt = KPROT_EXECUTE_READWRITE;
1882 else if (enmProt == KPROT_WRITECOPY)
1883 enmProt = KPROT_READWRITE;
1884 }
1885
1886
1887 /* calc the address and set page protection. */
1888 pv = (KU8 *)pvBits + pMod->aSegments[i].RVA;
1889
1890 rc = kHlpPageProtect(pv, pMod->aSegments[i].cbMapped, enmProt);
1891 if (rc)
1892 return rc;
1893
1894 /** @todo the gap page should be marked NOACCESS! */
1895 }
1896
1897 return 0;
1898}
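/*
 * The usual calling pattern, as seen in kldrModLXReload and
 * kldrModLXFixupMapping below, is:
 *
 *   rc = kldrModLXDoProtect(pModLX, pv, 1);      // make everything writable
 *   if (!rc)
 *   {
 *       // ... modify the mapping ...
 *       rc2 = kldrModLXDoProtect(pModLX, pv, 0); // restore object protections
 *   }
 */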
1899
1900
1901/** @copydoc kLdrModUnmap */
1902static int kldrModLXUnmap(PKLDRMOD pMod)
1903{
1904 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1905 KU32 i;
1906 int rc;
1907
1908 /*
1909 * Mapped?
1910 */
1911 if (!pModLX->pvMapping)
1912 return KLDR_ERR_NOT_MAPPED;
1913
1914 /*
1915 * Free the mapping and update the segments.
1916 */
1917 rc = kHlpPageFree((void *)pModLX->pvMapping, pModLX->cbMapped);
1918 KLDRMODLX_ASSERT(!rc);
1919 pModLX->pvMapping = NULL;
1920
1921 for (i = 0; i < pMod->cSegments; i++)
1922 pMod->aSegments[i].MapAddress = 0;
1923
1924 return rc;
1925}
1926
1927
1928/** @copydoc kLdrModAllocTLS */
1929static int kldrModLXAllocTLS(PKLDRMOD pMod)
1930{
1931 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1932
1933 /* no tls, just do the error checking. */
1934 if (!pModLX->pvMapping)
1935 return KLDR_ERR_NOT_MAPPED;
1936 return 0;
1937}
1938
1939
1940/** @copydoc kLdrModFreeTLS */
1941static void kldrModLXFreeTLS(PKLDRMOD pMod)
1942{
1943 /* no tls. */
1944}
1945
1946
1947/** @copydoc kLdrModReload */
1948static int kldrModLXReload(PKLDRMOD pMod)
1949{
1950 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1951 int rc, rc2;
1952
1953 /*
1954 * Mapped?
1955 */
1956 if (!pModLX->pvMapping)
1957 return KLDR_ERR_NOT_MAPPED;
1958
1959 /*
1960 * Before doing anything we'll have to make all pages writable.
1961 */
1962 rc = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 1 /* unprotect */);
1963 if (rc)
1964 return rc;
1965
1966 /*
1967 * Load the bits again.
1968 */
1969 rc = kldrModLXDoLoadBits(pModLX, (void *)pModLX->pvMapping);
1970
1971 /*
1972 * Restore protection.
1973 */
1974 rc2 = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 0 /* protect */);
1975 if (!rc && rc2)
1976 rc = rc2;
1977 return rc;
1978}
1979
1980
1981/** @copydoc kLdrModFixupMapping */
1982static int kldrModLXFixupMapping(PKLDRMOD pMod, PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser)
1983{
1984 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1985 int rc, rc2;
1986
1987 /*
1988 * Mapped?
1989 */
1990 if (!pModLX->pvMapping)
1991 return KLDR_ERR_NOT_MAPPED;
1992
1993 /*
1994 * Before doing anything we'll have to make all pages writable.
1995 */
1996 rc = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 1 /* unprotect */);
1997 if (rc)
1998 return rc;
1999
2000 /*
2001 * Apply fixups and resolve imports.
2002 */
2003 rc = kldrModLXRelocateBits(pMod, (void *)pModLX->pvMapping, (KUPTR)pModLX->pvMapping,
2004 pMod->aSegments[0].LinkAddress, pfnGetImport, pvUser);
2005
2006 /*
2007 * Restore protection.
2008 */
2009 rc2 = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 0 /* protect */);
2010 if (!rc && rc2)
2011 rc = rc2;
2012 return rc;
2013}
2014
2015
2016/** @copydoc kLdrModCallInit */
2017static int kldrModLXCallInit(PKLDRMOD pMod, KUPTR uHandle)
2018{
2019 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2020 int rc;
2021
2022 /*
2023 * Mapped?
2024 */
2025 if (!pModLX->pvMapping)
2026 return KLDR_ERR_NOT_MAPPED;
2027
2028 /*
2029 * Do TLS callbacks first and then call the init/term function if it's a DLL.
2030 */
2031 if ((pModLX->Hdr.e32_mflags & E32MODMASK) == E32MODDLL)
2032 rc = kldrModLXDoCallDLL(pModLX, 0 /* attach */, uHandle);
2033 else
2034 rc = 0;
2035 return rc;
2036}
2037
2038
2039/**
2040 * Call the DLL entrypoint.
2041 *
2042 * @returns 0 on success.
2043 * @returns KLDR_ERR_MODULE_INIT_FAILED or KLDR_ERR_THREAD_ATTACH_FAILED on failure.
2044 * @param pModLX The LX module interpreter instance.
2045 * @param uOp The operation (DLL_*).
2046 * @param uHandle The module handle to present.
2047 */
2048static int kldrModLXDoCallDLL(PKLDRMODLX pModLX, unsigned uOp, KUPTR uHandle)
2049{
2050 int rc;
2051
2052 /*
2053 * If there is no entrypoint there isn't anything to be done.
2054 */
2055 if ( !pModLX->Hdr.e32_startobj
2056 || pModLX->Hdr.e32_startobj > pModLX->Hdr.e32_objcnt)
2057 return 0;
2058
2059 /*
2060 * Invoke the entrypoint and convert the boolean result to a kLdr status code.
2061 */
2062 rc = kldrModLXDoCall((KUPTR)pModLX->pvMapping
2063 + (KUPTR)pModLX->pMod->aSegments[pModLX->Hdr.e32_startobj - 1].RVA
2064 + pModLX->Hdr.e32_eip,
2065 uHandle, uOp, NULL);
2066 if (rc)
2067 rc = 0;
2068 else if (uOp == 0 /* attach */)
2069 rc = KLDR_ERR_MODULE_INIT_FAILED;
2070 else /* detach: ignore failures */
2071 rc = 0;
2072 return rc;
2073}
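/*
 * Example of the entrypoint address calculation above (numbers are made up):
 * with the mapping at 0x00010000, the start object having an RVA of 0x2000 and
 * e32_eip = 0x123, the entrypoint invoked is 0x00010000 + 0x2000 + 0x123 =
 * 0x00012123.
 */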
2074
2075
2076/**
2077 * Do a 3 parameter callback.
2078 *
2079 * @returns 32-bit callback return.
2080 * @param uEntrypoint The address of the function to be called.
2081 * @param uHandle The first argument, the module handle.
2082 * @param uOp The second argument, the reason we're calling.
2083 * @param pvReserved The third argument, a reserved argument. (figure this one out)
2084 */
2085static KI32 kldrModLXDoCall(KUPTR uEntrypoint, KUPTR uHandle, KU32 uOp, void *pvReserved)
2086{
2087#if defined(__X86__) || defined(__i386__) || defined(_M_IX86)
2088 KI32 rc;
2089/** @todo try/except */
2090
2091 /*
2092 * Paranoia.
2093 */
2094# ifdef __GNUC__
2095 __asm__ __volatile__(
2096 "pushl %2\n\t"
2097 "pushl %1\n\t"
2098 "pushl %0\n\t"
2099 "lea 12(%%esp), %2\n\t"
2100 "call *%3\n\t"
2101 "movl %2, %%esp\n\t"
2102 : "=a" (rc)
2103 : "d" (uOp),
2104 "S" (0),
2105 "c" (uEntrypoint),
2106 "0" (uHandle));
2107# elif defined(_MSC_VER)
2108 __asm {
2109 mov eax, [uHandle]
2110 mov edx, [uOp]
2111 mov ecx, 0
2112 mov ebx, [uEntrypoint]
2113 push edi
2114 mov edi, esp
2115 push ecx
2116 push edx
2117 push eax
2118 call ebx
2119 mov esp, edi
2120 pop edi
2121 mov [rc], eax
2122 }
2123# else
2124# error "port me!"
2125# endif
2126 return rc;
2127
2128#else
2129 return KCPU_ERR_ARCH_CPU_NOT_COMPATIBLE;
2130#endif
2131}
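/*
 * For reference, the thunk above is equivalent to calling an entrypoint with a
 * prototype along these lines (shape inferred from the pushes above, not an
 * official definition):
 *
 *   KI32 Entrypoint(KUPTR uHandle, KU32 uOp, void *pvReserved);
 *
 * Arguments are pushed right to left, the result is returned in eax, and this
 * thunk restores the stack pointer itself afterwards.
 */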
2132
2133
2134/** @copydoc kLdrModCallTerm */
2135static int kldrModLXCallTerm(PKLDRMOD pMod, KUPTR uHandle)
2136{
2137 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2138
2139 /*
2140 * Mapped?
2141 */
2142 if (!pModLX->pvMapping)
2143 return KLDR_ERR_NOT_MAPPED;
2144
2145 /*
2146 * Do the call.
2147 */
2148 if ((pModLX->Hdr.e32_mflags & E32MODMASK) == E32MODDLL)
2149 kldrModLXDoCallDLL(pModLX, 1 /* detach */, uHandle);
2150
2151 return 0;
2152}
2153
2154
2155/** @copydoc kLdrModCallThread */
2156static int kldrModLXCallThread(PKLDRMOD pMod, KUPTR uHandle, unsigned fAttachingOrDetaching)
2157{
2158 /* no thread attach/detach callout. */
2159 return 0;
2160}
2161
2162
2163/** @copydoc kLdrModSize */
2164static KLDRADDR kldrModLXSize(PKLDRMOD pMod)
2165{
2166 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2167 return pModLX->cbMapped;
2168}
2169
2170
2171/** @copydoc kLdrModGetBits */
2172static int kldrModLXGetBits(PKLDRMOD pMod, void *pvBits, KLDRADDR BaseAddress, PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser)
2173{
2174 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2175 int rc;
2176
2177 /*
2178 * Load the image bits.
2179 */
2180 rc = kldrModLXDoLoadBits(pModLX, pvBits);
2181 if (rc)
2182 return rc;
2183
2184 /*
2185 * Perform relocations.
2186 */
2187 return kldrModLXRelocateBits(pMod, pvBits, BaseAddress, pMod->aSegments[0].LinkAddress, pfnGetImport, pvUser);
2188
2189}
2190
2191
2192/** @copydoc kLdrModRelocateBits */
2193static int kldrModLXRelocateBits(PKLDRMOD pMod, void *pvBits, KLDRADDR NewBaseAddress, KLDRADDR OldBaseAddress,
2194 PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser)
2195{
2196 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2197 KU32 iSeg;
2198 int rc;
2199
2200 /*
2201 * Do we need to do *anything*?
2202 */
2203 if ( NewBaseAddress == OldBaseAddress
2204 && NewBaseAddress == pModLX->paObjs[0].o32_base
2205 && !pModLX->Hdr.e32_impmodcnt)
2206 return 0;
2207
2208 /*
2209 * Load the fixup section.
2210 */
2211 if (!pModLX->pbFixupSection)
2212 {
2213 rc = kldrModLXDoLoadFixupSection(pModLX);
2214 if (rc)
2215 return rc;
2216 }
2217
2218 /*
2219 * Iterate the segments.
2220 */
2221 for (iSeg = 0; iSeg < pModLX->Hdr.e32_objcnt; iSeg++)
2222 {
2223 const struct o32_obj * const pObj = &pModLX->paObjs[iSeg];
2224 KLDRADDR PageAddress = NewBaseAddress + pModLX->pMod->aSegments[iSeg].RVA;
2225 KU32 iPage;
2226 KU8 *pbPage = (KU8 *)pvBits + (KUPTR)pModLX->pMod->aSegments[iSeg].RVA;
2227
2228 /*
2229 * Iterate the page map pages.
2230 */
2231 for (iPage = 0, rc = 0; !rc && iPage < pObj->o32_mapsize; iPage++, pbPage += OBJPAGELEN, PageAddress += OBJPAGELEN)
2232 {
2233 const KU8 * const pbFixupRecEnd = pModLX->pbFixupRecs + pModLX->paoffPageFixups[iPage + pObj->o32_pagemap];
2234 const KU8 *pb = pModLX->pbFixupRecs + pModLX->paoffPageFixups[iPage + pObj->o32_pagemap - 1];
2235 KLDRADDR uValue;
2236 int iSelector;
2237 KU32 fKind;
2238
2239 /* sanity */
2240 if (pbFixupRecEnd < pb)
2241 return KLDR_ERR_BAD_FIXUP;
2242 if (pbFixupRecEnd - 1 > pModLX->pbFixupSectionLast)
2243 return KLDR_ERR_BAD_FIXUP;
2244 if (pb < pModLX->pbFixupSection)
2245 return KLDR_ERR_BAD_FIXUP;
2246
2247 /*
2248 * Iterate the fixup record.
2249 */
2250 while (pb < pbFixupRecEnd)
2251 {
2252 union _rel
2253 {
2254 const KU8 * pb;
2255 const struct r32_rlc *prlc;
2256 } u;
2257
2258 u.pb = pb;
2259 pb += 3 + (u.prlc->nr_stype & NRCHAIN ? 0 : 1); /* place pb at the 4th member. */
2260
2261 /*
2262 * Figure out the target.
2263 */
2264 switch (u.prlc->nr_flags & NRRTYP)
2265 {
2266 /*
2267 * Internal fixup.
2268 */
2269 case NRRINT:
2270 {
2271 KU16 iTrgObject;
2272 KU32 offTrgObject;
2273
2274 /* the object */
2275 if (u.prlc->nr_flags & NR16OBJMOD)
2276 {
2277 iTrgObject = *(const KU16 *)pb;
2278 pb += 2;
2279 }
2280 else
2281 iTrgObject = *pb++;
2282 iTrgObject--;
2283 if (iTrgObject >= pModLX->Hdr.e32_objcnt)
2284 return KLDR_ERR_BAD_FIXUP;
2285
2286 /* the target */
2287 if ((u.prlc->nr_stype & NRSRCMASK) != NRSSEG)
2288 {
2289 if (u.prlc->nr_flags & NR32BITOFF)
2290 {
2291 offTrgObject = *(const KU32 *)pb;
2292 pb += 4;
2293 }
2294 else
2295 {
2296 offTrgObject = *(const KU16 *)pb;
2297 pb += 2;
2298 }
2299
2300 /* calculate the symbol info. */
2301 uValue = offTrgObject + NewBaseAddress + pMod->aSegments[iTrgObject].RVA;
2302 }
2303 else
2304 uValue = NewBaseAddress + pMod->aSegments[iTrgObject].RVA;
2305 if ( (u.prlc->nr_stype & NRALIAS)
2306 || (pMod->aSegments[iTrgObject].fFlags & KLDRSEG_FLAG_16BIT))
2307 iSelector = pMod->aSegments[iTrgObject].Sel16bit;
2308 else
2309 iSelector = pMod->aSegments[iTrgObject].SelFlat;
2310 fKind = 0;
2311 break;
2312 }
2313
2314 /*
2315 * Import by symbol ordinal.
2316 */
2317 case NRRORD:
2318 {
2319 KU16 iModule;
2320 KU32 iSymbol;
2321
2322 /* the module ordinal */
2323 if (u.prlc->nr_flags & NR16OBJMOD)
2324 {
2325 iModule = *(const KU16 *)pb;
2326 pb += 2;
2327 }
2328 else
2329 iModule = *pb++;
2330 iModule--;
2331 if (iModule >= pModLX->Hdr.e32_impmodcnt)
2332 return KLDR_ERR_BAD_FIXUP;
2333#if 1
2334 if (u.prlc->nr_flags & NRICHAIN)
2335 return KLDR_ERR_BAD_FIXUP;
2336#endif
2337
2338 /* the symbol ordinal */
2339 if (u.prlc->nr_flags & NR32BITOFF)
2340 {
2341 iSymbol = *(const KU32 *)pb;
2342 pb += 4;
2343 }
2344 else if (!(u.prlc->nr_flags & NR8BITORD))
2345 {
2346 iSymbol = *(const KU16 *)pb;
2347 pb += 2;
2348 }
2349 else
2350 iSymbol = *pb++;
2351
2352 /* resolve it. */
2353 rc = pfnGetImport(pMod, iModule, iSymbol, NULL, 0, NULL, &uValue, &fKind, pvUser);
2354 if (rc)
2355 return rc;
2356 iSelector = -1;
2357 break;
2358 }
2359
2360 /*
2361 * Import by symbol name.
2362 */
2363 case NRRNAM:
2364 {
2365 KU32 iModule;
2366 KU16 offSymbol;
2367 const KU8 *pbSymbol;
2368
2369 /* the module ordinal */
2370 if (u.prlc->nr_flags & NR16OBJMOD)
2371 {
2372 iModule = *(const KU16 *)pb;
2373 pb += 2;
2374 }
2375 else
2376 iModule = *pb++;
2377 iModule--;
2378 if (iModule >= pModLX->Hdr.e32_impmodcnt)
2379 return KLDR_ERR_BAD_FIXUP;
2380#if 1
2381 if (u.prlc->nr_flags & NRICHAIN)
2382 return KLDR_ERR_BAD_FIXUP;
2383#endif
2384
2385 /* the symbol name offset */
2386 if (u.prlc->nr_flags & NR32BITOFF)
2387 {
2388 offSymbol = *(const KU32 *)pb;
2389 pb += 4;
2390 }
2391 else if (!(u.prlc->nr_flags & NR8BITORD))
2392 {
2393 offSymbol = *(const KU16 *)pb;
2394 pb += 2;
2395 }
2396 else
2397 offSymbol = *pb++;
2398 pbSymbol = pModLX->pbImportProcs + offSymbol;
2399 if ( pbSymbol < pModLX->pbImportProcs
2400 || pbSymbol > pModLX->pbFixupSectionLast)
2401 return KLDR_ERR_BAD_FIXUP;
2402
2403 /* resolve it. */
2404 rc = pfnGetImport(pMod, iModule, NIL_KLDRMOD_SYM_ORDINAL, (const char *)pbSymbol + 1, *pbSymbol, NULL,
2405 &uValue, &fKind, pvUser);
2406 if (rc)
2407 return rc;
2408 iSelector = -1;
2409 break;
2410 }
2411
2412 case NRRENT:
2413 KLDRMODLX_ASSERT(!"NRRENT");
2414 default:
2415 iSelector = -1;
2416 break;
2417 }
2418
2419 /* addend */
2420 if (u.prlc->nr_flags & NRADD)
2421 {
2422 if (u.prlc->nr_flags & NR32BITADD)
2423 {
2424 uValue += *(const KU32 *)pb;
2425 pb += 4;
2426 }
2427 else
2428 {
2429 uValue += *(const KU16 *)pb;
2430 pb += 2;
2431 }
2432 }
2433
2434
2435 /*
2436 * Deal with the 'source' (i.e. the place that should be modified - very logical).
2437 */
2438 if (!(u.prlc->nr_stype & NRCHAIN))
2439 {
2440 int off = u.prlc->r32_soff;
2441
2442 /* common / simple */
2443 if ( (u.prlc->nr_stype & NRSRCMASK) == NROFF32
2444 && off >= 0
2445 && off <= OBJPAGELEN - 4)
2446 *(KU32 *)&pbPage[off] = uValue;
2447 else if ( (u.prlc->nr_stype & NRSRCMASK) == NRSOFF32
2448 && off >= 0
2449 && off <= OBJPAGELEN - 4)
2450 *(KU32 *)&pbPage[off] = uValue - (PageAddress + off + 4);
2451 else
2452 {
2453 /* generic */
2454 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2455 if (rc)
2456 return rc;
2457 }
2458 }
2459 else if (!(u.prlc->nr_flags & NRICHAIN))
2460 {
2461 const KI16 *poffSrc = (const KI16 *)pb;
2462 KU8 c = u.pb[2];
2463
2464 /* common / simple */
2465 if ((u.prlc->nr_stype & NRSRCMASK) == NROFF32)
2466 {
2467 while (c-- > 0)
2468 {
2469 int off = *poffSrc++;
2470 if (off >= 0 && off <= OBJPAGELEN - 4)
2471 *(KU32 *)&pbPage[off] = uValue;
2472 else
2473 {
2474 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2475 if (rc)
2476 return rc;
2477 }
2478 }
2479 }
2480 else if ((u.prlc->nr_stype & NRSRCMASK) == NRSOFF32)
2481 {
2482 while (c-- > 0)
2483 {
2484 int off = *poffSrc++;
2485 if (off >= 0 && off <= OBJPAGELEN - 4)
2486 *(KU32 *)&pbPage[off] = uValue - (PageAddress + off + 4);
2487 else
2488 {
2489 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2490 if (rc)
2491 return rc;
2492 }
2493 }
2494 }
2495 else
2496 {
2497 while (c-- > 0)
2498 {
2499 rc = kldrModLXDoReloc(pbPage, *poffSrc++, PageAddress, u.prlc, iSelector, uValue, fKind);
2500 if (rc)
2501 return rc;
2502 }
2503 }
2504 pb = (const KU8 *)poffSrc;
2505 }
2506 else
2507 {
2508 /* This is a pain because it will require virgin pages on a relocation. */
2509 KLDRMODLX_ASSERT(!"NRICHAIN");
2510 return KLDR_ERR_LX_NRICHAIN_NOT_SUPPORTED;
2511 }
2512 }
2513 }
2514 }
2515
2516 return 0;
2517}
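/*
 * A note on the self-relative (NRSOFF32) formula used above and in
 * kldrModLXDoReloc below: the value stored is uValue - (PageAddress + off + 4),
 * i.e. the distance from the first byte after the 32-bit fixup location to the
 * target. With made up numbers: a target at 0x00013000 fixed up at page address
 * 0x00012000, offset 0x10, stores 0x00013000 - (0x00012000 + 0x10 + 4) = 0xFEC,
 * which is the rel32 displacement a call or jmp instruction expects.
 */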
2518
2519
2520/**
2521 * Applies the relocation to one 'source' in a page.
2522 *
2523 * This takes care of the more esoteric cases while the common cases
2524 * are dealt with separately.
2525 *
2526 * @returns 0 on success, non-zero kLdr status code on failure.
2527 * @param pbPage The page in which to apply the fixup.
2528 * @param off Page relative offset of where to apply the fixup.
 * @param PageAddress The address the page is being fixed up for, used by self-relative fixups.
 * @param prlc The relocation record being applied.
 * @param iSelector The selector of the target segment, or -1 when no selector applies.
2529 * @param uValue The target value.
2530 * @param fKind The target kind.
2531 */
2532static int kldrModLXDoReloc(KU8 *pbPage, int off, KLDRADDR PageAddress, const struct r32_rlc *prlc,
2533 int iSelector, KLDRADDR uValue, KU32 fKind)
2534{
2535#pragma pack(1) /* just to be sure */
2536 union
2537 {
2538 KU8 ab[6];
2539 KU32 off32;
2540 KU16 off16;
2541 KU8 off8;
2542 struct
2543 {
2544 KU16 off;
2545 KU16 Sel;
2546 } Far16;
2547 struct
2548 {
2549 KU32 off;
2550 KU16 Sel;
2551 } Far32;
2552 } uData;
2553#pragma pack()
2554 const KU8 *pbSrc;
2555 KU8 *pbDst;
2556 KU8 cb;
2557
2558 /*
2559 * Compose the fixup data.
2560 */
2561 switch (prlc->nr_stype & NRSRCMASK)
2562 {
2563 case NRSBYT:
2564 uData.off8 = (KU8)uValue;
2565 cb = 1;
2566 break;
2567 case NRSSEG:
2568 if (iSelector == -1)
2569 {
2570 /* fixme */
2571 }
2572 uData.off16 = iSelector;
2573 cb = 2;
2574 break;
2575 case NRSPTR:
2576 if (iSelector == -1)
2577 {
2578 /* fixme */
2579 }
2580 uData.Far16.off = (KU16)uValue;
2581 uData.Far16.Sel = iSelector;
2582 cb = 4;
2583 break;
2584 case NRSOFF:
2585 uData.off16 = (KU16)uValue;
2586 cb = 2;
2587 break;
2588 case NRPTR48:
2589 if (iSelector == -1)
2590 {
2591 /* fixme */
2592 }
2593 uData.Far32.off = (KU32)uValue;
2594 uData.Far32.Sel = iSelector;
2595 cb = 6;
2596 break;
2597 case NROFF32:
2598 uData.off32 = (KU32)uValue;
2599 cb = 4;
2600 break;
2601 case NRSOFF32:
2602 uData.off32 = (KU32)uValue - (PageAddress + off + 4);
2603 cb = 4;
2604 break;
2605 default:
2606 return KLDR_ERR_LX_BAD_FIXUP_SECTION; /** @todo fix error, add more checks! */
2607 }
2608
2609 /*
2610 * Apply it. This is sloooow...
2611 */
2612 pbSrc = &uData.ab[0];
2613 pbDst = pbPage + off;
2614 while (cb-- > 0)
2615 {
2616 if (off > OBJPAGELEN)
2617 break;
2618 if (off >= 0)
2619 *pbDst = *pbSrc;
2620 pbSrc++;
2621 pbDst++;
 off++; /* keep the page offset in step with pbDst so the boundary checks cover each byte. */
2622 }
2623
2624 return 0;
2625}
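/*
 * Byte layout produced by the selector carrying cases above (little endian, LX
 * being an x86 format): an NRSPTR fixup with uValue = 0x1234 and iSelector =
 * 0x0047 writes the bytes 34 12 47 00 - offset first, then selector - matching
 * the packed Far16 member of uData. Values are made up for illustration.
 */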
2626
2627
2628/**
2629 * The LX module interpreter method table.
2630 */
2631KLDRMODOPS g_kLdrModLXOps =
2632{
2633 "LX",
2634 NULL,
2635 kldrModLXCreate,
2636 kldrModLXDestroy,
2637 kldrModLXQuerySymbol,
2638 kldrModLXEnumSymbols,
2639 kldrModLXGetImport,
2640 kldrModLXNumberOfImports,
2641 NULL /* pfnCanExecuteOn is optional */,
2642 kldrModLXGetStackInfo,
2643 kldrModLXQueryMainEntrypoint,
2644 NULL /* fixme */,
2645 NULL /* fixme */,
2646 kldrModLXEnumDbgInfo,
2647 kldrModLXHasDbgInfo,
2648 kldrModLXMap,
2649 kldrModLXUnmap,
2650 kldrModLXAllocTLS,
2651 kldrModLXFreeTLS,
2652 kldrModLXReload,
2653 kldrModLXFixupMapping,
2654 kldrModLXCallInit,
2655 kldrModLXCallTerm,
2656 kldrModLXCallThread,
2657 kldrModLXSize,
2658 kldrModLXGetBits,
2659 kldrModLXRelocateBits,
2660 NULL /* fixme: pfnMostlyDone */,
2661 42 /* the end */
2662};
2663