Timestamp: Mar 18, 2021, 8:57:36 PM
Author: David Azarewicz
Message: Merge changes from Paul's uniaud32next branch.
Location: GPL/trunk
Files: 2 edited

  • GPL/trunk

  • GPL/trunk/lib32/memory.c

--- GPL/trunk/lib32/memory.c (r587)
+++ GPL/trunk/lib32/memory.c (r679)
@@ -32,12 +32,23 @@
 #include <stacktoflat.h>
 #include <limits.h>
-#ifdef KEE
 #include <kee.h>
-#endif
 #include "malloc.h"
+#define _I386_PAGE_H
+typedef struct { unsigned long pgprot; } pgprot_t;
+#define MAP_NR(addr)            (__pa(addr) >> PAGE_SHIFT)
+#define PAGE_SHIFT      12
+#define __PAGE_OFFSET           (0xC0000000)
+
+#define PAGE_OFFSET             ((unsigned long)__PAGE_OFFSET)
+#define __pa(x)                 ((unsigned long)(x)-PAGE_OFFSET)
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
 
 #pragma off (unreferenced)
 
 #define PAGE_SIZE 4096
+#define min(a,b)  (((a) < (b)) ? (a) : (b))
 
 int free_pages(unsigned long addr, unsigned long order);
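
The paging macros added above follow the usual Linux/i386 convention: with the kernel linear base at 0xC0000000 and 4 KiB pages, __pa() subtracts the base from a linear address and MAP_NR() shifts the result down to a page frame number. A standalone sketch of that arithmetic (illustration only, not part of the changeset):

    #include <stdio.h>

    /* Same definitions as the ones added in this changeset, reduced to a test. */
    #define PAGE_SHIFT      12
    #define __PAGE_OFFSET   (0xC0000000)
    #define PAGE_OFFSET     ((unsigned long)__PAGE_OFFSET)
    #define __pa(x)         ((unsigned long)(x)-PAGE_OFFSET)
    #define MAP_NR(addr)    (__pa(addr) >> PAGE_SHIFT)

    int main(void)
    {
        unsigned long virt = 0xC0123456UL;       /* a kernel-space linear address */
        printf("phys = 0x%lx\n", __pa(virt));    /* 0x123456: base subtracted */
        printf("pfn  = 0x%lx\n", MAP_NR(virt));  /* 0x123: page frame number */
        return 0;
    }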
     
@@ -60,7 +71,7 @@
 
 typedef struct _BaseAddr {
-    ULONG                  base;
-    ULONG                  retaddr;
-    ULONG                  size;
+    ULONG                  base;        // VMAlloc addr
+    ULONG                  retaddr;     // aligned addr returned to caller
+    ULONG                  size;        // VMAlloc size
     struct _BaseAddr NEAR *next;
 } BaseAddr;
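
The new field comments make the bookkeeping explicit: base and size record the raw VMAlloc allocation, while retaddr is the aligned address actually handed back to the caller. Purely as an illustration of that design (the list-insertion code lives elsewhere in memory.c and is not part of this hunk; RecordBaseAddr and _kmalloc are assumed names):

    // Illustration only: how an entry for an over-aligned allocation would be
    // recorded in the BaseAddr list. RecordBaseAddr and _kmalloc are assumed
    // helper names, not part of this changeset.
    static void RecordBaseAddr(ULONG vmallocBase, ULONG alignedAddr, ULONG vmallocSize)
    {
        BaseAddr NEAR *p = (BaseAddr NEAR *)_kmalloc(sizeof(BaseAddr));
        if (p == NULL) return;

        p->base    = vmallocBase;   // address returned by VMAlloc
        p->retaddr = alignedAddr;   // rounded-up address given to the caller
        p->size    = vmallocSize;   // size originally passed to VMAlloc

        DevCli();                   // protect the global list, as the lookups do
        p->next = pBaseAddrHead;
        pBaseAddrHead = p;
        DevSti();
    }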
     
@@ -93,5 +104,5 @@
 //******************************************************************************
 //******************************************************************************
-ULONG GetBaseAddress(ULONG addr, ULONG *pSize)
+ULONG GetBaseAddressAndFree(ULONG addr, ULONG *pSize)
 {
     BaseAddr NEAR *pCur, NEAR *pTemp;
     
@@ -102,4 +113,6 @@
     pCur = pBaseAddrHead;
 
+    // If address is in list, remove list item and free entry
+    // Caller must VMFree returned address or else
     if(pCur->retaddr == addr)
     {
     
@@ -126,9 +139,41 @@
 }
 //******************************************************************************
+//******************************************************************************
+ULONG GetBaseAddressNoFree(ULONG addr, ULONG *pSize)
+{
+    BaseAddr NEAR *pCur, NEAR *pTemp;
+
+    if(pBaseAddrHead == NULL) return addr;
+
+    DevCli();
+    pCur = pBaseAddrHead;
+
+    if(pCur->retaddr == addr)
+    {
+        addr = pCur->base;
+        if(pSize) *pSize = pCur->size;
+        pBaseAddrHead = pCur->next;
+//        _kfree(pCur);
+    }
+    else
+    while(pCur->next) {
+        if(pCur->next->retaddr == addr) {
+            pTemp = pCur->next;
+            addr = pTemp->base;
+            if(pSize) *pSize = pTemp->size;
+            pCur->next = pTemp->next;
+//            _kfree(pTemp);
+            break;
+        }
+        pCur = pCur->next;
+    }
+    DevSti();
+    return addr;
+}
+//******************************************************************************
 //NOTE: Assumes memory is continuous!!
 //******************************************************************************
 unsigned long virt_to_phys(void * address)
 {
-#ifdef KEE
     KEEVMPageList pagelist;
     ULONG         nrpages;
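
GetBaseAddressAndFree() (renamed from GetBaseAddress()) and the new GetBaseAddressNoFree() both translate a caller-visible address back to its recorded VMAlloc base and size; per the comments, the AndFree variant also releases the list entry and the caller is then expected to VMFree the returned base, while the NoFree variant is used later in this changeset by ksize() just to look up the size. A minimal calling sketch under those assumptions (release_block and usable_size are illustrative names):

    // Illustrative calling pattern only; release_block() and usable_size()
    // are assumed names, not part of the changeset.
    static void release_block(void *ptr)
    {
        ULONG size = 0;
        ULONG base = GetBaseAddressAndFree((ULONG)ptr, (ULONG NEAR *)&size);
        VMFree((LINEAR)base);    // free the original VMAlloc base, not ptr itself
    }

    static size_t usable_size(const void *ptr)
    {
        ULONG size = 0;
        GetBaseAddressNoFree((ULONG)ptr, (ULONG NEAR *)&size);
        return size;             // recorded VMAlloc size, 0 if the address is not tracked
    }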
     
@@ -139,14 +184,4 @@
         }
         return pagelist.addr;
-#else
-    LINEAR addr = (LINEAR)address;
-    PAGELIST pagelist;
-
-        if(DevLinToPageList(addr, PAGE_SIZE, (PAGELIST NEAR *)__Stack32ToFlat((ULONG)&pagelist))) {
-                DebugInt3();
-                return 0;
-        }
-        return pagelist.physaddr;
-#endif
 }
 //******************************************************************************
     
@@ -157,10 +192,6 @@
     ULONG addr = 0;
 
-#ifdef KEE
     SHORT sel;
     rc = KernVMAlloc(PAGE_SIZE, VMDHA_PHYS, (PVOID*)&addr, (PVOID*)&address, &sel);
-#else
-    rc = DevVMAlloc(VMDHA_PHYS, PAGE_SIZE, (LINEAR)&address, __Stack32ToFlat((ULONG)&addr));
-#endif
     if (rc != 0) {
         DebugInt3();

@@ -184,10 +215,5 @@
 __again:
 
-#ifdef KEE
-
     rc = KernVMAlloc(size, flags, (PVOID*)&addr, (PVOID*)-1, &sel);
-#else
-    rc = DevVMAlloc(flags, size, (LINEAR)-1, __Stack32ToFlat((ULONG)&addr));
-#endif
     if (rc == 0) {
         *pAddr = (LINEAR)addr;
     
@@ -209,10 +235,5 @@
 {
     APIRET rc;
-
-#ifdef KEE
-        rc = KernVMFree((PVOID)addr);
-#else
-        rc = DevVMFree((LINEAR)addr);
-#endif
+    rc = KernVMFree((PVOID)addr);
     if(rc) {
         DebugInt3();
     
@@ -224,6 +245,4 @@
 ULONG ulget_free_pagesMemUsed = 0;
 
-#define GFP_DMA         0x80
-#define GFP_DMAHIGHMEM  0x100
 //******************************************************************************
 //******************************************************************************
     
@@ -243,5 +262,5 @@
 
         if(startpage != endpage) {
-            //try once more
+            // not in same 32K page, try once more
             rc = VMAlloc(size, flags, (LINEAR *)&tempaddr);
             VMFree((LINEAR)addr);
     
@@ -333,4 +352,5 @@
 
             if (startpage != endpage) {
+                // Not in same 32K page
                 physaddr2 = (startpage+1) << 16;
 
     
@@ -363,5 +383,5 @@
 
     //check if it really is the base of the allocation (see above)
-    addr = GetBaseAddress(addr, (ULONG NEAR *)__Stack32ToFlat(&size));
+    addr = GetBaseAddressAndFree(addr, (ULONG NEAR *)&size);
 
     if(VMFree((LINEAR)addr)) {
     
@@ -402,4 +422,36 @@
 //******************************************************************************
 //******************************************************************************
+void *__vmalloc(unsigned long size, gfp_t gfp_mask)
+{
+        return vmalloc(size);
+}
+//******************************************************************************
+//******************************************************************************
+/**
+ * __vmalloc_node - allocate virtually contiguous memory
+ * @size:           allocation size
+ * @align:          desired alignment
+ * @gfp_mask:       flags for the page level allocator
+ * @node:           node to use for allocation or NUMA_NO_NODE
+ * @caller:         caller's return address
+ *
+ * Allocate enough pages to cover @size from the page level allocator with
+ * @gfp_mask flags.  Map them into contiguous kernel virtual space.
+ *
+ * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
+ * and __GFP_NOFAIL are not supported
+ *
+ * Any use of gfp flags outside of GFP_KERNEL should be consulted
+ * with mm people.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+void *__vmalloc_node(unsigned long size, unsigned long align,
+                            gfp_t gfp_mask, int node, const void *caller)
+{
+        return vmalloc(size);
+}
+//******************************************************************************
+//******************************************************************************
 void vfree(void *ptr)
 {
     
@@ -407,5 +459,5 @@
     ULONG  size = 0;
 
-    GetBaseAddress((ULONG)ptr, (ULONG NEAR *)__Stack32ToFlat(&size));
+    GetBaseAddressAndFree((ULONG)ptr, (ULONG NEAR *)&size);
 
     if(VMFree((LINEAR)ptr)) {
     
@@ -427,5 +479,5 @@
 //******************************************************************************
 //******************************************************************************
-int remap_page_range(unsigned long from, unsigned long to, unsigned long size, unsigned long prot)
+int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot)
 {
         DebugInt3();
     
@@ -452,13 +504,7 @@
         //size &= 0xFFFFF000;
 
-#ifdef KEE
     SHORT sel;
-
-        //rc = KernVMAlloc(size, VMDHA_PHYS, (PVOID*)&addr, (PVOID*)&physaddr, &sel);
+    //rc = KernVMAlloc(size, VMDHA_PHYS, (PVOID*)&addr, (PVOID*)&physaddr, &sel);
     rc = KernVMAlloc(Length, VMDHA_PHYS, (PVOID*)&addr, (PVOID*)&PhysicalAddress, &sel);
-#else
-    //rc = DevVMAlloc(VMDHA_PHYS, size, (LINEAR)&physaddr, __Stack32ToFlat((ULONG)&addr));
-    rc = DevVMAlloc(VMDHA_PHYS, Length, (LINEAR)&PhysicalAddress, __Stack32ToFlat((ULONG)&addr));
-#endif
     if (rc != 0) {
         dprintf(("ioremap error: %x", rc));
     
@@ -497,5 +543,5 @@
     if(n == 0) return;
 
-        kmemcpy(to, from, n);
+        memcpy(to, from, n);
 }
 //******************************************************************************
     
@@ -509,5 +555,5 @@
     if(n == 0) return 0;
 
-        kmemcpy(to, from, n);
+        memcpy(to, from, n);
         return 0;
 }
     
@@ -534,5 +580,5 @@
     if(n == 0) return 0;
 
-        kmemcpy(to, from, n);
+        memcpy(to, from, n);
         return 0;
 }
     
@@ -547,5 +593,5 @@
                 return 0;
         }
-        kmemcpy(dest, src, size);
+        memcpy(dest, src, size);
         return 0;
 }
     
@@ -638,3 +684,152 @@
         return kzalloc(n * size, flags);
 }
-
+//******************************************************************************
+//******************************************************************************
+
+size_t ksize(const void *block)
+{
+        size_t size;
+
+        if (!block)
+            size = 0;                   // Bad coder
+
+        else if (block == ZERO_SIZE_PTR)
+            size = 0;                   // Bad coder
+
+        else if(IsHeapAddr((ULONG)block))
+            size = _msize((void _near *)block);
+
+        else if (!GetBaseAddressNoFree((ULONG)block, (ULONG NEAR *)&size))
+            size = 0;                   // Something wrong
+
+        return size;
+}
+//******************************************************************************
+//******************************************************************************
+static inline void *__do_krealloc(const void *p, size_t new_size,
+                                           gfp_t flags)
+{
+        void *ret;
+        size_t ks = 0;
+
+        if (p)
+                ks = ksize(p);
+
+        if (ks >= new_size)
+                return (void *)p;
+
+        ret = __kmalloc(new_size, flags);
+        if (ret && p)
+                memcpy(ret, p, ks);
+
+        return ret;
+}
+//******************************************************************************
+//******************************************************************************
+/**
+ * krealloc - reallocate memory. The contents will remain unchanged.
+ * @p: object to reallocate memory for.
+ * @new_size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * The contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes.  If @p is %NULL, krealloc()
+ * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
+ * %NULL pointer, the object pointed to is freed.
+ */
+void *krealloc(const void *p, size_t new_size, gfp_t flags)
+{
+        void *ret;
+
+        if (!new_size) {
+                kfree(p);
+                return ZERO_SIZE_PTR;
+        }
+
+        ret = __do_krealloc(p, new_size, flags);
+        if (ret && p != ret)
+                kfree(p);
+
+        return ret;
+}
+//******************************************************************************
+//******************************************************************************
+/**
+ *      vzalloc - allocate virtually contiguous memory with zero fill
+ *      @size:  allocation size
+ *      Allocate enough pages to cover @size from the page level
+ *      allocator and map them into contiguous kernel virtual space.
+ *      The memory allocated is set to zero.
+ *
+ *      For tight control over page level allocator and protection flags
+ *      use __vmalloc() instead.
+ */
+void *vzalloc(unsigned long size)
+{
+        void *buf;
+        buf = vmalloc(size);
+        if (buf)
+                memset(buf, 0, size);
+        return buf;
+}
+//******************************************************************************
+//******************************************************************************
+/**
+ * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
+ * failure, fall back to non-contiguous (vmalloc) allocation.
+ * @size: size of the request.
+ * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
+ * @node: numa node to allocate from
+ *
+ * Uses kmalloc to get the memory but if the allocation fails then falls back
+ * to the vmalloc allocator. Use kvfree for freeing the memory.
+ *
+ * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
+ * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
+ * preferable to the vmalloc fallback, due to visible performance drawbacks.
+ *
+ * Please note that any use of gfp flags outside of GFP_KERNEL is careful to not
+ * fall back to vmalloc.
+ *
+ * Return: pointer to the allocated memory of %NULL in case of failure
+ */
+void *kvmalloc_node(size_t size, gfp_t flags, int node)
+{
+        gfp_t kmalloc_flags = flags;
+        void *ret;
+
+        /*
+         * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
+         * so the given set of flags has to be compatible.
+         */
+        if ((flags & GFP_KERNEL) != GFP_KERNEL)
+                return kmalloc_node(size, flags, node);
+
+        /*
+         * We want to attempt a large physically contiguous block first because
+         * it is less likely to fragment multiple larger blocks and therefore
+         * contribute to a long term fragmentation less than vmalloc fallback.
+         * However make sure that larger requests are not too disruptive - no
+         * OOM killer and no allocation failure warnings as we have a fallback.
+         */
+        if (size > PAGE_SIZE) {
+                kmalloc_flags |= __GFP_NOWARN;
+
+                if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
+                        kmalloc_flags |= __GFP_NORETRY;
+        }
+
+        ret = kmalloc_node(size, kmalloc_flags, node);
+
+        /*
+         * It doesn't really make sense to fallback to vmalloc for sub page
+         * requests
+         */
+        if (ret || size <= PAGE_SIZE)
+                return ret;
+
+        return __vmalloc_node(size, 1, flags, node,
+                        __builtin_return_address(0));
+}
+//******************************************************************************
+//******************************************************************************
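
The kernel-doc comments carried over above define the calling contract for the new helpers: krealloc(NULL, n, flags) behaves like kmalloc(), krealloc(p, 0, flags) frees p and returns ZERO_SIZE_PTR, and data is preserved up to the smaller of the old and new sizes, while memory from kvmalloc_node() may come from either kmalloc or the vmalloc fallback and is documented to be released with kvfree(). A small usage sketch of the krealloc() contract (grow_buffer is an illustrative name, not part of the changeset):

    // Illustration only: resizing a buffer under the krealloc() contract
    // documented above. grow_buffer() is an assumed helper name.
    static void *grow_buffer(void *old, size_t new_len)
    {
        void *p = krealloc(old, new_len, GFP_KERNEL);
        if (!p)
            return NULL;    // allocation failed; old is untouched and still owned by the caller
        return p;           // old was freed by krealloc() if a different block was returned
    }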