Changeset 679 for GPL/trunk/lib32/memory.c
- Timestamp: Mar 18, 2021, 8:57:36 PM (4 years ago)
- Location: GPL/trunk
- Files: 2 edited
Legend:
- Unmodified: no prefix
- Added: prefixed with +
- Removed: prefixed with -
GPL/trunk
- Property svn:mergeinfo changed
  - /GPL/branches/uniaud32-linux-3.2.102 (added): merged 611-614
  - /GPL/branches/uniaud32-next (added): merged 615-678
GPL/trunk/lib32/memory.c
r587 → r679

#include <stacktoflat.h>
#include <limits.h>
- #ifdef KEE
#include <kee.h>
- #endif
#include "malloc.h"
+ #define _I386_PAGE_H
+ typedef struct { unsigned long pgprot; } pgprot_t;
+ #define MAP_NR(addr) (__pa(addr) >> PAGE_SHIFT)
+ #define PAGE_SHIFT 12
+ #define __PAGE_OFFSET (0xC0000000)
+
+ #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+ #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
+
+ #include <linux/mm.h>
+ #include <linux/slab.h>
+ #include <linux/printk.h>

#pragma off (unreferenced)

#define PAGE_SIZE 4096
+ #define min(a,b) (((a) < (b)) ? (a) : (b))

int free_pages(unsigned long addr, unsigned long order);
…

typedef struct _BaseAddr {
-     ULONG base;
-     ULONG retaddr;
-     ULONG size;
+     ULONG base;      // VMAlloc addr
+     ULONG retaddr;   // aligned addr returned to caller
+     ULONG size;      // VMAlloc size
    struct _BaseAddr NEAR *next;
} BaseAddr;
…
//******************************************************************************
//******************************************************************************
- ULONG GetBaseAddress (ULONG addr, ULONG *pSize)
+ ULONG GetBaseAddressAndFree(ULONG addr, ULONG *pSize)
{
    BaseAddr NEAR *pCur, NEAR *pTemp;
…
    pCur = pBaseAddrHead;

+     // If address is in list, remove list item and free entry
+     // Caller must VMFree returned address or else
    if(pCur->retaddr == addr)
    {
…
}
//******************************************************************************
+ //******************************************************************************
+ ULONG GetBaseAddressNoFree(ULONG addr, ULONG *pSize)
+ {
+     BaseAddr NEAR *pCur, NEAR *pTemp;
+
+     if(pBaseAddrHead == NULL) return addr;
+
+     DevCli();
+     pCur = pBaseAddrHead;
+
+     if(pCur->retaddr == addr)
+     {
+         addr = pCur->base;
+         if(pSize) *pSize = pCur->size;
+         pBaseAddrHead = pCur->next;
+         // _kfree(pCur);
+     }
+     else
+     while(pCur->next) {
+         if(pCur->next->retaddr == addr) {
+             pTemp = pCur->next;
+             addr = pTemp->base;
+             if(pSize) *pSize = pTemp->size;
+             pCur->next = pTemp->next;
+             // _kfree(pTemp);
+             break;
+         }
+         pCur = pCur->next;
+     }
+     DevSti();
+     return addr;
+ }
+ //******************************************************************************
//NOTE: Assumes memory is continuous!!
//******************************************************************************
unsigned long virt_to_phys(void * address)
{
- #ifdef KEE
    KEEVMPageList pagelist;
    ULONG nrpages;
…
    }
    return pagelist.addr;
- #else
-     LINEAR addr = (LINEAR)address;
-     PAGELIST pagelist;
-
-     if(DevLinToPageList(addr, PAGE_SIZE, (PAGELIST NEAR *)__Stack32ToFlat((ULONG)&pagelist))) {
-         DebugInt3();
-         return 0;
-     }
-     return pagelist.physaddr;
- #endif
}
//******************************************************************************
…
    ULONG addr = 0;

- #ifdef KEE
    SHORT sel;
    rc = KernVMAlloc(PAGE_SIZE, VMDHA_PHYS, (PVOID*)&addr, (PVOID*)&address, &sel);
- #else
-     rc = DevVMAlloc(VMDHA_PHYS, PAGE_SIZE, (LINEAR)&address, __Stack32ToFlat((ULONG)&addr));
- #endif
    if (rc != 0) {
        DebugInt3();
…
__again:

- #ifdef KEE
-
    rc = KernVMAlloc(size, flags, (PVOID*)&addr, (PVOID*)-1, &sel);
- #else
-     rc = DevVMAlloc(flags, size, (LINEAR)-1, __Stack32ToFlat((ULONG)&addr));
- #endif
    if (rc == 0) {
        *pAddr = (LINEAR)addr;
…
{
    APIRET rc;
-
- #ifdef KEE
-     rc = KernVMFree((PVOID)addr);
- #else
-     rc = DevVMFree((LINEAR)addr);
- #endif
+     rc = KernVMFree((PVOID)addr);
    if(rc) {
        DebugInt3();
…
ULONG ulget_free_pagesMemUsed = 0;

- #define GFP_DMA 0x80
- #define GFP_DMAHIGHMEM 0x100
//******************************************************************************
//******************************************************************************
…

    if(startpage != endpage) {
-         // try once more
+         // not in same 32K page, try once more
        rc = VMAlloc(size, flags, (LINEAR *)&tempaddr);
        VMFree((LINEAR)addr);
…

    if (startpage != endpage) {
+         // Not in same 32K page
        physaddr2 = (startpage+1) << 16;

…

    //check if it really is the base of the allocation (see above)
-     addr = GetBaseAddress (addr, (ULONG NEAR *)__Stack32ToFlat(&size));
+     addr = GetBaseAddressAndFree(addr, (ULONG NEAR *)&size);

    if(VMFree((LINEAR)addr)) {
…
//******************************************************************************
//******************************************************************************
+ void *__vmalloc(unsigned long size, gfp_t gfp_mask)
+ {
+     return vmalloc(size);
+ }
+ //******************************************************************************
+ //******************************************************************************
+ /**
+  * __vmalloc_node - allocate virtually contiguous memory
+  * @size: allocation size
+  * @align: desired alignment
+  * @gfp_mask: flags for the page level allocator
+  * @node: node to use for allocation or NUMA_NO_NODE
+  * @caller: caller's return address
+  *
+  * Allocate enough pages to cover @size from the page level allocator with
+  * @gfp_mask flags. Map them into contiguous kernel virtual space.
+  *
+  * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
+  * and __GFP_NOFAIL are not supported
+  *
+  * Any use of gfp flags outside of GFP_KERNEL should be consulted
+  * with mm people.
+  *
+  * Return: pointer to the allocated memory or %NULL on error
+  */
+ void *__vmalloc_node(unsigned long size, unsigned long align,
+                      gfp_t gfp_mask, int node, const void *caller)
+ {
+     return vmalloc(size);
+ }
+ //******************************************************************************
+ //******************************************************************************
void vfree(void *ptr)
{
…
    ULONG size = 0;

-     GetBaseAddress ((ULONG)ptr, (ULONG NEAR *)__Stack32ToFlat(&size));
+     GetBaseAddressAndFree((ULONG)ptr, (ULONG NEAR *)&size);

    if(VMFree((LINEAR)ptr)) {
…
//******************************************************************************
//******************************************************************************
- int remap_page_range(unsigned long from, unsigned long to, unsigned long size, unsigned long prot)
+ int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot)
{
    DebugInt3();
…
    //size &= 0xFFFFF000;

- #ifdef KEE
    SHORT sel;
-
-     //rc = KernVMAlloc(size, VMDHA_PHYS, (PVOID*)&addr, (PVOID*)&physaddr, &sel);
+     //rc = KernVMAlloc(size, VMDHA_PHYS, (PVOID*)&addr, (PVOID*)&physaddr, &sel);
    rc = KernVMAlloc(Length, VMDHA_PHYS, (PVOID*)&addr, (PVOID*)&PhysicalAddress, &sel);
- #else
-     //rc = DevVMAlloc(VMDHA_PHYS, size, (LINEAR)&physaddr, __Stack32ToFlat((ULONG)&addr));
-     rc = DevVMAlloc(VMDHA_PHYS, Length, (LINEAR)&PhysicalAddress, __Stack32ToFlat((ULONG)&addr));
- #endif
    if (rc != 0) {
        dprintf(("ioremap error: %x", rc));
…
    if(n == 0) return;

-     kmemcpy(to, from, n);
+     memcpy(to, from, n);
}
//******************************************************************************
…
    if(n == 0) return 0;

-     kmemcpy(to, from, n);
+     memcpy(to, from, n);
    return 0;
}
…
    if(n == 0) return 0;

-     kmemcpy(to, from, n);
+     memcpy(to, from, n);
    return 0;
}
…
        return 0;
    }
-     kmemcpy(dest, src, size);
+     memcpy(dest, src, size);
    return 0;
}
…
    return kzalloc(n * size, flags);
}
//******************************************************************************
+ //******************************************************************************
+
+ size_t ksize(const void *block)
+ {
+     size_t size;
+
+     if (!block)
+         size = 0;                          // Bad coder
+
+     else if (block == ZERO_SIZE_PTR)
+         size = 0;                          // Bad coder
+
+     else if(IsHeapAddr((ULONG)block))
+         size = _msize((void _near *)block);
+
+     else if (!GetBaseAddressNoFree((ULONG)block, (ULONG NEAR *)&size))
+         size = 0;                          // Something wrong
+
+     return size;
+ }
+ //******************************************************************************
+ //******************************************************************************
+ static inline void *__do_krealloc(const void *p, size_t new_size,
+                                   gfp_t flags)
+ {
+     void *ret;
+     size_t ks = 0;
+
+     if (p)
+         ks = ksize(p);
+
+     if (ks >= new_size)
+         return (void *)p;
+
+     ret = __kmalloc(new_size, flags);
+     if (ret && p)
+         memcpy(ret, p, ks);
+
+     return ret;
+ }
+ //******************************************************************************
+ //******************************************************************************
+ /**
+  * krealloc - reallocate memory. The contents will remain unchanged.
+  * @p: object to reallocate memory for.
+  * @new_size: how many bytes of memory are required.
+  * @flags: the type of memory to allocate.
+  *
+  * The contents of the object pointed to are preserved up to the
+  * lesser of the new and old sizes. If @p is %NULL, krealloc()
+  * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
+  * %NULL pointer, the object pointed to is freed.
+  */
+ void *krealloc(const void *p, size_t new_size, gfp_t flags)
+ {
+     void *ret;
+
+     if (!new_size) {
+         kfree(p);
+         return ZERO_SIZE_PTR;
+     }
+
+     ret = __do_krealloc(p, new_size, flags);
+     if (ret && p != ret)
+         kfree(p);
+
+     return ret;
+ }
+ //******************************************************************************
+ //******************************************************************************
+ /**
+  * vzalloc - allocate virtually contiguous memory with zero fill
+  * @size: allocation size
+  * Allocate enough pages to cover @size from the page level
+  * allocator and map them into contiguous kernel virtual space.
+  * The memory allocated is set to zero.
+  *
+  * For tight control over page level allocator and protection flags
+  * use __vmalloc() instead.
+  */
+ void *vzalloc(unsigned long size)
+ {
+     void *buf;
+     buf = vmalloc(size);
+     if (buf)
+         memset(buf, 0, size);
+     return buf;
+ }
+ //******************************************************************************
+ //******************************************************************************
+ /**
+  * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
+  * failure, fall back to non-contiguous (vmalloc) allocation.
+  * @size: size of the request.
+  * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
+  * @node: numa node to allocate from
+  *
+  * Uses kmalloc to get the memory but if the allocation fails then falls back
+  * to the vmalloc allocator. Use kvfree for freeing the memory.
+  *
+  * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
+  * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
+  * preferable to the vmalloc fallback, due to visible performance drawbacks.
+  *
+  * Please note that any use of gfp flags outside of GFP_KERNEL is careful to not
+  * fall back to vmalloc.
+  *
+  * Return: pointer to the allocated memory of %NULL in case of failure
+  */
+ void *kvmalloc_node(size_t size, gfp_t flags, int node)
+ {
+     gfp_t kmalloc_flags = flags;
+     void *ret;
+
+     /*
+      * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
+      * so the given set of flags has to be compatible.
+      */
+     if ((flags & GFP_KERNEL) != GFP_KERNEL)
+         return kmalloc_node(size, flags, node);
+
+     /*
+      * We want to attempt a large physically contiguous block first because
+      * it is less likely to fragment multiple larger blocks and therefore
+      * contribute to a long term fragmentation less than vmalloc fallback.
+      * However make sure that larger requests are not too disruptive - no
+      * OOM killer and no allocation failure warnings as we have a fallback.
+      */
+     if (size > PAGE_SIZE) {
+         kmalloc_flags |= __GFP_NOWARN;
+
+         if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
+             kmalloc_flags |= __GFP_NORETRY;
+     }
+
+     ret = kmalloc_node(size, kmalloc_flags, node);
+
+     /*
+      * It doesn't really make sense to fallback to vmalloc for sub page
+      * requests
+      */
+     if (ret || size <= PAGE_SIZE)
+         return ret;
+
+     return __vmalloc_node(size, 1, flags, node,
+                           __builtin_return_address(0));
+ }
+ //******************************************************************************
+ //******************************************************************************
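A note on the GetBaseAddress* split introduced here: __get_free_pages() may hand callers an address that was aligned up from, or reallocated past, the actual KernVMAlloc base, so the pointer a caller frees is not always the pointer KernVMFree expects. The BaseAddr list records that mapping; GetBaseAddressAndFree() resolves it on the free path (the caller must then VMFree the returned base), and the new GetBaseAddressNoFree() variant lets ksize() recover the allocation size for a pointer. A minimal sketch of the same bookkeeping pattern, using hypothetical names (track_alloc, lookup_base) rather than the driver's own helpers:

    /* Sketch only: illustrates the base-address bookkeeping idea. */
    #include <stddef.h>
    #include <stdlib.h>

    typedef struct TrackedAlloc {
        void  *base;                /* pointer actually returned by the allocator */
        void  *retaddr;             /* aligned pointer handed to the caller       */
        size_t size;                /* size of the underlying allocation          */
        struct TrackedAlloc *next;
    } TrackedAlloc;

    static TrackedAlloc *head;

    /* Remember which base allocation an aligned pointer came from. */
    static int track_alloc(void *base, void *retaddr, size_t size)
    {
        TrackedAlloc *e = malloc(sizeof(*e));
        if (!e)
            return -1;
        e->base = base;
        e->retaddr = retaddr;
        e->size = size;
        e->next = head;
        head = e;
        return 0;
    }

    /* Translate an aligned pointer back to the base the allocator expects. */
    static void *lookup_base(void *retaddr, size_t *size)
    {
        TrackedAlloc *e;
        for (e = head; e; e = e->next) {
            if (e->retaddr == retaddr) {
                if (size)
                    *size = e->size;
                return e->base;
            }
        }
        return retaddr;             /* not tracked: pointer is already the base */
    }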
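With ksize() and krealloc() in place, code ported from Linux can grow buffers the usual way: __do_krealloc() returns the original pointer when ksize() reports enough room, and otherwise copies the old contents into a fresh __kmalloc() block. A hedged usage sketch (the helper below is illustrative only, not part of this changeset):

    #include <linux/slab.h>
    #include <linux/errno.h>

    /* Hypothetical caller: grow an index table to hold new_n entries. */
    static int grow_table(unsigned int **ptbl, size_t new_n, gfp_t gfp)
    {
        unsigned int *bigger = krealloc(*ptbl, new_n * sizeof(**ptbl), gfp);

        if (!bigger)
            return -ENOMEM;   /* the old block is left untouched on failure */

        *ptbl = bigger;       /* existing entries are preserved by krealloc() */
        return 0;
    }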
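kvmalloc_node() follows the upstream pattern: try kmalloc_node() first, adding __GFP_NOWARN (and __GFP_NORETRY unless __GFP_RETRY_MAYFAIL was requested) for requests larger than a page, then fall back to __vmalloc_node(). A hedged sketch of how a caller would pair it with kvfree(); kvfree() and NUMA_NO_NODE are assumed to be provided elsewhere in the port:

    #include <linux/mm.h>
    #include <linux/slab.h>

    /* Hypothetical caller: allocate a large staging buffer. */
    static void *alloc_staging(size_t bytes)
    {
        /* Physically contiguous when possible, vmalloc-backed otherwise. */
        return kvmalloc_node(bytes, GFP_KERNEL, NUMA_NO_NODE);
    }

    static void free_staging(void *buf)
    {
        kvfree(buf);    /* frees either kind of allocation */
    }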