source: GPL/trunk/lib32/memory.c

Last change on this file was 772, checked in by David Azarewicz, 5 months ago

Merge in changes from 6.6-LTS branch.
Fixed additional 25+ problems.

File size: 26.0 KB
/* $Id: memory.cpp,v 1.1.1.1 2003/07/02 13:57:02 eleph Exp $ */
/*
 * OS/2 implementation of Linux memory kernel services
 *
 * (C) 2000-2002 InnoTek Systemberatung GmbH
 * (C) 2000-2001 Sander van Leeuwen (sandervl@xs4all.nl)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#define INCL_NOPMAPI
#define INCL_DOSERRORS // for ERROR_INVALID_FUNCTION
#include <os2.h>
#include <devhelp.h>
#include <ossidc.h>
#include <string.h>
#include <dbgos2.h>
#include <stacktoflat.h>
#include <limits.h>
#include <kee.h>
#include "malloc.h"
#define _I386_PAGE_H
typedef struct { unsigned long pgprot; } pgprot_t;
#define MAP_NR(addr) (__pa(addr) >> PAGE_SHIFT)
#define PAGE_SHIFT 12
#define __PAGE_OFFSET (0xC0000000)

#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/printk.h>

#pragma off (unreferenced)

#define PAGE_SIZE 4096
#define min(a,b) (((a) < (b)) ? (a) : (b))

int free_pages(unsigned long addr, unsigned long order);
int __compat_get_order(unsigned long size);
#ifdef DEBUGHEAP
void near *__kmalloc(int size, int flags, const char *filename, int lineno);
void __kfree(const void near *ptr, const char *filename, int lineno);
#else
void near *__kmalloc(int size, int flags);
void __kfree(const void near *ptr);
#endif

#ifdef DEBUGHEAP
#define _kmalloc(a, b) __kmalloc(a, b, __FILE__, __LINE__)
#define _kfree(a) __kfree(a, __FILE__, __LINE__)
#else
#define _kmalloc(a, b) __kmalloc(a, b)
#define _kfree(a) __kfree(a)
#endif

typedef struct _BaseAddr {
    ULONG base;     // VMAlloc addr
    ULONG retaddr;  // aligned addr returned to caller
    ULONG size;     // VMAlloc size
    struct _BaseAddr NEAR *next;
} BaseAddr;

static BaseAddr NEAR *pBaseAddrHead = NULL;

//******************************************************************************
//Very simple linked list that stores the original address returned by VMAlloc
//whenever the address handed to the caller differs from it because of
//alignment requirements. Performance is not an issue, as the alloc & free
//functions are called rarely (e.g. the ALS4000 driver calls them 4 times:
//2 allocs during boot, 2 frees during shutdown). See the usage sketch after
//GetBaseAddressNoFree below.
//******************************************************************************
void AddBaseAddress(ULONG baseaddr, ULONG retaddr, ULONG size)
{
    BaseAddr NEAR *pBase;

    pBase = (BaseAddr NEAR *)_kmalloc(sizeof(BaseAddr), 0);
    if(pBase == NULL) {
        DebugInt3();
        return;
    }
    DevCli();
    pBase->base = baseaddr;
    pBase->retaddr = retaddr;
    pBase->size = size;
    pBase->next = pBaseAddrHead;
    pBaseAddrHead = pBase;
    DevSti();
}
//******************************************************************************
//******************************************************************************
ULONG GetBaseAddressAndFree(ULONG addr, ULONG *pSize)
{
    BaseAddr NEAR *pCur, NEAR *pTemp;

    if(pBaseAddrHead == NULL) return addr;

    DevCli();
    pCur = pBaseAddrHead;

    // If the address is in the list, remove the list item and free the entry.
    // The caller must VMFree the returned address, or the block is leaked.
    if(pCur->retaddr == addr)
    {
        addr = pCur->base;
        if(pSize) *pSize = pCur->size;
        pBaseAddrHead = pCur->next;
        _kfree(pCur);
    }
    else
    while(pCur->next) {
        if(pCur->next->retaddr == addr) {
            pTemp = pCur->next;
            addr = pTemp->base;
            if(pSize) *pSize = pTemp->size;
            pCur->next = pTemp->next;

            _kfree(pTemp);
            break;
        }
        pCur = pCur->next;
    }
    DevSti();
    return addr;
}
//******************************************************************************
//******************************************************************************
ULONG GetBaseAddressNoFree(ULONG addr, ULONG *pSize)
{
    BaseAddr NEAR *pCur;

    if(pBaseAddrHead == NULL) return addr;

    DevCli();
    pCur = pBaseAddrHead;

    // Pure lookup: unlike GetBaseAddressAndFree, the list must not be
    // modified here, so that a later free still finds the entry.
    while(pCur) {
        if(pCur->retaddr == addr) {
            addr = pCur->base;
            if(pSize) *pSize = pCur->size;
            break;
        }
        pCur = pCur->next;
    }
    DevSti();
    return addr;
}
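//******************************************************************************
// Illustrative sketch (not part of the build): how the BaseAddr list above is
// meant to be used by the allocators below. BaseAddrExample is a hypothetical
// helper and all addresses and sizes are made up.
//******************************************************************************
#if 0
void BaseAddrExample(void)
{
    ULONG base, size;

    // VMAlloc returned 0x12340000, but the caller was handed the 64KB-aligned
    // address 0x12350000; remember the real base so it can be freed later.
    AddBaseAddress(0x12340000, 0x12350000, 0x20000);

    // ksize-style query: look up base and size without touching the list
    base = GetBaseAddressNoFree(0x12350000, &size);  // base = 0x12340000

    // free_pages-style teardown: unlink the entry, free the list node, and
    // hand the real base address to VMFree
    base = GetBaseAddressAndFree(0x12350000, &size);
    VMFree((LINEAR)base);
}
#endif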
//******************************************************************************
//NOTE: Assumes the memory is physically contiguous; only the first page is
//      translated.
//******************************************************************************
unsigned long virt_to_phys(void * address)
{
    KEEVMPageList pagelist;
    ULONG nrpages;

    if(KernLinToPageList(address, PAGE_SIZE, &pagelist, &nrpages)) {
        DebugInt3();
        return 0;
    }
    return pagelist.addr;
}
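//******************************************************************************
// Illustrative sketch (not part of the build): virt_to_phys above deliberately
// inspects only the first page. A multi-page buffer could be verified as
// physically contiguous roughly like this. IsPhysContiguous is a hypothetical
// helper, and it assumes each KEEVMPageList element describes one physically
// contiguous run via addr/size fields; check the KEE documentation before
// relying on that.
//******************************************************************************
#if 0
int IsPhysContiguous(void *address, ULONG size)
{
    KEEVMPageList pagelist[16];     // enough elements for a 64KB range
    ULONG nrpages, i;

    if(KernLinToPageList(address, size, &pagelist[0], &nrpages))
        return 0;
    // contiguous if each run starts where the previous one ended
    for(i = 1; i < nrpages; i++) {
        if(pagelist[i].addr != pagelist[i-1].addr + pagelist[i-1].size)
            return 0;
    }
    return 1;
}
#endif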
//******************************************************************************
//******************************************************************************
void * phys_to_virt(unsigned long address)
{
    APIRET rc = 0;
    ULONG addr = 0;
    SHORT sel;

    rc = KernVMAlloc(PAGE_SIZE, VMDHA_PHYS, (PVOID*)&addr, (PVOID*)&address, &sel);
    if (rc != 0) {
        DebugInt3();
        return NULL;
    }
    return (void *)addr;
}
//******************************************************************************
extern int fStrategyInit;
//******************************************************************************
APIRET VMAlloc(ULONG size, ULONG flags, LINEAR *pAddr)
{
    APIRET rc;
    ULONG addr;
    SHORT sel;

    if(fStrategyInit && !(flags & VMDHA_16M)) {
        flags |= VMDHA_USEHIGHMEM;
    }

__again:

    rc = KernVMAlloc(size, flags, (PVOID*)&addr, (PVOID*)-1, &sel);
    if (rc == 0) {
        *pAddr = (LINEAR)addr;
        if (flags & VMDHA_USEHIGHMEM)
            dprintf1((("allocated %X in HIGH memory\n"), size));
        else dprintf1((("allocated %X in LOW memory\n"), size));
    }
    if ((rc == 87) && (flags & VMDHA_USEHIGHMEM))
    {
        // EARLYMEMINIT workaround: 87 = ERROR_INVALID_PARAMETER, so
        // retry the allocation in low memory
        flags = flags & (~VMDHA_USEHIGHMEM);
        goto __again;
    }
    return rc;
}
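//******************************************************************************
// Illustrative sketch (not part of the build): typical VMAlloc/VMFree pairing.
// During strategy init VMAlloc prefers high (>16MB) memory and silently falls
// back to low memory on kernels that reject VMDHA_USEHIGHMEM. VMAllocExample
// is a hypothetical helper.
//******************************************************************************
#if 0
void VMAllocExample(void)
{
    LINEAR addr;

    if(VMAlloc(0x1000, VMDHA_FIXED|VMDHA_CONTIG, &addr) == 0) {
        // ... use the page ...
        VMFree(addr);
    }
}
#endif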
//******************************************************************************
//******************************************************************************
APIRET VMFree(LINEAR addr)
{
    APIRET rc;

    rc = KernVMFree((PVOID)addr);
    if(rc) {
        DebugInt3();
    }

    return rc;
}
//******************************************************************************
ULONG ulget_free_pagesMemUsed = 0;

//******************************************************************************
//******************************************************************************
void *__get_free_dma_pages(unsigned long size, unsigned long flags)
{
    ULONG addr, physaddr, tempaddr;
    APIRET rc;

    if(VMAlloc(size, flags, (LINEAR *)&addr)) {
        DebugInt3();
        return 0;
    }
    physaddr = virt_to_phys((void *)addr);
    if(physaddr) {
        ULONG startpage = (physaddr >> 16);
        ULONG endpage = (physaddr + ((size < 0x10000) ? size : 63*1024)) >> 16;

        if(startpage != endpage) {
            // buffer crosses a 64KB boundary; try once more
            rc = VMAlloc(size, flags, (LINEAR *)&tempaddr);
            VMFree((LINEAR)addr);
            if(rc) {
                DebugInt3();
                return 0;
            }
            addr = tempaddr;

            physaddr = virt_to_phys((void *)addr);
            if(physaddr) {
                ULONG startpage = (physaddr >> 16);
                ULONG endpage = (physaddr + ((size < 0x10000) ? size : 63*1024)) >> 16;

                if(startpage != endpage) {
                    // this didn't work either; fail so the caller can fall
                    // back to the wasteful over-allocation method
                    VMFree((LINEAR)addr);
                    dprintf(("get_free_dma_pages failed %x size:%x st:%x end:%x, trying wasteful method instead", physaddr, size, startpage, endpage));
                    return 0;
                }
            }
        }
    }
    else {
        DebugInt3();
        VMFree((LINEAR)addr);
        addr = 0;
    }

    if(addr) {
        // recorded only to track the size of the memory block
        AddBaseAddress(addr, addr, size);
        ulget_free_pagesMemUsed += size;
        dprintf(("get_free_dma_pages: size=%x adr=%x (phys %x) total alloc size=%x",
                 size, (ULONG)addr, virt_to_phys((void *)addr), ulget_free_pagesMemUsed));
    }

    return (void *)addr;
}
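//******************************************************************************
// Worked example of the 64KB boundary test above (made-up numbers):
//   physaddr  = 0x0012F000, size = 0x2000
//   startpage = 0x0012F000 >> 16            = 0x12
//   endpage   = (0x0012F000 + 0x2000) >> 16 = 0x13
// startpage != endpage, so the buffer would cross the 64KB boundary at
// 0x00130000 and is unusable for ISA-style DMA; the allocation is retried.
//******************************************************************************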
//******************************************************************************
//******************************************************************************
void *__get_free_pages(int gfp_mask, unsigned long order)
{
    ULONG addr;
    ULONG flags = VMDHA_FIXED|VMDHA_CONTIG;
    ULONG size, allocsize;

    order = (1 << order);   // as in Linux, 'order' requests 2^order pages
    size = order * PAGE_SIZE;

    if(gfp_mask & (GFP_DMA|GFP_DMAHIGHMEM))
    {   //below 16 mb for legacy DMA?
        if(gfp_mask & GFP_DMA)
            flags |= VMDHA_16M;

        //these buffers must not cross a 64KB boundary

        //first try a less wasteful approach
        void *pBlock;

        pBlock = __get_free_dma_pages(size, flags);
        if(pBlock) {
            return pBlock;
        }
        //else allocate extra memory to make sure we can satisfy
        //the alignment requirement
        if(size < 0x10000) {
            allocsize = size * 2;
        }
        else {
            allocsize = size + 0x10000;
        }
    }
    else allocsize = size;

    if(VMAlloc(allocsize, flags, (LINEAR *)&addr)) {
        DebugInt3();
        return 0;
    }
    //dprintf(("__get_free_pages %d returned %x", order*PAGE_SIZE, addr));
    if(gfp_mask & (GFP_DMA|GFP_DMAHIGHMEM))
    {   //must not cross a 64KB boundary
        ULONG physaddr = virt_to_phys((void *)addr);
        ULONG physaddr2;

        if(physaddr) {
            ULONG startpage = (physaddr >> 16);
            ULONG endpage = (physaddr + ((size < 0x10000) ? size : 63*1024)) >> 16;

            if (startpage != endpage) {
                // crosses a 64KB boundary; move up to the next boundary and
                // remember the real base for free_pages()
                physaddr2 = (startpage+1) << 16;

                AddBaseAddress(addr, addr + (physaddr2 - physaddr), allocsize);
                addr += (physaddr2 - physaddr);
            }
        }
        else {
            DebugInt3();
            free_pages(addr, order);
            addr = 0;
        }
    }
    else {
        // recorded only to track the size of the memory block
        AddBaseAddress(addr, addr, allocsize);
    }
    if(addr) {
        //dprintf(("get_free_pages %d (%d) -> %x (phys %x)", allocsize, size, (ULONG)addr, virt_to_phys((void *)addr)));
        ulget_free_pagesMemUsed += allocsize;
        //dprintf(("get_free_pages: total alloc size %d", ulget_free_pagesMemUsed));
    }
    return (void *)addr;
}
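//******************************************************************************
// Worked example of the realignment path above (made-up numbers): with
// allocsize = 2*size the over-sized block always contains a 64KB-aligned
// start.
//   physaddr  = 0x0012F000  (physical start of the over-sized block)
//   startpage = 0x12, so physaddr2 = 0x13 << 16 = 0x00130000
//   addr     += 0x00130000 - 0x0012F000 = 0x1000
// The shifted addr goes to the caller, while AddBaseAddress records the
// original base so free_pages() can VMFree the real allocation.
//******************************************************************************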
//******************************************************************************
//******************************************************************************
int free_pages(unsigned long addr, unsigned long order)
{
    ULONG size = 0;

    //check if it really is the base of the allocation (see above)
    addr = GetBaseAddressAndFree(addr, (ULONG NEAR *)&size);

    if(VMFree((LINEAR)addr)) {
        DebugInt3();
    }
    else {
        //dprintf(("free_pages %x size %d", (ULONG)addr, size));
        ulget_free_pagesMemUsed -= size;
        //dprintf(("free_pages: total alloc size %d", ulget_free_pagesMemUsed));
    }
    return 0;
}
//******************************************************************************
//******************************************************************************
void *vmalloc(unsigned long size)
{
    ULONG addr = 0;
    ULONG flags = VMDHA_FIXED|VMDHA_CONTIG;

    //round up to the next page boundary
    size = size + PAGE_SIZE - 1;
    size &= 0xFFFFF000;

    if(VMAlloc(size, flags, (LINEAR *)&addr)) {
        DebugInt3();
        return 0;
    }
    if(addr) {
        //dprintf(("vmalloc %d -> %x (phys %x)", size, addr, virt_to_phys((void *)addr)));
        // recorded only to track the size of the memory block
        AddBaseAddress(addr, addr, size);
        ulget_free_pagesMemUsed += size;
        //dprintf(("vmalloc: total alloc size %d", ulget_free_pagesMemUsed));
    }
    return (void *)addr;
}
//******************************************************************************
//******************************************************************************
void *__vmalloc(unsigned long size, gfp_t gfp_mask)
{
    LINEAR addr;

    addr = (LINEAR)vmalloc(size);
    if (addr && (gfp_mask & __GFP_ZERO))   // don't memset a failed allocation
        memset(addr, 0, size);

    return addr;
}
//******************************************************************************
//******************************************************************************
/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @align: desired alignment
 * @gfp_mask: flags for the page level allocator
 * @node: node to use for allocation or NUMA_NO_NODE
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level allocator with
 * @gfp_mask flags. Map them into contiguous kernel virtual space.
 *
 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
 * and __GFP_NOFAIL are not supported.
 *
 * Any use of gfp flags outside of GFP_KERNEL should be consulted
 * with mm people.
 *
 * Return: pointer to the allocated memory or %NULL on error
 *
 * Note: this OS/2 port simply forwards to vmalloc(); @align, @node and
 * @caller are ignored.
 */
void *__vmalloc_node(unsigned long size, unsigned long align,
                     gfp_t gfp_mask, int node, const void *caller)
{
    return vmalloc(size);
}
//******************************************************************************
//******************************************************************************
void vfree(void *ptr)
{
    ULONG addr;
    ULONG size = 0;

    //look up the real allocation base (identical to ptr for vmalloc blocks)
    addr = GetBaseAddressAndFree((ULONG)ptr, (ULONG NEAR *)&size);

    if(VMFree((LINEAR)addr)) {
        DebugInt3();
    }
    else {
        //dprintf(("vfree %x size %d", (ULONG)ptr, size));
        ulget_free_pagesMemUsed -= size;
        //dprintf(("vfree: total alloc size %d", ulget_free_pagesMemUsed));
    }
}
//******************************************************************************
//******************************************************************************
struct page * alloc_pages(int gfp_mask, unsigned long order)
{
    DebugInt3();
    return 0;
}
//******************************************************************************
//******************************************************************************
int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot)
{
    DebugInt3();
    return 0;
}
//******************************************************************************
//Map a physical address (memory mapped I/O range) to a linear address
//******************************************************************************
void * __ioremap(unsigned long physaddr, unsigned long size, unsigned long flags)
{
    ULONG addr = 0, Offset = 0, PhysicalAddress = 0, Length = 0;
    APIRET rc;
    SHORT sel;

    PhysicalAddress = physaddr;
    Length = size;

    // split the request into the containing page range plus an offset
    Offset = PhysicalAddress & (PAGE_SIZE - 1);          // offset within page
    Length = (Length + Offset + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
    PhysicalAddress &= ~(PAGE_SIZE - 1);
    //dprintf(("ioremap: len %d phys %x off %x", Length, PhysicalAddress, Offset));

    rc = KernVMAlloc(Length, VMDHA_PHYS, (PVOID*)&addr, (PVOID*)&PhysicalAddress, &sel);
    if (rc != 0) {
        dprintf(("ioremap error: %x", rc));
        DebugInt3();
        return NULL;
    }
    return (void *)(addr + Offset);
}
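//******************************************************************************
// Worked example of the page math above (made-up MMIO address):
//   physaddr = 0xFEB00820, size = 0x900
//   Offset          = 0xFEB00820 & 0xFFF              = 0x820
//   Length          = (0x900 + 0x820 + 0xFFF) & ~0xFFF = 0x2000
//   PhysicalAddress = 0xFEB00820 & ~0xFFF             = 0xFEB00000
// The whole containing page range is mapped, addr+Offset points at the
// requested register block, and iounmap() rounds back down before freeing.
//******************************************************************************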
//******************************************************************************
//******************************************************************************
void iounmap(void *addr)
{
    LINEAR ad;

    // round down to the 4KB page base that __ioremap's KernVMAlloc returned
    ad = (LINEAR)((ULONG)addr & ~0xfff);
    if(VMFree(ad)) {
        DebugInt3();
    }
}
//******************************************************************************
//******************************************************************************
int is_access_ok(int type, void *addr, unsigned long size)
{
    return 1;
}
//******************************************************************************
//******************************************************************************
void __copy_user(void *to, const void *from, unsigned long n)
{
    if(to == NULL || from == NULL) {
        DebugInt3();
        return;
    }
    if(n == 0) return;

    memcpy(to, from, n);
}
//******************************************************************************
//******************************************************************************
unsigned long copy_to_user(void *to, const void *from, unsigned long n)
{
    if(to == NULL || from == NULL) {
        DebugInt3();
        return 0;
    }
    if(n == 0) return 0;

    memcpy(to, from, n);
    return 0;
}
//******************************************************************************
//******************************************************************************
void __copy_user_zeroing(void *to, const void *from, unsigned long n)
{
    if(to == NULL || from == NULL) {
        DebugInt3();
        return;
    }
    if(n == 0) return;

    copy_to_user(to, from, n);
}
//******************************************************************************
//******************************************************************************
unsigned long copy_from_user(void *to, const void *from, unsigned long n)
{
    if(to == NULL || from == NULL) {
        DebugInt3();
        return 0;
    }
    if(n == 0) return 0;

    memcpy(to, from, n);
    return 0;
}
//******************************************************************************
//******************************************************************************
int __get_user(int size, void *dest, void *src)
{
    if(size == 0) return 0;

    if(dest == NULL || src == NULL) {
        DebugInt3();
        return 0;
    }
    memcpy(dest, src, size);
    return 0;
}
//******************************************************************************
//******************************************************************************
int _put_user(int size, int x, void *ptr)
{
    if(ptr == NULL || size == 0) {
        DebugInt3();
        return 0;
    }
    // honor the requested size; writing a full int for a 1 or 2 byte
    // target would clobber the adjacent bytes
    switch(size) {
    case 1:
        *(char *)ptr = (char)x;
        break;
    case 2:
        *(short *)ptr = (short)x;
        break;
    default:
        *(int *)ptr = x;
        break;
    }
    return 0;
}

//******************************************************************************
//******************************************************************************
#ifdef DEBUGHEAP
void *__kmalloc(int size, int flags, const char *filename, int lineno)
#else
void *__kmalloc(int size, int flags)
#endif
{
    LINEAR addr;

    if(size == 0) {
        DebugInt3();
        return NULL;
    }
    if(flags & GFP_DMA) {
        DebugInt3();    // GFP_DMA is not supported here
    }
    if(size >= 4096) {
        // large blocks bypass the local heap and go straight to vmalloc
        addr = (LINEAR)vmalloc(size);
    } else {
#ifdef DEBUGHEAP
        addr = (LINEAR)malloc(size, filename, lineno);
#else
        addr = (LINEAR)malloc(size);
#endif
    }
    if(addr == NULL) {
        DebugInt3();
        return 0;
    }
    if (flags & __GFP_ZERO)
        memset(addr, 0, size);
    //dprintf(("kmalloc %d returned %x", size, addr));
    return addr;
}

//******************************************************************************
//******************************************************************************
#ifdef DEBUGHEAP
void __kfree(const void *ptr, const char *filename, int lineno)
#else
void __kfree(const void *ptr)
#endif
{
    ULONG addr;

    addr = (ULONG)ptr;
    if(addr == 0)
    {
        DebugInt3();
        return;
    }
    //dprintf(("kfree %x", addr));
    if(IsHeapAddr(addr)) {
#ifdef DEBUGHEAP
        free((void *)addr, filename, lineno);
#else
        free((void *)addr);
#endif
    }
    else vfree((PVOID)addr);
}

//******************************************************************************
//******************************************************************************
void *kcalloc(size_t n, size_t size, unsigned int flags)
{
    // refuse requests whose total size would overflow n * size
    if (n != 0 && size > INT_MAX / n)
        return NULL;
    return kzalloc(n * size, flags);
}
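//******************************************************************************
// Worked example of the overflow guard above: on 32 bits, n = 0x10000 and
// size = 0x10000 would make n*size wrap to 0; since size > INT_MAX / n
// (0x10000 > 0x7FFF), kcalloc returns NULL instead of a tiny allocation.
//******************************************************************************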
//******************************************************************************
//******************************************************************************

size_t ksize(const void *block)
{
    size_t size = 0;    // stays 0 if the block is unknown

    if (!block)
        size = 0; // Bad coder

    else if (block == ZERO_SIZE_PTR)
        size = 0; // Bad coder

    else if(IsHeapAddr((ULONG)block))
        size = _msize((void _near *)block);

    else if (!GetBaseAddressNoFree((ULONG)block, (ULONG NEAR *)&size))
        size = 0; // Something wrong

    return size;
}
//******************************************************************************
//******************************************************************************
static inline void *__do_krealloc(const void *p, size_t new_size,
                                  gfp_t flags)
{
    void *ret;
    size_t ks = 0;

    if (p)
        ks = ksize(p);

    // shrinking, or growing within the existing block, is free
    if (ks >= new_size)
        return (void *)p;

    ret = _kmalloc(new_size, flags);    // _kmalloc so DEBUGHEAP builds compile
    if (ret && p)
        memcpy(ret, p, ks);

    return ret;
}
//******************************************************************************
//******************************************************************************
/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
    void *ret;

    if (!new_size) {
        kfree(p);
        return ZERO_SIZE_PTR;
    }

    ret = __do_krealloc(p, new_size, flags);
    if (ret && p != ret)
        kfree(p);

    return ret;
}
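//******************************************************************************
// Illustrative sketch (not part of the build): the krealloc grow pattern. The
// old block is freed only when a new one was actually handed back, so data is
// never lost on failure. KreallocExample is a hypothetical helper; kmalloc,
// kfree and GFP_KERNEL come from the Linux compatibility headers used
// elsewhere in this driver.
//******************************************************************************
#if 0
void KreallocExample(void)
{
    char *buf, *bigger;

    buf = (char *)kmalloc(64, GFP_KERNEL);
    if(!buf) return;

    bigger = (char *)krealloc(buf, 256, GFP_KERNEL);
    if(bigger)
        buf = bigger;   // first 64 bytes preserved
    // on failure buf is still valid and still owned by the caller

    kfree(buf);
}
#endif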
//******************************************************************************
//******************************************************************************
/**
 * vzalloc - allocate virtually contiguous memory with zero fill
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
    void *buf;

    buf = vmalloc(size);
    if (buf)
        memset(buf, 0, size);
    return buf;
}

//******************************************************************************
//******************************************************************************
/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory, but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that any use of gfp flags outside of GFP_KERNEL is careful to not
 * fall back to vmalloc.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
    gfp_t kmalloc_flags = flags;
    void *ret;

    /*
     * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
     * so the given set of flags has to be compatible.
     */
    if ((flags & GFP_KERNEL) != GFP_KERNEL)
        return kmalloc_node(size, flags, node);

    /*
     * We want to attempt a large physically contiguous block first because
     * it is less likely to fragment multiple larger blocks and therefore
     * contribute to a long term fragmentation less than vmalloc fallback.
     * However make sure that larger requests are not too disruptive - no
     * OOM killer and no allocation failure warnings as we have a fallback.
     */
    if (size > PAGE_SIZE) {
        kmalloc_flags |= __GFP_NOWARN;

        if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
            kmalloc_flags |= __GFP_NORETRY;
    }

    ret = kmalloc_node(size, kmalloc_flags, node);

    /*
     * It doesn't really make sense to fall back to vmalloc for sub page
     * requests
     */
    if (ret || size <= PAGE_SIZE)
        return ret;

    return __vmalloc_node(size, 1, flags, node,
                          __builtin_return_address(0));
}
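//******************************************************************************
// Illustrative sketch (not part of the build): kvmalloc_node pairs with
// kvfree(), the counterpart named in the comment above, which must pick
// vfree() or kfree() depending on where the block ended up; plain kfree() is
// not a safe substitute for large requests. KvmallocExample is a hypothetical
// helper, and kvfree is assumed to be provided by the compatibility layer.
//******************************************************************************
#if 0
void KvmallocExample(void)
{
    void *tbl = kvmalloc_node(128*1024, GFP_KERNEL, 0 /* node ignored here */);
    if(tbl) {
        // ... use the table ...
        kvfree(tbl);
    }
}
#endif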
//******************************************************************************
//******************************************************************************