source: GPL/trunk/lib32/memory.c@ 689

Last change on this file since 689 was 679, checked in by David Azarewicz, 4 years ago

Merge changes from Paul's uniaud32next branch.

File size: 25.8 KB
/* $Id: memory.cpp,v 1.1.1.1 2003/07/02 13:57:02 eleph Exp $ */
/*
 * OS/2 implementation of Linux memory kernel services
 *
 * (C) 2000-2002 InnoTek Systemberatung GmbH
 * (C) 2000-2001 Sander van Leeuwen (sandervl@xs4all.nl)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#define INCL_NOPMAPI
#define INCL_DOSERRORS // for ERROR_INVALID_FUNCTION
#include <os2.h>
#include <devhelp.h>
#include <ossidc.h>
#include <string.h>
#include <dbgos2.h>
#include <stacktoflat.h>
#include <limits.h>
#include <kee.h>
#include "malloc.h"
#define _I386_PAGE_H
typedef struct { unsigned long pgprot; } pgprot_t;
#define MAP_NR(addr) (__pa(addr) >> PAGE_SHIFT)
#define PAGE_SHIFT 12
#define __PAGE_OFFSET (0xC0000000)

#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/printk.h>

#pragma off (unreferenced)

#define PAGE_SIZE 4096
#define min(a,b) (((a) < (b)) ? (a) : (b))

int free_pages(unsigned long addr, unsigned long order);
int __compat_get_order(unsigned long size);
#ifdef DEBUGHEAP
void near *__kmalloc(int size, int flags, const char *filename, int lineno);
void __kfree(const void near *ptr, const char *filename, int lineno);
#else
void near *__kmalloc(int size, int flags);
void __kfree(const void near *ptr);
#endif

#ifdef DEBUGHEAP
#define _kmalloc(a, b) __kmalloc(a, b, __FILE__, __LINE__)
#define _kfree(a) __kfree(a, __FILE__, __LINE__)
#else
#define _kmalloc(a, b) __kmalloc(a, b)
#define _kfree(a) __kfree(a)
#endif

typedef struct _BaseAddr {
    ULONG base;     // VMAlloc addr
    ULONG retaddr;  // aligned addr returned to caller
    ULONG size;     // VMAlloc size
    struct _BaseAddr NEAR *next;
} BaseAddr;

static BaseAddr NEAR *pBaseAddrHead = NULL;

//******************************************************************************
//Very simple linked list that stores the original address returned by VMAlloc
//whenever the address handed back to the caller differs from it because of
//alignment requirements.
//Performance is not an issue, as the alloc & free functions aren't called often
//(e.g. the ALS4000 driver calls them 4 times: 2 allocs during boot, 2 frees
//during shutdown).
//******************************************************************************
void AddBaseAddress(ULONG baseaddr, ULONG retaddr, ULONG size)
{
    BaseAddr NEAR *pBase;

    pBase = (BaseAddr NEAR *)_kmalloc(sizeof(BaseAddr), 0);
    if(pBase == NULL) {
        DebugInt3();
        return;
    }
    DevCli();
    pBase->base = baseaddr;
    pBase->retaddr = retaddr;
    pBase->size = size;
    pBase->next = pBaseAddrHead;
    pBaseAddrHead = pBase;
    DevSti();
}
//******************************************************************************
//******************************************************************************
ULONG GetBaseAddressAndFree(ULONG addr, ULONG *pSize)
{
    BaseAddr NEAR *pCur, NEAR *pTemp;

    if(pBaseAddrHead == NULL) return addr;

    DevCli();
    pCur = pBaseAddrHead;

    // If the address is in the list, unlink the list item and free it.
    // The caller must VMFree the returned address, or the block leaks.
    if(pCur->retaddr == addr)
    {
        addr = pCur->base;
        if(pSize) *pSize = pCur->size;
        pBaseAddrHead = pCur->next;
        _kfree(pCur);
    }
    else {
        while(pCur->next) {
            if(pCur->next->retaddr == addr) {
                pTemp = pCur->next;
                addr = pTemp->base;
                if(pSize) *pSize = pTemp->size;
                pCur->next = pTemp->next;
                _kfree(pTemp);
                break;
            }
            pCur = pCur->next;
        }
    }
    DevSti();
    return addr;
}
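//******************************************************************************
// Illustrative usage sketch (not part of the original driver): how the
// base-address list pairs an aligned address handed out to a caller with the
// real VMAlloc base, so the free path can release the right block. The size,
// flags and 64KB rounding below are made-up example values.
//******************************************************************************
#if 0
void example_aligned_alloc_free(void)
{
    ULONG base, aligned, size = 0x8000;

    // over-allocate so an aligned sub-block always fits (hypothetical)
    if(VMAlloc(size * 2, VMDHA_FIXED|VMDHA_CONTIG, (LINEAR *)&base) == 0) {
        aligned = (base + 0xFFFF) & ~0xFFFF;     // round up to 64KB (example)
        AddBaseAddress(base, aligned, size * 2); // remember the real base
        // ... caller works with 'aligned' ...
        // lookup returns 'base' and unlinks the entry; free the real base
        VMFree((LINEAR)GetBaseAddressAndFree(aligned, NULL));
    }
}
#endif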
//******************************************************************************
//******************************************************************************
ULONG GetBaseAddressNoFree(ULONG addr, ULONG *pSize)
{
    BaseAddr NEAR *pCur;

    if(pBaseAddrHead == NULL) return addr;

    DevCli();
    // Look the entry up WITHOUT unlinking it; the list must stay intact so
    // that a later GetBaseAddressAndFree/VMFree can still find the real base.
    // (An earlier version unlinked the entry here without freeing it, which
    // leaked the node and broke the subsequent free of an aligned block.)
    for(pCur = pBaseAddrHead; pCur; pCur = pCur->next) {
        if(pCur->retaddr == addr) {
            addr = pCur->base;
            if(pSize) *pSize = pCur->size;
            break;
        }
    }
    DevSti();
    return addr;
}
//******************************************************************************
//NOTE: Assumes memory is contiguous!!
//******************************************************************************
unsigned long virt_to_phys(void * address)
{
    KEEVMPageList pagelist;
    ULONG nrpages;

    if(KernLinToPageList(address, PAGE_SIZE, &pagelist, &nrpages)) {
        DebugInt3();
        return 0;
    }
    return pagelist.addr;
}
//******************************************************************************
//******************************************************************************
void * phys_to_virt(unsigned long address)
{
    APIRET rc = 0;
    ULONG addr = 0;
    SHORT sel;

    // NOTE: creates a fresh one-page mapping on every call; nothing here
    // ever frees it
    rc = KernVMAlloc(PAGE_SIZE, VMDHA_PHYS, (PVOID*)&addr, (PVOID*)&address, &sel);
    if (rc != 0) {
        DebugInt3();
        return NULL;
    }
    return (void *)addr;
}
//******************************************************************************
extern int fStrategyInit;
//******************************************************************************
APIRET VMAlloc(ULONG size, ULONG flags, LINEAR *pAddr)
{
    APIRET rc;
    ULONG addr;
    SHORT sel;

    if(fStrategyInit && !(flags & VMDHA_16M)) {
        flags |= VMDHA_USEHIGHMEM;
    }

__again:

    rc = KernVMAlloc(size, flags, (PVOID*)&addr, (PVOID*)-1, &sel);
    if (rc == 0) {
        *pAddr = (LINEAR)addr;
        if (flags & VMDHA_USEHIGHMEM)
            dprintf1((("allocated %X in HIGH memory\n"), size));
        else dprintf1((("allocated %X in LOW memory\n"), size));
    }
    if ((rc == 87) && (flags & VMDHA_USEHIGHMEM))
    {
        // EARLYMEMINIT workaround: 87 = ERROR_INVALID_PARAMETER;
        // retry the allocation in low memory
        flags = flags & (~VMDHA_USEHIGHMEM);
        goto __again;
    }
    return rc;
}
//******************************************************************************
//******************************************************************************
APIRET VMFree(LINEAR addr)
{
    APIRET rc;
    rc = KernVMFree((PVOID)addr);
    if(rc) {
        DebugInt3();
    }

    return rc;
}
//******************************************************************************
ULONG ulget_free_pagesMemUsed = 0;

//******************************************************************************
//******************************************************************************
void *__get_free_dma_pages(unsigned long size, unsigned long flags)
{
    ULONG addr, physaddr, tempaddr;
    APIRET rc;

    if(VMAlloc(size, flags, (LINEAR *)&addr)) {
        DebugInt3();
        return 0;
    }
    physaddr = virt_to_phys((void *)addr);
    if(physaddr) {
        ULONG startpage = (physaddr >> 16);
        ULONG endpage = (physaddr + ((size < 0x10000) ? size : 63*1024)) >> 16;

        if(startpage != endpage) {
            // block crosses a 64KB boundary, try once more
            rc = VMAlloc(size, flags, (LINEAR *)&tempaddr);
            VMFree((LINEAR)addr);
            if(rc) {
                DebugInt3();
                return 0;
            }
            addr = tempaddr;

            physaddr = virt_to_phys((void *)addr);
            if(physaddr) {
                ULONG startpage = (physaddr >> 16);
                ULONG endpage = (physaddr + ((size < 0x10000) ? size : 63*1024)) >> 16;

                if(startpage != endpage) {
                    //oops, this didn't work either; fail
                    VMFree((LINEAR)addr);
                    dprintf(("get_free_dma_pages failed %x size:%x st:%x end:%x, trying wasteful method instead", physaddr, size, startpage, endpage));
                    return 0;
                }
            }
        }
    }
    else {
        DebugInt3();
        VMFree((LINEAR)addr);
        addr = 0;
    }

    if(addr) {
        //only done to save size of memory block
        AddBaseAddress(addr, addr, size);
        ulget_free_pagesMemUsed += size;
        dprintf(("get_free_dma_pages: size=%x adr=%x (phys %x) total alloc size=%x",
                 size, (ULONG)addr, virt_to_phys((void *)addr), ulget_free_pagesMemUsed));
    }

    return (void *)addr;
}
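//******************************************************************************
// Worked example of the 64KB boundary check above (illustrative numbers):
//   physaddr = 0x0012F000, size = 0x2000
//   startpage = 0x0012F000 >> 16              = 0x12
//   endpage   = (0x0012F000 + 0x2000) >> 16   = 0x13
// startpage != endpage, so the 8KB block straddles a 64KB boundary and the
// allocation is retried. A block starting at physaddr = 0x00120000 would give
// startpage == endpage == 0x12 and be accepted.
//******************************************************************************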
//******************************************************************************
//******************************************************************************
void *__get_free_pages(int gfp_mask, unsigned long order)
{
    ULONG addr;
    ULONG flags = VMDHA_FIXED|VMDHA_CONTIG;
    ULONG size, allocsize;

    order = (1 << order); // Linux semantics: an order-n request is 2^n pages
    size = order * PAGE_SIZE;

    if(gfp_mask & (GFP_DMA|GFP_DMAHIGHMEM))
    {//below 16 MB for legacy DMA?
        if(gfp_mask & GFP_DMA)
            flags |= VMDHA_16M;

        //these buffers must not cross a 64KB boundary

        //first try a less wasteful approach
        void *pBlock;

        pBlock = __get_free_dma_pages(size, flags);
        if(pBlock) {
            return pBlock;
        }
        //else allocate extra memory to make sure we can satisfy
        //the alignment requirement
        if(size < 0x10000) {
            allocsize = size * 2;
        }
        else {
            allocsize = size + 0x10000;
        }
    }
    else allocsize = size;

    if(VMAlloc(allocsize, flags, (LINEAR *)&addr)) {
        DebugInt3();
        return 0;
    }
    //dprintf(("__get_free_pages %d returned %x", order*PAGE_SIZE, addr));
    if(gfp_mask & (GFP_DMA|GFP_DMAHIGHMEM))
    {//must not cross a 64KB boundary
        ULONG physaddr = virt_to_phys((void *)addr);
        ULONG physaddr2;

        if(physaddr) {
            ULONG startpage = (physaddr >> 16);
            ULONG endpage = (physaddr + ((size < 0x10000) ? size : 63*1024)) >> 16;

            if (startpage != endpage) {
                // Crosses a 64KB boundary; move up to the next 64KB page
                physaddr2 = (startpage+1) << 16;

                AddBaseAddress(addr, addr + (physaddr2 - physaddr), allocsize);
                addr += (physaddr2 - physaddr);
            }
        }
        else {
            DebugInt3();
            free_pages(addr, order);
            addr = 0;
        }
    }
    else {
        //only done to save size of memory block
        AddBaseAddress(addr, addr, allocsize);
    }
    if(addr) {
        //dprintf(("get_free_pages %d (%d) -> %x (phys %x)", allocsize, size, (ULONG)addr, virt_to_phys((void *)addr)));
        ulget_free_pagesMemUsed += allocsize;
        //dprintf(("get_free_pages: total alloc size %d", ulget_free_pagesMemUsed));
    }
    return (void *)addr;
}
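//******************************************************************************
// Illustrative sketch (not in the original source): how the Linux-style
// 'order' argument maps onto byte sizes here. free_pages() should be handed
// the same order that was used for the allocation.
//******************************************************************************
#if 0
void example_page_alloc(void)
{
    // order 2 -> 1 << 2 = 4 pages = 16KB, physically contiguous
    void *buf = __get_free_pages(GFP_KERNEL, 2);
    if(buf) {
        // ... use buf ...
        free_pages((unsigned long)buf, 2);
    }
    // adding GFP_DMA would additionally force the block below 16MB and
    // keep it from crossing a 64KB boundary (legacy ISA DMA constraint)
}
#endif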
//******************************************************************************
//******************************************************************************
int free_pages(unsigned long addr, unsigned long order)
{
    ULONG size = 0;

    //check if this is really the base of the allocation (see above)
    addr = GetBaseAddressAndFree(addr, (ULONG NEAR *)&size);

    if(VMFree((LINEAR)addr)) {
        DebugInt3();
    }
    else {
        //dprintf(("free_pages %x size %d", (ULONG)addr, size));
        ulget_free_pagesMemUsed -= size;
        //dprintf(("free_pages: total alloc size %d", ulget_free_pagesMemUsed));
    }
    //dprintf(("free_pages %x", addr));
    return 0;
}
//******************************************************************************
//******************************************************************************
void *vmalloc(unsigned long size)
{
    ULONG addr = 0;
    ULONG flags = VMDHA_FIXED|VMDHA_CONTIG;

    //round up to the next page boundary
    size = size + PAGE_SIZE - 1;
    size &= 0xFFFFF000;

    if(VMAlloc(size, flags, (LINEAR *)&addr)) {
        DebugInt3();
        return 0;
    }
    if(addr) {
        //dprintf(("vmalloc %d -> %x (phys %x)", size, addr, virt_to_phys((void *)addr)));
        //only done to save size of memory block
        AddBaseAddress(addr, addr, size);
        ulget_free_pagesMemUsed += size;
        //dprintf(("vmalloc: total alloc size %d", ulget_free_pagesMemUsed));
    }
    return (void *)addr;
}
//******************************************************************************
//******************************************************************************
void *__vmalloc(unsigned long size, gfp_t gfp_mask)
{
    return vmalloc(size);
}
//******************************************************************************
//******************************************************************************
/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @align: desired alignment
 * @gfp_mask: flags for the page level allocator
 * @node: node to use for allocation or NUMA_NO_NODE
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level allocator with
 * @gfp_mask flags. Map them into contiguous kernel virtual space.
 *
 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
 * and __GFP_NOFAIL are not supported.
 *
 * Any use of gfp flags outside of GFP_KERNEL should be consulted
 * with mm people.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *__vmalloc_node(unsigned long size, unsigned long align,
                     gfp_t gfp_mask, int node, const void *caller)
{
    return vmalloc(size);
}
//******************************************************************************
//******************************************************************************
void vfree(void *ptr)
{
    ULONG size = 0;
    ULONG addr;

    //look up the real allocation base (identical to ptr for vmalloc'ed
    //blocks); free that, not the possibly-adjusted caller address
    addr = GetBaseAddressAndFree((ULONG)ptr, (ULONG NEAR *)&size);

    if(VMFree((LINEAR)addr)) {
        DebugInt3();
    }
    else {
        //dprintf(("vfree %x size %d", (ULONG)ptr, size));
        ulget_free_pagesMemUsed -= size;
        //dprintf(("vfree: total alloc size %d", ulget_free_pagesMemUsed));
    }
}
//******************************************************************************
//******************************************************************************
struct page * alloc_pages(int gfp_mask, unsigned long order)
{
    DebugInt3();
    return 0;
}
//******************************************************************************
//******************************************************************************
int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot)
{
    DebugInt3();
    return 0;
}
//******************************************************************************
//Map physical address (memory mapped io range) to linear
//******************************************************************************
void * __ioremap(unsigned long physaddr, unsigned long size, unsigned long flags)
{
    ULONG addr = 0, Offset = 0, PhysicalAddress = 0, Length = 0;
    SHORT sel;
    APIRET rc;

    PhysicalAddress = physaddr;
    Length = size;

    Offset = PhysicalAddress & (PAGE_SIZE - 1); // offset within page
    Length = (Length + Offset + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); // whole pages
    PhysicalAddress &= ~(PAGE_SIZE - 1); // round down to page boundary
    //dprintf(("ioremap: len %d phys %x off %x", Length, PhysicalAddress, Offset));

    rc = KernVMAlloc(Length, VMDHA_PHYS, (PVOID*)&addr, (PVOID*)&PhysicalAddress, &sel);
    if (rc != 0) {
        dprintf(("ioremap error: %x", rc));
        DebugInt3();
        return NULL;
    }
    return (void *)(addr + Offset); //PS
}
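//******************************************************************************
// Illustrative usage sketch (the register address is made up): __ioremap
// preserves the sub-page offset, so the returned pointer can be used directly
// even when the physical address is not page-aligned; iounmap rounds back
// down to the page base before freeing the mapping.
//******************************************************************************
#if 0
void example_mmio(void)
{
    unsigned long mmio_phys = 0xFEB00010;   // hypothetical MMIO register
    unsigned char *regs = (unsigned char *)__ioremap(mmio_phys, 0x100, 0);
    if(regs) {
        unsigned char status = regs[0];     // read a device register
        iounmap(regs);                      // rounds down to the page base
    }
}
#endif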
//******************************************************************************
//******************************************************************************
void iounmap(void *addr)
{
    LINEAR ad;

    ad = (LINEAR)addr;
    // *ad &= ~(0xfff); // 12 Jun 07 SHL this looks wrong
    ad = (LINEAR)((ULONG)ad & ~0xfff); // 12 Jun 07 SHL Round down to 4KiB
    if(VMFree((LINEAR)ad)) {
        DebugInt3();
    }
}
//******************************************************************************
//******************************************************************************
int is_access_ok(int type, void *addr, unsigned long size)
{
    return 1;
}
//******************************************************************************
//******************************************************************************
void __copy_user(void *to, const void *from, unsigned long n)
{
    if(to == NULL || from == NULL) {
        DebugInt3();
        return;
    }
    if(n == 0) return;

    memcpy(to, from, n);
}
//******************************************************************************
//******************************************************************************
unsigned long copy_to_user(void *to, const void *from, unsigned long n)
{
    if(to == NULL || from == NULL) {
        DebugInt3();
        return 0;
    }
    if(n == 0) return 0;

    memcpy(to, from, n);
    return 0;
}
//******************************************************************************
//******************************************************************************
void __copy_user_zeroing(void *to, const void *from, unsigned long n)
{
    if(to == NULL || from == NULL) {
        DebugInt3();
        return;
    }
    if(n == 0) return;

    copy_to_user(to, from, n);
}
//******************************************************************************
//******************************************************************************
unsigned long copy_from_user(void *to, const void *from, unsigned long n)
{
    if(to == NULL || from == NULL) {
        DebugInt3();
        return 0;
    }
    if(n == 0) return 0;

    memcpy(to, from, n);
    return 0;
}
//******************************************************************************
//******************************************************************************
int __get_user(int size, void *dest, void *src)
{
    if(size == 0) return 0;

    if(dest == NULL || src == NULL) {
        DebugInt3();
        return 0;
    }
    memcpy(dest, src, size);
    return 0;
}
//******************************************************************************
//******************************************************************************
int _put_user(int size, int x, void *ptr)
{
    if(ptr == NULL || size == 0) {
        DebugInt3();
        return 0;
    }
    // store only the requested number of bytes; writing a full int for
    // 1- and 2-byte puts would clobber the adjacent bytes
    switch(size) {
    case 1:  *(char *)ptr  = (char)x;  break;
    case 2:  *(short *)ptr = (short)x; break;
    default: *(int *)ptr   = x;        break;
    }
    return 0;
}

//******************************************************************************
#ifdef DEBUGHEAP
void *__kmalloc(int size, int flags, const char *filename, int lineno)
#else
void *__kmalloc(int size, int flags)
#endif
{
    LINEAR addr;

    if(size == 0) {
        DebugInt3();
        return NULL;
    }
    if(flags & GFP_DMA) {
        DebugInt3();
    }
    if(size >= 4096) {
        return vmalloc(size);
    }
#ifdef DEBUGHEAP
    addr = (LINEAR)malloc(size, filename, lineno);
#else
    addr = (LINEAR)malloc(size);
#endif
    if(addr == NULL) {
        DebugInt3();
        return 0;
    }
    //dprintf(("kmalloc %d returned %x", size, addr));
    return addr;
}

//******************************************************************************
#ifdef DEBUGHEAP
void __kfree(const void *ptr, const char *filename, int lineno)
#else
void __kfree(const void *ptr)
#endif
{
    ULONG addr;

    addr = (ULONG)ptr;
    if(addr == 0)
    {
        DebugInt3();
        return;
    }
    //dprintf(("kfree %x", addr));
    if(IsHeapAddr(addr)) {
#ifdef DEBUGHEAP
        free((void *)addr, filename, lineno);
#else
        free((void *)addr);
#endif
    }
    else vfree((PVOID)addr);
}

//******************************************************************************
void *kzalloc(size_t size, unsigned int flags)
{
    void *ret;
    ret = _kmalloc(size, flags);
    if (ret)
        memset(ret, 0, size);
    return ret;
}
//******************************************************************************
//******************************************************************************
void *kcalloc(size_t n, size_t size, unsigned int flags)
{
    if (n != 0 && size > INT_MAX / n)
        return NULL;
    return kzalloc(n * size, flags);
}
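//******************************************************************************
// Worked example of the kcalloc() overflow guard above: with a 32-bit size_t
// and n = 0x10000, size = 0x10000, the product n * size wraps to 0, so an
// unchecked multiply would "succeed" with a zero-byte block. Here
// size > INT_MAX / n (0x10000 > 0x7FFF), so kcalloc returns NULL instead.
//******************************************************************************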
//******************************************************************************
//******************************************************************************

size_t ksize(const void *block)
{
    size_t size = 0; // must start at 0: stays 0 for untracked blocks

    if (!block)
        size = 0; // Bad coder

    else if (block == ZERO_SIZE_PTR)
        size = 0; // Bad coder

    else if(IsHeapAddr((ULONG)block))
        size = _msize((void _near *)block);

    else
        // sets size only when the block is in the base-address list
        GetBaseAddressNoFree((ULONG)block, (ULONG NEAR *)&size);

    return size;
}
//******************************************************************************
//******************************************************************************
static inline void *__do_krealloc(const void *p, size_t new_size,
                                  gfp_t flags)
{
    void *ret;
    size_t ks = 0;

    if (p)
        ks = ksize(p);

    if (ks >= new_size)
        return (void *)p;

    // use the _kmalloc wrapper so DEBUGHEAP builds get file/line arguments
    ret = _kmalloc(new_size, flags);
    if (ret && p)
        memcpy(ret, p, ks);

    return ret;
}
//******************************************************************************
//******************************************************************************
/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
    void *ret;

    if (!new_size) {
        kfree(p);
        return ZERO_SIZE_PTR;
    }

    ret = __do_krealloc(p, new_size, flags);
    if (ret && p != ret)
        kfree(p);

    return ret;
}
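//******************************************************************************
// Illustrative usage sketch (buffer names are made up; assumes the usual
// kmalloc()/kfree() wrappers from this port's slab.h): krealloc() preserves
// the old contents up to min(old, new) and frees the old block when it has
// to move; a new_size of 0 frees the block and returns ZERO_SIZE_PTR.
//******************************************************************************
#if 0
void example_krealloc(void)
{
    char *buf = (char *)kmalloc(64, GFP_KERNEL);
    if(buf) {
        char *bigger = (char *)krealloc(buf, 256, GFP_KERNEL);
        if(bigger)          // old 64 bytes copied over; buf already freed
            buf = bigger;   // on failure, buf is still valid
        kfree(buf);
    }
}
#endif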
//******************************************************************************
//******************************************************************************
/**
 * vzalloc - allocate virtually contiguous memory with zero fill
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
    void *buf;
    buf = vmalloc(size);
    if (buf)
        memset(buf, 0, size);
    return buf;
}
//******************************************************************************
//******************************************************************************
/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory, but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that any use of gfp flags outside of GFP_KERNEL is careful to not
 * fall back to vmalloc.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
    gfp_t kmalloc_flags = flags;
    void *ret;

    /*
     * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
     * so the given set of flags has to be compatible.
     */
    if ((flags & GFP_KERNEL) != GFP_KERNEL)
        return kmalloc_node(size, flags, node);

    /*
     * We want to attempt a large physically contiguous block first because
     * it is less likely to fragment multiple larger blocks and therefore
     * contribute to a long term fragmentation less than vmalloc fallback.
     * However make sure that larger requests are not too disruptive - no
     * OOM killer and no allocation failure warnings as we have a fallback.
     */
    if (size > PAGE_SIZE) {
        kmalloc_flags |= __GFP_NOWARN;

        if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
            kmalloc_flags |= __GFP_NORETRY;
    }

    ret = kmalloc_node(size, kmalloc_flags, node);

    /*
     * It doesn't really make sense to fall back to vmalloc for sub page
     * requests
     */
    if (ret || size <= PAGE_SIZE)
        return ret;

    return __vmalloc_node(size, 1, flags, node,
                          __builtin_return_address(0));
}
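//******************************************************************************
// Illustrative usage sketch: on this port both allocation paths end up in the
// kmalloc()/vmalloc() implementations above, but callers should still follow
// the Linux convention and free with kvfree() (assumed to be provided
// elsewhere in this port; it is not defined in this file), since they cannot
// know which allocator satisfied the request.
//******************************************************************************
#if 0
void example_kvmalloc(void)
{
    void *tbl = kvmalloc_node(128 * 1024, GFP_KERNEL, -1 /* NUMA_NO_NODE */);
    if(tbl) {
        // ... use tbl ...
        kvfree(tbl);    // kvfree is assumed to exist in this port
    }
}
#endif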
//******************************************************************************
//******************************************************************************