Changeset 662
- Timestamp:
- Jan 26, 2021, 7:59:05 AM (5 years ago)
- Location:
- GPL/branches/uniaud32-next
- Files:
-
- 6 edited
Legend:
- Unmodified
- Added
- Removed
-
GPL/branches/uniaud32-next/alsa-kernel/core/seq/seq_memory.c
r637 r662 375 375 return -EINVAL; 376 376 377 #ifndef TARGET_OS2378 377 cellptr = kvmalloc_array(sizeof(struct snd_seq_event_cell), pool->size, 379 378 GFP_KERNEL); 380 #else381 cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);382 #endif383 379 if (!cellptr) 384 380 return -ENOMEM; -
GPL/branches/uniaud32-next/include/linux/gfp.h
r647 r662 42 42 #define __GFP_COMP ((__force gfp_t)___GFP_COMP) 43 43 #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) 44 #define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL) 44 45 #define GFP_DMA32 0 /* driver must check for 32-bit address */ 45 46 -
GPL/branches/uniaud32-next/include/linux/mm.h
r647 r662 10 10 #include <linux/err.h> 11 11 12 #define NUMA_NO_NODE (-1) 12 13 /* 13 14 * GFP bitmasks.. … … 19 20 #define __GFP_IO 0x10 20 21 #define __GFP_SWAP 0x20 22 #define ___GFP_ZERO 0x100u 23 #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) 21 24 22 25 #ifdef TARGET_OS2 … … 173 176 struct page *vmalloc_to_page(void *addr); 174 177 178 extern void *kvmalloc_node(size_t size, gfp_t flags, int node); 179 static inline void *kvmalloc(size_t size, gfp_t flags) 180 { 181 return kvmalloc_node(size, flags, NUMA_NO_NODE); 182 } 183 static inline void *kvzalloc_node(size_t size, gfp_t flags, int node) 184 { 185 return kvmalloc_node(size, flags | __GFP_ZERO, node); 186 } 187 static inline void *kvzalloc(size_t size, gfp_t flags) 188 { 189 return kvmalloc(size, flags | __GFP_ZERO); 190 } 191 static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags) 192 { 193 size_t bytes; 194 195 bytes = n * size; 196 197 return kvmalloc(bytes, flags); 198 } 175 199 #endif -
GPL/branches/uniaud32-next/include/linux/slab.h
r647 r662 124 124 #define kvzalloc kzalloc 125 125 size_t ksize(const void *); 126 127 static inline void *__kmalloc_node(size_t size, gfp_t flags, int node) 128 { 129 return __kmalloc(size, flags); 130 } 131 132 static inline void *kmalloc_node(size_t size, gfp_t flags, int node) 133 { 134 return __kmalloc_node(size, flags, node); 135 } 136 126 137 #endif /* _LINUX_SLAB_H */ -
GPL/branches/uniaud32-next/include/linux/vmalloc.h
r625 r662 25 25 extern struct vm_struct * vmlist; 26 26 extern void *vzalloc(unsigned long size); 27 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask); 28 void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, 29 int node, const void *caller); 27 30 #endif 28 31 -
GPL/branches/uniaud32-next/lib32/memory.c
r655 r662 449 449 //****************************************************************************** 450 450 //****************************************************************************** 451 void *__vmalloc(unsigned long size, gfp_t gfp_mask) 452 { 453 return vmalloc(size); 454 } 455 //****************************************************************************** 456 //****************************************************************************** 457 /** 458 * __vmalloc_node - allocate virtually contiguous memory 459 * @size: allocation size 460 * @align: desired alignment 461 * @gfp_mask: flags for the page level allocator 462 * @node: node to use for allocation or NUMA_NO_NODE 463 * @caller: caller's return address 464 * 465 * Allocate enough pages to cover @size from the page level allocator with 466 * @gfp_mask flags. Map them into contiguous kernel virtual space. 467 * 468 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL 469 * and __GFP_NOFAIL are not supported 470 * 471 * Any use of gfp flags outside of GFP_KERNEL should be consulted 472 * with mm people. 473 * 474 * Return: pointer to the allocated memory or %NULL on error 475 */ 476 void *__vmalloc_node(unsigned long size, unsigned long align, 477 gfp_t gfp_mask, int node, const void *caller) 478 { 479 return vmalloc(size); 480 } 481 //****************************************************************************** 482 //****************************************************************************** 451 483 void vfree(void *ptr) 452 484 { … … 776 808 //****************************************************************************** 777 809 //****************************************************************************** 810 /** 811 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon 812 * failure, fall back to non-contiguous (vmalloc) allocation. 813 * @size: size of the request. 
814 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL. 815 * @node: numa node to allocate from 816 * 817 * Uses kmalloc to get the memory but if the allocation fails then falls back 818 * to the vmalloc allocator. Use kvfree for freeing the memory. 819 * 820 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported. 821 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is 822 * preferable to the vmalloc fallback, due to visible performance drawbacks. 823 * 824 * Please note that any use of gfp flags outside of GFP_KERNEL is careful to not 825 * fall back to vmalloc. 826 * 827 * Return: pointer to the allocated memory or %NULL in case of failure 828 */ 829 void *kvmalloc_node(size_t size, gfp_t flags, int node) 830 { 831 gfp_t kmalloc_flags = flags; 832 void *ret; 833 834 /* 835 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables) 836 * so the given set of flags has to be compatible. 837 */ 838 if ((flags & GFP_KERNEL) != GFP_KERNEL) 839 return kmalloc_node(size, flags, node); 840 841 /* 842 * We want to attempt a large physically contiguous block first because 843 * it is less likely to fragment multiple larger blocks and therefore 844 * contribute to a long term fragmentation less than vmalloc fallback. 845 * However make sure that larger requests are not too disruptive - no 846 * OOM killer and no allocation failure warnings as we have a fallback. 
847 */ 848 if (size > PAGE_SIZE) { 849 kmalloc_flags |= __GFP_NOWARN; 850 851 if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL)) 852 kmalloc_flags |= __GFP_NORETRY; 853 } 854 855 ret = kmalloc_node(size, kmalloc_flags, node); 856 857 /* 858 * It doesn't really make sense to fallback to vmalloc for sub page 859 * requests 860 */ 861 if (ret || size <= PAGE_SIZE) 862 return ret; 863 864 return __vmalloc_node(size, 1, flags, node, 865 __builtin_return_address(0)); 866 } 867 //****************************************************************************** 868 //******************************************************************************
Note:
See TracChangeset
for help on using the changeset viewer.