Changeset 662


Timestamp: Jan 26, 2021, 7:59:05 AM (5 years ago)
Author: Paul Smedley
Message: Yet more code cleanups
Location: GPL/branches/uniaud32-next
Files: 6 edited

  • GPL/branches/uniaud32-next/alsa-kernel/core/seq/seq_memory.c

    r637 → r662

    @@ -375,10 +375,6 @@
                     return -EINVAL;
     
    -#ifndef TARGET_OS2
             cellptr = kvmalloc_array(sizeof(struct snd_seq_event_cell), pool->size,
                                      GFP_KERNEL);
    -#else
    -        cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
    -#endif
             if (!cellptr)
                     return -ENOMEM;
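    The removed #ifdef existed only because the OS/2 build had no kvmalloc_array(); with the shims added below (include/linux/mm.h), the upstream call can be kept as-is. As a rough illustration of the allocate/free pairing this enables (a sketch only, not code from the changeset; the struct and function names are made up, and it assumes kvfree() is available as in upstream kernels):

    #include <linux/mm.h>
    #include <linux/slab.h>

    struct demo_cell { int data; };

    /* Allocate a pool of cells the way seq_memory.c now does it: kmalloc
     * first, with an automatic vmalloc fallback for large pools. */
    static struct demo_cell *demo_pool_alloc(size_t nelems)
    {
            return kvmalloc_array(nelems, sizeof(struct demo_cell), GFP_KERNEL);
    }

    static void demo_pool_free(struct demo_cell *cells)
    {
            kvfree(cells);  /* correct for either allocation path */
    }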
  • GPL/branches/uniaud32-next/include/linux/gfp.h

    r647 → r662

    @@ -42,4 +42,5 @@
     #define __GFP_COMP      ((__force gfp_t)___GFP_COMP)
     #define __GFP_ZERO      ((__force gfp_t)___GFP_ZERO)
    +#define __GFP_RETRY_MAYFAIL     ((__force gfp_t)___GFP_RETRY_MAYFAIL)
     #define GFP_DMA32 0             /* driver must check for 32-bit address */
     
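    Defining __GFP_RETRY_MAYFAIL here matters because the imported kvmalloc_node() (see lib32/memory.c below) tests for it when deciding whether to add __GFP_NORETRY to a large kmalloc attempt. A hedged sketch of a caller opting in (hypothetical function name and size, not from the changeset):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Prefer retrying the physically contiguous kmalloc over an early
     * fallback to vmalloc for this illustrative 256 KiB table. */
    static void *demo_alloc_table(void)
    {
            return kvmalloc(256 * 1024, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
    }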
  • GPL/branches/uniaud32-next/include/linux/mm.h

    r647 → r662

    @@ -10,4 +10,5 @@
     #include <linux/err.h>
     
    +#define NUMA_NO_NODE    (-1)
     /*
      * GFP bitmasks..

    @@ -19,4 +20,6 @@
     #define __GFP_IO        0x10
     #define __GFP_SWAP      0x20
    +#define ___GFP_ZERO             0x100u
    +#define __GFP_ZERO      ((__force gfp_t)___GFP_ZERO)
     
     #ifdef TARGET_OS2

    @@ -173,3 +176,24 @@
     struct page *vmalloc_to_page(void *addr);
     
    +extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
    +static inline void *kvmalloc(size_t size, gfp_t flags)
    +{
    +        return kvmalloc_node(size, flags, NUMA_NO_NODE);
    +}
    +static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
    +{
    +        return kvmalloc_node(size, flags | __GFP_ZERO, node);
    +}
    +static inline void *kvzalloc(size_t size, gfp_t flags)
    +{
    +        return kvmalloc(size, flags | __GFP_ZERO);
    +}
    +static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
    +{
    +        size_t bytes;
    +
    +        bytes = n * size;
    +
    +        return kvmalloc(bytes, flags);
    +}
     #endif
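    All of the new inline wrappers funnel into the single extern kvmalloc_node(): kvmalloc() passes NUMA_NO_NODE, the kvzalloc variants OR in __GFP_ZERO, and kvmalloc_array() multiplies n by size without an overflow check (the upstream helper guards this), so callers are expected to pass sane sizes. A minimal sketch of how the calls expand (illustrative name, not from the changeset):

    #include <linux/mm.h>

    static void *demo_zeroed_array(size_t count, size_t elem_size)
    {
            /* kvzalloc(sz, GFP_KERNEL)
             *   -> kvmalloc(sz, GFP_KERNEL | __GFP_ZERO)
             *   -> kvmalloc_node(sz, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE) */
            return kvzalloc(count * elem_size, GFP_KERNEL);
    }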
  • GPL/branches/uniaud32-next/include/linux/slab.h

    r647 → r662

    @@ -124,3 +124,14 @@
     #define kvzalloc kzalloc
     size_t ksize(const void *);
    +
    +static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
    +{
    +        return __kmalloc(size, flags);
    +}
    +
    +static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
    +{
    +        return __kmalloc_node(size, flags, node);
    +}
    +
     #endif  /* _LINUX_SLAB_H */
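    kmalloc_node() and __kmalloc_node() simply discard the node hint and call __kmalloc(), which is reasonable for this single-node OS/2 target; together with NUMA_NO_NODE from mm.h it lets node-aware upstream call sites compile unchanged. A sketch of such a call site (hypothetical function, not from the changeset):

    #include <linux/mm.h>
    #include <linux/slab.h>

    static void *demo_percard_buffer(size_t bytes)
    {
            /* The node argument is accepted but ignored by the shim, so this
             * resolves to __kmalloc(bytes, GFP_KERNEL). */
            return kmalloc_node(bytes, GFP_KERNEL, NUMA_NO_NODE);
    }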
  • GPL/branches/uniaud32-next/include/linux/vmalloc.h

    r625 → r662

    @@ -25,4 +25,7 @@
     extern struct vm_struct * vmlist;
     extern void *vzalloc(unsigned long size);
    +extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
    +void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
    +                int node, const void *caller);
     #endif
     
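    These declarations match the stubs added to lib32/memory.c below; in this port both entry points ignore the extra parameters and delegate to plain vmalloc(). A short usage sketch under that assumption (hypothetical function, arbitrary sizes):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static void demo_vmalloc_wrappers(void)
    {
            void *a = __vmalloc(64 * 1024, GFP_KERNEL);
            void *b = __vmalloc_node(64 * 1024, 1, GFP_KERNEL, NUMA_NO_NODE,
                                     __builtin_return_address(0));
            /* Both came from vmalloc() in this tree, so vfree() releases them. */
            vfree(a);
            vfree(b);
    }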
  • GPL/branches/uniaud32-next/lib32/memory.c

    r655 → r662

    @@ -449,4 +449,36 @@
     //******************************************************************************
     //******************************************************************************
    +void *__vmalloc(unsigned long size, gfp_t gfp_mask)
    +{
    +        return vmalloc(size);
    +}
    +//******************************************************************************
    +//******************************************************************************
    +/**
    + * __vmalloc_node - allocate virtually contiguous memory
    + * @size:           allocation size
    + * @align:          desired alignment
    + * @gfp_mask:       flags for the page level allocator
    + * @node:           node to use for allocation or NUMA_NO_NODE
    + * @caller:         caller's return address
    + *
    + * Allocate enough pages to cover @size from the page level allocator with
    + * @gfp_mask flags.  Map them into contiguous kernel virtual space.
    + *
    + * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
    + * and __GFP_NOFAIL are not supported
    + *
    + * Any use of gfp flags outside of GFP_KERNEL should be consulted
    + * with mm people.
    + *
    + * Return: pointer to the allocated memory or %NULL on error
    + */
    +void *__vmalloc_node(unsigned long size, unsigned long align,
    +                            gfp_t gfp_mask, int node, const void *caller)
    +{
    +        return vmalloc(size);
    +}
    +//******************************************************************************
    +//******************************************************************************
     void vfree(void *ptr)
     {

    @@ -776,2 +808,61 @@
     //******************************************************************************
     //******************************************************************************
    +/**
    + * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
    + * failure, fall back to non-contiguous (vmalloc) allocation.
    + * @size: size of the request.
    + * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
    + * @node: numa node to allocate from
    + *
    + * Uses kmalloc to get the memory but if the allocation fails then falls back
    + * to the vmalloc allocator. Use kvfree for freeing the memory.
    + *
    + * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
    + * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
    + * preferable to the vmalloc fallback, due to visible performance drawbacks.
    + *
    + * Please note that any use of gfp flags outside of GFP_KERNEL is careful to not
    + * fall back to vmalloc.
    + *
    + * Return: pointer to the allocated memory of %NULL in case of failure
    + */
    +void *kvmalloc_node(size_t size, gfp_t flags, int node)
    +{
    +        gfp_t kmalloc_flags = flags;
    +        void *ret;
    +
    +        /*
    +         * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
    +         * so the given set of flags has to be compatible.
    +         */
    +        if ((flags & GFP_KERNEL) != GFP_KERNEL)
    +                return kmalloc_node(size, flags, node);
    +
    +        /*
    +         * We want to attempt a large physically contiguous block first because
    +         * it is less likely to fragment multiple larger blocks and therefore
    +         * contribute to a long term fragmentation less than vmalloc fallback.
    +         * However make sure that larger requests are not too disruptive - no
    +         * OOM killer and no allocation failure warnings as we have a fallback.
    +         */
    +        if (size > PAGE_SIZE) {
    +                kmalloc_flags |= __GFP_NOWARN;
    +
    +                if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
    +                        kmalloc_flags |= __GFP_NORETRY;
    +        }
    +
    +        ret = kmalloc_node(size, kmalloc_flags, node);
    +
    +        /*
    +         * It doesn't really make sense to fallback to vmalloc for sub page
    +         * requests
    +         */
    +        if (ret || size <= PAGE_SIZE)
    +                return ret;
    +
    +        return __vmalloc_node(size, 1, flags, node,
    +                        __builtin_return_address(0));
    +}
    +//******************************************************************************
    +//******************************************************************************
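    The imported kvmalloc_node() keeps the upstream decision logic: requests at or below PAGE_SIZE stay on the kmalloc path, while larger requests try kmalloc with __GFP_NOWARN (plus __GFP_NORETRY unless __GFP_RETRY_MAYFAIL was passed) before falling back to __vmalloc_node(), which in this port is just the vmalloc() stub above. A rough caller-side sketch (illustrative names and sizes; it assumes kvfree() is available as in upstream):

    #include <linux/mm.h>
    #include <linux/slab.h>

    static void demo_kvmalloc_paths(void)
    {
            /* <= PAGE_SIZE: plain kmalloc_node(); no vmalloc fallback is tried. */
            void *small = kvmalloc(512, GFP_KERNEL);

            /* > PAGE_SIZE: kmalloc first with __GFP_NOWARN | __GFP_NORETRY,
             * then vmalloc via __vmalloc_node() if that attempt fails. */
            void *large = kvmalloc(2 * 1024 * 1024, GFP_KERNEL);

            kvfree(small);
            kvfree(large);
    }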