source: GPL/trunk/include/linux/mm.h@ 679

Last change on this file since 679 was 679, checked in by David Azarewicz, 4 years ago

Merge changes from Paul's uniaud32next branch.

File size: 6.1 KB
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/page.h>
#include <asm/atomic.h>
#include <linux/overflow.h>
#include <linux/err.h>

#define NUMA_NO_NODE (-1)
/*
 * GFP bitmasks..
 */
#define __GFP_WAIT 0x01
#define __GFP_LOW 0x02
#define __GFP_MED 0x04
#define __GFP_HIGH 0x08
#define __GFP_IO 0x10
#define __GFP_SWAP 0x20
#define ___GFP_ZERO 0x100u
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)

#ifdef TARGET_OS2
#define __GFP_DMAHIGHMEM 0x100
#define GFP_DMAHIGHMEM __GFP_DMAHIGHMEM
#endif

#define GFP_BUFFER (__GFP_LOW | __GFP_WAIT)
#define GFP_ATOMIC (__GFP_HIGH)
#define GFP_USER (__GFP_LOW | __GFP_WAIT | __GFP_IO)
#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
#define GFP_KERNEL (__GFP_MED | __GFP_WAIT | __GFP_IO)
#define GFP_NFS (__GFP_HIGH | __GFP_WAIT | __GFP_IO)
#define GFP_KSWAPD (__GFP_IO | __GFP_SWAP)

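/*
 * Usage sketch (an added illustration, not part of the original header):
 * GFP_KERNEL includes __GFP_WAIT and __GFP_IO, so the allocator may sleep
 * and is only safe in process context; GFP_ATOMIC is __GFP_HIGH alone and
 * is the mask to use from interrupt context. Assuming the kmalloc() and
 * in_interrupt() helpers provided elsewhere in this compatibility layer:
 *
 *     void *buf = kmalloc(512, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
 *     if (!buf)
 *         return -ENOMEM;
 */
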
/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA __GFP_DMA

/* Flag - indicates that the buffer can be taken from high memory which is not
   directly addressable by the kernel */

#define GFP_HIGHMEM __GFP_HIGHMEM

/*
 * This struct defines a VMM memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (i.e. a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
    struct mm_struct * vm_mm; /* VM area parameters */
    unsigned long vm_start;
    unsigned long vm_end;

    /* linked list of VM areas per task, sorted by address */
    struct vm_area_struct *vm_next;

    pgprot_t vm_page_prot;
    unsigned short vm_flags;

    /* AVL tree of VM areas per task, sorted by address */
    short vm_avl_height;
    struct vm_area_struct * vm_avl_left;
    struct vm_area_struct * vm_avl_right;

    /* For areas with inode, the list inode->i_mmap, for shm areas,
     * the list of attaches, otherwise unused.
     */
    struct vm_area_struct *vm_next_share;
    struct vm_area_struct **vm_pprev_share;

    struct vm_operations_struct * vm_ops;
    unsigned long vm_pgoff; /* offset in PAGE_SIZE units, *not* PAGE_CACHE_SIZE */
    struct file * vm_file;
    void * vm_private_data; /* was vm_pte (shared mem) */
};

/*
 * vm_flags..
 */
#define VM_READ 0x0001 /* currently active flags */
#define VM_WRITE 0x0002
#define VM_EXEC 0x0004
#define VM_SHARED 0x0008

#define VM_MAYREAD 0x0010 /* limits for mprotect() etc */
#define VM_MAYWRITE 0x0020
#define VM_MAYEXEC 0x0040
#define VM_MAYSHARE 0x0080

#define VM_GROWSDOWN 0x0100 /* general info on the segment */
#define VM_GROWSUP 0x0200
#define VM_SHM 0x0400 /* shared memory area, don't swap out */
#define VM_DENYWRITE 0x0800 /* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE 0x1000
#define VM_LOCKED 0x2000
#define VM_IO 0x4000 /* Memory mapped I/O or similar */

#define VM_STACK_FLAGS 0x0177

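/*
 * Sketch (an added example with made-up names, not upstream code): a driver
 * mmap handler typically marks the area as memory-mapped I/O and locks it
 * against swapping before it installs any pages:
 *
 *     static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *     {
 *         vma->vm_flags |= VM_IO | VM_LOCKED;
 *         vma->vm_flags &= ~VM_MAYSHARE;
 *         return 0;
 *     }
 */
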
106/* Page flag bit values */
107#define PG_locked 0
108#define PG_error 1
109#define PG_referenced 2
110#define PG_uptodate 3
111#define PG_decr_after 5
112#define PG_DMA 7
113#define PG_slab 8
114#define PG_swap_cache 9
115#define PG_skip 10
116#define PG_swap_entry 11
117#define PG_highmem 12
118 /* bits 21-30 unused */
119#define PG_reserved 31
120
121typedef struct page {
122 unsigned long index;
123 atomic_t count;
124 unsigned long flags; /* atomic flags, some possibly updated asynchronously */
125 unsigned long virtual; /* nonzero if kmapped */
126 struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
127 struct page *first_page; /* Compound tail pages */
128} mem_map_t;
129
130extern mem_map_t * mem_map;
131
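/*
 * Usage sketch (added illustration; assumes the generic atomic and bitops
 * helpers from asm/atomic.h and asm/bitops.h): the count and flags fields
 * follow the usual Linux conventions, so a page is pinned and marked like
 * this:
 *
 *     atomic_inc(&page->count);
 *     set_bit(PG_reserved, &page->flags);
 */
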
#define free_page(addr) free_pages((addr),0)
extern int free_pages(unsigned long addr, unsigned long order);

#define virt_to_bus virt_to_phys
extern unsigned long virt_to_phys(void * address);

//extern mem_map_t *virt_to_page(int x);
#define virt_to_page(x) (&mem_map[MAP_NR(x)])

extern void * phys_to_virt(unsigned long address);

#define __get_free_page(gfp_mask) __get_free_pages((gfp_mask),0)
#define __get_dma_pages(gfp_mask, order) __get_free_pages((gfp_mask) | GFP_DMA,(order))

extern void *__get_free_pages(int gfp_mask, unsigned long order);
extern struct page * alloc_pages(int gfp_mask, unsigned long order);

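/*
 * Usage sketch (added; assumes the usual get_order() helper from asm/page.h
 * and a hypothetical write_dma_address() hardware call): allocate a
 * physically contiguous, DMA-capable buffer, hand its bus address to the
 * device, and release it again with free_pages():
 *
 *     int order = get_order(8192);
 *     unsigned long buf = (unsigned long)__get_dma_pages(GFP_KERNEL, order);
 *     if (!buf)
 *         return -ENOMEM;
 *     write_dma_address(virt_to_bus((void *)buf));
 *     ...
 *     free_pages(buf, order);
 */
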
extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);

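/*
 * Sketch (added; vma and buffer_phys are placeholders): with the 2.4-style
 * signature declared above, an mmap handler maps a physical buffer straight
 * into the calling process:
 *
 *     if (remap_page_range(vma->vm_start, buffer_phys,
 *                          vma->vm_end - vma->vm_start, vma->vm_page_prot))
 *         return -EAGAIN;
 */
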
/*
 * Error return values for the *_nopage functions
 */
#define NOPAGE_SIGBUS (NULL)
#define NOPAGE_OOM ((struct page *) -1)

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
    void (*open)(struct vm_area_struct * area);
    void (*close)(struct vm_area_struct * area);
    void (*unmap)(struct vm_area_struct *area, unsigned long, size_t);
    void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
    int (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
    void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
    struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
    struct page * (*wppage)(struct vm_area_struct * area, unsigned long address, struct page * page);
    int (*swapout)(struct page *, struct file *);
};
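
/*
 * Sketch (added example; buffer and buffer_size are placeholders, and
 * vmalloc_to_page() is declared further down in this header): a minimal
 * nopage handler for a vmalloc()ed buffer returns the NOPAGE_* codes above
 * on failure and a pinned page on success:
 *
 *     static struct page *example_nopage(struct vm_area_struct *area,
 *                                        unsigned long address, int write_access)
 *     {
 *         struct page *page;
 *         unsigned long off = address - area->vm_start;
 *
 *         if (off >= buffer_size)
 *             return NOPAGE_SIGBUS;
 *         page = vmalloc_to_page(buffer + off);
 *         atomic_inc(&page->count);
 *         return page;
 *     }
 *
 *     static struct vm_operations_struct example_vm_ops = {
 *         .nopage = example_nopage,
 *     };
 */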

#define SetPageReserved(a) a
#define ClearPageReserved(a) a
struct page *vmalloc_to_page(void *addr);

extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
static inline void *kvmalloc(size_t size, gfp_t flags)
{
    return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
    return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline void *kvzalloc(size_t size, gfp_t flags)
{
    return kvmalloc(size, flags | __GFP_ZERO);
}
static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
    size_t bytes;

    /* Reject multiplications that would wrap, as the upstream helper does;
     * check_mul_overflow() comes from <linux/overflow.h> included above. */
    if (check_mul_overflow(n, size, &bytes))
        return NULL;

    return kvmalloc(bytes, flags);
}
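
/*
 * Usage sketch (added; struct voice and n_voices are placeholders, and
 * freeing goes through whatever kvfree() equivalent this port provides
 * elsewhere): allocate a zeroed array whose byte count is computed without
 * overflow, falling back to NULL on failure:
 *
 *     struct voice *voices = kvmalloc_array(n_voices, sizeof(*voices),
 *                                           GFP_KERNEL | __GFP_ZERO);
 *     if (!voices)
 *         return -ENOMEM;
 */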
#endif