source: GPL/trunk/include/linux/mm.h

Last change on this file was 772, checked in by David Azarewicz, 4 months ago

Merge in changes from 6.6-LTS branch.
Fixed additional 25+ problems.

File size: 5.1 KB
Line 
1#ifndef _LINUX_MM_H
2#define _LINUX_MM_H
3
4#include <linux/sched.h>
5#include <linux/errno.h>
6#include <linux/gfp.h>
7#include <asm/page.h>
8#include <asm/atomic.h>
9#include <linux/overflow.h>
10#include <linux/err.h>
11
12#define NUMA_NO_NODE (-1)
13
/*
 * This struct defines a memory VMM memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	struct mm_struct * vm_mm;	/* VM area parameters */
	unsigned long vm_start;		/* start address of the area */
	unsigned long vm_end;		/* end address (assumed exclusive, as in Linux — confirm) */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next;

	pgprot_t vm_page_prot;		/* page protection for this mapping */
	unsigned short vm_flags;	/* VM_* bits, defined below */

	/* AVL tree of VM areas per task, sorted by address */
	short vm_avl_height;
	struct vm_area_struct * vm_avl_left;
	struct vm_area_struct * vm_avl_right;

	/* For areas with inode, the list inode->i_mmap, for shm areas,
	 * the list of attaches, otherwise unused.
	 */
	struct vm_area_struct *vm_next_share;
	struct vm_area_struct **vm_pprev_share;

	struct vm_operations_struct * vm_ops;	/* callback table, see below */
	unsigned long vm_pgoff;		/* offset in PAGE_SIZE units, *not* PAGE_CACHE_SIZE */
	struct file * vm_file;		/* backing file; presumably NULL for anonymous areas */
	void * vm_private_data;		/* was vm_pte (shared mem) */
};
47
/*
 * vm_flags..
 */
#define VM_READ		0x0001	/* currently active flags */
#define VM_WRITE	0x0002
#define VM_EXEC		0x0004
#define VM_SHARED	0x0008

#define VM_MAYREAD	0x0010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x0020
#define VM_MAYEXEC	0x0040
#define VM_MAYSHARE	0x0080

#define VM_GROWSDOWN	0x0100	/* general info on the segment */
#define VM_GROWSUP	0x0200
#define VM_SHM		0x0400	/* shared memory area, don't swap out */
#define VM_DENYWRITE	0x0800	/* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE	0x1000
#define VM_LOCKED	0x2000
#define VM_IO		0x4000	/* Memory mapped I/O or similar */

/* 0x0177 == VM_READ | VM_WRITE | VM_EXEC |
 *           VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | VM_GROWSDOWN */
#define VM_STACK_FLAGS	0x0177
71
/* Page flag bit values: bit indices into struct page.flags, not masks.
 * NOTE(review): bits 4 and 6 have no names in this header; the comment
 * below claims 21-30 unused but 13-30 also appear unnamed here — confirm. */
#define PG_locked	 0
#define PG_error	 1
#define PG_referenced	 2
#define PG_uptodate	 3
#define PG_decr_after	 5
#define PG_DMA		 7
#define PG_slab		 8
#define PG_swap_cache	 9
#define PG_skip		10
#define PG_swap_entry	11
#define PG_highmem	12
				/* bits 21-30 unused */
#define PG_reserved	31
86
/*
 * Minimal struct page for this port; mem_map is the global array of
 * these, indexed via MAP_NR() (see virt_to_page below).
 */
typedef struct page {
	unsigned long index;		/* presumably the offset within a mapping — confirm against users */
	atomic_t count;			/* reference count */
	unsigned long flags;		/* atomic flags, some possibly updated asynchronously */
	unsigned long virtual;		/* nonzero if kmapped */
	struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
	struct page *first_page;	/* Compound tail pages */
} mem_map_t;

/* Global page array; one entry per page frame (defined elsewhere). */
extern mem_map_t * mem_map;
97
/* Free a single (order-0) page. */
#define free_page(addr) free_pages((addr),0)
extern int free_pages(unsigned long addr, unsigned long order);

/* Bus addresses are treated as identical to physical addresses here. */
#define virt_to_bus virt_to_phys
extern unsigned long virt_to_phys(void * address);

//extern mem_map_t *virt_to_page(int x);
/* Kernel virtual address -> its mem_map[] entry; MAP_NR is defined elsewhere. */
#define virt_to_page(x) (&mem_map[MAP_NR(x)])

extern void * phys_to_virt(unsigned long address);

/* Order-0 / DMA-zone convenience wrappers around __get_free_pages(). */
#define __get_free_page(gfp_mask) __get_free_pages((gfp_mask),0)
#define __get_dma_pages(gfp_mask, order) __get_free_pages((gfp_mask) | GFP_DMA,(order))

/* Allocate 2^order contiguous pages (implemented elsewhere). */
extern void *__get_free_pages(int gfp_mask, unsigned long order);
extern struct page * alloc_pages(int gfp_mask, unsigned long order);

extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
116
/*
 * Error return values for the *_nopage functions
 */
#define NOPAGE_SIGBUS (NULL)
/* Parenthesized so the expansion stays a single term inside any
 * surrounding expression (it previously expanded to a bare -1). */
#define NOPAGE_OOM (-1)
122
/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	void (*unmap)(struct vm_area_struct *area, unsigned long, size_t);
	void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
	int (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
	void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
	/* Fault handler; error returns use NOPAGE_SIGBUS / NOPAGE_OOM (see above). */
	struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
	/* Write-protect fault handler for 'page'. */
	struct page * (*wppage)(struct vm_area_struct * area, unsigned long address, struct page * page);
	int (*swapout)(struct page *, struct file *);
};
139
/*
 * Page-reservation stubs: deliberate no-ops in this port. The argument
 * is still evaluated exactly once, but cast to void so that a bare
 * "SetPageReserved(p);" statement does not draw a "statement with no
 * effect" warning and the (meaningless) value cannot be used.
 */
#define SetPageReserved(a) ((void)(a))
#define ClearPageReserved(a) ((void)(a))

/* Return the struct page backing a vmalloc()ed address (defined elsewhere). */
struct page *vmalloc_to_page(void *addr);
143
144extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
145static inline void *kvmalloc(size_t size, gfp_t flags)
146{
147 return kvmalloc_node(size, flags, NUMA_NO_NODE);
148}
149static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
150{
151 return kvmalloc_node(size, flags | __GFP_ZERO, node);
152}
153static inline void *kvzalloc(size_t size, gfp_t flags)
154{
155 return kvmalloc(size, flags | __GFP_ZERO);
156}
157static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
158{
159 size_t bytes;
160
161 bytes = n * size;
162
163 return kvmalloc(bytes, flags);
164}
165#endif
Note: See TracBrowser for help on using the repository browser.