/* $Id: mm.h 153 2000-07-23 16:21:57Z sandervl $ */

#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/page.h>
#include <asm/atomic.h>

/*
 * GFP bitmasks..
 */
#define __GFP_WAIT	0x01
#define __GFP_LOW	0x02
#define __GFP_MED	0x04
#define __GFP_HIGH	0x08
#define __GFP_IO	0x10
#define __GFP_SWAP	0x20
#ifdef CONFIG_HIGHMEM
#define __GFP_HIGHMEM	0x40
#else
#define __GFP_HIGHMEM	0x0	/* noop */
#endif

#define __GFP_DMA	0x80

#define GFP_BUFFER	(__GFP_LOW | __GFP_WAIT)
#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_USER	(__GFP_LOW | __GFP_WAIT | __GFP_IO)
#define GFP_HIGHUSER	(GFP_USER | __GFP_HIGHMEM)
#define GFP_KERNEL	(__GFP_MED | __GFP_WAIT | __GFP_IO)
#define GFP_NFS		(__GFP_HIGH | __GFP_WAIT | __GFP_IO)
#define GFP_KSWAPD	(__GFP_IO | __GFP_SWAP)

/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA		__GFP_DMA

/* Flag - indicates that the buffer can be taken from high memory which is not
   directly addressable by the kernel */

#define GFP_HIGHMEM	__GFP_HIGHMEM

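/*
 * Usage sketch (illustration only): the composite masks above differ mainly
 * in whether __GFP_WAIT (the caller may sleep) and __GFP_IO (the allocator
 * may start I/O to reclaim memory) are set.  GFP_KERNEL carries both and so
 * may block; GFP_ATOMIC carries neither and is therefore the one to use from
 * interrupt context:
 *
 *	void *p = __get_free_page(GFP_KERNEL);	  may sleep
 *	void *q = __get_free_page(GFP_ATOMIC);	  never sleeps
 *
 * (__get_free_page() is declared later in this header.)
 */
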
/*
 * This struct defines a VMM memory area.  There is one of these per
 * VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (i.e. a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	struct mm_struct * vm_mm;	/* VM area parameters */
	unsigned long vm_start;
	unsigned long vm_end;

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next;

	pgprot_t vm_page_prot;
	unsigned short vm_flags;

	/* AVL tree of VM areas per task, sorted by address */
	short vm_avl_height;
	struct vm_area_struct * vm_avl_left;
	struct vm_area_struct * vm_avl_right;

	/* For areas with an inode, the list inode->i_mmap; for shm areas,
	 * the list of attaches; otherwise unused.
	 */
	struct vm_area_struct *vm_next_share;
	struct vm_area_struct **vm_pprev_share;

	struct vm_operations_struct * vm_ops;
	unsigned long vm_pgoff;		/* offset in PAGE_SIZE units, *not* PAGE_CACHE_SIZE */
	struct file * vm_file;
	void * vm_private_data;		/* was vm_pte (shared mem) */
};
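
/*
 * Illustrative sketch (not from the original source): the per-task areas are
 * kept on a singly linked list sorted by address, so given a hypothetical
 * "struct mm_struct *mm" whose "mmap" field heads that list, the area
 * covering an address can be found with:
 *
 *	struct vm_area_struct *vma;
 *	for (vma = mm->mmap; vma; vma = vma->vm_next)
 *		if (addr >= vma->vm_start && addr < vma->vm_end)
 *			break;
 *
 * vm_start is inclusive and vm_end is exclusive, both in bytes.
 */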

/*
 * vm_flags..
 */
#define VM_READ		0x0001	/* currently active flags */
#define VM_WRITE	0x0002
#define VM_EXEC		0x0004
#define VM_SHARED	0x0008

#define VM_MAYREAD	0x0010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x0020
#define VM_MAYEXEC	0x0040
#define VM_MAYSHARE	0x0080

#define VM_GROWSDOWN	0x0100	/* general info on the segment */
#define VM_GROWSUP	0x0200
#define VM_SHM		0x0400	/* shared memory area, don't swap out */
#define VM_DENYWRITE	0x0800	/* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE	0x1000
#define VM_LOCKED	0x2000
#define VM_IO		0x4000	/* Memory mapped I/O or similar */

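/* Default flags for a stack VMA: VM_READ | VM_WRITE | VM_EXEC |
   VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | VM_GROWSDOWN */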
#define VM_STACK_FLAGS	0x0177

/* Page flag bit values */
#define PG_locked		 0
#define PG_error		 1
#define PG_referenced		 2
#define PG_uptodate		 3
#define PG_decr_after		 5
#define PG_DMA			 7
#define PG_slab			 8
#define PG_swap_cache		 9
#define PG_skip			10
#define PG_swap_entry		11
#define PG_highmem		12
				/* bits 13-30 unused */
#define PG_reserved		31

typedef struct page {
	unsigned long index;
	atomic_t count;
	unsigned long flags;	/* atomic flags, some possibly updated asynchronously */
	unsigned long virtual;	/* nonzero if kmapped */
} mem_map_t;
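
/*
 * Illustrative sketch (assumes the atomic bit helpers from <asm/bitops.h>,
 * which this header does not include itself): the PG_* values above are bit
 * numbers within page->flags, so page state is tested and updated with the
 * bit operations, e.g.:
 *
 *	if (test_bit(PG_locked, &page->flags))
 *		return;
 *	set_bit(PG_referenced, &page->flags);
 */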

extern mem_map_t * mem_map;

#define free_page(addr) free_pages((addr),0)
extern int free_pages(unsigned long addr, unsigned long order);

#define virt_to_bus virt_to_phys
extern unsigned long virt_to_phys(void * address);

extern void * phys_to_virt(unsigned long address);

#define __get_free_page(gfp_mask) __get_free_pages((gfp_mask),0)
#define __get_dma_pages(gfp_mask, order) __get_free_pages((gfp_mask) | GFP_DMA,(order))

extern void *__get_free_pages(int gfp_mask, unsigned long order);
extern struct page * alloc_pages(int gfp_mask, unsigned long order);
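
/*
 * Usage sketch (illustration only): "order" is the log2 of the number of
 * physically contiguous pages, so order 0 is one page and order 2 is four.
 * The address handed back by __get_free_pages() is the one to pass to
 * free_pages(), together with the same order:
 *
 *	void *buf = __get_free_pages(GFP_KERNEL, 2);
 *	if (buf)
 *		free_pages((unsigned long)buf, 2);
 */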
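/*
 * Note (based on the Linux interface this header mirrors, so treat the
 * parameter meanings as an assumption for this port): remap_page_range()
 * maps "size" bytes of physical memory starting at "to" into the virtual
 * address range starting at "from", using the protection bits in "prot";
 * from, to and size are expected to be page aligned.
 */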
extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);

#endif