#ifndef _I386_PGTABLE_H
#define _I386_PGTABLE_H

#include <linux/config.h>

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>

extern pgd_t swapper_pg_dir[1024];

/* Caches aren't brain-dead on the intel. */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_range(mm, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr)		do { } while (0)
#define flush_page_to_ram(page)			do { } while (0)
#define flush_icache_range(start, end)		do { } while (0)

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct's TLB entries
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */

#define __flush_tlb() \
do { unsigned long tmpreg; __asm__ __volatile__("movl %%cr3,%0\n\tmovl %0,%%cr3":"=r" (tmpreg) : :"memory"); } while (0)
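
/*
 * Illustrative sketch, not part of the original interface here: the
 * page-granular flush mentioned above.  On i486 and later, a single
 * TLB entry can be dropped with the `invlpg' instruction; this is
 * the conventional shape of such a macro, shown as a hedged example.
 */
#define __flush_tlb_one(addr) \
	__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))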

#endif /* !__ASSEMBLY__ */

#define pgd_quicklist (current_cpu_data.pgd_quick)
#define pmd_quicklist (current_cpu_data.pmd_quick)
#define pte_quicklist (current_cpu_data.pte_quick)
#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)

/*
 * The Linux x86 paging architecture is 'compile-time dual-mode': it
 * implements both the traditional 2-level x86 page tables and the
 * newer 3-level PAE-mode page tables.
 */
#ifndef __ASSEMBLY__
#endif

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))

#define __beep() asm("movb $0x3,%al; outb %al,$0x61")

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
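
/*
 * Worked example (values assume the folded 2-level configuration,
 * where PMD_SHIFT == PGDIR_SHIFT == 22): PGDIR_SIZE is then
 * 1UL << 22 = 4MB, and PGDIR_MASK is ~(4MB-1) = 0xffc00000, i.e. the
 * top 10 address bits that index the page directory.
 */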

#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT	22
#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
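
/*
 * Worked example, assuming the usual __PAGE_OFFSET of 0xc0000000:
 * BOOT_USER_PGD_PTRS = 0xc0000000 >> 22 = 768, so the boot page
 * directory reserves its first 768 slots for user space and the
 * remaining 1024 - 768 = 256 slots for the kernel mapping.
 */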

#ifndef __ASSEMBLY__
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_START	(((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END	(FIXADDR_START)
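
/*
 * Worked example: with high_memory at 0xc8000000 (a 128MB box),
 * VMALLOC_START = (0xc8000000 + 0x800000) & ~0x7fffff = 0xc8800000,
 * i.e. the full 8MB hole.  If high_memory is not 8MB-aligned, the
 * round-down shrinks the hole, to as little as one page.
 */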

/*
 * The 4MB page is guessing..  Detailed in the infamous "Chapter H"
 * of the Pentium details, but assuming intel did the straightforward
 * thing, this bit set in the page directory entry just means that
 * the page directory entry points directly to a 4MB-aligned block of
 * memory.
 */
#define _PAGE_PRESENT	0x001
#define _PAGE_RW	0x002
#define _PAGE_USER	0x004
#define _PAGE_PWT	0x008
#define _PAGE_PCD	0x010
#define _PAGE_ACCESSED	0x020
#define _PAGE_DIRTY	0x040
#define _PAGE_PSE	0x080	/* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_GLOBAL	0x100	/* Global TLB entry PPro+ */

#define _PAGE_PROTNONE	0x080	/* If not present */

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)

/*
 * The i386 can't do page protection for execute, and considers that the
 * same as read.  Also, write permissions imply read permissions.  This is
 * the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
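
/*
 * Sketch of how the generic mm uses these (an assumption about code
 * outside this file): mm/mmap.c gathers the __P and __S entries into
 * a protection_map[] indexed by the read/write/exec/shared bits of a
 * vma.  A private PROT_READ|PROT_WRITE mapping thus selects __P011 ==
 * PAGE_COPY: the hardware RW bit stays clear, so the first write
 * faults and the kernel can do copy-on-write.
 */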

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without a 'verify_area(VERIFY_WRITE,..)'
 */
#undef TEST_VERIFY_AREA

/* page table for 0-4MB for everybody */
extern unsigned long pg0[1024];

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
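
/*
 * The vaddr argument is unused on the i386: there is a single shared
 * zero page.  (The argument exists for architectures that need one
 * zero page per cache colour.)
 */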

/*
 * Handling allocation failures during page table setup.
 */
extern void __handle_bad_pmd(pmd_t * pmd);
extern void __handle_bad_pmd_kernel(pmd_t * pmd);

#define pte_none(x)	(!pte_val(x))
#define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(xp)	do { pte_val(*(xp)) = 0; } while (0)
#define pte_pagenr(x)	((unsigned long)((pte_val(x) >> PAGE_SHIFT)))

#define pmd_none(x)	(!pmd_val(x))
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = 0; } while (0)

/*
 * Permanent address of a page. Obviously must never be
 * called on a highmem page.
 */
#define page_address(page) page
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
#define pte_page(x) (mem_map+pte_pagenr(x))
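
/*
 * Worked example: a pte with value 0x00123067 maps physical frame
 * 0x123 (pte_pagenr strips the low PAGE_SHIFT bits), so pte_page()
 * yields mem_map + 0x123.  The low bits 0x067 are the protections:
 * _PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY.
 */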

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#if 0
extern inline int pte_read(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }
extern inline int pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }

extern inline pte_t pte_rdprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_exprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_RW; return pte; }
extern inline pte_t pte_mkread(pte_t pte)	{ pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkexec(pte_t pte)	{ pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_RW; return pte; }
#endif

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })
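
/*
 * Usage sketch (illustrative only; the vga_pte variable is
 * hypothetical): a driver remapping the legacy VGA window might
 * build its entry as
 *
 *	pte_t vga_pte = mk_pte_phys(0xA0000, PAGE_KERNEL);
 *
 * The addition works because a page-aligned physical address has
 * zeros in the low bits where the protection bits live.
 */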

//extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
//{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

#define page_pte(page) page_pte_prot(page, __pgprot(0))

#define pmd_page(pmd) \
((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/* to find an entry in a page-table-directory. */
#define __pgd_offset(address) \
	((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

#define pgd_offset(mm, address) ((mm)->pgd+__pgd_offset(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define __pmd_offset(address) \
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* Find an entry in the third-level page table.. */
#define __pte_offset(address) \
	((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address) ((pte_t *) pmd_page(*(dir)) + \
	__pte_offset(address))
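
/*
 * Illustrative sketch, not part of the original header: chaining the
 * helpers above to walk from a kernel virtual address to its pte.
 * pmd_offset() is assumed from the rest of the page-table API; with
 * the mid level folded as described at the top of this file, it
 * effectively just casts the pgd entry.
 */
#if 0	/* example only */
extern inline pte_t * lookup_pte_kernel(unsigned long address)
{
	pgd_t * pgd = pgd_offset_k(address);
	pmd_t * pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return (pte_t *) 0;
	return pte_offset(pmd, address);
}
#endif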

#if 0
/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t pte)
{
}
#endif

/* Encode and de-code a swap entry */
#define SWP_TYPE(x)			(((x).val >> 1) & 0x3f)
#define SWP_OFFSET(x)			((x).val >> 8)
#define SWP_ENTRY(type, offset)		((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)		((pte_t) { (x).val })
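
/*
 * Worked example: SWP_ENTRY(2, 0x1234) encodes to (2 << 1) |
 * (0x1234 << 8) = 0x123404; SWP_TYPE recovers (0x123404 >> 1) & 0x3f
 * = 2 and SWP_OFFSET recovers 0x123404 >> 8 = 0x1234.  Bit 0
 * (_PAGE_PRESENT) is left clear, so a swapped-out pte never looks
 * present to the mmu.
 */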

#define module_map	vmalloc
#define module_unmap	vfree

#endif /* !__ASSEMBLY__ */

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)
#define kern_addr_valid(addr)	(1)

#define io_remap_page_range remap_page_range

#endif /* _I386_PGTABLE_H */