#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Page-mapping primitive inline functions
 *
 * Copyright 1995 Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>

#include <asm/hardirq.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <linux/bitops.h>

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
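
/*
 * Worked example (a sketch, assuming the common 4 KiB page size, i.e.
 * PAGE_CACHE_SIZE == 0x1000 and PAGE_CACHE_MASK == ~0xfffUL):
 *
 *	PAGE_CACHE_ALIGN(0x1234) == 0x2000	(rounded up to the next page)
 *	PAGE_CACHE_ALIGN(0x2000) == 0x2000	(already aligned, unchanged)
 */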

#define page_cache_alloc()	alloc_pages(GFP_HIGHUSER, 0)
#define page_cache_free(x)	__free_page(x)
#define page_cache_release(x)	__free_page(x)

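/*
 * Allocation sketch (illustrative only): page_cache_alloc() returns a
 * single (order-0) page, possibly from highmem given GFP_HIGHUSER, and
 * page_cache_release() drops the reference again.
 *
 *	struct page *page = page_cache_alloc();
 *	if (!page)
 *		return -ENOMEM;
 *	... fill the page ...
 *	page_cache_release(page);
 */
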
/*
 * From a kernel address, get the "struct page *"
 */
#define page_cache_entry(x)	(mem_map + MAP_NR(x))

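/*
 * MAP_NR(x) maps a kernel virtual address to its physical page frame
 * number, so mem_map + MAP_NR(x) is the struct page describing the frame
 * behind that address. (Only meaningful for directly-mapped addresses.)
 */
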
extern unsigned int page_hash_bits;
#define PAGE_HASH_BITS (page_hash_bits)
#define PAGE_HASH_SIZE (1 << PAGE_HASH_BITS)

extern atomic_t page_cache_size;	/* # of pages currently in the hash table */
extern struct page **page_hash_table;

extern void page_cache_init(unsigned long);

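/*
 * page_cache_init() is the boot-time hook that sizes the hash table: in
 * trees of this vintage it derives page_hash_bits from the page count it
 * is handed, so PAGE_HASH_SIZE scales with available memory.
 */
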
/*
 * We use a power-of-two hash table to avoid a modulus,
 * and get a reasonable hash by knowing roughly how the
 * inode pointer and indexes are distributed (ie, we
 * roughly know which bits are "significant")
 *
 * For the time being it will work for struct address_space too (most of
 * them sitting inside the inodes). We might want to change it later.
 */
extern inline unsigned long _page_hashfn(struct address_space * mapping, unsigned long index)
{
#define i (((unsigned long) mapping)/(sizeof(struct inode) & ~ (sizeof(struct inode) - 1)))
#define s(x) ((x)+((x)>>PAGE_HASH_BITS))
	return s(i+index) & (PAGE_HASH_SIZE-1);
#undef i
#undef s
}
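
/*
 * How the hash works: the "i" macro divides the mapping pointer by the
 * largest power of two dividing sizeof(struct inode), stripping the low
 * bits that are the same for every inode; s(x) folds the bits above
 * PAGE_HASH_BITS back into the low bits; the final mask keeps the result
 * inside the power-of-two table, with no modulus needed.
 */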

#define page_hash(mapping,index) (page_hash_table+_page_hashfn(mapping,index))

extern struct page * __find_get_page(struct address_space *mapping,
				unsigned long index, struct page **hash);
#define find_get_page(mapping, index) \
		__find_get_page(mapping, index, page_hash(mapping, index))
extern struct page * __find_lock_page(struct address_space * mapping,
				unsigned long index, struct page **hash);
extern void lock_page(struct page *page);
#define find_lock_page(mapping, index) \
		__find_lock_page(mapping, index, page_hash(mapping, index))

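/*
 * Lookup sketch (illustrative only): find_get_page() returns the cached
 * page with its reference count raised, or NULL when nothing is cached
 * at that index; the caller must drop the reference when done.
 *
 *	struct page *page = find_get_page(mapping, index);
 *	if (page) {
 *		... use the (unlocked) page ...
 *		page_cache_release(page);
 *	}
 *
 * find_lock_page() performs the same lookup but returns the page already
 * locked, so the caller unlocks before releasing:
 *
 *	struct page *page = find_lock_page(mapping, index);
 *	if (page) {
 *		... the page is stable while locked ...
 *		UnlockPage(page);
 *		page_cache_release(page);
 *	}
 *
 * (UnlockPage() is the page-unlock helper from <linux/mm.h> in trees of
 * this vintage; treat these fragments as sketches, not canonical users.)
 */
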
extern void __add_page_to_hash_queue(struct page * page, struct page **p);

extern void add_to_page_cache(struct page * page, struct address_space *mapping, unsigned long index);
extern int add_to_page_cache_unique(struct page * page, struct address_space *mapping, unsigned long index, struct page **hash);

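/*
 * Note (the definitions live in mm/filemap.c, not in this header):
 * add_to_page_cache() inserts unconditionally, while
 * add_to_page_cache_unique() is expected to return 0 on success and
 * non-zero when a page already occupies that index in the cache.
 */
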
#if 0
extern inline void add_page_to_hash_queue(struct page * page, struct inode * inode, unsigned long index)
{
	__add_page_to_hash_queue(page, page_hash(&inode->i_data,index));
}

extern inline void add_page_to_inode_queue(struct address_space *mapping, struct page * page)
{
	struct list_head *head = &mapping->pages;

	if (!mapping->nrpages++) {
		if (!list_empty(head))
			BUG();
	} else {
		if (list_empty(head))
			BUG();
	}
	list_add(&page->list, head);
	page->mapping = mapping;
}

extern inline void remove_page_from_inode_queue(struct page * page)
{
	struct address_space * mapping = page->mapping;

	mapping->nrpages--;
	list_del(&page->list);
}

extern void ___wait_on_page(struct page *);

extern inline void wait_on_page(struct page * page)
{
	if (PageLocked(page))
		___wait_on_page(page);
}
#endif

#endif