#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Page-mapping primitive inline functions
 *
 * Copyright 1995 Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>

#include <asm/system.h>
#include <asm/pgtable.h>
|
/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
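
/*
 * PAGE_CACHE_ALIGN() rounds an address up to the next page cache
 * boundary. For example, assuming 4kB pages (PAGE_CACHE_SIZE == 0x1000,
 * PAGE_CACHE_MASK == ~0xfff):
 *
 *	PAGE_CACHE_ALIGN(0x1234) == (0x1234 + 0xfff) & ~0xfff == 0x2000
 *	PAGE_CACHE_ALIGN(0x2000) == (0x2000 + 0xfff) & ~0xfff == 0x2000
 *
 * so an address already on a boundary is left unchanged.
 */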

#define page_cache_alloc()	alloc_pages(GFP_HIGHUSER, 0)
#define page_cache_free(x)	__free_page(x)
#define page_cache_release(x)	__free_page(x)
|
/*
 * From a kernel address, get the "struct page *"
 */
#define page_cache_entry(x)	(mem_map + MAP_NR(x))
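
/*
 * MAP_NR() gives the page frame index of a kernel virtual address
 * (on most architectures ((x) - PAGE_OFFSET) >> PAGE_SHIFT), so this
 * is just &mem_map[pfn]. E.g., assuming PAGE_OFFSET == 0xC0000000 and
 * 4kB pages, page_cache_entry(0xC0003000) == &mem_map[3].
 */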
|
extern unsigned int page_hash_bits;
#define PAGE_HASH_BITS (page_hash_bits)
#define PAGE_HASH_SIZE (1 << PAGE_HASH_BITS)
|
extern atomic_t page_cache_size; /* # of pages currently in the hash table */
extern struct page **page_hash_table;

extern void page_cache_init(unsigned long);
|
/*
 * We use a power-of-two hash table to avoid a modulus,
 * and get a reasonable hash by knowing roughly how the
 * inode pointer and indexes are distributed (ie, we
 * roughly know which bits are "significant")
 *
 * For the time being it will work for struct address_space too (most of
 * them sitting inside the inodes). We might want to change it later.
 */
extern inline unsigned long _page_hashfn(struct address_space * mapping, unsigned long index)
{
/* Divide the pointer by the largest power of two dividing
 * sizeof(struct inode) - a cheap approximation of dividing by the
 * inode size, stripping low bits that never vary. */
#define i (((unsigned long) mapping)/(sizeof(struct inode) & ~ (sizeof(struct inode) - 1)))
/* Fold the bits above PAGE_HASH_BITS back into the low bits. */
#define s(x) ((x)+((x)>>PAGE_HASH_BITS))
	return s(i+index) & (PAGE_HASH_SIZE-1);
#undef i
#undef s
}
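
/*
 * For instance, if PAGE_HASH_BITS is 11 (a 2048-bucket table), then
 * s(x) == x + (x >> 11): bits 11..21 of i+index are folded into bits
 * 0..10 before the final mask, so the high bits still affect the
 * bucket chosen:
 *
 *	s(0x1801) & 0x7ff == (0x1801 + 0x3) & 0x7ff == 0x004
 */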
|
#define page_hash(mapping,index) (page_hash_table+_page_hashfn(mapping,index))

extern struct page * __find_get_page (struct address_space *mapping,
				unsigned long index, struct page **hash);
#define find_get_page(mapping, index) \
		__find_get_page(mapping, index, page_hash(mapping, index))
extern struct page * __find_lock_page (struct address_space * mapping,
				unsigned long index, struct page **hash);
extern void lock_page(struct page *page);
#define find_lock_page(mapping, index) \
		__find_lock_page(mapping, index, page_hash(mapping, index))
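
/*
 * A sketch of the usual lookup pattern: find_lock_page() returns the
 * page locked and with a reference held, or NULL if it is not in the
 * cache (assuming the UnlockPage() helper from <linux/mm.h>):
 *
 *	struct page *page = find_lock_page(mapping, index);
 *	if (page) {
 *		... examine or modify the page while it is locked ...
 *		UnlockPage(page);
 *		page_cache_release(page);
 *	}
 */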
|
extern void __add_page_to_hash_queue(struct page * page, struct page **p);

extern void add_to_page_cache(struct page * page, struct address_space *mapping, unsigned long index);
extern int add_to_page_cache_unique(struct page * page, struct address_space *mapping, unsigned long index, struct page **hash);
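
/*
 * A sketch of how a new page might enter the cache, assuming
 * add_to_page_cache_unique() returns 0 on success and non-zero when a
 * page is already cached at (mapping, index):
 *
 *	struct page *page = page_cache_alloc();
 *	if (page) {
 *		if (add_to_page_cache_unique(page, mapping, index,
 *					     page_hash(mapping, index)))
 *			page_cache_release(page);	(lost the race)
 *		else
 *			... read data into the new cache page ...
 *	}
 */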
|
#if 0
extern inline void add_page_to_hash_queue(struct page * page, struct inode * inode, unsigned long index)
{
	__add_page_to_hash_queue(page, page_hash(&inode->i_data,index));
}

extern inline void add_page_to_inode_queue(struct address_space *mapping, struct page * page)
{
	struct list_head *head = &mapping->pages;

	if (!mapping->nrpages++) {
		if (!list_empty(head))
			BUG();
	} else {
		if (list_empty(head))
			BUG();
	}
	list_add(&page->list, head);
	page->mapping = mapping;
}

extern inline void remove_page_from_inode_queue(struct page * page)
{
	struct address_space * mapping = page->mapping;

	mapping->nrpages--;
	list_del(&page->list);
}

extern void ___wait_on_page(struct page *);

extern inline void wait_on_page(struct page * page)
{
	if (PageLocked(page))
		___wait_on_page(page);
}
#endif
|
#endif /* _LINUX_PAGEMAP_H */