/* $Id: uaccess.h 142 2000-04-23 14:55:46Z ktk $ */

#ifndef __i386_UACCESS_H
#define __i386_UACCESS_H

/*
 * User space memory access functions
 */
//#include <linux/config.h>
//#include <linux/sched.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })


#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds() (KERNEL_DS)
#define get_fs() (current->addr_limit)
#define set_fs(x) (current->addr_limit = (x))

#define segment_eq(a,b) ((a).seg == (b).seg)

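/*
 * Illustrative use of get_fs()/set_fs() (a sketch, not part of this header's
 * interface): temporarily raising addr_limit so that the user-access helpers
 * can operate on a kernel buffer.  The surrounding function is hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);		// bypass the addr_limit check
 *	... call code that uses copy_from_user() on a kernel pointer ...
 *	set_fs(old_fs);			// always restore the previous limit
 */
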
extern int __verify_write(const void *, unsigned long);

#define __addr_ok(addr) ((unsigned long)(addr) < (current->addr_limit.seg))

/*
 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
 */

int is_access_ok(int type, void *addr, unsigned long size);

#define access_ok(type, addr, size) is_access_ok((int)(type), (void *)(addr), (size))

#define verify_area(type, addr, size) (access_ok((type), (void *)(addr), (size)) ? 0 : -EFAULT)


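/*
 * A minimal sketch of the check is_access_ok() has to perform (illustrative
 * only; the real definition lives elsewhere).  Computing "addr + size"
 * directly can wrap around -- that is the 33-bit carry mentioned above -- so
 * the limit is compared without forming the sum:
 *
 *	int is_access_ok(int type, void *addr, unsigned long size)
 *	{
 *		unsigned long a = (unsigned long)addr;
 *		unsigned long limit = current->addr_limit.seg;
 *
 *		return size <= limit && a <= limit - size;
 *	}
 */
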
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or TLB entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);


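/*
 * How the table is typically consulted from the page-fault path (a sketch;
 * the register layout and handler shown here are assumptions, not part of
 * this header):
 *
 *	unsigned long fixup = search_exception_table(regs->eip);
 *
 *	if (fixup) {
 *		regs->eip = fixup;	// resume at the out-of-line fixup code
 *		return;			// fault came from a whitelisted access
 *	}
 *	// otherwise: a genuine kernel fault, oops/panic
 */
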
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly.  We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact.  Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

/* Careful: we have to cast the result to the type of the pointer for sign reasons */
int get_user(int size, void *dest, void *src);

extern void __put_user_bad(void);

int put_user(int x, void *ptr);

#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))

int __put_user(int size, int x, void *ptr);

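/*
 * Usage sketch for this port's helpers (illustrative only).  Note that
 * get_user() here takes an explicit size and destination, matching the
 * get_user_ret() macro further down; "arg" and "uptr" are made-up names.
 *
 *	int arg;
 *
 *	if (get_user(sizeof(arg), (void *)&arg, (void *)uptr))
 *		return -EFAULT;
 *	if (put_user(arg + 1, (void *)uptr))
 *		return -EFAULT;
 */
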
/* Stand-ins (an assumption of this port): forward to the C helper declared
 * above; the original bodies were left unimplemented. */
#define __put_user_nocheck(x,ptr,size) \
	__put_user((size), (int)(x), (void *)(ptr))

#define __put_user_size(x,ptr,size,retval) \
	((retval) = __put_user((size), (int)(x), (void *)(ptr)))

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
/* Unused stand-in in this header: the asm variant was never filled in,
 * so route it to the link-time error catcher. */
#define __put_user_asm(x, addr, err, itype, rtype, ltype) __put_user_bad()



/* Stand-in (an assumption of this port): forward to the C get_user() helper
 * declared above; the original body was left unimplemented. */
#define __get_user_nocheck(x,ptr,size) \
	get_user((size), (void *)&(x), (void *)(ptr))

extern long __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval) \
	((retval) = get_user((size), (void *)&(x), (void *)(ptr)))

/* Unused stand-in; route to the link-time error catcher. */
#define __get_user_asm(x, addr, err, itype, rtype, ltype) __get_user_bad()

/*
 * The "xxx_ret" versions return the constant given as the third argument
 * if something bad happens.  They are only usable where simply returning
 * from the function xxx_ret appears in is the right way to bail out.
 */

#define put_user_ret(x,ptr,ret) ({ if (put_user(x,ptr)) return ret; })

#define get_user_ret(x,ptr,ret) ({ if (get_user(sizeof(x), (void *)&(x), (void *)(ptr))) return ret; })



/*
 * Copy To/From Userspace
 */

/* Generic arbitrary sized copy. */
void __copy_user(void *to, const void *from, unsigned long n);

void __copy_user_zeroing(void *to, const void *from, unsigned long n);

/* We let the __ versions of copy_from/to_user inline, because they're often
 * used in fast paths and have only a small space overhead.
 */
static __inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
	__copy_user_zeroing(to,from,n);
	return n;
}

static __inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
	__copy_user(to,from,n);
	return n;
}


/* Optimize just a little bit when we know the size of the move. */
void __constant_copy_user(void *to, const void *from, unsigned long n);

/* Optimize just a little bit when we know the size of the move. */
void __constant_copy_user_zeroing(void *to, const void *from, unsigned long n);

unsigned long __generic_copy_to_user(void *, const void *, unsigned long);
unsigned long __generic_copy_from_user(void *, const void *, unsigned long);

static __inline unsigned long
__constant_copy_to_user(void *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		__constant_copy_user(to,from,n);
	return n;
}

static __inline unsigned long
__constant_copy_from_user(void *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, (void *)from, n))
		__constant_copy_user_zeroing(to,from,n);
	return n;
}

static __inline unsigned long
__constant_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
	__constant_copy_user(to,from,n);
	return n;
}

static __inline unsigned long
__constant_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
	__constant_copy_user_zeroing(to,from,n);
	return n;
}

unsigned long copy_to_user(void *to, const void *from, unsigned long n);

unsigned long copy_from_user(void *to, const void *from, unsigned long n);

#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n)) return retval; })

#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n)) return retval; })

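/*
 * Usage sketch (illustrative; "ubuf", "kbuf" and "len" are made-up names):
 * by convention these return the number of bytes that could NOT be copied,
 * so any nonzero result is treated as a fault.
 *
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */
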
#define __copy_to_user(to,from,n) \
	(__builtin_constant_p(n) ? \
	 __constant_copy_to_user_nocheck((to),(from),(n)) : \
	 __generic_copy_to_user_nocheck((to),(from),(n)))

#define __copy_from_user(to,from,n) \
	(__builtin_constant_p(n) ? \
	 __constant_copy_from_user_nocheck((to),(from),(n)) : \
	 __generic_copy_from_user_nocheck((to),(from),(n)))

long strncpy_from_user(char *dst, const char *src, long count);
long __strncpy_from_user(char *dst, const char *src, long count);
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
long strnlen_user(const char *str, long n);
unsigned long clear_user(void *mem, unsigned long len);
unsigned long __clear_user(void *mem, unsigned long len);
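
/*
 * Usage sketch for the string helpers (illustrative; "uname" is a made-up
 * user pointer).  strncpy_from_user() conventionally returns the length of
 * the string copied, not counting the trailing NUL, or a negative error.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name) - 1);
 *
 *	if (len < 0)
 *		return -EFAULT;
 *	name[len] = '\0';
 */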

#endif /* __i386_UACCESS_H */