patch-2.1.4 linux/include/asm-i386/segment.h


diff -u --recursive --new-file v2.1.3/linux/include/asm-i386/segment.h linux/include/asm-i386/segment.h
@@ -12,31 +12,6 @@
 #include <linux/string.h>
 
 /*
- * Uh, these should become the main single-value transfer routines..
- * They automatically use the right size if we just have the right
- * pointer type..
- */
-#define put_user(x,ptr)	do { (*(ptr)=(x)); } while (0)
-#define get_user(ptr)	(*(ptr))
-
-/*
- * These are deprecated..
- *
- * Use "put_user()" and "get_user()" with the proper pointer types instead.
- */
-
-#define get_fs_byte(addr) get_user((const unsigned char *)(addr))
-#define get_fs_word(addr) get_user((const unsigned short *)(addr))
-#define get_fs_long(addr) get_user((const unsigned int *)(addr))
-
-#define put_fs_byte(x,addr) put_user((x),(unsigned char *)(addr))
-#define put_fs_word(x,addr) put_user((x),(unsigned short *)(addr))
-#define put_fs_long(x,addr) put_user((x),(unsigned int *)(addr))
-
-#define memcpy_fromfs(to,from,n) memcpy((to),(from),(n))
-#define memcpy_tofs(to,from,n)   memcpy((to),(from),(n))
-
-/*
  * The fs value determines whether argument validity checking should be
  * performed or not.  If get_fs() == USER_DS, checking is performed, with
  * get_fs() == KERNEL_DS, checking is bypassed.
@@ -48,22 +23,140 @@
 #define set_fs(x)	(current->tss.segment = (x))
 #define get_ds()	(KERNEL_DS)
 
-extern int __verify_write(const void *addr, unsigned long size);
-
-#if CPU > 386
+#define __user_ok(addr,size) \
+((size <= 0xC0000000) && (addr <= 0xC0000000 - size))
+#define __kernel_ok \
+(get_fs() == KERNEL_DS)
 
-#define verify_write(type,addr,size) 0
+extern int __verify_write(const void *, unsigned long);
 
+#if CPU > 386
+#define __access_ok(type,addr,size) \
+(__kernel_ok || __user_ok(addr,size))
 #else
+#define __access_ok(type,addr,size) \
+(__kernel_ok || (__user_ok(addr,size) && \
+  ((type) == VERIFY_READ || __verify_write((void *)(addr),(size)))))
+#endif /* CPU */
+
+#define access_ok(type,addr,size) \
+__access_ok((type),(unsigned long)(addr),(size))
 
 /*
- * The intel i386 CPU needs to check writability by hand, as the
- * CPU does not honour the write protect bit in supervisor mode
+ * Uh, these should become the main single-value transfer routines..
+ * They automatically use the right size if we just have the right
+ * pointer type..
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the uglyness from the user.
  */
-#define verify_write(type,addr,size) \
-(((type) && !wp_works_ok)?__verify_write((addr),(size)):0)
-
-#endif
+#define put_user(x,ptr) ({ \
+unsigned long __pu_addr = (unsigned long)(ptr); \
+__put_user((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })
+
+#define get_user(x,ptr) ({ \
+unsigned long __gu_addr = (unsigned long)(ptr); \
+__get_user((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })
+
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct *)(x))
+
+#define __put_user(x,addr,size) ({ \
+int __pu_ret = -EFAULT; \
+if (access_ok(VERIFY_WRITE,addr,size)) { \
+switch (size) { \
+case 1: __put_user_8(x,addr,__pu_ret); break; \
+case 2: __put_user_16(x,addr,__pu_ret); break; \
+case 4: __put_user_32(x,addr,__pu_ret); break; \
+default: __pu_ret = __put_user_bad(); break; \
+} } __pu_ret; })
+
+#define __put_user_asm(x,addr,ret,bwl,reg,rtype) \
+__asm__ __volatile__( \
+	"movl $1f,%0\n\t" \
+	"incl %3\n\t" \
+	"mov" #bwl " %" reg "1,%2\n\t" \
+	"xorl %0,%0\n\t" \
+	"decl %3\n1:" \
+:"=d" (ret) \
+:#rtype (x), "m" (__m(addr)),"m" (current->tss.ex.count), "0" (ret))
+
+#define __put_user_8(x,addr,ret) \
+__put_user_asm(x,addr,ret,b,"b","iq")
+#define __put_user_16(x,addr,ret) \
+__put_user_asm(x,addr,ret,w,"w","ir")
+#define __put_user_32(x,addr,ret) \
+__put_user_asm(x,addr,ret,l,"","ir")
+
+extern int __put_user_bad(void);
+
+#define __get_user(x,addr,size,type) ({ \
+int __gu_ret = -EFAULT; \
+unsigned long __gu_val = 0; \
+if (access_ok(VERIFY_WRITE,addr,size)) { \
+switch (size) { \
+case 1: __get_user_8(__gu_val,addr,__gu_ret); break; \
+case 2: __get_user_16(__gu_val,addr,__gu_ret); break; \
+case 4: __get_user_32(__gu_val,addr,__gu_ret); break; \
+default: __gu_ret = __get_user_bad(); break; \
+} } x = (type) __gu_val; __gu_ret; })
+
+#define __get_user_asm(x,addr,ret,bwl,reg,rtype) \
+__asm__ __volatile__( \
+	"movl $1f,%0\n\t" \
+	"incl %3\n\t" \
+	"mov" #bwl " %2,%" reg "1\n\t" \
+	"xorl %0,%0\n\t" \
+	"decl %3\n1:" \
+:"=d" (ret), #rtype (x) \
+:"m" (__m(addr)),"m" (current->tss.ex.count), "0" (ret), "1" (x))
+
+#define __get_user_8(x,addr,ret) \
+__get_user_asm(x,addr,ret,b,"b","=q")
+#define __get_user_16(x,addr,ret) \
+__get_user_asm(x,addr,ret,w,"w","=r")
+#define __get_user_32(x,addr,ret) \
+__get_user_asm(x,addr,ret,l,"","=r")
+
+extern int __get_user_bad(void);
+
+#define __copy_user(to,from,size) \
+__asm__ __volatile__( \
+	"movl $3f,%0\n\t" \
+	"incl %2\n\t" \
+	"rep; movsl\n\t" \
+	"testb $2,%b3\n\t" \
+	"je 1f\n\t" \
+	"movsw\n\t" \
+	"subb $2,%b3\n" \
+	"1:\t" \
+	"testb $1,%b3\n\t" \
+	"je 2f\n\t" \
+	"movsb\n\t" \
+	"decb %b3\n" \
+	"2:\t" \
+	"decl %2\n" \
+	"3:\tlea 0(%3,%1,4),%0" \
+	:"=d" (size) \
+	:"c" (size >> 2), "m" (current->tss.ex), "r" (size & 3), \
+	 "D" (to), "S" (from), "0" (size) \
+	:"cx","di","si","memory");
+
+#define copy_to_user(to,from,n) ({ \
+unsigned long __cu_to = (unsigned long) (to); \
+unsigned long __cu_size = (unsigned long) (n); \
+if (__cu_size && __access_ok(VERIFY_WRITE, __cu_to, __cu_size)) \
+__copy_user(__cu_to,from,__cu_size); \
+__cu_size; })
+
+#define copy_from_user(to,from,n) ({ \
+unsigned long __cu_from = (unsigned long) (from); \
+unsigned long __cu_size = (unsigned long) (n); \
+if (__cu_size && __access_ok(VERIFY_READ, __cu_from, __cu_size)) \
+__copy_user(to,__cu_from,__cu_size); \
+__cu_size; })
 
 #endif /* __ASSEMBLY__ */
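
Notes on the new user-access interface introduced by this patch follow; the diff above is reproduced verbatim.

The comment retained at the top of the second hunk describes the fs-override convention that everything below relies on: get_fs() reports which address limit is in force, and kernel code that wants to feed kernel pointers through an interface written for user pointers temporarily switches to KERNEL_DS. A minimal sketch of the save/override/restore idiom follows; it is a fragment rather than a standalone program, and the unsigned long type for the saved value and the do_something() callee are assumptions made for illustration only.

extern void do_something(int *);	/* invented callee that uses get_user()/put_user() */

static void kernel_caller(void)
{
	unsigned long old_fs = get_fs();	/* remember the caller's limit */
	int buf;

	set_fs(KERNEL_DS);	/* get_fs() == KERNEL_DS: checking is bypassed */
	do_something(&buf);	/* may now get_user()/put_user() on the kernel buffer */
	set_fs(old_fs);		/* always restore before returning */
}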
 
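The heart of the new access_ok() is __user_ok(): a range [addr, addr+size) is acceptable only if it lies entirely below the 3 GB user/kernel split at 0xC0000000, and the test is written as two comparisons so that addr + size can never wrap around 32 bits. The user-space sketch below shows why that form matters; the 0xC0000000 limit comes from the patch, while the names and the use of uint32_t to model a 32-bit address are illustration only.

#include <stdio.h>
#include <stdint.h>

#define LIMIT 0xC0000000UL		/* the split used by __user_ok() */

/* Naive form: wrong once addr + size wraps past 2^32. */
static int naive_ok(uint32_t addr, uint32_t size)
{
	return addr + size <= LIMIT;
}

/* The patch's form: bound size first, then addr, so nothing overflows. */
static int user_ok(uint32_t addr, uint32_t size)
{
	return size <= LIMIT && addr <= LIMIT - size;
}

int main(void)
{
	/* A bogus range near the top of the 32-bit address space. */
	uint32_t addr = 0xFFFFF000, size = 0x2000;

	printf("naive: %d  overflow-safe: %d\n",
	       naive_ok(addr, size), user_ok(addr, size));
	return 0;
}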

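The replacement put_user(x,ptr) and get_user(x,ptr) change the calling convention: both evaluate to 0 on success and -EFAULT when the pointer fails access_ok() or the access faults, and get_user() now delivers the fetched value through its first argument instead of being the value, which is why the old single-argument get_user(ptr) and the get_fs_*/put_fs_* wrappers are deleted in the first hunk. The "return two values" trick the comment mentions is GCC's ({ ... }) statement expression, with sizeof(*(ptr)) choosing the 1/2/4-byte transfer and __typeof__(*(ptr)) supplying the conversion, so the caller never passes an extra pointer. A hedged fragment showing the new convention (kernel context assumed; the function and variable names are invented):

static int demo_read_arg(int *uptr)
{
	int val;

	if (get_user(val, uptr))	/* old style was: val = get_user(uptr); */
		return -EFAULT;
	val++;
	if (put_user(val, uptr))
		return -EFAULT;
	return 0;
}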
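
__put_user_asm() and __get_user_asm() are where the new fault handling lives. Each one loads the address of its local "1:" label into the result register, increments current->tss.ex.count, performs the single mov to or from user space (the __m()/__large_struct cast merely manufactures a memory operand for that address), and on the non-faulting path clears the register to 0 and decrements the counter again. The fault-handler half is not in this header, but the register protocol implies it: when a fault arrives with the counter non-zero, the handler can resume at the recorded label with -EFAULT left in the result register instead of treating the access as a kernel bug. The runnable user-space program below is only an analogue of that idea, with sigsetjmp() playing the recorded label and a flag playing tss.ex.count; it is not kernel code.

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <errno.h>

static sigjmp_buf resume;		/* stands in for the "$1f" resume address */
static volatile sig_atomic_t in_access;	/* stands in for tss.ex.count */

static void segv(int sig)
{
	if (in_access)
		siglongjmp(resume, 1);	/* fault inside a guarded access: recover */
	signal(sig, SIG_DFL);		/* otherwise take the default action */
	raise(sig);
}

static int put_user_demo(int x, int *ptr)
{
	volatile int ret = -EFAULT;

	if (sigsetjmp(resume, 1) == 0) {
		in_access = 1;
		*(volatile int *) ptr = x;	/* the guarded store */
		ret = 0;			/* reached only if it did not fault */
	}
	in_access = 0;
	return ret;
}

int main(void)
{
	int ok;

	signal(SIGSEGV, segv);
	printf("valid pointer: %d\n", put_user_demo(42, &ok));		/* 0 */
	printf("bad pointer:   %d\n", put_user_demo(42, (int *) 8));	/* -EFAULT */
	return 0;
}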
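
copy_to_user() and copy_from_user() are the bulk counterparts: they validate the whole range once with __access_ok() and then run __copy_user(), a rep movsl plus a word/byte tail inside the same ex.count bracket, with the recovery label recomputing how much was left. The value of either expression is therefore the number of bytes not copied: 0 means complete success, and if access_ok() rejects the range nothing is copied and the full count comes back, which callers normally turn into -EFAULT. A hedged fragment of that usage (kernel context assumed; struct demo_req and demo_ioctl() are invented names):

struct demo_req {
	int in;
	int out;
};

static int demo_ioctl(unsigned long arg)
{
	struct demo_req req;

	if (copy_from_user(&req, (struct demo_req *) arg, sizeof(req)))
		return -EFAULT;		/* some bytes were left uncopied */
	req.out = 2 * req.in;
	if (copy_to_user((struct demo_req *) arg, &req, sizeof(req)))
		return -EFAULT;
	return 0;
}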