Subject: Re: [PATCH] x86: Add support for 64bit get_user() on x86-32
Date: 2012-12-12
Can we worry about this after the merge window?

ville.syrjala@linux.intel.com wrote:

>From: Ville Syrjälä <ville.syrjala@linux.intel.com>
>
>Implement __get_user_8() for x86-32. It returns the
>64bit result in the edx:eax register pair, and ecx is
>used to pass in the address and to return the error
>value.
>
>For consistency, change the register assignment for all
>other __get_user_x() variants, so that the address is
>passed in ecx/rcx, the error value is returned in
>ecx/rcx, and eax/rax contains the actual value.
>
>This is a partial refresh of a patch [1] by Jamie Lokier from
>2004. Only the minimal changes to implement 64bit get_user()
>were picked from the original patch.
>
>[1] http://article.gmane.org/gmane.linux.kernel/198823
>
>Cc: Jamie Lokier <jamie@shareable.org>
>Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
>---
> arch/x86/include/asm/uaccess.h | 17 ++++++--
> arch/x86/kernel/i386_ksyms_32.c | 1 +
> arch/x86/lib/getuser.S | 82 ++++++++++++++++++++++++++------------
> 3 files changed, 69 insertions(+), 31 deletions(-)
>
>diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
>index 7ccf8d1..3f4387e 100644
>--- a/arch/x86/include/asm/uaccess.h
>+++ b/arch/x86/include/asm/uaccess.h
>@@ -127,7 +127,7 @@ extern int __get_user_bad(void);
>
> #define __get_user_x(size, ret, x, ptr) \
> asm volatile("call __get_user_" #size \
>- : "=a" (ret), "=d" (x) \
>+ : "=c" (ret), "=a" (x) \
> : "0" (ptr)) \
>
> /* Careful: we have to cast the result to the type of the pointer
>@@ -151,8 +151,11 @@ extern int __get_user_bad(void);
> * On error, the variable @x is set to zero.
> */
> #ifdef CONFIG_X86_32
>-#define __get_user_8(__ret_gu, __val_gu, ptr) \
>- __get_user_x(X, __ret_gu, __val_gu, ptr)
>+#define __get_user_8(ret, x, ptr) \
>+ asm volatile("call __get_user_8" \
>+ : "=c" (ret), "=A" (x) \
>+ : "0" (ptr)) \
>+
> #else
> #define __get_user_8(__ret_gu, __val_gu, ptr) \
> __get_user_x(8, __ret_gu, __val_gu, ptr)
>@@ -162,6 +165,7 @@ extern int __get_user_bad(void);
> ({ \
> int __ret_gu; \
> unsigned long __val_gu; \
>+ unsigned long long __val_gu8; \
> __chk_user_ptr(ptr); \
> might_fault(); \
> switch (sizeof(*(ptr))) { \
>@@ -175,13 +179,16 @@ extern int __get_user_bad(void);
> __get_user_x(4, __ret_gu, __val_gu, ptr); \
> break; \
> case 8: \
>- __get_user_8(__ret_gu, __val_gu, ptr); \
>+ __get_user_8(__ret_gu, __val_gu8, ptr); \
> break; \
> default: \
> __get_user_x(X, __ret_gu, __val_gu, ptr); \
> break; \
> } \
>- (x) = (__typeof__(*(ptr)))__val_gu; \
>+ if (sizeof(*(ptr)) == 8) \
>+ (x) = (__typeof__(*(ptr)))__val_gu8; \
>+ else \
>+ (x) = (__typeof__(*(ptr)))__val_gu; \
> __ret_gu; \
> })
>
>diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
>index 9c3bd4a..0fa6912 100644
>--- a/arch/x86/kernel/i386_ksyms_32.c
>+++ b/arch/x86/kernel/i386_ksyms_32.c
>@@ -26,6 +26,7 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
> EXPORT_SYMBOL(__get_user_1);
> EXPORT_SYMBOL(__get_user_2);
> EXPORT_SYMBOL(__get_user_4);
>+EXPORT_SYMBOL(__get_user_8);
>
> EXPORT_SYMBOL(__put_user_1);
> EXPORT_SYMBOL(__put_user_2);
>diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
>index 156b9c8..38afef0 100644
>--- a/arch/x86/lib/getuser.S
>+++ b/arch/x86/lib/getuser.S
>@@ -14,12 +14,11 @@
> /*
> * __get_user_X
> *
>- * Inputs: %[r|e]ax contains the address.
>- * The register is modified, but all changes are undone
>- * before returning because the C code doesn't know about it.
>+ * Inputs: %[r|e]cx contains the address.
> *
>- * Outputs: %[r|e]ax is error code (0 or -EFAULT)
>- * %[r|e]dx contains zero-extended value
>+ * Outputs: %[r|e]cx is error code (0 or -EFAULT)
>+ * %[r|e]ax contains zero-extended value
>+ * %edx contains the high bits of the value for __get_user_8 on x86-32
> *
> *
> * These functions should not modify any other registers,
>@@ -38,12 +37,12 @@
> .text
> ENTRY(__get_user_1)
> CFI_STARTPROC
>- GET_THREAD_INFO(%_ASM_DX)
>- cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
>+ GET_THREAD_INFO(%_ASM_AX)
>+ cmp TI_addr_limit(%_ASM_AX),%_ASM_CX
> jae bad_get_user
> ASM_STAC
>-1: movzb (%_ASM_AX),%edx
>- xor %eax,%eax
>+1: movzb (%_ASM_CX),%eax
>+ xor %ecx,%ecx
> ASM_CLAC
> ret
> CFI_ENDPROC
>@@ -51,14 +50,14 @@ ENDPROC(__get_user_1)
>
> ENTRY(__get_user_2)
> CFI_STARTPROC
>- add $1,%_ASM_AX
>+ add $1,%_ASM_CX
> jc bad_get_user
>- GET_THREAD_INFO(%_ASM_DX)
>- cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
>+ GET_THREAD_INFO(%_ASM_AX)
>+ cmp TI_addr_limit(%_ASM_AX),%_ASM_CX
> jae bad_get_user
> ASM_STAC
>-2: movzwl -1(%_ASM_AX),%edx
>- xor %eax,%eax
>+2: movzwl -1(%_ASM_CX),%eax
>+ xor %ecx,%ecx
> ASM_CLAC
> ret
> CFI_ENDPROC
>@@ -66,14 +65,14 @@ ENDPROC(__get_user_2)
>
> ENTRY(__get_user_4)
> CFI_STARTPROC
>- add $3,%_ASM_AX
>+ add $3,%_ASM_CX
> jc bad_get_user
>- GET_THREAD_INFO(%_ASM_DX)
>- cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
>+ GET_THREAD_INFO(%_ASM_AX)
>+ cmp TI_addr_limit(%_ASM_AX),%_ASM_CX
> jae bad_get_user
> ASM_STAC
>-3: mov -3(%_ASM_AX),%edx
>- xor %eax,%eax
>+3: mov -3(%_ASM_CX),%eax
>+ xor %ecx,%ecx
> ASM_CLAC
> ret
> CFI_ENDPROC
>@@ -82,14 +81,30 @@ ENDPROC(__get_user_4)
> #ifdef CONFIG_X86_64
> ENTRY(__get_user_8)
> CFI_STARTPROC
>- add $7,%_ASM_AX
>+ add $7,%_ASM_CX
> jc bad_get_user
>- GET_THREAD_INFO(%_ASM_DX)
>- cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
>+ GET_THREAD_INFO(%_ASM_AX)
>+ cmp TI_addr_limit(%_ASM_AX),%_ASM_CX
> jae bad_get_user
> ASM_STAC
>-4: movq -7(%_ASM_AX),%_ASM_DX
>- xor %eax,%eax
>+4: movq -7(%_ASM_CX),%_ASM_AX
>+ xor %ecx,%ecx
>+ ASM_CLAC
>+ ret
>+ CFI_ENDPROC
>+ENDPROC(__get_user_8)
>+#else
>+ENTRY(__get_user_8)
>+ CFI_STARTPROC
>+ add $7,%_ASM_CX
>+ jc bad_get_user_8
>+ GET_THREAD_INFO(%_ASM_AX)
>+ cmp TI_addr_limit(%_ASM_AX),%_ASM_CX
>+ jae bad_get_user_8
>+ ASM_STAC
>+4: mov -7(%_ASM_CX),%eax
>+5: mov -3(%_ASM_CX),%edx
>+ xor %ecx,%ecx
> ASM_CLAC
> ret
> CFI_ENDPROC
>@@ -98,16 +113,31 @@ ENDPROC(__get_user_8)
>
> bad_get_user:
> CFI_STARTPROC
>- xor %edx,%edx
>- mov $(-EFAULT),%_ASM_AX
>+ xor %eax,%eax
>+ mov $(-EFAULT),%_ASM_CX
> ASM_CLAC
> ret
> CFI_ENDPROC
> END(bad_get_user)
>
>+#ifdef CONFIG_X86_32
>+bad_get_user_8:
>+ CFI_STARTPROC
>+ xor %eax,%eax
>+ xor %edx,%edx
>+ mov $(-EFAULT),%_ASM_CX
>+ ASM_CLAC
>+ ret
>+ CFI_ENDPROC
>+END(bad_get_user_8)
>+#endif
>+
> _ASM_EXTABLE(1b,bad_get_user)
> _ASM_EXTABLE(2b,bad_get_user)
> _ASM_EXTABLE(3b,bad_get_user)
> #ifdef CONFIG_X86_64
> _ASM_EXTABLE(4b,bad_get_user)
>+#else
>+ _ASM_EXTABLE(4b,bad_get_user_8)
>+ _ASM_EXTABLE(5b,bad_get_user_8)
> #endif
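For context, here is a minimal sketch of what the quoted patch enables on
x86-32. The function read_user_u64() and its names are hypothetical, not
part of the patch; it just shows a 64-bit get_user() working the same way
the 1/2/4-byte variants already do:

	#include <linux/types.h>
	#include <linux/uaccess.h>

	/* Hypothetical caller: copy a 64-bit value in from userspace. */
	static int read_user_u64(u64 __user *uptr, u64 *out)
	{
		u64 val;

		/*
		 * sizeof(*uptr) == 8, so on x86-32 this expands to a call
		 * to __get_user_8: the address is passed in %ecx, the
		 * error code comes back in %ecx, and the 64-bit value is
		 * returned in the %edx:%eax pair (the "=A" constraint).
		 */
		if (get_user(val, uptr))
			return -EFAULT;

		*out = val;
		return 0;
	}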

--
Sent from my mobile phone. Please excuse brevity and lack of formatting.