From: Vitaly Mayatskikh <>
Subject: [PATCH] x86: Optimize tail handling for copy_user
Date: Mon, 28 Jul 2008 16:10:30 +0200
Reduce the number of protection faults taken in the copy_user_handle_tail routine by limiting the clear length to the end of the page, as suggested by Linus.
Linus, should I add a "Signed-off-by: you" line for patches that implement your ideas? The clear length calculation was changed a bit for correct handling of page-aligned addresses.
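For illustration only, here is a minimal userspace sketch of the clamping idea; the helper name, the fixed 4096-byte PAGE_SIZE and the small test driver are not part of the patch, and the bitwise ANDs are parenthesized explicitly since != binds more tightly than & in C:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Illustrative helper: how many of 'len' bytes starting at 'to' may be
 * cleared without running past the last page boundary the range crosses. */
static unsigned long clear_len_to_page_end(unsigned long to, unsigned long len)
{
	unsigned long end = to + len;

	/* If the range ends in a different page than it starts in, drop the
	 * bytes that lie past the boundary. An end that sits exactly on a
	 * page boundary subtracts zero, so the full length is kept. */
	if ((end & PAGE_MASK) != (to & PAGE_MASK))
		len -= end & ~PAGE_MASK;
	return len;
}

int main(void)
{
	/* 100 bytes starting 40 bytes before a page boundary: only 40 cleared */
	printf("%lu\n", clear_len_to_page_end(PAGE_SIZE - 40, 100));
	/* range ending exactly on a page boundary: all 64 bytes cleared */
	printf("%lu\n", clear_len_to_page_end(2 * PAGE_SIZE - 64, 64));
	return 0;
}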
I'm using this systemtap script for sanity testing: http://people.redhat.com/vmayatsk/copy_user_x8664/ It looks very ugly, but works.
Signed-off-by: Vitaly Mayatskikh <v.mayatskih@gmail.com>
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index f4df6e7..42baeca 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -161,23 +161,33 @@ EXPORT_SYMBOL(copy_in_user);
 /*
  * Try to copy last bytes and clear the rest if needed.
  * Since protection fault in copy_from/to_user is not a normal situation,
- * it is not necessary to optimize tail handling.
+ * it is not necessary to do low level optimization of tail handling.
  */
 unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
+copy_user_handle_tail(char *to, char *from, unsigned len, unsigned clear_rest)
 {
 	char c;
-	unsigned zero_len;
+	unsigned clear_len;
 
-	for (; len; --len) {
-		if (__get_user_nocheck(c, from++, sizeof(char)))
-			break;
-		if (__put_user_nocheck(c, to++, sizeof(char)))
+	while (len) {
+		if (__get_user_nocheck(c, from, 1))
 			break;
+		from++;
+		if (__put_user_nocheck(c, to, 1))
+			/* Fault in destination, nothing to clear */
+			goto out;
+		to++;
+		len--;
 	}
 
-	for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
-		if (__put_user_nocheck(c, to++, sizeof(char)))
-			break;
+	if (clear_rest) {
+		unsigned long addr = (unsigned long)to;
+		clear_len = len;
+		/* Limit clear_len to the rest of the page */
+		if ((addr + len) & PAGE_MASK != addr & PAGE_MASK)
+			clear_len = len - ((addr + len) & ~PAGE_MASK);
+		memset(to, 0, clear_len);
+	}
+out:
 	return len;
 }
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index 5cfd295..c5c5af0 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -196,6 +196,6 @@ static inline int __copy_from_user_inatomic_nocache(void *dst,
 }
 
 unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
+copy_user_handle_tail(char *to, char *from, unsigned len, unsigned clear_rest);
 
 #endif /* ASM_X86__UACCESS_64_H */
-- 
wbr, Vitaly