Subject: [PATCH 1/1] x86: Replace mov $0, %reg with xor %reg, %reg
Replace mov $0, %reg with xor %reg, %reg because the xor form has a
shorter encoding (2 bytes instead of 5 for a 32-bit register), which
saves space.

asm:
ba 00 00 00 00    movl $0x0,%edx
31 d2             xorl %edx,%edx
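
For reference, the two encodings can be reproduced with GNU as and
objdump (a quick local check, not part of the patch; the file name
zero.S is only an example):

    $ cat zero.S
            movl    $0x0, %edx      # 5-byte immediate form
            xorl    %edx, %edx      # 2-byte zeroing idiom
    $ as -o zero.o zero.S
    $ objdump -d zero.o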

Suggested-by: Ammar Faizi <ammarfaizi2@gnuweeb.org>
Signed-off-by: Kanna Scarlet <knscarlet@gnuweeb.org>
---
arch/x86/boot/compressed/head_64.S | 2 +-
arch/x86/boot/compressed/mem_encrypt.S | 2 +-
arch/x86/kernel/ftrace_32.S | 4 ++--
arch/x86/kernel/head_64.S | 2 +-
arch/x86/math-emu/div_Xsig.S | 2 +-
arch/x86/math-emu/reg_u_sub.S | 2 +-
6 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index d33f060900d2..39442e7f5993 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -666,7 +666,7 @@ SYM_CODE_START(trampoline_32bit_src)
movl %cr4, %eax
andl $X86_CR4_MCE, %eax
#else
- movl $0, %eax
+ xorl %eax, %eax
#endif

/* Enable PAE and LA57 (if required) paging modes */
diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
index a73e4d783cae..d1e4d3aa8395 100644
--- a/arch/x86/boot/compressed/mem_encrypt.S
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -111,7 +111,7 @@ SYM_CODE_START(startup32_vc_handler)
cmpl $0x72, 16(%esp)
jne .Lfail

- movl $0, %eax # Request CPUID[fn].EAX
+ xorl %eax, %eax # Request CPUID[fn].EAX
movl %ebx, %edx # CPUID fn
call sev_es_req_cpuid # Call helper
testl %eax, %eax # Check return code
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index a0ed0e4a2c0c..cff7decb58be 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -171,7 +171,7 @@ SYM_CODE_START(ftrace_graph_caller)
movl 3*4(%esp), %eax
/* Even with frame pointers, fentry doesn't have one here */
lea 4*4(%esp), %edx
- movl $0, %ecx
+ xorl %ecx, %ecx
subl $MCOUNT_INSN_SIZE, %eax
call prepare_ftrace_return
popl %edx
@@ -184,7 +184,7 @@ SYM_CODE_END(ftrace_graph_caller)
return_to_handler:
pushl %eax
pushl %edx
- movl $0, %eax
+ xorl %eax, %eax
call ftrace_return_to_handler
movl %eax, %ecx
popl %edx
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index d860d437631b..eeb06047e30a 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -184,7 +184,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
movq %cr4, %rcx
andl $X86_CR4_MCE, %ecx
#else
- movl $0, %ecx
+ xorl %ecx, %ecx
#endif

/* Enable PAE mode, PGE and LA57 */
diff --git a/arch/x86/math-emu/div_Xsig.S b/arch/x86/math-emu/div_Xsig.S
index 8c270ab415be..5767b4d23954 100644
--- a/arch/x86/math-emu/div_Xsig.S
+++ b/arch/x86/math-emu/div_Xsig.S
@@ -122,7 +122,7 @@ SYM_FUNC_START(div_Xsig)
movl XsigLL(%esi),%eax
rcrl %eax
movl %eax,FPU_accum_1
- movl $0,%eax
+ xorl %eax,%eax
rcrl %eax
movl %eax,FPU_accum_0

diff --git a/arch/x86/math-emu/reg_u_sub.S b/arch/x86/math-emu/reg_u_sub.S
index 4c900c29e4ff..130b49fa1ca2 100644
--- a/arch/x86/math-emu/reg_u_sub.S
+++ b/arch/x86/math-emu/reg_u_sub.S
@@ -212,7 +212,7 @@ L_must_be_zero:
L_shift_32:
movl %ebx,%eax
movl %edx,%ebx
- movl $0,%edx
+ xorl %edx,%edx
subw $32,EXP(%edi) /* Can get underflow here */

/* We need to shift left by 1 - 31 bits */
--
Kanna Scarlet