Subject: Re: [PATCH 2/8] KVM: x86 emulator: use aligned variants of SSE register ops
On 08/30/2012 02:30 AM, Mathias Krause wrote:
> As the compiler ensures that the memory operand is always aligned
> to a 16 byte memory location,

I'm not sure it does. Is V4SI aligned? Do we use alignof() to
propagate the alignment to the vcpu allocation code?
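
For concreteness, a minimal userspace sketch of the properties in question,
assuming sse128_t is the 16-byte GCC vector type used by the emulator; the
struct and field names below are made up for illustration, and the final
malloc() check is only a stand-in for whatever alignment the vcpu allocation
actually provides:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Assumed to match the kernel's definition of sse128_t (a 16-byte GCC
 * vector type); the struct below is purely illustrative, not the real
 * emulator context / vcpu layout. */
typedef unsigned int sse128_t __attribute__((vector_size(16)));

struct fake_emulate_ctxt {
	char pad[24];		/* stand-in for unrelated leading members */
	sse128_t regs[16];	/* image of the SSE register file */
};

/* The vector type itself carries 16-byte alignment ... */
_Static_assert(__alignof__(sse128_t) == 16,
	       "sse128_t is not 16-byte aligned");
/* ... and the compiler pads the struct so the array lands on a 16-byte
 * boundary relative to the start of the struct. */
_Static_assert(offsetof(struct fake_emulate_ctxt, regs) % 16 == 0,
	       "regs[] is not on a 16-byte boundary");

int main(void)
{
	/* The static checks only cover the type and the layout; movdqa is
	 * safe only if the allocation itself is 16-byte aligned as well,
	 * which is what the question about the vcpu allocation code boils
	 * down to. */
	struct fake_emulate_ctxt *ctxt = malloc(sizeof(*ctxt));

	if (!ctxt)
		return 1;
	printf("regs at %p, 16-byte aligned: %s\n",
	       (void *)ctxt->regs,
	       ((unsigned long)ctxt->regs & 15) == 0 ? "yes" : "no");
	free(ctxt);
	return 0;
}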

> use the aligned variant of MOVDQ for
> read_sse_reg() and write_sse_reg().
>
> diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
> index 1451cff..5a0fee1 100644
> --- a/arch/x86/kvm/emulate.c
> +++ b/arch/x86/kvm/emulate.c
> @@ -909,23 +909,23 @@ static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
> {
> ctxt->ops->get_fpu(ctxt);
> switch (reg) {
> - case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
> - case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
> - case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
> - case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
> - case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
> - case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
> - case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
> - case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
> + case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
> + case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
> + case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
> + case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
> + case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
> + case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
> + case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
> + case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
> #ifdef CONFIG_X86_64
> - case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
> - case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
> - case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
> - case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
> - case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
> - case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
> - case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
> - case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
> + case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
> + case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
> + case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
> + case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
> + case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
> + case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
> + case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
> + case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
> #endif
> default: BUG();


The vmexit cost dominates any win here by several orders of magnitude.


--
error compiling committee.c: too many arguments to function

