Date: 22 Sep 2014
From: Paolo Bonzini
Subject: Re: [PATCH] kvm: don't take vcpu mutex for obviously invalid vcpu ioctls
On 22/09/2014 15:45, Christian Borntraeger wrote:
> We now have an extra condition check for every valid ioctl, to make an error case go faster.
> I know, the extra check is just 1 or 2 cycles if branch prediction is right, but still.

I applied the patch because the delay could be substantial, depending on
what the other VCPU is doing. Perhaps something like this would be
better?

(Untested, but Tested-by/Reviewed-bys are welcome).

Paolo

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 84e24b210273..ed31760d79fe 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -117,12 +117,10 @@ bool kvm_is_mmio_pfn(pfn_t pfn)
 /*
  * Switches to specified vcpu, until a matching vcpu_put()
  */
-int vcpu_load(struct kvm_vcpu *vcpu)
+static void __vcpu_load(struct kvm_vcpu *vcpu)
 {
 	int cpu;
 
-	if (mutex_lock_killable(&vcpu->mutex))
-		return -EINTR;
 	if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
 		/* The thread running this VCPU changed. */
 		struct pid *oldpid = vcpu->pid;
@@ -136,6 +134,14 @@ int vcpu_load(struct kvm_vcpu *vcpu)
 	preempt_notifier_register(&vcpu->preempt_notifier);
 	kvm_arch_vcpu_load(vcpu, cpu);
 	put_cpu();
+}
+
+int vcpu_load(struct kvm_vcpu *vcpu)
+{
+	if (mutex_lock_killable(&vcpu->mutex))
+		return -EINTR;
+
+	__vcpu_load(vcpu);
 	return 0;
 }
 
@@ -1989,9 +1995,6 @@ static long kvm_vcpu_ioctl(struct file *filp,
 	if (vcpu->kvm->mm != current->mm)
 		return -EIO;
 
-	if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
-		return -EINVAL;
-
 #if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
 	/*
 	 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
@@ -2001,8 +2004,19 @@ static long kvm_vcpu_ioctl(struct file *filp,
 		return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
 #endif
 
+	if (!mutex_trylock(&vcpu->mutex)) {
+		/*
+		 * Before a potentially long sleep, check if we'd exit anyway.
+		 * The common case is for the mutex not to be contended, in
+		 * which case this adds no overhead.
+		 */
+		if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
+			return -EINVAL;
+
+		if (mutex_lock_killable(&vcpu->mutex))
+			return -EINTR;
+	}
+
 
-	r = vcpu_load(vcpu);
-	if (r)
-		return r;
+	__vcpu_load(vcpu);
 	switch (ioctl) {
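
For reference, here is a minimal, self-contained userspace sketch of the same locking idea (illustration only, not the kvm_main.c code; handle_request() and request_is_valid() are made-up placeholders standing in for kvm_vcpu_ioctl() and the _IOC_TYPE check): try the uncontended fast path first, and perform the cheap validity check only when we would otherwise block on the mutex.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Placeholder for the "obviously invalid request" test. */
static int request_is_valid(unsigned int req)
{
	return req != 0;
}

static int handle_request(unsigned int req)
{
	if (pthread_mutex_trylock(&lock) != 0) {
		/* Contended: reject bogus requests before sleeping on the lock. */
		if (!request_is_valid(req))
			return -EINVAL;
		pthread_mutex_lock(&lock);
	}

	/* ... real work under the lock would go here ... */
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	/* Build with: cc -pthread sketch.c */
	printf("valid: %d, invalid: %d\n", handle_request(1), handle_request(0));
	return 0;
}

In this single-threaded demo the trylock always succeeds, so both calls take the fast path; the validity check only runs when another thread holds the lock. That is the point of the approach above: the uncontended case pays nothing extra.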

