Subject: [PATCH 2/5] Expand the steal time msr to also contain the consigned time.
From: Michael Wolf <mjw@linux.vnet.ibm.com>
Date: 26 Nov 2012
Add a consigned field.  This field will hold the time lost due to capping or
overcommit; the rest of the time will still show up in the steal-time field.
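
As an illustration of the reworked interface (a hypothetical caller, not part
of this patch), the steal value is now returned through a pointer argument
rather than as the function's return value:

  u64 steal = 0;

  if (static_key_false(&paravirt_steal_enabled))
          /* filled in by the registered steal_clock implementation */
          paravirt_steal_clock(smp_processor_id(), &steal);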

Signed-off-by: Michael Wolf <mjw@linux.vnet.ibm.com>
---
 arch/x86/include/asm/paravirt.h       |    4 ++--
 arch/x86/include/asm/paravirt_types.h |    2 +-
 arch/x86/kernel/kvm.c                 |    7 ++-----
 kernel/sched/core.c                   |   10 +++++++++-
 kernel/sched/cputime.c                |    2 +-
 5 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index a0facf3..a5f9f30 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -196,9 +196,9 @@ struct static_key;
 extern struct static_key paravirt_steal_enabled;
 extern struct static_key paravirt_steal_rq_enabled;
 
-static inline u64 paravirt_steal_clock(int cpu)
+static inline void paravirt_steal_clock(int cpu, u64 *steal)
 {
-	return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
+	PVOP_VCALL2(pv_time_ops.steal_clock, cpu, steal);
 }
 
 static inline unsigned long long paravirt_read_pmc(int counter)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 142236e..5d4fc8b 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -95,7 +95,7 @@ struct pv_lazy_ops {

 struct pv_time_ops {
 	unsigned long long (*sched_clock)(void);
-	unsigned long long (*steal_clock)(int cpu);
+	void (*steal_clock)(int cpu, unsigned long long *steal);
 	unsigned long (*get_tsc_khz)(void);
 };

diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 4180a87..ac357b3 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -372,9 +372,8 @@ static struct notifier_block kvm_pv_reboot_nb = {
 	.notifier_call = kvm_pv_reboot_notify,
 };
 
-static u64 kvm_steal_clock(int cpu)
+static void kvm_steal_clock(int cpu, u64 *steal)
 {
-	u64 steal;
 	struct kvm_steal_time *src;
 	int version;
 
@@ -382,11 +381,9 @@ static u64 kvm_steal_clock(int cpu)
 	do {
 		version = src->version;
 		rmb();
-		steal = src->steal;
+		*steal = src->steal;
 		rmb();
 	} while ((version & 1) || (version != src->version));
-
-	return steal;
 }
 
 void kvm_disable_steal_time(void)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c2e077c..b21d92d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -748,6 +748,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
  */
 #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
 	s64 steal = 0, irq_delta = 0;
+	u64 consigned = 0;
 #endif
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
@@ -776,8 +777,15 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
 	if (static_key_false((&paravirt_steal_rq_enabled))) {
 		u64 st;
+		u64 cs;
 
-		steal = paravirt_steal_clock(cpu_of(rq));
+		paravirt_steal_clock(cpu_of(rq), &steal, &consigned);
+		/*
+		 * since we are not assigning the steal time to cpustats
+		 * here, just combine the steal and consigned times to
+		 * do the rest of the calculations.
+		 */
+		steal += consigned;
 		steal -= rq->prev_steal_time_rq;
 
 		if (unlikely(steal > delta))
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 8d859da..593b647 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -275,7 +275,7 @@ static __always_inline bool steal_account_process_tick(void)
 	if (static_key_false(&paravirt_steal_enabled)) {
 		u64 steal, st = 0;
 
-		steal = paravirt_steal_clock(smp_processor_id());
+		paravirt_steal_clock(smp_processor_id(), &steal);
 		steal -= this_rq()->prev_steal_time;
 
 		st = steal_ticks(steal);

