Subject: Re: [RFC PATCH v6 2/5] perf sched: reorganize sched-out task state report code
On Thu,  3 Aug 2023 04:33:49 -0400
Ze Gao <zegao2021@gmail.com> wrote:

> Mainly does housekeeping work and does not introduce any
> functional change.

This change log doesn't explain at all why this patch is needed, let alone
what it is even doing.

-- Steve


>
> Signed-off-by: Ze Gao <zegao@tencent.com>
> ---
> tools/perf/builtin-sched.c | 59 +++++++++++++++++---------------------
> 1 file changed, 26 insertions(+), 33 deletions(-)
>
> diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
> index 8dc8f071721c..5042874ba204 100644
> --- a/tools/perf/builtin-sched.c
> +++ b/tools/perf/builtin-sched.c
> @@ -94,11 +94,6 @@ struct sched_atom {
>
> #define TASK_STATE_TO_CHAR_STR "RSDTtXZPI"
>
> -/* task state bitmask, copied from include/linux/sched.h */
> -#define TASK_RUNNING 0
> -#define TASK_INTERRUPTIBLE 1
> -#define TASK_UNINTERRUPTIBLE 2
> -
> enum thread_state {
> THREAD_SLEEPING = 0,
> THREAD_WAIT_CPU,
> @@ -255,7 +250,7 @@ struct thread_runtime {
> u64 total_preempt_time;
> u64 total_delay_time;
>
> - int last_state;
> + char last_state;
>
> char shortname[3];
> bool comm_changed;
> @@ -425,7 +420,7 @@ static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *t
> }
>
> static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
> - u64 timestamp, u64 task_state __maybe_unused)
> + u64 timestamp, char task_state __maybe_unused)
> {
> struct sched_atom *event = get_new_event(task, timestamp);
>
> @@ -840,6 +835,22 @@ replay_wakeup_event(struct perf_sched *sched,
> return 0;
> }
>
> +static inline char task_state_char(int state)
> +{
> + static const char state_to_char[] = "RSDTtXZPI";
> + unsigned int bit = state ? ffs(state) : 0;
> +
> + return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
> +}
> +
> +static inline char get_task_prev_state(struct evsel *evsel,
> + struct perf_sample *sample)
> +{
> + const int prev_state = evsel__intval(evsel, sample, "prev_state");
> +
> + return task_state_char(prev_state);
> +}
> +
> static int replay_switch_event(struct perf_sched *sched,
> struct evsel *evsel,
> struct perf_sample *sample,
> @@ -849,7 +860,7 @@ static int replay_switch_event(struct perf_sched *sched,
> *next_comm = evsel__strval(evsel, sample, "next_comm");
> const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
> next_pid = evsel__intval(evsel, sample, "next_pid");
> - const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
> + const char prev_state = get_task_prev_state(evsel, sample);
> struct task_desc *prev, __maybe_unused *next;
> u64 timestamp0, timestamp = sample->time;
> int cpu = sample->cpu;
> @@ -1039,12 +1050,6 @@ static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
> return 0;
> }
>
> -static char sched_out_state(u64 prev_state)
> -{
> - const char *str = TASK_STATE_TO_CHAR_STR;
> -
> - return str[prev_state];
> -}
>
> static int
> add_sched_out_event(struct work_atoms *atoms,
> @@ -1121,7 +1126,7 @@ static int latency_switch_event(struct perf_sched *sched,
> {
> const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
> next_pid = evsel__intval(evsel, sample, "next_pid");
> - const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
> + const char prev_state = get_task_prev_state(evsel, sample);
> struct work_atoms *out_events, *in_events;
> struct thread *sched_out, *sched_in;
> u64 timestamp0, timestamp = sample->time;
> @@ -1157,7 +1162,7 @@ static int latency_switch_event(struct perf_sched *sched,
> goto out_put;
> }
> }
> - if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
> + if (add_sched_out_event(out_events, prev_state, timestamp))
> return -1;
>
> in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
> @@ -2022,24 +2027,12 @@ static void timehist_header(struct perf_sched *sched)
> printf("\n");
> }
>
> -static char task_state_char(struct thread *thread, int state)
> -{
> - static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
> - unsigned bit = state ? ffs(state) : 0;
> -
> - /* 'I' for idle */
> - if (thread__tid(thread) == 0)
> - return 'I';
> -
> - return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
> -}
> -
> static void timehist_print_sample(struct perf_sched *sched,
> struct evsel *evsel,
> struct perf_sample *sample,
> struct addr_location *al,
> struct thread *thread,
> - u64 t, int state)
> + u64 t, char state)
> {
> struct thread_runtime *tr = thread__priv(thread);
> const char *next_comm = evsel__strval(evsel, sample, "next_comm");
> @@ -2080,7 +2073,7 @@ static void timehist_print_sample(struct perf_sched *sched,
> print_sched_time(tr->dt_run, 6);
>
> if (sched->show_state)
> - printf(" %5c ", task_state_char(thread, state));
> + printf(" %5c ", thread->tid == 0 ? 'I' : state);
>
> if (sched->show_next) {
> snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
> @@ -2152,9 +2145,9 @@ static void timehist_update_runtime_stats(struct thread_runtime *r,
> else if (r->last_time) {
> u64 dt_wait = tprev - r->last_time;
>
> - if (r->last_state == TASK_RUNNING)
> + if (r->last_state == 'R')
> r->dt_preempt = dt_wait;
> - else if (r->last_state == TASK_UNINTERRUPTIBLE)
> + else if (r->last_state == 'D')
> r->dt_iowait = dt_wait;
> else
> r->dt_sleep = dt_wait;
> @@ -2579,7 +2572,7 @@ static int timehist_sched_change_event(struct perf_tool *tool,
> struct thread_runtime *tr = NULL;
> u64 tprev, t = sample->time;
> int rc = 0;
> - int state = evsel__intval(evsel, sample, "prev_state");
> + const char state = get_task_prev_state(evsel, sample);
>
> addr_location__init(&al);
> if (machine__resolve(machine, &al, sample) < 0) {
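
For anyone reading along, here is a minimal standalone sketch (my own illustration, not code from the patch) of what the new task_state_char() helper does: it turns the raw prev_state value carried by the sched_switch tracepoint into the one-letter state that the rest of builtin-sched.c now passes around as a char. The main() driver and the sample values are made up purely for demonstration.

/*
 * Illustration only: the mapping logic mirrors the task_state_char()
 * added by the patch; the harness around it is hypothetical.
 */
#include <stdio.h>
#include <strings.h>	/* ffs() */

static inline char task_state_char(int state)
{
	static const char state_to_char[] = "RSDTtXZPI";
	unsigned int bit = state ? ffs(state) : 0;

	/* single-bit states index successive letters; anything else is '?' */
	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}

int main(void)
{
	/*
	 * Per the defines the patch removes: 0 is TASK_RUNNING ('R'),
	 * 1 is TASK_INTERRUPTIBLE ('S'), 2 is TASK_UNINTERRUPTIBLE ('D').
	 * 0x200 has no letter in the table and falls back to '?'.
	 */
	const int samples[] = { 0x0, 0x1, 0x2, 0x4, 0x40, 0x80, 0x200 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("prev_state=0x%03x -> '%c'\n",
		       samples[i], task_state_char(samples[i]));

	return 0;
}

Note the difference from the removed sched_out_state(), which indexed TASK_STATE_TO_CHAR_STR with the raw prev_state value rather than with its bit position as the ffs()-based helper does.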
