/*
 * kernel/sched/debug.c
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/proc_fs.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/mempolicy.h>
#include <linux/debugfs.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console:
 */
#define SEQ_printf(m, x...)			\
do {						\
	if (m)					\
		seq_printf(m, x);	\
	else					\
		printk(x);			\
} while (0)
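/*
 * Example: print_cpu() below is called with a real seq_file from the
 * /proc/sched_debug path, and with m == NULL from sysrq_sched_debug_show(),
 * in which case every SEQ_printf() falls back to printk().
 */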
/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
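/*
 * Worked example: for nsec == 3123456789ULL, nsec_high() yields the
 * quotient 3123 and nsec_low() the remainder 456789, so a "%Ld.%06ld"
 * format paired with SPLIT_NS() prints "3123.456789", i.e. the value in
 * milliseconds with six fractional digits.
 */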
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
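/*
 * Each SCHED_FEAT(name, enabled) entry in features.h expands to its
 * stringified name here; e.g. a hypothetical SCHED_FEAT(FOO, true)
 * entry would contribute the string "FOO" to this array.
 */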
static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}
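/*
 * Disabled features are listed with a NO_ prefix, so reading the file
 * produces e.g. "GENTLE_FAIR_SLEEPERS NO_NEXT_BUDDY ..." (the exact set
 * depends on features.h and any runtime toggles).
 */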
#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */
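/*
 * The ##enabled token paste selects the initializer for each key; e.g. a
 * hypothetical SCHED_FEAT(FOO, true) expands to jump_label_key__true,
 * i.e. STATIC_KEY_INIT_TRUE, so FOO's static key starts out enabled.
 */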
static int sched_feat_set(char *cmp)
{
	int neg = 0;
	int i;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg) {
				sysctl_sched_features &= ~(1UL << i);
				sched_feat_disable(i);
			} else {
				sysctl_sched_features |= (1UL << i);
				sched_feat_enable(i);
			}
			break;
		}
	}

	return i;
}
static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	struct inode *inode;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	inode_lock(inode);
	i = sched_feat_set(cmp);
	inode_unlock(inode);
	if (i == __SCHED_FEAT_NR)
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}
static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

__read_mostly bool sched_debug_enabled;

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	debugfs_create_bool("sched_debug", 0644, NULL,
			&sched_debug_enabled);

	return 0;
}
late_initcall(sched_init_debug);
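/*
 * With debugfs mounted (typically at /sys/kernel/debug), features can be
 * inspected and toggled at runtime, e.g.:
 *
 *	cat /sys/kernel/debug/sched_features
 *	echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 */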
#ifdef CONFIG_SMP
#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}
static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}
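/*
 * The resulting tree appears under /proc/sys, e.g.:
 *
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *
 * "cpu%d" and "domain%d" are the kstrdup()ed intermediate names freed
 * above; the leaf names are static strings with proc handlers.
 */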
static int min_load_idx = 0;
static int max_load_idx = CPU_LOAD_IDX_MAX-1;

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler,
		bool load_idx)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;

	if (load_idx) {
		entry->extra1 = &min_load_idx;
		entry->extra2 = &max_load_idx;
	}
}
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(14);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval", &sd->min_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[1], "max_interval", &sd->max_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[9], "cache_nice_tries",
		&sd->cache_nice_tries,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[10], "flags", &sd->flags,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[11], "max_newidle_lb_cost",
		&sd->max_newidle_lb_cost,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[12], "name", sd->name,
		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
	/* &table[13] is terminator */

	return table;
}
static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}
static cpumask_var_t		sd_sysctl_cpus;
static struct ctl_table_header	*sd_sysctl_header;

void register_sched_domain_sysctl(void)
{
	static struct ctl_table *cpu_entries;
	static struct ctl_table **cpu_idx;
	char buf[32];
	int i;

	if (!cpu_entries) {
		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
		if (!cpu_entries)
			return;

		WARN_ON(sd_ctl_dir[0].child);
		sd_ctl_dir[0].child = cpu_entries;
	}

	if (!cpu_idx) {
		struct ctl_table *e = cpu_entries;

		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
		if (!cpu_idx)
			return;

		/* deal with sparse possible map */
		for_each_possible_cpu(i) {
			cpu_idx[i] = e;
			e++;
		}
	}

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;

		/* init to possible to not have holes in @cpu_entries */
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	for_each_cpu(i, sd_sysctl_cpus) {
		struct ctl_table *e = cpu_idx[i];

		if (e->child)
			sd_free_ctl_entry(&e->child);

		if (!e->procname) {
			snprintf(buf, 32, "cpu%d", i);
			e->procname = kstrdup(buf, GFP_KERNEL);
		}
		e->mode = 0555;
		e->child = sd_alloc_ctl_cpu_table(i);

		__cpumask_clear_cpu(i, sd_sysctl_cpus);
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}
void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */
#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)schedstat_val(F))
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}

	P(se->load.weight);
	P(se->runnable_weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_load_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif
#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);

	return group_path;
}
#endif
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S           task   PID         tree-key  switches  prio"
		   "     wait-time             sum-exec        sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "----------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %ld\n", "runnable_weight", cfs_rq->runnable_weight);
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
			cfs_rq->avg.runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_sum",
			cfs_rq->removed.runnable_sum);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}
void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}
extern __read_mostly int sched_clock_running;

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}
static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}
static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);
}
/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the cpus.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
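/*
 * Worked example: at *offset == 0 the iterator hands back the header
 * token (1); at *offset == 1 it returns cpu 0 as (void *)2. If cpu 1 is
 * offline, *offset == 2 maps to cpu 2 (returned as 4), so holes in the
 * online mask are skipped; sched_debug_show() recovers the cpu number
 * as (unsigned long)(v - 2).
 */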
static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;

	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};
static int sched_debug_release(struct inode *inode, struct file *file)
{
	seq_release(inode, file);

	return 0;
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	int ret = 0;

	ret = seq_open(filp, &sched_debug_sops);

	return ret;
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= sched_debug_release,
};

static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;

	return 0;
}

__initcall(init_sched_debug_procfs);
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
}
#endif
static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}
void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define P_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
#define PN_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);
	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
	P(se.runnable_weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_load_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_load_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
#endif
	P(policy);
	P(prio);
	if (p->policy == SCHED_DEADLINE) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef PN
#undef __PN
#undef P_SCHEDSTAT
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}

	sched_show_numa(p, m);
}
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}