2 * turbostat -- show CPU frequency and C-state residency
3 * on modern Intel turbo-capable processors.
5 * Copyright (c) 2013 Intel Corporation.
6 * Len Brown <len.brown@intel.com>
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
28 #include <sys/types.h>
31 #include <sys/resource.h>
42 #include <linux/capability.h>
45 char *proc_stat = "/proc/stat";
46 unsigned int interval_sec = 5;
48 unsigned int rapl_joules;
49 unsigned int summary_only;
50 unsigned int dump_only;
53 unsigned int do_nhm_cstates;
54 unsigned int do_snb_cstates;
55 unsigned int do_knl_cstates;
60 unsigned int do_c8_c9_c10;
61 unsigned int do_skl_residency;
62 unsigned int do_slm_cstates;
63 unsigned int use_c1_residency_msr;
64 unsigned int has_aperf;
66 unsigned int units = 1000000; /* MHz etc */
67 unsigned int genuine_intel;
68 unsigned int has_invariant_tsc;
69 unsigned int do_nhm_platform_info;
70 unsigned int extra_msr_offset32;
71 unsigned int extra_msr_offset64;
72 unsigned int extra_delta_offset32;
73 unsigned int extra_delta_offset64;
74 unsigned int aperf_mperf_multiplier = 1;
77 unsigned int show_pkg;
78 unsigned int show_core;
79 unsigned int show_cpu;
80 unsigned int show_pkg_only;
81 unsigned int show_core_only;
82 char *output_buffer, *outp;
86 unsigned int tcc_activation_temp;
87 unsigned int tcc_activation_temp_override;
88 double rapl_power_units, rapl_time_units;
89 double rapl_dram_energy_units, rapl_energy_units;
90 double rapl_joule_counter_range;
91 unsigned int do_core_perf_limit_reasons;
92 unsigned int do_gfx_perf_limit_reasons;
93 unsigned int do_ring_perf_limit_reasons;
94 unsigned int crystal_hz;
95 unsigned long long tsc_hz;
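/*
 * do_rapl is a bit-mask of the RAPL domains supported on this CPU;
 * each flag below is annotated with the MSRs that belong to it.
 */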
98 #define RAPL_PKG (1 << 0)
99 /* 0x610 MSR_PKG_POWER_LIMIT */
100 /* 0x611 MSR_PKG_ENERGY_STATUS */
101 #define RAPL_PKG_PERF_STATUS (1 << 1)
102 /* 0x613 MSR_PKG_PERF_STATUS */
103 #define RAPL_PKG_POWER_INFO (1 << 2)
104 /* 0x614 MSR_PKG_POWER_INFO */
106 #define RAPL_DRAM (1 << 3)
107 /* 0x618 MSR_DRAM_POWER_LIMIT */
108 /* 0x619 MSR_DRAM_ENERGY_STATUS */
109 #define RAPL_DRAM_PERF_STATUS (1 << 4)
110 /* 0x61b MSR_DRAM_PERF_STATUS */
111 #define RAPL_DRAM_POWER_INFO (1 << 5)
112 /* 0x61c MSR_DRAM_POWER_INFO */
114 #define RAPL_CORES (1 << 6)
115 /* 0x638 MSR_PP0_POWER_LIMIT */
116 /* 0x639 MSR_PP0_ENERGY_STATUS */
117 #define RAPL_CORE_POLICY (1 << 7)
118 /* 0x63a MSR_PP0_POLICY */
120 #define RAPL_GFX (1 << 8)
121 /* 0x640 MSR_PP1_POWER_LIMIT */
122 /* 0x641 MSR_PP1_ENERGY_STATUS */
123 /* 0x642 MSR_PP1_POLICY */
124 #define TJMAX_DEFAULT 100
126 #define MAX(a, b) ((a) > (b) ? (a) : (b))
128 int aperf_mperf_unstable;
132 cpu_set_t *cpu_present_set, *cpu_affinity_set;
133 size_t cpu_present_setsize, cpu_affinity_setsize;
136 unsigned long long tsc;
137 unsigned long long aperf;
138 unsigned long long mperf;
139 unsigned long long c1;
140 unsigned long long extra_msr64;
141 unsigned long long extra_delta64;
142 unsigned long long extra_msr32;
143 unsigned long long extra_delta32;
144 unsigned int smi_count;
147 #define CPU_IS_FIRST_THREAD_IN_CORE 0x2
148 #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4
149 } *thread_even, *thread_odd;
152 unsigned long long c3;
153 unsigned long long c6;
154 unsigned long long c7;
155 unsigned int core_temp_c;
156 unsigned int core_id;
157 } *core_even, *core_odd;
160 unsigned long long pc2;
161 unsigned long long pc3;
162 unsigned long long pc6;
163 unsigned long long pc7;
164 unsigned long long pc8;
165 unsigned long long pc9;
166 unsigned long long pc10;
167 unsigned long long pkg_wtd_core_c0;
168 unsigned long long pkg_any_core_c0;
169 unsigned long long pkg_any_gfxe_c0;
170 unsigned long long pkg_both_core_gfxe_c0;
171 unsigned int package_id;
172 unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */
173 unsigned int energy_dram; /* MSR_DRAM_ENERGY_STATUS */
174 unsigned int energy_cores; /* MSR_PP0_ENERGY_STATUS */
175 unsigned int energy_gfx; /* MSR_PP1_ENERGY_STATUS */
176 unsigned int rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */
177 unsigned int rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */
178 unsigned int pkg_temp_c;
180 } *package_even, *package_odd;
182 #define ODD_COUNTERS thread_odd, core_odd, package_odd
183 #define EVEN_COUNTERS thread_even, core_even, package_even
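/*
 * Counters are stored in flat arrays; the macros below index them by
 * (package, core, thread) position using the sizes recorded in topo.
 */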
185 #define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
186 (thread_base + (pkg_no) * topo.num_cores_per_pkg * \
187 topo.num_threads_per_core + \
188 (core_no) * topo.num_threads_per_core + (thread_no))
189 #define GET_CORE(core_base, core_no, pkg_no) \
190 (core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no))
191 #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
193 struct system_summary {
194 struct thread_data threads;
195 struct core_data cores;
196 struct pkg_data packages;
205 int num_cores_per_pkg;
206 int num_threads_per_core;
209 struct timeval tv_even, tv_odd, tv_delta;
211 void setup_all_buffers(void);
213 int cpu_is_not_present(int cpu)
215 return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
218 * run func(thread, core, package) in topology order
219 * skip non-present cpus
222 int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *),
223 struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
225 int retval, pkg_no, core_no, thread_no;
227 for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
228 for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
229 for (thread_no = 0; thread_no <
230 topo.num_threads_per_core; ++thread_no) {
231 struct thread_data *t;
235 t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
237 if (cpu_is_not_present(t->cpu_id))
240 c = GET_CORE(core_base, core_no, pkg_no);
241 p = GET_PKG(pkg_base, pkg_no);
243 retval = func(t, c, p);
252 int cpu_migrate(int cpu)
254 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
255 CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
256 if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
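/*
 * get_msr() reads one MSR on the given CPU via the msr driver:
 * /dev/cpu/<cpu>/msr is pread() at an offset equal to the MSR address.
 */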
262 int get_msr(int cpu, off_t offset, unsigned long long *msr)
268 sprintf(pathname, "/dev/cpu/%d/msr", cpu);
269 fd = open(pathname, O_RDONLY);
271 err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
273 retval = pread(fd, msr, sizeof *msr, offset);
276 if (retval != sizeof *msr)
277 err(-1, "%s offset 0x%llx read failed", pathname, (unsigned long long)offset);
283 * Example Format w/ field column widths:
285 * Package Core CPU Avg_MHz Bzy_MHz TSC_MHz SMI %Busy CPU_%c1 CPU_%c3 CPU_%c6 CPU_%c7 CoreTmp PkgTmp Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt
286 * 123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678123456781234567812345678
289 void print_header(void)
292 outp += sprintf(outp, " Package");
294 outp += sprintf(outp, " Core");
296 outp += sprintf(outp, " CPU");
298 outp += sprintf(outp, " Avg_MHz");
300 outp += sprintf(outp, " %%Busy");
302 outp += sprintf(outp, " Bzy_MHz");
303 outp += sprintf(outp, " TSC_MHz");
305 if (extra_delta_offset32)
306 outp += sprintf(outp, " count 0x%03X", extra_delta_offset32);
307 if (extra_delta_offset64)
308 outp += sprintf(outp, " COUNT 0x%03X", extra_delta_offset64);
309 if (extra_msr_offset32)
310 outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset32);
311 if (extra_msr_offset64)
312 outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset64);
318 outp += sprintf(outp, " SMI");
321 outp += sprintf(outp, " CPU%%c1");
322 if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates)
323 outp += sprintf(outp, " CPU%%c3");
325 outp += sprintf(outp, " CPU%%c6");
327 outp += sprintf(outp, " CPU%%c7");
330 outp += sprintf(outp, " CoreTmp");
332 outp += sprintf(outp, " PkgTmp");
334 if (do_skl_residency) {
335 outp += sprintf(outp, " Totl%%C0");
336 outp += sprintf(outp, " Any%%C0");
337 outp += sprintf(outp, " GFX%%C0");
338 outp += sprintf(outp, " CPUGFX%%");
342 outp += sprintf(outp, " Pkg%%pc2");
344 outp += sprintf(outp, " Pkg%%pc3");
346 outp += sprintf(outp, " Pkg%%pc6");
348 outp += sprintf(outp, " Pkg%%pc7");
350 outp += sprintf(outp, " Pkg%%pc8");
351 outp += sprintf(outp, " Pkg%%pc9");
352 outp += sprintf(outp, " Pk%%pc10");
355 if (do_rapl && !rapl_joules) {
356 if (do_rapl & RAPL_PKG)
357 outp += sprintf(outp, " PkgWatt");
358 if (do_rapl & RAPL_CORES)
359 outp += sprintf(outp, " CorWatt");
360 if (do_rapl & RAPL_GFX)
361 outp += sprintf(outp, " GFXWatt");
362 if (do_rapl & RAPL_DRAM)
363 outp += sprintf(outp, " RAMWatt");
364 if (do_rapl & RAPL_PKG_PERF_STATUS)
365 outp += sprintf(outp, " PKG_%%");
366 if (do_rapl & RAPL_DRAM_PERF_STATUS)
367 outp += sprintf(outp, " RAM_%%");
368 } else if (do_rapl && rapl_joules) {
369 if (do_rapl & RAPL_PKG)
370 outp += sprintf(outp, " Pkg_J");
371 if (do_rapl & RAPL_CORES)
372 outp += sprintf(outp, " Cor_J");
373 if (do_rapl & RAPL_GFX)
374 outp += sprintf(outp, " GFX_J");
375 if (do_rapl & RAPL_DRAM)
376 outp += sprintf(outp, " RAM_J");
377 if (do_rapl & RAPL_PKG_PERF_STATUS)
378 outp += sprintf(outp, " PKG_%%");
379 if (do_rapl & RAPL_DRAM_PERF_STATUS)
380 outp += sprintf(outp, " RAM_%%");
381 outp += sprintf(outp, " time");
385 outp += sprintf(outp, "\n");
388 int dump_counters(struct thread_data *t, struct core_data *c,
391 outp += sprintf(outp, "t %p, c %p, p %p\n", t, c, p);
394 outp += sprintf(outp, "CPU: %d flags 0x%x\n",
395 t->cpu_id, t->flags);
396 outp += sprintf(outp, "TSC: %016llX\n", t->tsc);
397 outp += sprintf(outp, "aperf: %016llX\n", t->aperf);
398 outp += sprintf(outp, "mperf: %016llX\n", t->mperf);
399 outp += sprintf(outp, "c1: %016llX\n", t->c1);
400 outp += sprintf(outp, "msr0x%x: %08llX\n",
401 extra_delta_offset32, t->extra_delta32);
402 outp += sprintf(outp, "msr0x%x: %016llX\n",
403 extra_delta_offset64, t->extra_delta64);
404 outp += sprintf(outp, "msr0x%x: %08llX\n",
405 extra_msr_offset32, t->extra_msr32);
406 outp += sprintf(outp, "msr0x%x: %016llX\n",
407 extra_msr_offset64, t->extra_msr64);
409 outp += sprintf(outp, "SMI: %08X\n", t->smi_count);
413 outp += sprintf(outp, "core: %d\n", c->core_id);
414 outp += sprintf(outp, "c3: %016llX\n", c->c3);
415 outp += sprintf(outp, "c6: %016llX\n", c->c6);
416 outp += sprintf(outp, "c7: %016llX\n", c->c7);
417 outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c);
421 outp += sprintf(outp, "package: %d\n", p->package_id);
423 outp += sprintf(outp, "Weighted cores: %016llX\n", p->pkg_wtd_core_c0);
424 outp += sprintf(outp, "Any cores: %016llX\n", p->pkg_any_core_c0);
425 outp += sprintf(outp, "Any GFX: %016llX\n", p->pkg_any_gfxe_c0);
426 outp += sprintf(outp, "CPU + GFX: %016llX\n", p->pkg_both_core_gfxe_c0);
428 outp += sprintf(outp, "pc2: %016llX\n", p->pc2);
430 outp += sprintf(outp, "pc3: %016llX\n", p->pc3);
432 outp += sprintf(outp, "pc6: %016llX\n", p->pc6);
434 outp += sprintf(outp, "pc7: %016llX\n", p->pc7);
435 outp += sprintf(outp, "pc8: %016llX\n", p->pc8);
436 outp += sprintf(outp, "pc9: %016llX\n", p->pc9);
437 outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
438 outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg);
439 outp += sprintf(outp, "Joules COR: %0X\n", p->energy_cores);
440 outp += sprintf(outp, "Joules GFX: %0X\n", p->energy_gfx);
441 outp += sprintf(outp, "Joules RAM: %0X\n", p->energy_dram);
442 outp += sprintf(outp, "Throttle PKG: %0X\n",
443 p->rapl_pkg_perf_status);
444 outp += sprintf(outp, "Throttle RAM: %0X\n",
445 p->rapl_dram_perf_status);
446 outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c);
449 outp += sprintf(outp, "\n");
455 * column formatting convention & formats
457 int format_counters(struct thread_data *t, struct core_data *c,
460 double interval_float;
463 /* if showing only 1st thread in core and this isn't one, bail out */
464 if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
467 /* if showing only 1st thread in pkg and this isn't one, bail out */
468 if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
471 interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
473 /* topo columns, print blanks on 1st (average) line */
474 if (t == &average.threads) {
476 outp += sprintf(outp, " -");
478 outp += sprintf(outp, " -");
480 outp += sprintf(outp, " -");
484 outp += sprintf(outp, "%8d", p->package_id);
486 outp += sprintf(outp, " -");
490 outp += sprintf(outp, "%8d", c->core_id);
492 outp += sprintf(outp, " -");
495 outp += sprintf(outp, "%8d", t->cpu_id);
500 outp += sprintf(outp, "%8.0f",
501 1.0 / units * t->aperf / interval_float);
506 outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc);
508 outp += sprintf(outp, "********");
513 outp += sprintf(outp, "%8.0f",
514 1.0 * t->tsc / units * t->aperf / t->mperf / interval_float);
517 outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float);
520 if (extra_delta_offset32)
521 outp += sprintf(outp, " %11llu", t->extra_delta32);
524 if (extra_delta_offset64)
525 outp += sprintf(outp, " %11llu", t->extra_delta64);
527 if (extra_msr_offset32)
528 outp += sprintf(outp, " 0x%08llx", t->extra_msr32);
531 if (extra_msr_offset64)
532 outp += sprintf(outp, " 0x%016llx", t->extra_msr64);
539 outp += sprintf(outp, "%8d", t->smi_count);
541 if (do_nhm_cstates) {
543 outp += sprintf(outp, "%8.2f", 100.0 * t->c1/t->tsc);
545 outp += sprintf(outp, "********");
548 /* print per-core data only for 1st thread in core */
549 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
552 if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates)
553 outp += sprintf(outp, "%8.2f", 100.0 * c->c3/t->tsc);
555 outp += sprintf(outp, "%8.2f", 100.0 * c->c6/t->tsc);
557 outp += sprintf(outp, "%8.2f", 100.0 * c->c7/t->tsc);
560 outp += sprintf(outp, "%8d", c->core_temp_c);
562 /* print per-package data only for 1st core in package */
563 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
568 outp += sprintf(outp, "%8d", p->pkg_temp_c);
570 /* Totl%C0, Any%C0 GFX%C0 CPUGFX% */
571 if (do_skl_residency) {
572 outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_wtd_core_c0/t->tsc);
573 outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_any_core_c0/t->tsc);
574 outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_any_gfxe_c0/t->tsc);
575 outp += sprintf(outp, "%8.2f", 100.0 * p->pkg_both_core_gfxe_c0/t->tsc);
579 outp += sprintf(outp, "%8.2f", 100.0 * p->pc2/t->tsc);
581 outp += sprintf(outp, "%8.2f", 100.0 * p->pc3/t->tsc);
583 outp += sprintf(outp, "%8.2f", 100.0 * p->pc6/t->tsc);
585 outp += sprintf(outp, "%8.2f", 100.0 * p->pc7/t->tsc);
587 outp += sprintf(outp, "%8.2f", 100.0 * p->pc8/t->tsc);
588 outp += sprintf(outp, "%8.2f", 100.0 * p->pc9/t->tsc);
589 outp += sprintf(outp, "%8.2f", 100.0 * p->pc10/t->tsc);
593 * If measurement interval exceeds minimum RAPL Joule Counter range,
594 * indicate that results are suspect by printing "**" in fraction place.
596 if (interval_float < rapl_joule_counter_range)
601 if (do_rapl && !rapl_joules) {
602 if (do_rapl & RAPL_PKG)
603 outp += sprintf(outp, fmt8, p->energy_pkg * rapl_energy_units / interval_float);
604 if (do_rapl & RAPL_CORES)
605 outp += sprintf(outp, fmt8, p->energy_cores * rapl_energy_units / interval_float);
606 if (do_rapl & RAPL_GFX)
607 outp += sprintf(outp, fmt8, p->energy_gfx * rapl_energy_units / interval_float);
608 if (do_rapl & RAPL_DRAM)
609 outp += sprintf(outp, fmt8, p->energy_dram * rapl_dram_energy_units / interval_float);
610 if (do_rapl & RAPL_PKG_PERF_STATUS)
611 outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
612 if (do_rapl & RAPL_DRAM_PERF_STATUS)
613 outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
614 } else if (do_rapl && rapl_joules) {
615 if (do_rapl & RAPL_PKG)
616 outp += sprintf(outp, fmt8,
617 p->energy_pkg * rapl_energy_units);
618 if (do_rapl & RAPL_CORES)
619 outp += sprintf(outp, fmt8,
620 p->energy_cores * rapl_energy_units);
621 if (do_rapl & RAPL_GFX)
622 outp += sprintf(outp, fmt8,
623 p->energy_gfx * rapl_energy_units);
624 if (do_rapl & RAPL_DRAM)
625 outp += sprintf(outp, fmt8,
626 p->energy_dram * rapl_dram_energy_units);
627 if (do_rapl & RAPL_PKG_PERF_STATUS)
628 outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
629 if (do_rapl & RAPL_DRAM_PERF_STATUS)
630 outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
632 outp += sprintf(outp, fmt8, interval_float);
635 outp += sprintf(outp, "\n");
642 fputs(output_buffer, stdout);
644 outp = output_buffer;
648 fputs(output_buffer, stderr);
649 outp = output_buffer;
651 void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
655 if (!printed || !summary_only)
658 if (topo.num_cpus > 1)
659 format_counters(&average.threads, &average.cores,
667 for_all_cpus(format_counters, t, c, p);
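/*
 * DELTA_WRAP32 computes new - old for the 32-bit counters (RAPL energy
 * and perf-status MSRs), adding 2^32 when the counter has wrapped
 * between samples.
 */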
670 #define DELTA_WRAP32(new, old) \
674 old = 0x100000000 + new - old; \
678 delta_package(struct pkg_data *new, struct pkg_data *old)
681 if (do_skl_residency) {
682 old->pkg_wtd_core_c0 = new->pkg_wtd_core_c0 - old->pkg_wtd_core_c0;
683 old->pkg_any_core_c0 = new->pkg_any_core_c0 - old->pkg_any_core_c0;
684 old->pkg_any_gfxe_c0 = new->pkg_any_gfxe_c0 - old->pkg_any_gfxe_c0;
685 old->pkg_both_core_gfxe_c0 = new->pkg_both_core_gfxe_c0 - old->pkg_both_core_gfxe_c0;
687 old->pc2 = new->pc2 - old->pc2;
689 old->pc3 = new->pc3 - old->pc3;
691 old->pc6 = new->pc6 - old->pc6;
693 old->pc7 = new->pc7 - old->pc7;
694 old->pc8 = new->pc8 - old->pc8;
695 old->pc9 = new->pc9 - old->pc9;
696 old->pc10 = new->pc10 - old->pc10;
697 old->pkg_temp_c = new->pkg_temp_c;
699 DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
700 DELTA_WRAP32(new->energy_cores, old->energy_cores);
701 DELTA_WRAP32(new->energy_gfx, old->energy_gfx);
702 DELTA_WRAP32(new->energy_dram, old->energy_dram);
703 DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status);
704 DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status);
708 delta_core(struct core_data *new, struct core_data *old)
710 old->c3 = new->c3 - old->c3;
711 old->c6 = new->c6 - old->c6;
712 old->c7 = new->c7 - old->c7;
713 old->core_temp_c = new->core_temp_c;
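/*
 * delta_thread() turns per-thread snapshots into per-interval deltas:
 * it sanity-checks the TSC delta, subtracts aperf/mperf, and derives C1
 * residency as TSC - mperf - (C3 + C6 + C7) when no dedicated C1
 * residency MSR is available.
 */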
720 delta_thread(struct thread_data *new, struct thread_data *old,
721 struct core_data *core_delta)
723 old->tsc = new->tsc - old->tsc;
725 /* check for TSC < 1 Mcycles over interval */
726 if (old->tsc < (1000 * 1000))
727 errx(-3, "Insanely slow TSC rate, TSC stops in idle?\n"
728 "You can disable all c-states by booting with \"idle=poll\"\n"
729 "or just the deep ones with \"processor.max_cstate=1\"");
731 old->c1 = new->c1 - old->c1;
734 if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
735 old->aperf = new->aperf - old->aperf;
736 old->mperf = new->mperf - old->mperf;
739 if (!aperf_mperf_unstable) {
740 fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
741 fprintf(stderr, "* Frequency results do not cover entire interval *\n");
742 fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");
744 aperf_mperf_unstable = 1;
747 * mperf delta is likely a huge "positive" number
748 * cannot use it for calculating c0 time
756 if (use_c1_residency_msr) {
758 * Some models have a dedicated C1 residency MSR,
759 * which should be more accurate than the derivation below.
763 * As counter collection is not atomic,
764 * it is possible for mperf's non-halted cycles + idle states
765 * to exceed TSC's all cycles: show c1 = 0% in that case.
767 if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
770 /* normal case, derive c1 */
771 old->c1 = old->tsc - old->mperf - core_delta->c3
772 - core_delta->c6 - core_delta->c7;
776 if (old->mperf == 0) {
777 if (debug > 1) fprintf(stderr, "cpu%d MPERF 0!\n", old->cpu_id);
778 old->mperf = 1; /* divide by 0 protection */
781 old->extra_delta32 = new->extra_delta32 - old->extra_delta32;
782 old->extra_delta32 &= 0xFFFFFFFF;
784 old->extra_delta64 = new->extra_delta64 - old->extra_delta64;
787 * Extra MSR is just a snapshot, simply copy latest w/o subtracting
789 old->extra_msr32 = new->extra_msr32;
790 old->extra_msr64 = new->extra_msr64;
793 old->smi_count = new->smi_count - old->smi_count;
796 int delta_cpu(struct thread_data *t, struct core_data *c,
797 struct pkg_data *p, struct thread_data *t2,
798 struct core_data *c2, struct pkg_data *p2)
800 /* calculate core delta only for 1st thread in core */
801 if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE)
804 /* always calculate thread delta */
805 delta_thread(t, t2, c2); /* c2 is core delta */
807 /* calculate package delta only for 1st core in package */
808 if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)
809 delta_package(p, p2);
814 void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
822 t->extra_delta32 = 0;
823 t->extra_delta64 = 0;
825 /* tells format_counters to dump all fields from this set */
826 t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE;
833 p->pkg_wtd_core_c0 = 0;
834 p->pkg_any_core_c0 = 0;
835 p->pkg_any_gfxe_c0 = 0;
836 p->pkg_both_core_gfxe_c0 = 0;
853 p->rapl_pkg_perf_status = 0;
854 p->rapl_dram_perf_status = 0;
857 int sum_counters(struct thread_data *t, struct core_data *c,
860 average.threads.tsc += t->tsc;
861 average.threads.aperf += t->aperf;
862 average.threads.mperf += t->mperf;
863 average.threads.c1 += t->c1;
865 average.threads.extra_delta32 += t->extra_delta32;
866 average.threads.extra_delta64 += t->extra_delta64;
868 /* sum per-core values only for 1st thread in core */
869 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
872 average.cores.c3 += c->c3;
873 average.cores.c6 += c->c6;
874 average.cores.c7 += c->c7;
876 average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);
878 /* sum per-pkg values only for 1st core in pkg */
879 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
882 if (do_skl_residency) {
883 average.packages.pkg_wtd_core_c0 += p->pkg_wtd_core_c0;
884 average.packages.pkg_any_core_c0 += p->pkg_any_core_c0;
885 average.packages.pkg_any_gfxe_c0 += p->pkg_any_gfxe_c0;
886 average.packages.pkg_both_core_gfxe_c0 += p->pkg_both_core_gfxe_c0;
889 average.packages.pc2 += p->pc2;
891 average.packages.pc3 += p->pc3;
893 average.packages.pc6 += p->pc6;
895 average.packages.pc7 += p->pc7;
896 average.packages.pc8 += p->pc8;
897 average.packages.pc9 += p->pc9;
898 average.packages.pc10 += p->pc10;
900 average.packages.energy_pkg += p->energy_pkg;
901 average.packages.energy_dram += p->energy_dram;
902 average.packages.energy_cores += p->energy_cores;
903 average.packages.energy_gfx += p->energy_gfx;
905 average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c);
907 average.packages.rapl_pkg_perf_status += p->rapl_pkg_perf_status;
908 average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status;
912 * sum the counters for all cpus in the system
913 * compute the weighted average
915 void compute_average(struct thread_data *t, struct core_data *c,
918 clear_counters(&average.threads, &average.cores, &average.packages);
920 for_all_cpus(sum_counters, t, c, p);
922 average.threads.tsc /= topo.num_cpus;
923 average.threads.aperf /= topo.num_cpus;
924 average.threads.mperf /= topo.num_cpus;
925 average.threads.c1 /= topo.num_cpus;
927 average.threads.extra_delta32 /= topo.num_cpus;
928 average.threads.extra_delta32 &= 0xFFFFFFFF;
930 average.threads.extra_delta64 /= topo.num_cpus;
932 average.cores.c3 /= topo.num_cores;
933 average.cores.c6 /= topo.num_cores;
934 average.cores.c7 /= topo.num_cores;
936 if (do_skl_residency) {
937 average.packages.pkg_wtd_core_c0 /= topo.num_packages;
938 average.packages.pkg_any_core_c0 /= topo.num_packages;
939 average.packages.pkg_any_gfxe_c0 /= topo.num_packages;
940 average.packages.pkg_both_core_gfxe_c0 /= topo.num_packages;
943 average.packages.pc2 /= topo.num_packages;
945 average.packages.pc3 /= topo.num_packages;
947 average.packages.pc6 /= topo.num_packages;
949 average.packages.pc7 /= topo.num_packages;
951 average.packages.pc8 /= topo.num_packages;
952 average.packages.pc9 /= topo.num_packages;
953 average.packages.pc10 /= topo.num_packages;
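/*
 * rdtsc() reads the timestamp counter directly: RDTSC returns the low
 * 32 bits in EAX and the high 32 bits in EDX, combined below into one
 * 64-bit value.
 */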
956 static unsigned long long rdtsc(void)
958 unsigned int low, high;
960 asm volatile("rdtsc" : "=a" (low), "=d" (high));
962 return low | ((unsigned long long)high) << 32;
969 * acquire and record local counters for that cpu
971 int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
974 unsigned long long msr;
976 if (cpu_migrate(cpu)) {
977 fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
981 t->tsc = rdtsc(); /* we are running on local CPU of interest */
984 if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
986 if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
988 t->aperf = t->aperf * aperf_mperf_multiplier;
989 t->mperf = t->mperf * aperf_mperf_multiplier;
993 if (get_msr(cpu, MSR_SMI_COUNT, &msr))
995 t->smi_count = msr & 0xFFFFFFFF;
997 if (extra_delta_offset32) {
998 if (get_msr(cpu, extra_delta_offset32, &msr))
1000 t->extra_delta32 = msr & 0xFFFFFFFF;
1003 if (extra_delta_offset64)
1004 if (get_msr(cpu, extra_delta_offset64, &t->extra_delta64))
1007 if (extra_msr_offset32) {
1008 if (get_msr(cpu, extra_msr_offset32, &msr))
1010 t->extra_msr32 = msr & 0xFFFFFFFF;
1013 if (extra_msr_offset64)
1014 if (get_msr(cpu, extra_msr_offset64, &t->extra_msr64))
1017 if (use_c1_residency_msr) {
1018 if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1))
1022 /* collect core counters only for 1st thread in core */
1023 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
1026 if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) {
1027 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
1031 if (do_nhm_cstates && !do_knl_cstates) {
1032 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
1034 } else if (do_knl_cstates) {
1035 if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
1040 if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
1044 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
1046 c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
1050 /* collect package counters only for 1st core in package */
1051 if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1054 if (do_skl_residency) {
1055 if (get_msr(cpu, MSR_PKG_WEIGHTED_CORE_C0_RES, &p->pkg_wtd_core_c0))
1057 if (get_msr(cpu, MSR_PKG_ANY_CORE_C0_RES, &p->pkg_any_core_c0))
1059 if (get_msr(cpu, MSR_PKG_ANY_GFXE_C0_RES, &p->pkg_any_gfxe_c0))
1061 if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0))
1065 if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
1068 if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
1071 if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2))
1074 if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
1077 if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
1079 if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9))
1081 if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10))
1084 if (do_rapl & RAPL_PKG) {
1085 if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
1087 p->energy_pkg = msr & 0xFFFFFFFF;
1089 if (do_rapl & RAPL_CORES) {
1090 if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr))
1092 p->energy_cores = msr & 0xFFFFFFFF;
1094 if (do_rapl & RAPL_DRAM) {
1095 if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr))
1097 p->energy_dram = msr & 0xFFFFFFFF;
1099 if (do_rapl & RAPL_GFX) {
1100 if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr))
1102 p->energy_gfx = msr & 0xFFFFFFFF;
1104 if (do_rapl & RAPL_PKG_PERF_STATUS) {
1105 if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr))
1107 p->rapl_pkg_perf_status = msr & 0xFFFFFFFF;
1109 if (do_rapl & RAPL_DRAM_PERF_STATUS) {
1110 if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr))
1112 p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
1115 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
1117 p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
1123 * MSR_PKG_CST_CONFIG_CONTROL decoding for pkg_cstate_limit:
1124 * If you change the values, note they are used both in comparisons
1125 * (>= PCL__7) and to index pkg_cstate_limit_strings[].
1128 #define PCLUKN 0 /* Unknown */
1129 #define PCLRSV 1 /* Reserved */
1130 #define PCL__0 2 /* PC0 */
1131 #define PCL__1 3 /* PC1 */
1132 #define PCL__2 4 /* PC2 */
1133 #define PCL__3 5 /* PC3 */
1134 #define PCL__4 6 /* PC4 */
1135 #define PCL__6 7 /* PC6 */
1136 #define PCL_6N 8 /* PC6 No Retention */
1137 #define PCL_6R 9 /* PC6 Retention */
1138 #define PCL__7 10 /* PC7 */
1139 #define PCL_7S 11 /* PC7 Shrink */
1140 #define PCL__8 12 /* PC8 */
1141 #define PCL__9 13 /* PC9 */
1142 #define PCLUNL 14 /* Unlimited */
1144 int pkg_cstate_limit = PCLUKN;
1145 char *pkg_cstate_limit_strings[] = { "unknown", "reserved", "pc0", "pc1", "pc2",
1146 "pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "pc8", "pc9", "unlimited"};
1148 int nhm_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1149 int snb_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1150 int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1151 int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1152 int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1153 int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
1156 dump_nhm_platform_info(void)
1158 unsigned long long msr;
1161 get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
1163 fprintf(stderr, "cpu%d: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);
1165 ratio = (msr >> 40) & 0xFF;
1166 fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n",
1167 ratio, bclk, ratio * bclk);
1169 ratio = (msr >> 8) & 0xFF;
1170 fprintf(stderr, "%d * %.0f = %.0f MHz base frequency\n",
1171 ratio, bclk, ratio * bclk);
1173 get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr);
1174 fprintf(stderr, "cpu%d: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
1175 base_cpu, msr, msr & 0x2 ? "EN" : "DIS");
1181 dump_hsw_turbo_ratio_limits(void)
1183 unsigned long long msr;
1186 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr);
1188 fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", base_cpu, msr);
1190 ratio = (msr >> 8) & 0xFF;
1192 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 18 active cores\n",
1193 ratio, bclk, ratio * bclk);
1195 ratio = (msr >> 0) & 0xFF;
1197 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 17 active cores\n",
1198 ratio, bclk, ratio * bclk);
1203 dump_ivt_turbo_ratio_limits(void)
1205 unsigned long long msr;
1208 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr);
1210 fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, msr);
1212 ratio = (msr >> 56) & 0xFF;
1214 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 16 active cores\n",
1215 ratio, bclk, ratio * bclk);
1217 ratio = (msr >> 48) & 0xFF;
1219 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 15 active cores\n",
1220 ratio, bclk, ratio * bclk);
1222 ratio = (msr >> 40) & 0xFF;
1224 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 14 active cores\n",
1225 ratio, bclk, ratio * bclk);
1227 ratio = (msr >> 32) & 0xFF;
1229 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 13 active cores\n",
1230 ratio, bclk, ratio * bclk);
1232 ratio = (msr >> 24) & 0xFF;
1234 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 12 active cores\n",
1235 ratio, bclk, ratio * bclk);
1237 ratio = (msr >> 16) & 0xFF;
1239 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 11 active cores\n",
1240 ratio, bclk, ratio * bclk);
1242 ratio = (msr >> 8) & 0xFF;
1244 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 10 active cores\n",
1245 ratio, bclk, ratio * bclk);
1247 ratio = (msr >> 0) & 0xFF;
1249 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 9 active cores\n",
1250 ratio, bclk, ratio * bclk);
1255 dump_nhm_turbo_ratio_limits(void)
1257 unsigned long long msr;
1260 get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
1262 fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", base_cpu, msr);
1264 ratio = (msr >> 56) & 0xFF;
1266 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 8 active cores\n",
1267 ratio, bclk, ratio * bclk);
1269 ratio = (msr >> 48) & 0xFF;
1271 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 7 active cores\n",
1272 ratio, bclk, ratio * bclk);
1274 ratio = (msr >> 40) & 0xFF;
1276 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 6 active cores\n",
1277 ratio, bclk, ratio * bclk);
1279 ratio = (msr >> 32) & 0xFF;
1281 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 5 active cores\n",
1282 ratio, bclk, ratio * bclk);
1284 ratio = (msr >> 24) & 0xFF;
1286 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 4 active cores\n",
1287 ratio, bclk, ratio * bclk);
1289 ratio = (msr >> 16) & 0xFF;
1291 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 3 active cores\n",
1292 ratio, bclk, ratio * bclk);
1294 ratio = (msr >> 8) & 0xFF;
1296 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 2 active cores\n",
1297 ratio, bclk, ratio * bclk);
1299 ratio = (msr >> 0) & 0xFF;
1301 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n",
1302 ratio, bclk, ratio * bclk);
1307 dump_knl_turbo_ratio_limits(void)
1311 unsigned long long msr;
1316 get_msr(base_cpu, MSR_NHM_TURBO_RATIO_LIMIT, &msr);
1318 fprintf(stderr, "cpu%d: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n",
1322 * Turbo encoding in KNL is as follows:
1323 * [7:0] -- Base value of number of active cores of bucket 1.
1324 * [15:8] -- Base value of freq ratio of bucket 1.
1325 * [20:16] -- +ve delta of number of active cores of bucket 2.
1326 * i.e. active cores of bucket 2 =
1327 * active cores of bucket 1 + delta
1328 * [23:21] -- Negative delta of freq ratio of bucket 2.
1329 * i.e. freq ratio of bucket 2 =
1330 * freq ratio of bucket 1 - delta
1331 * [28:24]-- +ve delta of number of active cores of bucket 3.
1332 * [31:29]-- -ve delta of freq ratio of bucket 3.
1333 * [36:32]-- +ve delta of number of active cores of bucket 4.
1334 * [39:37]-- -ve delta of freq ratio of bucket 4.
1335 * [44:40]-- +ve delta of number of active cores of bucket 5.
1336 * [47:45]-- -ve delta of freq ratio of bucket 5.
1337 * [52:48]-- +ve delta of number of active cores of bucket 6.
1338 * [55:53]-- -ve delta of freq ratio of bucket 6.
1339 * [60:56]-- +ve delta of number of active cores of bucket 7.
1340 * [63:61]-- -ve delta of freq ratio of bucket 7.
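 *
 * Worked example with a hypothetical MSR value: if bits [15:8] = 16 and
 * bits [7:0] = 2, bucket 1 allows up to 2 active cores at 16 * bclk MHz.
 * If bits [20:16] = 2 and bits [23:21] = 1, bucket 2 then covers up to
 * 2 + 2 = 4 active cores at (16 - 1) * bclk MHz.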
1343 ratio = (msr >> 8) & 0xFF;
1346 "%d * %.0f = %.0f MHz max turbo %d active cores\n",
1347 ratio, bclk, ratio * bclk, cores);
1349 for (i = 16; i < 64; i = i + 8) {
1350 delta_cores = (msr >> i) & 0x1F;
1351 delta_ratio = (msr >> (i + 5)) & 0x7;
1352 if (!delta_cores || !delta_ratio)
1354 cores = cores + delta_cores;
1355 ratio = ratio - delta_ratio;
1357 /** -ve ratios will make successive ratio calculations
1358 * negative. Hence return instead of carrying on.
1362 "%d * %.0f = %.0f MHz max turbo %d active cores\n",
1363 ratio, bclk, ratio * bclk, cores);
1368 dump_nhm_cst_cfg(void)
1370 unsigned long long msr;
1372 get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
1374 #define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
1375 #define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
1377 fprintf(stderr, "cpu%d: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", base_cpu, msr);
1379 fprintf(stderr, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: %s)\n",
1380 (msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "",
1381 (msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "",
1382 (msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "",
1383 (msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "",
1384 (msr & (1 << 15)) ? "" : "UN",
1385 (unsigned int)msr & 7,
1386 pkg_cstate_limit_strings[pkg_cstate_limit]);
1391 dump_config_tdp(void)
1393 unsigned long long msr;
1395 get_msr(base_cpu, MSR_CONFIG_TDP_NOMINAL, &msr);
1396 fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_NOMINAL: 0x%08llx", base_cpu, msr);
1397 fprintf(stderr, " (base_ratio=%d)\n", (unsigned int)msr & 0xFF);
1399 get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_1, &msr);
1400 fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_LEVEL_1: 0x%08llx (", base_cpu, msr);
1402 fprintf(stderr, "PKG_MIN_PWR_LVL1=%d ", (unsigned int)(msr >> 48) & 0x7FFF);
1403 fprintf(stderr, "PKG_MAX_PWR_LVL1=%d ", (unsigned int)(msr >> 32) & 0x7FFF);
1404 fprintf(stderr, "LVL1_RATIO=%d ", (unsigned int)(msr >> 16) & 0xFF);
1405 fprintf(stderr, "PKG_TDP_LVL1=%d", (unsigned int)(msr) & 0x7FFF);
1407 fprintf(stderr, ")\n");
1409 get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_2, &msr);
1410 fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_LEVEL_2: 0x%08llx (", base_cpu, msr);
1412 fprintf(stderr, "PKG_MIN_PWR_LVL2=%d ", (unsigned int)(msr >> 48) & 0x7FFF);
1413 fprintf(stderr, "PKG_MAX_PWR_LVL2=%d ", (unsigned int)(msr >> 32) & 0x7FFF);
1414 fprintf(stderr, "LVL2_RATIO=%d ", (unsigned int)(msr >> 16) & 0xFF);
1415 fprintf(stderr, "PKG_TDP_LVL2=%d", (unsigned int)(msr) & 0x7FFF);
1417 fprintf(stderr, ")\n");
1419 get_msr(base_cpu, MSR_CONFIG_TDP_CONTROL, &msr);
1420 fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_CONTROL: 0x%08llx (", base_cpu, msr);
1422 fprintf(stderr, "TDP_LEVEL=%d ", (unsigned int)(msr) & 0x3);
1423 fprintf(stderr, " lock=%d", (unsigned int)(msr >> 31) & 1);
1424 fprintf(stderr, ")\n");
1426 get_msr(base_cpu, MSR_TURBO_ACTIVATION_RATIO, &msr);
1427 fprintf(stderr, "cpu%d: MSR_TURBO_ACTIVATION_RATIO: 0x%08llx (", base_cpu, msr);
1428 fprintf(stderr, "MAX_NON_TURBO_RATIO=%d", (unsigned int)(msr) & 0xFF);
1429 fprintf(stderr, " lock=%d", (unsigned int)(msr >> 31) & 1);
1430 fprintf(stderr, ")\n");
1433 void free_all_buffers(void)
1435 CPU_FREE(cpu_present_set);
1436 cpu_present_set = NULL;
1437 cpu_present_setsize = 0;
1439 CPU_FREE(cpu_affinity_set);
1440 cpu_affinity_set = NULL;
1441 cpu_affinity_setsize = 0;
1449 package_even = NULL;
1459 free(output_buffer);
1460 output_buffer = NULL;
1465 * Open a file, and exit on failure
1467 FILE *fopen_or_die(const char *path, const char *mode)
1469 FILE *filep = fopen(path, mode);
1471 err(1, "%s: open failed", path);
1476 * Parse a file containing a single int.
1478 int parse_int_file(const char *fmt, ...)
1481 char path[PATH_MAX];
1485 va_start(args, fmt);
1486 vsnprintf(path, sizeof(path), fmt, args);
1488 filep = fopen_or_die(path, "r");
1489 if (fscanf(filep, "%d", &value) != 1)
1490 err(1, "%s: failed to parse number from file", path);
1496 * get_cpu_position_in_core(cpu)
1497 * return the position of the CPU among its HT siblings in the core
1498 * return -1 if the sibling is not in list
1500 int get_cpu_position_in_core(int cpu)
1509 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list",
1511 filep = fopen(path, "r");
1512 if (filep == NULL) {
1517 for (i = 0; i < topo.num_threads_per_core; i++) {
1518 fscanf(filep, "%d", &this_cpu);
1519 if (this_cpu == cpu) {
1524 /* Account for no separator after last thread */
1525 if (i != (topo.num_threads_per_core - 1))
1526 fscanf(filep, "%c", &character);
1534 * cpu_is_first_core_in_package(cpu)
1535 * return 1 if given CPU is 1st core in package
1537 int cpu_is_first_core_in_package(int cpu)
1539 return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
1542 int get_physical_package_id(int cpu)
1544 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
1547 int get_core_id(int cpu)
1549 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
1552 int get_num_ht_siblings(int cpu)
1562 sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
1563 filep = fopen_or_die(path, "r");
1567 * A ',' separated or '-' separated set of numbers
1568 * (eg 1-2 or 1,3,4,5)
1570 fscanf(filep, "%d%c\n", &sib1, &character);
1571 fseek(filep, 0, SEEK_SET);
1572 fgets(str, 100, filep);
1573 ch = strchr(str, character);
1574 while (ch != NULL) {
1576 ch = strchr(ch+1, character);
1584 * run func(thread, core, package) in topology order
1585 * skip non-present cpus
1588 int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *,
1589 struct pkg_data *, struct thread_data *, struct core_data *,
1590 struct pkg_data *), struct thread_data *thread_base,
1591 struct core_data *core_base, struct pkg_data *pkg_base,
1592 struct thread_data *thread_base2, struct core_data *core_base2,
1593 struct pkg_data *pkg_base2)
1595 int retval, pkg_no, core_no, thread_no;
1597 for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
1598 for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
1599 for (thread_no = 0; thread_no <
1600 topo.num_threads_per_core; ++thread_no) {
1601 struct thread_data *t, *t2;
1602 struct core_data *c, *c2;
1603 struct pkg_data *p, *p2;
1605 t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
1607 if (cpu_is_not_present(t->cpu_id))
1610 t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no);
1612 c = GET_CORE(core_base, core_no, pkg_no);
1613 c2 = GET_CORE(core_base2, core_no, pkg_no);
1615 p = GET_PKG(pkg_base, pkg_no);
1616 p2 = GET_PKG(pkg_base2, pkg_no);
1618 retval = func(t, c, p, t2, c2, p2);
1628 * run func(cpu) on every cpu in /proc/stat
1629 * return max_cpu number
1631 int for_all_proc_cpus(int (func)(int))
1637 fp = fopen_or_die(proc_stat, "r");
1639 retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
1641 err(1, "%s: failed to parse format", proc_stat);
1644 retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
1648 retval = func(cpu_num);
1658 void re_initialize(void)
1661 setup_all_buffers();
1662 printf("turbostat: re-initialized with num_cpus %d\n", topo.num_cpus);
1668 * remember the last one seen, it will be the max
1670 int count_cpus(int cpu)
1672 if (topo.max_cpu_num < cpu)
1673 topo.max_cpu_num = cpu;
1678 int mark_cpu_present(int cpu)
1680 CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
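/*
 * turbostat_loop() alternates between the EVEN and ODD counter sets:
 * each pass sleeps for interval_sec, collects the other set, then
 * computes and prints deltas between the two most recent snapshots.
 */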
1684 void turbostat_loop()
1692 retval = for_all_cpus(get_counters, EVEN_COUNTERS);
1695 } else if (retval == -1) {
1696 if (restarted > 1) {
1703 gettimeofday(&tv_even, (struct timezone *)NULL);
1706 if (for_all_proc_cpus(cpu_is_not_present)) {
1710 sleep(interval_sec);
1711 retval = for_all_cpus(get_counters, ODD_COUNTERS);
1714 } else if (retval == -1) {
1718 gettimeofday(&tv_odd, (struct timezone *)NULL);
1719 timersub(&tv_odd, &tv_even, &tv_delta);
1720 for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS);
1721 compute_average(EVEN_COUNTERS);
1722 format_all_counters(EVEN_COUNTERS);
1724 sleep(interval_sec);
1725 retval = for_all_cpus(get_counters, EVEN_COUNTERS);
1728 } else if (retval == -1) {
1732 gettimeofday(&tv_even, (struct timezone *)NULL);
1733 timersub(&tv_even, &tv_odd, &tv_delta);
1734 for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS);
1735 compute_average(ODD_COUNTERS);
1736 format_all_counters(ODD_COUNTERS);
1741 void check_dev_msr()
1746 sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
1747 if (stat(pathname, &sb))
1748 if (system("/sbin/modprobe msr > /dev/null 2>&1"))
1749 err(-5, "no /dev/cpu/0/msr, try \"# modprobe msr\"");
1752 void check_permissions()
1754 struct __user_cap_header_struct cap_header_data;
1755 cap_user_header_t cap_header = &cap_header_data;
1756 struct __user_cap_data_struct cap_data_data;
1757 cap_user_data_t cap_data = &cap_data_data;
1758 extern int capget(cap_user_header_t hdrp, cap_user_data_t datap);
1762 /* check for CAP_SYS_RAWIO */
1763 cap_header->pid = getpid();
1764 cap_header->version = _LINUX_CAPABILITY_VERSION;
1765 if (capget(cap_header, cap_data) < 0)
1766 err(-6, "capget(2) failed");
1768 if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) {
1770 warnx("capget(CAP_SYS_RAWIO) failed,"
1771 " try \"# setcap cap_sys_rawio=ep %s\"", progname);
1774 /* test file permissions */
1775 sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
1776 if (euidaccess(pathname, R_OK)) {
1778 warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr");
1781 /* if all else fails, tell them to be root */
1784 warnx("... or simply run as root");
1791 * NHM adds support for additional MSRs:
1793 * MSR_SMI_COUNT 0x00000034
1795 * MSR_NHM_PLATFORM_INFO 0x000000ce
1796 * MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
1798 * MSR_PKG_C3_RESIDENCY 0x000003f8
1799 * MSR_PKG_C6_RESIDENCY 0x000003f9
1800 * MSR_CORE_C3_RESIDENCY 0x000003fc
1801 * MSR_CORE_C6_RESIDENCY 0x000003fd
1804 * sets global pkg_cstate_limit to decode MSR_NHM_SNB_PKG_CST_CFG_CTL
1806 int probe_nhm_msrs(unsigned int family, unsigned int model)
1808 unsigned long long msr;
1809 int *pkg_cstate_limits;
1818 case 0x1A: /* Core i7, Xeon 5500 series - Bloomfield, Gainestown NHM-EP */
1819 case 0x1E: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
1820 case 0x1F: /* Core i7 and i5 Processor - Nehalem */
1821 case 0x25: /* Westmere Client - Clarkdale, Arrandale */
1822 case 0x2C: /* Westmere EP - Gulftown */
1823 case 0x2E: /* Nehalem-EX Xeon - Beckton */
1824 case 0x2F: /* Westmere-EX Xeon - Eagleton */
1825 pkg_cstate_limits = nhm_pkg_cstate_limits;
1827 case 0x2A: /* SNB */
1828 case 0x2D: /* SNB Xeon */
1829 case 0x3A: /* IVB */
1830 case 0x3E: /* IVB Xeon */
1831 pkg_cstate_limits = snb_pkg_cstate_limits;
1833 case 0x3C: /* HSW */
1834 case 0x3F: /* HSX */
1835 case 0x45: /* HSW */
1836 case 0x46: /* HSW */
1837 case 0x3D: /* BDW */
1838 case 0x47: /* BDW */
1839 case 0x4F: /* BDX */
1840 case 0x56: /* BDX-DE */
1841 case 0x4E: /* SKL */
1842 case 0x5E: /* SKL */
1843 pkg_cstate_limits = hsw_pkg_cstate_limits;
1845 case 0x37: /* BYT */
1846 case 0x4D: /* AVN */
1847 pkg_cstate_limits = slv_pkg_cstate_limits;
1849 case 0x4C: /* AMT */
1850 pkg_cstate_limits = amt_pkg_cstate_limits;
1852 case 0x57: /* PHI */
1853 pkg_cstate_limits = phi_pkg_cstate_limits;
1858 get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
1860 pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];
1864 int has_nhm_turbo_ratio_limit(unsigned int family, unsigned int model)
1867 /* Nehalem compatible, but do not include turbo-ratio limit support */
1868 case 0x2E: /* Nehalem-EX Xeon - Beckton */
1869 case 0x2F: /* Westmere-EX Xeon - Eagleton */
1875 int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model)
1884 case 0x3E: /* IVB Xeon */
1885 case 0x3F: /* HSW Xeon */
1891 int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model)
1900 case 0x3F: /* HSW Xeon */
1907 int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model)
1916 case 0x57: /* Knights Landing */
1922 int has_config_tdp(unsigned int family, unsigned int model)
1931 case 0x3A: /* IVB */
1932 case 0x3C: /* HSW */
1933 case 0x3F: /* HSX */
1934 case 0x45: /* HSW */
1935 case 0x46: /* HSW */
1936 case 0x3D: /* BDW */
1937 case 0x47: /* BDW */
1938 case 0x4F: /* BDX */
1939 case 0x56: /* BDX-DE */
1940 case 0x4E: /* SKL */
1941 case 0x5E: /* SKL */
1943 case 0x57: /* Knights Landing */
1951 dump_cstate_pstate_config_info(unsigned int family, unsigned int model)
1953 if (!do_nhm_platform_info)
1956 dump_nhm_platform_info();
1958 if (has_hsw_turbo_ratio_limit(family, model))
1959 dump_hsw_turbo_ratio_limits();
1961 if (has_ivt_turbo_ratio_limit(family, model))
1962 dump_ivt_turbo_ratio_limits();
1964 if (has_nhm_turbo_ratio_limit(family, model))
1965 dump_nhm_turbo_ratio_limits();
1967 if (has_knl_turbo_ratio_limit(family, model))
1968 dump_knl_turbo_ratio_limits();
1970 if (has_config_tdp(family, model))
1979 * Decode the ENERGY_PERF_BIAS MSR
1981 int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1983 unsigned long long msr;
1992 /* EPB is per-package */
1993 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
1996 if (cpu_migrate(cpu)) {
1997 fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
2001 if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr))
2004 switch (msr & 0xF) {
2005 case ENERGY_PERF_BIAS_PERFORMANCE:
2006 epb_string = "performance";
2008 case ENERGY_PERF_BIAS_NORMAL:
2009 epb_string = "balanced";
2011 case ENERGY_PERF_BIAS_POWERSAVE:
2012 epb_string = "powersave";
2015 epb_string = "custom";
2018 fprintf(stderr, "cpu%d: MSR_IA32_ENERGY_PERF_BIAS: 0x%08llx (%s)\n", cpu, msr, epb_string);
2024 * print_perf_limit()
2026 int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p)
2028 unsigned long long msr;
2034 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
2037 if (cpu_migrate(cpu)) {
2038 fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
2042 if (do_core_perf_limit_reasons) {
2043 get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr);
2044 fprintf(stderr, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
2045 fprintf(stderr, " (Active: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
2046 (msr & 1 << 15) ? "bit15, " : "",
2047 (msr & 1 << 14) ? "bit14, " : "",
2048 (msr & 1 << 13) ? "Transitions, " : "",
2049 (msr & 1 << 12) ? "MultiCoreTurbo, " : "",
2050 (msr & 1 << 11) ? "PkgPwrL2, " : "",
2051 (msr & 1 << 10) ? "PkgPwrL1, " : "",
2052 (msr & 1 << 9) ? "CorePwr, " : "",
2053 (msr & 1 << 8) ? "Amps, " : "",
2054 (msr & 1 << 6) ? "VR-Therm, " : "",
2055 (msr & 1 << 5) ? "Auto-HWP, " : "",
2056 (msr & 1 << 4) ? "Graphics, " : "",
2057 (msr & 1 << 2) ? "bit2, " : "",
2058 (msr & 1 << 1) ? "ThermStatus, " : "",
2059 (msr & 1 << 0) ? "PROCHOT, " : "");
2060 fprintf(stderr, " (Logged: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
2061 (msr & 1 << 31) ? "bit31, " : "",
2062 (msr & 1 << 30) ? "bit30, " : "",
2063 (msr & 1 << 29) ? "Transitions, " : "",
2064 (msr & 1 << 28) ? "MultiCoreTurbo, " : "",
2065 (msr & 1 << 27) ? "PkgPwrL2, " : "",
2066 (msr & 1 << 26) ? "PkgPwrL1, " : "",
2067 (msr & 1 << 25) ? "CorePwr, " : "",
2068 (msr & 1 << 24) ? "Amps, " : "",
2069 (msr & 1 << 22) ? "VR-Therm, " : "",
2070 (msr & 1 << 21) ? "Auto-HWP, " : "",
2071 (msr & 1 << 20) ? "Graphics, " : "",
2072 (msr & 1 << 18) ? "bit18, " : "",
2073 (msr & 1 << 17) ? "ThermStatus, " : "",
2074 (msr & 1 << 16) ? "PROCHOT, " : "");
2077 if (do_gfx_perf_limit_reasons) {
2078 get_msr(cpu, MSR_GFX_PERF_LIMIT_REASONS, &msr);
2079 fprintf(stderr, "cpu%d: MSR_GFX_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
2080 fprintf(stderr, " (Active: %s%s%s%s%s%s%s%s)",
2081 (msr & 1 << 0) ? "PROCHOT, " : "",
2082 (msr & 1 << 1) ? "ThermStatus, " : "",
2083 (msr & 1 << 4) ? "Graphics, " : "",
2084 (msr & 1 << 6) ? "VR-Therm, " : "",
2085 (msr & 1 << 8) ? "Amps, " : "",
2086 (msr & 1 << 9) ? "GFXPwr, " : "",
2087 (msr & 1 << 10) ? "PkgPwrL1, " : "",
2088 (msr & 1 << 11) ? "PkgPwrL2, " : "");
2089 fprintf(stderr, " (Logged: %s%s%s%s%s%s%s%s)\n",
2090 (msr & 1 << 16) ? "PROCHOT, " : "",
2091 (msr & 1 << 17) ? "ThermStatus, " : "",
2092 (msr & 1 << 20) ? "Graphics, " : "",
2093 (msr & 1 << 22) ? "VR-Therm, " : "",
2094 (msr & 1 << 24) ? "Amps, " : "",
2095 (msr & 1 << 25) ? "GFXPwr, " : "",
2096 (msr & 1 << 26) ? "PkgPwrL1, " : "",
2097 (msr & 1 << 27) ? "PkgPwrL2, " : "");
2099 if (do_ring_perf_limit_reasons) {
2100 get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr);
2101 fprintf(stderr, "cpu%d: MSR_RING_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
2102 fprintf(stderr, " (Active: %s%s%s%s%s%s)",
2103 (msr & 1 << 0) ? "PROCHOT, " : "",
2104 (msr & 1 << 1) ? "ThermStatus, " : "",
2105 (msr & 1 << 6) ? "VR-Therm, " : "",
2106 (msr & 1 << 8) ? "Amps, " : "",
2107 (msr & 1 << 10) ? "PkgPwrL1, " : "",
2108 (msr & 1 << 11) ? "PkgPwrL2, " : "");
2109 fprintf(stderr, " (Logged: %s%s%s%s%s%s)\n",
2110 (msr & 1 << 16) ? "PROCHOT, " : "",
2111 (msr & 1 << 17) ? "ThermStatus, " : "",
2112 (msr & 1 << 22) ? "VR-Therm, " : "",
2113 (msr & 1 << 24) ? "Amps, " : "",
2114 (msr & 1 << 26) ? "PkgPwrL1, " : "",
2115 (msr & 1 << 27) ? "PkgPwrL2, " : "");
2120 #define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */
2121 #define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */
2123 double get_tdp(unsigned int model)
2125 unsigned long long msr;
2127 if (do_rapl & RAPL_PKG_POWER_INFO)
2128 if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr))
2129 return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
2141 * rapl_dram_energy_units_probe()
2142 * Energy units are either hard-coded, or come from RAPL Energy Unit MSR.
2145 rapl_dram_energy_units_probe(int model, double rapl_energy_units)
2147 /* only called for genuine_intel, family 6 */
2150 case 0x3F: /* HSX */
2151 case 0x4F: /* BDX */
2152 case 0x56: /* BDX-DE */
2153 case 0x57: /* KNL */
2154 return (rapl_dram_energy_units = 15.3 / 1000000);
2156 return (rapl_energy_units);
2164 * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
2166 void rapl_probe(unsigned int family, unsigned int model)
2168 unsigned long long msr;
2169 unsigned int time_unit;
2181 case 0x3C: /* HSW */
2182 case 0x45: /* HSW */
2183 case 0x46: /* HSW */
2184 case 0x3D: /* BDW */
2185 case 0x47: /* BDW */
2186 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
2188 case 0x4E: /* SKL */
2189 case 0x5E: /* SKL */
2190 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
2192 case 0x3F: /* HSX */
2193 case 0x4F: /* BDX */
2194 case 0x56: /* BDX-DE */
2195 case 0x57: /* KNL */
2196 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
2200 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO;
2202 case 0x37: /* BYT */
2203 case 0x4D: /* AVN */
2204 do_rapl = RAPL_PKG | RAPL_CORES ;
2210 /* units on package 0, verify later other packages match */
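/*
 * MSR_RAPL_POWER_UNIT encodes units as negative powers of two:
 * bits [3:0] power units (1/2^N Watts), bits [12:8] energy units
 * (1/2^N Joules), bits [19:16] time units (1/2^N seconds).
 */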
2211 if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr))
2214 rapl_power_units = 1.0 / (1 << (msr & 0xF));
2216 rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
2218 rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
2220 rapl_dram_energy_units = rapl_dram_energy_units_probe(model, rapl_energy_units);
2222 time_unit = msr >> 16 & 0xF;
2226 rapl_time_units = 1.0 / (1 << (time_unit));
2228 tdp = get_tdp(model);
2230 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
2232 fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
2237 void perf_limit_reasons_probe(unsigned int family, unsigned int model)
2246 case 0x3C: /* HSW */
2247 case 0x45: /* HSW */
2248 case 0x46: /* HSW */
2249 do_gfx_perf_limit_reasons = 1;
2250 case 0x3F: /* HSX */
2251 do_core_perf_limit_reasons = 1;
2252 do_ring_perf_limit_reasons = 1;
2258 int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
2260 unsigned long long msr;
2264 if (!(do_dts || do_ptm))
2269 /* DTS is per-core, no need to print for each thread */
2270 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
2273 if (cpu_migrate(cpu)) {
2274 fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
2278 if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) {
2279 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
2282 dts = (msr >> 16) & 0x7F;
2283 fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
2284 cpu, msr, tcc_activation_temp - dts);
2287 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
2290 dts = (msr >> 16) & 0x7F;
2291 dts2 = (msr >> 8) & 0x7F;
2292 fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
2293 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
2299 unsigned int resolution;
2301 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
2304 dts = (msr >> 16) & 0x7F;
2305 resolution = (msr >> 27) & 0xF;
2306 fprintf(stderr, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
2307 cpu, msr, tcc_activation_temp - dts, resolution);
2310 if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
2313 dts = (msr >> 16) & 0x7F;
2314 dts2 = (msr >> 8) & 0x7F;
2315 fprintf(stderr, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
2316 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
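/*
 * print_power_limit_msr() decodes one RAPL power-limit field:
 * bits [14:0] power limit, bit 15 enable, bit 16 clamp, and
 * bits [23:17] the time window (a 2-bit fraction and 5-bit exponent,
 * decoded below).
 */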
void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
{
	fprintf(stderr, "cpu%d: %s: %sabled (%f Watts, %f sec, clamp %sabled)\n",
		cpu, label,
		((msr >> 15) & 1) ? "EN" : "DIS",
		((msr >> 0) & 0x7FFF) * rapl_power_units,
		(1.0 + (((msr >> 22) & 0x3)/4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units,
		(((msr >> 16) & 1) ? "EN" : "DIS"));

	return;
}
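
/*
 * The time-window math above follows the RAPL encoding
 * window = (1 + F/4) * 2^Y * time_unit, with Y in bits 21:17 and F in
 * bits 23:22.  For example, Y = 10, F = 3 and a 1/1024 second time unit
 * give (1 + 3/4) * 1024 / 1024 = 1.75 seconds.
 */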
int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	unsigned long long msr;
	int cpu;

	if (!do_rapl)
		return 0;

	/* RAPL counters are per package, so print only for 1st thread/package */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		return 0;

	cpu = t->cpu_id;
	if (cpu_migrate(cpu)) {
		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
		return -1;
	}

	if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
		return -1;

	if (debug)
		fprintf(stderr, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx "
			"(%f Watts, %f Joules, %f sec.)\n", cpu, msr,
			rapl_power_units, rapl_energy_units, rapl_time_units);

	if (do_rapl & RAPL_PKG_POWER_INFO) {
		if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr))
			return -5;

		fprintf(stderr, "cpu%d: MSR_PKG_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
			cpu, msr,
			((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
			((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
			((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
			((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
	}
	if (do_rapl & RAPL_PKG) {
		if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr))
			return -9;

		fprintf(stderr, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n",
			cpu, msr, (msr >> 63) & 1 ? "": "UN");

		print_power_limit_msr(cpu, msr, "PKG Limit #1");
		fprintf(stderr, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f sec, clamp %sabled)\n",
			cpu,
			((msr >> 47) & 1) ? "EN" : "DIS",
			((msr >> 32) & 0x7FFF) * rapl_power_units,
			(1.0 + (((msr >> 54) & 0x3)/4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units,
			((msr >> 48) & 1) ? "EN" : "DIS");
	}
	if (do_rapl & RAPL_DRAM_POWER_INFO) {
		if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr))
			return -6;

		fprintf(stderr, "cpu%d: MSR_DRAM_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
			cpu, msr,
			((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
			((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
			((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
			((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
	}
	if (do_rapl & RAPL_DRAM) {
		if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr))
			return -9;
		fprintf(stderr, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n",
			cpu, msr, (msr >> 31) & 1 ? "": "UN");

		print_power_limit_msr(cpu, msr, "DRAM Limit");
	}
	if (do_rapl & RAPL_CORE_POLICY) {
		if (get_msr(cpu, MSR_PP0_POLICY, &msr))
			return -7;

		fprintf(stderr, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF);
	}
	if (do_rapl & RAPL_CORES) {
		if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
			return -9;
		fprintf(stderr, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n",
			cpu, msr, (msr >> 31) & 1 ? "": "UN");
		print_power_limit_msr(cpu, msr, "Cores Limit");
	}
	if (do_rapl & RAPL_GFX) {
		if (get_msr(cpu, MSR_PP1_POLICY, &msr))
			return -8;

		fprintf(stderr, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF);

		if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr))
			return -9;
		fprintf(stderr, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n",
			cpu, msr, (msr >> 31) & 1 ? "": "UN");
		print_power_limit_msr(cpu, msr, "GFX Limit");
	}
	return 0;
}
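
/*
 * Illustration: with a 1/8 Watt power unit, a MSR_PKG_POWER_LIMIT value
 * whose bits 14:0 are 0x2D0 (720) describes a PKG Limit #1 of
 * 720 / 8 = 90 Watts; bits 46:32 encode Limit #2 the same way.
 */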
/*
 * SNB adds support for additional MSRs:
 *
 * MSR_PKG_C7_RESIDENCY		0x000003fa
 * MSR_CORE_C7_RESIDENCY	0x000003fe
 * MSR_PKG_C2_RESIDENCY		0x0000060d
 */
int has_snb_msrs(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	switch (model) {
	case 0x2A:	/* SNB */
	case 0x2D:	/* SNB Xeon */
	case 0x3A:	/* IVB */
	case 0x3E:	/* IVB Xeon */
	case 0x3C:	/* HSW */
	case 0x3F:	/* HSX */
	case 0x45:	/* HSW */
	case 0x46:	/* HSW */
	case 0x3D:	/* BDW */
	case 0x47:	/* BDW */
	case 0x4F:	/* BDX */
	case 0x56:	/* BDX-DE */
	case 0x4E:	/* SKL */
	case 0x5E:	/* SKL */
		return 1;
	}
	return 0;
}
/*
 * HSW adds support for additional MSRs:
 *
 * MSR_PKG_C8_RESIDENCY		0x00000630
 * MSR_PKG_C9_RESIDENCY		0x00000631
 * MSR_PKG_C10_RESIDENCY	0x00000632
 */
int has_hsw_msrs(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	switch (model) {
	case 0x45:	/* HSW */
	case 0x3D:	/* BDW */
	case 0x4E:	/* SKL */
	case 0x5E:	/* SKL */
		return 1;
	}
	return 0;
}
/*
 * SKL adds support for additional MSRs:
 *
 * MSR_PKG_WEIGHTED_CORE_C0_RES		0x00000658
 * MSR_PKG_ANY_CORE_C0_RES		0x00000659
 * MSR_PKG_ANY_GFXE_C0_RES		0x0000065A
 * MSR_PKG_BOTH_CORE_GFXE_C0_RES	0x0000065B
 */
int has_skl_msrs(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	switch (model) {
	case 0x4E:	/* SKL */
	case 0x5E:	/* SKL */
		return 1;
	}
	return 0;
}
int is_slm(unsigned int family, unsigned int model)
{
	switch (model) {
	case 0x37:	/* BYT */
	case 0x4D:	/* AVN */
		return 1;
	}
	return 0;
}

int is_knl(unsigned int family, unsigned int model)
{
	switch (model) {
	case 0x57:	/* KNL */
		return 1;
	}
	return 0;
}
unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model)
{
	if (is_knl(family, model))
		return 1024;
	return 1;
}

#define SLM_BCLK_FREQS 5
double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};

double slm_bclk(void)
{
	unsigned long long msr = 3;
	unsigned int i;
	double freq;

	if (get_msr(base_cpu, MSR_FSB_FREQ, &msr))
		fprintf(stderr, "SLM BCLK: unknown\n");

	i = msr & 0xf;
	if (i >= SLM_BCLK_FREQS) {
		fprintf(stderr, "SLM BCLK[%d] invalid\n", i);
		i = 3;
	}
	freq = slm_freq_table[i];

	fprintf(stderr, "SLM BCLK: %.1f MHz\n", freq);

	return freq;
}
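
/*
 * Example: MSR_FSB_FREQ & 0xf == 1 selects slm_freq_table[1],
 * i.e. a 100.0 MHz bus clock; out-of-range encodings fall back to index 3.
 */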
double discover_bclk(unsigned int family, unsigned int model)
{
	if (has_snb_msrs(family, model))
		return 100.00;
	else if (is_slm(family, model))
		return slm_bclk();
	else
		return 133.33;
}
/*
 * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
 * the Thermal Control Circuit (TCC) activates.
 * This is usually equal to tjMax.
 *
 * Older processors do not have this MSR, so there we guess,
 * but also allow cmdline over-ride with -T.
 *
 * Several MSR temperature values are in units of degrees-C
 * below this value, including the Digital Thermal Sensor (DTS),
 * Package Thermal Management Sensor (PTM), and thermal event thresholds.
 */
int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	unsigned long long msr;
	unsigned int target_c_local;
	int cpu;

	/* tcc_activation_temp is used only for dts or ptm */
	if (!(do_dts || do_ptm))
		return 0;

	/* this is a per-package concept */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		return 0;

	cpu = t->cpu_id;
	if (cpu_migrate(cpu)) {
		fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
		return -1;
	}

	if (tcc_activation_temp_override != 0) {
		tcc_activation_temp = tcc_activation_temp_override;
		fprintf(stderr, "cpu%d: Using cmdline TCC Target (%d C)\n",
			cpu, tcc_activation_temp);
		return 0;
	}

	/* Temperature Target MSR is Nehalem and newer only */
	if (!do_nhm_platform_info)
		goto guess;

	if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
		goto guess;

	target_c_local = (msr >> 16) & 0xFF;

	if (debug)
		fprintf(stderr, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
			cpu, msr, target_c_local);

	if (!target_c_local)
		goto guess;

	tcc_activation_temp = target_c_local;

	return 0;

guess:
	tcc_activation_temp = TJMAX_DEFAULT;
	fprintf(stderr, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n",
		cpu, tcc_activation_temp);

	return 0;
}
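
/*
 * Example: MSR_IA32_TEMPERATURE_TARGET = 0x00641400 carries 0x64 (100)
 * in bits 23:16, so tcc_activation_temp becomes 100 C without needing
 * the -T override.
 */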
void process_cpuid()
{
	unsigned int eax, ebx, ecx, edx, max_level;
	unsigned int fms, family, model, stepping;

	eax = ebx = ecx = edx = 0;

	__get_cpuid(0, &max_level, &ebx, &ecx, &edx);

	if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
		genuine_intel = 1;

	if (debug)
		fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
			(char *)&ebx, (char *)&edx, (char *)&ecx);

	__get_cpuid(1, &fms, &ebx, &ecx, &edx);
	family = (fms >> 8) & 0xf;
	model = (fms >> 4) & 0xf;
	stepping = fms & 0xf;
	if (family == 6 || family == 0xf)
		model += ((fms >> 16) & 0xf) << 4;

	if (debug)
		fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
			max_level, family, model, stepping, family, model, stepping);
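
	/*
	 * Example of the decode above (hypothetical part): fms = 0x000406E3
	 * gives family 6, base model 0xE, extended model 4, stepping 3,
	 * so model = 0xE + (4 << 4) = 0x4E (SKL).
	 */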
	if (!(edx & (1 << 5)))
		errx(1, "CPUID: no MSR");

	/*
	 * check max extended function levels of CPUID.
	 * This is needed to check for invariant TSC.
	 * This check is valid for both Intel and AMD.
	 */
	ebx = ecx = edx = 0;
	__get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);

	if (max_level >= 0x80000007) {
		/*
		 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
		 * this check is valid for both Intel and AMD
		 */
		__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
		has_invariant_tsc = edx & (1 << 8);
	}

	/*
	 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
	 * this check is valid for both Intel and AMD
	 */
	__get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
	has_aperf = ecx & (1 << 0);
	do_dts = eax & (1 << 0);
	do_ptm = eax & (1 << 6);
	has_epb = ecx & (1 << 3);

	if (debug)
		fprintf(stderr, "CPUID(6): %sAPERF, %sDTS, %sPTM, %sEPB\n",
			has_aperf ? "" : "No ",
			do_dts ? "" : "No ",
			do_ptm ? "" : "No ",
			has_epb ? "" : "No ");
	if (max_level > 0x15) {
		unsigned int eax_crystal;
		unsigned int ebx_tsc;

		/*
		 * CPUID 15H TSC/Crystal ratio, possibly Crystal Hz
		 */
		eax_crystal = ebx_tsc = crystal_hz = edx = 0;
		__get_cpuid(0x15, &eax_crystal, &ebx_tsc, &crystal_hz, &edx);

		if (ebx_tsc != 0) {
			if (debug && (ebx != 0))
				fprintf(stderr, "CPUID(0x15): eax_crystal: %d ebx_tsc: %d ecx_crystal_hz: %d\n",
					eax_crystal, ebx_tsc, crystal_hz);

			if (crystal_hz == 0)
				switch (model) {
				case 0x4E:	/* SKL */
				case 0x5E:	/* SKL */
					crystal_hz = 24000000;	/* 24 MHz */
					break;
				default:
					break;
				}

			if (crystal_hz) {
				tsc_hz = (unsigned long long) crystal_hz * ebx_tsc / eax_crystal;
				if (debug)
					fprintf(stderr, "TSC: %lld MHz (%d Hz * %d / %d / 1000000)\n",
						tsc_hz / 1000000, crystal_hz, ebx_tsc, eax_crystal);
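
				/*
				 * Illustration: a Skylake part reporting
				 * eax_crystal = 2 and ebx_tsc = 216 with the
				 * assumed 24 MHz crystal yields
				 * 24000000 * 216 / 2 = 2592 MHz TSC.
				 */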
			}
		}
	}

	if (has_aperf)
		aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model);

	do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model);
	do_snb_cstates = has_snb_msrs(family, model);
	do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2);
	do_pc3 = (pkg_cstate_limit >= PCL__3);
	do_pc6 = (pkg_cstate_limit >= PCL__6);
	do_pc7 = do_snb_cstates && (pkg_cstate_limit >= PCL__7);
	do_c8_c9_c10 = has_hsw_msrs(family, model);
	do_skl_residency = has_skl_msrs(family, model);
	do_slm_cstates = is_slm(family, model);
	do_knl_cstates = is_knl(family, model);
	bclk = discover_bclk(family, model);

	rapl_probe(family, model);
	perf_limit_reasons_probe(family, model);

	if (debug)
		dump_cstate_pstate_config_info();

	return;
}
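
/*
 * Note: the do_pc2/do_pc3/do_pc6/do_pc7 flags set above are gated by the
 * probed package C-state limit, so e.g. do_pc6 stays clear when the
 * platform caps package C-states at PC3 or shallower.
 */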
2782 "Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n"
2784 "Turbostat forks the specified COMMAND and prints statistics\n"
2785 "when COMMAND completes.\n"
2786 "If no COMMAND is specified, turbostat wakes every 5-seconds\n"
2787 "to print statistics, until interrupted.\n"
2788 "--debug run in \"debug\" mode\n"
2789 "--interval sec Override default 5-second measurement interval\n"
2790 "--help print this help message\n"
2791 "--counter msr print 32-bit counter at address \"msr\"\n"
2792 "--Counter msr print 64-bit Counter at address \"msr\"\n"
2793 "--msr msr print 32-bit value at address \"msr\"\n"
2794 "--MSR msr print 64-bit Value at address \"msr\"\n"
2795 "--version print version information\n"
2797 "For more help, run \"man turbostat\"\n");
/*
 * in /dev/cpu/ return success for names that are numbers,
 * i.e. filter out ".", "..", "microcode".
 */
int dir_filter(const struct dirent *dirp)
{
	if (isdigit(dirp->d_name[0]))
		return 1;
	else
		return 0;
}

int open_dev_cpu_msr(int dummy1)
{
	return 0;
}
void topology_probe()
{
	int i;
	int max_core_id = 0;
	int max_package_id = 0;
	int max_siblings = 0;
	struct cpu_topology {
		int core_id;
		int physical_package_id;
	} *cpus;

	/* Initialize num_cpus, max_cpu_num */
	topo.num_cpus = 0;
	topo.max_cpu_num = 0;
	for_all_proc_cpus(count_cpus);
	if (!summary_only && topo.num_cpus > 1)
		show_cpu = 1;

	if (debug > 1)
		fprintf(stderr, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);

	cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology));
	if (cpus == NULL)
		err(1, "calloc cpus");

	/*
	 * Allocate and initialize cpu_present_set
	 */
	cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1));
	if (cpu_present_set == NULL)
		err(3, "CPU_ALLOC");
	cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
	CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
	for_all_proc_cpus(mark_cpu_present);

	/*
	 * Allocate and initialize cpu_affinity_set
	 */
	cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
	if (cpu_affinity_set == NULL)
		err(3, "CPU_ALLOC");
	cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
	CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);

	/*
	 * For online cpus,
	 * find max_core_id, max_package_id
	 */
	for (i = 0; i <= topo.max_cpu_num; ++i) {
		int siblings;

		if (cpu_is_not_present(i)) {
			if (debug > 1)
				fprintf(stderr, "cpu%d NOT PRESENT\n", i);
			continue;
		}
		cpus[i].core_id = get_core_id(i);
		if (cpus[i].core_id > max_core_id)
			max_core_id = cpus[i].core_id;

		cpus[i].physical_package_id = get_physical_package_id(i);
		if (cpus[i].physical_package_id > max_package_id)
			max_package_id = cpus[i].physical_package_id;

		siblings = get_num_ht_siblings(i);
		if (siblings > max_siblings)
			max_siblings = siblings;
		if (debug > 1)
			fprintf(stderr, "cpu %d pkg %d core %d\n",
				i, cpus[i].physical_package_id, cpus[i].core_id);
	}
	topo.num_cores_per_pkg = max_core_id + 1;
	if (debug > 1)
		fprintf(stderr, "max_core_id %d, sizing for %d cores per package\n",
			max_core_id, topo.num_cores_per_pkg);
	if (debug && !summary_only && topo.num_cores_per_pkg > 1)
		show_core = 1;

	topo.num_packages = max_package_id + 1;
	if (debug > 1)
		fprintf(stderr, "max_package_id %d, sizing for %d packages\n",
			max_package_id, topo.num_packages);
	if (debug && !summary_only && topo.num_packages > 1)
		show_pkg = 1;

	topo.num_threads_per_core = max_siblings;
	if (debug > 1)
		fprintf(stderr, "max_siblings %d\n", max_siblings);

	free(cpus);
}
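
/*
 * Sizing example: a 2-package system with 4 cores per package and
 * 2 HT siblings per core allocates 2 * 4 * 2 = 16 thread_data slots
 * in allocate_counters() below.
 */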
void allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p)
{
	int i;

	*t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg *
		topo.num_packages, sizeof(struct thread_data));
	if (*t == NULL)
		goto error;

	for (i = 0; i < topo.num_threads_per_core *
		topo.num_cores_per_pkg * topo.num_packages; i++)
		(*t)[i].cpu_id = -1;

	*c = calloc(topo.num_cores_per_pkg * topo.num_packages,
		sizeof(struct core_data));
	if (*c == NULL)
		goto error;

	for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++)
		(*c)[i].core_id = -1;

	*p = calloc(topo.num_packages, sizeof(struct pkg_data));
	if (*p == NULL)
		goto error;

	for (i = 0; i < topo.num_packages; i++)
		(*p)[i].package_id = i;

	return;
error:
	err(1, "calloc counters");
}
/*
 * set cpu_id, core_num, pkg_num
 * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE
 *
 * increment topo.num_cores when 1st core in pkg seen
 */
void init_counter(struct thread_data *thread_base, struct core_data *core_base,
	struct pkg_data *pkg_base, int thread_num, int core_num,
	int pkg_num, int cpu_id)
{
	struct thread_data *t;
	struct core_data *c;
	struct pkg_data *p;

	t = GET_THREAD(thread_base, thread_num, core_num, pkg_num);
	c = GET_CORE(core_base, core_num, pkg_num);
	p = GET_PKG(pkg_base, pkg_num);

	t->cpu_id = cpu_id;

	if (thread_num == 0) {
		t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
		if (cpu_is_first_core_in_package(cpu_id))
			t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
	}

	c->core_id = core_num;
	p->package_id = pkg_num;
}
int initialize_counters(int cpu_id)
{
	int my_thread_id, my_core_id, my_package_id;

	my_package_id = get_physical_package_id(cpu_id);
	my_core_id = get_core_id(cpu_id);
	my_thread_id = get_cpu_position_in_core(cpu_id);
	if (!my_thread_id)
		topo.num_cores++;

	init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
	init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
	return 0;
}
void allocate_output_buffer()
{
	output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
	outp = output_buffer;
	if (outp == NULL)
		err(-1, "calloc output buffer");
}
void setup_all_buffers(void)
{
	topology_probe();
	allocate_counters(&thread_even, &core_even, &package_even);
	allocate_counters(&thread_odd, &core_odd, &package_odd);
	allocate_output_buffer();
	for_all_proc_cpus(initialize_counters);
}

void set_base_cpu(void)
{
	base_cpu = sched_getcpu();
	if (base_cpu < 0)
		err(-ENODEV, "No valid cpus found");

	if (debug > 1)
		fprintf(stderr, "base_cpu = %d\n", base_cpu);
}
void turbostat_init()
{
	setup_all_buffers();
	set_base_cpu();
	check_dev_msr();
	check_permissions();
	process_cpuid();

	if (debug)
		for_all_cpus(print_epb, ODD_COUNTERS);
	if (debug)
		for_all_cpus(print_perf_limit, ODD_COUNTERS);
	if (debug)
		for_all_cpus(print_rapl, ODD_COUNTERS);

	for_all_cpus(set_temperature_target, ODD_COUNTERS);

	if (debug)
		for_all_cpus(print_thermal, ODD_COUNTERS);
}
int fork_it(char **argv)
{
	pid_t child_pid;
	int status;

	status = for_all_cpus(get_counters, EVEN_COUNTERS);
	if (status)
		exit(status);
	/* clear affinity side-effect of get_counters() */
	sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
	gettimeofday(&tv_even, (struct timezone *)NULL);

	child_pid = fork();
	if (!child_pid) {
		/* child */
		execvp(argv[0], argv);
	} else {
		/* parent */
		if (child_pid == -1)
			err(1, "fork");

		signal(SIGINT, SIG_IGN);
		signal(SIGQUIT, SIG_IGN);
		if (waitpid(child_pid, &status, 0) == -1)
			err(status, "waitpid");
	}
	/*
	 * n.b. fork_it() does not check for errors from for_all_cpus()
	 * because re-starting is problematic when forking
	 */
	for_all_cpus(get_counters, ODD_COUNTERS);
	gettimeofday(&tv_odd, (struct timezone *)NULL);
	timersub(&tv_odd, &tv_even, &tv_delta);
	for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS);
	compute_average(EVEN_COUNTERS);
	format_all_counters(EVEN_COUNTERS);

	fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);

	return status;
}
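
/*
 * fork_it() takes one EVEN_COUNTERS snapshot before the child runs and one
 * ODD_COUNTERS snapshot after it exits; delta_cpu() then reports the
 * difference, so the printed interval covers exactly the child's lifetime.
 */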
int get_and_dump_counters(void)
{
	int status;

	status = for_all_cpus(get_counters, ODD_COUNTERS);
	if (status)
		return status;

	status = for_all_cpus(dump_counters, ODD_COUNTERS);

	return status;
}
void print_version() {
	fprintf(stderr, "turbostat version 4.7 17-June, 2015"
		" - Len Brown <lenb@kernel.org>\n");
}
void cmdline(int argc, char **argv)
{
	int opt;
	int option_index = 0;
	static struct option long_options[] = {
		{"Counter",	required_argument,	0, 'C'},
		{"counter",	required_argument,	0, 'c'},
		{"Dump",	no_argument,		0, 'D'},
		{"debug",	no_argument,		0, 'd'},
		{"interval",	required_argument,	0, 'i'},
		{"help",	no_argument,		0, 'h'},
		{"Joules",	no_argument,		0, 'J'},
		{"MSR",		required_argument,	0, 'M'},
		{"msr",		required_argument,	0, 'm'},
		{"Package",	no_argument,		0, 'p'},
		{"processor",	no_argument,		0, 'p'},
		{"Summary",	no_argument,		0, 'S'},
		{"TCC",		required_argument,	0, 'T'},
		{"version",	no_argument,		0, 'v' },
		{0,		0,			0,  0 }
	};

	while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:PpST:v",
				long_options, &option_index)) != -1) {
		switch (opt) {
		case 'C':
			sscanf(optarg, "%x", &extra_delta_offset64);
			break;
		case 'c':
			sscanf(optarg, "%x", &extra_delta_offset32);
			break;
		case 'i':
			interval_sec = atoi(optarg);
			break;
		case 'M':
			sscanf(optarg, "%x", &extra_msr_offset64);
			break;
		case 'm':
			sscanf(optarg, "%x", &extra_msr_offset32);
			break;
		case 'T':
			tcc_activation_temp_override = atoi(optarg);
			break;
		}
	}
}
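
/*
 * Note: the --counter/--Counter/--msr/--MSR arguments are parsed with "%x",
 * so MSR addresses are given in hex, e.g. "--MSR 0x1A0" (an illustrative address).
 */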
int main(int argc, char **argv)
{
	cmdline(argc, argv);

	if (debug)
		print_version();

	turbostat_init();

	/* dump counters and exit */
	if (dump_only)
		return get_and_dump_counters();

	/*
	 * if any params left, it must be a command to fork
	 */
	if (argc - optind)
		return fork_it(argv + optind);
	else
		turbostat_loop();

	return 0;
}