/*
 * turbostat -- Log CPU frequency and C-state residency
 * on modern Intel turbo-capable processors for collectd.
 *
 * Based on the 'turbostat' tool of the Linux kernel, found at
 * linux/tools/power/x86/turbostat/turbostat.c:
 *
 * Copyright (c) 2013 Intel Corporation.
 * Len Brown <len.brown@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Ported to collectd by Vincent Brillault <git@lerya.net>
 */
/*
 * _GNU_SOURCE is required because of the following functions:
 * CPU_ISSET_S, CPU_ZERO_S, CPU_SET_S, CPU_ALLOC, CPU_ALLOC_SIZE,
 * CPU_FREE, sched_getaffinity and sched_setaffinity.
 */
#define _GNU_SOURCE
#include "collectd.h"
#include "common.h"
#include "plugin.h"

#include <asm/msr-index.h>
#include <cpuid.h>
#include <fcntl.h>
#include <limits.h>
#include <sched.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <unistd.h>
#define PLUGIN_NAME "turbostat"

static const char *proc_stat = "/proc/stat";
/*
 * If set, aperf_mperf_unstable disables a/mperf based stats.
 * This includes: C0 & C1 states, frequency
 *
 * This value is automatically set if mperf or aperf decreases
 * between two samples.
 */
static _Bool aperf_mperf_unstable;
static unsigned int do_core_cstate;
static unsigned int do_pkg_cstate;
static unsigned int do_rapl;
static unsigned int do_dts;
static unsigned int do_ptm;
static unsigned int tcc_activation_temp;
static double rapl_energy_units;
#define RAPL_PKG (1 << 0)
    /* 0x610 MSR_PKG_POWER_LIMIT */
    /* 0x611 MSR_PKG_ENERGY_STATUS */
#define RAPL_PKG_PERF_STATUS (1 << 1)
    /* 0x613 MSR_PKG_PERF_STATUS */
#define RAPL_PKG_POWER_INFO (1 << 2)
    /* 0x614 MSR_PKG_POWER_INFO */

#define RAPL_DRAM (1 << 3)
    /* 0x618 MSR_DRAM_POWER_LIMIT */
    /* 0x619 MSR_DRAM_ENERGY_STATUS */
    /* 0x61c MSR_DRAM_POWER_INFO */
#define RAPL_DRAM_PERF_STATUS (1 << 4)
    /* 0x61b MSR_DRAM_PERF_STATUS */

#define RAPL_CORES (1 << 5)
    /* 0x638 MSR_PP0_POWER_LIMIT */
    /* 0x639 MSR_PP0_ENERGY_STATUS */
#define RAPL_CORE_POLICY (1 << 6)
    /* 0x63a MSR_PP0_POLICY */

#define RAPL_GFX (1 << 7)
    /* 0x640 MSR_PP1_POWER_LIMIT */
    /* 0x641 MSR_PP1_ENERGY_STATUS */
    /* 0x642 MSR_PP1_POLICY */
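/*
 * Illustrative example: if probe_cpu() below sets
 * do_rapl = RAPL_PKG | RAPL_CORES, then (do_rapl & RAPL_DRAM) == 0 and
 * get_counters() skips the DRAM energy MSR, while MSR_PKG_ENERGY_STATUS
 * and MSR_PP0_ENERGY_STATUS are read and their values submitted.
 */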
#define TJMAX_DEFAULT 100
cpu_set_t *cpu_present_set, *cpu_affinity_set, *cpu_saved_affinity_set;
size_t cpu_present_setsize, cpu_affinity_setsize, cpu_saved_affinity_setsize;
struct thread_data {
    unsigned long long tsc;
    unsigned long long aperf;
    unsigned long long mperf;
    unsigned long long c1;
    unsigned int smi_count;
    unsigned int cpu_id;
    unsigned int flags;
#define CPU_IS_FIRST_THREAD_IN_CORE 0x2
#define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4
} *thread_even, *thread_odd;
struct core_data {
    unsigned long long c3;
    unsigned long long c6;
    unsigned long long c7;
    unsigned int core_temp_c;
    unsigned int core_id;
} *core_even, *core_odd;
struct pkg_data {
    unsigned long long pc2;
    unsigned long long pc3;
    unsigned long long pc6;
    unsigned long long pc7;
    unsigned long long pc8;
    unsigned long long pc9;
    unsigned long long pc10;
    unsigned int package_id;
    unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */
    unsigned int energy_dram; /* MSR_DRAM_ENERGY_STATUS */
    unsigned int energy_cores; /* MSR_PP0_ENERGY_STATUS */
    unsigned int energy_gfx; /* MSR_PP1_ENERGY_STATUS */
    unsigned int rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */
    unsigned int rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */
    unsigned int tcc_activation_temp;
    unsigned int pkg_temp_c;
} *package_even, *package_odd;
#define ODD_COUNTERS thread_odd, core_odd, package_odd
#define EVEN_COUNTERS thread_even, core_even, package_even
static _Bool is_even = 1;

static _Bool allocated = 0;
static _Bool initialized = 0;
#define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
    (thread_base + (pkg_no) * topo.num_cores_per_pkg * \
     topo.num_threads_per_core + \
     (core_no) * topo.num_threads_per_core + (thread_no))
#define GET_CORE(core_base, core_no, pkg_no) \
    (core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no))
#define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
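/*
 * Illustrative example (hypothetical topology, not taken from a real
 * machine): with 4 cores per package and 2 threads per core,
 * GET_THREAD(base, 1, 2, 1) resolves to base + 1*4*2 + 2*2 + 1, i.e.
 * base + 13: the counters form one flat array in package/core/thread order.
 */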
struct {
    int num_packages;
    int num_cpus;
    int max_cpu_num;
    int num_cores_per_pkg;
    int num_threads_per_core;
} topo;

struct timeval tv_even, tv_odd, tv_delta;
enum return_values {
    OK = 0,
    ERR_CPU_MIGRATE,
    ERR_CPU_SAVE_SCHED_AFFINITY,
    ERR_MSR_IA32_APERF,
    ERR_MSR_IA32_MPERF,
    ERR_MSR_SMI_COUNT,
    ERR_MSR_CORE_C3_RESIDENCY,
    ERR_MSR_CORE_C6_RESIDENCY,
    ERR_MSR_CORE_C7_RESIDENCY,
    ERR_MSR_IA32_THERM_STATUS,
    ERR_MSR_PKG_C3_RESIDENCY,
    ERR_MSR_PKG_C6_RESIDENCY,
    ERR_MSR_PKG_C2_RESIDENCY,
    ERR_MSR_PKG_C7_RESIDENCY,
    ERR_MSR_PKG_C8_RESIDENCY,
    ERR_MSR_PKG_C9_RESIDENCY,
    ERR_MSR_PKG_C10_RESIDENCY,
    ERR_MSR_PKG_ENERGY_STATUS,
    ERR_MSR_PKG_POWER_INFO,
    ERR_MSR_PP0_ENERGY_STATUS,
    ERR_MSR_DRAM_ENERGY_STATUS,
    ERR_MSR_PP1_ENERGY_STATUS,
    ERR_MSR_PKG_PERF_STATUS,
    ERR_MSR_DRAM_PERF_STATUS,
    ERR_MSR_IA32_PACKAGE_THERM_STATUS,
    ERR_MSR_IA32_TSC,
    ERR_CPU_NOT_PRESENT,
    ERR_NO_MSR,
    ERR_CANT_OPEN_MSR,
    ERR_CANT_OPEN_FILE,
    ERR_CANT_READ_NUMBER,
    ERR_CANT_READ_PROC_STAT,
    ERR_NO_INVARIANT_TSC,
    ERR_NO_APERF,
    ERR_CALLOC,
    ERR_CPU_ALLOC,
    ERR_NOT_ROOT,
    UNSUPPORTED_CPU,
};
static int setup_all_buffers(void);

static int
cpu_is_not_present(int cpu)
{
    return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
}
/*
 * run func(thread, core, package) in topology order
 * skip non-present cpus
 */
static int __attribute__((warn_unused_result))
for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *),
    struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
{
    int retval, pkg_no, core_no, thread_no;

    for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
        for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
            for (thread_no = 0; thread_no <
                topo.num_threads_per_core; ++thread_no) {
                struct thread_data *t;
                struct core_data *c;
                struct pkg_data *p;

                t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);

                if (cpu_is_not_present(t->cpu_id))
                    continue;

                c = GET_CORE(core_base, core_no, pkg_no);
                p = GET_PKG(pkg_base, pkg_no);

                retval = func(t, c, p);
                if (retval)
                    return retval;
            }
        }
    }
    return 0;
}
static int __attribute__((warn_unused_result))
open_msr(int cpu, _Bool multiple_read)
{
    char pathname[32];
    int fd;

    /*
     * If we need to do multiple reads, let's migrate to that CPU;
     * otherwise we would waste time calling functions on another CPU.
     *
     * If we are not yet initialized (cpu_affinity_setsize == 0),
     * we have to skip this optimisation.
     */
    if (multiple_read && cpu_affinity_setsize) {
        CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
        CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
        if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1) {
            ERROR("Could not migrate to CPU %d", cpu);
            return -ERR_CPU_MIGRATE;
        }
    }

    ssnprintf(pathname, sizeof(pathname), "/dev/cpu/%d/msr", cpu);
    fd = open(pathname, O_RDONLY);
    if (fd < 0)
        return -ERR_CANT_OPEN_MSR;
    return fd;
}
static int __attribute__((warn_unused_result))
read_msr(int fd, off_t offset, unsigned long long *msr)
{
    ssize_t retval;

    retval = pread(fd, msr, sizeof *msr, offset);

    if (retval != sizeof *msr) {
        ERROR("MSR offset 0x%llx read failed", (unsigned long long)offset);
        return -1;
    }
    return 0;
}
static int __attribute__((warn_unused_result))
get_msr(int cpu, off_t offset, unsigned long long *msr)
{
    int fd;
    int retval;

    fd = open_msr(cpu, 0);
    if (fd < 0)
        return fd;
    retval = read_msr(fd, offset, msr);
    close(fd);
    return retval;
}
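/*
 * Usage sketch (illustrative, not part of the plugin's control flow):
 * read the 64-bit TSC of CPU 0 once through the msr driver:
 *
 *   unsigned long long tsc;
 *   if (get_msr(0, MSR_IA32_TSC, &tsc) == 0)
 *       DEBUG("TSC of cpu0: %llu", tsc);
 */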
#define DELTA_WRAP32(new, old) \
    if (new > old) { \
        old = new - old; \
    } else { \
        old = 0x100000000 + new - old; \
    }
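/*
 * The RAPL status MSRs are 32-bit counters that wrap around. Worked
 * example: old = 0xFFFFFFF0 and new = 0x10 means the counter wrapped,
 * so the real delta is 0x100000000 + 0x10 - 0xFFFFFFF0 = 0x20 ticks.
 */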
static void
delta_package(struct pkg_data *new, struct pkg_data *old)
{
    old->pc2 = new->pc2 - old->pc2;
    old->pc3 = new->pc3 - old->pc3;
    old->pc6 = new->pc6 - old->pc6;
    old->pc7 = new->pc7 - old->pc7;
    old->pc8 = new->pc8 - old->pc8;
    old->pc9 = new->pc9 - old->pc9;
    old->pc10 = new->pc10 - old->pc10;
    old->pkg_temp_c = new->pkg_temp_c;

    DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
    DELTA_WRAP32(new->energy_cores, old->energy_cores);
    DELTA_WRAP32(new->energy_gfx, old->energy_gfx);
    DELTA_WRAP32(new->energy_dram, old->energy_dram);
    DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status);
    DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status);
}
static void
delta_core(struct core_data *new, struct core_data *old)
{
    old->c3 = new->c3 - old->c3;
    old->c6 = new->c6 - old->c6;
    old->c7 = new->c7 - old->c7;
    old->core_temp_c = new->core_temp_c;
}
static int __attribute__((warn_unused_result))
delta_thread(struct thread_data *new, struct thread_data *old,
    struct core_data *core_delta)
{
    old->tsc = new->tsc - old->tsc;

    /* check for TSC < 1 Mcycles over interval */
    if (old->tsc < (1000 * 1000)) {
        WARNING("Insanely slow TSC rate, TSC stops in idle? ");
        WARNING("You can disable all c-states by booting with \"idle=poll\" ");
        WARNING("or just the deep ones with \"processor.max_cstate=1\"");
        return -1;
    }

    old->c1 = new->c1 - old->c1;

    if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
        old->aperf = new->aperf - old->aperf;
        old->mperf = new->mperf - old->mperf;
    } else {
        if (!aperf_mperf_unstable) {
            WARNING("APERF or MPERF went backwards");
            WARNING("Frequency results do not cover the entire interval");
            WARNING("Fix this by running Linux 2.6.30 or later");
        }
        aperf_mperf_unstable = 1;
    }

    /*
     * As counter collection is not atomic,
     * it is possible for mperf's non-halted cycles + idle states
     * to exceed TSC's all cycles: show c1 = 0% in that case.
     */
    if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc) {
        old->c1 = 0;
    } else {
        /* normal case, derive c1 */
        old->c1 = old->tsc - old->mperf - core_delta->c3
            - core_delta->c6 - core_delta->c7;
    }

    if (old->mperf == 0) {
        WARNING("cpu%d MPERF 0!", old->cpu_id);
        old->mperf = 1; /* divide by 0 protection */
    }

    old->smi_count = new->smi_count - old->smi_count;

    return 0;
}
static int __attribute__((warn_unused_result))
delta_cpu(struct thread_data *t, struct core_data *c,
    struct pkg_data *p, struct thread_data *t2,
    struct core_data *c2, struct pkg_data *p2)
{
    int ret;

    /* calculate core delta only for 1st thread in core */
    if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE)
        delta_core(c, c2);

    /* always calculate thread delta */
    ret = delta_thread(t, t2, c2); /* c2 is core delta */
    if (ret != 0)
        return ret;

    /* calculate package delta only for 1st core in package */
    if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)
        delta_package(p, p2);

    return 0;
}

/*
 * acquire and record local counters for that cpu
 */
static int __attribute__((warn_unused_result))
get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
    int cpu = t->cpu_id;
    unsigned long long msr;
    int msr_fd;
    int retval = 0;

    msr_fd = open_msr(cpu, 1);
    if (msr_fd < 0)
        return msr_fd;

#define READ_MSR(msr, dst) \
    do { \
        if (read_msr(msr_fd, msr, dst)) { \
            retval = -ERR_##msr; \
            goto out; \
        } \
    } while (0)

    READ_MSR(MSR_IA32_TSC, &t->tsc);

    READ_MSR(MSR_IA32_APERF, &t->aperf);
    READ_MSR(MSR_IA32_MPERF, &t->mperf);

    READ_MSR(MSR_SMI_COUNT, &msr);
    t->smi_count = msr & 0xFFFFFFFF;

    /* collect core counters only for 1st thread in core */
    if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) {
        retval = 0;
        goto out;
    }

    if (do_core_cstate & (1 << 3))
        READ_MSR(MSR_CORE_C3_RESIDENCY, &c->c3);
    if (do_core_cstate & (1 << 6))
        READ_MSR(MSR_CORE_C6_RESIDENCY, &c->c6);
    if (do_core_cstate & (1 << 7))
        READ_MSR(MSR_CORE_C7_RESIDENCY, &c->c7);

    if (do_dts) {
        READ_MSR(MSR_IA32_THERM_STATUS, &msr);
        c->core_temp_c = p->tcc_activation_temp - ((msr >> 16) & 0x7F);
    }

    /* collect package counters only for 1st core in package */
    if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) {
        retval = 0;
        goto out;
    }

    if (do_pkg_cstate & (1 << 2))
        READ_MSR(MSR_PKG_C2_RESIDENCY, &p->pc2);
    if (do_pkg_cstate & (1 << 3))
        READ_MSR(MSR_PKG_C3_RESIDENCY, &p->pc3);
    if (do_pkg_cstate & (1 << 6))
        READ_MSR(MSR_PKG_C6_RESIDENCY, &p->pc6);
    if (do_pkg_cstate & (1 << 7))
        READ_MSR(MSR_PKG_C7_RESIDENCY, &p->pc7);
    if (do_pkg_cstate & (1 << 8))
        READ_MSR(MSR_PKG_C8_RESIDENCY, &p->pc8);
    if (do_pkg_cstate & (1 << 9))
        READ_MSR(MSR_PKG_C9_RESIDENCY, &p->pc9);
    if (do_pkg_cstate & (1 << 10))
        READ_MSR(MSR_PKG_C10_RESIDENCY, &p->pc10);

    if (do_rapl & RAPL_PKG) {
        READ_MSR(MSR_PKG_ENERGY_STATUS, &msr);
        p->energy_pkg = msr & 0xFFFFFFFF;
    }
    if (do_rapl & RAPL_CORES) {
        READ_MSR(MSR_PP0_ENERGY_STATUS, &msr);
        p->energy_cores = msr & 0xFFFFFFFF;
    }
    if (do_rapl & RAPL_DRAM) {
        READ_MSR(MSR_DRAM_ENERGY_STATUS, &msr);
        p->energy_dram = msr & 0xFFFFFFFF;
    }
    if (do_rapl & RAPL_GFX) {
        READ_MSR(MSR_PP1_ENERGY_STATUS, &msr);
        p->energy_gfx = msr & 0xFFFFFFFF;
    }
    if (do_rapl & RAPL_PKG_PERF_STATUS) {
        READ_MSR(MSR_PKG_PERF_STATUS, &msr);
        p->rapl_pkg_perf_status = msr & 0xFFFFFFFF;
    }
    if (do_rapl & RAPL_DRAM_PERF_STATUS) {
        READ_MSR(MSR_DRAM_PERF_STATUS, &msr);
        p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
    }
    if (do_ptm) {
        READ_MSR(MSR_IA32_PACKAGE_THERM_STATUS, &msr);
        p->pkg_temp_c = p->tcc_activation_temp - ((msr >> 16) & 0x7F);
    }

out:
    close(msr_fd);
    return retval;
}
static void
free_all_buffers(void)
{
    allocated = 0;
    initialized = 0;

    CPU_FREE(cpu_present_set);
    cpu_present_set = NULL;
    cpu_present_setsize = 0;

    CPU_FREE(cpu_affinity_set);
    cpu_affinity_set = NULL;
    cpu_affinity_setsize = 0;

    CPU_FREE(cpu_saved_affinity_set);
    cpu_saved_affinity_set = NULL;
    cpu_saved_affinity_setsize = 0;

    free(thread_even); free(core_even); free(package_even);
    thread_even = NULL; core_even = NULL; package_even = NULL;
    free(thread_odd); free(core_odd); free(package_odd);
    thread_odd = NULL; core_odd = NULL; package_odd = NULL;
}
/*
 * Parse a file containing a single int.
 */
static int __attribute__((format(printf, 1, 2)))
parse_int_file(const char *fmt, ...)
{
    va_list args;
    char path[PATH_MAX];
    FILE *filep;
    int value;

    va_start(args, fmt);
    vsnprintf(path, sizeof(path), fmt, args);
    va_end(args);
    filep = fopen(path, "r");
    if (!filep) {
        ERROR("%s: open failed", path);
        return -ERR_CANT_OPEN_FILE;
    }
    if (fscanf(filep, "%d", &value) != 1) {
        ERROR("%s: failed to parse number from file", path);
        return -ERR_CANT_READ_NUMBER;
    }
    fclose(filep);
    return value;
}
/*
 * cpu_is_first_sibling_in_core(cpu)
 * return 1 if given CPU is 1st HT sibling in the core
 */
static int
cpu_is_first_sibling_in_core(int cpu)
{
    return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
}
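/*
 * Illustrative example: if cpu0 and cpu4 are HT siblings of the same core,
 * /sys/devices/system/cpu/cpu0/topology/thread_siblings_list contains
 * "0,4" on both cpus. parse_int_file() returns the leading 0, so the
 * comparison above is true for cpu0 only.
 */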
/*
 * cpu_is_first_core_in_package(cpu)
 * return 1 if given CPU is 1st core in package
 */
static int
cpu_is_first_core_in_package(int cpu)
{
    return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
}

static int
get_physical_package_id(int cpu)
{
    return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
}

static int
get_core_id(int cpu)
{
    return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
}
static int
get_num_ht_siblings(int cpu)
{
    char path[80];
    FILE *filep;
    int sib1, sib2;
    int matches;
    char character;

    ssnprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
    filep = fopen(path, "r");
    if (!filep) {
        ERROR("%s: open failed", path);
        return -ERR_CANT_OPEN_FILE;
    }
    /*
     * If the file contains a pair of numbers with a separator character:
     * 2 siblings (e.g. "1-2" or "1,4"); otherwise 1 sibling (self).
     */
    matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2);
    fclose(filep);
    if (matches == 3)
        return 2;
    return 1;
}
/*
 * run func(thread, core, package) in topology order
 * skip non-present cpus
 */
static int __attribute__((warn_unused_result))
for_all_cpus_2(int (func)(struct thread_data *, struct core_data *,
    struct pkg_data *, struct thread_data *, struct core_data *,
    struct pkg_data *), struct thread_data *thread_base,
    struct core_data *core_base, struct pkg_data *pkg_base,
    struct thread_data *thread_base2, struct core_data *core_base2,
    struct pkg_data *pkg_base2)
{
    int retval, pkg_no, core_no, thread_no;

    for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
        for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
            for (thread_no = 0; thread_no <
                topo.num_threads_per_core; ++thread_no) {
                struct thread_data *t, *t2;
                struct core_data *c, *c2;
                struct pkg_data *p, *p2;

                t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);

                if (cpu_is_not_present(t->cpu_id))
                    continue;

                t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no);

                c = GET_CORE(core_base, core_no, pkg_no);
                c2 = GET_CORE(core_base2, core_no, pkg_no);

                p = GET_PKG(pkg_base, pkg_no);
                p2 = GET_PKG(pkg_base2, pkg_no);

                retval = func(t, c, p, t2, c2, p2);
                if (retval)
                    return retval;
            }
        }
    }
    return 0;
}
/*
 * run func(cpu) on every cpu in /proc/stat
 * return max_cpu number
 */
static int __attribute__((warn_unused_result))
for_all_proc_cpus(int (func)(int))
{
    FILE *fp;
    int cpu_num;
    int retval;

    fp = fopen(proc_stat, "r");
    if (!fp) {
        ERROR("%s: open failed", proc_stat);
        return -ERR_CANT_OPEN_FILE;
    }

    retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
    if (retval != 0) {
        fclose(fp);
        ERROR("%s: failed to parse format", proc_stat);
        return -ERR_CANT_READ_PROC_STAT;
    }

    while (1) {
        retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
        if (retval != 1)
            break;

        retval = func(cpu_num);
        if (retval) {
            fclose(fp);
            return retval;
        }
    }
    fclose(fp);
    return 0;
}
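/*
 * /proc/stat starts with an aggregate "cpu" line followed by one line per
 * online CPU, e.g. (field values illustrative):
 *
 *   cpu  219067 0 2614 308722 ...
 *   cpu0 109341 0 1307 154361 ...
 *   cpu1 109726 0 1307 154361 ...
 *
 * The first fscanf() above consumes the aggregate line; the loop then
 * passes each cpuN number to func().
 */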
/*
 * remember the last one seen, it will be the max
 */
static int
count_cpus(int cpu)
{
    if (topo.max_cpu_num < cpu)
        topo.max_cpu_num = cpu;

    topo.num_cpus += 1;
    return 0;
}

static int
mark_cpu_present(int cpu)
{
    CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
    return 0;
}
static void
turbostat_submit(const char *plugin_instance,
    const char *type, const char *type_instance,
    gauge_t value)
{
    value_list_t vl = VALUE_LIST_INIT;
    value_t values[1];

    values[0].gauge = value;
    vl.values = values;
    vl.values_len = 1;
    sstrncpy(vl.host, hostname_g, sizeof(vl.host));
    sstrncpy(vl.plugin, PLUGIN_NAME, sizeof(vl.plugin));
    if (plugin_instance != NULL)
        sstrncpy(vl.plugin_instance, plugin_instance, sizeof(vl.plugin_instance));
    sstrncpy(vl.type, type, sizeof(vl.type));
    if (type_instance != NULL)
        sstrncpy(vl.type_instance, type_instance, sizeof(vl.type_instance));

    plugin_dispatch_values(&vl);
}
/*
 * column formatting convention & formats
 * package: "pk" 2 columns %2d
 * core: "cor" 3 columns %3d
 * CPU: "CPU" 3 columns %3d
 * GHz: "GHz" 3 columns %3.2
 * TSC: "TSC" 3 columns %3.2
 * SMI: "SMI" 4 columns %4d
 * percentage " %pc3" %6.2
 * Perf Status percentage: %5.2
 * "CTMP" 4 columns %4d
 */
static int
submit_counters(struct thread_data *t, struct core_data *c,
    struct pkg_data *p)
{
    char name[12];
    double interval_float;

    interval_float = tv_delta.tv_sec + tv_delta.tv_usec / 1000000.0;

    ssnprintf(name, sizeof(name), "cpu%02d", t->cpu_id);

    if (!aperf_mperf_unstable)
        turbostat_submit(name, "percent", "c0", 100.0 * t->mperf / t->tsc);
    if (!aperf_mperf_unstable)
        turbostat_submit(name, "percent", "c1", 100.0 * t->c1 / t->tsc);

    /* GHz */
    if ((!aperf_mperf_unstable) || (!(t->aperf > t->tsc || t->mperf > t->tsc)))
        turbostat_submit(NULL, "frequency", name, 1.0 * t->tsc / 1000000000 * t->aperf / t->mperf / interval_float);
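    /*
     * Worked example with illustrative numbers: a tsc delta of 3.0e9
     * cycles over a 1.0 s interval and aperf/mperf = 1.2 submit
     * 3.0e9 / 1e9 * 1.2 / 1.0 = 3.6, i.e. an effective clock of 3.6 GHz.
     */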
    /* SMI */
    turbostat_submit(NULL, "current", name, t->smi_count);

    /* submit per-core data only for 1st thread in core */
    if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
        goto done;

    ssnprintf(name, sizeof(name), "core%02d", c->core_id);

    if (do_core_cstate & (1 << 3))
        turbostat_submit(name, "percent", "c3", 100.0 * c->c3 / t->tsc);
    if (do_core_cstate & (1 << 6))
        turbostat_submit(name, "percent", "c6", 100.0 * c->c6 / t->tsc);
    if (do_core_cstate & (1 << 7))
        turbostat_submit(name, "percent", "c7", 100.0 * c->c7 / t->tsc);

    if (do_dts)
        turbostat_submit(NULL, "temperature", name, c->core_temp_c);

    /* submit per-package data only for 1st core in package */
    if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
        goto done;

    ssnprintf(name, sizeof(name), "pkg%02d", p->package_id);

    if (do_ptm)
        turbostat_submit(NULL, "temperature", name, p->pkg_temp_c);

    if (do_pkg_cstate & (1 << 2))
        turbostat_submit(name, "percent", "pc2", 100.0 * p->pc2 / t->tsc);
    if (do_pkg_cstate & (1 << 3))
        turbostat_submit(name, "percent", "pc3", 100.0 * p->pc3 / t->tsc);
    if (do_pkg_cstate & (1 << 6))
        turbostat_submit(name, "percent", "pc6", 100.0 * p->pc6 / t->tsc);
    if (do_pkg_cstate & (1 << 7))
        turbostat_submit(name, "percent", "pc7", 100.0 * p->pc7 / t->tsc);
    if (do_pkg_cstate & (1 << 8))
        turbostat_submit(name, "percent", "pc8", 100.0 * p->pc8 / t->tsc);
    if (do_pkg_cstate & (1 << 9))
        turbostat_submit(name, "percent", "pc9", 100.0 * p->pc9 / t->tsc);
    if (do_pkg_cstate & (1 << 10))
        turbostat_submit(name, "percent", "pc10", 100.0 * p->pc10 / t->tsc);

    if (do_rapl & RAPL_PKG)
        turbostat_submit(name, "power", "Pkg_W", p->energy_pkg * rapl_energy_units / interval_float);
    if (do_rapl & RAPL_CORES)
        turbostat_submit(name, "power", "Cor_W", p->energy_cores * rapl_energy_units / interval_float);
    if (do_rapl & RAPL_GFX)
        turbostat_submit(name, "power", "GFX_W", p->energy_gfx * rapl_energy_units / interval_float);
    if (do_rapl & RAPL_DRAM)
        turbostat_submit(name, "power", "RAM_W", p->energy_dram * rapl_energy_units / interval_float);

done:
    return 0;
}
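/*
 * Worked RAPL example with illustrative numbers: with rapl_energy_units =
 * 1/2^16 J, an energy_pkg delta of 655360 counter ticks over a 10 s
 * interval is 655360 / 65536 = 10 J, submitted as 10 / 10 = 1 W.
 */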
static int
turbostat_read(user_data_t *not_used)
{
    int ret;

    if (!allocated)
        if ((ret = setup_all_buffers()) < 0)
            return ret;

    if (for_all_proc_cpus(cpu_is_not_present)) {
        free_all_buffers();
        if ((ret = setup_all_buffers()) < 0)
            return ret;
        if (for_all_proc_cpus(cpu_is_not_present))
            return -ERR_CPU_NOT_PRESENT;
    }

    /* Saving the scheduling affinity, as it will be modified by get_counters */
    if (sched_getaffinity(0, cpu_saved_affinity_setsize, cpu_saved_affinity_set) != 0)
        return -ERR_CPU_SAVE_SCHED_AFFINITY;

    if (!initialized) {
        if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0)
            goto out;
        gettimeofday(&tv_even, (struct timezone *)NULL);
        is_even = 1;
        initialized = 1;
        ret = 0;
        goto out;
    }

    if (is_even) {
        if ((ret = for_all_cpus(get_counters, ODD_COUNTERS)) < 0)
            goto out;
        gettimeofday(&tv_odd, (struct timezone *)NULL);
        is_even = 0;
        timersub(&tv_odd, &tv_even, &tv_delta);
        if ((ret = for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS)) < 0)
            goto out;
        if ((ret = for_all_cpus(submit_counters, EVEN_COUNTERS)) < 0)
            goto out;
    } else {
        if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0)
            goto out;
        gettimeofday(&tv_even, (struct timezone *)NULL);
        is_even = 1;
        timersub(&tv_even, &tv_odd, &tv_delta);
        if ((ret = for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS)) < 0)
            goto out;
        if ((ret = for_all_cpus(submit_counters, ODD_COUNTERS)) < 0)
            goto out;
    }
    ret = 0;
out:
    /*
     * Restore the affinity. This might fail if the number of CPUs
     * changed, but we cannot do anything about it in that case.
     */
    (void)sched_setaffinity(0, cpu_saved_affinity_setsize, cpu_saved_affinity_set);
    return ret;
}
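/*
 * Note on the EVEN/ODD scheme above: the counters are double-buffered.
 * Each read pass fills the buffer set the previous pass left free, diffs
 * it against the other set via delta_cpu(), and submits rates over
 * tv_delta, so no extra copying of the counters is needed.
 */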
static int __attribute__((warn_unused_result))
check_dev_msr(void)
{
    struct stat sb;

    if (stat("/dev/cpu/0/msr", &sb)) {
        ERROR("no /dev/cpu/0/msr, try \"# modprobe msr\"");
        return -ERR_NO_MSR;
    }
    return 0;
}
static int __attribute__((warn_unused_result))
check_super_user(void)
{
    if (getuid() != 0) {
        ERROR("must be root");
        return -ERR_NOT_ROOT;
    }
    return 0;
}
/*
 * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
 * the Thermal Control Circuit (TCC) activates.
 * This is usually equal to tjMax.
 *
 * Older processors do not have this MSR, so there we guess,
 * but also allow a configuration override with TCCActivationTemp.
 *
 * Several MSR temperature values are in units of degrees-C
 * below this value, including the Digital Thermal Sensor (DTS),
 * Package Thermal Management Sensor (PTM), and thermal event thresholds.
 */
static int __attribute__((warn_unused_result))
set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
    unsigned long long msr;
    unsigned int target_c_local;

    /* tcc_activation_temp is used only for dts or ptm */
    if (!(do_dts || do_ptm))
        return 0;

    /* this is a per-package concept */
    if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
        return 0;

    if (tcc_activation_temp != 0) {
        p->tcc_activation_temp = tcc_activation_temp;
        return 0;
    }

    if (get_msr(t->cpu_id, MSR_IA32_TEMPERATURE_TARGET, &msr))
        goto guess;

    target_c_local = (msr >> 16) & 0x7F;

    if (target_c_local < 85 || target_c_local > 127)
        goto guess;

    p->tcc_activation_temp = target_c_local;
    return 0;

guess:
    p->tcc_activation_temp = TJMAX_DEFAULT;
    WARNING("cpu%d: Guessing tjMax %d C, Please use TCCActivationTemp to specify",
        t->cpu_id, p->tcc_activation_temp);

    return 0;
}
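/*
 * Worked example with illustrative numbers: with tcc_activation_temp =
 * 100 C and an IA32_THERM_STATUS digital readout of 40 in bits 22:16,
 * get_counters() reports a core temperature of 100 - 40 = 60 C.
 */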
/*
 * Identify the functionality of the CPU
 */
static int __attribute__((warn_unused_result))
probe_cpu(void)
{
    unsigned int eax, ebx, ecx, edx, max_level;
    unsigned int fms, family, model;
    unsigned long long msr;

    /*
     * CPUID(0):
     * - EAX: Maximum Input Value for Basic CPUID Information
     * - EBX: "Genu" (0x756e6547)
     * - EDX: "ineI" (0x49656e69)
     * - ECX: "ntel" (0x6c65746e)
     */
    max_level = ebx = ecx = edx = 0;
    __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
    if (ebx != 0x756e6547 || edx != 0x49656e69 || ecx != 0x6c65746e) {
        ERROR("Unsupported CPU");
        return -UNSUPPORTED_CPU;
    }
    /*
     * CPUID(1):
     * - EAX: Version Information: Type, Family, Model, and Stepping ID
     *   + 4-7: Model ID
     *   + 8-11: Family ID
     *   + 12-13: Processor type
     *   + 16-19: Extended Model ID
     *   + 20-27: Extended Family ID
     * - EDX: Feature Information:
     *   + 5: Support for MSR read/write operations
     */
    fms = ebx = ecx = edx = 0;
    __get_cpuid(1, &fms, &ebx, &ecx, &edx);
    family = (fms >> 8) & 0xf;
    model = (fms >> 4) & 0xf;
    if (family == 0xf)
        family += (fms >> 20) & 0xf;
    if (family == 6 || family == 0xf)
        model += ((fms >> 16) & 0xf) << 4;
    if (!(edx & (1 << 5))) {
        ERROR("CPUID: no MSR");
        return -ERR_NO_MSR;
    }
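    /*
     * Worked example (illustrative): an IvyBridge Xeon reports
     * fms = 0x000306E4, so family = (fms >> 8) & 0xf = 6 and
     * model = (((fms >> 16) & 0xf) << 4) + ((fms >> 4) & 0xf) = 0x3E,
     * matching the "case 0x3E" below.
     */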
    /*
     * CPUID(0x80000000):
     * - EAX: Maximum Input Value for Extended Function CPUID Information
     *
     * This allows us to verify if CPUID(0x80000007) can be called
     *
     * This check is valid for both Intel and AMD.
     */
    max_level = ebx = ecx = edx = 0;
    __get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
    if (max_level < 0x80000007) {
        ERROR("CPUID: no invariant TSC (max_level 0x%x)", max_level);
        return -ERR_NO_INVARIANT_TSC;
    }

    /*
     * CPUID(0x80000007):
     * - EDX:
     *   + 8: Invariant TSC available if set
     *
     * This check is valid for both Intel and AMD
     */
    __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
    if (!(edx & (1 << 8))) {
        ERROR("No invariant TSC");
        return -ERR_NO_INVARIANT_TSC;
    }
    /*
     * CPUID(6):
     * - EAX:
     *   + 0: Digital temperature sensor is supported if set
     *   + 6: Package thermal management is supported if set
     * - ECX:
     *   + 0: Hardware Coordination Feedback Capability (Presence of IA32_MPERF and IA32_APERF)
     *   + 3: The processor supports performance-energy bias preference if set.
     *     It also implies the presence of a new architectural MSR called IA32_ENERGY_PERF_BIAS
     *
     * This check is valid for both Intel and AMD
     */
    __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
    do_dts = eax & (1 << 0);
    do_ptm = eax & (1 << 6);
    if (!(ecx & (1 << 0))) {
        ERROR("No APERF");
        return -ERR_NO_APERF;
    }
    /*
     * Enable or disable C states depending on the model and family
     */
    switch (model) {
    /* Atom (partial) */
    case 0x27:
        do_core_cstate = 0;
        do_pkg_cstate = (1 << 2) | (1 << 4) | (1 << 6);
        break;
    /* Silvermont */
    case 0x37: /* BYT */
    case 0x4D: /* AVN */
        do_core_cstate = (1 << 1) | (1 << 6);
        do_pkg_cstate = (1 << 6);
        break;
    /* Nehalem */
    case 0x1A: /* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */
    case 0x1E: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
    case 0x1F: /* Core i7 and i5 Processor - Nehalem */
    case 0x2E: /* Nehalem-EX Xeon - Beckton */
        do_core_cstate = (1 << 3) | (1 << 6);
        do_pkg_cstate = (1 << 3) | (1 << 6) | (1 << 7);
        break;
    /* Westmere */
    case 0x25: /* Westmere Client - Clarkdale, Arrandale */
    case 0x2C: /* Westmere EP - Gulftown */
    case 0x2F: /* Westmere-EX Xeon - Eagleton */
        do_core_cstate = (1 << 3) | (1 << 6);
        do_pkg_cstate = (1 << 3) | (1 << 6) | (1 << 7);
        break;
    /* Sandy Bridge */
    case 0x2A: /* SNB */
    case 0x2D: /* SNB Xeon */
        do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
        do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);
        break;
    /* Ivy Bridge */
    case 0x3A: /* IVB */
    case 0x3E: /* IVB Xeon */
        do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
        do_pkg_cstate = (1 << 3) | (1 << 6) | (1 << 7);
        break;
    /* Haswell */
    case 0x3C: /* HSW */
    case 0x3F: /* HSW */
    case 0x46: /* HSW */
        do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
        do_pkg_cstate = (1 << 3) | (1 << 6) | (1 << 7);
        break;
    case 0x45: /* HSW */
        do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
        do_pkg_cstate = (1 << 3) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10);
        break;
    /* Broadwell */
    case 0x4F: /* BDW */
    case 0x56: /* BDX-DE */
        do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
        do_pkg_cstate = (1 << 3) | (1 << 6) | (1 << 7);
        break;
    case 0x3D: /* BDW */
        do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
        do_pkg_cstate = (1 << 3) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10);
        break;
    default:
        ERROR("Unsupported CPU");
        return -UNSUPPORTED_CPU;
    }
    /*
     * Enable or disable RAPL depending on the model
     */
    switch (model) {
    case 0x2A: /* SNB */
    case 0x3A: /* IVB */
    case 0x3C: /* HSW */
    case 0x45: /* HSW */
    case 0x46: /* HSW */
    case 0x3D: /* BDW */
        do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_PKG_POWER_INFO | RAPL_GFX;
        break;
    case 0x3F: /* HSW Xeon */
        do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM | RAPL_DRAM_PERF_STATUS;
        break;
    case 0x2D: /* SNB Xeon */
    case 0x3E: /* IVB Xeon */
        do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_PKG_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM | RAPL_DRAM_PERF_STATUS;
        break;
    case 0x37: /* BYT */
    case 0x4D: /* AVN */
        do_rapl = RAPL_PKG | RAPL_CORES;
        break;
    default:
        ERROR("Unsupported CPU");
        return -UNSUPPORTED_CPU;
    }

    if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr))
        return 0;

    if (model == 0x37 || model == 0x4D)
        /* Atoms report the energy unit in microjoules */
        rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
    else
        rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));

    return 0;
}
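/*
 * Worked example (illustrative): MSR_RAPL_POWER_UNIT bits 12:8 commonly
 * read 0x10 (16) on SNB through HSW parts, giving rapl_energy_units =
 * 1 / 2^16 J, i.e. roughly 15.3 microjoules per energy-counter tick.
 */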
static int __attribute__((warn_unused_result))
topology_probe(void)
{
    int i;
    int ret;
    int siblings;
    int max_core_id = 0;
    int max_package_id = 0;
    int max_siblings = 0;
    struct cpu_topology {
        int core_id;
        int physical_package_id;
    } *cpus;

    /* Initialize num_cpus, max_cpu_num */
    topo.num_cpus = 0;
    topo.max_cpu_num = 0;
    ret = for_all_proc_cpus(count_cpus);
    if (ret < 0)
        return ret;

    DEBUG("num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);

    cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology));
    if (cpus == NULL) {
        ERROR("calloc cpus");
        return -ERR_CALLOC;
    }

    /*
     * Allocate and initialize cpu_present_set
     */
    cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1));
    if (cpu_present_set == NULL) {
        ERROR("CPU_ALLOC");
        return -ERR_CPU_ALLOC;
    }
    cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
    CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
    ret = for_all_proc_cpus(mark_cpu_present);
    if (ret < 0)
        return ret;

    /*
     * Allocate and initialize cpu_affinity_set
     */
    cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
    if (cpu_affinity_set == NULL) {
        ERROR("CPU_ALLOC");
        return -ERR_CPU_ALLOC;
    }
    cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
    CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);

    /*
     * Allocate and initialize cpu_saved_affinity_set
     */
    cpu_saved_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
    if (cpu_saved_affinity_set == NULL) {
        ERROR("CPU_ALLOC");
        return -ERR_CPU_ALLOC;
    }
    cpu_saved_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
    CPU_ZERO_S(cpu_saved_affinity_setsize, cpu_saved_affinity_set);

    /*
     * find max_core_id, max_package_id
     */
    for (i = 0; i <= topo.max_cpu_num; ++i) {
        if (cpu_is_not_present(i)) {
            WARNING("cpu%d NOT PRESENT", i);
            continue;
        }
        cpus[i].core_id = get_core_id(i);
        if (cpus[i].core_id < 0)
            return cpus[i].core_id;
        if (cpus[i].core_id > max_core_id)
            max_core_id = cpus[i].core_id;

        cpus[i].physical_package_id = get_physical_package_id(i);
        if (cpus[i].physical_package_id < 0)
            return cpus[i].physical_package_id;
        if (cpus[i].physical_package_id > max_package_id)
            max_package_id = cpus[i].physical_package_id;

        siblings = get_num_ht_siblings(i);
        if (siblings < 0)
            return siblings;
        if (siblings > max_siblings)
            max_siblings = siblings;
        DEBUG("cpu %d pkg %d core %d\n",
            i, cpus[i].physical_package_id, cpus[i].core_id);
    }
    topo.num_cores_per_pkg = max_core_id + 1;
    DEBUG("max_core_id %d, sizing for %d cores per package\n",
        max_core_id, topo.num_cores_per_pkg);

    topo.num_packages = max_package_id + 1;
    DEBUG("max_package_id %d, sizing for %d packages\n",
        max_package_id, topo.num_packages);

    topo.num_threads_per_core = max_siblings;
    DEBUG("max_siblings %d\n", max_siblings);

    free(cpus);
    return 0;
}
static int
allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p)
{
    int i;

    *t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg *
        topo.num_packages, sizeof(struct thread_data));
    if (*t == NULL)
        goto error;

    for (i = 0; i < topo.num_threads_per_core *
        topo.num_cores_per_pkg * topo.num_packages; i++)
        (*t)[i].cpu_id = -1;

    *c = calloc(topo.num_cores_per_pkg * topo.num_packages,
        sizeof(struct core_data));
    if (*c == NULL)
        goto error;

    for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++)
        (*c)[i].core_id = -1;

    *p = calloc(topo.num_packages, sizeof(struct pkg_data));
    if (*p == NULL)
        goto error;

    for (i = 0; i < topo.num_packages; i++)
        (*p)[i].package_id = i;

    return 0;
error:
    ERROR("calloc counters");
    return -ERR_CALLOC;
}
/*
 * set cpu_id, core_num, pkg_num
 * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE
 *
 * increment topo.num_cores when 1st core in pkg seen
 */
static int
init_counter(struct thread_data *thread_base, struct core_data *core_base,
    struct pkg_data *pkg_base, int thread_num, int core_num,
    int pkg_num, int cpu_id)
{
    int ret;
    struct thread_data *t;
    struct core_data *c;
    struct pkg_data *p;

    t = GET_THREAD(thread_base, thread_num, core_num, pkg_num);
    c = GET_CORE(core_base, core_num, pkg_num);
    p = GET_PKG(pkg_base, pkg_num);

    t->cpu_id = cpu_id;
    if (thread_num == 0) {
        t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
        if ((ret = cpu_is_first_core_in_package(cpu_id)) < 0) {
            return ret;
        } else if (ret != 0) {
            t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
        }
    }

    c->core_id = core_num;
    p->package_id = pkg_num;

    return 0;
}
static int
initialize_counters(int cpu_id)
{
    int my_thread_id, my_core_id, my_package_id;
    int ret;

    my_package_id = get_physical_package_id(cpu_id);
    if (my_package_id < 0)
        return my_package_id;
    my_core_id = get_core_id(cpu_id);
    if (my_core_id < 0)
        return my_core_id;

    if ((ret = cpu_is_first_sibling_in_core(cpu_id)) < 0) {
        return ret;
    } else if (ret != 0) {
        my_thread_id = 0;
    } else {
        my_thread_id = 1;
    }

    ret = init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
    if (ret < 0)
        return ret;
    ret = init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
    if (ret < 0)
        return ret;
    return 0;
}
#define DO_OR_GOTO_ERR(something) \
    do { \
        ret = (something); \
        if (ret < 0) \
            goto err; \
    } while (0)

static int setup_all_buffers(void)
{
    int ret = 0;

    DO_OR_GOTO_ERR(topology_probe());
    DO_OR_GOTO_ERR(allocate_counters(&thread_even, &core_even, &package_even));
    DO_OR_GOTO_ERR(allocate_counters(&thread_odd, &core_odd, &package_odd));
    DO_OR_GOTO_ERR(for_all_proc_cpus(initialize_counters));
    allocated = 1;
    return 0;
err:
    free_all_buffers();
    return ret;
}
static int
turbostat_init(void)
{
    int ret;

    DO_OR_GOTO_ERR(check_super_user());
    DO_OR_GOTO_ERR(probe_cpu());
    DO_OR_GOTO_ERR(check_dev_msr());
    DO_OR_GOTO_ERR(setup_all_buffers());
    DO_OR_GOTO_ERR(for_all_cpus(set_temperature_target, EVEN_COUNTERS));
    DO_OR_GOTO_ERR(for_all_cpus(set_temperature_target, ODD_COUNTERS));

    plugin_register_complex_read(NULL, PLUGIN_NAME, turbostat_read, NULL, NULL);

    return 0;
err:
    free_all_buffers();
    return ret;
}
static const char *config_keys[] =
{
    "TCCActivationTemp",
};
static const int config_keys_num = STATIC_ARRAY_SIZE(config_keys);
static int
turbostat_config(const char *key, const char *value)
{
    unsigned long tmp_val;
    char *end;

    if (strcasecmp("TCCActivationTemp", key) == 0) {
        tmp_val = strtoul(value, &end, 0);
        if (*end != '\0' || tmp_val > UINT_MAX)
            return -1;
        tcc_activation_temp = (unsigned int)tmp_val;
    } else {
        return -1;
    }
    return 0;
}
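/*
 * Example collectd.conf snippet (illustrative):
 *
 *   LoadPlugin turbostat
 *   <Plugin turbostat>
 *     TCCActivationTemp "100"
 *   </Plugin>
 */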
void module_register(void);
void module_register(void)
{
    plugin_register_init(PLUGIN_NAME, turbostat_init);
    plugin_register_config(PLUGIN_NAME, turbostat_config, config_keys, config_keys_num);
}