 * turbostat -- Log CPU frequency and C-state residency
 * on modern Intel turbo-capable processors for collectd.
 * Based on the 'turbostat' tool of the Linux kernel, found at
 * linux/tools/power/x86/turbostat/turbostat.c:
 * Copyright (c) 2013 Intel Corporation.
 * Len Brown <len.brown@intel.com>
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * Ported to collectd by Vincent Brillault <git@lerya.net>
 * _GNU_SOURCE is required because of the following functions:
#include <asm/msr-index.h>
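/*
 * <asm/msr-index.h> provides the MSR_* register address constants
 * (MSR_IA32_TSC, MSR_PKG_ENERGY_STATUS, MSR_RAPL_POWER_UNIT, ...)
 * used throughout this plugin.
 */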
#include <sys/types.h>
#include <sys/resource.h>
#define PLUGIN_NAME "turbostat"
 * This tool uses the Model-Specific Registers (MSRs) present on Intel processors.
 * A general description of each of these registers, depending on the architecture,
 * can be found in the Intel® 64 and IA-32 Architectures Software Developer's Manual,
 * Volume 3, Chapter 35.
 * If set, aperf_mperf_unstable disables a/mperf-based statistics.
 * This includes: C0 & C1 states, frequency.
 * This value is automatically set if mperf or aperf go backwards.
static _Bool aperf_mperf_unstable;
 * Bitmask of the core C-states supported by the processor.
 * Currently supported C-states (by this plugin): 3, 6, 7
static unsigned int do_core_cstate;
 * Bitmask of the package C-states supported by the processor.
 * Currently supported C-states (by this plugin): 2, 3, 6, 7, 8, 9, 10
static unsigned int do_pkg_cstate;
 * Boolean indicating if the processor supports the 'Digital temperature sensor'.
 * This feature enables the monitoring of the temperature of each core.
 * This feature has two limitations:
 * - if MSR_IA32_TEMPERATURE_TARGET is not supported, the absolute temperature might be wrong
 * - temperatures above the tcc_activation_temp are not recorded
 * Boolean indicating if the processor supports 'Package thermal management'.
 * This feature allows the monitoring of the temperature of each package.
 * This feature has two limitations:
 * - if MSR_IA32_TEMPERATURE_TARGET is not supported, the absolute temperature might be wrong
 * - temperatures above the tcc_activation_temp are not recorded
 * Thermal Control Circuit Activation Temperature as configured by the user.
 * This overrides the automated detection via MSR_IA32_TEMPERATURE_TARGET
 * and should only be used if the automated detection fails.
static unsigned int tcc_activation_temp;
static unsigned int do_rapl;
static double rapl_energy_units;
#define RAPL_PKG (1 << 0)
/* 0x610 MSR_PKG_POWER_LIMIT */
/* 0x611 MSR_PKG_ENERGY_STATUS */
#define RAPL_PKG_PERF_STATUS (1 << 1)
/* 0x613 MSR_PKG_PERF_STATUS */
#define RAPL_PKG_POWER_INFO (1 << 2)
/* 0x614 MSR_PKG_POWER_INFO */
#define RAPL_DRAM (1 << 3)
/* 0x618 MSR_DRAM_POWER_LIMIT */
/* 0x619 MSR_DRAM_ENERGY_STATUS */
/* 0x61c MSR_DRAM_POWER_INFO */
#define RAPL_DRAM_PERF_STATUS (1 << 4)
/* 0x61b MSR_DRAM_PERF_STATUS */
#define RAPL_CORES (1 << 5)
/* 0x638 MSR_PP0_POWER_LIMIT */
/* 0x639 MSR_PP0_ENERGY_STATUS */
#define RAPL_CORE_POLICY (1 << 6)
/* 0x63a MSR_PP0_POLICY */
#define RAPL_GFX (1 << 7)
/* 0x640 MSR_PP1_POWER_LIMIT */
/* 0x641 MSR_PP1_ENERGY_STATUS */
/* 0x642 MSR_PP1_POLICY */
#define TJMAX_DEFAULT 100
cpu_set_t *cpu_present_set, *cpu_affinity_set, *cpu_saved_affinity_set;
size_t cpu_present_setsize, cpu_affinity_setsize, cpu_saved_affinity_setsize;
	unsigned long long tsc;
	unsigned long long aperf;
	unsigned long long mperf;
	unsigned long long c1;
	unsigned int smi_count;
#define CPU_IS_FIRST_THREAD_IN_CORE 0x2
#define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4
} *thread_delta, *thread_even, *thread_odd;
	unsigned long long c3;
	unsigned long long c6;
	unsigned long long c7;
	unsigned int core_temp_c;
	unsigned int core_id;
} *core_delta, *core_even, *core_odd;
	unsigned long long pc2;
	unsigned long long pc3;
	unsigned long long pc6;
	unsigned long long pc7;
	unsigned long long pc8;
	unsigned long long pc9;
	unsigned long long pc10;
	unsigned int package_id;
	unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */
	unsigned int energy_dram; /* MSR_DRAM_ENERGY_STATUS */
	unsigned int energy_cores; /* MSR_PP0_ENERGY_STATUS */
	unsigned int energy_gfx; /* MSR_PP1_ENERGY_STATUS */
	unsigned int rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */
	unsigned int rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */
	unsigned int tcc_activation_temp;
	unsigned int pkg_temp_c;
} *package_delta, *package_even, *package_odd;
#define DELTA_COUNTERS thread_delta, core_delta, package_delta
#define ODD_COUNTERS thread_odd, core_odd, package_odd
#define EVEN_COUNTERS thread_even, core_even, package_even
static _Bool is_even = 1;
static _Bool allocated = 0;
static _Bool initialized = 0;
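/*
 * Counters are triple-buffered: the 'even' and 'odd' sets alternate between
 * holding the previous and the current snapshot (tracked by is_even), and
 * the 'delta' set receives their difference before it is submitted.
 */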
#define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
	(thread_base + (pkg_no) * topo.num_cores_per_pkg * \
		topo.num_threads_per_core + \
		(core_no) * topo.num_threads_per_core + (thread_no))
#define GET_CORE(core_base, core_no, pkg_no) \
	(core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no))
#define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
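/*
 * The counter arrays are flat [pkg][core][thread] layouts.
 * Example: with 4 cores/package and 2 threads/core,
 * GET_THREAD(base, 1, 2, 1) = base + 1*4*2 + 2*2 + 1 = base + 13.
 */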
	int num_cores_per_pkg;
	int num_threads_per_core;
struct timeval tv_even, tv_odd, tv_delta;
	ERR_CPU_SAVE_SCHED_AFFINITY,
	ERR_MSR_CORE_C3_RESIDENCY,
	ERR_MSR_CORE_C6_RESIDENCY,
	ERR_MSR_CORE_C7_RESIDENCY,
	ERR_MSR_IA32_THERM_STATUS,
	ERR_MSR_PKG_C3_RESIDENCY,
	ERR_MSR_PKG_C6_RESIDENCY,
	ERR_MSR_PKG_C2_RESIDENCY,
	ERR_MSR_PKG_C7_RESIDENCY,
	ERR_MSR_PKG_C8_RESIDENCY,
	ERR_MSR_PKG_C9_RESIDENCY,
	ERR_MSR_PKG_C10_RESIDENCY,
	ERR_MSR_PKG_ENERGY_STATUS,
	ERR_MSR_PKG_POWER_INFO,
	ERR_MSR_PP0_ENERGY_STATUS,
	ERR_MSR_DRAM_ENERGY_STATUS,
	ERR_MSR_PP1_ENERGY_STATUS,
	ERR_MSR_PKG_PERF_STATUS,
	ERR_MSR_DRAM_PERF_STATUS,
	ERR_MSR_IA32_PACKAGE_THERM_STATUS,
	ERR_CANT_READ_NUMBER,
	ERR_CANT_READ_PROC_STAT,
	ERR_NO_INVARIANT_TSC,
/*****************************
 * MSR Manipulation helpers *
 *****************************/
 * Open an MSR device for reading.
 * Can change the scheduling affinity of the current process if multiple_read is 1.
static int __attribute__((warn_unused_result))
open_msr(int cpu, _Bool multiple_read)
	 * If we need to do multiple reads, migrate to the target CPU:
	 * otherwise we would waste time calling functions on another CPU.
	 * If we are not yet initialized (cpu_affinity_setsize = 0),
	 * we need to skip this optimisation.
	if (multiple_read && cpu_affinity_setsize) {
		CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
		CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
		if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1) {
			ERROR("Could not migrate to CPU %d", cpu);
			return -ERR_CPU_MIGRATE;
	ssnprintf(pathname, sizeof(pathname), "/dev/cpu/%d/msr", cpu);
	fd = open(pathname, O_RDONLY);
		return -ERR_CANT_OPEN_MSR;
 * Read a single MSR from an open file descriptor.
static int __attribute__((warn_unused_result))
read_msr(int fd, off_t offset, unsigned long long *msr)
	retval = pread(fd, msr, sizeof *msr, offset);
	if (retval != sizeof *msr) {
		ERROR("MSR offset 0x%llx read failed", (unsigned long long)offset);
 * Open an MSR device for reading, read the requested value and close it.
 * This call will not affect the scheduling affinity of this thread.
static int __attribute__((warn_unused_result))
get_msr(int cpu, off_t offset, unsigned long long *msr)
	fd = open_msr(cpu, 0);
	retval = read_msr(fd, offset, msr);
/********************************
 * Raw data acquisition (1 CPU) *
 ********************************/
 * Read all the data available for a single CPU.
 * Core data is shared by all threads in a core: extracted only for the first thread.
 * Package data is shared by all cores in a package: extracted only for the first thread of the first core.
 * Side effect: migrates to the targeted CPU.
static int __attribute__((warn_unused_result))
get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
	unsigned long long msr;
	msr_fd = open_msr(cpu, 1);
#define READ_MSR(msr, dst) \
	if (read_msr(msr_fd, msr, dst)) { \
		retval = -ERR_##msr; \
	READ_MSR(MSR_IA32_TSC, &t->tsc);
	READ_MSR(MSR_IA32_APERF, &t->aperf);
	READ_MSR(MSR_IA32_MPERF, &t->mperf);
	READ_MSR(MSR_SMI_COUNT, &msr);
	t->smi_count = msr & 0xFFFFFFFF;
	/* collect core counters only for 1st thread in core */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) {
	if (do_core_cstate & (1 << 3))
		READ_MSR(MSR_CORE_C3_RESIDENCY, &c->c3);
	if (do_core_cstate & (1 << 6))
		READ_MSR(MSR_CORE_C6_RESIDENCY, &c->c6);
	if (do_core_cstate & (1 << 7))
		READ_MSR(MSR_CORE_C7_RESIDENCY, &c->c7);
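	/*
	 * IA32_THERM_STATUS bits 22:16 hold the Digital Readout: the number
	 * of degrees Celsius below the TCC activation temperature, hence the
	 * subtraction from tcc_activation_temp below.
	 */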
		READ_MSR(MSR_IA32_THERM_STATUS, &msr);
		c->core_temp_c = p->tcc_activation_temp - ((msr >> 16) & 0x7F);
	/* collect package counters only for 1st core in package */
	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) {
	if (do_pkg_cstate & (1 << 2))
		READ_MSR(MSR_PKG_C2_RESIDENCY, &p->pc2);
	if (do_pkg_cstate & (1 << 3))
		READ_MSR(MSR_PKG_C3_RESIDENCY, &p->pc3);
	if (do_pkg_cstate & (1 << 6))
		READ_MSR(MSR_PKG_C6_RESIDENCY, &p->pc6);
	if (do_pkg_cstate & (1 << 7))
		READ_MSR(MSR_PKG_C7_RESIDENCY, &p->pc7);
	if (do_pkg_cstate & (1 << 8))
		READ_MSR(MSR_PKG_C8_RESIDENCY, &p->pc8);
	if (do_pkg_cstate & (1 << 9))
		READ_MSR(MSR_PKG_C9_RESIDENCY, &p->pc9);
	if (do_pkg_cstate & (1 << 10))
		READ_MSR(MSR_PKG_C10_RESIDENCY, &p->pc10);
	if (do_rapl & RAPL_PKG) {
		READ_MSR(MSR_PKG_ENERGY_STATUS, &msr);
		p->energy_pkg = msr & 0xFFFFFFFF;
	if (do_rapl & RAPL_CORES) {
		READ_MSR(MSR_PP0_ENERGY_STATUS, &msr);
		p->energy_cores = msr & 0xFFFFFFFF;
	if (do_rapl & RAPL_DRAM) {
		READ_MSR(MSR_DRAM_ENERGY_STATUS, &msr);
		p->energy_dram = msr & 0xFFFFFFFF;
	if (do_rapl & RAPL_GFX) {
		READ_MSR(MSR_PP1_ENERGY_STATUS, &msr);
		p->energy_gfx = msr & 0xFFFFFFFF;
	if (do_rapl & RAPL_PKG_PERF_STATUS) {
		READ_MSR(MSR_PKG_PERF_STATUS, &msr);
		p->rapl_pkg_perf_status = msr & 0xFFFFFFFF;
	if (do_rapl & RAPL_DRAM_PERF_STATUS) {
		READ_MSR(MSR_DRAM_PERF_STATUS, &msr);
		p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
		READ_MSR(MSR_IA32_PACKAGE_THERM_STATUS, &msr);
		p->pkg_temp_c = p->tcc_activation_temp - ((msr >> 16) & 0x7F);
/**********************************
 * Evaluating the changes (1 CPU) *
 **********************************/
 * Compute delta = new - old on 32-bit wrapping counters.
#define DELTA_WRAP32(delta, new, old) \
		delta = 0x100000000 + new - old; \
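/*
 * Example: old = 0xFFFFFFF0, new = 0x10 after the 32-bit counter wrapped:
 * delta = 0x100000000 + 0x10 - 0xFFFFFFF0 = 0x20, the true increment.
 */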
 * Extract the evolution old->new in delta at the package level
 * (some fields, e.g. temperature, are absolute values, not deltas)
delta_package(struct pkg_data *delta, const struct pkg_data *new, const struct pkg_data *old)
	delta->pc2 = new->pc2 - old->pc2;
	delta->pc3 = new->pc3 - old->pc3;
	delta->pc6 = new->pc6 - old->pc6;
	delta->pc7 = new->pc7 - old->pc7;
	delta->pc8 = new->pc8 - old->pc8;
	delta->pc9 = new->pc9 - old->pc9;
	delta->pc10 = new->pc10 - old->pc10;
	delta->pkg_temp_c = new->pkg_temp_c;
	DELTA_WRAP32(delta->energy_pkg, new->energy_pkg, old->energy_pkg);
	DELTA_WRAP32(delta->energy_cores, new->energy_cores, old->energy_cores);
	DELTA_WRAP32(delta->energy_gfx, new->energy_gfx, old->energy_gfx);
	DELTA_WRAP32(delta->energy_dram, new->energy_dram, old->energy_dram);
	DELTA_WRAP32(delta->rapl_pkg_perf_status, new->rapl_pkg_perf_status, old->rapl_pkg_perf_status);
	DELTA_WRAP32(delta->rapl_dram_perf_status, new->rapl_dram_perf_status, old->rapl_dram_perf_status);
 * Extract the evolution old->new in delta at the core level
 * (some fields, e.g. temperature, are absolute values, not deltas)
delta_core(struct core_data *delta, const struct core_data *new, const struct core_data *old)
	delta->c3 = new->c3 - old->c3;
	delta->c6 = new->c6 - old->c6;
	delta->c7 = new->c7 - old->c7;
	delta->core_temp_c = new->core_temp_c;
 * Extract the evolution old->new in delta at the thread level.
 * core_delta is required for the c1 estimation (tsc - c0 - all core C-states).
static inline int __attribute__((warn_unused_result))
delta_thread(struct thread_data *delta, const struct thread_data *new, const struct thread_data *old,
	const struct core_data *core_delta)
	delta->tsc = new->tsc - old->tsc;
	/* check for TSC < 1 Mcycles over interval */
	if (delta->tsc < (1000 * 1000)) {
		WARNING("Insanely slow TSC rate, TSC stops in idle? ");
		WARNING("You can disable all c-states by booting with \"idle=poll\" ");
		WARNING("or just the deep ones with \"processor.max_cstate=1\"");
	delta->c1 = new->c1 - old->c1;
	if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
		delta->aperf = new->aperf - old->aperf;
		delta->mperf = new->mperf - old->mperf;
	if (!aperf_mperf_unstable) {
		WARNING("* APERF or MPERF went backwards *");
		WARNING("* Frequency results do not cover entire interval *");
		WARNING("* fix this by running Linux-2.6.30 or later *");
		aperf_mperf_unstable = 1;
	 * As counter collection is not atomic,
	 * it is possible for mperf's non-halted cycles + idle states
	 * to exceed TSC's all cycles: show c1 = 0% in that case.
	if ((delta->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > delta->tsc)
		/* normal case, derive c1 */
		delta->c1 = delta->tsc - delta->mperf - core_delta->c3
			- core_delta->c6 - core_delta->c7;
	if (delta->mperf == 0) {
		WARNING("cpu%d MPERF 0!", old->cpu_id);
		delta->mperf = 1; /* divide by 0 protection */
	delta->smi_count = new->smi_count - old->smi_count;
/**********************************
 * Submitting the results (1 CPU) *
 **********************************/
 * Submit one gauge value.
turbostat_submit (const char *plugin_instance,
	const char *type, const char *type_instance,
	value_list_t vl = VALUE_LIST_INIT;
	sstrncpy (vl.host, hostname_g, sizeof (vl.host));
	sstrncpy (vl.plugin, PLUGIN_NAME, sizeof (vl.plugin));
	if (plugin_instance != NULL)
		sstrncpy (vl.plugin_instance, plugin_instance, sizeof (vl.plugin_instance));
	sstrncpy (vl.type, type, sizeof (vl.type));
	if (type_instance != NULL)
		sstrncpy (vl.type_instance, type_instance, sizeof (vl.type_instance));
	plugin_dispatch_values (&vl);
 * Submit all the data for a single CPU.
 * Core data is shared by all threads in a core: submitted only for the first thread.
 * Package data is shared by all cores in a package: submitted only for the first thread of the first core.
submit_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
	double interval_float;
	interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
	ssnprintf(name, sizeof(name), "cpu%02d", t->cpu_id);
	if (!aperf_mperf_unstable)
		turbostat_submit(name, "percent", "c0", 100.0 * t->mperf/t->tsc);
	if (!aperf_mperf_unstable)
		turbostat_submit(name, "percent", "c1", 100.0 * t->c1/t->tsc);
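	/*
	 * Average frequency in GHz: tsc/interval is the nominal clock rate;
	 * scaling it by aperf/mperf yields the average frequency during
	 * non-halted cycles (turbo included), since both counters only
	 * advance while the CPU is not halted.
	 */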
	if ((!aperf_mperf_unstable) || (!(t->aperf > t->tsc || t->mperf > t->tsc)))
		turbostat_submit(NULL, "frequency", name, 1.0 * t->tsc / 1000000000 * t->aperf / t->mperf / interval_float);
	turbostat_submit(NULL, "current", name, t->smi_count);
	/* submit per-core data only for 1st thread in core */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
	ssnprintf(name, sizeof(name), "core%02d", c->core_id);
	if (do_core_cstate & (1 << 3))
		turbostat_submit(name, "percent", "c3", 100.0 * c->c3/t->tsc);
	if (do_core_cstate & (1 << 6))
		turbostat_submit(name, "percent", "c6", 100.0 * c->c6/t->tsc);
	if (do_core_cstate & (1 << 7))
		turbostat_submit(name, "percent", "c7", 100.0 * c->c7/t->tsc);
		turbostat_submit(NULL, "temperature", name, c->core_temp_c);
	/* submit per-package data only for 1st core in package */
	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
	ssnprintf(name, sizeof(name), "pkg%02d", p->package_id);
		turbostat_submit(NULL, "temperature", name, p->pkg_temp_c);
	if (do_pkg_cstate & (1 << 2))
		turbostat_submit(name, "percent", "pc2", 100.0 * p->pc2/t->tsc);
	if (do_pkg_cstate & (1 << 3))
		turbostat_submit(name, "percent", "pc3", 100.0 * p->pc3/t->tsc);
	if (do_pkg_cstate & (1 << 6))
		turbostat_submit(name, "percent", "pc6", 100.0 * p->pc6/t->tsc);
	if (do_pkg_cstate & (1 << 7))
		turbostat_submit(name, "percent", "pc7", 100.0 * p->pc7/t->tsc);
	if (do_pkg_cstate & (1 << 8))
		turbostat_submit(name, "percent", "pc8", 100.0 * p->pc8/t->tsc);
	if (do_pkg_cstate & (1 << 9))
		turbostat_submit(name, "percent", "pc9", 100.0 * p->pc9/t->tsc);
	if (do_pkg_cstate & (1 << 10))
		turbostat_submit(name, "percent", "pc10", 100.0 * p->pc10/t->tsc);
	if (do_rapl & RAPL_PKG)
		turbostat_submit(name, "power", "Pkg_W", p->energy_pkg * rapl_energy_units / interval_float);
	if (do_rapl & RAPL_CORES)
		turbostat_submit(name, "power", "Cor_W", p->energy_cores * rapl_energy_units / interval_float);
	if (do_rapl & RAPL_GFX)
		turbostat_submit(name, "power", "GFX_W", p->energy_gfx * rapl_energy_units / interval_float);
	if (do_rapl & RAPL_DRAM)
		turbostat_submit(name, "power", "RAM_W", p->energy_dram * rapl_energy_units / interval_float);
/**********************************
 * Looping function over all CPUs *
 **********************************/
 * Check if a given cpu id is in our compiled list of existing CPUs.
 * CHECKME: Why do we need this?
cpu_is_not_present(int cpu)
	return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
 * Loop on all CPUs in topological order.
 * Skip 'non-present' cpus (CHECKME: Why do we need this?).
 * Return the error code of the first error, or 0.
static int __attribute__((warn_unused_result))
for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *),
	struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
	int retval, pkg_no, core_no, thread_no;
	for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
		for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
			for (thread_no = 0; thread_no <
				topo.num_threads_per_core; ++thread_no) {
				struct thread_data *t;
				t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
				if (cpu_is_not_present(t->cpu_id))
				c = GET_CORE(core_base, core_no, pkg_no);
				p = GET_PKG(pkg_base, pkg_no);
				retval = func(t, c, p);
 * Dedicated loop: compute the delta of every counter for all CPUs.
 * Core data is shared by all threads in a core: extracted only for the first thread.
 * Package data is shared by all cores in a package: extracted only for the first thread of the first core.
static int __attribute__((warn_unused_result))
for_all_cpus_delta(const struct thread_data *thread_new_base, const struct core_data *core_new_base, const struct pkg_data *pkg_new_base,
	const struct thread_data *thread_old_base, const struct core_data *core_old_base, const struct pkg_data *pkg_old_base)
	int retval, pkg_no, core_no, thread_no;
	for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
		for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
			for (thread_no = 0; thread_no <
				topo.num_threads_per_core; ++thread_no) {
				struct thread_data *t_delta;
				const struct thread_data *t_old, *t_new;
				struct core_data *c_delta;
				/* Get correct pointers for threads */
				t_delta = GET_THREAD(thread_delta, thread_no, core_no, pkg_no);
				t_new = GET_THREAD(thread_new_base, thread_no, core_no, pkg_no);
				t_old = GET_THREAD(thread_old_base, thread_no, core_no, pkg_no);
				/* Skip threads that disappeared */
				if (cpu_is_not_present(t_delta->cpu_id))
				/* c_delta is always required for delta_thread */
				c_delta = GET_CORE(core_delta, core_no, pkg_no);
				/* calculate core delta only for 1st thread in core */
				if (t_new->flags & CPU_IS_FIRST_THREAD_IN_CORE) {
					const struct core_data *c_old, *c_new;
					c_new = GET_CORE(core_new_base, core_no, pkg_no);
					c_old = GET_CORE(core_old_base, core_no, pkg_no);
					delta_core(c_delta, c_new, c_old);
				/* Always calculate thread delta */
				retval = delta_thread(t_delta, t_new, t_old, c_delta);
				/* calculate package delta only for 1st core in package */
				if (t_new->flags & CPU_IS_FIRST_CORE_IN_PACKAGE) {
					struct pkg_data *p_delta;
					const struct pkg_data *p_old, *p_new;
					p_delta = GET_PKG(package_delta, pkg_no);
					p_new = GET_PKG(pkg_new_base, pkg_no);
					p_old = GET_PKG(pkg_old_base, pkg_no);
					delta_package(p_delta, p_new, p_old);
free_all_buffers(void)
	CPU_FREE(cpu_present_set);
	cpu_present_set = NULL;
	CPU_FREE(cpu_affinity_set);
	cpu_affinity_set = NULL;
	cpu_affinity_setsize = 0;
	CPU_FREE(cpu_saved_affinity_set);
	cpu_saved_affinity_set = NULL;
	cpu_saved_affinity_setsize = 0;
	package_delta = NULL;
 * Parse a file containing a single int.
static int __attribute__ ((format(printf,1,2)))
parse_int_file(const char *fmt, ...)
	vsnprintf(path, sizeof(path), fmt, args);
	filep = fopen(path, "r");
		ERROR("%s: open failed", path);
		return -ERR_CANT_OPEN_FILE;
	if (fscanf(filep, "%d", &value) != 1) {
		ERROR("%s: failed to parse number from file", path);
		return -ERR_CANT_READ_NUMBER;
 * cpu_is_first_sibling_in_core(cpu)
 * return 1 if given CPU is 1st HT sibling in the core
cpu_is_first_sibling_in_core(int cpu)
	return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
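/*
 * Example: with hyper-threaded siblings 0 and 4, cpu4's thread_siblings_list
 * reads "0,4"; parse_int_file() returns the first integer (0), so cpu4 is
 * correctly reported as not being the first sibling.
 */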
 * cpu_is_first_core_in_package(cpu)
 * return 1 if given CPU is 1st core in package
cpu_is_first_core_in_package(int cpu)
	return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
get_physical_package_id(int cpu)
	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
get_num_ht_siblings(int cpu)
	ssnprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
	filep = fopen(path, "r");
		ERROR("%s: open failed", path);
		return -ERR_CANT_OPEN_FILE;
	 * If there is a pair of numbers with a character between them: 2 siblings (e.g. 1-2, or 1,4);
	 * otherwise: 1 sibling (self).
	matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2);
 * Run func(cpu) on every cpu in /proc/stat.
 * Return the max cpu number.
static int __attribute__((warn_unused_result))
for_all_proc_cpus(int (func)(int))
	fp = fopen("/proc/stat", "r");
		ERROR("Failed to open /proc/stat");
		return -ERR_CANT_OPEN_FILE;
	retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
		ERROR("Failed to parse /proc/stat");
		return -ERR_CANT_READ_PROC_STAT;
	retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
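	/*
	 * Each per-CPU line looks like (values illustrative):
	 *   cpu12 101988 0 42658 2795928 427 0 380 0 0 0
	 * Only the cpu number is kept; the ten counters are discarded.
	 */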
		retval = func(cpu_num);
	 * remember the last one seen, it will be the max
	if (topo.max_cpu_num < cpu)
		topo.max_cpu_num = cpu;
mark_cpu_present(int cpu)
	CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
static int setup_all_buffers(void);
turbostat_read(user_data_t * not_used)
	if ((ret = setup_all_buffers()) < 0)
	if (for_all_proc_cpus(cpu_is_not_present)) {
		if ((ret = setup_all_buffers()) < 0)
		if (for_all_proc_cpus(cpu_is_not_present))
			return -ERR_CPU_NOT_PRESENT;
	/* Save the scheduling affinity, as it will be modified by get_counters */
	if (sched_getaffinity(0, cpu_saved_affinity_setsize, cpu_saved_affinity_set) != 0)
		return -ERR_CPU_SAVE_SCHED_AFFINITY;
	if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0)
	gettimeofday(&tv_even, (struct timezone *)NULL);
	if ((ret = for_all_cpus(get_counters, ODD_COUNTERS)) < 0)
	gettimeofday(&tv_odd, (struct timezone *)NULL);
	timersub(&tv_odd, &tv_even, &tv_delta);
	if ((ret = for_all_cpus_delta(ODD_COUNTERS, EVEN_COUNTERS)) < 0)
	if ((ret = for_all_cpus(submit_counters, DELTA_COUNTERS)) < 0)
	if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0)
	gettimeofday(&tv_even, (struct timezone *)NULL);
	timersub(&tv_even, &tv_odd, &tv_delta);
	if ((ret = for_all_cpus_delta(EVEN_COUNTERS, ODD_COUNTERS)) < 0)
	if ((ret = for_all_cpus(submit_counters, DELTA_COUNTERS)) < 0)
	 * Restore the affinity.
	 * This might fail if the number of CPUs changed, but we can't do anything about it in that case.
	(void)sched_setaffinity(0, cpu_saved_affinity_setsize, cpu_saved_affinity_set);
static int __attribute__((warn_unused_result))
	if (stat("/dev/cpu/0/msr", &sb)) {
		ERROR("no /dev/cpu/0/msr, try \"# modprobe msr\"");
static int __attribute__((warn_unused_result))
	if (getuid() != 0) {
		ERROR("must be root");
		return -ERR_NOT_ROOT;
 * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
 * the Thermal Control Circuit (TCC) activates.
 * This is usually equal to tjMax.
 * Older processors do not have this MSR, so there we guess,
 * but also allow a configuration override with "TCCActivationTemp".
 * Several MSR temperature values are in units of degrees-C
 * below this value, including the Digital Thermal Sensor (DTS),
 * Package Thermal Management Sensor (PTM), and thermal event thresholds.
static int __attribute__((warn_unused_result))
set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
	unsigned long long msr;
	unsigned int target_c_local;
	/* tcc_activation_temp is used only for dts or ptm */
	if (!(do_dts || do_ptm))
	/* this is a per-package concept */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
	if (tcc_activation_temp != 0) {
		p->tcc_activation_temp = tcc_activation_temp;
	if (get_msr(t->cpu_id, MSR_IA32_TEMPERATURE_TARGET, &msr))
	target_c_local = (msr >> 16) & 0xFF;
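	/*
	 * Bits 23:16 of MSR_IA32_TEMPERATURE_TARGET hold the TCC activation
	 * temperature in degrees Celsius (e.g. 0x64 = 100 C on many parts).
	 */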
	if (!target_c_local)
	p->tcc_activation_temp = target_c_local;
	p->tcc_activation_temp = TJMAX_DEFAULT;
	WARNING("cpu%d: Guessing tjMax %d C, please use TCCActivationTemp to specify it",
		t->cpu_id, p->tcc_activation_temp);
 * Identify the functionality of the CPU.
static int __attribute__((warn_unused_result))
	unsigned int eax, ebx, ecx, edx, max_level;
	unsigned int fms, family, model;
 * - EAX: Maximum Input Value for Basic CPUID Information
 * - EBX: "Genu" (0x756e6547)
 * - EDX: "ineI" (0x49656e69)
 * - ECX: "ntel" (0x6c65746e)
	max_level = ebx = ecx = edx = 0;
	__get_cpuid(0, &max_level, &ebx, &ecx, &edx);
	if (ebx != 0x756e6547 || edx != 0x49656e69 || ecx != 0x6c65746e) {
		ERROR("Unsupported CPU");
		return -UNSUPPORTED_CPU;
 * - EAX: Version Information: Type, Family, Model, and Stepping ID
 *   + 12-13: Processor type
 *   + 16-19: Extended Model ID
 *   + 20-27: Extended Family ID
 * - EDX: Feature Information:
 *   + 5: Support for MSR read/write operations
	fms = ebx = ecx = edx = 0;
	__get_cpuid(1, &fms, &ebx, &ecx, &edx);
	family = (fms >> 8) & 0xf;
	model = (fms >> 4) & 0xf;
	family += (fms >> 20) & 0xff;
	if (family == 6 || family == 0xf)
		model += ((fms >> 16) & 0xf) << 4;
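	/*
	 * Example: fms = 0x000306C3 decodes to family 0x6,
	 * model 0xC + (0x3 << 4) = 0x3C (Haswell) and stepping 3.
	 */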
	if (!(edx & (1 << 5))) {
		ERROR("CPUID: no MSR");
 * CPUID(0x80000000):
 * - EAX: Maximum Input Value for Extended Function CPUID Information
 * This allows us to check whether CPUID(0x80000007) can be called.
 * This check is valid for both Intel and AMD.
	max_level = ebx = ecx = edx = 0;
	__get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
	if (max_level < 0x80000007) {
		ERROR("CPUID: no invariant TSC (max_level 0x%x)", max_level);
		return -ERR_NO_INVARIANT_TSC;
 * CPUID(0x80000007):
 *   + 8: Invariant TSC available if set
 * This check is valid for both Intel and AMD.
	__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
	if (!(edx & (1 << 8))) {
		ERROR("No invariant TSC");
		return -ERR_NO_INVARIANT_TSC;
 *   + 0: Digital temperature sensor is supported if set
 *   + 6: Package thermal management is supported if set
 *   + 0: Hardware Coordination Feedback Capability (presence of IA32_MPERF and IA32_APERF)
 *   + 3: The processor supports performance-energy bias preference if set.
 *     It also implies the presence of a new architectural MSR called IA32_ENERGY_PERF_BIAS.
 * This check is valid for both Intel and AMD.
	__get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
	do_dts = eax & (1 << 0);
	do_ptm = eax & (1 << 6);
	if (!(ecx & (1 << 0))) {
		return -ERR_NO_APERF;
 * Enable or disable C-states depending on the model and family.
	/* Atom (partial) */
		do_pkg_cstate = (1 << 2) | (1 << 4) | (1 << 6);
	case 0x37: /* BYT */
	case 0x4D: /* AVN */
		do_core_cstate = (1 << 1) | (1 << 6);
		do_pkg_cstate = (1 << 6);
	case 0x1A: /* Core i7, Xeon 5500 series - Bloomfield, Gainestown NHM-EP */
	case 0x1E: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
	case 0x1F: /* Core i7 and i5 Processor - Nehalem */
	case 0x2E: /* Nehalem-EX Xeon - Beckton */
		do_core_cstate = (1 << 3) | (1 << 6);
		do_pkg_cstate = (1 << 3) | (1 << 6) | (1 << 7);
	case 0x25: /* Westmere Client - Clarkdale, Arrandale */
	case 0x2C: /* Westmere EP - Gulftown */
	case 0x2F: /* Westmere-EX Xeon - Eagleton */
		do_core_cstate = (1 << 3) | (1 << 6);
		do_pkg_cstate = (1 << 3) | (1 << 6) | (1 << 7);
	case 0x2A: /* SNB */
	case 0x2D: /* SNB Xeon */
		do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
		do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);
	case 0x3A: /* IVB */
	case 0x3E: /* IVB Xeon */
		do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
		do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);
	/* Haswell */
	case 0x3C: /* HSW */
	case 0x3F: /* HSW */
	case 0x46: /* HSW */
		do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
		do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);
	case 0x45: /* HSW */
		do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
		do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10);
	case 0x4F: /* BDW */
	case 0x56: /* BDX-DE */
		do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
		do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);
	case 0x3D: /* BDW */
		do_core_cstate = (1 << 3) | (1 << 6) | (1 << 7);
		do_pkg_cstate = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10);
		ERROR("Unsupported CPU");
		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_PKG_POWER_INFO | RAPL_GFX;
		do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM | RAPL_DRAM_PERF_STATUS;
		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_PKG_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM | RAPL_DRAM_PERF_STATUS;
		do_rapl = RAPL_PKG | RAPL_CORES;
		ERROR("Unsupported CPU");
		return -UNSUPPORTED_CPU;
	if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr))
		rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
		rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
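	/*
	 * Example: bits 12:8 of MSR_RAPL_POWER_UNIT = 0x10 gives
	 * rapl_energy_units = 1 / 2^16 J, i.e. ~15.3 microjoules per count.
	 */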
static int __attribute__((warn_unused_result))
	int max_core_id = 0;
	int max_package_id = 0;
	int max_siblings = 0;
	struct cpu_topology {
		int physical_package_id;
	/* Initialize num_cpus, max_cpu_num */
	topo.max_cpu_num = 0;
	ret = for_all_proc_cpus(count_cpus);
	DEBUG("num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);
	cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology));
		ERROR("calloc cpus");
	 * Allocate and initialize cpu_present_set
	cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1));
	if (cpu_present_set == NULL) {
		return -ERR_CPU_ALLOC;
	cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
	CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
	ret = for_all_proc_cpus(mark_cpu_present);
	 * Allocate and initialize cpu_affinity_set
	cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
	if (cpu_affinity_set == NULL) {
		return -ERR_CPU_ALLOC;
	cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
	CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
	 * Allocate and initialize cpu_saved_affinity_set
	cpu_saved_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
	if (cpu_saved_affinity_set == NULL) {
		return -ERR_CPU_ALLOC;
	cpu_saved_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
	CPU_ZERO_S(cpu_saved_affinity_setsize, cpu_saved_affinity_set);
	 * find max_core_id, max_package_id
	for (i = 0; i <= topo.max_cpu_num; ++i) {
		if (cpu_is_not_present(i)) {
			WARNING("cpu%d NOT PRESENT", i);
		cpus[i].core_id = get_core_id(i);
		if (cpus[i].core_id < 0)
			return cpus[i].core_id;
		if (cpus[i].core_id > max_core_id)
			max_core_id = cpus[i].core_id;
		cpus[i].physical_package_id = get_physical_package_id(i);
		if (cpus[i].physical_package_id < 0)
			return cpus[i].physical_package_id;
		if (cpus[i].physical_package_id > max_package_id)
			max_package_id = cpus[i].physical_package_id;
		siblings = get_num_ht_siblings(i);
		if (siblings > max_siblings)
			max_siblings = siblings;
		DEBUG("cpu %d pkg %d core %d\n",
			i, cpus[i].physical_package_id, cpus[i].core_id);
	topo.num_cores_per_pkg = max_core_id + 1;
	DEBUG("max_core_id %d, sizing for %d cores per package\n",
		max_core_id, topo.num_cores_per_pkg);
	topo.num_packages = max_package_id + 1;
	DEBUG("max_package_id %d, sizing for %d packages\n",
		max_package_id, topo.num_packages);
	topo.num_threads_per_core = max_siblings;
	DEBUG("max_siblings %d\n", max_siblings);
allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p)
	*t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg *
		topo.num_packages, sizeof(struct thread_data));
	for (i = 0; i < topo.num_threads_per_core *
		topo.num_cores_per_pkg * topo.num_packages; i++)
		(*t)[i].cpu_id = -1;
	*c = calloc(topo.num_cores_per_pkg * topo.num_packages,
		sizeof(struct core_data));
	for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++)
		(*c)[i].core_id = -1;
	*p = calloc(topo.num_packages, sizeof(struct pkg_data));
	for (i = 0; i < topo.num_packages; i++)
		(*p)[i].package_id = i;
	ERROR("calloc counters");
 * set cpu_id, core_num, pkg_num
 * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE
 * increment topo.num_cores when 1st core in pkg seen
init_counter(struct thread_data *thread_base, struct core_data *core_base,
	struct pkg_data *pkg_base, int thread_num, int core_num,
	int pkg_num, int cpu_id)
	struct thread_data *t;
	struct core_data *c;
	t = GET_THREAD(thread_base, thread_num, core_num, pkg_num);
	c = GET_CORE(core_base, core_num, pkg_num);
	p = GET_PKG(pkg_base, pkg_num);
	if (thread_num == 0) {
		t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
		if ((ret = cpu_is_first_core_in_package(cpu_id)) < 0) {
		} else if (ret != 0) {
			t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
	c->core_id = core_num;
	p->package_id = pkg_num;
initialize_counters(int cpu_id)
	int my_thread_id, my_core_id, my_package_id;
	my_package_id = get_physical_package_id(cpu_id);
	if (my_package_id < 0)
		return my_package_id;
	my_core_id = get_core_id(cpu_id);
	if ((ret = cpu_is_first_sibling_in_core(cpu_id)) < 0) {
	} else if (ret != 0) {
	ret = init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
	ret = init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
	ret = init_counter(DELTA_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
#define DO_OR_GOTO_ERR(something) \
	ret = (something); \
static int setup_all_buffers(void)
	DO_OR_GOTO_ERR(topology_probe());
	DO_OR_GOTO_ERR(allocate_counters(&thread_even, &core_even, &package_even));
	DO_OR_GOTO_ERR(allocate_counters(&thread_odd, &core_odd, &package_odd));
	DO_OR_GOTO_ERR(allocate_counters(&thread_delta, &core_delta, &package_delta));
	DO_OR_GOTO_ERR(for_all_proc_cpus(initialize_counters));
	DO_OR_GOTO_ERR(for_all_cpus(set_temperature_target, EVEN_COUNTERS));
	DO_OR_GOTO_ERR(for_all_cpus(set_temperature_target, ODD_COUNTERS));
turbostat_init(void)
	DO_OR_GOTO_ERR(check_super_user());
	DO_OR_GOTO_ERR(probe_cpu());
	DO_OR_GOTO_ERR(check_dev_msr());
	DO_OR_GOTO_ERR(setup_all_buffers());
	plugin_register_complex_read(NULL, PLUGIN_NAME, turbostat_read, NULL, NULL);
static const char *config_keys[] =
	"TCCActivationTemp",
static const int config_keys_num = STATIC_ARRAY_SIZE (config_keys);
turbostat_config(const char *key, const char *value)
	unsigned long tmp_val;
	if (strcasecmp("TCCActivationTemp", key) == 0) {
		tmp_val = strtoul(value, &end, 0);
		if (*end != '\0' || tmp_val > UINT_MAX)
		tcc_activation_temp = (unsigned int) tmp_val;
void module_register(void);
void module_register(void)
	plugin_register_init(PLUGIN_NAME, turbostat_init);
	plugin_register_config(PLUGIN_NAME, turbostat_config, config_keys, config_keys_num);