-/*
- * count_cpus()
- * remember the last one seen, it will be the max
- */
-static int
-count_cpus(int cpu)
-{
- if (topo.max_cpu_num < cpu)
- topo.max_cpu_num = cpu;
-
- topo.num_cpus += 1;
- return 0;
-}
-static int
-mark_cpu_present(int cpu)
-{
- CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
- return 0;
-}
-
-
-static int setup_all_buffers(void);
-
/*
 * turbostat_read()
 * Plugin read callback. Counters are kept in two buffer sets ("even"
 * and "odd"); each invocation fills the set NOT used by the previous
 * one, computes the delta between the two snapshots and submits it.
 * The very first call only primes the even set and submits nothing.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int
turbostat_read(user_data_t * not_used)
{
	int ret;

	/* Lazily allocate the per-CPU buffers on the first invocation */
	if (!allocated) {
		if ((ret = setup_all_buffers()) < 0)
			return ret;
	}

	/*
	 * If the set of online CPUs changed since the buffers were sized
	 * (e.g. hotplug), rebuild everything once and re-check.
	 */
	if (for_all_proc_cpus(cpu_is_not_present)) {
		free_all_buffers();
		if ((ret = setup_all_buffers()) < 0)
			return ret;
		if (for_all_proc_cpus(cpu_is_not_present))
			return -ERR_CPU_NOT_PRESENT;
	}

	/* Saving the scheduling affinity, as it will be modified by get_counters */
	if (sched_getaffinity(0, cpu_saved_affinity_setsize, cpu_saved_affinity_set) != 0)
		return -ERR_CPU_SAVE_SCHED_AFFINITY;

	/* First snapshot: fill the even set, remember its timestamp, done */
	if (!initialized) {
		if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0)
			goto out;
		gettimeofday(&tv_even, (struct timezone *)NULL);
		is_even = 1;
		initialized = 1;
		ret = 0;
		goto out;
	}

	/*
	 * Steady state: fill the other set, compute the elapsed interval
	 * and the counter deltas, then submit them.
	 */
	if (is_even) {
		if ((ret = for_all_cpus(get_counters, ODD_COUNTERS)) < 0)
			goto out;
		gettimeofday(&tv_odd, (struct timezone *)NULL);
		is_even = 0;
		timersub(&tv_odd, &tv_even, &tv_delta);
		if ((ret = for_all_cpus_delta(ODD_COUNTERS, EVEN_COUNTERS)) < 0)
			goto out;
		if ((ret = for_all_cpus(submit_counters, DELTA_COUNTERS)) < 0)
			goto out;
	} else {
		if ((ret = for_all_cpus(get_counters, EVEN_COUNTERS)) < 0)
			goto out;
		gettimeofday(&tv_even, (struct timezone *)NULL);
		is_even = 1;
		timersub(&tv_even, &tv_odd, &tv_delta);
		if ((ret = for_all_cpus_delta(EVEN_COUNTERS, ODD_COUNTERS)) < 0)
			goto out;
		if ((ret = for_all_cpus(submit_counters, DELTA_COUNTERS)) < 0)
			goto out;
	}
	ret = 0;
out:
	/*
	 * Let's restore the affinity
	 * This might fail if the number of CPU changed, but we can't do anything in that case..
	 */
	(void)sched_setaffinity(0, cpu_saved_affinity_setsize, cpu_saved_affinity_set);
	return ret;
}
-
-static int __attribute__((warn_unused_result))
-check_dev_msr()
-{
- struct stat sb;
-
- if (stat("/dev/cpu/0/msr", &sb)) {
- ERROR("no /dev/cpu/0/msr, try \"# modprobe msr\"");
- return -ERR_NO_MSR;
- }
- return 0;
-}
-
-static int __attribute__((warn_unused_result))
-check_super_user()
-{
- if (getuid() != 0) {
- ERROR("must be root");
- return -ERR_NOT_ROOT;
- }
- return 0;
-}
-
/*
 * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
 * the Thermal Control Circuit (TCC) activates.
 * This is usually equal to tjMax.
 *
 * Older processors do not have this MSR, so there we guess,
 * but also allow configuration over-ride with "TCCActivationTemp".
 *
 * Several MSR temperature values are in units of degrees-C
 * below this value, including the Digital Thermal Sensor (DTS),
 * Package Thermal Management Sensor (PTM), and thermal event thresholds.
 */
static int __attribute__((warn_unused_result))
set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	unsigned long long msr;
	unsigned int target_c_local;

	/* tcc_activation_temp is used only for dts or ptm */
	if (!(do_dts || do_ptm))
		return 0;

	/* this is a per-package concept */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		return 0;

	/* A user-configured value always wins over the MSR */
	if (tcc_activation_temp != 0) {
		p->tcc_activation_temp = tcc_activation_temp;
		return 0;
	}

	/* MSR may be missing on older processors; fall back to the default */
	if (get_msr(t->cpu_id, MSR_IA32_TEMPERATURE_TARGET, &msr))
		goto guess;

	/* Temperature Target lives in bits 23:16 of the MSR, in degrees C */
	target_c_local = (msr >> 16) & 0xFF;

	/* A zero readout is not a usable target temperature */
	if (!target_c_local)
		goto guess;

	p->tcc_activation_temp = target_c_local;

	return 0;

guess:
	p->tcc_activation_temp = TJMAX_DEFAULT;
	WARNING("cpu%d: Guessing tjMax %d C, Please use TCCActivationTemp to specify",
		t->cpu_id, p->tcc_activation_temp);

	return 0;
}
-
-/*
- * Identify the functionality of the CPU
- */
-static int __attribute__((warn_unused_result))
-probe_cpu()
-{
- unsigned int eax, ebx, ecx, edx, max_level;
- unsigned int fms, family, model;
-
- /* CPUID(0):
- * - EAX: Maximum Input Value for Basic CPUID Information
- * - EBX: "Genu" (0x756e6547)
- * - EDX: "ineI" (0x49656e69)
- * - ECX: "ntel" (0x6c65746e)
- */
- max_level = ebx = ecx = edx = 0;
- __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
- if (ebx != 0x756e6547 && edx != 0x49656e69 && ecx != 0x6c65746e) {
- ERROR("Unsupported CPU");
- return -UNSUPPORTED_CPU;
- }
-
- /* CPUID(1):
- * - EAX: Version Information: Type, Family, Model, and Stepping ID
- * + 4-7: Model ID
- * + 8-11: Family ID
- * + 12-13: Processor type
- * + 16-19: Extended Model ID
- * + 20-27: Extended Family ID
- * - EDX: Feature Information:
- * + 5: Support for MSR read/write operations
- */
- fms = ebx = ecx = edx = 0;
- __get_cpuid(1, &fms, &ebx, &ecx, &edx);
- family = (fms >> 8) & 0xf;
- model = (fms >> 4) & 0xf;
- if (family == 0xf)
- family += (fms >> 20) & 0xf;
- if (family == 6 || family == 0xf)
- model += ((fms >> 16) & 0xf) << 4;
- if (!(edx & (1 << 5))) {
- ERROR("CPUID: no MSR");
- return -ERR_NO_MSR;
- }
-
- /*
- * CPUID(0x80000000):
- * - EAX: Maximum Input Value for Extended Function CPUID Information
- *
- * This allows us to verify if the CPUID(0x80000007) can be called
- *
- * This check is valid for both Intel and AMD.
- */
- max_level = ebx = ecx = edx = 0;
- __get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
- if (max_level < 0x80000007) {
- ERROR("CPUID: no invariant TSC (max_level 0x%x)", max_level);
- return -ERR_NO_INVARIANT_TSC;
- }
-
- /*
- * CPUID(0x80000007):
- * - EDX:
- * + 8: Invariant TSC available if set
- *
- * This check is valid for both Intel and AMD
- */
- eax = ebx = ecx = edx = 0;
- __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
- if (!(edx & (1 << 8))) {
- ERROR("No invariant TSC");
- return -ERR_NO_INVARIANT_TSC;
- }
-
- /*
- * CPUID(6):
- * - EAX:
- * + 0: Digital temperature sensor is supported if set
- * + 6: Package thermal management is supported if set
- * - ECX:
- * + 0: Hardware Coordination Feedback Capability (Presence of IA32_MPERF and IA32_APERF).
- * + 3: The processor supports performance-energy bias preference if set.
- * It also implies the presence of a new architectural MSR called IA32_ENERGY_PERF_BIAS
- *
- * This check is valid for both Intel and AMD
- */
- eax = ebx = ecx = edx = 0;
- __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
- do_dts = eax & (1 << 0);
- do_ptm = eax & (1 << 6);
- if (!(ecx & (1 << 0))) {
- ERROR("No APERF");
- return -ERR_NO_APERF;
- }