Diffstat (limited to 'drivers/cpufreq/intel_pstate.c')
-rw-r--r-- | drivers/cpufreq/intel_pstate.c | 166
1 file changed, 94 insertions(+), 72 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4b986c044741..392a8000b238 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -300,6 +300,7 @@ static struct cpufreq_driver *intel_pstate_driver __read_mostly;
 
 #define HYBRID_SCALING_FACTOR		78741
 #define HYBRID_SCALING_FACTOR_MTL	80000
+#define HYBRID_SCALING_FACTOR_LNL	86957
 
 static int hybrid_scaling_factor = HYBRID_SCALING_FACTOR;
 
@@ -355,15 +356,14 @@ static void intel_pstate_set_itmt_prio(int cpu)
 	int ret;
 
 	ret = cppc_get_perf_caps(cpu, &cppc_perf);
-	if (ret)
-		return;
-
 	/*
-	 * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff.
-	 * In this case we can't use CPPC.highest_perf to enable ITMT.
-	 * In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide.
+	 * If CPPC is not available, fall back to MSR_HWP_CAPABILITIES bits [8:0].
+	 *
+	 * Also, on some systems with overclocking enabled, CPPC.highest_perf is
+	 * hardcoded to 0xff, so CPPC.highest_perf cannot be used to enable ITMT.
+	 * Fall back to MSR_HWP_CAPABILITIES then too.
 	 */
-	if (cppc_perf.highest_perf == CPPC_MAX_PERF)
+	if (ret || cppc_perf.highest_perf == CPPC_MAX_PERF)
 		cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
 
 	/*
@@ -1153,7 +1153,8 @@ static void intel_pstate_update_policies(void)
 static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
 					   struct cpufreq_policy *policy)
 {
-	intel_pstate_get_hwp_cap(cpudata);
+	if (hwp_active)
+		intel_pstate_get_hwp_cap(cpudata);
 
 	policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
 			cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
@@ -1301,12 +1302,17 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
 
 	no_turbo = !!clamp_t(int, input, 0, 1);
 
-	if (no_turbo == global.no_turbo)
-		goto unlock_driver;
-
-	if (global.turbo_disabled) {
-		pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
+	WRITE_ONCE(global.turbo_disabled, turbo_is_disabled());
+	if (global.turbo_disabled && !no_turbo) {
+		pr_notice("Turbo disabled by BIOS or unavailable on processor\n");
 		count = -EPERM;
+		if (global.no_turbo)
+			goto unlock_driver;
+		else
+			no_turbo = 1;
+	}
+
+	if (no_turbo == global.no_turbo) {
 		goto unlock_driver;
 	}
 
@@ -1620,17 +1626,24 @@ static void intel_pstate_notify_work(struct work_struct *work)
 static DEFINE_SPINLOCK(hwp_notify_lock);
 static cpumask_t hwp_intr_enable_mask;
 
+#define HWP_GUARANTEED_PERF_CHANGE_STATUS	BIT(0)
+#define HWP_HIGHEST_PERF_CHANGE_STATUS		BIT(3)
+
 void notify_hwp_interrupt(void)
 {
 	unsigned int this_cpu = smp_processor_id();
+	u64 value, status_mask;
 	unsigned long flags;
-	u64 value;
 
-	if (!hwp_active || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+	if (!hwp_active || !cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY))
 		return;
 
+	status_mask = HWP_GUARANTEED_PERF_CHANGE_STATUS;
+	if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
+		status_mask |= HWP_HIGHEST_PERF_CHANGE_STATUS;
+
 	rdmsrl_safe(MSR_HWP_STATUS, &value);
-	if (!(value & 0x01))
+	if (!(value & status_mask))
 		return;
 
 	spin_lock_irqsave(&hwp_notify_lock, flags);
@@ -1654,7 +1667,7 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
 {
 	bool cancel_work;
 
-	if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+	if (!cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY))
 		return;
 
 	/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
@@ -1668,17 +1681,25 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
 		cancel_delayed_work_sync(&cpudata->hwp_notify_work);
 }
 
+#define HWP_GUARANTEED_PERF_CHANGE_REQ	BIT(0)
+#define HWP_HIGHEST_PERF_CHANGE_REQ	BIT(2)
+
 static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
 {
-	/* Enable HWP notification interrupt for guaranteed performance change */
+	/* Enable HWP notification interrupt for performance change */
 	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
+		u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ;
+
 		spin_lock_irq(&hwp_notify_lock);
 		INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
 		cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
 		spin_unlock_irq(&hwp_notify_lock);
 
+		if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
+			interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;
+
 		/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
-		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
+		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask);
 		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
 	}
 }
@@ -1761,7 +1782,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
 	u32 vid;
 
 	val = (u64)pstate << 8;
-	if (READ_ONCE(global.no_turbo) && !global.turbo_disabled)
+	if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
 		val |= (u64)1 << 32;
 
 	vid_fp = cpudata->vid.min + mul_fp(
@@ -1926,7 +1947,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
 	u64 val;
 
 	val = (u64)pstate << 8;
-	if (READ_ONCE(global.no_turbo) && !global.turbo_disabled)
+	if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
 		val |= (u64)1 << 32;
 
 	return val;
@@ -2362,54 +2383,54 @@ static const struct pstate_funcs knl_funcs = {
 	.get_val = core_get_val,
 };
 
-#define X86_MATCH(model, policy)					 \
-	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
-					   X86_FEATURE_APERFMPERF, &policy)
+#define X86_MATCH(vfm, policy)					 \
+	X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_APERFMPERF, &policy)
 
 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
-	X86_MATCH(SANDYBRIDGE, core_funcs),
-	X86_MATCH(SANDYBRIDGE_X, core_funcs),
-	X86_MATCH(ATOM_SILVERMONT, silvermont_funcs),
-	X86_MATCH(IVYBRIDGE, core_funcs),
-	X86_MATCH(HASWELL, core_funcs),
-	X86_MATCH(BROADWELL, core_funcs),
-	X86_MATCH(IVYBRIDGE_X, core_funcs),
-	X86_MATCH(HASWELL_X, core_funcs),
-	X86_MATCH(HASWELL_L, core_funcs),
-	X86_MATCH(HASWELL_G, core_funcs),
-	X86_MATCH(BROADWELL_G, core_funcs),
-	X86_MATCH(ATOM_AIRMONT, airmont_funcs),
-	X86_MATCH(SKYLAKE_L, core_funcs),
-	X86_MATCH(BROADWELL_X, core_funcs),
-	X86_MATCH(SKYLAKE, core_funcs),
-	X86_MATCH(BROADWELL_D, core_funcs),
-	X86_MATCH(XEON_PHI_KNL, knl_funcs),
-	X86_MATCH(XEON_PHI_KNM, knl_funcs),
-	X86_MATCH(ATOM_GOLDMONT, core_funcs),
-	X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs),
-	X86_MATCH(SKYLAKE_X, core_funcs),
-	X86_MATCH(COMETLAKE, core_funcs),
-	X86_MATCH(ICELAKE_X, core_funcs),
-	X86_MATCH(TIGERLAKE, core_funcs),
-	X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
-	X86_MATCH(EMERALDRAPIDS_X, core_funcs),
+	X86_MATCH(INTEL_SANDYBRIDGE, core_funcs),
+	X86_MATCH(INTEL_SANDYBRIDGE_X, core_funcs),
+	X86_MATCH(INTEL_ATOM_SILVERMONT, silvermont_funcs),
+	X86_MATCH(INTEL_IVYBRIDGE, core_funcs),
+	X86_MATCH(INTEL_HASWELL, core_funcs),
+	X86_MATCH(INTEL_BROADWELL, core_funcs),
+	X86_MATCH(INTEL_IVYBRIDGE_X, core_funcs),
+	X86_MATCH(INTEL_HASWELL_X, core_funcs),
+	X86_MATCH(INTEL_HASWELL_L, core_funcs),
+	X86_MATCH(INTEL_HASWELL_G, core_funcs),
+	X86_MATCH(INTEL_BROADWELL_G, core_funcs),
+	X86_MATCH(INTEL_ATOM_AIRMONT, airmont_funcs),
+	X86_MATCH(INTEL_SKYLAKE_L, core_funcs),
+	X86_MATCH(INTEL_BROADWELL_X, core_funcs),
+	X86_MATCH(INTEL_SKYLAKE, core_funcs),
+	X86_MATCH(INTEL_BROADWELL_D, core_funcs),
+	X86_MATCH(INTEL_XEON_PHI_KNL, knl_funcs),
+	X86_MATCH(INTEL_XEON_PHI_KNM, knl_funcs),
+	X86_MATCH(INTEL_ATOM_GOLDMONT, core_funcs),
+	X86_MATCH(INTEL_ATOM_GOLDMONT_PLUS, core_funcs),
+	X86_MATCH(INTEL_SKYLAKE_X, core_funcs),
+	X86_MATCH(INTEL_COMETLAKE, core_funcs),
+	X86_MATCH(INTEL_ICELAKE_X, core_funcs),
+	X86_MATCH(INTEL_TIGERLAKE, core_funcs),
+	X86_MATCH(INTEL_SAPPHIRERAPIDS_X, core_funcs),
+	X86_MATCH(INTEL_EMERALDRAPIDS_X, core_funcs),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
 
 #ifdef CONFIG_ACPI
 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
-	X86_MATCH(BROADWELL_D, core_funcs),
-	X86_MATCH(BROADWELL_X, core_funcs),
-	X86_MATCH(SKYLAKE_X, core_funcs),
-	X86_MATCH(ICELAKE_X, core_funcs),
-	X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
+	X86_MATCH(INTEL_BROADWELL_D, core_funcs),
+	X86_MATCH(INTEL_BROADWELL_X, core_funcs),
+	X86_MATCH(INTEL_SKYLAKE_X, core_funcs),
+	X86_MATCH(INTEL_ICELAKE_X, core_funcs),
+	X86_MATCH(INTEL_SAPPHIRERAPIDS_X, core_funcs),
+	X86_MATCH(INTEL_EMERALDRAPIDS_X, core_funcs),
 	{}
 };
 #endif
 
 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
-	X86_MATCH(KABYLAKE, core_funcs),
+	X86_MATCH(INTEL_KABYLAKE, core_funcs),
 	{}
 };
 
@@ -2694,13 +2715,11 @@ static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
 	return intel_cpufreq_cpu_offline(policy);
 }
 
-static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+static void intel_pstate_cpu_exit(struct cpufreq_policy *policy)
 {
 	pr_debug("CPU %d exiting\n", policy->cpu);
 
 	policy->fast_switch_possible = false;
-
-	return 0;
 }
 
 static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
@@ -3031,7 +3050,7 @@ pstate_exit:
 	return ret;
 }
 
-static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
 	struct freq_qos_request *req;
 
@@ -3041,7 +3060,7 @@ static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 	freq_qos_remove_request(req);
 	kfree(req);
 
-	return intel_pstate_cpu_exit(policy);
+	intel_pstate_cpu_exit(policy);
 }
 
 static int intel_cpufreq_suspend(struct cpufreq_policy *policy)
@@ -3345,14 +3364,13 @@ static inline void intel_pstate_request_control_from_smm(void) {}
 
 #define INTEL_PSTATE_HWP_BROADWELL	0x01
 
-#define X86_MATCH_HWP(model, hwp_mode)					\
-	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
-					   X86_FEATURE_HWP, hwp_mode)
+#define X86_MATCH_HWP(vfm, hwp_mode)				\
+	X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_HWP, hwp_mode)
 
 static const struct x86_cpu_id hwp_support_ids[] __initconst = {
-	X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
-	X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL),
-	X86_MATCH_HWP(ANY, 0),
+	X86_MATCH_HWP(INTEL_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
+	X86_MATCH_HWP(INTEL_BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL),
+	X86_MATCH_HWP(INTEL_ANY, 0),
 	{}
 };
 
@@ -3385,15 +3403,19 @@ static const struct x86_cpu_id intel_epp_default[] = {
 	 * which can result in one core turbo frequency for
 	 * AlderLake Mobile CPUs.
 	 */
-	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)),
-	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
-	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
-								    HWP_EPP_BALANCE_POWERSAVE, 115, 16)),
+	X86_MATCH_VFM(INTEL_ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)),
+	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
+	X86_MATCH_VFM(INTEL_METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
+							     179, 64, 16)),
+	X86_MATCH_VFM(INTEL_ARROWLAKE, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
+							   179, 64, 16)),
 	{}
 };
 
 static const struct x86_cpu_id intel_hybrid_scaling_factor[] = {
-	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL),
+	X86_MATCH_VFM(INTEL_METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL),
+	X86_MATCH_VFM(INTEL_ARROWLAKE, HYBRID_SCALING_FACTOR_MTL),
+	X86_MATCH_VFM(INTEL_LUNARLAKE_M, HYBRID_SCALING_FACTOR_LNL),
 	{}
 };