author    Jan200101 <sentrycraft123@gmail.com>  2024-08-09 19:26:14 +0200
committer Jan200101 <sentrycraft123@gmail.com>  2024-08-09 19:26:14 +0200
commit    ee7306c6937f331f0fc0882a29a947c0a9560b3e (patch)
tree      afc6552e91d247a927144361364f6012f9dea647 /SOURCES/0001-amd-pstate.patch
parent    2de9c6dfed5c691b201b8a4374beb94d82ef4e8a (diff)
kernel 6.10.3
Diffstat (limited to 'SOURCES/0001-amd-pstate.patch')
-rw-r--r--  SOURCES/0001-amd-pstate.patch  337
1 file changed, 121 insertions(+), 216 deletions(-)
diff --git a/SOURCES/0001-amd-pstate.patch b/SOURCES/0001-amd-pstate.patch
index 8b81bc4..d4986ef 100644
--- a/SOURCES/0001-amd-pstate.patch
+++ b/SOURCES/0001-amd-pstate.patch
@@ -1,7 +1,7 @@
-From 4ca68a16f48ef388742a25c6d6643bf996e8d1d5 Mon Sep 17 00:00:00 2001
+From 06c02d91fcfeb0fde264f03f0e364161b11a678d Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
-Date: Fri, 5 Jul 2024 10:31:29 +0200
-Subject: [PATCH 02/10] amd-pstate
+Date: Sat, 3 Aug 2024 09:32:45 +0200
+Subject: [PATCH 01/12] amd-pstate
Signed-off-by: Peter Jung <admin@ptr1337.dev>
---
@@ -11,10 +11,10 @@ Signed-off-by: Peter Jung <admin@ptr1337.dev>
arch/x86/kernel/cpu/scattered.c | 1 +
drivers/cpufreq/Kconfig.x86 | 1 +
drivers/cpufreq/acpi-cpufreq.c | 3 +-
- drivers/cpufreq/amd-pstate.c | 356 ++++++++++++++------
- drivers/cpufreq/amd-pstate.h | 16 +-
+ drivers/cpufreq/amd-pstate.c | 307 ++++++++++++++------
+ drivers/cpufreq/amd-pstate.h | 2 +
drivers/cpufreq/cpufreq.c | 11 +-
- 9 files changed, 290 insertions(+), 119 deletions(-)
+ 9 files changed, 251 insertions(+), 95 deletions(-)
diff --git a/Documentation/admin-guide/pm/amd-pstate.rst b/Documentation/admin-guide/pm/amd-pstate.rst
index 1e0d101b020a..d0324d44f548 100644
@@ -65,10 +65,10 @@ index 3c7434329661..6c128d463a14 100644
/*
* BUG word(s)
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
-index e72c2b872957..8738a7b3917d 100644
+index e022e6eb766c..384739d592af 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
-@@ -782,6 +782,8 @@
+@@ -781,6 +781,8 @@
#define MSR_K7_HWCR_IRPERF_EN BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT)
#define MSR_K7_FID_VID_CTL 0xc0010041
#define MSR_K7_FID_VID_STATUS 0xc0010042
@@ -123,7 +123,7 @@ index 4ac3a35dcd98..f4f8587c4ea0 100644
return 0;
}
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
-index 6af175e6c08a..80eaa58f1405 100644
+index a092b13ffbc2..804fab4ebb26 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -51,6 +51,7 @@
@@ -168,7 +168,7 @@ index 6af175e6c08a..80eaa58f1405 100644
if (!cppc_req_cached) {
epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
&cppc_req_cached);
-@@ -252,7 +244,7 @@ static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+@@ -272,7 +264,7 @@ static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
int ret;
struct cppc_perf_ctrls perf_ctrls;
@@ -177,7 +177,7 @@ index 6af175e6c08a..80eaa58f1405 100644
u64 value = READ_ONCE(cpudata->cppc_req_cached);
value &= ~GENMASK_ULL(31, 24);
-@@ -281,10 +273,8 @@ static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
+@@ -304,10 +296,8 @@ static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
int epp = -EINVAL;
int ret;
@@ -190,7 +190,7 @@ index 6af175e6c08a..80eaa58f1405 100644
if (epp == -EINVAL)
epp = epp_values[pref_index];
-@@ -521,7 +511,10 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
+@@ -524,7 +514,10 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags)
{
@@ -201,7 +201,7 @@ index 6af175e6c08a..80eaa58f1405 100644
u64 value = prev;
min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
-@@ -530,6 +523,9 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
+@@ -533,6 +526,9 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
cpudata->max_limit_perf);
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
@@ -211,7 +211,7 @@ index 6af175e6c08a..80eaa58f1405 100644
if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
min_perf = des_perf;
des_perf = 0;
-@@ -541,6 +537,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
+@@ -544,6 +540,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
value &= ~AMD_CPPC_DES_PERF(~0L);
value |= AMD_CPPC_DES_PERF(des_perf);
@@ -222,7 +222,7 @@ index 6af175e6c08a..80eaa58f1405 100644
value &= ~AMD_CPPC_MAX_PERF(~0L);
value |= AMD_CPPC_MAX_PERF(max_perf);
-@@ -651,10 +651,9 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
+@@ -654,10 +654,9 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
unsigned long capacity)
{
unsigned long max_perf, min_perf, des_perf,
@@ -234,7 +234,7 @@ index 6af175e6c08a..80eaa58f1405 100644
if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
amd_pstate_update_min_max_limit(policy);
-@@ -662,7 +661,6 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
+@@ -665,7 +664,6 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
cap_perf = READ_ONCE(cpudata->highest_perf);
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
@@ -242,7 +242,7 @@ index 6af175e6c08a..80eaa58f1405 100644
des_perf = cap_perf;
if (target_perf < capacity)
-@@ -680,32 +678,57 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
+@@ -683,51 +681,111 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
max_perf = min_perf;
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
@@ -254,35 +254,25 @@ index 6af175e6c08a..80eaa58f1405 100644
cpufreq_cpu_put(policy);
}
--static int amd_get_min_freq(struct amd_cpudata *cpudata)
+-static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
+static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on)
{
-- return READ_ONCE(cpudata->min_freq);
--}
-+ struct amd_cpudata *cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata = policy->driver_data;
+ struct cppc_perf_ctrls perf_ctrls;
+ u32 highest_perf, nominal_perf, nominal_freq, max_freq;
-+ int ret;
+ int ret;
--static int amd_get_max_freq(struct amd_cpudata *cpudata)
--{
-- return READ_ONCE(cpudata->max_freq);
--}
+- if (!cpudata->boost_supported) {
+- pr_err("Boost mode is not supported by this processor or SBIOS\n");
+- return -EINVAL;
+ highest_perf = READ_ONCE(cpudata->highest_perf);
+ nominal_perf = READ_ONCE(cpudata->nominal_perf);
+ nominal_freq = READ_ONCE(cpudata->nominal_freq);
+ max_freq = READ_ONCE(cpudata->max_freq);
-
--static int amd_get_nominal_freq(struct amd_cpudata *cpudata)
--{
-- return READ_ONCE(cpudata->nominal_freq);
--}
++
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ u64 value = READ_ONCE(cpudata->cppc_req_cached);
-
--static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata)
--{
-- return READ_ONCE(cpudata->lowest_nonlinear_freq);
++
+ value &= ~GENMASK_ULL(7, 0);
+ value |= on ? highest_perf : nominal_perf;
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
@@ -297,32 +287,46 @@ index 6af175e6c08a..80eaa58f1405 100644
+ cpudata->cpu, ret);
+ return ret;
+ }
-+ }
-+
+ }
+
+- if (state)
+- policy->cpuinfo.max_freq = cpudata->max_freq;
+- else
+- policy->cpuinfo.max_freq = cpudata->nominal_freq * 1000;
+ if (on)
+ policy->cpuinfo.max_freq = max_freq;
+ else if (policy->cpuinfo.max_freq > nominal_freq * 1000)
+ policy->cpuinfo.max_freq = nominal_freq * 1000;
-+
-+ policy->max = policy->cpuinfo.max_freq;
-+
+
+ policy->max = policy->cpuinfo.max_freq;
+
+- ret = freq_qos_update_request(&cpudata->req[1],
+- policy->cpuinfo.max_freq);
+- if (ret < 0)
+- return ret;
+ if (cppc_state == AMD_PSTATE_PASSIVE) {
+ ret = freq_qos_update_request(&cpudata->req[1], policy->cpuinfo.max_freq);
+ if (ret < 0)
+ pr_debug("Failed to update freq constraint: CPU%d\n", cpudata->cpu);
+ }
-+
+
+- return 0;
+ return ret < 0 ? ret : 0;
}
- static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
-@@ -715,36 +738,51 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
+-static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
++static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
+ {
+- u32 highest_perf, nominal_perf;
++ struct amd_cpudata *cpudata = policy->driver_data;
++ int ret;
- if (!cpudata->boost_supported) {
- pr_err("Boost mode is not supported by this processor or SBIOS\n");
-- return -EINVAL;
+- highest_perf = READ_ONCE(cpudata->highest_perf);
+- nominal_perf = READ_ONCE(cpudata->nominal_perf);
++ if (!cpudata->boost_supported) {
++ pr_err("Boost mode is not supported by this processor or SBIOS\n");
+ return -EOPNOTSUPP;
- }
++ }
+ mutex_lock(&amd_pstate_driver_lock);
+ ret = amd_pstate_cpu_boost_update(policy, state);
+ WRITE_ONCE(cpudata->boost_state, !ret ? state : false);
@@ -330,23 +334,17 @@ index 6af175e6c08a..80eaa58f1405 100644
+ refresh_frequency_limits(policy);
+ mutex_unlock(&amd_pstate_driver_lock);
-- if (state)
-- policy->cpuinfo.max_freq = cpudata->max_freq;
-- else
-- policy->cpuinfo.max_freq = cpudata->nominal_freq * 1000;
+- if (highest_perf <= nominal_perf)
+- return;
+ return ret;
+}
-- policy->max = policy->cpuinfo.max_freq;
+- cpudata->boost_supported = true;
+static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata)
+{
+ u64 boost_val;
+ int ret = -1;
-
-- ret = freq_qos_update_request(&cpudata->req[1],
-- policy->cpuinfo.max_freq);
-- if (ret < 0)
-- return ret;
++
+ /*
+ * If platform has no CPB support or disable it, initialize current driver
+ * boost_enabled state to be false, it is not an error for cpufreq core to handle.
@@ -356,40 +354,29 @@ index 6af175e6c08a..80eaa58f1405 100644
+ ret = 0;
+ goto exit_err;
+ }
-
-- return 0;
--}
++
+ /* at least one CPU supports CPB, even if others fail later on to set up */
-+ current_pstate_driver->boost_enabled = true;
-
--static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
--{
-- u32 highest_perf, nominal_perf;
+ current_pstate_driver->boost_enabled = true;
++
+ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
+ if (ret) {
+ pr_err_once("failed to read initial CPU boost state!\n");
+ ret = -EIO;
+ goto exit_err;
+ }
-
-- highest_perf = READ_ONCE(cpudata->highest_perf);
-- nominal_perf = READ_ONCE(cpudata->nominal_perf);
++
+ if (!(boost_val & MSR_K7_HWCR_CPB_DIS))
+ cpudata->boost_supported = true;
-
-- if (highest_perf <= nominal_perf)
-- return;
++
+ return 0;
-
-- cpudata->boost_supported = true;
-- current_pstate_driver->boost_enabled = true;
++
+exit_err:
+ cpudata->boost_supported = false;
+ return ret;
}
static void amd_perf_ctl_reset(unsigned int cpu)
-@@ -773,7 +811,7 @@ static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
+@@ -756,7 +814,7 @@ static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
{
int ret;
@@ -398,58 +385,22 @@ index 6af175e6c08a..80eaa58f1405 100644
u64 cap1;
ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
-@@ -860,7 +898,41 @@ static void amd_pstate_update_limits(unsigned int cpu)
- mutex_unlock(&amd_pstate_driver_lock);
- }
+@@ -852,8 +910,12 @@ static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
+ u32 transition_delay_ns;
--/**
-+/*
-+ * Get pstate transition delay time from ACPI tables that firmware set
-+ * instead of using hardcode value directly.
-+ */
-+static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
-+{
-+ u32 transition_delay_ns;
-+
-+ transition_delay_ns = cppc_get_transition_latency(cpu);
+ transition_delay_ns = cppc_get_transition_latency(cpu);
+- if (transition_delay_ns == CPUFREQ_ETERNAL)
+- return AMD_PSTATE_TRANSITION_DELAY;
+ if (transition_delay_ns == CPUFREQ_ETERNAL) {
+ if (cpu_feature_enabled(X86_FEATURE_FAST_CPPC))
+ return AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY;
+ else
+ return AMD_PSTATE_TRANSITION_DELAY;
+ }
-+
-+ return transition_delay_ns / NSEC_PER_USEC;
-+}
-+
-+/*
-+ * Get pstate transition latency value from ACPI tables that firmware
-+ * set instead of using hardcode value directly.
-+ */
-+static u32 amd_pstate_get_transition_latency(unsigned int cpu)
-+{
-+ u32 transition_latency;
-+
-+ transition_latency = cppc_get_transition_latency(cpu);
-+ if (transition_latency == CPUFREQ_ETERNAL)
-+ return AMD_PSTATE_TRANSITION_LATENCY;
-+
-+ return transition_latency;
-+}
-+
-+/*
- * amd_pstate_init_freq: Initialize the max_freq, min_freq,
- * nominal_freq and lowest_nonlinear_freq for
- * the @cpudata object.
-@@ -881,7 +953,6 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
- u32 boost_ratio, lowest_nonlinear_ratio;
- struct cppc_perf_caps cppc_perf;
--
- ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
-@@ -912,12 +983,30 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
+ return transition_delay_ns / NSEC_PER_USEC;
+ }
+@@ -924,12 +986,30 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
WRITE_ONCE(cpudata->max_freq, max_freq);
@@ -476,40 +427,35 @@ index 6af175e6c08a..80eaa58f1405 100644
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
-- int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
+- int min_freq, max_freq, nominal_freq, ret;
+ int min_freq, max_freq, ret;
struct device *dev;
struct amd_cpudata *cpudata;
-@@ -946,20 +1035,15 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
+@@ -958,18 +1038,12 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
-- min_freq = amd_get_min_freq(cpudata);
-- max_freq = amd_get_max_freq(cpudata);
-- nominal_freq = amd_get_nominal_freq(cpudata);
-- lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
--
-- if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
-- dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
-- min_freq, max_freq);
-- ret = -EINVAL;
+ ret = amd_pstate_init_boost_support(cpudata);
+ if (ret)
- goto free_cpudata1;
-- }
-
-- policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY;
-- policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY;
-+ min_freq = READ_ONCE(cpudata->min_freq);
-+ max_freq = READ_ONCE(cpudata->max_freq);
++ goto free_cpudata1;
+
-+ policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu);
-+ policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu);
+ min_freq = READ_ONCE(cpudata->min_freq);
+ max_freq = READ_ONCE(cpudata->max_freq);
+- nominal_freq = READ_ONCE(cpudata->nominal_freq);
+-
+- if (min_freq <= 0 || max_freq <= 0 ||
+- nominal_freq <= 0 || min_freq > max_freq) {
+- dev_err(dev,
+- "min_freq(%d) or max_freq(%d) or nominal_freq (%d) value is incorrect, check _CPC in ACPI tables\n",
+- min_freq, max_freq, nominal_freq);
+- ret = -EINVAL;
+- goto free_cpudata1;
+- }
- policy->min = min_freq;
- policy->max = max_freq;
-@@ -967,10 +1051,12 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
+ policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu);
+ policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu);
+@@ -980,10 +1054,12 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.min_freq = min_freq;
policy->cpuinfo.max_freq = max_freq;
@@ -523,7 +469,7 @@ index 6af175e6c08a..80eaa58f1405 100644
policy->fast_switch_possible = true;
ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
-@@ -992,7 +1078,6 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
+@@ -1005,7 +1081,6 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = cpudata;
@@ -531,25 +477,7 @@ index 6af175e6c08a..80eaa58f1405 100644
if (!current_pstate_driver->adjust_perf)
current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
-@@ -1052,7 +1137,7 @@ static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
- int max_freq;
- struct amd_cpudata *cpudata = policy->driver_data;
-
-- max_freq = amd_get_max_freq(cpudata);
-+ max_freq = READ_ONCE(cpudata->max_freq);
- if (max_freq < 0)
- return max_freq;
-
-@@ -1065,7 +1150,7 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
- int freq;
- struct amd_cpudata *cpudata = policy->driver_data;
-
-- freq = amd_get_lowest_nonlinear_freq(cpudata);
-+ freq = READ_ONCE(cpudata->lowest_nonlinear_freq);
- if (freq < 0)
- return freq;
-
-@@ -1203,7 +1288,7 @@ static int amd_pstate_change_mode_without_dvr_change(int mode)
+@@ -1216,7 +1291,7 @@ static int amd_pstate_change_mode_without_dvr_change(int mode)
cppc_state = mode;
@@ -558,38 +486,38 @@ index 6af175e6c08a..80eaa58f1405 100644
return 0;
for_each_present_cpu(cpu) {
-@@ -1376,7 +1461,7 @@ static bool amd_pstate_acpi_pm_profile_undefined(void)
+@@ -1389,7 +1464,7 @@ static bool amd_pstate_acpi_pm_profile_undefined(void)
static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
{
-- int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
+- int min_freq, max_freq, nominal_freq, ret;
+ int min_freq, max_freq, ret;
struct amd_cpudata *cpudata;
struct device *dev;
u64 value;
-@@ -1407,16 +1492,12 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
+@@ -1420,17 +1495,12 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
-- min_freq = amd_get_min_freq(cpudata);
-- max_freq = amd_get_max_freq(cpudata);
-- nominal_freq = amd_get_nominal_freq(cpudata);
-- lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
-- if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
-- dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
-- min_freq, max_freq);
-- ret = -EINVAL;
+ ret = amd_pstate_init_boost_support(cpudata);
+ if (ret)
- goto free_cpudata1;
-- }
++ goto free_cpudata1;
+
-+ min_freq = READ_ONCE(cpudata->min_freq);
-+ max_freq = READ_ONCE(cpudata->max_freq);
+ min_freq = READ_ONCE(cpudata->min_freq);
+ max_freq = READ_ONCE(cpudata->max_freq);
+- nominal_freq = READ_ONCE(cpudata->nominal_freq);
+- if (min_freq <= 0 || max_freq <= 0 ||
+- nominal_freq <= 0 || min_freq > max_freq) {
+- dev_err(dev,
+- "min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect, check _CPC in ACPI tables\n",
+- min_freq, max_freq, nominal_freq);
+- ret = -EINVAL;
+- goto free_cpudata1;
+- }
policy->cpuinfo.min_freq = min_freq;
policy->cpuinfo.max_freq = max_freq;
-@@ -1425,11 +1506,13 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
+@@ -1439,11 +1509,13 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = cpudata;
@@ -604,7 +532,7 @@ index 6af175e6c08a..80eaa58f1405 100644
/*
* Set the policy to provide a valid fallback value in case
* the default cpufreq governor is neither powersave nor performance.
-@@ -1440,7 +1523,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
+@@ -1454,7 +1526,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
else
policy->policy = CPUFREQ_POLICY_POWERSAVE;
@@ -613,7 +541,7 @@ index 6af175e6c08a..80eaa58f1405 100644
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
if (ret)
return ret;
-@@ -1451,7 +1534,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
+@@ -1465,7 +1537,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
return ret;
WRITE_ONCE(cpudata->cppc_cap1_cached, value);
}
@@ -621,7 +549,7 @@ index 6af175e6c08a..80eaa58f1405 100644
return 0;
-@@ -1530,7 +1612,7 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
+@@ -1544,7 +1615,7 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
epp = 0;
/* Set initial EPP value */
@@ -630,7 +558,7 @@ index 6af175e6c08a..80eaa58f1405 100644
value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24;
}
-@@ -1553,6 +1635,12 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
+@@ -1567,6 +1638,12 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
amd_pstate_epp_update_limit(policy);
@@ -643,7 +571,7 @@ index 6af175e6c08a..80eaa58f1405 100644
return 0;
}
-@@ -1569,7 +1657,7 @@ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
+@@ -1583,7 +1660,7 @@ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
value = READ_ONCE(cpudata->cppc_req_cached);
max_perf = READ_ONCE(cpudata->highest_perf);
@@ -652,7 +580,7 @@ index 6af175e6c08a..80eaa58f1405 100644
wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
} else {
perf_ctrls.max_perf = max_perf;
-@@ -1603,7 +1691,7 @@ static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
+@@ -1617,7 +1694,7 @@ static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
value = READ_ONCE(cpudata->cppc_req_cached);
mutex_lock(&amd_pstate_limits_lock);
@@ -661,7 +589,7 @@ index 6af175e6c08a..80eaa58f1405 100644
cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
/* Set max perf same as min perf */
-@@ -1707,6 +1795,7 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
+@@ -1721,6 +1798,7 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
.suspend = amd_pstate_epp_suspend,
.resume = amd_pstate_epp_resume,
.update_limits = amd_pstate_update_limits,
@@ -669,7 +597,7 @@ index 6af175e6c08a..80eaa58f1405 100644
.name = "amd-pstate-epp",
.attr = amd_pstate_epp_attr,
};
-@@ -1730,6 +1819,46 @@ static int __init amd_pstate_set_driver(int mode_idx)
+@@ -1744,6 +1822,46 @@ static int __init amd_pstate_set_driver(int mode_idx)
return -EINVAL;
}
@@ -716,7 +644,7 @@ index 6af175e6c08a..80eaa58f1405 100644
static int __init amd_pstate_init(void)
{
struct device *dev_root;
-@@ -1738,6 +1867,11 @@ static int __init amd_pstate_init(void)
+@@ -1752,6 +1870,11 @@ static int __init amd_pstate_init(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return -ENODEV;
@@ -728,7 +656,7 @@ index 6af175e6c08a..80eaa58f1405 100644
if (!acpi_cpc_valid()) {
pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
return -ENODEV;
-@@ -1752,35 +1886,43 @@ static int __init amd_pstate_init(void)
+@@ -1766,35 +1889,43 @@ static int __init amd_pstate_init(void)
/* check if this machine need CPPC quirks */
dmi_check_system(amd_pstate_quirks_table);
@@ -782,7 +710,7 @@ index 6af175e6c08a..80eaa58f1405 100644
pr_debug("AMD CPPC MSR based functionality is supported\n");
if (cppc_state != AMD_PSTATE_ACTIVE)
current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
-@@ -1794,13 +1936,15 @@ static int __init amd_pstate_init(void)
+@@ -1808,13 +1939,15 @@ static int __init amd_pstate_init(void)
/* enable amd pstate feature */
ret = amd_pstate_enable(true);
if (ret) {
@@ -800,7 +728,7 @@ index 6af175e6c08a..80eaa58f1405 100644
dev_root = bus_get_dev_root(&cpu_subsys);
if (dev_root) {
-@@ -1816,6 +1960,8 @@ static int __init amd_pstate_init(void)
+@@ -1830,6 +1963,8 @@ static int __init amd_pstate_init(void)
global_attr_free:
cpufreq_unregister_driver(current_pstate_driver);
@@ -810,33 +738,10 @@ index 6af175e6c08a..80eaa58f1405 100644
}
device_initcall(amd_pstate_init);
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
-index bc341f35908d..cc8bb2bc325a 100644
+index e6a28e7f4dbf..cc8bb2bc325a 100644
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
-@@ -42,13 +42,17 @@ struct amd_aperf_mperf {
- * @lowest_perf: the absolute lowest performance level of the processor
- * @prefcore_ranking: the preferred core ranking, the higher value indicates a higher
- * priority.
-- * @max_freq: the frequency that mapped to highest_perf
-- * @min_freq: the frequency that mapped to lowest_perf
-- * @nominal_freq: the frequency that mapped to nominal_perf
-- * @lowest_nonlinear_freq: the frequency that mapped to lowest_nonlinear_perf
-+ * @min_limit_perf: Cached value of the performance corresponding to policy->min
-+ * @max_limit_perf: Cached value of the performance corresponding to policy->max
-+ * @min_limit_freq: Cached value of policy->min (in khz)
-+ * @max_limit_freq: Cached value of policy->max (in khz)
-+ * @max_freq: the frequency (in khz) that mapped to highest_perf
-+ * @min_freq: the frequency (in khz) that mapped to lowest_perf
-+ * @nominal_freq: the frequency (in khz) that mapped to nominal_perf
-+ * @lowest_nonlinear_freq: the frequency (in khz) that mapped to lowest_nonlinear_perf
- * @cur: Difference of Aperf/Mperf/tsc count between last and current sample
- * @prev: Last Aperf/Mperf/tsc count value read from register
-- * @freq: current cpu frequency value
-+ * @freq: current cpu frequency value (in khz)
- * @boost_supported: check whether the Processor or SBIOS supports boost mode
- * @hw_prefcore: check whether HW supports preferred core featue.
- * Only when hw_prefcore and early prefcore param are true,
-@@ -95,6 +99,8 @@ struct amd_cpudata {
+@@ -99,6 +99,8 @@ struct amd_cpudata {
u32 policy;
u64 cppc_cap1_cached;
bool suspended;
@@ -846,7 +751,7 @@ index bc341f35908d..cc8bb2bc325a 100644
#endif /* _LINUX_AMD_PSTATE_H */
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
-index d7630d9cdb2f..bbbeb8b90313 100644
+index 9e5060b27864..270ea04fb616 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -614,10 +614,9 @@ static ssize_t show_boost(struct kobject *kobj,