Diffstat (limited to 'SOURCES')
-rw-r--r--  SOURCES/0001-amd-pstate.patch     941
-rw-r--r--  SOURCES/0001-ntsync.patch          10
-rw-r--r--  SOURCES/Patchlist.changelog         3
-rw-r--r--  SOURCES/asus-linux.patch           89
-rw-r--r--  SOURCES/kernel.changelog            5
-rw-r--r--  SOURCES/patch-6.9-redhat.patch     16
6 files changed, 910 insertions, 154 deletions
diff --git a/SOURCES/0001-amd-pstate.patch b/SOURCES/0001-amd-pstate.patch
index 8e7b5cb..f654f25 100644
--- a/SOURCES/0001-amd-pstate.patch
+++ b/SOURCES/0001-amd-pstate.patch
@@ -1,19 +1,163 @@
-From 14416a42f37d3455e6de7d249df9a76b40a456bf Mon Sep 17 00:00:00 2001
+From b36ccc153f8a2ee04f046a4e10220dbe308f37ef Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
-Date: Fri, 21 Jun 2024 15:31:56 +0200
+Date: Tue, 25 Jun 2024 13:29:13 +0200
Subject: [PATCH 02/10] amd-pstate
Signed-off-by: Peter Jung <admin@ptr1337.dev>
---
- drivers/cpufreq/amd-pstate.c | 115 +++++++++++++++++++----------------
- drivers/cpufreq/amd-pstate.h | 14 +++--
- 2 files changed, 70 insertions(+), 59 deletions(-)
+ Documentation/admin-guide/pm/amd-pstate.rst | 40 +-
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/include/asm/msr-index.h | 2 +
+ arch/x86/kernel/cpu/scattered.c | 1 +
+ drivers/cpufreq/acpi-cpufreq.c | 2 -
+ drivers/cpufreq/amd-pstate-ut.c | 2 +-
+ drivers/cpufreq/amd-pstate.c | 463 +++++++++++++++-----
+ drivers/cpufreq/amd-pstate.h | 29 +-
+ drivers/cpufreq/cpufreq.c | 25 +-
+ include/linux/cpufreq.h | 2 +
+ 10 files changed, 436 insertions(+), 131 deletions(-)
+diff --git a/Documentation/admin-guide/pm/amd-pstate.rst b/Documentation/admin-guide/pm/amd-pstate.rst
+index 1e0d101b020a0..57995f54f0c88 100644
+--- a/Documentation/admin-guide/pm/amd-pstate.rst
++++ b/Documentation/admin-guide/pm/amd-pstate.rst
+@@ -281,6 +281,27 @@ integer values defined between 0 to 255 when EPP feature is enabled by platform
+ firmware, if EPP feature is disabled, driver will ignore the written value
+ This attribute is read-write.
+
++``boost``
++The `boost` sysfs attribute provides control over the CPU core
++performance boost, allowing users to manage the maximum frequency limitation
++of the CPU. This attribute can be used to enable or disable the boost feature
++on individual CPUs.
++
++When the boost feature is enabled, the CPU can dynamically increase its frequency
++beyond the base frequency, providing enhanced performance for demanding workloads.
++On the other hand, disabling the boost feature restricts the CPU to operate at the
++base frequency, which may be desirable in certain scenarios to prioritize power
++efficiency or manage temperature.
++
++To manipulate the `boost` attribute, users can write a value of `0` to disable the
++boost or `1` to enable it, for the respective CPU using the sysfs path
++`/sys/devices/system/cpu/cpuX/cpufreq/boost`, where `X` represents the CPU number.
++
++It is important to note that modifying the global variable
++`/sys/devices/system/cpu/amd_pstate/cpb_boost` will override the individual CPU
++settings.
++
++
+ Other performance and frequency values can be read back from
+ ``/sys/devices/system/cpu/cpuX/acpi_cppc/``, see :ref:`cppc_sysfs`.
+
+@@ -406,7 +427,7 @@ control its functionality at the system level. They are located in the
+ ``/sys/devices/system/cpu/amd_pstate/`` directory and affect all CPUs.
+
+ ``status``
+- Operation mode of the driver: "active", "passive" or "disable".
++ Operation mode of the driver: "active", "passive", "guided" or "disable".
+
+ "active"
+ The driver is functional and in the ``active mode``
+@@ -440,6 +461,23 @@ control its functionality at the system level. They are located in the
+ This attribute is read-only to check the state of preferred core set
+ by the kernel parameter.
+
++``cpb_boost``
++	Specifies whether core performance boost is requested to be enabled or disabled.
++	If core performance boost is disabled while a core is in a boosted P-state, the
++	core automatically transitions to the highest performance non-boosted P-state.
++	AMD Core Performance Boost (CPB) is controlled by this attribute file, which allows
++	the user to change the frequency boosting state of all cores. It supports all amd-pstate modes.
++
++ States of the driver "/sys/devices/system/cpu/amd_pstate/cpb_boost"
++ "disabled" Core Performance Boosting Disabled.
++ "enabled" Core Performance Boosting Enabled.
++
++ To enable core performance boost:
++ # echo "enabled" > /sys/devices/system/cpu/amd_pstate/cpb_boost
++
++ To disable core performance boost:
++ # echo "disabled" > /sys/devices/system/cpu/amd_pstate/cpb_boost
++
+ ``cpupower`` tool support for ``amd-pstate``
+ ===============================================
+
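[Editor's note: the snippet below is not part of the patch series; it is a hypothetical userspace sketch added for illustration. It assumes only the sysfs paths and values quoted in the amd-pstate.rst hunk above (/sys/devices/system/cpu/cpuX/cpufreq/boost and /sys/devices/system/cpu/amd_pstate/cpb_boost), and it only takes effect as root on a kernel built with this patch.]

/*
 * Hypothetical helper (not from the patch): toggle the per-CPU "boost"
 * attribute and the global "cpb_boost" attribute documented above.
 */
#include <stdio.h>
#include <stdlib.h>

static int write_sysfs(const char *path, const char *value)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	if (fputs(value, f) == EOF) {
		perror(path);
		fclose(f);
		return -1;
	}
	if (fclose(f) == EOF) {
		perror(path);
		return -1;
	}
	return 0;
}

int main(int argc, char **argv)
{
	char path[128];
	int cpu = argc > 1 ? atoi(argv[1]) : 0;

	/* Per-CPU switch: "1" enables boost for cpuN, "0" disables it. */
	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/cpufreq/boost", cpu);
	write_sysfs(path, "1");

	/* Global switch ("enabled"/"disabled"); overrides the per-CPU settings. */
	write_sysfs("/sys/devices/system/cpu/amd_pstate/cpb_boost", "enabled");

	return 0;
}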
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 3c7434329661c..6c128d463a143 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -470,6 +470,7 @@
+ #define X86_FEATURE_BHI_CTRL (21*32+ 2) /* "" BHI_DIS_S HW control available */
+ #define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* "" BHI_DIS_S HW control enabled */
+ #define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */
++#define X86_FEATURE_FAST_CPPC (21*32 + 5) /* "" AMD Fast CPPC */
+
+ /*
+ * BUG word(s)
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index e72c2b8729579..8738a7b3917d8 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -782,6 +782,8 @@
+ #define MSR_K7_HWCR_IRPERF_EN BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT)
+ #define MSR_K7_FID_VID_CTL 0xc0010041
+ #define MSR_K7_FID_VID_STATUS 0xc0010042
++#define MSR_K7_HWCR_CPB_DIS_BIT 25
++#define MSR_K7_HWCR_CPB_DIS BIT_ULL(MSR_K7_HWCR_CPB_DIS_BIT)
+
+ /* K6 MSRs */
+ #define MSR_K6_WHCR 0xc0000082
+diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
+index af5aa2c754c22..c84c30188fdf2 100644
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -45,6 +45,7 @@ static const struct cpuid_bit cpuid_bits[] = {
+ { X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 },
+ { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
+ { X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
++ { X86_FEATURE_FAST_CPPC, CPUID_EDX, 15, 0x80000007, 0 },
+ { X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
+ { X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 },
+ { X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 },
+diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
+index 37f1cdf46d291..2fc82831bddd5 100644
+--- a/drivers/cpufreq/acpi-cpufreq.c
++++ b/drivers/cpufreq/acpi-cpufreq.c
+@@ -50,8 +50,6 @@ enum {
+ #define AMD_MSR_RANGE (0x7)
+ #define HYGON_MSR_RANGE (0x7)
+
+-#define MSR_K7_HWCR_CPB_DIS (1ULL << 25)
+-
+ struct acpi_cpufreq_data {
+ unsigned int resume;
+ unsigned int cpu_feature;
+diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
+index fc275d41d51e9..b528f198f4c36 100644
+--- a/drivers/cpufreq/amd-pstate-ut.c
++++ b/drivers/cpufreq/amd-pstate-ut.c
+@@ -227,7 +227,7 @@ static void amd_pstate_ut_check_freq(u32 index)
+ goto skip_test;
+ }
+
+- if (cpudata->boost_supported) {
++ if (amd_pstate_global_params.cpb_boost) {
+ if ((policy->max == cpudata->max_freq) ||
+ (policy->max == cpudata->nominal_freq))
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
-index 6c989d859b39..c08463f8dcac 100644
+index 6af175e6c08ac..6eeba793bf442 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
-@@ -85,15 +85,6 @@ struct quirk_entry {
+@@ -51,6 +51,7 @@
+
+ #define AMD_PSTATE_TRANSITION_LATENCY 20000
+ #define AMD_PSTATE_TRANSITION_DELAY 1000
++#define AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY 600
+ #define CPPC_HIGHEST_PERF_PERFORMANCE 196
+ #define CPPC_HIGHEST_PERF_DEFAULT 166
+
+@@ -85,15 +86,6 @@ struct quirk_entry {
u32 lowest_freq;
};
@@ -29,7 +173,136 @@ index 6c989d859b39..c08463f8dcac 100644
static struct cpufreq_driver *current_pstate_driver;
static struct cpufreq_driver amd_pstate_driver;
static struct cpufreq_driver amd_pstate_epp_driver;
-@@ -688,26 +679,6 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
+@@ -102,6 +94,11 @@ static bool cppc_enabled;
+ static bool amd_pstate_prefcore = true;
+ static struct quirk_entry *quirks;
+
++/* export the amd_pstate_global_params for unit test */
++struct amd_pstate_global_params amd_pstate_global_params;
++EXPORT_SYMBOL_GPL(amd_pstate_global_params);
++static int amd_pstate_cpu_boost(int cpu, bool state);
++
+ /*
+ * AMD Energy Preference Performance (EPP)
+ * The EPP is used in the CCLK DPM controller to drive
+@@ -143,6 +140,16 @@ static unsigned int epp_values[] = {
+ [EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE,
+ };
+
++enum CPB_STATE_INDEX {
++ CPB_STATE_DISABLED = 0,
++ CPB_STATE_ENABLED = 1,
++};
++
++static const char * const cpb_state[] = {
++ [CPB_STATE_DISABLED] = "disabled",
++ [CPB_STATE_ENABLED] = "enabled",
++};
++
+ typedef int (*cppc_mode_transition_fn)(int);
+
+ static struct quirk_entry quirk_amd_7k62 = {
+@@ -157,7 +164,7 @@ static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
+ * broken BIOS lack of nominal_freq and lowest_freq capabilities
+ * definition in ACPI tables
+ */
+- if (boot_cpu_has(X86_FEATURE_ZEN2)) {
++ if (cpu_feature_enabled(X86_FEATURE_ZEN2)) {
+ quirks = dmi->driver_data;
+ pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident);
+ return 1;
+@@ -199,7 +206,7 @@ static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
+ u64 epp;
+ int ret;
+
+- if (boot_cpu_has(X86_FEATURE_CPPC)) {
++ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
+ if (!cppc_req_cached) {
+ epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
+ &cppc_req_cached);
+@@ -252,7 +259,7 @@ static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+ int ret;
+ struct cppc_perf_ctrls perf_ctrls;
+
+- if (boot_cpu_has(X86_FEATURE_CPPC)) {
++ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
+ u64 value = READ_ONCE(cpudata->cppc_req_cached);
+
+ value &= ~GENMASK_ULL(31, 24);
+@@ -281,10 +288,8 @@ static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
+ int epp = -EINVAL;
+ int ret;
+
+- if (!pref_index) {
+- pr_debug("EPP pref_index is invalid\n");
+- return -EINVAL;
+- }
++ if (!pref_index)
++ epp = cpudata->epp_default;
+
+ if (epp == -EINVAL)
+ epp = epp_values[pref_index];
+@@ -521,7 +526,10 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
+ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
+ u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags)
+ {
++ unsigned long max_freq;
++ struct cpufreq_policy *policy = cpufreq_cpu_get(cpudata->cpu);
+ u64 prev = READ_ONCE(cpudata->cppc_req_cached);
++ u32 nominal_perf = READ_ONCE(cpudata->nominal_perf);
+ u64 value = prev;
+
+ min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
+@@ -530,6 +538,9 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
+ cpudata->max_limit_perf);
+ des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
+
++ max_freq = READ_ONCE(cpudata->max_limit_freq);
++ policy->cur = div_u64(des_perf * max_freq, max_perf);
++
+ if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
+ min_perf = des_perf;
+ des_perf = 0;
+@@ -541,6 +552,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
+ value &= ~AMD_CPPC_DES_PERF(~0L);
+ value |= AMD_CPPC_DES_PERF(des_perf);
+
++ /* limit the max perf when core performance boost feature is disabled */
++ if (!amd_pstate_global_params.cpb_boost)
++ max_perf = min_t(unsigned long, nominal_perf, max_perf);
++
+ value &= ~AMD_CPPC_MAX_PERF(~0L);
+ value |= AMD_CPPC_MAX_PERF(max_perf);
+
+@@ -651,10 +666,9 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
+ unsigned long capacity)
+ {
+ unsigned long max_perf, min_perf, des_perf,
+- cap_perf, lowest_nonlinear_perf, max_freq;
++ cap_perf, lowest_nonlinear_perf;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct amd_cpudata *cpudata = policy->driver_data;
+- unsigned int target_freq;
+
+ if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+ amd_pstate_update_min_max_limit(policy);
+@@ -662,7 +676,6 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
+
+ cap_perf = READ_ONCE(cpudata->highest_perf);
+ lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+- max_freq = READ_ONCE(cpudata->max_freq);
+
+ des_perf = cap_perf;
+ if (target_perf < capacity)
+@@ -680,34 +693,12 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
+ max_perf = min_perf;
+
+ des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
+- target_freq = div_u64(des_perf * max_freq, max_perf);
+- policy->cur = target_freq;
+
+ amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true,
+ policy->governor->flags);
cpufreq_cpu_put(policy);
}
@@ -56,7 +329,92 @@ index 6c989d859b39..c08463f8dcac 100644
static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
{
struct amd_cpudata *cpudata = policy->driver_data;
-@@ -860,7 +831,37 @@ static void amd_pstate_update_limits(unsigned int cpu)
+@@ -715,36 +706,53 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
+
+ if (!cpudata->boost_supported) {
+ pr_err("Boost mode is not supported by this processor or SBIOS\n");
+- return -EINVAL;
++ return -ENOTSUPP;
+ }
++ mutex_lock(&amd_pstate_driver_lock);
++ ret = amd_pstate_cpu_boost(policy->cpu, state);
++ mutex_unlock(&amd_pstate_driver_lock);
+
+- if (state)
+- policy->cpuinfo.max_freq = cpudata->max_freq;
+- else
+- policy->cpuinfo.max_freq = cpudata->nominal_freq * 1000;
+-
+- policy->max = policy->cpuinfo.max_freq;
+-
+- ret = freq_qos_update_request(&cpudata->req[1],
+- policy->cpuinfo.max_freq);
+- if (ret < 0)
+- return ret;
+-
+- return 0;
++ return ret < 0 ? ret : 0;
+ }
+
+-static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
++static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata)
+ {
+- u32 highest_perf, nominal_perf;
++ u64 boost_val;
++ int ret = -1;
+
+- highest_perf = READ_ONCE(cpudata->highest_perf);
+- nominal_perf = READ_ONCE(cpudata->nominal_perf);
++ /*
++	 * If the platform has no CPB support or it is disabled, initialize the current driver
++	 * boost_enabled state to false; it is not an error for the cpufreq core to handle.
++ */
++ if (!cpu_feature_enabled(X86_FEATURE_CPB)) {
++ pr_debug_once("Boost CPB capabilities not present in the processor\n");
++ ret = 0;
++ goto exit_err;
++ }
+
+- if (highest_perf <= nominal_perf)
+- return;
++ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
++ if (ret) {
++ pr_err_once("failed to read initial CPU boost state!\n");
++ ret = -EIO;
++ goto exit_err;
++ }
++
++ amd_pstate_global_params.cpb_supported = !(boost_val & MSR_K7_HWCR_CPB_DIS);
++ if (amd_pstate_global_params.cpb_supported) {
++ current_pstate_driver->boost_enabled = true;
++ cpudata->boost_supported = true;
++ cpudata->boost_state = true;
++ }
+
+- cpudata->boost_supported = true;
+- current_pstate_driver->boost_enabled = true;
++ amd_pstate_global_params.cpb_boost = amd_pstate_global_params.cpb_supported;
++ return 0;
++
++exit_err:
++ cpudata->boost_supported = false;
++ cpudata->boost_state = false;
++ current_pstate_driver->boost_enabled = false;
++ amd_pstate_global_params.cpb_boost = false;
++ return ret;
+ }
+
+ static void amd_perf_ctl_reset(unsigned int cpu)
+@@ -773,7 +781,7 @@ static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
+ {
+ int ret;
+
+- if (boot_cpu_has(X86_FEATURE_CPPC)) {
++ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
+ u64 cap1;
+
+ ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
+@@ -860,7 +868,41 @@ static void amd_pstate_update_limits(unsigned int cpu)
mutex_unlock(&amd_pstate_driver_lock);
}
@@ -70,8 +428,12 @@ index 6c989d859b39..c08463f8dcac 100644
+ u32 transition_delay_ns;
+
+ transition_delay_ns = cppc_get_transition_latency(cpu);
-+ if (transition_delay_ns == CPUFREQ_ETERNAL)
-+ return AMD_PSTATE_TRANSITION_DELAY;
++ if (transition_delay_ns == CPUFREQ_ETERNAL) {
++ if (cpu_feature_enabled(X86_FEATURE_FAST_CPPC))
++ return AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY;
++ else
++ return AMD_PSTATE_TRANSITION_DELAY;
++ }
+
+ return transition_delay_ns / NSEC_PER_USEC;
+}
@@ -95,7 +457,7 @@ index 6c989d859b39..c08463f8dcac 100644
* amd_pstate_init_freq: Initialize the max_freq, min_freq,
* nominal_freq and lowest_nonlinear_freq for
* the @cpudata object.
-@@ -881,7 +882,6 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
+@@ -881,7 +923,6 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
u32 boost_ratio, lowest_nonlinear_ratio;
struct cppc_perf_caps cppc_perf;
@@ -103,16 +465,39 @@ index 6c989d859b39..c08463f8dcac 100644
ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
-@@ -917,7 +917,7 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
+@@ -912,12 +953,30 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
+ WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
+ WRITE_ONCE(cpudata->max_freq, max_freq);
+
++ /**
++ * Below values need to be initialized correctly, otherwise driver will fail to load
++ * max_freq is calculated according to (nominal_freq * highest_perf)/nominal_perf
++ * lowest_nonlinear_freq is a value between [min_freq, nominal_freq]
++ * Check _CPC in ACPI table objects if any values are incorrect
++ */
++ if (min_freq <= 0 || max_freq <= 0 || nominal_freq <= 0 || min_freq > max_freq) {
++ pr_err("min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect\n",
++ min_freq, max_freq, nominal_freq * 1000);
++ return -EINVAL;
++ }
++
++ if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq * 1000) {
++ pr_err("lowest_nonlinear_freq(%d) value is out of range [min_freq(%d), nominal_freq(%d)]\n",
++ lowest_nonlinear_freq, min_freq, nominal_freq * 1000);
++ return -EINVAL;
++ }
++
+ return 0;
+ }
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
-+ int min_freq, max_freq, nominal_freq, ret;
++ int min_freq, max_freq, ret;
struct device *dev;
struct amd_cpudata *cpudata;
-@@ -946,20 +946,21 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
+@@ -946,20 +1005,11 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
@@ -120,21 +505,15 @@ index 6c989d859b39..c08463f8dcac 100644
- max_freq = amd_get_max_freq(cpudata);
- nominal_freq = amd_get_nominal_freq(cpudata);
- lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
-+ min_freq = READ_ONCE(cpudata->min_freq);
-+ max_freq = READ_ONCE(cpudata->max_freq);
-+ nominal_freq = READ_ONCE(cpudata->nominal_freq);
-
+-
- if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
- dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
- min_freq, max_freq);
-+ if (min_freq <= 0 || max_freq <= 0 ||
-+ nominal_freq <= 0 || min_freq > max_freq) {
-+ dev_err(dev,
-+ "min_freq(%d) or max_freq(%d) or nominal_freq (%d) value is incorrect, check _CPC in ACPI tables\n",
-+ min_freq, max_freq, nominal_freq);
- ret = -EINVAL;
- goto free_cpudata1;
- }
+- ret = -EINVAL;
+- goto free_cpudata1;
+- }
++ min_freq = READ_ONCE(cpudata->min_freq);
++ max_freq = READ_ONCE(cpudata->max_freq);
- policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY;
- policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY;
@@ -143,7 +522,24 @@ index 6c989d859b39..c08463f8dcac 100644
policy->min = min_freq;
policy->max = max_freq;
-@@ -1052,7 +1053,7 @@ static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
+@@ -970,7 +1020,7 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
+ /* It will be updated by governor */
+ policy->cur = policy->cpuinfo.min_freq;
+
+- if (boot_cpu_has(X86_FEATURE_CPPC))
++ if (cpu_feature_enabled(X86_FEATURE_CPPC))
+ policy->fast_switch_possible = true;
+
+ ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
+@@ -992,7 +1042,6 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
+
+ policy->driver_data = cpudata;
+
+- amd_pstate_boost_init(cpudata);
+ if (!current_pstate_driver->adjust_perf)
+ current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
+
+@@ -1052,7 +1101,7 @@ static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
int max_freq;
struct amd_cpudata *cpudata = policy->driver_data;
@@ -152,7 +548,7 @@ index 6c989d859b39..c08463f8dcac 100644
if (max_freq < 0)
return max_freq;
-@@ -1065,7 +1066,7 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
+@@ -1065,7 +1114,7 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
int freq;
struct amd_cpudata *cpudata = policy->driver_data;
@@ -161,16 +557,177 @@ index 6c989d859b39..c08463f8dcac 100644
if (freq < 0)
return freq;
-@@ -1376,7 +1377,7 @@ static bool amd_pstate_acpi_pm_profile_undefined(void)
+@@ -1203,7 +1252,7 @@ static int amd_pstate_change_mode_without_dvr_change(int mode)
+
+ cppc_state = mode;
+- if (boot_cpu_has(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
++ if (cpu_feature_enabled(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
+ return 0;
+
+ for_each_present_cpu(cpu) {
+@@ -1312,6 +1361,118 @@ static ssize_t prefcore_show(struct device *dev,
+ return sysfs_emit(buf, "%s\n", str_enabled_disabled(amd_pstate_prefcore));
+ }
+
++static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on)
++{
++ struct amd_cpudata *cpudata = policy->driver_data;
++ struct cppc_perf_ctrls perf_ctrls;
++ u32 highest_perf, nominal_perf, nominal_freq, max_freq;
++ int ret;
++
++ highest_perf = READ_ONCE(cpudata->highest_perf);
++ nominal_perf = READ_ONCE(cpudata->nominal_perf);
++ nominal_freq = READ_ONCE(cpudata->nominal_freq);
++ max_freq = READ_ONCE(cpudata->max_freq);
++
++ if (boot_cpu_has(X86_FEATURE_CPPC)) {
++ u64 value = READ_ONCE(cpudata->cppc_req_cached);
++
++ value &= ~GENMASK_ULL(7, 0);
++ value |= on ? highest_perf : nominal_perf;
++ WRITE_ONCE(cpudata->cppc_req_cached, value);
++
++ wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
++ } else {
++ perf_ctrls.max_perf = on ? highest_perf : nominal_perf;
++ ret = cppc_set_perf(cpudata->cpu, &perf_ctrls);
++ if (ret) {
++ cpufreq_cpu_release(policy);
++ pr_debug("Failed to set max perf on CPU:%d. ret:%d\n",
++ cpudata->cpu, ret);
++ return ret;
++ }
++ }
++
++ if (on)
++ policy->cpuinfo.max_freq = max_freq;
++ else
++ policy->cpuinfo.max_freq = nominal_freq * 1000;
++
++ policy->max = policy->cpuinfo.max_freq;
++
++ if (cppc_state == AMD_PSTATE_PASSIVE) {
++ ret = freq_qos_update_request(&cpudata->req[1], policy->cpuinfo.max_freq);
++ if (ret < 0)
++ pr_debug("Failed to update freq constraint: CPU%d\n", cpudata->cpu);
++ }
++
++ return ret < 0 ? ret : 0;
++}
++
++static int amd_pstate_cpu_boost(int cpu, bool state)
++{
++ int ret;
++ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
++ struct amd_cpudata *cpudata = policy->driver_data;
++
++ if (!policy) {
++ pr_err("policy is NULL\n");
++ ret = -ENODATA;
++ goto err_exit;
++ }
++
++ ret = amd_pstate_cpu_boost_update(policy, state);
++ refresh_frequency_limits(policy);
++ WRITE_ONCE(cpudata->boost_state, state);
++ policy->boost_enabled = state;
++
++err_exit:
++ cpufreq_cpu_put(policy);
++ return ret < 0 ? ret : 0;
++}
++
++static ssize_t cpb_boost_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ bool cpb_idx;
++
++ cpb_idx = amd_pstate_global_params.cpb_boost;
++
++ return sysfs_emit(buf, "%s\n", cpb_state[cpb_idx]);
++}
++
++static ssize_t cpb_boost_store(struct device *dev, struct device_attribute *b,
++ const char *buf, size_t count)
++{
++ bool new_state;
++ ssize_t ret;
++ int cpu, cpb_idx;
++
++ if (!amd_pstate_global_params.cpb_supported) {
++ pr_err("Boost mode is not supported by this processor or SBIOS\n");
++ return -EINVAL;
++ }
++
++ cpb_idx = sysfs_match_string(cpb_state, buf);
++ if (cpb_idx < 0)
++ return -EINVAL;
++
++ new_state = cpb_idx;
++
++ mutex_lock(&amd_pstate_driver_lock);
++ for_each_present_cpu(cpu) {
++ ret = amd_pstate_cpu_boost(cpu, new_state);
++ if (ret < 0) {
++ pr_warn("failed to update cpu boost for CPU%d (%zd)\n", cpu, ret);
++ goto err_exit;
++ }
++ }
++ amd_pstate_global_params.cpb_boost = !!new_state;
++
++err_exit:
++ mutex_unlock(&amd_pstate_driver_lock);
++ return ret < 0 ? ret : count;
++}
++
+ cpufreq_freq_attr_ro(amd_pstate_max_freq);
+ cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
+
+@@ -1322,6 +1483,7 @@ cpufreq_freq_attr_rw(energy_performance_preference);
+ cpufreq_freq_attr_ro(energy_performance_available_preferences);
+ static DEVICE_ATTR_RW(status);
+ static DEVICE_ATTR_RO(prefcore);
++static DEVICE_ATTR_RW(cpb_boost);
+
+ static struct freq_attr *amd_pstate_attr[] = {
+ &amd_pstate_max_freq,
+@@ -1346,6 +1508,7 @@ static struct freq_attr *amd_pstate_epp_attr[] = {
+ static struct attribute *pstate_global_attributes[] = {
+ &dev_attr_status.attr,
+ &dev_attr_prefcore.attr,
++ &dev_attr_cpb_boost.attr,
+ NULL
+ };
+
+@@ -1374,9 +1537,24 @@ static bool amd_pstate_acpi_pm_profile_undefined(void)
+ return false;
+ }
+
++static int amd_pstate_init_boost(struct cpufreq_policy *policy)
++{
++ struct amd_cpudata *cpudata = policy->driver_data;
++ int ret;
++
++	/* initialize cpu cores boost state */
++ ret = amd_pstate_init_boost_support(cpudata);
++ if (ret)
++ return ret;
++
++ policy->boost_enabled = READ_ONCE(cpudata->boost_state);
++
++ return 0;
++}
++
static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
-+ int min_freq, max_freq, nominal_freq, ret;
++ int min_freq, max_freq, ret;
struct amd_cpudata *cpudata;
struct device *dev;
u64 value;
-@@ -1407,13 +1408,14 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
+@@ -1407,16 +1585,8 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
@@ -181,32 +738,169 @@ index 6c989d859b39..c08463f8dcac 100644
- if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
- dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
- min_freq, max_freq);
+- ret = -EINVAL;
+- goto free_cpudata1;
+- }
+ min_freq = READ_ONCE(cpudata->min_freq);
+ max_freq = READ_ONCE(cpudata->max_freq);
-+ nominal_freq = READ_ONCE(cpudata->nominal_freq);
-+ if (min_freq <= 0 || max_freq <= 0 ||
-+ nominal_freq <= 0 || min_freq > max_freq) {
-+ dev_err(dev,
-+ "min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect, check _CPC in ACPI tables\n",
-+ min_freq, max_freq, nominal_freq);
- ret = -EINVAL;
- goto free_cpudata1;
+
+ policy->cpuinfo.min_freq = min_freq;
+ policy->cpuinfo.max_freq = max_freq;
+@@ -1425,7 +1595,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
+
+ policy->driver_data = cpudata;
+
+- cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0);
++ cpudata->epp_cached = cpudata->epp_default = amd_pstate_get_epp(cpudata, 0);
+
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+@@ -1440,7 +1610,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
+ else
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+
+- if (boot_cpu_has(X86_FEATURE_CPPC)) {
++ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
+ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
+ if (ret)
+ return ret;
+@@ -1451,7 +1621,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
+ return ret;
+ WRITE_ONCE(cpudata->cppc_cap1_cached, value);
}
-@@ -1462,6 +1464,13 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
+- amd_pstate_boost_init(cpudata);
- static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
- {
-+ struct amd_cpudata *cpudata = policy->driver_data;
-+
-+ if (cpudata) {
-+ kfree(cpudata);
-+ policy->driver_data = NULL;
-+ }
+ return 0;
+
+@@ -1530,7 +1699,7 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
+ epp = 0;
+
+ /* Set initial EPP value */
+- if (boot_cpu_has(X86_FEATURE_CPPC)) {
++ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
+ value &= ~GENMASK_ULL(31, 24);
+ value |= (u64)epp << 24;
+ }
+@@ -1553,6 +1722,12 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
+
+ amd_pstate_epp_update_limit(policy);
+
++ /*
++ * policy->cur is never updated with the amd_pstate_epp driver, but it
++ * is used as a stale frequency value. So, keep it within limits.
++ */
++ policy->cur = policy->min;
+
- pr_debug("CPU %d exiting\n", policy->cpu);
return 0;
}
-@@ -1750,11 +1759,9 @@ static int __init amd_pstate_init(void)
+
+@@ -1569,7 +1744,7 @@ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
+ value = READ_ONCE(cpudata->cppc_req_cached);
+ max_perf = READ_ONCE(cpudata->highest_perf);
+
+- if (boot_cpu_has(X86_FEATURE_CPPC)) {
++ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
+ wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ } else {
+ perf_ctrls.max_perf = max_perf;
+@@ -1603,7 +1778,7 @@ static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
+ value = READ_ONCE(cpudata->cppc_req_cached);
+
+ mutex_lock(&amd_pstate_limits_lock);
+- if (boot_cpu_has(X86_FEATURE_CPPC)) {
++ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
+ cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
+
+ /* Set max perf same as min perf */
+@@ -1690,6 +1865,7 @@ static struct cpufreq_driver amd_pstate_driver = {
+ .exit = amd_pstate_cpu_exit,
+ .suspend = amd_pstate_cpu_suspend,
+ .resume = amd_pstate_cpu_resume,
++ .init_boost = amd_pstate_init_boost,
+ .set_boost = amd_pstate_set_boost,
+ .update_limits = amd_pstate_update_limits,
+ .name = "amd-pstate",
+@@ -1707,6 +1883,8 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
+ .suspend = amd_pstate_epp_suspend,
+ .resume = amd_pstate_epp_resume,
+ .update_limits = amd_pstate_update_limits,
++ .init_boost = amd_pstate_init_boost,
++ .set_boost = amd_pstate_set_boost,
+ .name = "amd-pstate-epp",
+ .attr = amd_pstate_epp_attr,
+ };
+@@ -1730,6 +1908,46 @@ static int __init amd_pstate_set_driver(int mode_idx)
+ return -EINVAL;
+ }
+
++/**
++ * CPPC is not supported on family 17H CPUs with model IDs ranging from 0x10 to 0x2F;
++ * emit a debug message that helps to check whether the CPU has CPPC support when the driver fails to load.
++ */
++static bool amd_cppc_supported(void)
++{
++ struct cpuinfo_x86 *c = &cpu_data(0);
++ bool warn = false;
++
++ if ((boot_cpu_data.x86 == 0x17) && (boot_cpu_data.x86_model < 0x30)) {
++ pr_debug_once("CPPC feature is not supported by the processor\n");
++ return false;
++ }
++
++ /*
++ * If the CPPC feature is disabled in the BIOS for processors that support MSR-based CPPC,
++ * the AMD Pstate driver may not function correctly.
++ * Check the CPPC flag and display a warning message if the platform supports CPPC.
++ * Note: the checking code below will not abort the driver registration process because
++ * it is added for debugging purposes.
++ */
++ if (!cpu_feature_enabled(X86_FEATURE_CPPC)) {
++ if (cpu_feature_enabled(X86_FEATURE_ZEN1) || cpu_feature_enabled(X86_FEATURE_ZEN2)) {
++ if (c->x86_model > 0x60 && c->x86_model < 0xaf)
++ warn = true;
++ } else if (cpu_feature_enabled(X86_FEATURE_ZEN3) || cpu_feature_enabled(X86_FEATURE_ZEN4)) {
++ if ((c->x86_model > 0x10 && c->x86_model < 0x1F) ||
++ (c->x86_model > 0x40 && c->x86_model < 0xaf))
++ warn = true;
++ } else if (cpu_feature_enabled(X86_FEATURE_ZEN5)) {
++ warn = true;
++ }
++ }
++
++ if (warn)
++ pr_warn_once("The CPPC feature is supported but currently disabled by the BIOS.\n"
++ "Please enable it if your BIOS has the CPPC option.\n");
++ return true;
++}
++
+ static int __init amd_pstate_init(void)
+ {
+ struct device *dev_root;
+@@ -1738,6 +1956,11 @@ static int __init amd_pstate_init(void)
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ return -ENODEV;
+
++ /* show debug message only if CPPC is not supported */
++ if (!amd_cppc_supported())
++ return -EOPNOTSUPP;
++
++ /* show warning message when BIOS broken or ACPI disabled */
+ if (!acpi_cpc_valid()) {
+ pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
+ return -ENODEV;
+@@ -1752,35 +1975,43 @@ static int __init amd_pstate_init(void)
+ /* check if this machine need CPPC quirks */
+ dmi_check_system(amd_pstate_quirks_table);
+
+- switch (cppc_state) {
+- case AMD_PSTATE_UNDEFINED:
++ /*
++ * determine the driver mode from the command line or kernel config.
++ * If no command line input is provided, cppc_state will be AMD_PSTATE_UNDEFINED.
++ * command line options will override the kernel config settings.
++ */
++
++ if (cppc_state == AMD_PSTATE_UNDEFINED) {
/* Disable on the following configs by default:
* 1. Undefined platforms
* 2. Server platforms
@@ -219,8 +913,64 @@ index 6c989d859b39..c08463f8dcac 100644
pr_info("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV;
}
+- ret = amd_pstate_set_driver(CONFIG_X86_AMD_PSTATE_DEFAULT_MODE);
+- if (ret)
+- return ret;
+- break;
++ /* get driver mode from kernel config option [1:4] */
++ cppc_state = CONFIG_X86_AMD_PSTATE_DEFAULT_MODE;
++ }
++
++ switch (cppc_state) {
+ case AMD_PSTATE_DISABLE:
++ pr_info("driver load is disabled, boot with specific mode to enable this\n");
+ return -ENODEV;
+ case AMD_PSTATE_PASSIVE:
+ case AMD_PSTATE_ACTIVE:
+ case AMD_PSTATE_GUIDED:
++ ret = amd_pstate_set_driver(cppc_state);
++ if (ret)
++ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* capability check */
+- if (boot_cpu_has(X86_FEATURE_CPPC)) {
++ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
+ pr_debug("AMD CPPC MSR based functionality is supported\n");
+ if (cppc_state != AMD_PSTATE_ACTIVE)
+ current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
+@@ -1794,13 +2025,15 @@ static int __init amd_pstate_init(void)
+ /* enable amd pstate feature */
+ ret = amd_pstate_enable(true);
+ if (ret) {
+- pr_err("failed to enable with return %d\n", ret);
++ pr_err("failed to enable driver mode(%d)\n", cppc_state);
+ return ret;
+ }
+
+ ret = cpufreq_register_driver(current_pstate_driver);
+- if (ret)
++ if (ret) {
+ pr_err("failed to register with return %d\n", ret);
++ goto disable_driver;
++ }
+
+ dev_root = bus_get_dev_root(&cpu_subsys);
+ if (dev_root) {
+@@ -1816,6 +2049,8 @@ static int __init amd_pstate_init(void)
+
+ global_attr_free:
+ cpufreq_unregister_driver(current_pstate_driver);
++disable_driver:
++ amd_pstate_enable(false);
+ return ret;
+ }
+ device_initcall(amd_pstate_init);
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
-index bc341f35908d..e6a28e7f4dbf 100644
+index bc341f35908d7..fb240a8702892 100644
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -42,13 +42,17 @@ struct amd_aperf_mperf {
@@ -246,6 +996,93 @@ index bc341f35908d..e6a28e7f4dbf 100644
* @boost_supported: check whether the Processor or SBIOS supports boost mode
* @hw_prefcore: check whether HW supports preferred core featue.
* Only when hw_prefcore and early prefcore param are true,
+@@ -95,6 +99,21 @@ struct amd_cpudata {
+ u32 policy;
+ u64 cppc_cap1_cached;
+ bool suspended;
++ s16 epp_default;
++ bool boost_state;
+ };
+
++/**
++ * struct amd_pstate_global_params - Global parameters, mostly tunable via sysfs.
++ * @cpb_boost: Whether or not to use boost CPU P-states.
++ * @cpb_supported: Whether or not CPU boost P-states are available
++ * based on the MSR_K7_HWCR bit[25] state
++ */
++struct amd_pstate_global_params {
++ bool cpb_boost;
++ bool cpb_supported;
++};
++
++extern struct amd_pstate_global_params amd_pstate_global_params;
++
+ #endif /* _LINUX_AMD_PSTATE_H */
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index fd9c3ed21f49c..35296d8f9cdfa 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -614,10 +614,9 @@ static ssize_t show_boost(struct kobject *kobj,
+ static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+ {
+- int ret, enable;
++ bool enable;
+
+- ret = sscanf(buf, "%d", &enable);
+- if (ret != 1 || enable < 0 || enable > 1)
++ if (kstrtobool(buf, &enable))
+ return -EINVAL;
+
+ if (cpufreq_boost_trigger_state(enable)) {
+@@ -641,10 +640,10 @@ static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
+ static ssize_t store_local_boost(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+ {
+- int ret, enable;
++ int ret;
++ bool enable;
+
+- ret = kstrtoint(buf, 10, &enable);
+- if (ret || enable < 0 || enable > 1)
++ if (kstrtobool(buf, &enable))
+ return -EINVAL;
+
+ if (!cpufreq_driver->boost_enabled)
+@@ -1430,8 +1429,18 @@ static int cpufreq_online(unsigned int cpu)
+ goto out_free_policy;
+ }
+
+- /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
+- policy->boost_enabled = cpufreq_boost_enabled() && policy_has_boost_freq(policy);
++ /* init boost state to prepare set_boost callback for each CPU */
++ if (cpufreq_driver->init_boost) {
++ ret = cpufreq_driver->init_boost(policy);
++ if (ret) {
++ pr_debug("%s: %d: boost initialization failed\n", __func__,
++ __LINE__);
++ goto out_offline_policy;
++ }
++ } else {
++ /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
++ policy->boost_enabled = cpufreq_boost_enabled() && policy_has_boost_freq(policy);
++ }
+
+ /*
+ * The initialization has succeeded and the policy is online.
+diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
+index 9956afb9acc23..3eb5ce712c587 100644
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -399,6 +399,8 @@ struct cpufreq_driver {
+ bool boost_enabled;
+ int (*set_boost)(struct cpufreq_policy *policy, int state);
+
++ /* initialize boost state to be consistent before calling set_boost */
++ int (*init_boost)(struct cpufreq_policy *policy);
+ /*
+ * Set by drivers that want to register with the energy model after the
+ * policy is properly initialized, but before the governor is started.
--
2.45.2
diff --git a/SOURCES/0001-ntsync.patch b/SOURCES/0001-ntsync.patch
index ea60a73..4f3d773 100644
--- a/SOURCES/0001-ntsync.patch
+++ b/SOURCES/0001-ntsync.patch
@@ -1,7 +1,7 @@
-From 4e05dd5270a7e43940c9321c7be008d97143aca9 Mon Sep 17 00:00:00 2001
+From 3aa16b2e2770401eaac82fcd8f279ee3d0ab409c Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
-Date: Wed, 12 Jun 2024 18:20:30 +0200
-Subject: [PATCH 09/11] ntsync
+Date: Thu, 27 Jun 2024 16:53:18 +0200
+Subject: [PATCH 08/10] ntsync
Signed-off-by: Peter Jung <admin@ptr1337.dev>
---
@@ -457,10 +457,10 @@ index 0000000000000..767844637a7df
+ ``objs`` and in ``alert``. If this is attempted, the function fails
+ with ``EINVAL``.
diff --git a/MAINTAINERS b/MAINTAINERS
-index 28e20975c26f5..5845f3dd0488d 100644
+index 3121709d99e3b..baa28e4151aa6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
-@@ -15721,6 +15721,15 @@ T: git https://github.com/Paragon-Software-Group/linux-ntfs3.git
+@@ -15720,6 +15720,15 @@ T: git https://github.com/Paragon-Software-Group/linux-ntfs3.git
F: Documentation/filesystems/ntfs3.rst
F: fs/ntfs3/
diff --git a/SOURCES/Patchlist.changelog b/SOURCES/Patchlist.changelog
index 1ad8a76..21ca765 100644
--- a/SOURCES/Patchlist.changelog
+++ b/SOURCES/Patchlist.changelog
@@ -1,3 +1,6 @@
+https://gitlab.com/cki-project/kernel-ark/-/commit/982eb4ceebb7be3979ef62dbd5d4c8a514180ae1
+ 982eb4ceebb7be3979ef62dbd5d4c8a514180ae1 ACPI: scan: Ignore camera graph port nodes on all Dell Tiger, Alder and Raptor Lake models
+
https://gitlab.com/cki-project/kernel-ark/-/commit/a058bf42a79af81598f7404ad550ff677eb2be4d
a058bf42a79af81598f7404ad550ff677eb2be4d lsm: update security_lock_kernel_down
diff --git a/SOURCES/asus-linux.patch b/SOURCES/asus-linux.patch
index eb6c51c..569a7b7 100644
--- a/SOURCES/asus-linux.patch
+++ b/SOURCES/asus-linux.patch
@@ -1166,95 +1166,6 @@ index a6b648457908..7163cce7079c 100644
--
2.44.0
-From 7c2c8cca4989bc7803aef60a4aeb3efe1d211a4b Mon Sep 17 00:00:00 2001
-From: "Luke D. Jones" <luke@ljones.dev>
-Date: Sat, 2 Dec 2023 17:27:23 +1300
-Subject: [PATCH 1/4] HID: asus: fix more n-key report descriptors if n-key
- quirked
-
-Adjusts the report descriptor for N-Key devices to
-make the output count 0x01 which completely avoids
-the need for a block of filtering.
-
-Signed-off-by: Luke D. Jones <luke@ljones.dev>
----
- drivers/hid/hid-asus.c | 49 ++++++++++++++++++++----------------------
- 1 file changed, 23 insertions(+), 26 deletions(-)
-
-diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
-index 78cdfb8b9a7a..855972a4470f 100644
---- a/drivers/hid/hid-asus.c
-+++ b/drivers/hid/hid-asus.c
-@@ -335,36 +335,20 @@ static int asus_raw_event(struct hid_device *hdev,
- if (drvdata->quirks & QUIRK_MEDION_E1239T)
- return asus_e1239t_event(drvdata, data, size);
-
-- if (drvdata->quirks & QUIRK_USE_KBD_BACKLIGHT) {
-+ /*
-+ * Skip these report ID, the device emits a continuous stream associated
-+ * with the AURA mode it is in which looks like an 'echo'.
-+ */
-+ if (report->id == FEATURE_KBD_LED_REPORT_ID1 || report->id == FEATURE_KBD_LED_REPORT_ID2)
-+ return -1;
-+ if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
- /*
-- * Skip these report ID, the device emits a continuous stream associated
-- * with the AURA mode it is in which looks like an 'echo'.
-+ * G713 and G733 send these codes on some keypresses, depending on
-+ * the key pressed it can trigger a shutdown event if not caught.
- */
-- if (report->id == FEATURE_KBD_LED_REPORT_ID1 ||
-- report->id == FEATURE_KBD_LED_REPORT_ID2) {
-+ if(data[0] == 0x02 && data[1] == 0x30) {
- return -1;
-- /* Additional report filtering */
-- } else if (report->id == FEATURE_KBD_REPORT_ID) {
-- /*
-- * G14 and G15 send these codes on some keypresses with no
-- * discernable reason for doing so. We'll filter them out to avoid
-- * unmapped warning messages later.
-- */
-- if (data[1] == 0xea || data[1] == 0xec || data[1] == 0x02 ||
-- data[1] == 0x8a || data[1] == 0x9e) {
-- return -1;
-- }
- }
-- if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
-- /*
-- * G713 and G733 send these codes on some keypresses, depending on
-- * the key pressed it can trigger a shutdown event if not caught.
-- */
-- if(data[0] == 0x02 && data[1] == 0x30) {
-- return -1;
-- }
-- }
--
- }
-
- if (drvdata->quirks & QUIRK_ROG_CLAYMORE_II_KEYBOARD) {
-@@ -1250,6 +1234,19 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
- rdesc[205] = 0x01;
- }
-
-+ /* match many more n-key devices */
-+ if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
-+ for (int i = 0; i < *rsize + 1; i++) {
-+ /* offset to the count from 0x5a report part always 14 */
-+ if (rdesc[i] == 0x85 && rdesc[i + 1] == 0x5a &&
-+ rdesc[i + 14] == 0x95 && rdesc[i + 15] == 0x05) {
-+ hid_info(hdev, "Fixing up Asus N-Key report descriptor\n");
-+ rdesc[i + 15] = 0x01;
-+ break;
-+ }
-+ }
-+ }
-+
- return rdesc;
- }
-
---
-2.44.0
-
From de9b01c3b8869451d4cf44ab0baf55440e804fc6 Mon Sep 17 00:00:00 2001
From: "Luke D. Jones" <luke@ljones.dev>
Date: Sat, 2 Dec 2023 17:47:59 +1300
diff --git a/SOURCES/kernel.changelog b/SOURCES/kernel.changelog
index df18931..4183d93 100644
--- a/SOURCES/kernel.changelog
+++ b/SOURCES/kernel.changelog
@@ -1,3 +1,8 @@
+* Thu Jun 27 2024 Augusto Caringi <acaringi@redhat.com> [6.9.7-0]
+- ACPI: scan: Ignore camera graph port nodes on all Dell Tiger, Alder and Raptor Lake models (Hans de Goede)
+- Linux v6.9.7
+Resolves:
+
* Fri Jun 21 2024 Augusto Caringi <acaringi@redhat.com> [6.9.6-0]
- Linux v6.9.6
Resolves:
diff --git a/SOURCES/patch-6.9-redhat.patch b/SOURCES/patch-6.9-redhat.patch
index 9c04cc8..3de77a3 100644
--- a/SOURCES/patch-6.9-redhat.patch
+++ b/SOURCES/patch-6.9-redhat.patch
@@ -72,7 +72,7 @@ index 0000000000000..733a26bd887a2
+
+endmenu
diff --git a/Makefile b/Makefile
-index 8da63744745be..f605426ef3d12 100644
+index 17dc3e55323e7..205a0114f9351 100644
--- a/Makefile
+++ b/Makefile
@@ -22,6 +22,18 @@ $(if $(filter __%, $(MAKECMDGOALS)), \
@@ -1453,10 +1453,10 @@ index eff7f5df08e27..b58145ce7775c 100644
* Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero)
* class code. Fix it.
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
-index 4f7b9b5b9c5b4..98831fb3769fd 100644
+index a4638ea92571d..aff13cdce0156 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
-@@ -119,6 +119,14 @@ static const char *sd_cache_types[] = {
+@@ -120,6 +120,14 @@ static const char *sd_cache_types[] = {
"write back, no read (daft)"
};
@@ -1471,7 +1471,7 @@ index 4f7b9b5b9c5b4..98831fb3769fd 100644
static void sd_set_flush_flag(struct scsi_disk *sdkp)
{
bool wc = false, fua = false;
-@@ -4258,6 +4266,8 @@ static int __init init_sd(void)
+@@ -4262,6 +4270,8 @@ static int __init init_sd(void)
goto err_out_class;
}
@@ -1579,7 +1579,7 @@ index d59b0947fba08..8b1e2e71d4858 100644
enum efi_secureboot_mode efi_get_secureboot_mode(efi_get_variable_t *get_var)
{
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
-index 334e00efbde45..5c962e31ffe57 100644
+index 7e539f6f8c674..3c3e56566457a 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -436,6 +436,8 @@ LSM_HOOK(int, 0, bpf_token_capable, const struct bpf_token *token, int cap)
@@ -1632,7 +1632,7 @@ index ab7eea01ab427..fff7c5f737fc8 100644
int rmi_register_transport_device(struct rmi_transport_dev *xport);
diff --git a/include/linux/security.h b/include/linux/security.h
-index 41a8f667bdfa0..e2aebf520337e 100644
+index 5122e3ad83b19..b5f0081bc894d 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -507,6 +507,7 @@ int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen);
@@ -1751,10 +1751,10 @@ index cd84d8ea1dfbf..e4c70a0312bc8 100644
const struct lsm_id lockdown_lsmid = {
diff --git a/security/security.c b/security/security.c
-index 0a9a0ac3f2662..a5cfc42f7c331 100644
+index 4fd3c839353ec..d52cb3bb720dd 100644
--- a/security/security.c
+++ b/security/security.c
-@@ -5567,6 +5567,18 @@ int security_locked_down(enum lockdown_reason what)
+@@ -5569,6 +5569,18 @@ int security_locked_down(enum lockdown_reason what)
}
EXPORT_SYMBOL(security_locked_down);