From e5e77ad2223f662e1615266d8ef39a8db7e65a70 Mon Sep 17 00:00:00 2001
From: Felix Hädicke
Date: Thu, 19 Nov 2020 09:22:32 +0100
Subject: HID: quirks: Add Apple Magic Trackpad 2 to hid_have_special_driver list
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The Apple Magic Trackpad 2 is handled by the magicmouse driver, and there
were severe stability issues when both drivers (hid-generic and
hid-magicmouse) were loaded for this device.

Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=210241
Signed-off-by: Felix Hädicke
---
 drivers/hid/hid-quirks.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index bf7ecab5d9e5..142e9dae2837 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -478,6 +478,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
 #if IS_ENABLED(CONFIG_HID_MAGICMOUSE)
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
+	{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) },
 #endif
 #if IS_ENABLED(CONFIG_HID_MAYFLASH)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
--
cgit v1.2.3-1-gf6bb5

From f7f49141a5dbe9c99d78196b58c44307fb2e6be3 Mon Sep 17 00:00:00 2001
From: Tk-Glitch
Date: Wed, 3 Feb 2021 11:20:12 +0200
Subject: Revert "cpufreq: Avoid configuring old governors as default with intel_pstate"

This is undesirable behavior for us, since our aggressive ondemand governor
performs better than schedutil for gaming when using intel_pstate in passive
mode. It also interferes with our option to select the desired default
governor.

diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 2c7171e0b0010..85de313ddec29 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -71,7 +71,6 @@ config CPU_FREQ_DEFAULT_GOV_USERSPACE
 config CPU_FREQ_DEFAULT_GOV_ONDEMAND
 	bool "ondemand"
-	depends on !(X86_INTEL_PSTATE && SMP)
 	select CPU_FREQ_GOV_ONDEMAND
 	select CPU_FREQ_GOV_PERFORMANCE
 	help
@@ -83,7 +82,6 @@ config CPU_FREQ_DEFAULT_GOV_ONDEMAND
 config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
 	bool "conservative"
-	depends on !(X86_INTEL_PSTATE && SMP)
 	select CPU_FREQ_GOV_CONSERVATIVE
 	select CPU_FREQ_GOV_PERFORMANCE
 	help

From 7695eb71d0872ed9633daf0ca779da3344b87dec Mon Sep 17 00:00:00 2001
From: Evan Quan
Date: Mon, 21 Aug 2023 14:15:13 +0800
Subject: [PATCH] drm/amd/pm: correct SMU13 gfx voltage related OD settings

The voltage offset setting will be applied to the whole v/f curve line
instead of on a per-anchor-point basis.

Signed-off-by: Evan Quan
Acked-by: Alex Deucher
---
 drivers/gpu/drm/amd/pm/amdgpu_pm.c            | 45 +++++++------------
 .../drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c  | 31 ++++++-------
 .../drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c  | 31 ++++++-------
 3 files changed, 43 insertions(+), 64 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 1da7ece4c627..06aa5c18b40f 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -643,18 +643,14 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
  * They can be used to calibrate the sclk voltage curve. This is
  * available for Vega20 and NV1X.
  *
- * - voltage offset for the six anchor points of the v/f curve labeled
- *   OD_VDDC_CURVE.
They can be used to calibrate the v/f curve. This - * is only availabe for some SMU13 ASICs. - * * - voltage offset(in mV) applied on target voltage calculation. - * This is available for Sienna Cichlid, Navy Flounder and Dimgrey - * Cavefish. For these ASICs, the target voltage calculation can be - * illustrated by "voltage = voltage calculated from v/f curve + - * overdrive vddgfx offset" + * This is available for Sienna Cichlid, Navy Flounder, Dimgrey + * Cavefish and some later SMU13 ASICs. For these ASICs, the target + * voltage calculation can be illustrated by "voltage = voltage + * calculated from v/f curve + overdrive vddgfx offset" * - * - a list of valid ranges for sclk, mclk, and voltage curve points - * labeled OD_RANGE + * - a list of valid ranges for sclk, mclk, voltage curve points + * or voltage offset labeled OD_RANGE * * < For APUs > * @@ -686,24 +682,17 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, * E.g., "p 2 0 800" would set the minimum core clock on core * 2 to 800Mhz. * - * For sclk voltage curve, - * - For NV1X, enter the new values by writing a string that - * contains "vc point clock voltage" to the file. The points - * are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will update - * point1 with clock set as 300Mhz and voltage as 600mV. "vc 2 - * 1000 1000" will update point3 with clock set as 1000Mhz and - * voltage 1000mV. - * - For SMU13 ASICs, enter the new values by writing a string that - * contains "vc anchor_point_index voltage_offset" to the file. - * There are total six anchor points defined on the v/f curve with - * index as 0 - 5. - * - "vc 0 10" will update the voltage offset for point1 as 10mv. - * - "vc 5 -10" will update the voltage offset for point6 as -10mv. - * - * To update the voltage offset applied for gfxclk/voltage calculation, - * enter the new value by writing a string that contains "vo offset". - * This is supported by Sienna Cichlid, Navy Flounder and Dimgrey Cavefish. - * And the offset can be a positive or negative value. + * For sclk voltage curve supported by Vega20 and NV1X, enter the new + * values by writing a string that contains "vc point clock voltage" + * to the file. The points are indexed by 0, 1 and 2. E.g., "vc 0 300 + * 600" will update point1 with clock set as 300Mhz and voltage as 600mV. + * "vc 2 1000 1000" will update point3 with clock set as 1000Mhz and + * voltage 1000mV. + * + * For voltage offset supported by Sienna Cichlid, Navy Flounder, Dimgrey + * Cavefish and some later SMU13 ASICs, enter the new value by writing a + * string that contains "vo offset". E.g., "vo -10" will update the extra + * voltage offset applied to the whole v/f curve line as -10mv. 
* * - When you have edited all of the states as needed, write "c" (commit) * to the file to commit your changes diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 3903a47669e4..bd0d5f027cac 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -1304,16 +1304,14 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, od_table->OverDriveTable.UclkFmax); break; - case SMU_OD_VDDC_CURVE: + case SMU_OD_VDDGFX_OFFSET: if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) break; - size += sysfs_emit_at(buf, size, "OD_VDDC_CURVE:\n"); - for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++) - size += sysfs_emit_at(buf, size, "%d: %dmv\n", - i, - od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i]); + size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n"); + size += sysfs_emit_at(buf, size, "%dmV\n", + od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[0]); break; case SMU_OD_RANGE: @@ -1355,7 +1353,7 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, PP_OD_FEATURE_GFX_VF_CURVE, &min_value, &max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE: %7dmv %10dmv\n", + size += sysfs_emit_at(buf, size, "VDDGFX_OFFSET: %7dmv %10dmv\n", min_value, max_value); } break; @@ -1504,29 +1502,26 @@ static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu, } break; - case PP_OD_EDIT_VDDC_CURVE: + case PP_OD_EDIT_VDDGFX_OFFSET: if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) { - dev_warn(adev->dev, "VF curve setting not supported!\n"); + dev_warn(adev->dev, "Gfx offset setting not supported!\n"); return -ENOTSUPP; } - if (input[0] >= PP_NUM_OD_VF_CURVE_POINTS || - input[0] < 0) - return -EINVAL; - smu_v13_0_0_get_od_setting_limits(smu, PP_OD_FEATURE_GFX_VF_CURVE, &minimum, &maximum); - if (input[1] < minimum || - input[1] > maximum) { + if (input[0] < minimum || + input[0] > maximum) { dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n", - input[1], minimum, maximum); + input[0], minimum, maximum); return -EINVAL; } - od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[input[0]] = input[1]; - od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT; + for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++) + od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT); break; case PP_OD_RESTORE_DEFAULT_TABLE: diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 94ef5b4d116d..b9b3bf41eed3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -1284,16 +1284,14 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, od_table->OverDriveTable.UclkFmax); break; - case SMU_OD_VDDC_CURVE: + case SMU_OD_VDDGFX_OFFSET: if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) break; - size += sysfs_emit_at(buf, size, "OD_VDDC_CURVE:\n"); - for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++) - size += sysfs_emit_at(buf, size, "%d: %dmv\n", - i, - od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i]); + size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n"); + size += sysfs_emit_at(buf, size, "%dmV\n", + od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[0]); break; case 
SMU_OD_RANGE: @@ -1335,7 +1333,7 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, PP_OD_FEATURE_GFX_VF_CURVE, &min_value, &max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE: %7dmv %10dmv\n", + size += sysfs_emit_at(buf, size, "VDDGFX_OFFSET: %7dmv %10dmv\n", min_value, max_value); } break; @@ -1484,29 +1482,26 @@ static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu, } break; - case PP_OD_EDIT_VDDC_CURVE: + case PP_OD_EDIT_VDDGFX_OFFSET: if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) { - dev_warn(adev->dev, "VF curve setting not supported!\n"); + dev_warn(adev->dev, "Gfx offset setting not supported!\n"); return -ENOTSUPP; } - if (input[0] >= PP_NUM_OD_VF_CURVE_POINTS || - input[0] < 0) - return -EINVAL; - smu_v13_0_7_get_od_setting_limits(smu, PP_OD_FEATURE_GFX_VF_CURVE, &minimum, &maximum); - if (input[1] < minimum || - input[1] > maximum) { + if (input[0] < minimum || + input[0] > maximum) { dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n", - input[1], minimum, maximum); + input[0], minimum, maximum); return -EINVAL; } - od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[input[0]] = input[1]; - od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT; + for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++) + od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT); break; case PP_OD_RESTORE_DEFAULT_TABLE: -- GitLab From 8bad128720ebc69e37f1c66767fb276088ef4fa7 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 16 Aug 2023 14:51:19 +0800 Subject: [PATCH] drm/amd/pm: fulfill the support for SMU13 `pp_dpm_dcefclk` interface Fulfill the incomplete SMU13 `pp_dpm_dcefclk` implementation. 
Reported-by: Guan Yu Signed-off-by: Evan Quan Acked-by: Alex Deucher --- .../drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 27 +++++++++++++++++++ .../drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 27 +++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index bd0d5f027cac..5fdb2b3c042a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -176,6 +176,7 @@ static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = { CLK_MAP(VCLK1, PPCLK_VCLK_1), CLK_MAP(DCLK, PPCLK_DCLK_0), CLK_MAP(DCLK1, PPCLK_DCLK_1), + CLK_MAP(DCEFCLK, PPCLK_DCFCLK), }; static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] = { @@ -707,6 +708,22 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu) pcie_table->num_of_link_levels++; } + /* dcefclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.dcef_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) { + ret = smu_v13_0_set_single_dpm_table(smu, + SMU_DCEFCLK, + dpm_table); + if (ret) + return ret; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + return 0; } @@ -794,6 +811,9 @@ static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu, case METRICS_CURR_FCLK: *value = metrics->CurrClock[PPCLK_FCLK]; break; + case METRICS_CURR_DCEFCLK: + *value = metrics->CurrClock[PPCLK_DCFCLK]; + break; case METRICS_AVERAGE_GFXCLK: if (metrics->AverageGfxActivity <= SMU_13_0_0_BUSY_THRESHOLD) *value = metrics->AverageGfxclkFrequencyPostDs; @@ -1047,6 +1067,9 @@ static int smu_v13_0_0_get_current_clk_freq_by_table(struct smu_context *smu, case PPCLK_DCLK_1: member_type = METRICS_AVERAGE_DCLK1; break; + case PPCLK_DCFCLK: + member_type = METRICS_CURR_DCEFCLK; + break; default: return -EINVAL; } @@ -1196,6 +1219,9 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, case SMU_DCLK1: single_dpm_table = &(dpm_context->dpm_tables.dclk_table); break; + case SMU_DCEFCLK: + single_dpm_table = &(dpm_context->dpm_tables.dcef_table); + break; default: break; } @@ -1209,6 +1235,7 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, case SMU_VCLK1: case SMU_DCLK: case SMU_DCLK1: + case SMU_DCEFCLK: ret = smu_v13_0_0_get_current_clk_freq_by_table(smu, clk_type, &curr_freq); if (ret) { dev_err(smu->adev->dev, "Failed to get current clock freq!"); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index b9b3bf41eed3..12949928e285 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -147,6 +147,7 @@ static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = { CLK_MAP(VCLK1, PPCLK_VCLK_1), CLK_MAP(DCLK, PPCLK_DCLK_0), CLK_MAP(DCLK1, PPCLK_DCLK_1), + CLK_MAP(DCEFCLK, PPCLK_DCFCLK), }; static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] = { @@ -696,6 +697,22 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu) pcie_table->num_of_link_levels++; } + /* dcefclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.dcef_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) { + ret = 
smu_v13_0_set_single_dpm_table(smu, + SMU_DCEFCLK, + dpm_table); + if (ret) + return ret; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + return 0; } @@ -777,6 +794,9 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu, case METRICS_CURR_FCLK: *value = metrics->CurrClock[PPCLK_FCLK]; break; + case METRICS_CURR_DCEFCLK: + *value = metrics->CurrClock[PPCLK_DCFCLK]; + break; case METRICS_AVERAGE_GFXCLK: *value = metrics->AverageGfxclkFrequencyPreDs; break; @@ -1027,6 +1047,9 @@ static int smu_v13_0_7_get_current_clk_freq_by_table(struct smu_context *smu, case PPCLK_DCLK_1: member_type = METRICS_CURR_DCLK1; break; + case PPCLK_DCFCLK: + member_type = METRICS_CURR_DCEFCLK; + break; default: return -EINVAL; } @@ -1176,6 +1199,9 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, case SMU_DCLK1: single_dpm_table = &(dpm_context->dpm_tables.dclk_table); break; + case SMU_DCEFCLK: + single_dpm_table = &(dpm_context->dpm_tables.dcef_table); + break; default: break; } @@ -1189,6 +1215,7 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, case SMU_VCLK1: case SMU_DCLK: case SMU_DCLK1: + case SMU_DCEFCLK: ret = smu_v13_0_7_get_current_clk_freq_by_table(smu, clk_type, &curr_freq); if (ret) { dev_err(smu->adev->dev, "Failed to get current clock freq!"); -- GitLab From 3a2fb905145e76e4bbb32e90e0c6cd532dafb1b0 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 14 Aug 2023 10:16:27 +0800 Subject: [PATCH] Revert "drm/amd/pm: disable the SMU13 OD feature support temporarily" This reverts commit 3592cc20beeece83db4c50a0f400e2dd15139de9. The enablement for the new OD mechanism completed. Also, the support for fan control related OD feature has been added via this new mechanism. Thus, it is time to bring back the SMU13 OD support. Signed-off-by: Evan Quan Acked-by: Alex Deucher --- .../drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 18 +++--------------- .../drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 12 +++--------- 2 files changed, 6 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index c48f81450d24..093962a37688 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -348,13 +348,10 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu) table_context->power_play_table; struct smu_baco_context *smu_baco = &smu->smu_baco; PPTable_t *pptable = smu->smu_table.driver_pptable; -#if 0 - PPTable_t *pptable = smu->smu_table.driver_pptable; const OverDriveLimits_t * const overdrive_upperlimits = &pptable->SkuTable.OverDriveLimitsBasicMax; const OverDriveLimits_t * const overdrive_lowerlimits = &pptable->SkuTable.OverDriveLimitsMin; -#endif if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC) smu->dc_controlled_by_gpio = true; @@ -357,27 +357,18 @@ smu_baco->maco_support = true; } - /* - * We are in the transition to a new OD mechanism. - * Disable the OD feature support for SMU13 temporarily. 
- * TODO: get this reverted when new OD mechanism online - */ -#if 0 if (!overdrive_lowerlimits->FeatureCtrlMask || !overdrive_upperlimits->FeatureCtrlMask) smu->od_enabled = false; + table_context->thermal_controller_type = + powerplay_table->thermal_controller_type; + /* * Instead of having its own buffer space and get overdrive_table copied, * smu->od_settings just points to the actual overdrive_table */ smu->od_settings = &powerplay_table->overdrive_table; -#else - smu->od_enabled = false; -#endif - - table_context->thermal_controller_type = - powerplay_table->thermal_controller_type; smu->adev->pm.no_fan = !(pptable->SkuTable.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT)); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 99bc449799a6..430ad1b05ba3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -338,12 +338,10 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu) struct smu_baco_context *smu_baco = &smu->smu_baco; PPTable_t *smc_pptable = table_context->driver_pptable; BoardTable_t *BoardTable = &smc_pptable->BoardTable; -#if 0 const OverDriveLimits_t * const overdrive_upperlimits = &smc_pptable->SkuTable.OverDriveLimitsBasicMax; const OverDriveLimits_t * const overdrive_lowerlimits = &smc_pptable->SkuTable.OverDriveLimitsMin; -#endif if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC) smu->dc_controlled_by_gpio = true; @@ -348,22 +348,18 @@ smu_baco->maco_support = true; } -#if 0 if (!overdrive_lowerlimits->FeatureCtrlMask || !overdrive_upperlimits->FeatureCtrlMask) smu->od_enabled = false; + table_context->thermal_controller_type = + powerplay_table->thermal_controller_type; + /* * Instead of having its own buffer space and get overdrive_table copied, * smu->od_settings just points to the actual overdrive_table */ smu->od_settings = &powerplay_table->overdrive_table; -#else - smu->od_enabled = false; -#endif - - table_context->thermal_controller_type = - powerplay_table->thermal_controller_type; return 0; } -- GitLab From 072a8dc3b5260ba08ba2e66036c2c63abd77df52 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 24 Aug 2023 17:25:51 +0530 Subject: [PATCH] drm/amd/pm: Fix clock reporting for SMUv13.0.6 On SMU v13.0.6, effective clocks are reported by FW which won't exactly match with DPM level. Report the current clock based on the values matching closest to the effective clock. 
Also, when deep sleep is applied to a clock, report it with a special level "S:" as in sample clock levels below S: 19Mhz * 0: 615Mhz 1: 800Mhz 2: 888Mhz 3: 1000Mhz Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Reviewed-by: Evan Quan --- .../drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 159 +++++++----------- 1 file changed, 62 insertions(+), 97 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index c2308783053c..29e1cada7667 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -91,6 +91,8 @@ #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5 #define LINK_SPEED_MAX 4 +#define SMU_13_0_6_DSCLK_THRESHOLD 100 + static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), @@ -783,13 +785,61 @@ static int smu_v13_0_6_get_current_clk_freq_by_table(struct smu_context *smu, return smu_v13_0_6_get_smu_metrics_data(smu, member_type, value); } +static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, + struct smu_13_0_dpm_table *single_dpm_table, + uint32_t curr_clk, const char *clk_name) +{ + struct pp_clock_levels_with_latency clocks; + int i, ret, size = 0, level = -1; + uint32_t clk1, clk2; + + ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table); + if (ret) { + dev_err(smu->adev->dev, "Attempt to get %s clk levels failed!", + clk_name); + return ret; + } + + if (!clocks.num_levels) + return -EINVAL; + + if (curr_clk < SMU_13_0_6_DSCLK_THRESHOLD) { + size = sysfs_emit_at(buf, size, "S: %uMhz *\n", curr_clk); + for (i = 0; i < clocks.num_levels; i++) + size += sysfs_emit_at(buf, size, "%d: %uMhz\n", i, + clocks.data[i].clocks_in_khz / + 1000); + + } else { + if ((clocks.num_levels == 1) || + (curr_clk < (clocks.data[0].clocks_in_khz / 1000))) + level = 0; + for (i = 0; i < clocks.num_levels; i++) { + clk1 = clocks.data[i].clocks_in_khz / 1000; + + if (i < (clocks.num_levels - 1)) + clk2 = clocks.data[i + 1].clocks_in_khz / 1000; + + if (curr_clk >= clk1 && curr_clk < clk2) { + level = (curr_clk - clk1) <= (clk2 - curr_clk) ? + i : + i + 1; + } + + size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, + clk1, (level == i) ? "*" : ""); + } + } + + return size; +} + static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, enum smu_clk_type type, char *buf) { - int i, now, size = 0; + int now, size = 0; int ret = 0; struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; - struct pp_clock_levels_with_latency clocks; struct smu_13_0_dpm_table *single_dpm_table; struct smu_dpm_context *smu_dpm = &smu->smu_dpm; struct smu_13_0_dpm_context *dpm_context = NULL; @@ -852,26 +902,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, } single_dpm_table = &(dpm_context->dpm_tables.uclk_table); - ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table); - if (ret) { - dev_err(smu->adev->dev, - "Attempt to get memory clk levels Failed!"); - return ret; - } - for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at( - buf, size, "%d: %uMhz %s\n", i, - clocks.data[i].clocks_in_khz / 1000, - (clocks.num_levels == 1) ? - "*" : - (smu_v13_0_6_freqs_in_same_level( - clocks.data[i].clocks_in_khz / - 1000, - now) ? 
- "*" : - "")); - break; + return smu_v13_0_6_print_clks(smu, buf, single_dpm_table, now, + "mclk"); case SMU_SOCCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_SOCCLK, @@ -883,26 +916,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, } single_dpm_table = &(dpm_context->dpm_tables.soc_table); - ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table); - if (ret) { - dev_err(smu->adev->dev, - "Attempt to get socclk levels Failed!"); - return ret; - } - for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at( - buf, size, "%d: %uMhz %s\n", i, - clocks.data[i].clocks_in_khz / 1000, - (clocks.num_levels == 1) ? - "*" : - (smu_v13_0_6_freqs_in_same_level( - clocks.data[i].clocks_in_khz / - 1000, - now) ? - "*" : - "")); - break; + return smu_v13_0_6_print_clks(smu, buf, single_dpm_table, now, + "socclk"); case SMU_FCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_FCLK, @@ -914,26 +930,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, } single_dpm_table = &(dpm_context->dpm_tables.fclk_table); - ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table); - if (ret) { - dev_err(smu->adev->dev, - "Attempt to get fclk levels Failed!"); - return ret; - } - for (i = 0; i < single_dpm_table->count; i++) - size += sysfs_emit_at( - buf, size, "%d: %uMhz %s\n", i, - single_dpm_table->dpm_levels[i].value, - (clocks.num_levels == 1) ? - "*" : - (smu_v13_0_6_freqs_in_same_level( - clocks.data[i].clocks_in_khz / - 1000, - now) ? - "*" : - "")); - break; + return smu_v13_0_6_print_clks(smu, buf, single_dpm_table, now, + "fclk"); case SMU_VCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_VCLK, @@ -945,26 +944,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, } single_dpm_table = &(dpm_context->dpm_tables.vclk_table); - ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table); - if (ret) { - dev_err(smu->adev->dev, - "Attempt to get vclk levels Failed!"); - return ret; - } - for (i = 0; i < single_dpm_table->count; i++) - size += sysfs_emit_at( - buf, size, "%d: %uMhz %s\n", i, - single_dpm_table->dpm_levels[i].value, - (clocks.num_levels == 1) ? - "*" : - (smu_v13_0_6_freqs_in_same_level( - clocks.data[i].clocks_in_khz / - 1000, - now) ? - "*" : - "")); - break; + return smu_v13_0_6_print_clks(smu, buf, single_dpm_table, now, + "vclk"); case SMU_DCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_DCLK, @@ -976,26 +958,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, } single_dpm_table = &(dpm_context->dpm_tables.dclk_table); - ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table); - if (ret) { - dev_err(smu->adev->dev, - "Attempt to get dclk levels Failed!"); - return ret; - } - for (i = 0; i < single_dpm_table->count; i++) - size += sysfs_emit_at( - buf, size, "%d: %uMhz %s\n", i, - single_dpm_table->dpm_levels[i].value, - (clocks.num_levels == 1) ? - "*" : - (smu_v13_0_6_freqs_in_same_level( - clocks.data[i].clocks_in_khz / - 1000, - now) ? - "*" : - "")); - break; + return smu_v13_0_6_print_clks(smu, buf, single_dpm_table, now, + "dclk"); default: break; -- GitLab