From 1b6e2ff790837a76bdca5f93d7c92c43b9dde124 Mon Sep 17 00:00:00 2001
From: Jan200101
Date: Sun, 13 Aug 2023 21:19:49 +0200
Subject: kernel 6.4.10

---
 SOURCES/patch-6.4-redhat.patch | 349 ++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 347 insertions(+), 2 deletions(-)

(limited to 'SOURCES/patch-6.4-redhat.patch')

diff --git a/SOURCES/patch-6.4-redhat.patch b/SOURCES/patch-6.4-redhat.patch
index 4f81dfa..4eddfcf 100644
--- a/SOURCES/patch-6.4-redhat.patch
+++ b/SOURCES/patch-6.4-redhat.patch
@@ -5,6 +5,8 @@
  arch/s390/kernel/ipl.c | 5 +
  arch/s390/kernel/setup.c | 4 +
  arch/x86/kernel/setup.c | 22 ++--
+ arch/x86/kvm/svm/sev.c | 124 ++++++++++-----------
+ arch/x86/kvm/svm/svm.h | 26 +++++
  drivers/acpi/apei/hest.c | 8 ++
  drivers/acpi/irq.c | 17 ++-
  drivers/acpi/resource.c | 60 ++++++++++
@@ -16,6 +18,7 @@
  drivers/firmware/efi/efi.c | 124 +++++++++++++++------
  drivers/firmware/efi/secureboot.c | 38 +++++++
  drivers/firmware/sysfb.c | 18 ++-
+ .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 2 +-
  drivers/hid/hid-rmi.c | 66 -----------
  drivers/hwtracing/coresight/coresight-etm4x-core.c | 19 ++++
  drivers/input/rmi4/rmi_driver.c | 124 ++++++++++---------
@@ -37,10 +40,10 @@
  security/lockdown/Kconfig | 13 +++
  security/lockdown/lockdown.c | 1 +
  security/security.c | 12 ++
- 39 files changed, 558 insertions(+), 179 deletions(-)
+ 42 files changed, 646 insertions(+), 243 deletions(-)
 
 diff --git a/Makefile b/Makefile
-index 5547e02f6104..3193786e8ba0 100644
+index bf463afef54b..7717cdde1d65 100644
 --- a/Makefile
 +++ b/Makefile
 @@ -22,6 +22,18 @@ $(if $(filter __%, $(MAKECMDGOALS)), \
@@ -200,6 +203,335 @@ index 0cccfeb67c3a..a7dccf67c4a1 100644
  	reserve_initrd();
  
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 69ae5e1b3120..512dfe45d91e 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -2414,15 +2414,18 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
+ 	 */
+ 	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
+ 
+-	vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
+-	vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
+-	vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
+-	vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
+-	vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);
++	BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap));
++	memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap));
+ 
+-	svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);
++	vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb);
++	vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb);
++	vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb);
++	vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb);
++	vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb);
+ 
+-	if (ghcb_xcr0_is_valid(ghcb)) {
++	svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb);
++
++	if (kvm_ghcb_xcr0_is_valid(svm)) {
+ 		vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
+ 		kvm_update_cpuid_runtime(vcpu);
+ 	}
+@@ -2433,84 +2436,88 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
+ 	control->exit_code_hi = upper_32_bits(exit_code);
+ 	control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
+ 	control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
++	svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm, ghcb);
+ 
+ 	/* Clear the valid entries fields */
+ 	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
+ }
+ 
++static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
++{
++	return (((u64)control->exit_code_hi) << 32) | control->exit_code;
++}
++
+ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
+ {
+-	struct kvm_vcpu *vcpu;
+-	struct ghcb *ghcb;
++	struct vmcb_control_area *control = &svm->vmcb->control;
++	struct kvm_vcpu *vcpu = &svm->vcpu;
+ 	u64 exit_code;
+ 	u64 reason;
+ 
+-	ghcb = svm->sev_es.ghcb;
+-
+ 	/*
+ 	 * Retrieve the exit code now even though it may not be marked valid
+ 	 * as it could help with debugging.
+ 	 */
+-	exit_code = ghcb_get_sw_exit_code(ghcb);
++	exit_code = kvm_ghcb_get_sw_exit_code(control);
+ 
+ 	/* Only GHCB Usage code 0 is supported */
+-	if (ghcb->ghcb_usage) {
++	if (svm->sev_es.ghcb->ghcb_usage) {
+ 		reason = GHCB_ERR_INVALID_USAGE;
+ 		goto vmgexit_err;
+ 	}
+ 
+ 	reason = GHCB_ERR_MISSING_INPUT;
+ 
+-	if (!ghcb_sw_exit_code_is_valid(ghcb) ||
+-	    !ghcb_sw_exit_info_1_is_valid(ghcb) ||
+-	    !ghcb_sw_exit_info_2_is_valid(ghcb))
++	if (!kvm_ghcb_sw_exit_code_is_valid(svm) ||
++	    !kvm_ghcb_sw_exit_info_1_is_valid(svm) ||
++	    !kvm_ghcb_sw_exit_info_2_is_valid(svm))
+ 		goto vmgexit_err;
+ 
+-	switch (ghcb_get_sw_exit_code(ghcb)) {
++	switch (exit_code) {
+ 	case SVM_EXIT_READ_DR7:
+ 		break;
+ 	case SVM_EXIT_WRITE_DR7:
+-		if (!ghcb_rax_is_valid(ghcb))
++		if (!kvm_ghcb_rax_is_valid(svm))
+ 			goto vmgexit_err;
+ 		break;
+ 	case SVM_EXIT_RDTSC:
+ 		break;
+ 	case SVM_EXIT_RDPMC:
+-		if (!ghcb_rcx_is_valid(ghcb))
++		if (!kvm_ghcb_rcx_is_valid(svm))
+ 			goto vmgexit_err;
+ 		break;
+ 	case SVM_EXIT_CPUID:
+-		if (!ghcb_rax_is_valid(ghcb) ||
+-		    !ghcb_rcx_is_valid(ghcb))
++		if (!kvm_ghcb_rax_is_valid(svm) ||
++		    !kvm_ghcb_rcx_is_valid(svm))
+ 			goto vmgexit_err;
+-		if (ghcb_get_rax(ghcb) == 0xd)
+-			if (!ghcb_xcr0_is_valid(ghcb))
++		if (vcpu->arch.regs[VCPU_REGS_RAX] == 0xd)
++			if (!kvm_ghcb_xcr0_is_valid(svm))
+ 				goto vmgexit_err;
+ 		break;
+ 	case SVM_EXIT_INVD:
+ 		break;
+ 	case SVM_EXIT_IOIO:
+-		if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
+-			if (!ghcb_sw_scratch_is_valid(ghcb))
++		if (control->exit_info_1 & SVM_IOIO_STR_MASK) {
++			if (!kvm_ghcb_sw_scratch_is_valid(svm))
+ 				goto vmgexit_err;
+ 		} else {
+-			if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
+-				if (!ghcb_rax_is_valid(ghcb))
++			if (!(control->exit_info_1 & SVM_IOIO_TYPE_MASK))
++				if (!kvm_ghcb_rax_is_valid(svm))
+ 					goto vmgexit_err;
+ 		}
+ 		break;
+ 	case SVM_EXIT_MSR:
+-		if (!ghcb_rcx_is_valid(ghcb))
++		if (!kvm_ghcb_rcx_is_valid(svm))
+ 			goto vmgexit_err;
+-		if (ghcb_get_sw_exit_info_1(ghcb)) {
+-			if (!ghcb_rax_is_valid(ghcb) ||
+-			    !ghcb_rdx_is_valid(ghcb))
++		if (control->exit_info_1) {
++			if (!kvm_ghcb_rax_is_valid(svm) ||
++			    !kvm_ghcb_rdx_is_valid(svm))
+ 				goto vmgexit_err;
+ 		}
+ 		break;
+ 	case SVM_EXIT_VMMCALL:
+-		if (!ghcb_rax_is_valid(ghcb) ||
+-		    !ghcb_cpl_is_valid(ghcb))
++		if (!kvm_ghcb_rax_is_valid(svm) ||
++		    !kvm_ghcb_cpl_is_valid(svm))
+ 			goto vmgexit_err;
+ 		break;
+ 	case SVM_EXIT_RDTSCP:
+@@ -2518,19 +2525,19 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
+ 	case SVM_EXIT_WBINVD:
+ 		break;
+ 	case SVM_EXIT_MONITOR:
+-		if (!ghcb_rax_is_valid(ghcb) ||
+-		    !ghcb_rcx_is_valid(ghcb) ||
+-		    !ghcb_rdx_is_valid(ghcb))
++		if (!kvm_ghcb_rax_is_valid(svm) ||
++		    !kvm_ghcb_rcx_is_valid(svm) ||
++		    !kvm_ghcb_rdx_is_valid(svm))
+ 			goto vmgexit_err;
+ 		break;
+ 	case SVM_EXIT_MWAIT:
+-		if (!ghcb_rax_is_valid(ghcb) ||
+-		    !ghcb_rcx_is_valid(ghcb))
++		if (!kvm_ghcb_rax_is_valid(svm) ||
++		    !kvm_ghcb_rcx_is_valid(svm))
+ 			goto vmgexit_err;
+ 		break;
+ 	case SVM_VMGEXIT_MMIO_READ:
+ 	case SVM_VMGEXIT_MMIO_WRITE:
+-		if (!ghcb_sw_scratch_is_valid(ghcb))
++		if (!kvm_ghcb_sw_scratch_is_valid(svm))
+ 			goto vmgexit_err;
+ 		break;
+ 	case SVM_VMGEXIT_NMI_COMPLETE:
+@@ -2546,11 +2553,9 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
+ 	return 0;
+ 
+ vmgexit_err:
+-	vcpu = &svm->vcpu;
+-
+ 	if (reason == GHCB_ERR_INVALID_USAGE) {
+ 		vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
+-			    ghcb->ghcb_usage);
++			    svm->sev_es.ghcb->ghcb_usage);
+ 	} else if (reason == GHCB_ERR_INVALID_EVENT) {
+ 		vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",
+ 			    exit_code);
+@@ -2560,11 +2565,8 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
+ 		dump_ghcb(svm);
+ 	}
+ 
+-	/* Clear the valid entries fields */
+-	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
+-
+-	ghcb_set_sw_exit_info_1(ghcb, 2);
+-	ghcb_set_sw_exit_info_2(ghcb, reason);
++	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
++	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, reason);
+ 
+ 	/* Resume the guest to "return" the error code. */
+ 	return 1;
+@@ -2583,7 +2585,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
+ 	 */
+ 	if (svm->sev_es.ghcb_sa_sync) {
+ 		kvm_write_guest(svm->vcpu.kvm,
+-				ghcb_get_sw_scratch(svm->sev_es.ghcb),
++				svm->sev_es.sw_scratch,
+ 				svm->sev_es.ghcb_sa,
+ 				svm->sev_es.ghcb_sa_len);
+ 		svm->sev_es.ghcb_sa_sync = false;
+@@ -2629,12 +2631,11 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
+ static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
+ {
+ 	struct vmcb_control_area *control = &svm->vmcb->control;
+-	struct ghcb *ghcb = svm->sev_es.ghcb;
+ 	u64 ghcb_scratch_beg, ghcb_scratch_end;
+ 	u64 scratch_gpa_beg, scratch_gpa_end;
+ 	void *scratch_va;
+ 
+-	scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
++	scratch_gpa_beg = svm->sev_es.sw_scratch;
+ 	if (!scratch_gpa_beg) {
+ 		pr_err("vmgexit: scratch gpa not provided\n");
+ 		goto e_scratch;
+@@ -2705,8 +2706,8 @@ static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
+ 	return 0;
+ 
+ e_scratch:
+-	ghcb_set_sw_exit_info_1(ghcb, 2);
+-	ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
++	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
++	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
+ 
+ 	return 1;
+ }
+@@ -2819,7 +2820,6 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 	struct vmcb_control_area *control = &svm->vmcb->control;
+ 	u64 ghcb_gpa, exit_code;
+-	struct ghcb *ghcb;
+ 	int ret;
+ 
+ 	/* Validate the GHCB */
+@@ -2844,20 +2844,18 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
+ 	}
+ 
+ 	svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
+-	ghcb = svm->sev_es.ghcb_map.hva;
+ 
+-	trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
+-
+-	exit_code = ghcb_get_sw_exit_code(ghcb);
++	trace_kvm_vmgexit_enter(vcpu->vcpu_id, svm->sev_es.ghcb);
+ 
++	sev_es_sync_from_ghcb(svm);
+ 	ret = sev_es_validate_vmgexit(svm);
+ 	if (ret)
+ 		return ret;
+ 
+-	sev_es_sync_from_ghcb(svm);
+-	ghcb_set_sw_exit_info_1(ghcb, 0);
+-	ghcb_set_sw_exit_info_2(ghcb, 0);
++	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 0);
++	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 0);
+ 
++	exit_code = kvm_ghcb_get_sw_exit_code(control);
+ 	switch (exit_code) {
+ 	case SVM_VMGEXIT_MMIO_READ:
+ 		ret = setup_vmgexit_scratch(svm, true, control->exit_info_2);
+@@ -2895,13 +2893,13 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
+ 			break;
+ 		case 1:
+ 			/* Get AP jump table address */
+-			ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table);
++			ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, sev->ap_jump_table);
+ 			break;
+ 		default:
+ 			pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
+ 			       control->exit_info_1);
+-			ghcb_set_sw_exit_info_1(ghcb, 2);
+-			ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_INPUT);
++			ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
++			ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);
+ 		}
+ 
+ 		ret = 1;
+diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
+index f44751dd8d5d..ece0d5959567 100644
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -190,10 +190,12 @@ struct vcpu_sev_es_state {
+ 	/* SEV-ES support */
+ 	struct sev_es_save_area *vmsa;
+ 	struct ghcb *ghcb;
++	u8 valid_bitmap[16];
+ 	struct kvm_host_map ghcb_map;
+ 	bool received_first_sipi;
+ 
+ 	/* SEV-ES scratch area support */
++	u64 sw_scratch;
+ 	void *ghcb_sa;
+ 	u32 ghcb_sa_len;
+ 	bool ghcb_sa_sync;
+@@ -745,4 +747,28 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm);
+ void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
+ void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
+ 
++#define DEFINE_KVM_GHCB_ACCESSORS(field) \
++	static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
++	{ \
++		return test_bit(GHCB_BITMAP_IDX(field), \
++				(unsigned long *)&svm->sev_es.valid_bitmap); \
++	} \
++	\
++	static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \
++	{ \
++		return kvm_ghcb_##field##_is_valid(svm) ? ghcb->save.field : 0; \
++	} \
++
++DEFINE_KVM_GHCB_ACCESSORS(cpl)
++DEFINE_KVM_GHCB_ACCESSORS(rax)
++DEFINE_KVM_GHCB_ACCESSORS(rcx)
++DEFINE_KVM_GHCB_ACCESSORS(rdx)
++DEFINE_KVM_GHCB_ACCESSORS(rbx)
++DEFINE_KVM_GHCB_ACCESSORS(rsi)
++DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code)
++DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1)
++DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2)
++DEFINE_KVM_GHCB_ACCESSORS(sw_scratch)
++DEFINE_KVM_GHCB_ACCESSORS(xcr0)
++
+ #endif
 diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
 index 6aef1ee5e1bd..8f146b1b4972 100644
 --- a/drivers/acpi/apei/hest.c
@@ -694,6 +1026,19 @@ index 3c197db42c9d..16e4a2e90fae 100644
  	pd = sysfb_create_simplefb(si, &mode);
  	if (!IS_ERR(pd))
  		goto unlock_mutex;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 9bc86deac9e8..b885c39bd16b 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -1320,7 +1320,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+ 		if (computed_streams[i])
+ 			continue;
+ 
+-		if (!res_pool->funcs->remove_stream_from_ctx ||
++		if (res_pool->funcs->remove_stream_from_ctx &&
+ 		    res_pool->funcs->remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
+ 			return -EINVAL;
+ 
 diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
 index 84e7ba5314d3..efc96776f761 100644
 --- a/drivers/hid/hid-rmi.c

-- 
cgit v1.2.3