Diffstat (limited to 'SOURCES')
-rw-r--r--  SOURCES/Patchlist.changelog     |   3
-rw-r--r--  SOURCES/patch-6.1-redhat.patch  | 134
2 files changed, 134 insertions, 3 deletions
diff --git a/SOURCES/Patchlist.changelog b/SOURCES/Patchlist.changelog
index 7e13a82..7ffeaa8 100644
--- a/SOURCES/Patchlist.changelog
+++ b/SOURCES/Patchlist.changelog
@@ -1,3 +1,6 @@
+"https://gitlab.com/cki-project/kernel-ark/-/commit"/d37d701b5ab6bf4cbcd9b301e08d4cbb502fecd1
+ d37d701b5ab6bf4cbcd9b301e08d4cbb502fecd1 x86/mm: Randomize per-cpu entry area
+
 "https://gitlab.com/cki-project/kernel-ark/-/commit"/1595d8197147ac3b439c1334527eb2a5cec86095
  1595d8197147ac3b439c1334527eb2a5cec86095 KVM: VMX: Execute IBPB on emulated VM-exit when guest has IBRS
diff --git a/SOURCES/patch-6.1-redhat.patch b/SOURCES/patch-6.1-redhat.patch
index 38e4655..31268fd 100644
--- a/SOURCES/patch-6.1-redhat.patch
+++ b/SOURCES/patch-6.1-redhat.patch
@@ -4,9 +4,13 @@
  arch/s390/include/asm/ipl.h           |   1 +
  arch/s390/kernel/ipl.c                |   5 +
  arch/s390/kernel/setup.c              |   4 +
+ arch/x86/include/asm/cpu_entry_area.h |   4 -
+ arch/x86/include/asm/pgtable_areas.h  |   8 +-
+ arch/x86/kernel/hw_breakpoint.c       |   2 +-
  arch/x86/kernel/setup.c               |  22 ++--
  arch/x86/kvm/vmx/nested.c             |  11 ++
  arch/x86/kvm/vmx/vmx.c                |   6 +-
+ arch/x86/mm/cpu_entry_area.c          |  46 +++++++-
  drivers/acpi/apei/hest.c              |   8 ++
  drivers/acpi/irq.c                    |  17 ++-
  drivers/acpi/scan.c                   |   9 ++
@@ -36,10 +40,10 @@
  security/lockdown/Kconfig             |  13 +++
  security/lockdown/lockdown.c          |   1 +
  security/security.c                   |   6 +
- 38 files changed, 484 insertions(+), 180 deletions(-)
+ 42 files changed, 534 insertions(+), 190 deletions(-)
 
 diff --git a/Makefile b/Makefile
-index 49261450039a..b077c86bcb5f 100644
+index 3778b422fa11..63f076bb2bc7 100644
 --- a/Makefile
 +++ b/Makefile
 @@ -18,6 +18,10 @@ $(if $(filter __%, $(MAKECMDGOALS)), \
@@ -137,6 +141,52 @@ index ab19ddb09d65..d463ec57b218 100644
 	/* Have one command line that is parsed and saved in /proc/cmdline */
 	/* boot_command_line has been already set up in early.c */
 	*cmdline_p = boot_command_line;
+diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
+index 75efc4c6f076..462fc34f1317 100644
+--- a/arch/x86/include/asm/cpu_entry_area.h
++++ b/arch/x86/include/asm/cpu_entry_area.h
+@@ -130,10 +130,6 @@ struct cpu_entry_area {
+ };
+ 
+ #define CPU_ENTRY_AREA_SIZE		(sizeof(struct cpu_entry_area))
+-#define CPU_ENTRY_AREA_ARRAY_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)
+-
+-/* Total size includes the readonly IDT mapping page as well: */
+-#define CPU_ENTRY_AREA_TOTAL_SIZE	(CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)
+ 
+ DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
+ DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
+diff --git a/arch/x86/include/asm/pgtable_areas.h b/arch/x86/include/asm/pgtable_areas.h
+index d34cce1b995c..4f056fb88174 100644
+--- a/arch/x86/include/asm/pgtable_areas.h
++++ b/arch/x86/include/asm/pgtable_areas.h
+@@ -11,6 +11,12 @@
+ 
+ #define CPU_ENTRY_AREA_RO_IDT_VADDR	((void *)CPU_ENTRY_AREA_RO_IDT)
+ 
+-#define CPU_ENTRY_AREA_MAP_SIZE	(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
++#ifdef CONFIG_X86_32
++#define CPU_ENTRY_AREA_MAP_SIZE	(CPU_ENTRY_AREA_PER_CPU +		\
++					 (CPU_ENTRY_AREA_SIZE * NR_CPUS) -	\
++					 CPU_ENTRY_AREA_BASE)
++#else
++#define CPU_ENTRY_AREA_MAP_SIZE	P4D_SIZE
++#endif
+ 
+ #endif /* _ASM_X86_PGTABLE_AREAS_H */
+diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
+index 668a4a6533d9..bbb0f737aab1 100644
+--- a/arch/x86/kernel/hw_breakpoint.c
++++ b/arch/x86/kernel/hw_breakpoint.c
+@@ -266,7 +266,7 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
+ 
+ 	/* CPU entry erea is always used for CPU entry */
+ 	if (within_area(addr, end, CPU_ENTRY_AREA_BASE,
+-			CPU_ENTRY_AREA_TOTAL_SIZE))
++			CPU_ENTRY_AREA_MAP_SIZE))
+ 		return true;
+ 
+ 	/*
 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
 index 892609cde4a2..ad9aa11ba3a0 100644
 --- a/arch/x86/kernel/setup.c
@@ -207,7 +257,7 @@ index 10c63b1bf92f..df8995977ec2 100644
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
 diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
-index 63247c57c72c..179dfe9dc1a5 100644
+index 4ae248e87f5e..95ed874fbbcc 100644
 --- a/arch/x86/kvm/vmx/vmx.c
 +++ b/arch/x86/kvm/vmx/vmx.c
 @@ -1348,8 +1348,10 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
@@ -223,6 +273,84 @@
 	 */
 	if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
 		indirect_branch_prediction_barrier();
+diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
+index 6c2f1b76a0b6..20844cf141fb 100644
+--- a/arch/x86/mm/cpu_entry_area.c
++++ b/arch/x86/mm/cpu_entry_area.c
+@@ -15,16 +15,53 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage)
+ #ifdef CONFIG_X86_64
+ static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
+ DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
+-#endif
+ 
+-#ifdef CONFIG_X86_32
++static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, _cea_offset);
++
++static __always_inline unsigned int cea_offset(unsigned int cpu)
++{
++	return per_cpu(_cea_offset, cpu);
++}
++
++static __init void init_cea_offsets(void)
++{
++	unsigned int max_cea;
++	unsigned int i, j;
++
++	max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;
++
++	/* O(sodding terrible) */
++	for_each_possible_cpu(i) {
++		unsigned int cea;
++
++again:
++		cea = prandom_u32_max(max_cea);
++
++		for_each_possible_cpu(j) {
++			if (cea_offset(j) == cea)
++				goto again;
++
++			if (i == j)
++				break;
++		}
++
++		per_cpu(_cea_offset, i) = cea;
++	}
++}
++#else /* !X86_64 */
+ DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
++
++static __always_inline unsigned int cea_offset(unsigned int cpu)
++{
++	return cpu;
++}
++static inline void init_cea_offsets(void) { }
+ #endif
+ 
+ /* Is called from entry code, so must be noinstr */
+ noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
+ {
+-	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
++	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cea_offset(cpu) * CPU_ENTRY_AREA_SIZE;
+ 	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
+ 
+ 	return (struct cpu_entry_area *) va;
+@@ -205,7 +242,6 @@ static __init void setup_cpu_entry_area_ptes(void)
+ 
+ 	/* The +1 is for the readonly IDT: */
+ 	BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
+-	BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
+ 	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
+ 
+ 	start = CPU_ENTRY_AREA_BASE;
+@@ -221,6 +257,8 @@ void __init setup_cpu_entry_areas(void)
+ {
+ 	unsigned int cpu;
+ 
++	init_cea_offsets();
++
+ 	setup_cpu_entry_area_ptes();
+ 
+ 	for_each_possible_cpu(cpu)
 diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
 index 6aef1ee5e1bd..8f146b1b4972 100644
 --- a/drivers/acpi/apei/hest.c
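
Note: the core of the "x86/mm: Randomize per-cpu entry area" backport above is init_cea_offsets(), which draws a random slot for each possible CPU and redraws on collision, so every CPU gets a unique randomized offset within the entry-area map. The following is a minimal userspace C sketch of that same rejection-sampling idea; the names NCPUS and MAX_CEA and the use of rand() are illustrative stand-ins, not the kernel's per-cpu machinery or prandom_u32_max().

/*
 * Sketch: assign each "CPU" a unique random slot out of MAX_CEA,
 * redrawing on collision, mirroring the loop in init_cea_offsets().
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define NCPUS   8	/* stand-in for the possible-CPU count */
#define MAX_CEA 512	/* stand-in for (MAP_SIZE - PAGE_SIZE) / AREA_SIZE */

int main(void)
{
	unsigned int offset[NCPUS];

	srand((unsigned int)time(NULL));

	for (int i = 0; i < NCPUS; i++) {
		unsigned int cea;
		int clash;

		do {
			cea = (unsigned int)rand() % MAX_CEA;
			clash = 0;
			/* Redraw if an already-assigned CPU holds this slot. */
			for (int j = 0; j < i; j++) {
				if (offset[j] == cea) {
					clash = 1;
					break;
				}
			}
		} while (clash);

		offset[i] = cea;
		printf("cpu %d -> slot %u\n", i, cea);
	}
	return 0;
}

As the kernel's own /* O(sodding terrible) */ comment concedes, this is quadratic in the CPU count; it is acceptable because it runs once at boot over a slot space much larger than the number of CPUs, so collisions are rare.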