-rw-r--r--  SOURCES/Patchlist.changelog       3
-rw-r--r--  SOURCES/patch-6.1-redhat.patch  134
-rwxr-xr-x  SPECS/kernel.spec                22
3 files changed, 149 insertions, 10 deletions
diff --git a/SOURCES/Patchlist.changelog b/SOURCES/Patchlist.changelog
index 7e13a82..7ffeaa8 100644
--- a/SOURCES/Patchlist.changelog
+++ b/SOURCES/Patchlist.changelog
@@ -1,3 +1,6 @@
+"https://gitlab.com/cki-project/kernel-ark/-/commit"/d37d701b5ab6bf4cbcd9b301e08d4cbb502fecd1
+ d37d701b5ab6bf4cbcd9b301e08d4cbb502fecd1 x86/mm: Randomize per-cpu entry area
+
"https://gitlab.com/cki-project/kernel-ark/-/commit"/1595d8197147ac3b439c1334527eb2a5cec86095
1595d8197147ac3b439c1334527eb2a5cec86095 KVM: VMX: Execute IBPB on emulated VM-exit when guest has IBRS
diff --git a/SOURCES/patch-6.1-redhat.patch b/SOURCES/patch-6.1-redhat.patch
index 38e4655..31268fd 100644
--- a/SOURCES/patch-6.1-redhat.patch
+++ b/SOURCES/patch-6.1-redhat.patch
@@ -4,9 +4,13 @@
arch/s390/include/asm/ipl.h | 1 +
arch/s390/kernel/ipl.c | 5 +
arch/s390/kernel/setup.c | 4 +
+ arch/x86/include/asm/cpu_entry_area.h | 4 -
+ arch/x86/include/asm/pgtable_areas.h | 8 +-
+ arch/x86/kernel/hw_breakpoint.c | 2 +-
arch/x86/kernel/setup.c | 22 ++--
arch/x86/kvm/vmx/nested.c | 11 ++
arch/x86/kvm/vmx/vmx.c | 6 +-
+ arch/x86/mm/cpu_entry_area.c | 46 +++++++-
drivers/acpi/apei/hest.c | 8 ++
drivers/acpi/irq.c | 17 ++-
drivers/acpi/scan.c | 9 ++
@@ -36,10 +40,10 @@
security/lockdown/Kconfig | 13 +++
security/lockdown/lockdown.c | 1 +
security/security.c | 6 +
- 38 files changed, 484 insertions(+), 180 deletions(-)
+ 42 files changed, 534 insertions(+), 190 deletions(-)
diff --git a/Makefile b/Makefile
-index 49261450039a..b077c86bcb5f 100644
+index 3778b422fa11..63f076bb2bc7 100644
--- a/Makefile
+++ b/Makefile
@@ -18,6 +18,10 @@ $(if $(filter __%, $(MAKECMDGOALS)), \
@@ -137,6 +141,52 @@ index ab19ddb09d65..d463ec57b218 100644
/* Have one command line that is parsed and saved in /proc/cmdline */
/* boot_command_line has been already set up in early.c */
*cmdline_p = boot_command_line;
+diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
+index 75efc4c6f076..462fc34f1317 100644
+--- a/arch/x86/include/asm/cpu_entry_area.h
++++ b/arch/x86/include/asm/cpu_entry_area.h
+@@ -130,10 +130,6 @@ struct cpu_entry_area {
+ };
+
+ #define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area))
+-#define CPU_ENTRY_AREA_ARRAY_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS)
+-
+-/* Total size includes the readonly IDT mapping page as well: */
+-#define CPU_ENTRY_AREA_TOTAL_SIZE (CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)
+
+ DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
+ DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
+diff --git a/arch/x86/include/asm/pgtable_areas.h b/arch/x86/include/asm/pgtable_areas.h
+index d34cce1b995c..4f056fb88174 100644
+--- a/arch/x86/include/asm/pgtable_areas.h
++++ b/arch/x86/include/asm/pgtable_areas.h
+@@ -11,6 +11,12 @@
+
+ #define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT)
+
+-#define CPU_ENTRY_AREA_MAP_SIZE (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
++#ifdef CONFIG_X86_32
++#define CPU_ENTRY_AREA_MAP_SIZE (CPU_ENTRY_AREA_PER_CPU + \
++ (CPU_ENTRY_AREA_SIZE * NR_CPUS) - \
++ CPU_ENTRY_AREA_BASE)
++#else
++#define CPU_ENTRY_AREA_MAP_SIZE P4D_SIZE
++#endif
+
+ #endif /* _ASM_X86_PGTABLE_AREAS_H */
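(Illustration, not part of the patch: on 64-bit the map size above becomes a whole P4D entry rather than a tight NR_CPUS-sized array, which is what leaves room for the offset randomization added in arch/x86/mm/cpu_entry_area.c below. The stand-alone sketch that follows computes the number of candidate slots from a map size and an area size, mirroring the max_cea computation in init_cea_offsets(); the 512 GiB P4D_SIZE value for 4-level paging and the per-CPU area size used here are assumed example figures, not values taken from this patch.)

/*
 * Illustration only: how many cpu_entry_area slots fit in the 64-bit
 * mapping window.  One page is reserved for the read-only IDT, the rest
 * is divided into area-sized slots.  Both sizes are assumed examples.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long map_size  = 1ULL << 39; /* assumed P4D_SIZE: 512 GiB with 4-level paging */
	unsigned long long page_size = 4096;       /* read-only IDT page */
	unsigned long long area_size = 256 * 1024; /* placeholder for sizeof(struct cpu_entry_area) */

	printf("candidate slots: %llu\n", (map_size - page_size) / area_size);
	return 0;
}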
+diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
+index 668a4a6533d9..bbb0f737aab1 100644
+--- a/arch/x86/kernel/hw_breakpoint.c
++++ b/arch/x86/kernel/hw_breakpoint.c
+@@ -266,7 +266,7 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
+
+ /* CPU entry erea is always used for CPU entry */
+ if (within_area(addr, end, CPU_ENTRY_AREA_BASE,
+- CPU_ENTRY_AREA_TOTAL_SIZE))
++ CPU_ENTRY_AREA_MAP_SIZE))
+ return true;
+
+ /*
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 892609cde4a2..ad9aa11ba3a0 100644
--- a/arch/x86/kernel/setup.c
@@ -207,7 +257,7 @@ index 10c63b1bf92f..df8995977ec2 100644
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
-index 63247c57c72c..179dfe9dc1a5 100644
+index 4ae248e87f5e..95ed874fbbcc 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1348,8 +1348,10 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
@@ -223,6 +273,84 @@ index 63247c57c72c..179dfe9dc1a5 100644
*/
if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
indirect_branch_prediction_barrier();
+diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
+index 6c2f1b76a0b6..20844cf141fb 100644
+--- a/arch/x86/mm/cpu_entry_area.c
++++ b/arch/x86/mm/cpu_entry_area.c
+@@ -15,16 +15,53 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage)
+ #ifdef CONFIG_X86_64
+ static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
+ DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
+-#endif
+
+-#ifdef CONFIG_X86_32
++static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, _cea_offset);
++
++static __always_inline unsigned int cea_offset(unsigned int cpu)
++{
++ return per_cpu(_cea_offset, cpu);
++}
++
++static __init void init_cea_offsets(void)
++{
++ unsigned int max_cea;
++ unsigned int i, j;
++
++ max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;
++
++ /* O(sodding terrible) */
++ for_each_possible_cpu(i) {
++ unsigned int cea;
++
++again:
++ cea = prandom_u32_max(max_cea);
++
++ for_each_possible_cpu(j) {
++ if (cea_offset(j) == cea)
++ goto again;
++
++ if (i == j)
++ break;
++ }
++
++ per_cpu(_cea_offset, i) = cea;
++ }
++}
++#else /* !X86_64 */
+ DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
++
++static __always_inline unsigned int cea_offset(unsigned int cpu)
++{
++ return cpu;
++}
++static inline void init_cea_offsets(void) { }
+ #endif
+
+ /* Is called from entry code, so must be noinstr */
+ noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
+ {
+- unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
++ unsigned long va = CPU_ENTRY_AREA_PER_CPU + cea_offset(cpu) * CPU_ENTRY_AREA_SIZE;
+ BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
+
+ return (struct cpu_entry_area *) va;
+@@ -205,7 +242,6 @@ static __init void setup_cpu_entry_area_ptes(void)
+
+ /* The +1 is for the readonly IDT: */
+ BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
+- BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
+ BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
+
+ start = CPU_ENTRY_AREA_BASE;
+@@ -221,6 +257,8 @@ void __init setup_cpu_entry_areas(void)
+ {
+ unsigned int cpu;
+
++ init_cea_offsets();
++
+ setup_cpu_entry_area_ptes();
+
+ for_each_possible_cpu(cpu)
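(For reference: the init_cea_offsets() hunk above gives every possible CPU a distinct random slot index below max_cea by retrying on collisions. The stand-alone user-space sketch below reproduces the shape of that rejection-sampling loop; rand() stands in for prandom_u32_max(), a plain array stands in for the _cea_offset per-cpu variable, and the NR_CPUS and max_cea values are made-up examples, so none of these substitutions are kernel API.)

/*
 * User-space sketch of the rejection-sampling loop in init_cea_offsets():
 * assign each CPU a distinct random slot index in [0, max_cea).
 * rand() and the plain array are illustration-only stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define NR_CPUS 8

int main(void)
{
	unsigned int cea_offset[NR_CPUS];
	unsigned int max_cea = 1024;   /* placeholder slot count */
	unsigned int i, j;

	srand((unsigned int)time(NULL));

	for (i = 0; i < NR_CPUS; i++) {
		unsigned int cea;
again:
		cea = (unsigned int)rand() % max_cea;

		/* O(n^2) duplicate check, same shape as the patch */
		for (j = 0; j < i; j++) {
			if (cea_offset[j] == cea)
				goto again;
		}
		cea_offset[i] = cea;
	}

	for (i = 0; i < NR_CPUS; i++)
		printf("cpu %u -> slot %u\n", i, cea_offset[i]);
	return 0;
}

(The retry loop terminates quickly as long as the number of candidate slots is much larger than the number of possible CPUs, which is the case once CPU_ENTRY_AREA_MAP_SIZE covers a whole P4D entry.)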
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 6aef1ee5e1bd..8f146b1b4972 100644
--- a/drivers/acpi/apei/hest.c
diff --git a/SPECS/kernel.spec b/SPECS/kernel.spec
index b6f89e3..038649c 100755
--- a/SPECS/kernel.spec
+++ b/SPECS/kernel.spec
@@ -124,17 +124,17 @@ Summary: The Linux kernel
# the --with-release option overrides this setting.)
%define debugbuildsenabled 1
%define buildid .fsync
-%define specversion 6.1.8
+%define specversion 6.1.9
%define patchversion 6.1
%define pkgrelease 200
%define kversion 6
-%define tarfile_release 6.1.8
+%define tarfile_release 6.1.9
# This is needed to do merge window version magic
%define patchlevel 1
# This allows pkg_release to have configurable %%{?dist} tag
%define specrelease 201%{?buildid}%{?dist}
# This defines the kabi tarball version
-%define kabiversion 6.1.8
+%define kabiversion 6.1.9
# If this variable is set to 1, a bpf selftests build failure will cause a
# fatal kernel package build error
@@ -388,8 +388,8 @@ Summary: The Linux kernel
%endif
%if 0%{?fedora}
-# don't do debug builds on anything but i686 and x86_64
-%ifnarch i686 x86_64
+# don't do debug builds on anything but aarch64 and x86_64
+%ifnarch aarch64 x86_64
%define with_debug 0
%endif
%endif
@@ -3214,8 +3214,16 @@ fi
#
#
%changelog
-* Tue Jan 31 2023 Jan Drögehoff <sentrycraft123@gmail.com> - 6.1.8-201.fsync
-- kernel-fsync v6.1.8
+* Sun Feb 05 2023 Jan Drögehoff <sentrycraft123@gmail.com> - 6.1.9-201.fsync
+- kernel-fsync v6.1.9
+
+* Wed Feb 01 2023 Augusto Caringi <acaringi@redhat.com> [6.1.9-0]
+- Add BugsFixed file with bz entries to be included in updates. (Justin M. Forbes)
+- x86/mm: Randomize per-cpu entry area (Peter Zijlstra)
+- Update self-test data to not expect debugbuildsenabled 0 (Justin M. Forbes)
+- Turn off forced debug builds (Justin M. Forbes)
+- Turn on debug builds for aarch64 Fedora (Justin M. Forbes)
+- Linux v6.1.9
* Tue Jan 24 2023 Augusto Caringi <acaringi@redhat.com> [6.1.8-0]
- Linux v6.1.8