author     Jan200101 <sentrycraft123@gmail.com>   2024-07-17 19:41:57 +0200
committer  Jan200101 <sentrycraft123@gmail.com>   2024-07-17 19:41:57 +0200
commit     82bbf281341ef6fdc89bf3cd4b8f9e49884deccd (patch)
tree       7d0f2ea8b4cf6bbab4f9413f4625176115069842 /SOURCES/cachy-bore.patch
parent     805152f39f74846f5b07f681b55e3356907bd428 (diff)
download   kernel-fsync-82bbf281341ef6fdc89bf3cd4b8f9e49884deccd.tar.gz
           kernel-fsync-82bbf281341ef6fdc89bf3cd4b8f9e49884deccd.zip
kernel 6.9.9
Diffstat (limited to 'SOURCES/cachy-bore.patch')
-rw-r--r--   SOURCES/cachy-bore.patch   95
1 file changed, 70 insertions(+), 25 deletions(-)
diff --git a/SOURCES/cachy-bore.patch b/SOURCES/cachy-bore.patch
index 9f0fcc4..c321203 100644
--- a/SOURCES/cachy-bore.patch
+++ b/SOURCES/cachy-bore.patch
@@ -1,21 +1,22 @@
-From 61efc6f62710e42ca623e209f901394d4f356cc9 Mon Sep 17 00:00:00 2001
-From: Peter Jung <admin@ptr1337.dev>
-Date: Thu, 30 May 2024 10:45:04 +0200
+From 699662da34346e7dfea9523fb4ae2b18287f527c Mon Sep 17 00:00:00 2001
+From: Piotr Gorski <lucjan.lucjanov@gmail.com>
+Date: Thu, 4 Jul 2024 21:28:26 +0200
Subject: [PATCH] bore
-Signed-off-by: Peter Jung <admin@ptr1337.dev>
+Signed-off-by: Piotr Gorski <lucjan.lucjanov@gmail.com>
---
include/linux/sched.h | 10 ++
init/Kconfig | 17 +++
+ kernel/Kconfig.hz | 16 +++
kernel/sched/core.c | 143 +++++++++++++++++++++
kernel/sched/debug.c | 60 ++++++++-
- kernel/sched/fair.c | 272 +++++++++++++++++++++++++++++++++++++---
- kernel/sched/features.h | 20 ++-
- kernel/sched/sched.h | 7 ++
- 7 files changed, 512 insertions(+), 17 deletions(-)
+ kernel/sched/fair.c | 275 +++++++++++++++++++++++++++++++++++++---
+ kernel/sched/features.h | 28 +++-
+ kernel/sched/sched.h | 7 +
+ 8 files changed, 538 insertions(+), 18 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 3c2abbc587b49..e7bf3a034aa20 100644
+index 3c2abbc58..e7bf3a034 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -547,6 +547,16 @@ struct sched_entity {
@@ -36,7 +37,7 @@ index 3c2abbc587b49..e7bf3a034aa20 100644
u64 slice;
diff --git a/init/Kconfig b/init/Kconfig
-index 664bedb9a71fb..6f9c7fc90707a 100644
+index 459f44ef7..17385c859 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1279,6 +1279,23 @@ config CHECKPOINT_RESTORE
@@ -63,8 +64,34 @@ index 664bedb9a71fb..6f9c7fc90707a 100644
config SCHED_AUTOGROUP
bool "Automatic process group scheduling"
select CGROUPS
+diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
+index 38ef6d068..5f6eecd1e 100644
+--- a/kernel/Kconfig.hz
++++ b/kernel/Kconfig.hz
+@@ -55,5 +55,21 @@ config HZ
+ default 300 if HZ_300
+ default 1000 if HZ_1000
+
++config MIN_BASE_SLICE_NS
++ int "Default value for min_base_slice_ns"
++ default 2000000
++ help
++ The BORE Scheduler automatically calculates the optimal base
++ slice for the configured HZ using the following equation:
++
++ base_slice_ns = max(min_base_slice_ns, 1000000000/HZ)
++
++ This option sets the default lower bound limit of the base slice
++ to prevent the loss of task throughput due to overscheduling.
++
++ Setting this value too high can cause the system to boot with
++ an unnecessarily large base slice, resulting in high scheduling
++ latency and poor system responsiveness.
++
+ config SCHED_HRTICK
+ def_bool HIGH_RES_TIMERS
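The MIN_BASE_SLICE_NS help text above reduces to a single max() expression. A minimal standalone sketch of that calculation (plain userspace C, not kernel code; the equation and the 2000000 ns default come from the patch, everything else is illustrative):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* base_slice_ns = max(min_base_slice_ns, 1000000000/HZ) */
static uint64_t base_slice_ns(uint64_t min_base_slice_ns, unsigned int hz)
{
	uint64_t tick_ns = NSEC_PER_SEC / hz;
	return tick_ns > min_base_slice_ns ? tick_ns : min_base_slice_ns;
}

int main(void)
{
	const unsigned int hz[] = { 100, 250, 300, 1000 };
	/* HZ=1000 hits the 2 ms floor; the lower HZ values keep a full tick. */
	for (int i = 0; i < 4; i++)
		printf("HZ=%-4u -> base_slice_ns=%llu\n", hz[i],
		       (unsigned long long)base_slice_ns(2000000ULL, hz[i]));
	return 0;
}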
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index d211d40a2edc9..362df741dc85e 100644
+index d211d40a2..b6b2aa707 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4516,6 +4516,138 @@ int wake_up_state(struct task_struct *p, unsigned int state)
@@ -84,7 +111,7 @@ index d211d40a2edc9..362df741dc85e 100644
+ init_task.se.child_burst_last_cached = 0;
+}
+
-+void inline sched_fork_bore(struct task_struct *p) {
++inline void sched_fork_bore(struct task_struct *p) {
+ p->se.burst_time = 0;
+ p->se.curr_burst_penalty = 0;
+ p->se.burst_score = 0;
@@ -232,14 +259,14 @@ index d211d40a2edc9..362df741dc85e 100644
+#ifdef CONFIG_SCHED_BORE
+ sched_init_bore();
-+ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 5.2.0 by Masahito Suzuki");
++ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 5.2.4 by Masahito Suzuki");
+#endif // CONFIG_SCHED_BORE
+
wait_bit_init();
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index 8d5d98a5834df..b1786126171e9 100644
+index 8d5d98a58..b17861261 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -167,7 +167,52 @@ static const struct file_operations sched_feat_fops = {
@@ -346,7 +373,7 @@ index 8d5d98a5834df..b1786126171e9 100644
P(se.avg.runnable_sum);
P(se.avg.util_sum);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 213c94d027a4c..3c2d149d0235d 100644
+index 213c94d02..6dffa3419 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -19,6 +19,9 @@
@@ -383,7 +410,7 @@ index 213c94d027a4c..3c2d149d0235d 100644
+#ifdef CONFIG_SCHED_BORE
+unsigned int sysctl_sched_base_slice = 1000000000ULL / HZ;
+static unsigned int configured_sched_base_slice = 1000000000ULL / HZ;
-+unsigned int sysctl_sched_min_base_slice = 2000000ULL;
++unsigned int sysctl_sched_min_base_slice = CONFIG_MIN_BASE_SLICE_NS;
+#else // !CONFIG_SCHED_BORE
unsigned int sysctl_sched_base_slice = 750000ULL;
static unsigned int normalized_sysctl_sched_base_slice = 750000ULL;
@@ -705,7 +732,17 @@ index 213c94d027a4c..3c2d149d0235d 100644
if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
struct sched_entity *curr = cfs_rq->curr;
unsigned long load;
-@@ -5472,7 +5697,7 @@ pick_next_entity(struct cfs_rq *cfs_rq)
+@@ -5258,7 +5483,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ * on average, halfway through their slice, as such start tasks
+ * off with half a slice to ease into the competition.
+ */
+- if (sched_feat(PLACE_DEADLINE_INITIAL) && (flags & ENQUEUE_INITIAL))
++ if ((sched_feat(PLACE_DEADLINE_INITIAL) && (flags & ENQUEUE_INITIAL)) ||
++ (sched_feat(PLACE_DEADLINE_WAKEUP) && (flags & ENQUEUE_WAKEUP)))
+ vslice /= 2;
+
+ /*
+@@ -5472,7 +5698,7 @@ pick_next_entity(struct cfs_rq *cfs_rq)
cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next))
return cfs_rq->next;
@@ -714,7 +751,7 @@ index 213c94d027a4c..3c2d149d0235d 100644
}
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-@@ -6835,6 +7060,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+@@ -6835,6 +7061,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bool was_sched_idle = sched_idle_rq(rq);
util_est_dequeue(&rq->cfs, p);
@@ -729,7 +766,7 @@ index 213c94d027a4c..3c2d149d0235d 100644
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
-@@ -8369,10 +8602,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
+@@ -8369,10 +8603,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
cfs_rq = cfs_rq_of(se);
update_curr(cfs_rq);
@@ -741,7 +778,7 @@ index 213c94d027a4c..3c2d149d0235d 100644
goto preempt;
return;
-@@ -8590,16 +8820,25 @@ static void yield_task_fair(struct rq *rq)
+@@ -8590,16 +8821,25 @@ static void yield_task_fair(struct rq *rq)
/*
* Are we the only task in the tree?
*/
@@ -767,7 +804,7 @@ index 213c94d027a4c..3c2d149d0235d 100644
/*
* Tell update_rq_clock() that we've just updated,
* so we don't do microscopic update in schedule()
-@@ -12660,6 +12899,9 @@ static void task_fork_fair(struct task_struct *p)
+@@ -12660,6 +12900,9 @@ static void task_fork_fair(struct task_struct *p)
curr = cfs_rq->curr;
if (curr)
update_curr(cfs_rq);
@@ -778,10 +815,10 @@ index 213c94d027a4c..3c2d149d0235d 100644
rq_unlock(rq, &rf);
}
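The place_entity() hunk above extends the half-slice head start that stock EEVDF gives only to newly forked tasks (ENQUEUE_INITIAL) to every wakeup (ENQUEUE_WAKEUP) when the new PLACE_DEADLINE_WAKEUP feature is set. A minimal sketch of that branch outside the kernel (the flag bit values and feature booleans are simplified stand-ins for the real ENQUEUE_* flags and sched_feat()):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins, not the kernel's actual bit values. */
#define ENQUEUE_WAKEUP  0x01
#define ENQUEUE_INITIAL 0x02

static bool place_deadline_initial = true;  /* stock EEVDF default */
static bool place_deadline_wakeup  = true;  /* BORE default per this patch */

/* Mirrors the patched condition: halve the initial vslice for forked
 * tasks, and with PLACE_DEADLINE_WAKEUP also for woken tasks. */
static uint64_t placed_vslice(uint64_t vslice, int flags)
{
	if ((place_deadline_initial && (flags & ENQUEUE_INITIAL)) ||
	    (place_deadline_wakeup  && (flags & ENQUEUE_WAKEUP)))
		vslice /= 2;
	return vslice;
}

int main(void)
{
	/* A woken task now starts halfway through its slice too. */
	return placed_vslice(3000000, ENQUEUE_WAKEUP) == 1500000 ? 0 : 1;
}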
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
-index 143f55df890b1..bfeb9f65383d9 100644
+index 143f55df8..9ad25e4e7 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
-@@ -5,8 +5,26 @@
+@@ -5,8 +5,34 @@
* sleep+wake cycles. EEVDF placement strategy #1, #2 if disabled.
*/
SCHED_FEAT(PLACE_LAG, true)
@@ -791,6 +828,14 @@ index 143f55df890b1..bfeb9f65383d9 100644
SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
-SCHED_FEAT(RUN_TO_PARITY, true)
+/*
++ * Give woken tasks half a slice to ease into the competition.
++ */
++#ifdef CONFIG_SCHED_BORE
++SCHED_FEAT(PLACE_DEADLINE_WAKEUP, true)
++#else // !CONFIG_SCHED_BORE
++SCHED_FEAT(PLACE_DEADLINE_WAKEUP, false)
++#endif // CONFIG_SCHED_BORE
++/*
+ * Inhibit (wakeup) preemption until the current task has exhausted its slice.
+ */
+#ifdef CONFIG_SCHED_BORE
@@ -810,7 +855,7 @@ index 143f55df890b1..bfeb9f65383d9 100644
/*
* Prefer to schedule the task we woke last (assuming it failed
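On a kernel built with CONFIG_SCHED_DEBUG, the feature bits this hunk adds or re-defaults (PLACE_DEADLINE_WAKEUP, RUN_TO_PARITY) can be flipped at runtime through the debugfs features file, which takes one token per write and clears a bit when the token carries a NO_ prefix. A hedged usage sketch in C, assuming debugfs is mounted at /sys/kernel/debug and the program runs as root:

#include <stdio.h>

/* Write a single feature token to the scheduler features file. */
static int set_sched_feat(const char *tok)
{
	FILE *f = fopen("/sys/kernel/debug/sched/features", "w");
	if (!f) {
		perror("fopen");
		return -1;
	}
	int rc = fputs(tok, f) < 0 ? -1 : 0;
	if (fclose(f) != 0)
		rc = -1;
	return rc;
}

int main(void)
{
	/* Example: undo the two BORE re-defaults at runtime. */
	if (set_sched_feat("RUN_TO_PARITY"))
		return 1;
	if (set_sched_feat("NO_PLACE_DEADLINE_WAKEUP"))
		return 1;
	return 0;
}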
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index ae50f212775e5..8c976d27f6e9c 100644
+index ae50f2127..8c976d27f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1967,7 +1967,11 @@ static inline void dirty_sched_domain_sysctl(int cpu)
@@ -836,5 +881,5 @@ index ae50f212775e5..8c976d27f6e9c 100644
#ifdef CONFIG_SCHED_DEBUG
extern int sysctl_resched_latency_warn_ms;
--
-2.45.1
+2.45.2.606.g9005149a4a