author     Jan200101 <sentrycraft123@gmail.com>  2024-04-06 17:05:32 +0200
committer  Jan200101 <sentrycraft123@gmail.com>  2024-04-06 17:05:32 +0200
commit     684f5ef56790771b425c7007c9dfcfbd8ea1a300 (patch)
tree       118298a92be197f4a59d03cf2a5b447cbe9b9354 /SOURCES/cachy-bore.patch
parent     c0c9b770e4f24e17886587762c42194b6e524720 (diff)
download   kernel-fsync-684f5ef56790771b425c7007c9dfcfbd8ea1a300.tar.gz
           kernel-fsync-684f5ef56790771b425c7007c9dfcfbd8ea1a300.zip
kernel 6.8.2
Diffstat (limited to 'SOURCES/cachy-bore.patch')
-rw-r--r--  SOURCES/cachy-bore.patch | 284
1 file changed, 54 insertions(+), 230 deletions(-)
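
This update rebases the BORE patch from 4.5.1 to 5.0.3 for kernel 6.8.2. The 5.0.3 revision drops the avg_vruntime bookkeeping (burst_load, on_cfs_rq) and the sched_burst_score_rounding and sched_vlag_deviation_limit tunables; instead, update_burst_score() now folds the burst score into the task's nice-level index and applies it through reweight_task(), as the fair.c hunks below show. A minimal userspace sketch of that mapping, using the constants from the hunk (the helper and its demo loop are illustrative, not kernel code):

/* Hedged sketch: models how BORE 5.0.3's update_burst_score() maps a
 * burst penalty onto an effective nice level. prio_index mirrors
 * p->static_prio - MAX_RT_PRIO (0..39); the clamp at 39 and the
 * penalty >> 2 shift are taken verbatim from the hunk below. */
#include <stdio.h>

static unsigned int effective_prio(unsigned int prio_index, unsigned char burst_penalty)
{
	unsigned int burst_score = burst_penalty >> 2;  /* se->burst_score = se->burst_penalty >> 2 */
	unsigned int p = prio_index + burst_score;
	return p < 39 ? p : 39;                         /* min(39, prio + se->burst_score) */
}

int main(void)
{
	/* A nice-0 task sits at index 20; a growing burst penalty demotes it
	 * toward nice +19, where reweight_task() would shrink its weight. */
	for (unsigned int penalty = 0; penalty <= 80; penalty += 20)
		printf("penalty %3u -> prio index %2u (nice %+d)\n", penalty,
		       effective_prio(20, (unsigned char)penalty),
		       (int)effective_prio(20, (unsigned char)penalty) - 20);
	return 0;
}
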
diff --git a/SOURCES/cachy-bore.patch b/SOURCES/cachy-bore.patch
index 9ff833d..711ded8 100644
--- a/SOURCES/cachy-bore.patch
+++ b/SOURCES/cachy-bore.patch
@@ -1,24 +1,24 @@
-From ed828209e2e391d7155e153267df515ffffdefb2 Mon Sep 17 00:00:00 2001
+From c75c12077f2f8dfa7c763f434f746727b8a3b9e6 Mon Sep 17 00:00:00 2001
From: Piotr Gorski <lucjan.lucjanov@gmail.com>
-Date: Mon, 4 Mar 2024 12:48:19 +0100
+Date: Tue, 26 Mar 2024 08:10:41 +0100
Subject: [PATCH] bore
Signed-off-by: Piotr Gorski <lucjan.lucjanov@gmail.com>
---
- include/linux/sched.h | 12 ++
- init/Kconfig | 19 +++
- kernel/sched/core.c | 148 ++++++++++++++++++++
- kernel/sched/debug.c | 61 +++++++-
- kernel/sched/fair.c | 302 ++++++++++++++++++++++++++++++++++++++--
+ include/linux/sched.h | 10 ++
+ init/Kconfig | 17 ++++
+ kernel/sched/core.c | 144 ++++++++++++++++++++++++++
+ kernel/sched/debug.c | 60 ++++++++++-
+ kernel/sched/fair.c | 218 +++++++++++++++++++++++++++++++++++++++-
kernel/sched/features.h | 4 +
- kernel/sched/sched.h | 7 +
- 7 files changed, 538 insertions(+), 15 deletions(-)
+ kernel/sched/sched.h | 7 ++
+ 7 files changed, 457 insertions(+), 3 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 292c31697..5abe14fc1 100644
+index ffe8f618a..0ab0b0424 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -562,6 +562,18 @@ struct sched_entity {
+@@ -547,6 +547,16 @@ struct sched_entity {
u64 sum_exec_runtime;
u64 prev_sum_exec_runtime;
u64 vruntime;
@@ -28,8 +28,6 @@ index 292c31697..5abe14fc1 100644
+ u8 curr_burst_penalty;
+ u8 burst_penalty;
+ u8 burst_score;
-+ u32 burst_load;
-+ bool on_cfs_rq;
+ u8 child_burst;
+ u32 child_burst_cnt;
+ u64 child_burst_last_cached;
@@ -38,10 +36,10 @@ index 292c31697..5abe14fc1 100644
u64 slice;
diff --git a/init/Kconfig b/init/Kconfig
-index bfde8189c..2258b8ef5 100644
+index bee58f746..13427dbb4 100644
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1267,6 +1267,25 @@ config CHECKPOINT_RESTORE
+@@ -1279,6 +1279,23 @@ config CHECKPOINT_RESTORE
If unsure, say N here.
@@ -60,18 +58,16 @@ index bfde8189c..2258b8ef5 100644
+ With a little impact to scheduling fairness, it may improve
+ responsiveness especially under heavy background workload.
+
-+ You can turn it off by setting the sysctl kernel.sched_bore = 0.
-+
+ If unsure, say Y here.
+
config SCHED_AUTOGROUP
bool "Automatic process group scheduling"
select CGROUPS
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index a708d225c..31f5e8a73 100644
+index 9116bcc90..fc3d7b48e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4480,6 +4480,143 @@ int wake_up_state(struct task_struct *p, unsigned int state)
+@@ -4507,6 +4507,139 @@ int wake_up_state(struct task_struct *p, unsigned int state)
return try_to_wake_up(p, state, 0);
}
@@ -86,18 +82,14 @@ index a708d225c..31f5e8a73 100644
+ init_task.se.curr_burst_penalty = 0;
+ init_task.se.burst_penalty = 0;
+ init_task.se.burst_score = 0;
-+ init_task.se.on_cfs_rq = false;
+ init_task.se.child_burst_last_cached = 0;
-+ init_task.se.burst_load = 0;
+}
+
+void inline sched_fork_bore(struct task_struct *p) {
+ p->se.burst_time = 0;
+ p->se.curr_burst_penalty = 0;
+ p->se.burst_score = 0;
-+ p->se.on_cfs_rq = false;
+ p->se.child_burst_last_cached = 0;
-+ p->se.burst_load = 0;
+}
+
+static u32 count_child_tasks(struct task_struct *p) {
@@ -206,7 +198,7 @@ index a708d225c..31f5e8a73 100644
+}
+
+static void sched_post_fork_bore(struct task_struct *p) {
-+ if (p->sched_class == &fair_sched_class && likely(sched_bore))
++ if (p->sched_class == &fair_sched_class)
+ inherit_burst(p);
+ p->se.burst_penalty = p->se.prev_burst_penalty;
+}
@@ -215,7 +207,7 @@ index a708d225c..31f5e8a73 100644
/*
* Perform scheduler related setup for a newly forked process p.
* p is forked by current.
-@@ -4496,6 +4633,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
+@@ -4523,6 +4656,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
p->se.vruntime = 0;
@@ -225,7 +217,7 @@ index a708d225c..31f5e8a73 100644
p->se.vlag = 0;
p->se.slice = sysctl_sched_base_slice;
INIT_LIST_HEAD(&p->se.group_node);
-@@ -4815,6 +4955,9 @@ void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
+@@ -4839,6 +4975,9 @@ void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
void sched_post_fork(struct task_struct *p)
{
@@ -235,20 +227,20 @@ index a708d225c..31f5e8a73 100644
uclamp_post_fork(p);
}
-@@ -9885,6 +10028,11 @@ void __init sched_init(void)
+@@ -9910,6 +10049,11 @@ void __init sched_init(void)
BUG_ON(&dl_sched_class != &stop_sched_class + 1);
#endif
+#ifdef CONFIG_SCHED_BORE
+ sched_init_bore();
-+ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 4.5.1 by Masahito Suzuki");
++ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 5.0.3 by Masahito Suzuki");
+#endif // CONFIG_SCHED_BORE
+
wait_bit_init();
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index 4580a4507..033cbe7d3 100644
+index 8d5d98a58..b17861261 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -167,7 +167,52 @@ static const struct file_operations sched_feat_fops = {
@@ -344,19 +336,18 @@ index 4580a4507..033cbe7d3 100644
#ifdef CONFIG_NUMA_BALANCING
SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
-@@ -1063,6 +1118,10 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
+@@ -1068,6 +1123,9 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P(se.load.weight);
#ifdef CONFIG_SMP
+#ifdef CONFIG_SCHED_BORE
-+ P(se.burst_load);
+ P(se.burst_score);
+#endif // CONFIG_SCHED_BORE
P(se.avg.load_sum);
P(se.avg.runnable_sum);
P(se.avg.util_sum);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 7ac9f4b1d..8f7694f05 100644
+index 533547e3c..ae55f46a8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -19,6 +19,9 @@
@@ -369,7 +360,7 @@ index 7ac9f4b1d..8f7694f05 100644
*/
#include <linux/energy_model.h>
#include <linux/mmap_lock.h>
-@@ -64,20 +67,129 @@
+@@ -64,20 +67,126 @@
* SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
* SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
*
@@ -403,14 +394,12 @@ index 7ac9f4b1d..8f7694f05 100644
+#ifdef CONFIG_SCHED_BORE
+u8 __read_mostly sched_bore = 1;
-+u8 __read_mostly sched_burst_score_rounding = 0;
+u8 __read_mostly sched_burst_smoothness_long = 1;
+u8 __read_mostly sched_burst_smoothness_short = 0;
+u8 __read_mostly sched_burst_fork_atavistic = 2;
+u8 __read_mostly sched_burst_penalty_offset = 22;
+uint __read_mostly sched_burst_penalty_scale = 1280;
+uint __read_mostly sched_burst_cache_lifetime = 60000000;
-+u8 __read_mostly sched_vlag_deviation_limit = 11;
+static int __maybe_unused thirty_two = 32;
+static int __maybe_unused sixty_four = 64;
+static int __maybe_unused maxval_12_bits = 4095;
@@ -447,20 +436,19 @@ index 7ac9f4b1d..8f7694f05 100644
+ return __unscale_slice(delta, se->burst_score);
+}
+
-+static void avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se);
-+static void avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se);
++void reweight_task(struct task_struct *p, int prio);
+
+static void update_burst_score(struct sched_entity *se) {
-+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
-+ u8 prev_score = se->burst_score;
-+ u32 penalty = se->burst_penalty;
-+ if (sched_burst_score_rounding) penalty += 0x2U;
-+ se->burst_score = penalty >> 2;
++ if (!entity_is_task(se)) return;
++ struct task_struct *p = task_of(se);
++ u8 prio = p->static_prio - MAX_RT_PRIO;
++ u8 prev_prio = min(39, prio + se->burst_score);
+
-+ if ((se->burst_score != prev_score) && se->on_cfs_rq) {
-+ avg_vruntime_sub(cfs_rq, se);
-+ avg_vruntime_add(cfs_rq, se);
-+ }
++ se->burst_score = se->burst_penalty >> 2;
++
++ u8 new_prio = min(39, prio + se->burst_score);
++ if (new_prio != prev_prio)
++ reweight_task(p, new_prio);
+}
+
+static void update_burst_penalty(struct sched_entity *se) {
@@ -501,7 +489,7 @@ index 7ac9f4b1d..8f7694f05 100644
int sched_thermal_decay_shift;
static int __init setup_sched_thermal_decay_shift(char *str)
{
-@@ -137,6 +249,87 @@ static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
+@@ -137,6 +246,69 @@ static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
#ifdef CONFIG_SYSCTL
static struct ctl_table sched_fair_sysctls[] = {
@@ -512,16 +500,7 @@ index 7ac9f4b1d..8f7694f05 100644
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
-+ .extra1 = SYSCTL_ZERO,
-+ .extra2 = SYSCTL_ONE,
-+ },
-+ {
-+ .procname = "sched_burst_score_rounding",
-+ .data = &sched_burst_score_rounding,
-+ .maxlen = sizeof(u8),
-+ .mode = 0644,
-+ .proc_handler = proc_dou8vec_minmax,
-+ .extra1 = SYSCTL_ZERO,
++ .extra1 = SYSCTL_ONE,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
@@ -576,20 +555,11 @@ index 7ac9f4b1d..8f7694f05 100644
+ .mode = 0644,
+ .proc_handler = proc_douintvec,
+ },
-+ {
-+ .procname = "sched_vlag_deviation_limit",
-+ .data = &sched_vlag_deviation_limit,
-+ .maxlen = sizeof(u8),
-+ .mode = 0644,
-+ .proc_handler = proc_dou8vec_minmax,
-+ .extra1 = SYSCTL_ZERO,
-+ .extra2 = &thirty_two,
-+ },
+#endif // CONFIG_SCHED_BORE
#ifdef CONFIG_CFS_BANDWIDTH
{
.procname = "sched_cfs_bandwidth_slice_us",
-@@ -195,6 +388,13 @@ static inline void update_load_set(struct load_weight *lw, unsigned long w)
+@@ -195,6 +367,13 @@ static inline void update_load_set(struct load_weight *lw, unsigned long w)
*
* This idea comes from the SD scheduler of Con Kolivas:
*/
@@ -603,7 +573,7 @@ index 7ac9f4b1d..8f7694f05 100644
static unsigned int get_update_sysctl_factor(void)
{
unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
-@@ -225,6 +425,7 @@ static void update_sysctl(void)
+@@ -225,6 +404,7 @@ static void update_sysctl(void)
SET_SYSCTL(sched_base_slice);
#undef SET_SYSCTL
}
@@ -611,130 +581,17 @@ index 7ac9f4b1d..8f7694f05 100644
void __init sched_init_granularity(void)
{
-@@ -298,6 +499,9 @@ static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
- if (unlikely(se->load.weight != NICE_0_LOAD))
- delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
-
-+#ifdef CONFIG_SCHED_BORE
-+ if (likely(sched_bore)) delta = scale_slice(delta, se);
-+#endif // CONFIG_SCHED_BORE
- return delta;
- }
-
-@@ -620,10 +824,26 @@ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
- *
- * As measured, the max (key * weight) value was ~44 bits for a kernel build.
- */
-+#if !defined(CONFIG_SCHED_BORE)
-+#define entity_weight(se) scale_load_down(se->load.weight)
-+#else // CONFIG_SCHED_BORE
-+static unsigned long entity_weight(struct sched_entity *se) {
-+ unsigned long weight = se->load.weight;
-+ if (likely(sched_bore)) weight = unscale_slice(weight, se);
-+#ifdef CONFIG_64BIT
-+ weight >>= SCHED_FIXEDPOINT_SHIFT - 3;
-+#endif // CONFIG_64BIT
-+ return weight;
-+}
-+#endif // CONFIG_SCHED_BORE
-+
- static void
- avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
-- unsigned long weight = scale_load_down(se->load.weight);
-+ unsigned long weight = entity_weight(se);
-+#ifdef CONFIG_SCHED_BORE
-+ se->burst_load = weight;
-+#endif // CONFIG_SCHED_BORE
- s64 key = entity_key(cfs_rq, se);
-
- cfs_rq->avg_vruntime += key * weight;
-@@ -633,7 +853,12 @@ avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
- static void
- avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
-- unsigned long weight = scale_load_down(se->load.weight);
-+#if !defined(CONFIG_SCHED_BORE)
-+ unsigned long weight = entity_weight(se);
-+#else // CONFIG_SCHED_BORE
-+ unsigned long weight = se->burst_load;
-+ se->burst_load = 0;
-+#endif // CONFIG_SCHED_BORE
- s64 key = entity_key(cfs_rq, se);
-
- cfs_rq->avg_vruntime -= key * weight;
-@@ -653,14 +878,14 @@ void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
- * Specifically: avg_runtime() + 0 must result in entity_eligible() := true
- * For this to be so, the result of this function must have a left bias.
- */
--u64 avg_vruntime(struct cfs_rq *cfs_rq)
-+static u64 avg_key(struct cfs_rq *cfs_rq)
- {
- struct sched_entity *curr = cfs_rq->curr;
- s64 avg = cfs_rq->avg_vruntime;
- long load = cfs_rq->avg_load;
-
- if (curr && curr->on_rq) {
-- unsigned long weight = scale_load_down(curr->load.weight);
-+ unsigned long weight = entity_weight(curr);
-
- avg += entity_key(cfs_rq, curr) * weight;
- load += weight;
-@@ -670,12 +895,15 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
- /* sign flips effective floor / ceil */
- if (avg < 0)
- avg -= (load - 1);
-- avg = div_s64(avg, load);
-+ avg = div64_s64(avg, load);
- }
-
-- return cfs_rq->min_vruntime + avg;
-+ return avg;
- }
-
-+u64 avg_vruntime(struct cfs_rq *cfs_rq) {
-+ return cfs_rq->min_vruntime + avg_key(cfs_rq);
-+}
- /*
- * lag_i = S - s_i = w_i * (V - v_i)
- *
-@@ -700,6 +928,9 @@ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -704,6 +884,9 @@ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
lag = avg_vruntime(cfs_rq) - se->vruntime;
limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
+#ifdef CONFIG_SCHED_BORE
-+ if (likely(sched_bore)) limit >>= 1;
++ limit >>= 1;
+#endif // CONFIG_SCHED_BORE
se->vlag = clamp(lag, -limit, limit);
}
-@@ -727,7 +958,7 @@ int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
- long load = cfs_rq->avg_load;
-
- if (curr && curr->on_rq) {
-- unsigned long weight = scale_load_down(curr->load.weight);
-+ unsigned long weight = entity_weight(curr);
-
- avg += entity_key(cfs_rq, curr) * weight;
- load += weight;
-@@ -819,10 +1050,16 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
- se->min_deadline = se->deadline;
- rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
- __entity_less, &min_deadline_cb);
-+#ifdef CONFIG_SCHED_BORE
-+ se->on_cfs_rq = true;
-+#endif // CONFIG_SCHED_BORE
- }
-
- static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
-+#ifdef CONFIG_SCHED_BORE
-+ se->on_cfs_rq = false;
-+#endif // CONFIG_SCHED_BORE
- rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
- &min_deadline_cb);
- avg_vruntime_sub(cfs_rq, se);
-@@ -981,6 +1218,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
+@@ -955,6 +1138,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
* Scheduling class statistics methods:
*/
#ifdef CONFIG_SMP
@@ -742,7 +599,7 @@ index 7ac9f4b1d..8f7694f05 100644
int sched_update_scaling(void)
{
unsigned int factor = get_update_sysctl_factor();
-@@ -992,6 +1230,7 @@ int sched_update_scaling(void)
+@@ -966,6 +1150,7 @@ int sched_update_scaling(void)
return 0;
}
@@ -750,9 +607,9 @@ index 7ac9f4b1d..8f7694f05 100644
#endif
#endif
-@@ -1158,7 +1397,13 @@ static void update_curr(struct cfs_rq *cfs_rq)
- curr->sum_exec_runtime += delta_exec;
- schedstat_add(cfs_rq->exec_clock, delta_exec);
+@@ -1165,7 +1350,13 @@ static void update_curr(struct cfs_rq *cfs_rq)
+ if (unlikely(delta_exec <= 0))
+ return;
+#ifdef CONFIG_SCHED_BORE
+ curr->burst_time += delta_exec;
@@ -764,50 +621,17 @@ index 7ac9f4b1d..8f7694f05 100644
update_deadline(cfs_rq, curr);
update_min_vruntime(cfs_rq);
-@@ -5164,8 +5409,8 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
- static void
- place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- {
-- u64 vslice, vruntime = avg_vruntime(cfs_rq);
-- s64 lag = 0;
-+ s64 lag = 0, key = avg_key(cfs_rq);
-+ u64 vslice, vruntime = cfs_rq->min_vruntime + key;
-
- se->slice = sysctl_sched_base_slice;
- vslice = calc_delta_fair(se->slice, se);
-@@ -5178,6 +5423,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -5171,6 +5362,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*
* EEVDF: placement strategy #1 / #2
*/
+#ifdef CONFIG_SCHED_BORE
-+ if (unlikely(!sched_bore) || se->vlag)
++ if (se->vlag)
+#endif // CONFIG_SCHED_BORE
if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
struct sched_entity *curr = cfs_rq->curr;
unsigned long load;
-@@ -5238,12 +5486,18 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- */
- load = cfs_rq->avg_load;
- if (curr && curr->on_rq)
-- load += scale_load_down(curr->load.weight);
-+ load += entity_weight(curr);
-
-- lag *= load + scale_load_down(se->load.weight);
-+ lag *= load + entity_weight(se);
- if (WARN_ON_ONCE(!load))
- load = 1;
-- lag = div_s64(lag, load);
-+ lag = div64_s64(lag, load);
-+#ifdef CONFIG_SCHED_BORE
-+ if (likely(sched_bore)) {
-+ s64 limit = vslice << sched_vlag_deviation_limit;
-+ lag = clamp(lag, -limit, limit);
-+ }
-+#endif // CONFIG_SCHED_BORE
- }
-
- se->vruntime = vruntime - lag;
-@@ -6810,6 +7064,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+@@ -6803,6 +6997,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bool was_sched_idle = sched_idle_rq(rq);
util_est_dequeue(&rq->cfs, p);
@@ -822,7 +646,7 @@ index 7ac9f4b1d..8f7694f05 100644
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
-@@ -8545,16 +8807,25 @@ static void yield_task_fair(struct rq *rq)
+@@ -8552,16 +8754,25 @@ static void yield_task_fair(struct rq *rq)
/*
* Are we the only task in the tree?
*/
@@ -848,7 +672,7 @@ index 7ac9f4b1d..8f7694f05 100644
/*
* Tell update_rq_clock() that we've just updated,
* so we don't do microscopic update in schedule()
-@@ -12644,6 +12915,9 @@ static void task_fork_fair(struct task_struct *p)
+@@ -12651,6 +12862,9 @@ static void task_fork_fair(struct task_struct *p)
curr = cfs_rq->curr;
if (curr)
update_curr(cfs_rq);
@@ -859,7 +683,7 @@ index 7ac9f4b1d..8f7694f05 100644
rq_unlock(rq, &rf);
}
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
-index a3ddf84de..5adea65fa 100644
+index 143f55df8..3f0fe409f 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -6,7 +6,11 @@
@@ -875,10 +699,10 @@ index a3ddf84de..5adea65fa 100644
/*
* Prefer to schedule the task we woke last (assuming it failed
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index 2e5a95486..fc4ec9ebb 100644
+index 001fe047b..da3ad1d4e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1929,7 +1929,11 @@ static inline void dirty_sched_domain_sysctl(int cpu)
+@@ -1965,7 +1965,11 @@ static inline void dirty_sched_domain_sysctl(int cpu)
}
#endif
@@ -890,7 +714,7 @@ index 2e5a95486..fc4ec9ebb 100644
static inline const struct cpumask *task_user_cpus(struct task_struct *p)
{
-@@ -2509,6 +2513,9 @@ extern const_debug unsigned int sysctl_sched_nr_migrate;
+@@ -2552,6 +2556,9 @@ extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_base_slice;
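
Note also that the sched_bore sysctl's lower bound moves from SYSCTL_ZERO to SYSCTL_ONE in this revision, so writes of 0 are rejected and the modification stays enabled at runtime, consistent with the removal of the "You can turn it off" Kconfig note. A hedged userspace probe for the tunables the patch exposes; the names mirror the __read_mostly variables in the fair.c hunk, and the /proc/sys/kernel/ procnames are assumed to match:

/* Hedged sketch: reads the BORE tunables this patch registers under
 * /proc/sys/kernel/. Purely illustrative userspace code. */
#include <stdio.h>

static void show(const char *name)
{
	char path[128], buf[64];
	snprintf(path, sizeof(path), "/proc/sys/kernel/%s", name);
	FILE *f = fopen(path, "r");
	if (!f) {
		printf("%-32s <not present>\n", name);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%-32s %s", name, buf);  /* proc values end in '\n' */
	fclose(f);
}

int main(void)
{
	show("sched_bore");                   /* clamped to 1 in 5.0.3 */
	show("sched_burst_smoothness_long");
	show("sched_burst_smoothness_short");
	show("sched_burst_fork_atavistic");
	show("sched_burst_penalty_offset");
	show("sched_burst_penalty_scale");
	show("sched_burst_cache_lifetime");
	return 0;
}
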