author     Sentry <sentry@Sentry-Desktop-Fedora.local>  2020-11-06 18:36:05 +0100
committer  Jan200101 <sentrycraft123@gmail.com>         2020-12-05 19:40:07 +0100
commit     2323591ec35bad9ae5469f48a5bb697254f97459 (patch)
tree       2535d388c81e9745ce4b812b269826825f999102
parent     341c2b7474ae9534963c9001cae5b8e6ecefe0f2 (diff)
download   kernel-fsync-2323591ec35bad9ae5469f48a5bb697254f97459.tar.gz
           kernel-fsync-2323591ec35bad9ae5469f48a5bb697254f97459.zip

kernel 5.8.18
-rw-r--r--  SOURCES/0003-glitched-base.patch                                   447
-rw-r--r--  SOURCES/fsync.patch (renamed from SOURCES/0007-v5.8-fsync.patch)     0
-rw-r--r--  SOURCES/zen.patch                                                  308
-rw-r--r--  SPECS/kernel.spec                                                   20
4 files changed, 320 insertions(+), 455 deletions(-)
diff --git a/SOURCES/0003-glitched-base.patch b/SOURCES/0003-glitched-base.patch
deleted file mode 100644
index 60e1d44..0000000
--- a/SOURCES/0003-glitched-base.patch
+++ /dev/null
@@ -1,447 +0,0 @@
-From 43e519023ea4a79fc6a771bb9ebbb0cfe5fa39bc Mon Sep 17 00:00:00 2001
-From: Sentry <sentrycraft123@gmail.com>
-Date: Sun, 12 Jul 2020 20:43:50 +0200
-Subject: [PATCH] glitched base
-
----
- .../admin-guide/kernel-parameters.txt | 3 ++
- block/elevator.c | 6 ++--
- drivers/cpufreq/intel_pstate.c | 2 ++
- drivers/infiniband/core/addr.c | 1 +
- drivers/tty/Kconfig | 13 ++++++++
- fs/dcache.c | 2 +-
- include/linux/blkdev.h | 7 +++-
- include/linux/compiler_types.h | 4 +++
- include/linux/mm.h | 5 ++-
- include/uapi/linux/vt.h | 15 ++++++++-
- init/Kconfig | 33 ++++++++++++++++++-
- kernel/sched/core.c | 6 ++--
- kernel/sched/fair.c | 25 ++++++++++++++
- mm/huge_memory.c | 4 +++
- mm/page-writeback.c | 8 +++++
- mm/page_alloc.c | 2 +-
- net/ipv4/Kconfig | 4 +++
- net/sched/Kconfig | 4 +++
- scripts/mkcompile_h | 4 +--
- scripts/setlocalversion | 2 +-
- 20 files changed, 133 insertions(+), 17 deletions(-)
-
-diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index 7bc83f3d9..d31f0323c 100644
---- a/Documentation/admin-guide/kernel-parameters.txt
-+++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -1826,6 +1826,9 @@
- disable
- Do not enable intel_pstate as the default
- scaling driver for the supported processors
-+ enable
-+ Enable intel_pstate in-case "disable" was passed
-+ previously in the kernel boot parameters
- passive
- Use intel_pstate as a scaling driver, but configure it
- to work with generic cpufreq governors (instead of
-diff --git a/block/elevator.c b/block/elevator.c
-index 4eab3d70e..79669aa39 100644
---- a/block/elevator.c
-+++ b/block/elevator.c
-@@ -623,15 +623,15 @@ static inline bool elv_support_iosched(struct request_queue *q)
- }
-
- /*
-- * For single queue devices, default to using mq-deadline. If we have multiple
-- * queues or mq-deadline is not available, default to "none".
-+ * For single queue devices, default to using bfq. If we have multiple
-+ * queues or bfq is not available, default to "none".
- */
- static struct elevator_type *elevator_get_default(struct request_queue *q)
- {
- if (q->nr_hw_queues != 1)
- return NULL;
-
-- return elevator_get(q, "mq-deadline", false);
-+ return elevator_get(q, "bfq", false);
- }
-
- /*
-diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
-index 4d3429b20..8bf9e0adf 100644
---- a/drivers/cpufreq/intel_pstate.c
-+++ b/drivers/cpufreq/intel_pstate.c
-@@ -2824,6 +2824,8 @@ static int __init intel_pstate_setup(char *str)
- pr_info("HWP disabled\n");
- no_hwp = 1;
- }
-+ if (!strcmp(str, "enable"))
-+ no_load = 0;
- if (!strcmp(str, "force"))
- force_load = 1;
- if (!strcmp(str, "hwp_only"))
-diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
-index 1753a9801..184b30282 100644
---- a/drivers/infiniband/core/addr.c
-+++ b/drivers/infiniband/core/addr.c
-@@ -816,6 +816,7 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
- union {
- struct sockaddr_in _sockaddr_in;
- struct sockaddr_in6 _sockaddr_in6;
-+ struct sockaddr_ib _sockaddr_ib;
- } sgid_addr, dgid_addr;
- int ret;
-
-diff --git a/fs/dcache.c b/fs/dcache.c
-index b280e07e1..74e90f940 100644
---- a/fs/dcache.c
-+++ b/fs/dcache.c
-@@ -71,7 +71,7 @@
- * If no ancestor relationship:
- * arbitrary, since it's serialized on rename_lock
- */
--int sysctl_vfs_cache_pressure __read_mostly = 100;
-+int sysctl_vfs_cache_pressure __read_mostly = 50;
- EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
-
- __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
-diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
-index 32868fbed..f028c8070 100644
---- a/include/linux/blkdev.h
-+++ b/include/linux/blkdev.h
-@@ -45,7 +45,11 @@ struct blk_queue_stats;
- struct blk_stat_callback;
-
- #define BLKDEV_MIN_RQ 4
-+#ifdef CONFIG_ZENIFY
-+#define BLKDEV_MAX_RQ 512
-+#else
- #define BLKDEV_MAX_RQ 128 /* Default maximum */
-+#endif
-
- /* Must be consistent with blk_mq_poll_stats_bkt() */
- #define BLK_MQ_POLL_STATS_BKTS 16
-@@ -614,7 +618,8 @@ struct request_queue {
- #define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */
-
- #define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
-- (1 << QUEUE_FLAG_SAME_COMP))
-+ (1 << QUEUE_FLAG_SAME_COMP) | \
-+ (1 << QUEUE_FLAG_SAME_FORCE))
-
- void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
- void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
-diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
-index e970f97a7..f3aa9e6c4 100644
---- a/include/linux/compiler_types.h
-+++ b/include/linux/compiler_types.h
-@@ -207,6 +207,10 @@ struct ftrace_likely_data {
- # define __no_fgcse
- #endif
-
-+#ifndef asm_volatile_goto
-+#define asm_volatile_goto(x...) asm goto(x)
-+#endif
-+
- /* Are two types/vars the same type (ignoring qualifiers)? */
- #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
-
-diff --git a/include/linux/mm.h b/include/linux/mm.h
-index f3fe73718..8fb564dbb 100644
---- a/include/linux/mm.h
-+++ b/include/linux/mm.h
-@@ -189,8 +189,7 @@ static inline void __mm_zero_struct_page(struct page *page)
- * not a hard limit any more. Although some userspace tools can be surprised by
- * that.
- */
--#define MAPCOUNT_ELF_CORE_MARGIN (5)
--#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
-+#define DEFAULT_MAX_MAP_COUNT (262144)
-
- extern int sysctl_max_map_count;
-
-@@ -2613,7 +2612,7 @@ int __must_check write_one_page(struct page *page);
- void task_dirty_inc(struct task_struct *tsk);
-
- /* readahead.c */
--#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE)
-+#define VM_READAHEAD_PAGES (SZ_2M / PAGE_SIZE)
-
- int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
- pgoff_t offset, unsigned long nr_to_read);
-diff --git a/init/Kconfig b/init/Kconfig
-index 74a5ac656..bc63ba750 100644
---- a/init/Kconfig
-+++ b/init/Kconfig
-@@ -61,6 +61,38 @@ config THREAD_INFO_IN_TASK
-
- menu "General setup"
-
-+config ZENIFY
-+ bool "A selection of patches from Zen/Liquorix kernel and additional tweaks for a better gaming experience"
-+ default y
-+ help
-+ Tunes the kernel for responsiveness at the cost of throughput and power usage.
-+
-+ --- Virtual Memory Subsystem ---------------------------
-+
-+ Mem dirty before bg writeback..: 10 % -> 20 %
-+ Mem dirty before sync writeback: 20 % -> 50 %
-+
-+ --- Block Layer ----------------------------------------
-+
-+ Queue depth...............: 128 -> 512
-+ Default MQ scheduler......: mq-deadline -> bfq
-+
-+ --- CFS CPU Scheduler ----------------------------------
-+
-+ Scheduling latency.............: 6 -> 3 ms
-+ Minimal granularity............: 0.75 -> 0.3 ms
-+ Wakeup granularity.............: 1 -> 0.5 ms
-+ CPU migration cost.............: 0.5 -> 0.25 ms
-+ Bandwidth slice size...........: 5 -> 3 ms
-+ Ondemand fine upscaling limit..: 95 % -> 85 %
-+
-+ --- MuQSS CPU Scheduler --------------------------------
-+
-+ Scheduling interval............: 6 -> 3 ms
-+ ISO task max realtime use......: 70 % -> 25 %
-+ Ondemand coarse upscaling limit: 80 % -> 45 %
-+ Ondemand fine upscaling limit..: 95 % -> 45 %
-+
- config BROKEN
- bool
-
-@@ -1240,7 +1272,6 @@ config CC_OPTIMIZE_FOR_PERFORMANCE
-
- config CC_OPTIMIZE_FOR_PERFORMANCE_O3
- bool "Optimize more for performance (-O3)"
-- depends on ARC
- help
- Choosing this option will pass "-O3" to your compiler to optimize
- the kernel yet more for performance.
-diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 9a2fbf98f..630c93d66 100644
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -57,7 +57,7 @@ const_debug unsigned int sysctl_sched_features =
- * Number of tasks to iterate in a single balance run.
- * Limited because this is done with IRQs disabled.
- */
--const_debug unsigned int sysctl_sched_nr_migrate = 32;
-+const_debug unsigned int sysctl_sched_nr_migrate = 128;
-
- /*
- * period over which we measure -rt task CPU usage in us.
-@@ -69,9 +69,9 @@ __read_mostly int scheduler_running;
-
- /*
- * part of the period that we allow rt tasks to run in us.
-- * default: 0.95s
-+ * XanMod default: 0.98s
- */
--int sysctl_sched_rt_runtime = 950000;
-+int sysctl_sched_rt_runtime = 980000;
-
- /*
- * __task_rq_lock - lock the rq @p resides on.
-diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index da3e5b547..0a8dc0e64 100644
---- a/kernel/sched/fair.c
-+++ b/kernel/sched/fair.c
-@@ -37,8 +37,13 @@
- *
- * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
- */
-+#ifdef CONFIG_ZENIFY
-+unsigned int sysctl_sched_latency = 3000000ULL;
-+static unsigned int normalized_sysctl_sched_latency = 3000000ULL;
-+#else
- unsigned int sysctl_sched_latency = 6000000ULL;
- static unsigned int normalized_sysctl_sched_latency = 6000000ULL;
-+#endif
-
- /*
- * The initial- and re-scaling of tunables is configurable
-@@ -58,13 +63,22 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_L
- *
- * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
- */
-+#ifdef CONFIG_ZENIFY
-+unsigned int sysctl_sched_min_granularity = 300000ULL;
-+static unsigned int normalized_sysctl_sched_min_granularity = 300000ULL;
-+#else
- unsigned int sysctl_sched_min_granularity = 750000ULL;
- static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
-+#endif
-
- /*
- * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
- */
-+#ifdef CONFIG_ZENIFY
-+static unsigned int sched_nr_latency = 10;
-+#else
- static unsigned int sched_nr_latency = 8;
-+#endif
-
- /*
- * After fork, child runs first. If set to 0 (default) then
-@@ -81,10 +95,17 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
- *
- * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
- */
-+#ifdef CONFIG_ZENIFY
-+unsigned int sysctl_sched_wakeup_granularity = 500000UL;
-+static unsigned int normalized_sysctl_sched_wakeup_granularity = 500000UL;
-+
-+const_debug unsigned int sysctl_sched_migration_cost = 50000UL;
-+#else
- unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
- static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
-
- const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
-+#endif
-
- int sched_thermal_decay_shift;
- static int __init setup_sched_thermal_decay_shift(char *str)
-@@ -128,8 +149,12 @@ int __weak arch_asym_cpu_priority(int cpu)
- *
- * (default: 5 msec, units: microseconds)
- */
-+#ifdef CONFIG_ZENIFY
-+unsigned int sysctl_sched_cfs_bandwidth_slice = 3000UL;
-+#else
- unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
- #endif
-+#endif
-
- static inline void update_load_add(struct load_weight *lw, unsigned long inc)
- {
-diff --git a/mm/huge_memory.c b/mm/huge_memory.c
-index 6ecd10451..8a3bdff2c 100644
---- a/mm/huge_memory.c
-+++ b/mm/huge_memory.c
-@@ -53,7 +53,11 @@ unsigned long transparent_hugepage_flags __read_mostly =
- #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
- (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
- #endif
-+#ifdef CONFIG_AVL_INTERACTIVE
-+ (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG)|
-+#else
- (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
-+#endif
- (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
- (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
-
-diff --git a/mm/page-writeback.c b/mm/page-writeback.c
-index 7326b54ab..7baec8caf 100644
---- a/mm/page-writeback.c
-+++ b/mm/page-writeback.c
-@@ -71,7 +71,11 @@ static long ratelimit_pages = 32;
- /*
- * Start background writeback (via writeback threads) at this percentage
- */
-+#ifdef CONFIG_ZENIFY
-+int dirty_background_ratio = 20;
-+#else
- int dirty_background_ratio = 10;
-+#endif
-
- /*
- * dirty_background_bytes starts at 0 (disabled) so that it is a function of
-@@ -88,7 +92,11 @@ int vm_highmem_is_dirtyable;
- /*
- * The generator of dirty data starts writeback at this percentage
- */
-+#ifdef CONFIG_ZENIFY
-+int vm_dirty_ratio = 50;
-+#else
- int vm_dirty_ratio = 20;
-+#endif
-
- /*
- * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
-diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index 13cc65312..3e5325634 100644
---- a/mm/page_alloc.c
-+++ b/mm/page_alloc.c
-@@ -329,7 +329,7 @@ int watermark_boost_factor __read_mostly;
- #else
- int watermark_boost_factor __read_mostly = 15000;
- #endif
--int watermark_scale_factor = 10;
-+int watermark_scale_factor = 200;
-
- static unsigned long nr_kernel_pages __initdata;
- static unsigned long nr_all_pages __initdata;
-diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
-index 25a888882..888606f36 100644
---- a/net/ipv4/Kconfig
-+++ b/net/ipv4/Kconfig
-@@ -688,6 +688,9 @@ choice
- config DEFAULT_VEGAS
- bool "Vegas" if TCP_CONG_VEGAS=y
-
-+ config DEFAULT_YEAH
-+ bool "YeAH" if TCP_CONG_YEAH=y
-+
- config DEFAULT_VENO
- bool "Veno" if TCP_CONG_VENO=y
-
-@@ -721,6 +724,7 @@ config DEFAULT_TCP_CONG
- default "htcp" if DEFAULT_HTCP
- default "hybla" if DEFAULT_HYBLA
- default "vegas" if DEFAULT_VEGAS
-+ default "yeah" if DEFAULT_YEAH
- default "westwood" if DEFAULT_WESTWOOD
- default "veno" if DEFAULT_VENO
- default "reno" if DEFAULT_RENO
-diff --git a/net/sched/Kconfig b/net/sched/Kconfig
-index bfbefb7bf..8fd074d54 100644
---- a/net/sched/Kconfig
-+++ b/net/sched/Kconfig
-@@ -459,6 +459,9 @@ choice
- Select the queueing discipline that will be used by default
- for all network devices.
-
-+ config DEFAULT_CAKE
-+ bool "Common Applications Kept Enhanced" if NET_SCH_CAKE
-+
- config DEFAULT_FQ
- bool "Fair Queue" if NET_SCH_FQ
-
-@@ -478,6 +481,7 @@ endchoice
- config DEFAULT_NET_SCH
- string
- default "pfifo_fast" if DEFAULT_PFIFO_FAST
-+ default "cake" if DEFAULT_CAKE
- default "fq" if DEFAULT_FQ
- default "fq_codel" if DEFAULT_FQ_CODEL
- default "sfq" if DEFAULT_SFQ
-diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
-index 5b80a4699..a46ce3227 100755
---- a/scripts/mkcompile_h
-+++ b/scripts/mkcompile_h
-@@ -41,8 +41,8 @@ else
- fi
-
- UTS_VERSION="#$VERSION"
--CONFIG_FLAGS=""
--if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
-+CONFIG_FLAGS="TKG"
-+if [ -n "$SMP" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS SMP"; fi
- if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
- if [ -n "$PREEMPT_RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT_RT"; fi
-
-diff --git a/scripts/setlocalversion b/scripts/setlocalversion
-index 20f2efd57..0552d8b9f 100755
---- a/scripts/setlocalversion
-+++ b/scripts/setlocalversion
-@@ -54,7 +54,7 @@ scm_version()
- # If only the short version is requested, don't bother
- # running further git commands
- if $short; then
-- echo "+"
-+ # echo "+"
- return
- fi
- # If we are past a tagged commit (like
---
-2.26.2
-
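
Most of the runtime tuning lived in the patch deleted above: bfq as the single-queue elevator, a larger request queue and readahead window, vfs_cache_pressure lowered to 50, a "TKG" tag in the version banner, and extra Kconfig choices for the default TCP congestion control and qdisc. Whether a booted kernel carries these defaults can be read back from sysfs and sysctl; a minimal sketch, assuming a block device named sda and that no udev rule or tuned profile has already rewritten the values:

uname -v                                 # the mkcompile_h hunk adds "TKG" to the version banner
cat /sys/block/sda/queue/scheduler       # [bfq] expected on a single-queue device when bfq is built in
cat /sys/block/sda/queue/read_ahead_kb   # 2048 with the SZ_2M readahead hunk, 128 otherwise
sysctl vm.vfs_cache_pressure vm.max_map_count vm.watermark_scale_factor
sysctl net.ipv4.tcp_congestion_control net.core.default_qdisc   # yeah/cake only if selected in the config
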
diff --git a/SOURCES/0007-v5.8-fsync.patch b/SOURCES/fsync.patch
index 01c86d8..01c86d8 100644
--- a/SOURCES/0007-v5.8-fsync.patch
+++ b/SOURCES/fsync.patch
diff --git a/SOURCES/zen.patch b/SOURCES/zen.patch
new file mode 100644
index 0000000..89c1934
--- /dev/null
+++ b/SOURCES/zen.patch
@@ -0,0 +1,308 @@
+From f85ed068b4d0e6c31edce8574a95757a60e58b87 Mon Sep 17 00:00:00 2001
+From: Etienne Juvigny <Ti3noU@gmail.com>
+Date: Mon, 3 Sep 2018 17:36:25 +0200
+Subject: [PATCH 07/17] Zenify & stuff
+
+---
+ init/Kconfig | 32 ++++++++++++++++++++++++++++++++
+ kernel/sched/fair.c | 25 +++++++++++++++++++++++++
+ mm/page-writeback.c | 8 ++++++++
+ 3 files changed, 65 insertions(+)
+
+diff --git a/init/Kconfig b/init/Kconfig
+index 3ae8678e1145..da708eed0f1e 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -92,6 +92,38 @@ config THREAD_INFO_IN_TASK
+
+ menu "General setup"
+
++config ZENIFY
++ bool "A selection of patches from Zen/Liquorix kernel and additional tweaks for a better gaming experience"
++ default y
++ help
++ Tunes the kernel for responsiveness at the cost of throughput and power usage.
++
++ --- Virtual Memory Subsystem ---------------------------
++
++ Mem dirty before bg writeback..: 10 % -> 20 %
++ Mem dirty before sync writeback: 20 % -> 50 %
++
++ --- Block Layer ----------------------------------------
++
++ Queue depth...............: 128 -> 512
++ Default MQ scheduler......: mq-deadline -> bfq
++
++ --- CFS CPU Scheduler ----------------------------------
++
++ Scheduling latency.............: 6 -> 3 ms
++ Minimal granularity............: 0.75 -> 0.3 ms
++ Wakeup granularity.............: 1 -> 0.5 ms
++ CPU migration cost.............: 0.5 -> 0.25 ms
++ Bandwidth slice size...........: 5 -> 3 ms
++ Ondemand fine upscaling limit..: 95 % -> 85 %
++
++ --- MuQSS CPU Scheduler --------------------------------
++
++ Scheduling interval............: 6 -> 3 ms
++ ISO task max realtime use......: 70 % -> 25 %
++ Ondemand coarse upscaling limit: 80 % -> 45 %
++ Ondemand fine upscaling limit..: 95 % -> 45 %
++
+ config BROKEN
+ bool
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 6b3b59cc51d6..2a0072192c3d 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -37,8 +37,13 @@
+ *
+ * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
+ */
++#ifdef CONFIG_ZENIFY
++unsigned int sysctl_sched_latency = 3000000ULL;
++static unsigned int normalized_sysctl_sched_latency = 3000000ULL;
++#else
+ unsigned int sysctl_sched_latency = 6000000ULL;
+ static unsigned int normalized_sysctl_sched_latency = 6000000ULL;
++#endif
+
+ /*
+ * The initial- and re-scaling of tunables is configurable
+@@ -58,13 +63,22 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_L
+ *
+ * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ */
++#ifdef CONFIG_ZENIFY
++unsigned int sysctl_sched_min_granularity = 300000ULL;
++static unsigned int normalized_sysctl_sched_min_granularity = 300000ULL;
++#else
+ unsigned int sysctl_sched_min_granularity = 750000ULL;
+ static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
++#endif
+
+ /*
+ * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
+ */
++#ifdef CONFIG_ZENIFY
++static unsigned int sched_nr_latency = 10;
++#else
+ static unsigned int sched_nr_latency = 8;
++#endif
+
+ /*
+ * After fork, child runs first. If set to 0 (default) then
+@@ -81,10 +95,17 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
+ *
+ * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ */
++#ifdef CONFIG_ZENIFY
++unsigned int sysctl_sched_wakeup_granularity = 500000UL;
++static unsigned int normalized_sysctl_sched_wakeup_granularity = 500000UL;
++
++const_debug unsigned int sysctl_sched_migration_cost = 50000UL;
++#else
+ unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
+ static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
+
+ const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
++#endif
+
+ int sched_thermal_decay_shift;
+ static int __init setup_sched_thermal_decay_shift(char *str)
+@@ -128,8 +149,12 @@ int __weak arch_asym_cpu_priority(int cpu)
+ *
+ * (default: 5 msec, units: microseconds)
+ */
++#ifdef CONFIG_ZENIFY
++unsigned int sysctl_sched_cfs_bandwidth_slice = 3000UL;
++#else
+ unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
+ #endif
++#endif
+
+ static inline void update_load_add(struct load_weight *lw, unsigned long inc)
+ {
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 28b3e7a67565..01a1aef2b9b1 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -71,7 +71,11 @@ static long ratelimit_pages = 32;
+ /*
+ * Start background writeback (via writeback threads) at this percentage
+ */
++#ifdef CONFIG_ZENIFY
++int dirty_background_ratio = 20;
++#else
+ int dirty_background_ratio = 10;
++#endif
+
+ /*
+ * dirty_background_bytes starts at 0 (disabled) so that it is a function of
+@@ -88,7 +92,11 @@ int vm_highmem_is_dirtyable;
+ /*
+ * The generator of dirty data starts writeback at this percentage
+ */
++#ifdef CONFIG_ZENIFY
++int vm_dirty_ratio = 50;
++#else
+ int vm_dirty_ratio = 20;
++#endif
+
+ /*
+ * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
+--
+2.28.0
+
+
+From e92e67143385cf285851e12aa8b7f083dd38dd24 Mon Sep 17 00:00:00 2001
+From: Steven Barrett <damentz@liquorix.net>
+Date: Sun, 16 Jan 2011 18:57:32 -0600
+Subject: [PATCH 08/17] ZEN: Allow TCP YeAH as default congestion control
+
+4.4: In my tests YeAH dramatically slowed down transfers over a WLAN,
+ reducing throughput from ~65Mbps (CUBIC) to ~7MBps (YeAH) over 10
+ seconds (netperf TCP_STREAM) including long stalls.
+
+ Be careful when choosing this. ~heftig
+---
+ net/ipv4/Kconfig | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
+index e64e59b536d3..bfb55ef7ebbe 100644
+--- a/net/ipv4/Kconfig
++++ b/net/ipv4/Kconfig
+@@ -691,6 +691,9 @@ choice
+ config DEFAULT_VEGAS
+ bool "Vegas" if TCP_CONG_VEGAS=y
+
++ config DEFAULT_YEAH
++ bool "YeAH" if TCP_CONG_YEAH=y
++
+ config DEFAULT_VENO
+ bool "Veno" if TCP_CONG_VENO=y
+
+@@ -724,6 +727,7 @@ config DEFAULT_TCP_CONG
+ default "htcp" if DEFAULT_HTCP
+ default "hybla" if DEFAULT_HYBLA
+ default "vegas" if DEFAULT_VEGAS
++ default "yeah" if DEFAULT_YEAH
+ default "westwood" if DEFAULT_WESTWOOD
+ default "veno" if DEFAULT_VENO
+ default "reno" if DEFAULT_RENO
+--
+2.28.0
+
+
+From 76dbe7477bfde1b5e8bf29a71b5af7ab2be9b98e Mon Sep 17 00:00:00 2001
+From: Steven Barrett <steven@liquorix.net>
+Date: Wed, 28 Nov 2018 19:01:27 -0600
+Subject: [PATCH 09/17] zen: Use [defer+madvise] as default khugepaged defrag
+ strategy
+
+For some reason, the default strategy to respond to THP fault fallbacks
+is still just madvise, meaning stall if the program wants transparent
+hugepages, but don't trigger a background reclaim / compaction if THP
+begins to fail allocations. This creates a snowball affect where we
+still use the THP code paths, but we almost always fail once a system
+has been active and busy for a while.
+
+The option "defer" was created for interactive systems where THP can
+still improve performance. If we have to fallback to a regular page due
+to an allocation failure or anything else, we will trigger a background
+reclaim and compaction so future THP attempts succeed and previous
+attempts eventually have their smaller pages combined without stalling
+running applications.
+
+We still want madvise to stall applications that explicitely want THP,
+so defer+madvise _does_ make a ton of sense. Make it the default for
+interactive systems, especially if the kernel maintainer left
+transparent hugepages on "always".
+
+Reasoning and details in the original patch: https://lwn.net/Articles/711248/
+---
+ mm/huge_memory.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 74300e337c3c..9277f22c10a7 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -53,7 +53,11 @@ unsigned long transparent_hugepage_flags __read_mostly =
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
+ (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
+ #endif
++#ifdef CONFIG_ZENIFY
++ (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG)|
++#else
+ (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
++#endif
+ (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
+ (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
+
+--
+2.28.0
+
+
+From 716f41cf6631f3a85834dcb67b4ce99185b6387f Mon Sep 17 00:00:00 2001
+From: Steven Barrett <steven@liquorix.net>
+Date: Wed, 15 Jan 2020 20:43:56 -0600
+Subject: [PATCH 17/17] ZEN: intel-pstate: Implement "enable" parameter
+
+If intel-pstate is compiled into the kernel, it will preempt the loading
+of acpi-cpufreq so you can take advantage of hardware p-states without
+any friction.
+
+However, intel-pstate is not completely superior to cpufreq's ondemand
+for one reason. There's no concept of an up_threshold property.
+
+In ondemand, up_threshold essentially reduces the maximum utilization to
+compare against, allowing you to hit max frequencies and turbo boost
+from a much lower core utilization.
+
+With intel-pstate, you have the concept of minimum and maximum
+performance, but no tunable that lets you define, maximum frequency
+means 50% core utilization. For just this oversight, there's reasons
+you may want ondemand.
+
+Lets support setting "enable" in kernel boot parameters. This lets
+kernel maintainers include "intel_pstate=disable" statically in the
+static boot parameters, but let users of the kernel override this
+selection.
+---
+ Documentation/admin-guide/kernel-parameters.txt | 3 +++
+ drivers/cpufreq/intel_pstate.c | 2 ++
+ 2 files changed, 5 insertions(+)
+
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index fb95fad81c79..3e92fee81e33 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -1857,6 +1857,9 @@
+ disable
+ Do not enable intel_pstate as the default
+ scaling driver for the supported processors
++ enable
++ Enable intel_pstate in-case "disable" was passed
++ previously in the kernel boot parameters
+ passive
+ Use intel_pstate as a scaling driver, but configure it
+ to work with generic cpufreq governors (instead of
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 36a469150ff9..aee891c9b78a 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -2845,6 +2845,8 @@ static int __init intel_pstate_setup(char *str)
+ pr_info("HWP disabled\n");
+ no_hwp = 1;
+ }
++ if (!strcmp(str, "enable"))
++ no_load = 0;
+ if (!strcmp(str, "force"))
+ force_load = 1;
+ if (!strcmp(str, "hwp_only"))
+--
+2.28.0
+
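
The zen.patch added above keeps the writeback and CFS changes behind CONFIG_ZENIFY and re-introduces the intel_pstate "enable" parameter. A minimal sketch of checking and using the result on a booted kernel; the sched_*_ns sysctls are only exported when the kernel was built with CONFIG_SCHED_DEBUG, and grubby is assumed as the Fedora way to edit the kernel command line:

sysctl vm.dirty_background_ratio vm.dirty_ratio       # 20 / 50 with CONFIG_ZENIFY, 10 / 20 otherwise
sysctl kernel.sched_latency_ns kernel.sched_min_granularity_ns kernel.sched_wakeup_granularity_ns
sudo grubby --update-kernel=ALL --args="intel_pstate=enable"   # overrides an earlier intel_pstate=disable
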
diff --git a/SPECS/kernel.spec b/SPECS/kernel.spec
index 309679d..d41e821 100644
--- a/SPECS/kernel.spec
+++ b/SPECS/kernel.spec
@@ -92,7 +92,7 @@ Summary: The Linux kernel
%if 0%{?released_kernel}
# Do we have a -stable update to apply?
-%define stable_update 17
+%define stable_update 18
# Set rpm version accordingly
%if 0%{?stable_update}
%define stablerev %{stable_update}
@@ -905,8 +905,9 @@ Patch131: arm64-dts-allwinner-h6-Pine-H64-Fix-ethernet-node.patch
# CVE-2020-27675 rhbz 1891114 1891115
Patch132: 0001-xen-events-avoid-removing-an-event-channel-while-han.patch
-# Linux-tkg patches - https://github.com/Frogging-Family/linux-tkg/blob/master/linux57-tkg
-Patch200: 0007-v5.8-fsync.patch
+# Linux-tkg patches - https://github.com/Frogging-Family/linux-tkg/tree/master/linux-tkg-patches/5.8
+Patch200: zen.patch
+Patch201: fsync.patch
# END OF PATCH DEFINITIONS
@@ -3021,8 +3022,11 @@ fi
#
#
%changelog
-* Tue Nov 3 17:17:46 CET 2020 Jan Drögehoff <sentrycraft123@gmail.com> - 5.8.17-301.fsync
-- Linux v5.8.17 fsync
+* Fri Nov 6 18:29:14 CET 2020 Jan Drögehoff <sentrycraft123@gmail.com> - 5.8.18-301.fsync
+- Linux v5.8.18 fsync zen
+
+* Mon Nov 2 10:50:39 CST 2020 Justin M. Forbes <jforbes@fedoraproject.org> - 5.8.18-300
+- Linux v5.8.18
* Thu Oct 29 07:55:15 CDT 2020 Justin M. Forbes <jforbes@fedoraproject.org> - 5.8.17-300
- Linux v5.8.17
@@ -3044,13 +3048,13 @@ fi
- Linux v5.8.15
- Fix CVE-2020-16119 (rhbz 1886374 1888083)
+* Wed Oct 7 07:21:34 CDT 2020 Justin M. Forbes <jforbes@fedoraproject.org> - 5.8.14-300
+- Linux v5.8.14
+
* Wed Oct 7 2020 Peter Robinson <pbrobinson@fedoraproject.org>
- Fix aarch64 boot crash on BTI capable systems
- Fix boot crash on aarch64 Ampere eMAG systems (rhbz #1874117)
-* Wed Oct 7 07:21:34 CDT 2020 Justin M. Forbes <jforbes@fedoraproject.org> - 5.8.14-300
-- Linux v5.8.14
-
* Thu Oct 1 12:09:16 CDT 2020 Justin M. Forbes <jforbes@fedoraproject.org> - 5.8.13-300
- Linux v5.8.13
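
The spec hunks above only swap the patch sources (zen.patch plus the renamed fsync.patch) and bump the stable update to 5.8.18. A sketch of rebuilding the package with them, assuming a standard ~/rpmbuild tree, the remaining sources referenced by kernel.spec already in place, and the build dependencies installed:

cp SOURCES/* ~/rpmbuild/SOURCES/
cp SPECS/kernel.spec ~/rpmbuild/SPECS/
sudo dnf builddep SPECS/kernel.spec        # pull in the BuildRequires
rpmbuild -bb ~/rpmbuild/SPECS/kernel.spec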