path: root/SOURCES/tkg.patch
author     Jan200101 <sentrycraft123@gmail.com>  2021-11-28 15:40:28 +0100
committer  Jan200101 <sentrycraft123@gmail.com>  2021-11-28 15:42:17 +0100
commit     d39788f1fe96e14f04e5e7f9a223427a32cddf1a (patch)
tree       d548fce3cd24e6a68a25d6f41b6b2c6d2e23eb50 /SOURCES/tkg.patch
parent     603d921759e256c1fd9824078eab66907e7b2ad5 (diff)
download   kernel-fsync-d39788f1fe96e14f04e5e7f9a223427a32cddf1a.tar.gz
           kernel-fsync-d39788f1fe96e14f04e5e7f9a223427a32cddf1a.zip
kernel 5.15.4
Diffstat (limited to 'SOURCES/tkg.patch')
-rw-r--r--  SOURCES/tkg.patch | 549
1 file changed, 549 insertions, 0 deletions
diff --git a/SOURCES/tkg.patch b/SOURCES/tkg.patch
new file mode 100644
index 0000000..f8d8f18
--- /dev/null
+++ b/SOURCES/tkg.patch
@@ -0,0 +1,549 @@
+From c304f43d14e98d4bf1215fc10bc5012f554bdd8a Mon Sep 17 00:00:00 2001
+From: Alexandre Frade <admfrade@gmail.com>
+Date: Mon, 29 Jan 2018 16:59:22 +0000
+Subject: [PATCH 02/17] dcache: cache_pressure = 50 decreases the rate at which
+ VFS caches are reclaimed
+
+Signed-off-by: Alexandre Frade <admfrade@gmail.com>
+---
+ fs/dcache.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 361ea7ab30ea..0c5cf69b241a 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -71,7 +71,7 @@
+ * If no ancestor relationship:
+ * arbitrary, since it's serialized on rename_lock
+ */
+-int sysctl_vfs_cache_pressure __read_mostly = 100;
++int sysctl_vfs_cache_pressure __read_mostly = 50;
+ EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
+
+ __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
+--
+2.28.0
+
+
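The vfs_cache_pressure sysctl scales how reclaimable the dentry and inode caches appear to the memory shrinkers: freeable object counts are reported multiplied by pressure/100, so a value of 50 makes the VFS caches look half as reclaimable and they survive memory pressure longer, at the cost of held memory. A minimal userspace sketch of that scaling, modeled on vfs_pressure_ratio() in fs/internal.h (the freeable count below is an illustrative assumption):

    #include <stdio.h>

    /* Model of the pressure scaling applied to VFS shrinker counts;
     * the kernel uses mult_frac() to avoid intermediate overflow. */
    static unsigned long pressure_ratio(unsigned long freeable, unsigned int pressure)
    {
        return freeable * pressure / 100;
    }

    int main(void)
    {
        unsigned long freeable = 100000; /* assumed reclaimable dentries */
        printf("reported at pressure 100: %lu\n", pressure_ratio(freeable, 100));
        printf("reported at pressure  50: %lu\n", pressure_ratio(freeable, 50));
        return 0;
    }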
+From 28f32f59d9d55ac7ec3a20b79bdd02d2a0a5f7e1 Mon Sep 17 00:00:00 2001
+From: Alexandre Frade <admfrade@gmail.com>
+Date: Mon, 29 Jan 2018 18:29:13 +0000
+Subject: [PATCH 03/17] sched/core: nr_migrate = 128 increases number of tasks
+ to iterate in a single balance run.
+
+Signed-off-by: Alexandre Frade <admfrade@gmail.com>
+---
+ kernel/sched/core.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index f788cd61df21..2bfbb4213707 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -59,7 +59,7 @@ const_debug unsigned int sysctl_sched_features =
+ * Number of tasks to iterate in a single balance run.
+ * Limited because this is done with IRQs disabled.
+ */
+-const_debug unsigned int sysctl_sched_nr_migrate = 32;
++const_debug unsigned int sysctl_sched_nr_migrate = 128;
+
+ /*
+ * period over which we measure -rt task CPU usage in us.
+@@ -366,9 +366,9 @@
+
+ /*
+ * part of the period that we allow rt tasks to run in us.
+- * default: 0.95s
++ * XanMod default: 0.98s
+ */
+-int sysctl_sched_rt_runtime = 950000;
++int sysctl_sched_rt_runtime = 980000;
+
+
+ /*
+--
+2.28.0
+
+
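With sched_rt_period_us left at its default of 1000000, raising sysctl_sched_rt_runtime from 950000 to 980000 shrinks the CPU share reserved for non-realtime tasks from 5 % to 2 %. The arithmetic as a small sketch (period assumed at its default):

    #include <stdio.h>

    int main(void)
    {
        const double period_us = 1000000.0; /* assumed default sched_rt_period_us */
        const double runtimes_us[] = { 950000.0, 980000.0 };

        for (int i = 0; i < 2; i++)
            printf("rt_runtime=%.0f us -> RT share %.0f%%, non-RT reserve %.0f%%\n",
                   runtimes_us[i],
                   100.0 * runtimes_us[i] / period_us,
                   100.0 - 100.0 * runtimes_us[i] / period_us);
        return 0;
    }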
+From f85ed068b4d0e6c31edce8574a95757a60e58b87 Mon Sep 17 00:00:00 2001
+From: Etienne Juvigny <Ti3noU@gmail.com>
+Date: Mon, 3 Sep 2018 17:36:25 +0200
+Subject: [PATCH 07/17] Zenify & stuff
+
+---
+ init/Kconfig | 32 ++++++++++++++++++++++++++++++++
+ kernel/sched/fair.c | 25 +++++++++++++++++++++++++
+ mm/page-writeback.c | 8 ++++++++
+ 3 files changed, 65 insertions(+)
+
+diff --git a/init/Kconfig b/init/Kconfig
+index 3ae8678e1145..da708eed0f1e 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -92,6 +92,38 @@ config THREAD_INFO_IN_TASK
+
+ menu "General setup"
+
++config ZENIFY
++ bool "A selection of patches from Zen/Liquorix kernel and additional tweaks for a better gaming experience"
++ default y
++ help
++ Tunes the kernel for responsiveness at the cost of throughput and power usage.
++
++ --- Virtual Memory Subsystem ---------------------------
++
++ Mem dirty before bg writeback..: 10 % -> 20 %
++ Mem dirty before sync writeback: 20 % -> 50 %
++
++ --- Block Layer ----------------------------------------
++
++ Queue depth...............: 128 -> 512
++ Default MQ scheduler......: mq-deadline -> bfq
++
++ --- CFS CPU Scheduler ----------------------------------
++
++ Scheduling latency.............: 6 -> 3 ms
++ Minimal granularity............: 0.75 -> 0.3 ms
++ Wakeup granularity.............: 1 -> 0.5 ms
++ CPU migration cost.............: 0.5 -> 0.25 ms
++ Bandwidth slice size...........: 5 -> 3 ms
++ Ondemand fine upscaling limit..: 95 % -> 85 %
++
++ --- MuQSS CPU Scheduler --------------------------------
++
++ Scheduling interval............: 6 -> 3 ms
++ ISO task max realtime use......: 70 % -> 25 %
++ Ondemand coarse upscaling limit: 80 % -> 45 %
++ Ondemand fine upscaling limit..: 95 % -> 45 %
++
+ config BROKEN
+ bool
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 6b3b59cc51d6..2a0072192c3d 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -37,8 +37,13 @@
+ *
+ * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
+ */
++#ifdef CONFIG_ZENIFY
++unsigned int sysctl_sched_latency = 3000000ULL;
++static unsigned int normalized_sysctl_sched_latency = 3000000ULL;
++#else
+ unsigned int sysctl_sched_latency = 6000000ULL;
+ static unsigned int normalized_sysctl_sched_latency = 6000000ULL;
++#endif
+
+ /*
+ * The initial- and re-scaling of tunables is configurable
+@@ -58,13 +63,22 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_L
+ *
+ * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ */
++#ifdef CONFIG_ZENIFY
++unsigned int sysctl_sched_min_granularity = 300000ULL;
++static unsigned int normalized_sysctl_sched_min_granularity = 300000ULL;
++#else
+ unsigned int sysctl_sched_min_granularity = 750000ULL;
+ static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
++#endif
+
+ /*
+ * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
+ */
++#ifdef CONFIG_ZENIFY
++static unsigned int sched_nr_latency = 10;
++#else
+ static unsigned int sched_nr_latency = 8;
++#endif
+
+ /*
+ * After fork, child runs first. If set to 0 (default) then
+@@ -128,8 +149,12 @@ int __weak arch_asym_cpu_priority(int cpu)
+ *
+ * (default: 5 msec, units: microseconds)
+ */
++#ifdef CONFIG_ZENIFY
++unsigned int sysctl_sched_cfs_bandwidth_slice = 3000UL;
++#else
+ unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
+ #endif
++#endif
+
+ static inline void update_load_add(struct load_weight *lw, unsigned long inc)
+ {
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 28b3e7a67565..01a1aef2b9b1 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -71,7 +71,11 @@ static long ratelimit_pages = 32;
+ /*
+ * Start background writeback (via writeback threads) at this percentage
+ */
++#ifdef CONFIG_ZENIFY
++int dirty_background_ratio = 20;
++#else
+ int dirty_background_ratio = 10;
++#endif
+
+ /*
+ * dirty_background_bytes starts at 0 (disabled) so that it is a function of
+@@ -88,7 +92,11 @@ int vm_highmem_is_dirtyable;
+ /*
+ * The generator of dirty data starts writeback at this percentage
+ */
++#ifdef CONFIG_ZENIFY
++int vm_dirty_ratio = 50;
++#else
+ int vm_dirty_ratio = 20;
++#endif
+
+ /*
+ * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
+--
+2.28.0
+
+
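Among these changes, the writeback tweak doubles both dirty-memory thresholds, letting more dirty pages accumulate before background writeback begins and before writers are throttled. A rough model of what the two ratios mean in bytes; the kernel's domain_dirty_limits() applies further corrections, and the 16 GiB figure is only an assumption for illustration:

    #include <stdio.h>

    int main(void)
    {
        const unsigned long long dirtyable = 16ULL << 30; /* assumed dirtyable memory */
        const int bg_ratio = 20, dirty_ratio = 50;        /* CONFIG_ZENIFY values */

        printf("background writeback starts at ~%llu MiB dirty\n",
               dirtyable * bg_ratio / 100 >> 20);
        printf("writer throttling starts at  ~%llu MiB dirty\n",
               dirtyable * dirty_ratio / 100 >> 20);
        return 0;
    }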
+From e92e67143385cf285851e12aa8b7f083dd38dd24 Mon Sep 17 00:00:00 2001
+From: Steven Barrett <damentz@liquorix.net>
+Date: Sun, 16 Jan 2011 18:57:32 -0600
+Subject: [PATCH 08/17] ZEN: Allow TCP YeAH as default congestion control
+
+4.4: In my tests YeAH dramatically slowed down transfers over a WLAN,
+ reducing throughput from ~65Mbps (CUBIC) to ~7Mbps (YeAH) over 10
+ seconds (netperf TCP_STREAM) including long stalls.
+
+ Be careful when choosing this. ~heftig
+---
+ net/ipv4/Kconfig | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
+index e64e59b536d3..bfb55ef7ebbe 100644
+--- a/net/ipv4/Kconfig
++++ b/net/ipv4/Kconfig
+@@ -691,6 +691,9 @@ choice
+ config DEFAULT_VEGAS
+ bool "Vegas" if TCP_CONG_VEGAS=y
+
++ config DEFAULT_YEAH
++ bool "YeAH" if TCP_CONG_YEAH=y
++
+ config DEFAULT_VENO
+ bool "Veno" if TCP_CONG_VENO=y
+
+@@ -724,6 +727,7 @@ config DEFAULT_TCP_CONG
+ default "htcp" if DEFAULT_HTCP
+ default "hybla" if DEFAULT_HYBLA
+ default "vegas" if DEFAULT_VEGAS
++ default "yeah" if DEFAULT_YEAH
+ default "westwood" if DEFAULT_WESTWOOD
+ default "veno" if DEFAULT_VENO
+ default "reno" if DEFAULT_RENO
+--
+2.28.0
+
+
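Given the WLAN regression noted above, a safer alternative to a global default is opting individual sockets into YeAH with the TCP_CONGESTION socket option (requires tcp_yeah to be built in or loaded); a minimal sketch:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        if (fd < 0) { perror("socket"); return 1; }

        /* Per-socket selection; fails if the yeah module is unavailable. */
        if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "yeah", strlen("yeah")) < 0)
            perror("setsockopt(TCP_CONGESTION)");
        else
            puts("socket switched to YeAH");
        return 0;
    }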
+From 76dbe7477bfde1b5e8bf29a71b5af7ab2be9b98e Mon Sep 17 00:00:00 2001
+From: Steven Barrett <steven@liquorix.net>
+Date: Wed, 28 Nov 2018 19:01:27 -0600
+Subject: [PATCH 09/17] zen: Use [defer+madvise] as default khugepaged defrag
+ strategy
+
+For some reason, the default strategy to respond to THP fault fallbacks
+is still just madvise, meaning stall if the program wants transparent
+hugepages, but don't trigger a background reclaim / compaction if THP
+begins to fail allocations. This creates a snowball effect where we
+still use the THP code paths, but we almost always fail once a system
+has been active and busy for a while.
+
+The option "defer" was created for interactive systems where THP can
+still improve performance. If we have to fallback to a regular page due
+to an allocation failure or anything else, we will trigger a background
+reclaim and compaction so future THP attempts succeed and previous
+attempts eventually have their smaller pages combined without stalling
+running applications.
+
+We still want madvise to stall applications that explicitly want THP,
+so defer+madvise _does_ make a ton of sense. Make it the default for
+interactive systems, especially if the kernel maintainer left
+transparent hugepages on "always".
+
+Reasoning and details in the original patch: https://lwn.net/Articles/711248/
+---
+ mm/huge_memory.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 74300e337c3c..9277f22c10a7 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -53,7 +53,11 @@ unsigned long transparent_hugepage_flags __read_mostly =
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
+ (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
+ #endif
++#ifdef CONFIG_ZENIFY
++ (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG)|
++#else
+ (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
++#endif
+ (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
+ (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
+
+--
+2.28.0
+
+
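On an unpatched kernel the same policy can usually be selected at runtime through the standard THP sysfs interface (root required); a minimal sketch:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/defrag", "w");
        if (!f) { perror("fopen"); return 1; }

        /* Same policy this patch compiles in as the default. */
        if (fputs("defer+madvise", f) == EOF)
            perror("fputs");
        fclose(f);
        return 0;
    }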
+From 2b65a1329cb220b43c19c4d0de5833fae9e2b22d Mon Sep 17 00:00:00 2001
+From: Alexandre Frade <admfrade@gmail.com>
+Date: Wed, 24 Oct 2018 16:58:52 -0300
+Subject: [PATCH 10/17] net/sched: allow configuring cake qdisc as default
+
+Signed-off-by: Alexandre Frade <admfrade@gmail.com>
+---
+ net/sched/Kconfig | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/net/sched/Kconfig b/net/sched/Kconfig
+index 84badf00647e..6a922bca9f39 100644
+--- a/net/sched/Kconfig
++++ b/net/sched/Kconfig
+@@ -471,6 +471,9 @@ choice
+ config DEFAULT_SFQ
+ bool "Stochastic Fair Queue" if NET_SCH_SFQ
+
++ config DEFAULT_CAKE
++ bool "Common Applications Kept Enhanced" if NET_SCH_CAKE
++
+ config DEFAULT_PFIFO_FAST
+ bool "Priority FIFO Fast"
+ endchoice
+@@ -488,6 +491,7 @@
+ default "fq_codel" if DEFAULT_FQ_CODEL
+ default "fq_pie" if DEFAULT_FQ_PIE
+ default "sfq" if DEFAULT_SFQ
++ default "cake" if DEFAULT_CAKE
+ default "pfifo_fast"
+ endif
+
+--
+2.28.0
+
+
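The same default can be chosen at runtime on a kernel with sch_cake available via the net.core.default_qdisc sysctl (root required); a minimal sketch:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sys/net/core/default_qdisc", "w");
        if (!f) { perror("fopen"); return 1; }

        /* Newly created interfaces will pick up cake as their root qdisc. */
        fputs("cake\n", f);
        fclose(f);
        return 0;
    }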
+From 816ee502759e954304693813bd03d94986b28dba Mon Sep 17 00:00:00 2001
+From: Tk-Glitch <ti3nou@gmail.com>
+Date: Mon, 18 Feb 2019 17:40:57 +0100
+Subject: [PATCH 11/17] mm: Set watermark_scale_factor to 200 (from 10)
+
+Multiple users have reported it's helping reducing/eliminating stuttering
+with DXVK.
+---
+ mm/page_alloc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 898ff44f2c7b..e72074034793 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -330,7 +330,7 @@ int watermark_boost_factor __read_mostly;
+ int min_free_kbytes = 1024;
+ int user_min_free_kbytes = -1;
+ int watermark_boost_factor __read_mostly = 15000;
+-int watermark_scale_factor = 10;
++int watermark_scale_factor = 200;
+
+ static unsigned long nr_kernel_pages __initdata;
+ static unsigned long nr_all_pages __initdata;
+--
+2.28.0
+
+
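watermark_scale_factor is expressed in units of 0.01 % of a zone's size (see Documentation/admin-guide/sysctl/vm.rst), so this change widens the gap between the min/low/high watermarks from 0.1 % to 2 % of each zone and wakes kswapd earlier. A small sketch of the arithmetic (zone size assumed for illustration):

    #include <stdio.h>

    int main(void)
    {
        const unsigned long zone_pages = 4UL << 20; /* assumed: 16 GiB zone, 4 KiB pages */
        const int factors[] = { 10, 200 };

        for (int i = 0; i < 2; i++) {
            unsigned long gap = zone_pages * factors[i] / 10000;
            printf("factor %3d -> watermark gap %lu pages (~%lu MiB)\n",
                   factors[i], gap, gap * 4096 >> 20);
        }
        return 0;
    }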
+From 90240bcd90a568878738e66c0d45bed3e38e347b Mon Sep 17 00:00:00 2001
+From: Tk-Glitch <ti3nou@gmail.com>
+Date: Fri, 19 Apr 2019 12:33:38 +0200
+Subject: [PATCH 12/17] Set vm.max_map_count to 262144 by default
+
+The value is still pretty low, and AMD64-ABI and ELF extended numbering
+supports that, so we should be fine on modern x86 systems.
+
+This fixes crashes in some applications using more than 65535 vmas (also
+affects some Windows games running in Wine, such as Star Citizen).
+---
+ include/linux/mm.h | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index bc05c3588aa3..b0cefe94920d 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -190,8 +190,7 @@ static inline void __mm_zero_struct_page(struct page *page)
+ * not a hard limit any more. Although some userspace tools can be surprised by
+ * that.
+ */
+-#define MAPCOUNT_ELF_CORE_MARGIN (5)
+-#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
++#define DEFAULT_MAX_MAP_COUNT (262144)
+
+ extern int sysctl_max_map_count;
+
+--
+2.28.0
+
+
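A process's live mapping count can be compared against the limit by counting lines in /proc/self/maps, one line per VMA; a minimal sketch:

    #include <stdio.h>

    static unsigned long count_lines(const char *path)
    {
        FILE *f = fopen(path, "r");
        unsigned long n = 0;
        int c;

        if (!f) return 0;
        while ((c = fgetc(f)) != EOF)
            if (c == '\n')
                n++;
        fclose(f);
        return n;
    }

    int main(void)
    {
        unsigned long limit = 0;
        FILE *f = fopen("/proc/sys/vm/max_map_count", "r");

        if (f) { if (fscanf(f, "%lu", &limit) != 1) limit = 0; fclose(f); }
        printf("VMAs in use: %lu of %lu allowed\n",
               count_lines("/proc/self/maps"), limit);
        return 0;
    }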
+From 3a34034dba5efe91bcec491efe8c66e8087f509b Mon Sep 17 00:00:00 2001
+From: Tk-Glitch <ti3nou@gmail.com>
+Date: Mon, 27 Jul 2020 00:19:18 +0200
+Subject: [PATCH 13/17] mm: bump DEFAULT_MAX_MAP_COUNT
+
+Some games such as Detroit: Become Human tend to be very crash prone with
+lower values.
+---
+ include/linux/mm.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index b0cefe94920d..890165099b07 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -190,7 +190,7 @@ static inline void __mm_zero_struct_page(struct page *page)
+ * not a hard limit any more. Although some userspace tools can be surprised by
+ * that.
+ */
+-#define DEFAULT_MAX_MAP_COUNT (262144)
++#define DEFAULT_MAX_MAP_COUNT (524288)
+
+ extern int sysctl_max_map_count;
+
+--
+2.28.0
+
+
+From 977812938da7c7226415778c340832141d9278b7 Mon Sep 17 00:00:00 2001
+From: Alexandre Frade <admfrade@gmail.com>
+Date: Mon, 25 Nov 2019 15:13:06 -0300
+Subject: [PATCH 14/17] elevator: set default scheduler to bfq for blk-mq
+
+Signed-off-by: Alexandre Frade <admfrade@gmail.com>
+---
+ block/elevator.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/block/elevator.c b/block/elevator.c
+index 4eab3d70e880..79669aa39d79 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -623,19 +623,19 @@ static inline bool elv_support_iosched(struct request_queue *q)
+ }
+
+ /*
+- * For single queue devices, default to using mq-deadline. If we have multiple
+- * queues or mq-deadline is not available, default to "none".
++ * For single queue devices, default to using bfq. If we have multiple
++ * queues or bfq is not available, default to "none".
+ */
+ static struct elevator_type *elevator_get_default(struct request_queue *q)
+ {
+ if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
+ return NULL;
+
+ if (q->nr_hw_queues != 1 &&
+ !blk_mq_is_sbitmap_shared(q->tag_set->flags))
+ return NULL;
+
+- return elevator_get(q, "mq-deadline", false);
++ return elevator_get(q, "bfq", false);
+ }
+
+ /*
+--
+2.28.0
+
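The scheduler can also be switched per device at runtime through sysfs, which is handy for testing before rebuilding; the device name below is an assumption (root required, bfq built in or loaded):

    #include <stdio.h>

    int main(void)
    {
        /* "sda" is a placeholder; substitute the target block device. */
        FILE *f = fopen("/sys/block/sda/queue/scheduler", "w");
        if (!f) { perror("fopen"); return 1; }

        fputs("bfq\n", f);
        fclose(f);
        return 0;
    }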
+From 3c229f434aca65c4ca61772bc03c3e0370817b92 Mon Sep 17 00:00:00 2001
+From: Alexandre Frade <kernel@xanmod.org>
+Date: Mon, 3 Aug 2020 17:05:04 +0000
+Subject: [PATCH 16/17] mm: set 2 megabytes for address_space-level file
+ read-ahead pages size
+
+Signed-off-by: Alexandre Frade <kernel@xanmod.org>
+---
+ include/linux/pagemap.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index cf2468da68e9..007dea784451 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -851,7 +851,7 @@
+ ._index = i, \
+ }
+
+-#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE)
++#define VM_READAHEAD_PAGES (SZ_2M / PAGE_SIZE)
+
+ void page_cache_ra_unbounded(struct readahead_control *,
+ unsigned long nr_to_read, unsigned long lookahead_count);
+--
+2.28.0
+
+
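With 4 KiB pages this raises the default per-file readahead window from 32 pages (128 KiB) to 512 pages (2 MiB); the arithmetic as a quick sketch:

    #include <stdio.h>

    int main(void)
    {
        const unsigned long page = 4096; /* assumed PAGE_SIZE */
        const unsigned long sz_128k = 128UL << 10, sz_2m = 2UL << 20;

        printf("VM_READAHEAD_PAGES before: %lu pages (%lu KiB)\n",
               sz_128k / page, sz_128k >> 10);
        printf("VM_READAHEAD_PAGES after:  %lu pages (%lu KiB)\n",
               sz_2m / page, sz_2m >> 10);
        return 0;
    }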
+From 716f41cf6631f3a85834dcb67b4ce99185b6387f Mon Sep 17 00:00:00 2001
+From: Steven Barrett <steven@liquorix.net>
+Date: Wed, 15 Jan 2020 20:43:56 -0600
+Subject: [PATCH 17/17] ZEN: intel-pstate: Implement "enable" parameter
+
+If intel-pstate is compiled into the kernel, it will preempt the loading
+of acpi-cpufreq so you can take advantage of hardware p-states without
+any friction.
+
+However, intel-pstate is not completely superior to cpufreq's ondemand
+governor for one reason: it has no concept of an up_threshold property.
+
+In ondemand, up_threshold essentially reduces the maximum utilization to
+compare against, allowing you to hit max frequencies and turbo boost
+from a much lower core utilization.
+
+With intel-pstate, you have the concept of minimum and maximum
+performance, but no tunable that lets you define "maximum frequency
+means 50% core utilization". For just this oversight, there are reasons
+you may want ondemand.
+
+Let's support setting "enable" in the kernel boot parameters. This lets
+kernel maintainers ship "intel_pstate=disable" in the default boot
+parameters while still letting users of the kernel override that
+selection.
+---
+ Documentation/admin-guide/kernel-parameters.txt | 3 +++
+ drivers/cpufreq/intel_pstate.c | 2 ++
+ 2 files changed, 5 insertions(+)
+
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index fb95fad81c79..3e92fee81e33 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -1857,6 +1857,9 @@
+ disable
+ Do not enable intel_pstate as the default
+ scaling driver for the supported processors
++ enable
++ Enable intel_pstate in case "disable" was passed
++ previously in the kernel boot parameters
+ passive
+ Use intel_pstate as a scaling driver, but configure it
+ to work with generic cpufreq governors (instead of
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 36a469150ff9..aee891c9b78a 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -2845,6 +2845,8 @@ static int __init intel_pstate_setup(char *str)
+ if (!strcmp(str, "no_hwp"))
+ no_hwp = 1;
+
++ if (!strcmp(str, "enable"))
++ no_load = 0;
+ if (!strcmp(str, "force"))
+ force_load = 1;
+ if (!strcmp(str, "hwp_only"))
+--
+2.28.0
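
Boot parameters are parsed left to right, so a trailing "intel_pstate=enable" appended by the user overrides an earlier "intel_pstate=disable" baked into the default command line, which is exactly the use case described above. Which driver actually won can be checked after boot through cpufreq's standard sysfs node; a minimal sketch:

    #include <stdio.h>

    int main(void)
    {
        char buf[64];
        FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_driver", "r");

        if (!f) { perror("fopen"); return 1; }
        if (fgets(buf, sizeof(buf), f))
            printf("active cpufreq driver: %s", buf); /* e.g. intel_pstate */
        fclose(f);
        return 0;
    }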