From 43e519023ea4a79fc6a771bb9ebbb0cfe5fa39bc Mon Sep 17 00:00:00 2001
From: Sentry
Date: Sun, 12 Jul 2020 20:43:50 +0200
Subject: [PATCH] glitched base

---
 .../admin-guide/kernel-parameters.txt |  3 ++
 block/elevator.c                      |  6 ++--
 drivers/cpufreq/intel_pstate.c        |  2 ++
 drivers/infiniband/core/addr.c        |  1 +
 drivers/tty/Kconfig                   | 13 ++++++++
 fs/dcache.c                           |  2 +-
 include/linux/blkdev.h                |  7 +++-
 include/linux/compiler_types.h        |  4 +++
 include/linux/mm.h                    |  5 ++-
 include/uapi/linux/vt.h               | 15 ++++++++-
 init/Kconfig                          | 33 ++++++++++++++++++-
 kernel/sched/core.c                   |  6 ++--
 kernel/sched/fair.c                   | 25 ++++++++++++++
 mm/huge_memory.c                      |  4 +++
 mm/page-writeback.c                   |  8 +++++
 mm/page_alloc.c                       |  2 +-
 net/ipv4/Kconfig                      |  4 +++
 net/sched/Kconfig                     |  4 +++
 scripts/mkcompile_h                   |  4 +--
 scripts/setlocalversion               |  2 +-
 20 files changed, 133 insertions(+), 17 deletions(-)

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 7bc83f3d9..d31f0323c 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1826,6 +1826,9 @@
 			disable
 			  Do not enable intel_pstate as the default
 			  scaling driver for the supported processors
+			enable
+			  Enable intel_pstate in case "disable" was passed
+			  previously in the kernel boot parameters
 			passive
 			  Use intel_pstate as a scaling driver, but configure it
 			  to work with generic cpufreq governors (instead of
diff --git a/block/elevator.c b/block/elevator.c
index 4eab3d70e..79669aa39 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -623,15 +623,15 @@ static inline bool elv_support_iosched(struct request_queue *q)
 }
 
 /*
- * For single queue devices, default to using mq-deadline. If we have multiple
- * queues or mq-deadline is not available, default to "none".
+ * For single queue devices, default to using bfq. If we have multiple
+ * queues or bfq is not available, default to "none".
  */
 static struct elevator_type *elevator_get_default(struct request_queue *q)
 {
 	if (q->nr_hw_queues != 1)
 		return NULL;
 
-	return elevator_get(q, "mq-deadline", false);
+	return elevator_get(q, "bfq", false);
 }
 
 /*
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4d3429b20..8bf9e0adf 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2824,6 +2824,8 @@ static int __init intel_pstate_setup(char *str)
 		pr_info("HWP disabled\n");
 		no_hwp = 1;
 	}
+	if (!strcmp(str, "enable"))
+		no_load = 0;
 	if (!strcmp(str, "force"))
 		force_load = 1;
 	if (!strcmp(str, "hwp_only"))
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 1753a9801..184b30282 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -816,6 +816,7 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
 	union {
 		struct sockaddr_in  _sockaddr_in;
 		struct sockaddr_in6 _sockaddr_in6;
+		struct sockaddr_ib  _sockaddr_ib;
 	} sgid_addr, dgid_addr;
 	int ret;
 
diff --git a/fs/dcache.c b/fs/dcache.c
index b280e07e1..74e90f940 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -71,7 +71,7 @@
  * If no ancestor relationship:
  * arbitrary, since it's serialized on rename_lock
  */
-int sysctl_vfs_cache_pressure __read_mostly = 100;
+int sysctl_vfs_cache_pressure __read_mostly = 50;
 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
 
 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 32868fbed..f028c8070 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -45,7 +45,11 @@ struct blk_queue_stats;
 struct blk_stat_callback;
 
 #define BLKDEV_MIN_RQ	4
+#ifdef CONFIG_ZENIFY
+#define BLKDEV_MAX_RQ	512
+#else
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
+#endif
 
 /* Must be consistent with blk_mq_poll_stats_bkt() */
 #define BLK_MQ_POLL_STATS_BKTS 16
@@ -614,7 +618,8 @@ struct request_queue {
 #define QUEUE_FLAG_RQ_ALLOC_TIME 27	/* record rq->alloc_time_ns */
 
 #define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
-				 (1 << QUEUE_FLAG_SAME_COMP))
+				 (1 << QUEUE_FLAG_SAME_COMP) |		\
+				 (1 << QUEUE_FLAG_SAME_FORCE))
 
 void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index e970f97a7..f3aa9e6c4 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -207,6 +207,10 @@ struct ftrace_likely_data {
 # define __no_fgcse
 #endif
 
+#ifndef asm_volatile_goto
+#define asm_volatile_goto(x...) asm goto(x)
+#endif
+
 /* Are two types/vars the same type (ignoring qualifiers)? */
 #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f3fe73718..8fb564dbb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -189,8 +189,7 @@ static inline void __mm_zero_struct_page(struct page *page)
  * not a hard limit any more. Although some userspace tools can be surprised by
  * that.
  */
-#define MAPCOUNT_ELF_CORE_MARGIN	(5)
-#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+#define DEFAULT_MAX_MAP_COUNT	(262144)
 
 extern int sysctl_max_map_count;
 
@@ -2613,7 +2612,7 @@ int __must_check write_one_page(struct page *page);
 void task_dirty_inc(struct task_struct *tsk);
 
 /* readahead.c */
-#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)
+#define VM_READAHEAD_PAGES	(SZ_2M / PAGE_SIZE)
 
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 			pgoff_t offset, unsigned long nr_to_read);
diff --git a/init/Kconfig b/init/Kconfig
index 74a5ac656..bc63ba750 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -61,6 +61,38 @@ config THREAD_INFO_IN_TASK
 
 menu "General setup"
 
+config ZENIFY
+	bool "A selection of patches from Zen/Liquorix kernel and additional tweaks for a better gaming experience"
+	default y
+	help
+	  Tunes the kernel for responsiveness at the cost of throughput and power usage.
+
+	  --- Virtual Memory Subsystem ---------------------------
+
+	    Mem dirty before bg writeback..: 10 % -> 20 %
+	    Mem dirty before sync writeback: 20 % -> 50 %
+
+	  --- Block Layer ----------------------------------------
+
+	    Queue depth...............: 128 -> 512
+	    Default MQ scheduler......: mq-deadline -> bfq
+
+	  --- CFS CPU Scheduler ----------------------------------
+
+	    Scheduling latency.............: 6 -> 3 ms
+	    Minimal granularity............: 0.75 -> 0.3 ms
+	    Wakeup granularity.............: 1 -> 0.5 ms
+	    CPU migration cost.............: 0.5 -> 0.25 ms
+	    Bandwidth slice size...........: 5 -> 3 ms
+	    Ondemand fine upscaling limit..: 95 % -> 85 %
+
+	  --- MuQSS CPU Scheduler --------------------------------
+
+	    Scheduling interval............: 6 -> 3 ms
+	    ISO task max realtime use......: 70 % -> 25 %
+	    Ondemand coarse upscaling limit: 80 % -> 45 %
+	    Ondemand fine upscaling limit..: 95 % -> 45 %
+
 config BROKEN
 	bool
 
@@ -1240,7 +1272,6 @@ config CC_OPTIMIZE_FOR_PERFORMANCE
 
 config CC_OPTIMIZE_FOR_PERFORMANCE_O3
 	bool "Optimize more for performance (-O3)"
-	depends on ARC
 	help
 	  Choosing this option will pass "-O3" to your compiler to optimize
 	  the kernel yet more for performance.
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9a2fbf98f..630c93d66 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -57,7 +57,7 @@ const_debug unsigned int sysctl_sched_features =
  * Number of tasks to iterate in a single balance run.
  * Limited because this is done with IRQs disabled.
  */
-const_debug unsigned int sysctl_sched_nr_migrate = 32;
+const_debug unsigned int sysctl_sched_nr_migrate = 128;
 
 /*
  * period over which we measure -rt task CPU usage in us.
@@ -69,9 +69,9 @@ __read_mostly int scheduler_running;
 
 /*
  * part of the period that we allow rt tasks to run in us.
- * default: 0.95s
+ * XanMod default: 0.98s
  */
-int sysctl_sched_rt_runtime = 950000;
+int sysctl_sched_rt_runtime = 980000;
 
 /*
  * __task_rq_lock - lock the rq @p resides on.
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index da3e5b547..0a8dc0e64 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -37,8 +37,13 @@
  *
  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
  */
+#ifdef CONFIG_ZENIFY
+unsigned int sysctl_sched_latency			= 3000000ULL;
+static unsigned int normalized_sysctl_sched_latency	= 3000000ULL;
+#else
 unsigned int sysctl_sched_latency			= 6000000ULL;
 static unsigned int normalized_sysctl_sched_latency	= 6000000ULL;
+#endif
 
 /*
  * The initial- and re-scaling of tunables is configurable
@@ -58,13 +63,22 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
  *
  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
+#ifdef CONFIG_ZENIFY
+unsigned int sysctl_sched_min_granularity		= 300000ULL;
+static unsigned int normalized_sysctl_sched_min_granularity	= 300000ULL;
+#else
 unsigned int sysctl_sched_min_granularity		= 750000ULL;
 static unsigned int normalized_sysctl_sched_min_granularity	= 750000ULL;
+#endif
 
 /*
  * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
  */
+#ifdef CONFIG_ZENIFY
+static unsigned int sched_nr_latency = 10;
+#else
 static unsigned int sched_nr_latency = 8;
+#endif
 
 /*
  * After fork, child runs first. If set to 0 (default) then
@@ -81,10 +95,17 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
  *
  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
+#ifdef CONFIG_ZENIFY
+unsigned int sysctl_sched_wakeup_granularity		= 500000UL;
+static unsigned int normalized_sysctl_sched_wakeup_granularity	= 500000UL;
+
+const_debug unsigned int sysctl_sched_migration_cost	= 250000UL;
+#else
 unsigned int sysctl_sched_wakeup_granularity		= 1000000UL;
 static unsigned int normalized_sysctl_sched_wakeup_granularity	= 1000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost	= 500000UL;
+#endif
 
 int sched_thermal_decay_shift;
 static int __init setup_sched_thermal_decay_shift(char *str)
@@ -128,8 +149,12 @@ int __weak arch_asym_cpu_priority(int cpu)
  *
  * (default: 5 msec, units: microseconds)
  */
+#ifdef CONFIG_ZENIFY
+unsigned int sysctl_sched_cfs_bandwidth_slice		= 3000UL;
+#else
 unsigned int sysctl_sched_cfs_bandwidth_slice		= 5000UL;
 #endif
+#endif
 
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6ecd10451..8a3bdff2c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -53,7 +53,11 @@ unsigned long transparent_hugepage_flags __read_mostly =
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
 	(1<