linux-tkg/linux56-tkg/linux56-tkg-patches/0003-glitched-base.patch

From f7f49141a5dbe9c99d78196b58c44307fb2e6be3 Mon Sep 17 00:00:00 2001
From: Tk-Glitch <ti3nou@gmail.com>
Date: Wed, 4 Jul 2018 04:30:08 +0200
Subject: glitched
diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
index 87f1fc9..b3be470 100755
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
@@ -50,8 +50,8 @@ else
fi
UTS_VERSION="#$VERSION"
-CONFIG_FLAGS=""
-if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
+CONFIG_FLAGS="TKG"
+if [ -n "$SMP" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS SMP"; fi
if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
diff --git a/fs/dcache.c b/fs/dcache.c
index 2acfc69878f5..3f1131431e06 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -69,7 +69,7 @@
* If no ancestor relationship:
* arbitrary, since it's serialized on rename_lock
*/
-int sysctl_vfs_cache_pressure __read_mostly = 100;
+int sysctl_vfs_cache_pressure __read_mostly = 50;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 211890edf37e..37121563407d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -41,7 +41,7 @@ const_debug unsigned int sysctl_sched_features =
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
*/
-const_debug unsigned int sysctl_sched_nr_migrate = 32;
+const_debug unsigned int sysctl_sched_nr_migrate = 128;
/*
* period over which we average the RT time consumption, measured
@@ -61,9 +61,9 @@ __read_mostly int scheduler_running;
/*
* part of the period that we allow rt tasks to run in us.
- * default: 0.95s
+ * XanMod default: 0.98s
*/
-int sysctl_sched_rt_runtime = 950000;
+int sysctl_sched_rt_runtime = 980000;
/*
* __task_rq_lock - lock the rq @p resides on.
diff --git a/mm/zswap.c b/mm/zswap.c
index 61a5c41972db..2674c2806130 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -91,7 +91,7 @@ static struct kernel_param_ops zswap_enabled_param_ops = {
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
/* Crypto compressor to use */
-#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
+#define ZSWAP_COMPRESSOR_DEFAULT "lz4"
static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
const struct kernel_param *);
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 71f39410691b..288f9679e883 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -54,7 +54,7 @@ scm_version()
# If only the short version is requested, don't bother
# running further git commands
if $short; then
- echo "+"
+ # echo "+"
return
fi
# If we are past a tagged commit (like
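A quick sanity check after booting a kernel built with this commit (a hedged sketch: these are the standard procfs/sysfs paths; the zswap parameter only exists when zswap is built, and sched_nr_migrate is only exported with CONFIG_SCHED_DEBUG):
$ cat /proc/sys/vm/vfs_cache_pressure          # expect 50
$ cat /proc/sys/kernel/sched_rt_runtime_us     # expect 980000
$ cat /proc/sys/kernel/sched_nr_migrate        # expect 128 (needs SCHED_DEBUG)
$ cat /sys/module/zswap/parameters/compressor  # expect lz4
$ uname -v                                     # version string now carries "TKG"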
From f85ed068b4d0e6c31edce8574a95757a60e58b87 Mon Sep 17 00:00:00 2001
From: Etienne Juvigny <Ti3noU@gmail.com>
Date: Mon, 3 Sep 2018 17:36:25 +0200
Subject: Zenify & stuff
diff --git a/init/Kconfig b/init/Kconfig
index b4daad2bac23..c1e59dc04209 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1244,7 +1244,6 @@ config CC_OPTIMIZE_FOR_PERFORMANCE
config CC_OPTIMIZE_FOR_PERFORMANCE_O3
bool "Optimize more for performance (-O3)"
- depends on ARC
help
Choosing this option will pass "-O3" to your compiler to optimize
the kernel yet more for performance.
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 4f32c4062fb6..c0bf039e1b40 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -721,6 +721,7 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
+ struct sockaddr_ib _sockaddr_ib;
} sgid_addr, dgid_addr;
int ret;
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 0840d27381ea..73aba9a31064 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -75,6 +75,19 @@ config VT_CONSOLE_SLEEP
def_bool y
depends on VT_CONSOLE && PM_SLEEP
+config NR_TTY_DEVICES
+ int "Maximum tty device number"
+ depends on VT
+ range 12 63
+ default 63
+ ---help---
+ This option is used to change the number of tty devices in /dev.
+ The default value is 63. The lowest number you can set is 12,
+	  and 63 is also the upper limit, so we don't overrun the serial
+ consoles.
+
+ If unsure, say 63.
+
config HW_CONSOLE
bool
depends on VT && !UML
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 79226ca8f80f..2a30060e7e1d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -47,7 +47,11 @@ struct blk_queue_stats;
struct blk_stat_callback;
#define BLKDEV_MIN_RQ 4
+#ifdef CONFIG_ZENIFY
+#define BLKDEV_MAX_RQ 512
+#else
#define BLKDEV_MAX_RQ 128 /* Default maximum */
+#endif
/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS 16
diff --git a/include/uapi/linux/vt.h b/include/uapi/linux/vt.h
index e9d39c48520a..3bceead8da40 100644
--- a/include/uapi/linux/vt.h
+++ b/include/uapi/linux/vt.h
@@ -3,12 +3,25 @@
#define _UAPI_LINUX_VT_H
+/*
+ * We will make this definition solely for the purpose of making packages
+ * such as splashutils build, because they can not understand that
+ * NR_TTY_DEVICES is defined in the kernel configuration.
+ */
+#ifndef CONFIG_NR_TTY_DEVICES
+#define CONFIG_NR_TTY_DEVICES 63
+#endif
+
/*
* These constants are also useful for user-level apps (e.g., VC
* resizing).
*/
#define MIN_NR_CONSOLES 1 /* must be at least 1 */
-#define MAX_NR_CONSOLES 63 /* serial lines start at 64 */
+/*
+ * NR_TTY_DEVICES:
+ * Value MUST be at least 12 and must never be higher than 63
+ */
+#define MAX_NR_CONSOLES CONFIG_NR_TTY_DEVICES /* serial lines start above this */
/* Note: the ioctl VT_GETSTATE does not work for
consoles 16 and higher (since it returns a short) */
diff --git a/init/Kconfig b/init/Kconfig
index 041f3a022122..5ed70eb1ad3a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -45,6 +45,38 @@ config THREAD_INFO_IN_TASK
menu "General setup"
+config ZENIFY
+ bool "A selection of patches from Zen/Liquorix kernel and additional tweaks for a better gaming experience"
+ default y
+ help
+ Tunes the kernel for responsiveness at the cost of throughput and power usage.
+
+ --- Virtual Memory Subsystem ---------------------------
+
+ Mem dirty before bg writeback..: 10 % -> 20 %
+ Mem dirty before sync writeback: 20 % -> 50 %
+
+ --- Block Layer ----------------------------------------
+
+ Queue depth...............: 128 -> 512
+ Default MQ scheduler......: mq-deadline -> bfq
+
+ --- CFS CPU Scheduler ----------------------------------
+
+ Scheduling latency.............: 6 -> 3 ms
+ Minimal granularity............: 0.75 -> 0.3 ms
+ Wakeup granularity.............: 1 -> 0.5 ms
+ CPU migration cost.............: 0.5 -> 0.25 ms
+ Bandwidth slice size...........: 5 -> 3 ms
+ Ondemand fine upscaling limit..: 95 % -> 85 %
+
+ --- MuQSS CPU Scheduler --------------------------------
+
+ Scheduling interval............: 6 -> 3 ms
+ ISO task max realtime use......: 70 % -> 25 %
+ Ondemand coarse upscaling limit: 80 % -> 45 %
+ Ondemand fine upscaling limit..: 95 % -> 45 %
+
config BROKEN
bool
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2f0a0be4d344..bada807c7e59 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -37,8 +37,13 @@
*
* (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
*/
+#ifdef CONFIG_ZENIFY
+unsigned int sysctl_sched_latency = 3000000ULL;
+static unsigned int normalized_sysctl_sched_latency = 3000000ULL;
+#else
unsigned int sysctl_sched_latency = 6000000ULL;
static unsigned int normalized_sysctl_sched_latency = 6000000ULL;
+#endif
/*
* The initial- and re-scaling of tunables is configurable
@@ -58,13 +63,22 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_L
*
* (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
+#ifdef CONFIG_ZENIFY
+unsigned int sysctl_sched_min_granularity = 300000ULL;
+static unsigned int normalized_sysctl_sched_min_granularity = 300000ULL;
+#else
unsigned int sysctl_sched_min_granularity = 750000ULL;
static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
+#endif
/*
* This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
*/
+#ifdef CONFIG_ZENIFY
+static unsigned int sched_nr_latency = 10;
+#else
static unsigned int sched_nr_latency = 8;
+#endif
/*
* After fork, child runs first. If set to 0 (default) then
@@ -81,10 +95,17 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
*
* (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
+#ifdef CONFIG_ZENIFY
+unsigned int sysctl_sched_wakeup_granularity = 500000UL;
+static unsigned int normalized_sysctl_sched_wakeup_granularity = 500000UL;
+
+const_debug unsigned int sysctl_sched_migration_cost = 50000UL;
+#else
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+#endif
#ifdef CONFIG_SMP
/*
@@ -107,8 +128,12 @@ int __weak arch_asym_cpu_priority(int cpu)
*
* (default: 5 msec, units: microseconds)
*/
+#ifdef CONFIG_ZENIFY
+unsigned int sysctl_sched_cfs_bandwidth_slice = 3000UL;
+#else
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif
+#endif
/*
* The margin used when comparing utilization with CPU capacity:
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 337c6afb3345..9315e358f292 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -71,7 +71,11 @@ static long ratelimit_pages = 32;
/*
* Start background writeback (via writeback threads) at this percentage
*/
+#ifdef CONFIG_ZENIFY
+int dirty_background_ratio = 20;
+#else
int dirty_background_ratio = 10;
+#endif
/*
* dirty_background_bytes starts at 0 (disabled) so that it is a function of
@@ -88,7 +92,11 @@ int vm_highmem_is_dirtyable;
/*
* The generator of dirty data starts writeback at this percentage
*/
+#ifdef CONFIG_ZENIFY
+int vm_dirty_ratio = 50;
+#else
int vm_dirty_ratio = 20;
+#endif
/*
* vm_dirty_bytes starts at 0 (disabled) so that it is a function of
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 80dad301361d..42b7fa7d01f8 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -702,6 +702,9 @@ choice
config DEFAULT_VEGAS
bool "Vegas" if TCP_CONG_VEGAS=y
+ config DEFAULT_YEAH
+ bool "YeAH" if TCP_CONG_YEAH=y
+
config DEFAULT_VENO
bool "Veno" if TCP_CONG_VENO=y
@@ -735,6 +738,7 @@ config DEFAULT_TCP_CONG
default "htcp" if DEFAULT_HTCP
default "hybla" if DEFAULT_HYBLA
default "vegas" if DEFAULT_VEGAS
+ default "yeah" if DEFAULT_YEAH
default "westwood" if DEFAULT_WESTWOOD
default "veno" if DEFAULT_VENO
default "reno" if DEFAULT_RENO
From: Nick Desaulniers <ndesaulniers@google.com>
Date: Mon, 24 Dec 2018 13:37:41 +0200
Subject: include/linux/compiler*.h: define asm_volatile_goto
asm_volatile_goto should also be defined for other compilers that
support asm goto.
Fixes commit 815f0dd ("include/linux/compiler*.h: make compiler-*.h
mutually exclusive").
Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index ba814f1..e77eeb0 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -188,6 +188,10 @@ struct ftrace_likely_data {
#define asm_volatile_goto(x...) asm goto(x)
#endif
+#ifndef asm_volatile_goto
+#define asm_volatile_goto(x...) asm goto(x)
+#endif
+
/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
From: Andy Lavr <andy.lavr@gmail.com>
Date: Mon, 24 Dec 2018 14:57:47 +0200
Subject: avl: Use [defer+madvise] as default khugepaged defrag strategy
For some reason, the default strategy to respond to THP fault fallbacks
is still just madvise, meaning stall if the program wants transparent
hugepages, but don't trigger a background reclaim / compaction if THP
begins to fail allocations. This creates a snowball effect where we
still use the THP code paths, but we almost always fail once a system
has been active and busy for a while.
The option "defer" was created for interactive systems where THP can
still improve performance. If we have to fallback to a regular page due
to an allocation failure or anything else, we will trigger a background
reclaim and compaction so future THP attempts succeed and previous
attempts eventually have their smaller pages combined without stalling
running applications.
We still want madvise to stall applications that explicitly want THP,
so defer+madvise _does_ make a ton of sense. Make it the default for
interactive systems, especially if the kernel maintainer left
transparent hugepages on "always".
Reasoning and details in the original patch:
https://lwn.net/Articles/711248/
Signed-off-by: Andy Lavr <andy.lavr@gmail.com>
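A minimal way to confirm the resulting default at runtime (a sketch assuming the standard THP sysfs layout, where the bracketed entry marks the active mode):
$ cat /sys/kernel/mm/transparent_hugepage/defrag
always defer [defer+madvise] madvise never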
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e84a10b..21d62b7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -53,7 +53,11 @@ unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
+#ifdef CONFIG_AVL_INTERACTIVE
+ (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG)|
+#else
(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
+#endif
(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -429,6 +429,9 @@
Select the queueing discipline that will be used by default
for all network devices.
+ config DEFAULT_CAKE
+ bool "Common Applications Kept Enhanced" if NET_SCH_CAKE
+
config DEFAULT_FQ
bool "Fair Queue" if NET_SCH_FQ
@@ -448,6 +451,7 @@
config DEFAULT_NET_SCH
string
default "pfifo_fast" if DEFAULT_PFIFO_FAST
+ default "cake" if DEFAULT_CAKE
default "fq" if DEFAULT_FQ
default "fq_codel" if DEFAULT_FQ_CODEL
default "sfq" if DEFAULT_SFQ
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a29043ea9..3fb219747 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -263,7 +263,7 @@ compound_page_dtor * const compound_page_dtors[] = {
#else
int watermark_boost_factor __read_mostly = 15000;
#endif
-int watermark_scale_factor = 10;
+int watermark_scale_factor = 200;
static unsigned long nr_kernel_pages __initdata;
static unsigned long nr_all_pages __initdata;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 80bb6408f..6c8b55cd1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -146,8 +146,7 @@ extern int mmap_rnd_compat_bits __read_mostly;
* not a hard limit any more. Although some userspace tools can be surprised by
* that.
*/
-#define MAPCOUNT_ELF_CORE_MARGIN (5)
-#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+#define DEFAULT_MAX_MAP_COUNT (262144)
extern int sysctl_max_map_count;
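Both MM defaults raised in the last two hunks are readable through the usual sysctl paths (sketch):
$ cat /proc/sys/vm/watermark_scale_factor    # expect 200
$ cat /proc/sys/vm/max_map_count             # expect 262144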
From adb1f9df27f08e6488bcd80b1607987c6114a77a Mon Sep 17 00:00:00 2001
From: Alexandre Frade <admfrade@gmail.com>
Date: Mon, 25 Nov 2019 15:13:06 -0300
Subject: [PATCH] elevator: set default scheduler to bfq for blk-mq
Signed-off-by: Alexandre Frade <admfrade@gmail.com>
---
block/elevator.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index 076ba7308e65..81f89095aa77 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -623,15 +623,15 @@ static inline bool elv_support_iosched(struct request_queue *q)
}
/*
- * For single queue devices, default to using mq-deadline. If we have multiple
- * queues or mq-deadline is not available, default to "none".
+ * For single queue devices, default to using bfq. If we have multiple
+ * queues or bfq is not available, default to "none".
*/
static struct elevator_type *elevator_get_default(struct request_queue *q)
{
if (q->nr_hw_queues != 1)
return NULL;
- return elevator_get(q, "mq-deadline", false);
+ return elevator_get(q, "bfq", false);
}
/*
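On a single-queue device the new default shows up as the active elevator (a sketch; sda is an example device, and bfq must be built in or loaded for the fallback to succeed):
$ cat /sys/block/sda/queue/scheduler    # expect bfq bracketed as active, e.g. "[bfq] none"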
From c3ec05777c46e19a8a26d0fc4ca0c0db8a19de97 Mon Sep 17 00:00:00 2001
From: Alexandre Frade <admfrade@gmail.com>
Date: Fri, 10 May 2019 16:45:59 -0300
Subject: [PATCH] block: set rq_affinity = 2 for full multithreading I/O
requests
Signed-off-by: Alexandre Frade <admfrade@gmail.com>
---
include/linux/blkdev.h | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f3ea78b0c91c..4dbacc6b073b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -621,7 +621,8 @@ struct request_queue {
#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */
#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_SAME_COMP))
+ (1 << QUEUE_FLAG_SAME_COMP) | \
+ (1 << QUEUE_FLAG_SAME_FORCE))
void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
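QUEUE_FLAG_SAME_FORCE is what sysfs reports as rq_affinity=2, so the new default can be verified per device (sketch; sda is an example):
$ cat /sys/block/sda/queue/rq_affinity    # expect 2 with this patch, 1 without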
From 8171d33d0b84a953649863538fdbe4c26c035e4f Mon Sep 17 00:00:00 2001
From: Alexandre Frade <admfrade@gmail.com>
Date: Fri, 10 May 2019 14:32:50 -0300
Subject: [PATCH] mm: set 2 megabytes for address_space-level file read-ahead
pages size
Signed-off-by: Alexandre Frade <admfrade@gmail.com>
---
include/linux/mm.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a2adf95b3f9c..e804d9f7583a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2416,7 +2416,7 @@ int __must_check write_one_page(struct page *page);
void task_dirty_inc(struct task_struct *tsk);
/* readahead.c */
-#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE)
+#define VM_READAHEAD_PAGES (SZ_2M / PAGE_SIZE)
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
pgoff_t offset, unsigned long nr_to_read);
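VM_READAHEAD_PAGES feeds the default per-device readahead window, so the change should appear as roughly 2 MiB in sysfs (a sketch; sda is an example device, and udev rules or filesystems may override the default):
$ cat /sys/block/sda/queue/read_ahead_kb    # expect 2048 instead of the stock 128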
From de7119e3db9fdb4c704355854a02a7e9fad931d4 Mon Sep 17 00:00:00 2001
From: Steven Barrett <steven@liquorix.net>
Date: Wed, 15 Jan 2020 20:43:56 -0600
Subject: [PATCH] ZEN: intel-pstate: Implement "enable" parameter
If intel-pstate is compiled into the kernel, it will preempt the loading
of acpi-cpufreq so you can take advantage of hardware p-states without
any friction.
However, intel-pstate is not completely superior to cpufreq's ondemand
for one reason: there's no concept of an up_threshold property.
In ondemand, up_threshold essentially reduces the maximum utilization to
compare against, allowing you to hit max frequencies and turbo boost
from a much lower core utilization.
With intel-pstate, you have the concept of minimum and maximum
performance, but no tunable that lets you define that maximum frequency
should mean, say, 50% core utilization. For just this oversight, there
are reasons you may want ondemand.
Let's support setting "enable" in the kernel boot parameters. This lets
kernel maintainers include "intel_pstate=disable" statically in the
built-in boot parameters, while still letting users of the kernel
override this selection.
---
Documentation/admin-guide/kernel-parameters.txt | 3 +++
drivers/cpufreq/intel_pstate.c | 2 ++
2 files changed, 5 insertions(+)
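Hypothetical usage from the bootloader side: with "intel_pstate=disable" baked into the built-in command line, a user appends the new parameter and the later occurrence wins, since boot parameters are parsed left to right (a sketch):
$ grep -o 'intel_pstate=[a-z]*' /proc/cmdline
intel_pstate=disable
intel_pstate=enable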
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index ade4e6ec23e03..0b613370d28d8 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1765,6 +1765,9 @@
disable
Do not enable intel_pstate as the default
scaling driver for the supported processors
+ enable
+			enable
+			  Enable intel_pstate in case "disable" was passed
+ previously in the kernel boot parameters
passive
Use intel_pstate as a scaling driver, but configure it
to work with generic cpufreq governors (instead of
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index d2fa3e9ccd97c..bd10cb02fc0ff 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2826,6 +2826,8 @@ static int __init intel_pstate_setup(char *str)
pr_info("HWP disabled\n");
no_hwp = 1;
}
+ if (!strcmp(str, "enable"))
+ no_load = 0;
if (!strcmp(str, "force"))
force_load = 1;
if (!strcmp(str, "hwp_only"))
From: Tk-Glitch <ti3nou@gmail.com>
Date: Mon, 6 Apr 2020 7:20:12 +0100
Subject: Import backported drm amd patchset based on
https://patchwork.freedesktop.org/series/74931/#rev2
Due to recent firmware changes, the powerplay power limit
isn't being applied/used. This patchset addresses
the issue.
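Once the patchset is applied, whether the limit is honored again can be spot-checked through amdgpu's standard hwmon interface (a sketch; the hwmon index varies per system, and hwmon reports power in microwatts):
$ cat /sys/class/hwmon/hwmon0/power1_cap    # current power limit, microwatts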
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 657a6f17e91f..323e7e61493b 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -570,6 +570,7 @@ struct pptable_funcs {
int (*override_pcie_parameters)(struct smu_context *smu);
uint32_t (*get_pptable_power_limit)(struct smu_context *smu);
int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu);
+ int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src);
};
int smu_load_microcode(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index 6900877de845..40c35bcc5a0a 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -211,4 +211,7 @@ static inline int smu_send_smc_msg(struct smu_context *smu, enum smu_message_typ
#define smu_disable_umc_cdr_12gbps_workaround(smu) \
((smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround ? (smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround((smu)) : 0)
+#define smu_set_power_source(smu, power_src) \
+ ((smu)->ppt_funcs->set_power_source ? (smu)->ppt_funcs->set_power_source((smu), (power_src)) : 0)
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 1c88219fe403..674e426ed59b 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -267,4 +267,7 @@ uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu);
int smu_v11_0_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level);
+int smu_v11_0_set_power_source(struct smu_context *smu,
+ enum smu_power_src_type power_src);
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 4fd77c7cfc80..20174bed11ce 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -1939,3 +1939,17 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
return ret;
}
+int smu_v11_0_set_power_source(struct smu_context *smu,
+ enum smu_power_src_type power_src)
+{
+ int pwr_source;
+
+ pwr_source = smu_power_get_index(smu, (uint32_t)power_src);
+ if (pwr_source < 0)
+ return -EINVAL;
+
+ return smu_send_smc_msg_with_param(smu,
+ SMU_MSG_NotifyPowerSource,
+ pwr_source);
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index d66dfa7410b6..a23eaac28095 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -2369,6 +2369,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_pptable_power_limit = navi10_get_pptable_power_limit,
.run_btc = navi10_run_btc,
.disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround,
+ .set_power_source = smu_v11_0_set_power_source,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index f6d4b0ef46ad..2cfb911ab370 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -1154,6 +1154,21 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
}
}
}
+
+ if (adev->asic_type >= CHIP_NAVI10 &&
+ adev->asic_type <= CHIP_NAVI12) {
+ /*
+ * For Navi1X, manually switch it to AC mode as PMFW
+ * may boot it with DC mode.
+ * TODO: should check whether we are indeed under AC
+ * mode before doing this.
+ */
+ ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
+ if (ret) {
+ pr_err("Failed to switch to AC mode!\n");
+ return ret;
+ }
+ }
}
if (adev->asic_type != CHIP_ARCTURUS) {
ret = smu_notify_display_change(smu);
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 2cfb911ab370..54d156bbc0f3 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -1155,15 +1155,15 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
}
}
- if (adev->asic_type >= CHIP_NAVI10 &&
- adev->asic_type <= CHIP_NAVI12) {
+ if (smu->ppt_funcs->set_power_source) {
/*
* For Navi1X, manually switch it to AC mode as PMFW
* may boot it with DC mode.
- * TODO: should check whether we are indeed under AC
- * mode before doing this.
*/
- ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
+ if (adev->pm.ac_power)
+ ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
+ else
+ ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
if (ret) {
pr_err("Failed to switch to AC mode!\n");
return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 323e7e61493b..18172dfec947 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -408,6 +408,7 @@ struct smu_context
uint32_t smc_if_version;
bool uploading_custom_pp_table;
+ bool dc_controlled_by_gpio;
};
struct i2c_adapter;
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index a23eaac28095..9c60b38ab53a 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -347,7 +347,6 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
| FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
| FEATURE_MASK(FEATURE_BACO_BIT)
- | FEATURE_MASK(FEATURE_ACDC_BIT)
| FEATURE_MASK(FEATURE_GFX_SS_BIT)
| FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
| FEATURE_MASK(FEATURE_FW_CTF_BIT)
@@ -391,6 +390,9 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT);
+ if (smu->dc_controlled_by_gpio)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);
+
/* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */
if (is_asic_secure(smu)) {
/* only for navi10 A0 */
@@ -525,6 +527,9 @@ static int navi10_store_powerplay_table(struct smu_context *smu)
table_context->thermal_controller_type = powerplay_table->thermal_controller_type;
+ if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC)
+ smu->dc_controlled_by_gpio = true;
+
mutex_lock(&smu_baco->mutex);
if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index bc3cf04a1a94..f197f1be0969 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -92,6 +92,9 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
if (adev->powerplay.pp_funcs->enable_bapm)
amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
mutex_unlock(&adev->pm.mutex);
+
+ if (is_support_sw_smu(adev))
+ smu_set_ac_dc(&adev->smu);
}
}
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 54d156bbc0f3..6f4015f87781 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -2087,6 +2087,29 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
return 0;
}
+int smu_set_ac_dc(struct smu_context *smu)
+{
+ int ret = 0;
+
+ /* controlled by firmware */
+ if (smu->dc_controlled_by_gpio)
+ return 0;
+
+ mutex_lock(&smu->mutex);
+ if (smu->ppt_funcs->set_power_source) {
+ if (smu->adev->pm.ac_power)
+ ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
+ else
+ ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
+ if (ret)
+ pr_err("Failed to switch to %s mode!\n",
+ smu->adev->pm.ac_power ? "AC" : "DC");
+ }
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
const struct amd_ip_funcs smu_ip_funcs = {
.name = "smu",
.early_init = smu_early_init,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 18172dfec947..ae2c318dd6fa 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -720,6 +720,7 @@ int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
int smu_set_display_count(struct smu_context *smu, uint32_t count);
+int smu_set_ac_dc(struct smu_context *smu);
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type);
const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature);
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 20174bed11ce..d19e1d0d56c0 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -1525,6 +1525,12 @@ int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
return ret;
}
+static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
+{
+ return smu_send_smc_msg(smu,
+ SMU_MSG_ReenableAcDcInterrupt);
+}
+
#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
@@ -1558,6 +1565,9 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
break;
}
+ } else if (client_id == SOC15_IH_CLIENTID_MP1) {
+ if (src_id == 0xfe)
+ smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
}
return 0;
@@ -1597,6 +1607,12 @@ int smu_v11_0_register_irq_handler(struct smu_context *smu)
if (ret)
return ret;
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
+ 0xfe,
+ irq_src);
+ if (ret)
+ return ret;
+
return ret;
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2214,7 +2214,7 @@ static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
gfn_t start_gfn = gpa >> PAGE_SHIFT;
gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
- gfn_t nr_pages_avail;
+ gfn_t nr_pages_avail = 0;
/* Update ghc->generation before performing any error checks. */
ghc->generation = slots->generation;