linux-tkg/linux54-tkg/linux54-tkg-patches/0005-glitched-pds.patch


From f7f49141a5dbe9c99d78196b58c44307fb2e6be3 Mon Sep 17 00:00:00 2001
From: Tk-Glitch <ti3nou@gmail.com>
Date: Wed, 4 Jul 2018 04:30:08 +0200
Subject: glitched - PDS

diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 2a202a846757..1d9c7ed79b11 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -4,7 +4,7 @@
choice
prompt "Timer frequency"
- default HZ_250
+ default HZ_500
help
Allows the configuration of the timer frequency. It is customary
to have the timer interrupt run at 1000 Hz but 100 Hz may be more
@@ -39,6 +39,13 @@ choice
on SMP and NUMA systems and exactly dividing by both PAL and
NTSC frame rates for video and multimedia work.
+ config HZ_500
+ bool "500 HZ"
+ help
+ 500 Hz is a balanced timer frequency. Provides fast interactivity
+ on desktops with great smoothness without increasing CPU power
+ consumption and sacrificing the battery life on laptops.
+
config HZ_1000
bool "1000 HZ"
help
@@ -52,6 +59,7 @@ config HZ
default 100 if HZ_100
default 250 if HZ_250
default 300 if HZ_300
+ default 500 if HZ_500
default 1000 if HZ_1000
config SCHED_HRTICK
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 2a202a846757..1d9c7ed79b11 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -4,7 +4,7 @@
choice
prompt "Timer frequency"
- default HZ_500
+ default HZ_750
help
Allows the configuration of the timer frequency. It is customary
to have the timer interrupt run at 1000 Hz but 100 Hz may be more
@@ -46,6 +46,13 @@ choice
on desktops with great smoothness without increasing CPU power
consumption and sacrificing the battery life on laptops.
+ config HZ_750
+ bool "750 HZ"
+ help
+ 750 Hz is a good timer frequency for desktops. Provides fast
+ interactivity with great smoothness without sacrificing too
+ much throughput.
+
config HZ_1000
bool "1000 HZ"
help
@@ -60,6 +67,7 @@ config HZ
default 250 if HZ_250
default 300 if HZ_300
default 500 if HZ_500
+ default 750 if HZ_750
default 1000 if HZ_1000
config SCHED_HRTICK
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9270a4370d54..30d01e647417 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -159,7 +159,7 @@ struct scan_control {
/*
* From 0 .. 100. Higher means more swappy.
*/
-int vm_swappiness = 60;
+int vm_swappiness = 20;
/*
* The total number of pages which are beyond the high watermark within all
* zones.
diff --git a/kernel/sched/pds.c b/kernel/sched/pds.c
index c2d831b242b6d18a47e0d87a9f5433a7748b52ff..5bc8d7a8f920c21feab69b2706a3328dc8d39f9a 100644
--- a/kernel/sched/pds.c
+++ b/kernel/sched/pds.c
@@ -409,12 +409,11 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
* [L] ->on_rq
* RELEASE (rq->lock)
*
- * If we observe the old CPU in task_rq_lock(), the acquire of
+ * If we observe the old CPU in task_rq_lock, the acquire of
* the old rq->lock will fully serialize against the stores.
*
- * If we observe the new CPU in task_rq_lock(), the address
- * dependency headed by '[L] rq = task_rq()' and the acquire
- * will pair with the WMB to ensure we then also see migrating.
+ * If we observe the new CPU in task_rq_lock, the acquire will
+ * pair with the WMB to ensure we must then also see migrating.
*/
if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
return rq;
@@ -952,9 +953,9 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
smp_wmb();
#ifdef CONFIG_THREAD_INFO_IN_TASK
- WRITE_ONCE(p->cpu, cpu);
+ p->cpu = cpu;
#else
- WRITE_ONCE(task_thread_info(p)->cpu, cpu);
+ task_thread_info(p)->cpu = cpu;
#endif
#endif
}
@@ -1035,7 +1036,7 @@ static void detach_task(struct rq *rq, struct task_struct *p, int target_cpu)
{
lockdep_assert_held(&rq->lock);
- WRITE_ONCE(p->on_rq ,TASK_ON_RQ_MIGRATING);
+ p->on_rq = TASK_ON_RQ_MIGRATING;
if (task_contributes_to_load(p))
rq->nr_uninterruptible++;
dequeue_task(p, rq, 0);
diff --git a/kernel/sched/pds_sched.h b/kernel/sched/pds_sched.h
index 20dcf19ea057627d91be07b4ec20f0827c30084c..24fa90ca63d144cc4f45d82d88407ea70d2d2edf 100644
--- a/kernel/sched/pds_sched.h
+++ b/kernel/sched/pds_sched.h
@@ -56,7 +56,7 @@ static inline int task_on_rq_queued(struct task_struct *p)
static inline int task_on_rq_migrating(struct task_struct *p)
{
- return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
+ return p->on_rq == TASK_ON_RQ_MIGRATING;
}
enum {
diff --git a/init/Kconfig b/init/Kconfig
index 11fd9b502d06..e9bc34d3019b 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -948,7 +948,6 @@ config CGROUP_DEVICE
config CGROUP_CPUACCT
bool "Simple CPU accounting controller"
- depends on !SCHED_PDS
help
Provides a simple controller for monitoring the
total CPU consumed by the tasks in a cgroup.
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index b23231bae996..cab4e5c5b38e 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -24,13 +24,13 @@ obj-y += fair.o rt.o deadline.o
obj-$(CONFIG_SMP) += cpudeadline.o topology.o stop_task.o
obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
obj-$(CONFIG_SCHED_DEBUG) += debug.o
-obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
endif
obj-y += loadavg.o clock.o cputime.o
obj-y += idle.o
obj-y += wait.o wait_bit.o swait.o completion.o
obj-$(CONFIG_SMP) += cpupri.o pelt.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
+obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
obj-$(CONFIG_MEMBARRIER) += membarrier.o
diff --git a/kernel/sched/pds.c b/kernel/sched/pds.c
index 9281ad164..f09a609cf 100644
--- a/kernel/sched/pds.c
+++ b/kernel/sched/pds.c
@@ -81,6 +81,18 @@ enum {
NR_CPU_AFFINITY_CHK_LEVEL
};
+/*
+ * This allows printing both to /proc/sched_debug and
+ * to the console
+ */
+#define SEQ_printf(m, x...) \
+ do { \
+ if (m) \
+ seq_printf(m, x); \
+ else \
+ pr_cont(x); \
+ } while (0)
+
static inline void print_scheduler_version(void)
{
printk(KERN_INFO "pds: PDS-mq CPU Scheduler 0.99o by Alfred Chen.\n");
@@ -6353,7 +6365,10 @@ void ia64_set_curr_task(int cpu, struct task_struct *p)
#ifdef CONFIG_SCHED_DEBUG
void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
struct seq_file *m)
-{}
+{
+ SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
+ get_nr_threads(p));
+}
void proc_sched_set_task(struct task_struct *p)
{}
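
Note (not part of the patch): the SEQ_printf() helper added above dispatches one format string either into a seq_file (when the dump goes through /proc/sched_debug) or to the kernel log via pr_cont() (when no seq_file is supplied), which is what lets the restored proc_sched_show_task() body print the task summary in both paths. As an illustration only, here is a minimal userspace sketch of that same dispatch pattern; SEQ_PRINTF and the stdio calls are stand-ins for the kernel's SEQ_printf()/seq_printf()/pr_cont(), not kernel code.

/* Illustrative sketch only -- not part of the patch above. */
#include <stdio.h>

/* Print into the given stream if one is supplied, otherwise fall back
 * to the console, mirroring the seq_printf()/pr_cont() split. */
#define SEQ_PRINTF(m, ...)                       \
	do {                                     \
		if (m)                           \
			fprintf(m, __VA_ARGS__); \
		else                             \
			printf(__VA_ARGS__);     \
	} while (0)

int main(void)
{
	/* With a stream: analogous to dumping through a seq_file. */
	SEQ_PRINTF(stderr, "%s (%d, #threads: %d)\n", "example", 1234, 1);

	/* Without one: falls back to the console, like pr_cont(). */
	SEQ_PRINTF(NULL, "%s (%d, #threads: %d)\n", "example", 1234, 1);
	return 0;
}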