master
Tk-Glitch 2021-03-30 20:48:57 +07:00
parent cfb19b10c6
commit b61ce06e3a
3 changed files with 35 additions and 24 deletions

@@ -53,7 +53,7 @@ else
fi
pkgname=("${pkgbase}" "${pkgbase}-headers")
pkgver="${_basekernel}"."${_sub}"
pkgrel=143
pkgrel=144
pkgdesc='Linux-tkg'
arch=('x86_64') # no i686 in here
url="http://www.kernel.org/"
@@ -365,7 +365,7 @@ case $_basever in
#0008-5.11-bcachefs.patch
0009-glitched-ondemand-bmq.patch
0009-glitched-bmq.patch
0009-prjc_v5.11-r2.patch
0009-prjc_v5.11-r3.patch
#0012-linux-hardened.patch
0012-misc-additions.patch
# MM Dirty Soft for WRITE_WATCH support in Wine
@@ -395,7 +395,7 @@ case $_basever in
'd220593436059b76c975ceee061fd124dec37fff774db45a4419c2ce1839c351'
'9fad4a40449e09522899955762c8928ae17f4cdaa16e01239fd12592e9d58177'
'a557b342111849a5f920bbe1c129f3ff1fc1eff62c6bd6685e0972fc88e39911'
'e394d4b7721f55837a8364c8311cb06cb5a59484de8aa8731e38d1aff2b7014e'
'5cd64937e3a517f49f4311c47bd692eb8e117f09d655cd456e03366373ba8060'
'7fb1104c167edb79ec8fbdcde97940ed0f806aa978bdd14d0c665a1d76d25c24'
'b1c6599d0e1ac9b66898d652ed99dae3fb8676d840a43ffa920a78d96e0521be'
'b0319a7dff9c48b2f3e3d3597ee154bf92223149a633a8b7ce4026252db86da6')

@@ -419,7 +419,7 @@ _tkg_srcprep() {
elif [ "$_basever" = "510" ]; then
rev=2
elif [ "$_basever" = "511" ]; then
rev=2
rev=3
else
rev=0
fi

@@ -837,10 +837,10 @@ index 5fc9c9b70862..eb6d7d87779f 100644
obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
new file mode 100644
index 000000000000..7b99fdbb48df
index 000000000000..0066b97100bb
--- /dev/null
+++ b/kernel/sched/alt_core.c
@@ -0,0 +1,6910 @@
@@ -0,0 +1,6914 @@
+/*
+ * kernel/sched/alt_core.c
+ *
@@ -895,7 +895,7 @@ index 000000000000..7b99fdbb48df
+ */
+EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
+
+#define ALT_SCHED_VERSION "v5.11-r2"
+#define ALT_SCHED_VERSION "v5.11-r3"
+
+/* rt_prio(prio) defined in include/linux/sched/rt.h */
+#define rt_task(p) rt_prio((p)->prio)
@@ -2026,6 +2026,9 @@ index 000000000000..7b99fdbb48df
+{
+ struct task_struct *p = current;
+
+ if (0 == p->migration_disabled)
+ return;
+
+ if (p->migration_disabled > 1) {
+ p->migration_disabled--;
+ return;
@@ -4232,7 +4235,8 @@ index 000000000000..7b99fdbb48df
+ rq->active_balance = 0;
+ /* _something_ may have changed the task, double check again */
+ if (task_on_rq_queued(p) && task_rq(p) == rq &&
+ cpumask_and(&tmp, p->cpus_ptr, &sched_sg_idle_mask)) {
+ cpumask_and(&tmp, p->cpus_ptr, &sched_sg_idle_mask) &&
+ !is_migration_disabled(p)) {
+ int cpu = cpu_of(rq);
+ int dcpu = __best_mask_cpu(cpu, &tmp,
+ per_cpu(sched_cpu_llc_mask, cpu));
@@ -4260,7 +4264,7 @@ index 000000000000..7b99fdbb48df
+ curr = rq->curr;
+ res = (!is_idle_task(curr)) && (1 == rq->nr_running) &&\
+ cpumask_intersects(curr->cpus_ptr, &sched_sg_idle_mask) &&\
+ (!rq->active_balance);
+ !is_migration_disabled(curr) && (!rq->active_balance);
+
+ if (res)
+ rq->active_balance = 1;
@@ -7790,10 +7794,10 @@ index 000000000000..1212a031700e
+{}
diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
new file mode 100644
index 000000000000..51f11bf416f4
index 000000000000..7bcd96cc6bed
--- /dev/null
+++ b/kernel/sched/alt_sched.h
@@ -0,0 +1,683 @@
@@ -0,0 +1,684 @@
+#ifndef ALT_SCHED_H
+#define ALT_SCHED_H
+
@@ -8394,7 +8398,8 @@ index 000000000000..51f11bf416f4
+{
+ struct update_util_data *data;
+
+ data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
+ data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
+ cpu_of(rq)));
+ if (data)
+ data->func(data, rq_clock(rq), flags);
+}
@@ -8704,7 +8709,7 @@ index 000000000000..13eda4b26b6a
+ boost_task(p);
+}
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 6931f0cdeb80..0c074c53c60a 100644
index 6931f0cdeb80..c5e3d3839650 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -171,6 +171,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
@@ -8715,31 +8720,37 @@ index 6931f0cdeb80..0c074c53c60a 100644
/*
* This function computes an effective utilization for the given CPU, to be
* used for frequency selection given the linear relation: f = u * f_max.
@@ -287,6 +288,13 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
sg_cpu->util = schedutil_cpu_util(sg_cpu->cpu, cpu_util_cfs(rq), max,
@@ -288,6 +289,18 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
FREQUENCY_UTIL, NULL);
}
+#else /* CONFIG_SCHED_ALT */
+static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
+
+static void sugov_get_util(struct sugov_cpu *sg_cpu)
+{
+ sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu);
+ return sg_cpu->max;
+ unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);
+
+ sg_cpu->max = max;
+ sg_cpu->bw_dl = 0;
+ sg_cpu->util = cpu_rq(sg_cpu->cpu)->nr_running ? max:0UL;
+}
+#endif
+
/**
* sugov_iowait_reset() - Reset the IO boost status of a CPU.
@@ -428,7 +436,9 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
* @sg_cpu: the sugov data for the CPU to boost
@@ -428,8 +441,10 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
*/
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
{
+#ifndef CONFIG_SCHED_ALT
if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
+#endif
sg_policy->limits_changed = true;
+#endif
}
@@ -711,6 +721,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
@@ -711,6 +726,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
}
ret = sched_setattr_nocheck(thread, &attr);
@@ -8747,7 +8758,7 @@ index 6931f0cdeb80..0c074c53c60a 100644
if (ret) {
kthread_stop(thread);
pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
@@ -943,6 +954,7 @@ struct cpufreq_governor *cpufreq_default_governor(void)
@@ -943,6 +959,7 @@ struct cpufreq_governor *cpufreq_default_governor(void)
cpufreq_governor_init(schedutil_gov);
#ifdef CONFIG_ENERGY_MODEL
@@ -8755,7 +8766,7 @@ index 6931f0cdeb80..0c074c53c60a 100644
static void rebuild_sd_workfn(struct work_struct *work)
{
rebuild_sched_domains_energy();
@@ -966,4 +978,10 @@ void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
@@ -966,4 +983,10 @@ void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
}
}
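
For orientation, the most consequential hunk above is the early return added to migrate_enable() in alt_core.c. Below is a minimal userspace sketch of that refcount guard; the struct and function names mirror the kernel's, but this is an illustration of the counter logic only, not kernel code.

/*
 * Minimal userspace model of the migration-disable refcounting in
 * alt_core.c, including the underflow guard that r3 adds to
 * migrate_enable(). Illustration only -- not kernel source.
 */
#include <stdio.h>

struct task { int migration_disabled; };

static void migrate_disable(struct task *p)
{
	p->migration_disabled++;           /* nesting depth */
}

static void migrate_enable(struct task *p)
{
	if (0 == p->migration_disabled)    /* new in r3: ignore unbalanced */
		return;                    /* calls instead of underflowing */

	if (p->migration_disabled > 1) {   /* still nested: just unwind */
		p->migration_disabled--;
		return;
	}

	p->migration_disabled = 0;         /* outermost level: the task is
	                                      migratable again */
}

int main(void)
{
	struct task p = { 0 };

	migrate_enable(&p);                /* unbalanced call: now a no-op */
	migrate_disable(&p);
	migrate_disable(&p);
	migrate_enable(&p);
	migrate_enable(&p);
	printf("migration_disabled = %d\n", p.migration_disabled); /* 0 */
	return 0;
}

In the kernel, the outermost migrate_enable() does further bookkeeping; the sketch models only the counter, which is all this hunk touches.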