master
Tk-Glitch 2021-03-03 17:30:23 +07:00
parent 93eaa3f080
commit 6c5c4de3e5
3 changed files with 82 additions and 19 deletions

@@ -53,7 +53,7 @@ else
 fi
 pkgname=("${pkgbase}" "${pkgbase}-headers")
 pkgver="${_basekernel}"."${_sub}"
-pkgrel=129
+pkgrel=130
 pkgdesc='Linux-tkg'
 arch=('x86_64') # no i686 in here
 url="http://www.kernel.org/"
@@ -374,7 +374,7 @@ case $_basever in
 #0008-5.11-bcachefs.patch
 0009-glitched-ondemand-bmq.patch
 0009-glitched-bmq.patch
-0009-prjc_v5.11-r0.patch
+0009-prjc_v5.11-r1.patch
 0011-ZFS-fix.patch
 #0012-linux-hardened.patch
 0012-misc-additions.patch
@@ -398,7 +398,7 @@ case $_basever in
 '073e7b8ab48aa9abdb5cedb5c729a2f624275ebdbe1769476231c9e712145496'
 '9fad4a40449e09522899955762c8928ae17f4cdaa16e01239fd12592e9d58177'
 'a557b342111849a5f920bbe1c129f3ff1fc1eff62c6bd6685e0972fc88e39911'
-'ad3ff525e9e7cd53e0f9a19350c144398dffa943573b9741f4cdb085b05efffe'
+'782ffe25924d5ca63d4318f6551d9855d84adb4099e662cd996ae31aa9b7fa90'
 '49262ce4a8089fa70275aad742fc914baa28d9c384f710c9a62f64796d13e104'
 '44e58452e6b759bf6b84ed880f53892ddfc404bc28dd9c8cfd2ae221a677a624')
 ;;

@@ -399,6 +399,8 @@ _tkg_srcprep() {
 rev=3
 elif [ "$_basever" = "510" ]; then
 rev=2
+elif [ "$_basever" = "511" ]; then
+rev=1
 else
 rev=0
 fi

@@ -830,10 +830,10 @@ index 5fc9c9b70862..eb6d7d87779f 100644
 obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
 diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
 new file mode 100644
-index 000000000000..cd1b0b85af6d
+index 000000000000..d5aeadfc1e9b
 --- /dev/null
 +++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,6800 @@
+@@ -0,0 +1,6861 @@
 +/*
 + * kernel/sched/alt_core.c
 + *
@@ -888,7 +888,7 @@ index 000000000000..cd1b0b85af6d
 + */
 +EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
 +
-+#define ALT_SCHED_VERSION "v5.11-r0"
++#define ALT_SCHED_VERSION "v5.11-r1"
 +
 +/* rt_prio(prio) defined in include/linux/sched/rt.h */
 +#define rt_task(p) rt_prio((p)->prio)
@@ -1982,6 +1982,8 @@ index 000000000000..cd1b0b85af6d
 + return ((p->flags & PF_KTHREAD) && (1 == p->nr_cpus_allowed));
 +}
 +
++#define MDF_FORCE_ENABLED 0x80
++
 +static void
 +__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
 +
@@ -2001,6 +2003,7 @@ index 000000000000..cd1b0b85af6d
 + preempt_disable();
 + this_rq()->nr_pinned++;
 + p->migration_disabled = 1;
++ p->migration_flags &= ~MDF_FORCE_ENABLED;
 +
 + /*
 + * Violates locking rules! see comment in __do_set_cpus_allowed().
@@ -2599,6 +2602,7 @@ index 000000000000..cd1b0b85af6d
 + if (p->cpus_ptr != &p->cpus_mask)
 + __do_set_cpus_allowed(p, &p->cpus_mask, SCA_MIGRATE_ENABLE);
 + p->migration_disabled = 0;
++ p->migration_flags |= MDF_FORCE_ENABLED;
 + /* When p is migrate_disabled, rq->lock should be held */
 + rq->nr_pinned--;
 + }
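
The three hunks above introduce MDF_FORCE_ENABLED and wire it into migrate_disable()/migrate_enable(): the bit is cleared while a task has migration disabled and set again once the full cpumask is restored, and the __cant_migrate() body added further down consults it to stay quiet for tasks that were just force-re-enabled. A minimal userspace sketch of that flag lifecycle, using a hypothetical fake_task stand-in rather than the real task_struct:

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for the relevant task_struct fields. */
struct fake_task {
    int migration_disabled;
    unsigned int migration_flags;
};

#define MDF_FORCE_ENABLED 0x80  /* same bit value as in the patch */

static void fake_migrate_disable(struct fake_task *p)
{
    p->migration_disabled = 1;
    p->migration_flags &= ~MDF_FORCE_ENABLED;  /* cleared on disable */
}

static void fake_migrate_enable(struct fake_task *p)
{
    p->migration_disabled = 0;
    p->migration_flags |= MDF_FORCE_ENABLED;   /* set once re-enabled */
}

/* Mirrors the early returns added to __cant_migrate(). */
static int would_warn(const struct fake_task *p)
{
    if (p->migration_disabled)
        return 0;
    if (p->migration_flags & MDF_FORCE_ENABLED)
        return 0;
    return 1;
}

int main(void)
{
    struct fake_task t = { 0, 0 };

    fake_migrate_disable(&t);
    assert(!would_warn(&t));   /* suppressed: migration is disabled */

    fake_migrate_enable(&t);
    assert(!would_warn(&t));   /* suppressed: force-enabled flag is set */

    printf("flag lifecycle behaves as sketched\n");
    return 0;
}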
@@ -2816,6 +2820,13 @@ index 000000000000..cd1b0b85af6d
 +static inline bool ttwu_queue_cond(int cpu, int wake_flags)
 +{
 + /*
++ * Do not complicate things with the async wake_list while the CPU is
++ * in hotplug state.
++ */
++ if (!cpu_active(cpu))
++ return false;
++
++ /*
 + * If the CPU does not share cache, then queue the task on the
 + * remote rqs wakelist to avoid accessing remote data.
 + */
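
The new guard in ttwu_queue_cond() keeps remote wakeups off the wake_list of a CPU that is no longer active, so a CPU going through hotplug never has wakeups queued onto it asynchronously; only then is the existing cache-topology shortcut consulted. A hedged sketch of that decision order with stubbed-out helpers (cpu_active_stub() and cpus_share_cache_stub() are invented placeholders, not the kernel APIs):

#include <stdbool.h>
#include <stdio.h>

/* Invented stubs standing in for hotplug and topology state. */
static bool cpu_active_stub(int cpu)            { return cpu != 3; }        /* pretend CPU3 is going down */
static bool cpus_share_cache_stub(int a, int b) { return (a / 4) == (b / 4); }

/* Same ordering as the patched ttwu_queue_cond(): an inactive CPU is
 * rejected before the cache-sharing shortcut is even considered. */
static bool ttwu_queue_cond_sketch(int this_cpu, int target_cpu)
{
    if (!cpu_active_stub(target_cpu))
        return false;                /* never queue on a dying CPU */

    if (!cpus_share_cache_stub(this_cpu, target_cpu))
        return true;                 /* remote LLC: use the wake_list */

    return false;                    /* shared cache: wake locally */
}

int main(void)
{
    printf("CPU0 -> CPU5: %d\n", ttwu_queue_cond_sketch(0, 5)); /* 1: remote LLC */
    printf("CPU0 -> CPU3: %d\n", ttwu_queue_cond_sketch(0, 3)); /* 0: inactive   */
    printf("CPU0 -> CPU1: %d\n", ttwu_queue_cond_sketch(0, 1)); /* 0: shared LLC */
    return 0;
}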
@@ -4597,7 +4608,7 @@ index 000000000000..cd1b0b85af6d
 +
 +#ifdef CONFIG_SMP
 +
-+#define SCHED_RQ_NR_MIGRATION (32UL)
++#define SCHED_RQ_NR_MIGRATION (32U)
 +/*
 + * Migrate pending tasks in @rq to @dest_cpu
 + * Will try to migrate mininal of half of @rq nr_running tasks and
@@ -6253,15 +6264,6 @@ index 000000000000..cd1b0b85af6d
 + return ret;
 +}
 +
-+/**
-+ * sys_sched_yield - yield the current processor to other threads.
-+ *
-+ * This function yields the current CPU to other tasks. It does this by
-+ * scheduling away the current task. If it still has the earliest deadline
-+ * it will be scheduled again as the next task.
-+ *
-+ * Return: 0.
-+ */
 +static void do_sched_yield(void)
 +{
 + struct rq *rq;
@@ -6289,6 +6291,14 @@ index 000000000000..cd1b0b85af6d
 + schedule();
 +}
 +
++/**
++ * sys_sched_yield - yield the current processor to other threads.
++ *
++ * This function yields the current CPU to other tasks. If there are no
++ * other threads running on this CPU then this function will return.
++ *
++ * Return: 0.
++ */
 +SYSCALL_DEFINE0(sched_yield)
 +{
 + do_sched_yield();
@@ -7085,6 +7095,25 @@ index 000000000000..cd1b0b85af6d
 + atomic_long_add(delta, &calc_load_tasks);
 +}
 +
++static void dump_rq_tasks(struct rq *rq, const char *loglvl)
++{
++ struct task_struct *g, *p;
++ int cpu = cpu_of(rq);
++
++ lockdep_assert_held(&rq->lock);
++
++ printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
++ for_each_process_thread(g, p) {
++ if (task_cpu(p) != cpu)
++ continue;
++
++ if (!task_on_rq_queued(p))
++ continue;
++
++ printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
++ }
++}
++
 +int sched_cpu_dying(unsigned int cpu)
 +{
 + struct rq *rq = cpu_rq(cpu);
@@ -7094,7 +7123,10 @@ index 000000000000..cd1b0b85af6d
 + sched_tick_stop(cpu);
 +
 + raw_spin_lock_irqsave(&rq->lock, flags);
-+ BUG_ON(rq->nr_running != 1 || rq_has_pinned_tasks(rq));
++ if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
++ WARN(true, "Dying CPU not properly vacated!");
++ dump_rq_tasks(rq, KERN_WARNING);
++ }
 + raw_spin_unlock_irqrestore(&rq->lock, flags);
 +
 + /*
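
Here the fatal BUG_ON() in sched_cpu_dying() becomes a WARN() followed by the new dump_rq_tasks() helper, so an improperly vacated runqueue now produces a warning plus a list of the leftover tasks instead of halting the machine. A small userspace analogue of that "warn and enumerate instead of crashing" check, with a hypothetical fake_rq in place of the real runqueue:

#include <stdio.h>

/* Hypothetical stand-in for the runqueue counters checked here. */
struct fake_rq {
    unsigned int nr_running;
    unsigned int nr_pinned;
};

/* Mirrors the relaxed check: complain and report what is still queued
 * rather than aborting, so the log explains why the CPU was not vacated. */
static void check_dying_cpu(const struct fake_rq *rq, int cpu)
{
    if (rq->nr_running != 1 || rq->nr_pinned) {
        fprintf(stderr, "Dying CPU%d not properly vacated!\n", cpu);
        fprintf(stderr, "CPU%d enqueued tasks (%u total), pinned: %u\n",
                cpu, rq->nr_running, rq->nr_pinned);
        /* a real dump would walk the task list, as dump_rq_tasks() does */
    }
}

int main(void)
{
    struct fake_rq ok = { 1, 0 }, bad = { 3, 1 };

    check_dying_cpu(&ok, 2);   /* silent: only the stop task remains */
    check_dying_cpu(&bad, 2);  /* warns instead of panicking */
    return 0;
}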
@@ -7413,6 +7445,35 @@ index 000000000000..cd1b0b85af6d
 +#ifdef CONFIG_SMP
 +void __cant_migrate(const char *file, int line)
 +{
++ static unsigned long prev_jiffy;
++
++ if (irqs_disabled())
++ return;
++
++ if (is_migration_disabled(current))
++ return;
++
++ if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++ return;
++
++ if (preempt_count() > 0)
++ return;
++
++ if (current->migration_flags & MDF_FORCE_ENABLED)
++ return;
++
++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++ return;
++ prev_jiffy = jiffies;
++
++ pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
++ pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
++ in_atomic(), irqs_disabled(), is_migration_disabled(current),
++ current->pid, current->comm);
++
++ debug_show_held_locks(current);
++ dump_stack();
++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 +}
 +EXPORT_SYMBOL_GPL(__cant_migrate);
 +#endif
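
The filled-in __cant_migrate() follows the usual shape of the kernel's context-assertion helpers: bail out in contexts where migration genuinely cannot happen (IRQs off, migration disabled, non-preemptible, or MDF_FORCE_ENABLED set), then throttle the splat to roughly one per second via prev_jiffy before dumping held locks and the stack. A rough userspace sketch of just that throttle, using time() in place of jiffies/HZ:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-in for the jiffies/HZ throttle in __cant_migrate():
 * emit at most one report per second, nothing suppressed before the first. */
static bool should_report(void)
{
    static time_t prev;              /* 0 means "never reported yet" */
    time_t now = time(NULL);

    if (prev && now < prev + 1)
        return false;                /* still inside the quiet window */
    prev = now;
    return true;
}

int main(void)
{
    int reported = 0;

    for (int i = 0; i < 100000; i++)
        if (should_report())
            reported++;

    /* Hammering the check in a tight loop yields at most one report per
     * second of wall time that the loop spans. */
    printf("reports emitted: %d\n", reported);
    return 0;
}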
@@ -7673,7 +7734,7 @@ index 000000000000..1212a031700e
 +{}
 diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
 new file mode 100644
-index 000000000000..cc2739f843af
+index 000000000000..192586fee177
 --- /dev/null
 +++ b/kernel/sched/alt_sched.h
 @@ -0,0 +1,638 @@
@@ -7832,7 +7893,7 @@ index 000000000000..cc2739f843af
 + u64 last_ts_switch;
 + u64 clock_task;
 +
-+ unsigned long nr_running;
++ unsigned int nr_running;
 + unsigned long nr_uninterruptible;
 +
 +#ifdef CONFIG_SCHED_HRTICK
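
The final hunk narrows rq->nr_running in alt_sched.h from unsigned long to unsigned int, which lines up with the "%u total" format used by the dump_rq_tasks() printk added above. A tiny illustration of why the specifier and the field type have to agree (variable names are made up):

#include <stdio.h>

int main(void)
{
    unsigned long nr_running_old = 3;   /* previous field type */
    unsigned int  nr_running_new = 3;   /* field type after this commit */

    printf("%u total\n", nr_running_new);   /* matches %u */
    printf("%lu total\n", nr_running_old);  /* unsigned long needs %lu; passing
                                               it to %u would trip -Wformat */
    return 0;
}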