Tk-Glitch 2020-05-09 19:34:25 +07:00
parent 8242887a7a
commit 3f8139e4a4
2 changed files with 79 additions and 33 deletions

@@ -89,7 +89,7 @@ pkgname=("${pkgbase}" "${pkgbase}-headers")
_basekernel=5.6
_sub=11
pkgver="${_basekernel}"."${_sub}"
pkgrel=22
pkgrel=23
pkgdesc='Linux-tkg'
arch=('x86_64') # no i686 in here
url="http://www.kernel.org/"
@@ -120,7 +120,7 @@ source=("https://www.kernel.org/pub/linux/kernel/v5.x/linux-${_basekernel}.tar.x
#0008-5.6-bcachefs.patch
0009-glitched-ondemand-bmq.patch
0009-glitched-bmq.patch
0009-bmq_v5.6-r3.patch
0009-bmq_v5.6-r4.patch
0011-ZFS-fix.patch
0012-linux-hardened.patch # https://github.com/anthraxx/linux-hardened
0013-tp_smapi_ec.patch
@@ -145,7 +145,7 @@ sha256sums=('e342b04a2aa63808ea0ef1baab28fc520bd031ef8cf93d9ee4a31d4058fcb622'
'cd225e86d72eaf6c31ef3d7b20df397f4cc44ddd04389850691292cdf292b204'
'9fad4a40449e09522899955762c8928ae17f4cdaa16e01239fd12592e9d58177'
'965a517a283f265a012545fbb5cc9e516efc9f6166d2aa1baf7293a32a1086b7'
'2340925904efa3594cc65a7bae4fbff233d5d8bc7db605ce08acaca7450d2471'
'1b95d36635c7dc48ce45a33d6b1f4eb6d34f51600901395d28fd22f28daee8e9'
'49262ce4a8089fa70275aad742fc914baa28d9c384f710c9a62f64796d13e104'
'573914ae79eb564032dce7e0c805fd59440696c148037b77013c8a4d5c4bd3b6'
'4a83c17a33779df304ee44ad8e736069b25d917baec429ecdd193fe1a9a63576')
@@ -249,7 +249,7 @@ prepare() {
patch -Np1 -i ../0005-glitched-pds.patch
elif [ "${_cpusched}" == "bmq" ]; then
# BMQ
patch -Np1 -i ../0009-bmq_v5.6-r3.patch
patch -Np1 -i ../0009-bmq_v5.6-r4.patch
if [ "${_aggressive_ondemand}" == "true" ]; then
patch -Np1 -i ../0009-glitched-ondemand-bmq.patch
fi

@@ -571,10 +571,10 @@ index 21fb5a5662b5..ac31239aa51a 100644
obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
diff --git a/kernel/sched/bmq.c b/kernel/sched/bmq.c
new file mode 100644
index 000000000000..ad0d073666ae
index 000000000000..10560f7720e2
--- /dev/null
+++ b/kernel/sched/bmq.c
@@ -0,0 +1,5969 @@
@@ -0,0 +1,6015 @@
+/*
+ * kernel/sched/bmq.c
+ *
@@ -647,7 +647,7 @@ index 000000000000..ad0d073666ae
+
+static inline void print_scheduler_version(void)
+{
+ printk(KERN_INFO "bmq: BMQ CPU Scheduler 5.6-r3 by Alfred Chen.\n");
+ printk(KERN_INFO "bmq: BMQ CPU Scheduler 5.6-r4 by Alfred Chen.\n");
+}
+
+/**
@@ -1057,6 +1057,16 @@ index 000000000000..ad0d073666ae
+ * Add/Remove/Requeue task to/from the runqueue routines
+ * Context: rq->lock
+ */
+static inline void __dequeue_task(struct task_struct *p, struct rq *rq, int flags)
+{
+ psi_dequeue(p, flags & DEQUEUE_SLEEP);
+ sched_info_dequeued(rq, p);
+
+ list_del(&p->bmq_node);
+ if (list_empty(&rq->queue.heads[p->bmq_idx]))
+ clear_bit(p->bmq_idx, rq->queue.bitmap);
+}
+
+static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
+{
+ lockdep_assert_held(&rq->lock);
@@ -1064,6 +1074,9 @@ index 000000000000..ad0d073666ae
+ WARN_ONCE(task_rq(p) != rq, "bmq: dequeue task reside on cpu%d from cpu%d\n",
+ task_cpu(p), cpu_of(rq));
+
+ psi_dequeue(p, flags & DEQUEUE_SLEEP);
+ sched_info_dequeued(rq, p);
+
+ list_del(&p->bmq_node);
+ if (list_empty(&rq->queue.heads[p->bmq_idx])) {
+ clear_bit(p->bmq_idx, rq->queue.bitmap);
@@ -1076,9 +1089,16 @@ index 000000000000..ad0d073666ae
+#endif
+
+ sched_update_tick_dependency(rq);
+ psi_dequeue(p, flags & DEQUEUE_SLEEP);
+}
+
+ sched_info_dequeued(rq, p);
+static inline void __enqueue_task(struct task_struct *p, struct rq *rq, int flags)
+{
+ sched_info_queued(rq, p);
+ psi_enqueue(p, flags);
+
+ p->bmq_idx = task_sched_prio(p);
+ list_add_tail(&p->bmq_node, &rq->queue.heads[p->bmq_idx]);
+ set_bit(p->bmq_idx, rq->queue.bitmap);
+}
+
+static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
@@ -1088,9 +1108,7 @@ index 000000000000..ad0d073666ae
+ WARN_ONCE(task_rq(p) != rq, "bmq: enqueue task reside on cpu%d to cpu%d\n",
+ task_cpu(p), cpu_of(rq));
+
+ p->bmq_idx = task_sched_prio(p);
+ list_add_tail(&p->bmq_node, &rq->queue.heads[p->bmq_idx]);
+ set_bit(p->bmq_idx, rq->queue.bitmap);
+ __enqueue_task(p, rq, flags);
+ update_sched_rq_watermark(rq);
+ ++rq->nr_running;
+#ifdef CONFIG_SMP
@@ -1100,9 +1118,6 @@ index 000000000000..ad0d073666ae
+
+ sched_update_tick_dependency(rq);
+
+ sched_info_queued(rq, p);
+ psi_enqueue(p, flags);
+
+ /*
+ * If in_iowait is set, the code below may not trigger any cpufreq
+ * utilization updates, so do it here explicitly with the IOWAIT flag
@@ -3792,9 +3807,9 @@ index 000000000000..ad0d073666ae
+ (p = rq_next_bmq_task(skip, rq)) != rq->idle) {
+ skip = rq_next_bmq_task(p, rq);
+ if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
+ dequeue_task(p, rq, 0);
+ __dequeue_task(p, rq, 0);
+ set_task_cpu(p, dest_cpu);
+ enqueue_task(p, dest_rq, 0);
+ __enqueue_task(p, dest_rq, 0);
+ nr_migrated++;
+ }
+ nr_tries--;
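
The hunks above split the raw queue manipulation out of dequeue_task()/enqueue_task() into __dequeue_task()/__enqueue_task() helpers (list, bitmap, psi and sched_info work), so migrate_pending_tasks() can move tasks with the raw variants while nr_running and the run-queue watermark are adjusted once by the caller. A minimal user-space sketch of that split, with toy task/rq types standing in for the kernel structures and the psi/sched_info hooks omitted:

#include <stdio.h>

#define NR_PRIO 8

struct task {
        int prio;
        struct task *next;
};

struct rq {
        struct task *heads[NR_PRIO];   /* one list per priority level */
        unsigned int bitmap;           /* which levels are non-empty */
        int nr_running;
};

/* Raw helpers: touch only the priority lists and the bitmap. */
static void __enqueue_task(struct task *p, struct rq *rq)
{
        p->next = rq->heads[p->prio];
        rq->heads[p->prio] = p;
        rq->bitmap |= 1u << p->prio;
}

static void __dequeue_task(struct task *p, struct rq *rq)
{
        struct task **pp = &rq->heads[p->prio];

        while (*pp && *pp != p)
                pp = &(*pp)->next;
        if (*pp)
                *pp = p->next;
        if (!rq->heads[p->prio])
                rq->bitmap &= ~(1u << p->prio);
}

/* Wrappers add the per-rq accounting (in the patch, enqueue_task() calls the
 * raw helper; dequeue_task() open-codes the same steps). */
static void enqueue_task(struct task *p, struct rq *rq)
{
        __enqueue_task(p, rq);
        rq->nr_running++;
}

static void dequeue_task(struct task *p, struct rq *rq)
{
        __dequeue_task(p, rq);
        rq->nr_running--;
}

/* Like migrate_pending_tasks(): move tasks with the raw helpers and let the
 * caller adjust nr_running once for the whole batch. */
static int migrate_all(struct rq *src, struct rq *dst)
{
        int nr = 0;

        for (int prio = 0; prio < NR_PRIO; prio++) {
                while (src->heads[prio]) {
                        struct task *p = src->heads[prio];

                        __dequeue_task(p, src);
                        __enqueue_task(p, dst);
                        nr++;
                }
        }
        return nr;
}

int main(void)
{
        struct rq a = { 0 }, b = { 0 };
        struct task t1 = { .prio = 1 }, t2 = { .prio = 3 };
        int nr;

        enqueue_task(&t1, &a);
        enqueue_task(&t2, &a);

        nr = migrate_all(&a, &b);
        a.nr_running -= nr;            /* caller-side batch accounting */
        b.nr_running += nr;
        printf("migrated %d: a.nr_running=%d b.nr_running=%d\n",
               nr, a.nr_running, b.nr_running);
        return 0;
}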
@@ -3827,15 +3842,28 @@ index 000000000000..ad0d073666ae
+ spin_acquire(&src_rq->lock.dep_map,
+ SINGLE_DEPTH_NESTING, 1, _RET_IP_);
+
+ nr_migrated = migrate_pending_tasks(src_rq, rq, cpu);
+ if ((nr_migrated = migrate_pending_tasks(src_rq, rq, cpu))) {
+ src_rq->nr_running -= nr_migrated;
+#ifdef CONFIG_SMP
+ if (src_rq->nr_running < 2)
+ cpumask_clear_cpu(i, &sched_rq_pending_mask);
+#endif
+ rq->nr_running += nr_migrated;
+#ifdef CONFIG_SMP
+ if (rq->nr_running > 1)
+ cpumask_set_cpu(cpu, &sched_rq_pending_mask);
+#endif
+ update_sched_rq_watermark(rq);
+ cpufreq_update_util(rq, 0);
+
+ spin_release(&src_rq->lock.dep_map, _RET_IP_);
+ do_raw_spin_unlock(&src_rq->lock);
+
+ if (nr_migrated) {
+ cpufreq_update_util(rq, 0);
+ return 1;
+ }
+
+ spin_release(&src_rq->lock.dep_map, _RET_IP_);
+ do_raw_spin_unlock(&src_rq->lock);
+ }
+ } while (++affinity_mask < end_mask);
+
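In the hunk above, take_other_rq_tasks() now does the nr_running bookkeeping itself, and only when migrate_pending_tasks() actually moved something; the pending-CPU mask, watermark and cpufreq update are likewise confined to the success path before the source lock is dropped. A small standalone sketch of that assign-and-test pattern, with a stub standing in for migrate_pending_tasks():

#include <stdbool.h>
#include <stdio.h>

struct rq { int nr_running; };

/* Stub in place of migrate_pending_tasks(): pretend 'pending' tasks moved. */
static int migrate_pending_tasks(struct rq *src, struct rq *dst, int pending)
{
        (void)src;
        (void)dst;
        return pending;
}

static bool take_other_rq_tasks(struct rq *src, struct rq *dst, int pending)
{
        int nr_migrated;

        /* All accounting lives on the success path; if nothing was pulled,
         * the source queue is simply unlocked (not shown) and the scan
         * moves on to the next candidate CPU. */
        if ((nr_migrated = migrate_pending_tasks(src, dst, pending))) {
                src->nr_running -= nr_migrated;
                dst->nr_running += nr_migrated;
                return true;
        }
        return false;
}

int main(void)
{
        struct rq src = { .nr_running = 3 }, dst = { .nr_running = 1 };

        if (take_other_rq_tasks(&src, &dst, 2))
                printf("pulled 2 tasks: src=%d dst=%d\n",
                       src.nr_running, dst.nr_running);
        return 0;
}
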
@@ -3871,18 +3899,39 @@ index 000000000000..ad0d073666ae
+
+ if (unlikely(rq->skip)) {
+ next = rq_runnable_task(rq);
+ if (next == rq->idle) {
+#ifdef CONFIG_SMP
+ if (next == rq->idle && take_other_rq_tasks(rq, cpu))
+ next = rq_runnable_task(rq);
+ if (!take_other_rq_tasks(rq, cpu)) {
+#endif
+ rq->skip = NULL;
+ schedstat_inc(rq->sched_goidle);
+ return next;
+#ifdef CONFIG_SMP
+ }
+ next = rq_runnable_task(rq);
+#endif
+ }
+ rq->skip = NULL;
+#ifdef CONFIG_HIGH_RES_TIMERS
+ hrtick_start(rq, next->time_slice);
+#endif
+ return next;
+ }
+
+ next = rq_first_bmq_task(rq);
+ if (next == rq->idle) {
+#ifdef CONFIG_SMP
+ if (next == rq->idle && take_other_rq_tasks(rq, cpu))
+ return rq_first_bmq_task(rq);
+ if (!take_other_rq_tasks(rq, cpu)) {
+#endif
+ schedstat_inc(rq->sched_goidle);
+ return next;
+#ifdef CONFIG_SMP
+ }
+ next = rq_first_bmq_task(rq);
+#endif
+ }
+#ifdef CONFIG_HIGH_RES_TIMERS
+ hrtick_start(rq, next->time_slice);
+#endif
+ return next;
+}
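
The reworked choose_next_task() above now bumps sched_goidle itself when it ends up returning the idle task and arms the high-resolution tick for whatever task it picked, which is why the following hunk drops both from __schedule(). A toy sketch of that control flow, with the SMP work-pulling branch left out and a printf standing in for hrtick_start():

#include <stdio.h>

struct task {
        const char *name;
        long long time_slice;          /* remaining slice in ns */
};

struct rq {
        struct task *idle;
        struct task *queued;           /* first runnable task, or NULL */
        long long sched_goidle;
};

/* Stand-in for hrtick_start(): just report what would be armed. */
static void hrtick_start(struct rq *rq, long long ns)
{
        (void)rq;
        printf("arming hrtick for %lld ns\n", ns);
}

static struct task *choose_next_task(struct rq *rq)
{
        struct task *next = rq->queued ? rq->queued : rq->idle;

        if (next == rq->idle) {
                /* r4: idle accounting happens here instead of in __schedule() */
                rq->sched_goidle++;
                return next;
        }
        /* r4: the tick is armed here once a real task has been picked */
        hrtick_start(rq, next->time_slice);
        return next;
}

int main(void)
{
        struct task idle = { "idle", 0 }, work = { "work", 4000000 };
        struct rq rq = { .idle = &idle, .queued = &work };
        struct task *next;

        next = choose_next_task(&rq);          /* picks "work", arms the tick */
        printf("picked %s\n", next->name);

        rq.queued = NULL;                      /* nothing runnable left */
        next = choose_next_task(&rq);          /* falls back to idle */
        printf("picked %s (sched_goidle=%lld)\n", next->name, rq.sched_goidle);
        return 0;
}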
@@ -3982,13 +4031,6 @@ index 000000000000..ad0d073666ae
+
+ next = choose_next_task(rq, cpu, prev);
+
+ if (next == rq->idle)
+ schedstat_inc(rq->sched_goidle);
+#ifdef CONFIG_HIGH_RES_TIMERS
+ else
+ hrtick_start(rq, next->time_slice);
+#endif
+
+ if (likely(prev != next)) {
+ next->last_ran = rq->clock_task;
+ rq->last_ts_switch = rq->clock;
@@ -6080,6 +6122,9 @@ index 000000000000..ad0d073666ae
+ cpumask_t *chk;
+
+ for_each_online_cpu(cpu) {
+ /* take chance to reset time slice for idle tasks */
+ cpu_rq(cpu)->idle->time_slice = sched_timeslice_ns;
+
+ chk = &(per_cpu(sched_cpu_affinity_masks, cpu)[0]);
+
+ cpumask_complement(chk, cpumask_of(cpu));
@@ -6116,6 +6161,7 @@ index 000000000000..ad0d073666ae
+#else
+void __init sched_init_smp(void)
+{
+ cpu_rq(0)->idle->time_slice = sched_timeslice_ns;
+}
+#endif /* CONFIG_SMP */
+
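
The last two hunks reset the idle tasks' time_slice to sched_timeslice_ns while the scheduler is being brought up: once per online CPU on SMP, and for CPU 0 only in the UP variant. A trivial sketch of that reset, using a plain array in place of cpu_rq() and an assumed 4 ms slice purely for illustration:

#include <stdio.h>

#define NR_CPUS 4

/* Illustrative value only; the real sched_timeslice_ns is set by BMQ itself. */
static const long long sched_timeslice_ns = 4000000LL;

struct task { long long time_slice; };
struct rq   { struct task idle; };

static struct rq runqueues[NR_CPUS];   /* stand-in for cpu_rq(cpu) */

int main(void)
{
        /* take the chance to reset the time slice for idle tasks */
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                runqueues[cpu].idle.time_slice = sched_timeslice_ns;

        printf("cpu0 idle time_slice = %lld ns\n", runqueues[0].idle.time_slice);
        return 0;
}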