@@ -571,10 +571,10 @@ index 21fb5a5662b5..ac31239aa51a 100644
 obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
 diff --git a/kernel/sched/bmq.c b/kernel/sched/bmq.c
 new file mode 100644
-index 000000000000..ad0d073666ae
+index 000000000000..10560f7720e2
 --- /dev/null
 +++ b/kernel/sched/bmq.c
-@@ -0,0 +1,5969 @@
+@@ -0,0 +1,6015 @@
 +/*
 + * kernel/sched/bmq.c
 + *
@@ -647,7 +647,7 @@ index 000000000000..ad0d073666ae
 +
 +static inline void print_scheduler_version(void)
 +{
-+	printk(KERN_INFO "bmq: BMQ CPU Scheduler 5.6-r3 by Alfred Chen.\n");
++	printk(KERN_INFO "bmq: BMQ CPU Scheduler 5.6-r4 by Alfred Chen.\n");
 +}
 +
 +/**
@@ -1057,6 +1057,16 @@ index 000000000000..ad0d073666ae
 + * Add/Remove/Requeue task to/from the runqueue routines
 + * Context: rq->lock
 + */
++static inline void __dequeue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++	psi_dequeue(p, flags & DEQUEUE_SLEEP);
++	sched_info_dequeued(rq, p);
++
++	list_del(&p->bmq_node);
++	if (list_empty(&rq->queue.heads[p->bmq_idx]))
++		clear_bit(p->bmq_idx, rq->queue.bitmap);
++}
++
 +static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
 +{
 +	lockdep_assert_held(&rq->lock);
@@ -1064,6 +1074,9 @@ index 000000000000..ad0d073666ae
 +	WARN_ONCE(task_rq(p) != rq, "bmq: dequeue task reside on cpu%d from cpu%d\n",
 +		  task_cpu(p), cpu_of(rq));
 +
++	psi_dequeue(p, flags & DEQUEUE_SLEEP);
++	sched_info_dequeued(rq, p);
++
 +	list_del(&p->bmq_node);
 +	if (list_empty(&rq->queue.heads[p->bmq_idx])) {
 +		clear_bit(p->bmq_idx, rq->queue.bitmap);
@@ -1076,9 +1089,16 @@ index 000000000000..ad0d073666ae
 +#endif
 +
 +	sched_update_tick_dependency(rq);
-+	psi_dequeue(p, flags & DEQUEUE_SLEEP);
-+
-+	sched_info_dequeued(rq, p);
++}
++
++static inline void __enqueue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++	sched_info_queued(rq, p);
++	psi_enqueue(p, flags);
++
++	p->bmq_idx = task_sched_prio(p);
++	list_add_tail(&p->bmq_node, &rq->queue.heads[p->bmq_idx]);
++	set_bit(p->bmq_idx, rq->queue.bitmap);
 +}
 +
 +static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
@@ -1088,9 +1108,7 @@ index 000000000000..ad0d073666ae
 +	WARN_ONCE(task_rq(p) != rq, "bmq: enqueue task reside on cpu%d to cpu%d\n",
 +		  task_cpu(p), cpu_of(rq));
 +
-+	p->bmq_idx = task_sched_prio(p);
-+	list_add_tail(&p->bmq_node, &rq->queue.heads[p->bmq_idx]);
-+	set_bit(p->bmq_idx, rq->queue.bitmap);
++	__enqueue_task(p, rq, flags);
 +	update_sched_rq_watermark(rq);
 +	++rq->nr_running;
 +#ifdef CONFIG_SMP
@@ -1100,9 +1118,6 @@ index 000000000000..ad0d073666ae
 +
 +	sched_update_tick_dependency(rq);
 +
-+	sched_info_queued(rq, p);
-+	psi_enqueue(p, flags);
-+
 +	/*
 +	 * If in_iowait is set, the code below may not trigger any cpufreq
 +	 * utilization updates, so do it here explicitly with the IOWAIT flag
@@ -3792,9 +3807,9 @@ index 000000000000..ad0d073666ae
 +	       (p = rq_next_bmq_task(skip, rq)) != rq->idle) {
 +		skip = rq_next_bmq_task(p, rq);
 +		if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
-+			dequeue_task(p, rq, 0);
++			__dequeue_task(p, rq, 0);
 +			set_task_cpu(p, dest_cpu);
-+			enqueue_task(p, dest_rq, 0);
++			__enqueue_task(p, dest_rq, 0);
 +			nr_migrated++;
 +		}
 +		nr_tries--;
@@ -3827,15 +3842,28 @@ index 000000000000..ad0d073666ae
 +			spin_acquire(&src_rq->lock.dep_map,
 +				     SINGLE_DEPTH_NESTING, 1, _RET_IP_);
 +
-+			nr_migrated = migrate_pending_tasks(src_rq, rq, cpu);
++			if ((nr_migrated = migrate_pending_tasks(src_rq, rq, cpu))) {
++				src_rq->nr_running -= nr_migrated;
++#ifdef CONFIG_SMP
++				if (src_rq->nr_running < 2)
++					cpumask_clear_cpu(i, &sched_rq_pending_mask);
++#endif
++				rq->nr_running += nr_migrated;
++#ifdef CONFIG_SMP
++				if (rq->nr_running > 1)
++					cpumask_set_cpu(cpu, &sched_rq_pending_mask);
++#endif
++				update_sched_rq_watermark(rq);
++				cpufreq_update_util(rq, 0);
 +
 +			spin_release(&src_rq->lock.dep_map, _RET_IP_);
 +			do_raw_spin_unlock(&src_rq->lock);
 +
-+			if (nr_migrated) {
-+				cpufreq_update_util(rq, 0);
 +				return 1;
 +			}
++
++			spin_release(&src_rq->lock.dep_map, _RET_IP_);
++			do_raw_spin_unlock(&src_rq->lock);
 +		}
 +	} while (++affinity_mask < end_mask);
 +
@@ -3871,18 +3899,39 @@ index 000000000000..ad0d073666ae
 +
 +	if (unlikely(rq->skip)) {
 +		next = rq_runnable_task(rq);
++		if (next == rq->idle) {
 +#ifdef CONFIG_SMP
-+		if (next == rq->idle && take_other_rq_tasks(rq, cpu))
-+			next = rq_runnable_task(rq);
++			if (!take_other_rq_tasks(rq, cpu)) {
 +#endif
 +		rq->skip = NULL;
++				schedstat_inc(rq->sched_goidle);
++				return next;
++#ifdef CONFIG_SMP
++			}
++			next = rq_runnable_task(rq);
++#endif
++		}
++		rq->skip = NULL;
++#ifdef CONFIG_HIGH_RES_TIMERS
++		hrtick_start(rq, next->time_slice);
++#endif
 +		return next;
 +	}
 +
 +	next = rq_first_bmq_task(rq);
++	if (next == rq->idle) {
 +#ifdef CONFIG_SMP
-+	if (next == rq->idle && take_other_rq_tasks(rq, cpu))
-+		return rq_first_bmq_task(rq);
++		if (!take_other_rq_tasks(rq, cpu)) {
++#endif
++			schedstat_inc(rq->sched_goidle);
++			return next;
++#ifdef CONFIG_SMP
++		}
++		next = rq_first_bmq_task(rq);
++#endif
++	}
++#ifdef CONFIG_HIGH_RES_TIMERS
++	hrtick_start(rq, next->time_slice);
 +#endif
 +	return next;
 +}
@@ -3982,13 +4031,6 @@ index 000000000000..ad0d073666ae
 +
 +	next = choose_next_task(rq, cpu, prev);
 +
-+	if (next == rq->idle)
-+		schedstat_inc(rq->sched_goidle);
-+#ifdef CONFIG_HIGH_RES_TIMERS
-+	else
-+		hrtick_start(rq, next->time_slice);
-+#endif
-+
 +	if (likely(prev != next)) {
 +		next->last_ran = rq->clock_task;
 +		rq->last_ts_switch = rq->clock;
@@ -6080,6 +6122,9 @@ index 000000000000..ad0d073666ae
 +	cpumask_t *chk;
 +
 +	for_each_online_cpu(cpu) {
++		/* take chance to reset time slice for idle tasks */
++		cpu_rq(cpu)->idle->time_slice = sched_timeslice_ns;
++
 +		chk = &(per_cpu(sched_cpu_affinity_masks, cpu)[0]);
 +
 +		cpumask_complement(chk, cpumask_of(cpu));
@@ -6116,6 +6161,7 @@ index 000000000000..ad0d073666ae
 +#else
 +void __init sched_init_smp(void)
 +{
++	cpu_rq(0)->idle->time_slice = sched_timeslice_ns;
 +}
 +#endif /* CONFIG_SMP */
 +