Diffstat (limited to 'patches/boot_time_opt/0111-overload-on-wakeup.patch')
-rw-r--r-- | patches/boot_time_opt/0111-overload-on-wakeup.patch | 57
1 file changed, 0 insertions, 57 deletions
diff --git a/patches/boot_time_opt/0111-overload-on-wakeup.patch b/patches/boot_time_opt/0111-overload-on-wakeup.patch
deleted file mode 100644
index d5125be..0000000
--- a/patches/boot_time_opt/0111-overload-on-wakeup.patch
+++ /dev/null
@@ -1,57 +0,0 @@
From cc7c761946c6b9fa820acc90d7514795af3f42f5 Mon Sep 17 00:00:00 2001
From: jplozi <jplozi@unice.fr>
Date: Fri, 11 Mar 2016 15:18:06 +0100
Subject: [PATCH 111/126] overload on wakeup

source https://github.com/jplozi/wastedcores

as an experiment, apply the learnings from the wasted-cores paper
and see how the performance works out. With the data from this we should
be able to work with Peter and the rest of the scheduler folks on
a more permanent/elegant solution.
---
 kernel/sched/fair.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

--- linux-4.14/kernel/sched/fair.c.org	2017-11-18 23:01:57.353611850 +0000
+++ linux-4.14/kernel/sched/fair.c	2017-11-19 15:24:38.093637926 +0000
@@ -5925,6 +5925,8 @@
 	return min_cap * 1024 < task_util(p) * capacity_margin;
 }
 
+
+static unsigned int once_in_a_while;
 /*
  * select_task_rq_fair: Select target runqueue for the waking task in domains
  * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
@@ -5953,6 +5955,30 @@
 	}
 
 	rcu_read_lock();
+
+	once_in_a_while++;
+
+	if (cpu_rq(prev_cpu)->nr_running || (once_in_a_while & 15) == 0) {
+		int _cpu;
+		int bestprio = -5000;
+		int bestcpu = -1;
+
+		for_each_online_cpu(_cpu) {
+			if (!cpumask_test_cpu(_cpu, &p->cpus_allowed) ||
+			    cpu_rq(_cpu)->nr_running)
+				continue;
+			if (arch_asym_cpu_priority(_cpu) > bestprio || (prev_cpu == _cpu && bestprio == arch_asym_cpu_priority(_cpu))) {
+				bestcpu = _cpu;
+				bestprio = arch_asym_cpu_priority(_cpu);
+			}
+		}
+
+		if (bestcpu > 0) {
+			rcu_read_unlock();
+			return bestcpu;
+		}
+	}
+
 	for_each_domain(cpu, tmp) {
 		if (!(tmp->flags & SD_LOAD_BALANCE))
 			break;
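
Note (not part of the deleted patch above): the hunk's heuristic is to scan all online CPUs the waking task is allowed on, skip any whose runqueue is non-empty, and return the idle CPU with the highest arch_asym_cpu_priority(), preferring prev_cpu on a priority tie. The scan runs whenever prev_cpu is busy, plus every 16th wakeup via the once_in_a_while counter, and because of the bestcpu > 0 test it never short-circuits to CPU 0. Below is a minimal userspace C sketch of just that selection loop; struct cpu_state, pick_idle_cpu(), the NR_CPUS value, and the example priorities are invented stand-ins that merely model cpu_rq()->nr_running, p->cpus_allowed, and arch_asym_cpu_priority().

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8  /* illustrative CPU count, not the kernel constant */

/* Stand-in for per-CPU kernel state consulted by the patch. */
struct cpu_state {
	int nr_running;   /* models cpu_rq(cpu)->nr_running */
	int asym_prio;    /* models arch_asym_cpu_priority(cpu) */
	bool allowed;     /* models cpumask_test_cpu(cpu, &p->cpus_allowed) */
};

/*
 * Mirror of the patch's loop: return the allowed, idle CPU with the highest
 * priority, breaking a tie in favour of prev_cpu; -1 means "no idle CPU
 * found, fall back to the normal scheduler-domain scan".
 */
static int pick_idle_cpu(const struct cpu_state cpus[], int prev_cpu)
{
	int bestprio = -5000;
	int bestcpu = -1;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpus[cpu].allowed || cpus[cpu].nr_running)
			continue;
		if (cpus[cpu].asym_prio > bestprio ||
		    (cpu == prev_cpu && cpus[cpu].asym_prio == bestprio)) {
			bestcpu = cpu;
			bestprio = cpus[cpu].asym_prio;
		}
	}
	return bestcpu;
}

int main(void)
{
	/* Example: CPUs 2 and 5 are idle; CPU 5 has the higher priority. */
	struct cpu_state cpus[NR_CPUS] = {0};

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		cpus[cpu].allowed = true;
		cpus[cpu].nr_running = 1;
		cpus[cpu].asym_prio = 10;
	}
	cpus[2].nr_running = 0;
	cpus[5].nr_running = 0;
	cpus[5].asym_prio = 20;

	printf("selected CPU: %d\n", pick_idle_cpu(cpus, 2));
	return 0;
}

Compiled as plain C, the example prints "selected CPU: 5": the wakeup is steered to the highest-priority idle core instead of piling onto an already busy one, which is the behaviour the wasted-cores experiment was probing.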