[openwrt/staging/wigyori.git] target/linux/mvebu/patches-4.4/042-net-mvneta-Fix-race-condition-during-stopping.patch
From: Gregory CLEMENT <gregory.clement@free-electrons.com>
Date: Thu, 4 Feb 2016 22:09:29 +0100
Subject: [PATCH] net: mvneta: Fix race condition during stopping

When stopping the port, the CPU notifier is still registered while the
mvneta_stop_dev function calls mvneta_percpu_disable() on each CPU. A
new CPU could come online at this point, which would be racy.

This patch adds a flag that prevents the notifier code from running for
a new CPU while the port is stopping. It also uses the spinlock
introduced previously. To avoid a deadlock, taking the lock has been
moved out of the mvneta_percpu_elect() function and into its callers.

Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---

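For illustration, here is a minimal userspace sketch of the locking pattern this
patch introduces: a stop flag checked under the port lock, so that the hotplug
notifier bails out while the port is being torn down. A pthread mutex stands in
for the driver's spinlock, and the names (struct port, elect_cpu, stop_port) are
invented for the example rather than taken from the driver.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct port {
    pthread_mutex_t lock;   /* stands in for pp->lock */
    bool is_stopped;        /* stands in for pp->is_stopped */
};

/* Must be called with p->lock held by the caller, mirroring how the
 * patch moves the locking out of mvneta_percpu_elect().
 */
static void elect_cpu(struct port *p)
{
    printf("electing a CPU\n");
}

/* Rough analogue of the CPU_ONLINE notifier path: bail out when the
 * port is stopping, otherwise reconfigure under the lock.
 */
static void cpu_online_notifier(struct port *p)
{
    pthread_mutex_lock(&p->lock);
    if (p->is_stopped) {
        pthread_mutex_unlock(&p->lock);
        return;
    }
    elect_cpu(p);
    pthread_mutex_unlock(&p->lock);
}

/* Rough analogue of mvneta_stop(): set the flag under the lock so a
 * concurrent notifier cannot reconfigure a port that is going away.
 */
static void stop_port(struct port *p)
{
    pthread_mutex_lock(&p->lock);
    p->is_stopped = true;
    /* ... stop the device and unregister the notifier here ... */
    pthread_mutex_unlock(&p->lock);
}

int main(void)
{
    struct port p = { PTHREAD_MUTEX_INITIALIZER, false };

    cpu_online_notifier(&p);    /* runs the election */
    stop_port(&p);
    cpu_online_notifier(&p);    /* bails out: port is stopped */
    return 0;
}

Built with gcc -pthread, the first notifier call prints one election and the
second prints nothing, since the port has been marked as stopped.
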
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -374,6 +374,7 @@ struct mvneta_port {
	 * ensuring that the configuration remains coherent.
	 */
	spinlock_t lock;
+	bool is_stopped;
 
	/* Core clock */
	struct clk *clk;
@@ -2857,16 +2858,14 @@ static void mvneta_percpu_disable(void *
 	disable_percpu_irq(pp->dev->irq);
 }
 
+/* Electing a CPU must be done in an atomic way: it should be done
+ * after or before the removal/insertion of a CPU and this function is
+ * not reentrant.
+ */
 static void mvneta_percpu_elect(struct mvneta_port *pp)
 {
 	int elected_cpu = 0, max_cpu, cpu, i = 0;
 
-	/* Electing a CPU must be done in an atomic way: it should be
-	 * done after or before the removal/insertion of a CPU and
-	 * this function is not reentrant.
-	 */
-	spin_lock(&pp->lock);
-
 	/* Use the cpu associated to the rxq when it is online, in all
 	 * the other cases, use the cpu 0 which can't be offline.
 	 */
@@ -2910,7 +2909,6 @@ static void mvneta_percpu_elect(struct m
 		i++;
 
 	}
-	spin_unlock(&pp->lock);
 };
 
 static int mvneta_percpu_notifier(struct notifier_block *nfb,
@@ -2924,6 +2922,14 @@ static int mvneta_percpu_notifier(struct
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
+		spin_lock(&pp->lock);
+		/* Configuring the driver for a new CPU while the
+		 * driver is stopping is racy, so just avoid it.
+		 */
+		if (pp->is_stopped) {
+			spin_unlock(&pp->lock);
+			break;
+		}
 		netif_tx_stop_all_queues(pp->dev);
 
 		/* We have to synchronise on tha napi of each CPU
@@ -2961,6 +2967,7 @@ static int mvneta_percpu_notifier(struct
 			    MVNETA_CAUSE_LINK_CHANGE |
 			    MVNETA_CAUSE_PSC_SYNC_CHANGE);
 		netif_tx_start_all_queues(pp->dev);
+		spin_unlock(&pp->lock);
 		break;
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
@@ -2985,7 +2992,9 @@ static int mvneta_percpu_notifier(struct
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		/* Check if a new CPU must be elected now this on is down */
+		spin_lock(&pp->lock);
 		mvneta_percpu_elect(pp);
+		spin_unlock(&pp->lock);
 		/* Unmask all ethernet port interrupts */
 		on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
 		mvreg_write(pp, MVNETA_INTR_MISC_MASK,
@@ -3037,7 +3046,7 @@ static int mvneta_open(struct net_device
 	 */
 	on_each_cpu(mvneta_percpu_enable, pp, true);
 
-
+	pp->is_stopped = false;
 	/* Register a CPU notifier to handle the case where our CPU
 	 * might be taken offline.
 	 */
@@ -3070,9 +3079,18 @@ static int mvneta_stop(struct net_device
 {
 	struct mvneta_port *pp = netdev_priv(dev);
 
+	/* Inform that we are stopping so we don't want to setup the
+	 * driver for new CPUs in the notifiers
+	 */
+	spin_lock(&pp->lock);
+	pp->is_stopped = true;
 	mvneta_stop_dev(pp);
 	mvneta_mdio_remove(pp);
 	unregister_cpu_notifier(&pp->cpu_notifier);
+	/* Now that the notifier are unregistered, we can release le
+	 * lock
+	 */
+	spin_unlock(&pp->lock);
 	on_each_cpu(mvneta_percpu_disable, pp, true);
 	free_percpu_irq(dev->irq, pp->ports);
 	mvneta_cleanup_rxqs(pp);
@@ -3343,7 +3361,9 @@ static int mvneta_config_rss(struct mvn
 	mvreg_write(pp, MVNETA_PORT_CONFIG, val);
 
 	/* Update the elected CPU matching the new rxq_def */
+	spin_lock(&pp->lock);
 	mvneta_percpu_elect(pp);
+	spin_unlock(&pp->lock);
 
 	/* We have to synchronise on the napi of each CPU */
 	for_each_online_cpu(cpu) {