kernel: bump kernel 4.4 to 4.4.129 for 17.01
[openwrt/openwrt.git] target/linux/mvebu/patches-4.4/033-net-mvneta-Associate-RX-queues-with-each-CPU.patch
From: Gregory CLEMENT <gregory.clement@free-electrons.com>
Date: Wed, 9 Dec 2015 18:23:49 +0100
Subject: [PATCH] net: mvneta: Associate RX queues with each CPU

We enable the percpu interrupt for all the CPUs and we associate a
CPU with a few queues at the neta level. The mapping between the CPUs
and the queues is static. The queues are associated with the CPUs
modulo the number of CPUs. However, currently we only use one RX queue
for a given Ethernet port.

Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---

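As an illustration of the mapping described in the commit message (not part
of the kernel change itself), here is a minimal standalone C sketch of the
same policy: RX queue q is owned by CPU q % nr_cpus, and all TX queues
follow the CPU whose index equals the default RX queue. The CPU count,
RX-queue count and default queue below are assumed example values, not the
driver's real configuration.

/* Illustration only: the static CPU <-> queue mapping described above,
 * with made-up example parameters (NR_CPUS, NR_RXQS, RXQ_DEF are
 * assumptions, not the driver's actual values).
 */
#include <stdio.h>

#define NR_CPUS 2   /* hypothetical number of online CPUs */
#define NR_RXQS 8   /* hypothetical number of RX queues */
#define RXQ_DEF 0   /* hypothetical default RX queue */

#define TXQ_ACCESS_ALL_MASK 0xff00  /* mirrors MVNETA_CPU_TXQ_ACCESS_ALL_MASK */

int main(void)
{
	int cpu, rxq;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		unsigned int rxq_map = 0, txq_map = 0;

		/* RX queue q is served by CPU (q % NR_CPUS). */
		for (rxq = 0; rxq < NR_RXQS; rxq++)
			if (rxq % NR_CPUS == cpu)
				rxq_map |= 1u << rxq;

		/* All TX queues go to the CPU whose index equals the
		 * default RX queue.
		 */
		if (cpu == RXQ_DEF)
			txq_map = TXQ_ACCESS_ALL_MASK;

		printf("CPU %d: rxq_map=0x%02x txq_map=0x%04x\n",
		       cpu, rxq_map, txq_map);
	}
	return 0;
}

With two CPUs and eight RX queues this prints rxq_map=0x55 for CPU 0 and
0xaa for CPU 1, i.e. the even/odd split that the patch programs into the
per-CPU MVNETA_CPU_MAP registers in mvneta_defaults_set().
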
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -110,9 +110,16 @@
 #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
 #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
 #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
+#define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq)
 #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))

-/* Exception Interrupt Port/Queue Cause register */
+/* Exception Interrupt Port/Queue Cause register
+ *
+ * Their behavior depends on the mapping done using the PCPX2Q
+ * registers. For a given CPU, if the bit associated with a queue is not
+ * set, then for the register a read from this CPU will always return
+ * 0 and a write won't do anything
+ */

 #define MVNETA_INTR_NEW_CAUSE 0x25a0
 #define MVNETA_INTR_NEW_MASK 0x25a4
@@ -820,7 +827,13 @@ static void mvneta_port_up(struct mvneta
 	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

 	/* Enable all initialized RXQs. */
-	mvreg_write(pp, MVNETA_RXQ_CMD, BIT(pp->rxq_def));
+	for (queue = 0; queue < rxq_number; queue++) {
+		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+
+		if (rxq->descs != NULL)
+			q_map |= (1 << queue);
+	}
+	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
 }

 /* Stop the Ethernet port activity */
@@ -1030,6 +1043,7 @@ static void mvneta_defaults_set(struct m
 	int cpu;
 	int queue;
 	u32 val;
+	int max_cpu = num_present_cpus();

 	/* Clear all Cause registers */
 	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
@@ -1045,13 +1059,23 @@ static void mvneta_defaults_set(struct m
 	/* Enable MBUS Retry bit16 */
 	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

-	/* Set CPU queue access map - all CPUs have access to all RX
-	 * queues and to all TX queues
+	/* Set CPU queue access map. CPUs are assigned to the RX
+	 * queues modulo their number and all the TX queues are
+	 * assigned to the CPU associated to the default RX queue.
 	 */
-	for_each_present_cpu(cpu)
-		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
-			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
-			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
+	for_each_present_cpu(cpu) {
+		int rxq_map = 0, txq_map = 0;
+		int rxq;
+
+		for (rxq = 0; rxq < rxq_number; rxq++)
+			if ((rxq % max_cpu) == cpu)
+				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
+
+		if (cpu == rxq_def)
+			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
+
+		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
+	}

 	/* Reset RX and TX DMAs */
 	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
@@ -2178,6 +2202,7 @@ static int mvneta_poll(struct napi_struc
 {
 	int rx_done = 0;
 	u32 cause_rx_tx;
+	int rx_queue;
 	struct mvneta_port *pp = netdev_priv(napi->dev);
 	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);

@@ -2209,8 +2234,15 @@ static int mvneta_poll(struct napi_struc
 	/* For the case where the last mvneta_poll did not process all
 	 * RX packets
 	 */
+	rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
+
 	cause_rx_tx |= port->cause_rx_tx;
-	rx_done = mvneta_rx(pp, budget, &pp->rxqs[pp->rxq_def]);
+
+	if (rx_queue) {
+		rx_queue = rx_queue - 1;
+		rx_done = mvneta_rx(pp, budget, &pp->rxqs[rx_queue]);
+	}
+
 	budget -= rx_done;

 	if (budget > 0) {
@@ -2423,19 +2455,27 @@ static void mvneta_cleanup_txqs(struct m
 /* Cleanup all Rx queues */
 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
 {
-	mvneta_rxq_deinit(pp, &pp->rxqs[pp->rxq_def]);
+	int queue;
+
+	for (queue = 0; queue < rxq_number; queue++)
+		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
 }


 /* Init all Rx queues */
 static int mvneta_setup_rxqs(struct mvneta_port *pp)
 {
-	int err = mvneta_rxq_init(pp, &pp->rxqs[pp->rxq_def]);
-	if (err) {
-		netdev_err(pp->dev, "%s: can't create rxq=%d\n",
-			   __func__, pp->rxq_def);
-		mvneta_cleanup_rxqs(pp);
-		return err;
+	int queue;
+
+	for (queue = 0; queue < rxq_number; queue++) {
+		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
+
+		if (err) {
+			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
+				   __func__, queue);
+			mvneta_cleanup_rxqs(pp);
+			return err;
+		}
 	}

 	return 0;
@@ -2459,6 +2499,19 @@ static int mvneta_setup_txqs(struct mvne
 	return 0;
 }

+static void mvneta_percpu_unmask_interrupt(void *arg)
+{
+	struct mvneta_port *pp = arg;
+
+	/* All the queues are unmasked, but actually only the ones
+	 * mapped to this CPU will be unmasked
+	 */
+	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+		    MVNETA_RX_INTR_MASK_ALL |
+		    MVNETA_TX_INTR_MASK_ALL |
+		    MVNETA_MISCINTR_INTR_MASK);
+}
+
 static void mvneta_start_dev(struct mvneta_port *pp)
 {
 	unsigned int cpu;
@@ -2476,11 +2529,10 @@ static void mvneta_start_dev(struct mvne
 		napi_enable(&port->napi);
 	}

-	/* Unmask interrupts */
-	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-		    MVNETA_RX_INTR_MASK(rxq_number) |
-		    MVNETA_TX_INTR_MASK(txq_number) |
-		    MVNETA_MISCINTR_INTR_MASK);
+	/* Unmask interrupts. It has to be done from each CPU */
+	for_each_online_cpu(cpu)
+		smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
+					 pp, true);
 	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
 		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
 		    MVNETA_CAUSE_LINK_CHANGE |
@@ -2756,22 +2808,35 @@ static void mvneta_percpu_disable(void *

 static void mvneta_percpu_elect(struct mvneta_port *pp)
 {
-	int online_cpu_idx, cpu, i = 0;
+	int online_cpu_idx, max_cpu, cpu, i = 0;

 	online_cpu_idx = pp->rxq_def % num_online_cpus();
+	max_cpu = num_present_cpus();

 	for_each_online_cpu(cpu) {
-		if (i == online_cpu_idx)
-			/* Enable per-CPU interrupt on the one CPU we
-			 * just elected
+		int rxq_map = 0, txq_map = 0;
+		int rxq;
+
+		for (rxq = 0; rxq < rxq_number; rxq++)
+			if ((rxq % max_cpu) == cpu)
+				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
+
+		if (i == online_cpu_idx) {
+			/* Map the default receive queue and transmit
+			 * queue to the elected CPU
 			 */
-			smp_call_function_single(cpu, mvneta_percpu_enable,
-						 pp, true);
-		else
-			/* Disable per-CPU interrupt on all the other CPU */
-			smp_call_function_single(cpu, mvneta_percpu_disable,
-						 pp, true);
+			rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
+			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
+		}
+		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
+
+		/* Update the interrupt mask on each CPU according to the
+		 * new mapping
+		 */
+		smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
+					 pp, true);
 		i++;
+
 	}
 };

@@ -2806,12 +2871,22 @@ static int mvneta_percpu_notifier(struct
 		mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
 		napi_enable(&port->napi);

+
+		/* Enable per-CPU interrupts on the CPU that is
+		 * brought up.
+		 */
+		smp_call_function_single(cpu, mvneta_percpu_enable,
+					 pp, true);
+
 		/* Enable per-CPU interrupt on the one CPU we care
 		 * about.
 		 */
 		mvneta_percpu_elect(pp);

-		/* Unmask all ethernet port interrupts */
+		/* Unmask all ethernet port interrupts, as this
+		 * notifier is called for each CPU then the CPU to
+		 * Queue mapping is applied
+		 */
 		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
 			    MVNETA_RX_INTR_MASK(rxq_number) |
 			    MVNETA_TX_INTR_MASK(txq_number) |
@@ -2862,7 +2937,7 @@ static int mvneta_percpu_notifier(struct
 static int mvneta_open(struct net_device *dev)
 {
 	struct mvneta_port *pp = netdev_priv(dev);
-	int ret;
+	int ret, cpu;

 	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
 	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
@@ -2892,8 +2967,13 @@ static int mvneta_open(struct net_device
 	 */
 	mvneta_percpu_disable(pp);

-	/* Elect a CPU to handle our RX queue interrupt */
-	mvneta_percpu_elect(pp);
+	/* Enable per-CPU interrupt on all the CPU to handle our RX
+	 * queue interrupts
+	 */
+	for_each_online_cpu(cpu)
+		smp_call_function_single(cpu, mvneta_percpu_enable,
+					 pp, true);
+

 	/* Register a CPU notifier to handle the case where our CPU
 	 * might be taken offline.