[openwrt/staging/mans0n.git] target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/ipqess/ipqess.c
1 // SPDX-License-Identifier: (GPL-2.0 OR ISC)
2 /* Copyright (c) 2014 - 2017, The Linux Foundation. All rights reserved.
3 * Copyright (c) 2017 - 2018, John Crispin <john@phrozen.org>
4 * Copyright (c) 2018 - 2019, Christian Lamparter <chunkeey@gmail.com>
5 * Copyright (c) 2020 - 2021, Gabor Juhos <j4g8y7@gmail.com>
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all copies.
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
16 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 #include <linux/bitfield.h>
20 #include <linux/clk.h>
21 #include <linux/dsa/ipq4019.h>
22 #include <linux/if_vlan.h>
23 #include <linux/interrupt.h>
24 #include <linux/module.h>
25 #include <linux/of.h>
26 #include <linux/of_device.h>
27 #include <linux/of_mdio.h>
28 #include <linux/of_net.h>
29 #include <linux/phylink.h>
30 #include <linux/platform_device.h>
31 #include <linux/reset.h>
32 #include <linux/skbuff.h>
33 #include <linux/vmalloc.h>
34 #include <net/checksum.h>
35 #include <net/dsa.h>
36 #include <net/ip6_checksum.h>
37
38 #include "ipqess.h"
39
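/* Ring index helper: IPQESS_NEXT_IDX() relies on the ring sizes being
 * powers of two so that the wrap-around can be done with a mask.
 */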
40 #define IPQESS_RRD_SIZE 16
41 #define IPQESS_NEXT_IDX(X, Y) (((X) + 1) & ((Y) - 1))
42 #define IPQESS_TX_DMA_BUF_LEN 0x3fff
43
44 static void ipqess_w32(struct ipqess *ess, u32 reg, u32 val)
45 {
46 writel(val, ess->hw_addr + reg);
47 }
48
49 static u32 ipqess_r32(struct ipqess *ess, u16 reg)
50 {
51 return readl(ess->hw_addr + reg);
52 }
53
54 static void ipqess_m32(struct ipqess *ess, u32 mask, u32 val, u16 reg)
55 {
56 u32 _val = ipqess_r32(ess, reg);
57 _val &= ~mask;
58 _val |= val;
59 ipqess_w32(ess, reg, _val);
60 }
61
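/* Accumulate the per-queue TX/RX packet and byte counters from the
 * hardware registers into the driver statistics block. The caller must
 * hold ess->stats_lock.
 */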
62 void ipqess_update_hw_stats(struct ipqess *ess)
63 {
64 uint32_t *p;
65 u32 stat;
66 int i;
67
68 lockdep_assert_held(&ess->stats_lock);
69
70 p = (uint32_t *)&(ess->ipqessstats);
71 for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
72 stat = ipqess_r32(ess, IPQESS_REG_TX_STAT_PKT_Q(i));
73 *p += stat;
74 p++;
75 }
76
77 for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
78 stat = ipqess_r32(ess, IPQESS_REG_TX_STAT_BYTE_Q(i));
79 *p += stat;
80 p++;
81 }
82
83 for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
84 stat = ipqess_r32(ess, IPQESS_REG_RX_STAT_PKT_Q(i));
85 *p += stat;
86 p++;
87 }
88
89 for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
90 stat = ipqess_r32(ess, IPQESS_REG_RX_STAT_BYTE_Q(i));
91 *p += stat;
92 p++;
93 }
94 }
95
96 static int ipqess_tx_ring_alloc(struct ipqess *ess)
97 {
98 struct device *dev = &ess->pdev->dev;
99 int i;
100
101 for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
102 struct ipqess_tx_ring *tx_ring = &ess->tx_ring[i];
103 size_t size;
104 u32 idx;
105
106 tx_ring->ess = ess;
107 tx_ring->ring_id = i;
108 tx_ring->idx = i * 4;
109 tx_ring->count = IPQESS_TX_RING_SIZE;
110 tx_ring->nq = netdev_get_tx_queue(ess->netdev, i);
111
112 size = sizeof(struct ipqess_buf) * IPQESS_TX_RING_SIZE;
113 tx_ring->buf = devm_kzalloc(dev, size, GFP_KERNEL);
114 if (!tx_ring->buf) {
115 netdev_err(ess->netdev, "buffer alloc of tx ring failed");
116 return -ENOMEM;
117 }
118
119 size = sizeof(struct ipqess_tx_desc) * IPQESS_TX_RING_SIZE;
120 tx_ring->hw_desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
121 GFP_KERNEL | __GFP_ZERO);
122 if (!tx_ring->hw_desc) {
123 netdev_err(ess->netdev, "descriptor allocation for tx ring failed");
124 return -ENOMEM;
125 }
126
127 ipqess_w32(ess, IPQESS_REG_TPD_BASE_ADDR_Q(tx_ring->idx),
128 (u32)tx_ring->dma);
129
130 idx = ipqess_r32(ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
131 idx >>= IPQESS_TPD_CONS_IDX_SHIFT; /* need u32 here */
132 idx &= 0xffff;
133 tx_ring->head = tx_ring->tail = idx;
134
135 ipqess_m32(ess, IPQESS_TPD_PROD_IDX_MASK << IPQESS_TPD_PROD_IDX_SHIFT,
136 idx, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
137 ipqess_w32(ess, IPQESS_REG_TX_SW_CONS_IDX_Q(tx_ring->idx), idx);
138 ipqess_w32(ess, IPQESS_REG_TPD_RING_SIZE, IPQESS_TX_RING_SIZE);
139 }
140
141 return 0;
142 }
143
144 static int ipqess_tx_unmap_and_free(struct device *dev, struct ipqess_buf *buf)
145 {
146 int len = 0;
147
148 if (buf->flags & IPQESS_DESC_SINGLE)
149 dma_unmap_single(dev, buf->dma, buf->length, DMA_TO_DEVICE);
150 else if (buf->flags & IPQESS_DESC_PAGE)
151 dma_unmap_page(dev, buf->dma, buf->length, DMA_TO_DEVICE);
152
153 if (buf->flags & IPQESS_DESC_LAST) {
154 len = buf->skb->len;
155 dev_kfree_skb_any(buf->skb);
156 }
157
158 buf->flags = 0;
159
160 return len;
161 }
162
163 static void ipqess_tx_ring_free(struct ipqess *ess)
164 {
165 int i;
166
167 for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
168 int j;
169
170 if (!ess->tx_ring[i].hw_desc)
171 continue;
172
173 for (j = 0; j < IPQESS_TX_RING_SIZE; j++) {
174 struct ipqess_buf *buf = &ess->tx_ring[i].buf[j];
175
176 ipqess_tx_unmap_and_free(&ess->pdev->dev, buf);
177 }
178
179 ess->tx_ring[i].buf = NULL;
180 }
181 }
182
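/* Map a freshly allocated skb for DMA, publish its bus address in the
 * RX descriptor ring, advance the software head and update the RFD
 * producer index register so the hardware can use the new buffer.
 */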
183 static int ipqess_rx_buf_prepare(struct ipqess_buf *buf,
184 struct ipqess_rx_ring *rx_ring)
185 {
186 /* Clean the HW DESC header, otherwise we might end up
187 * with a spurious desc because of random garbage */
188 memset(buf->skb->data, 0, sizeof(struct ipqess_rx_desc));
189
190 buf->dma = dma_map_single(rx_ring->ppdev, buf->skb->data,
191 IPQESS_RX_HEAD_BUFF_SIZE, DMA_FROM_DEVICE);
192 if (dma_mapping_error(rx_ring->ppdev, buf->dma)) {
193 dev_err_once(rx_ring->ppdev,
194 "IPQESS DMA mapping failed for linear address %x",
195 buf->dma);
196 dev_kfree_skb_any(buf->skb);
197 buf->skb = NULL;
198 return -EFAULT;
199 }
200
201 buf->length = IPQESS_RX_HEAD_BUFF_SIZE;
202 rx_ring->hw_desc[rx_ring->head] = (struct ipqess_rx_desc *)buf->dma;
203 rx_ring->head = (rx_ring->head + 1) % IPQESS_RX_RING_SIZE;
204
205 ipqess_m32(rx_ring->ess, IPQESS_RFD_PROD_IDX_BITS,
206 (rx_ring->head + IPQESS_RX_RING_SIZE - 1) % IPQESS_RX_RING_SIZE,
207 IPQESS_REG_RFD_IDX_Q(rx_ring->idx));
208
209 return 0;
210 }
211
212 /* locking is handled by the caller */
213 static int ipqess_rx_buf_alloc_napi(struct ipqess_rx_ring *rx_ring)
214 {
215 struct ipqess_buf *buf = &rx_ring->buf[rx_ring->head];
216
217 buf->skb = napi_alloc_skb(&rx_ring->napi_rx,
218 IPQESS_RX_HEAD_BUFF_SIZE);
219 if (!buf->skb)
220 return -ENOMEM;
221
222 return ipqess_rx_buf_prepare(buf, rx_ring);
223 }
224
225 static int ipqess_rx_buf_alloc(struct ipqess_rx_ring *rx_ring)
226 {
227 struct ipqess_buf *buf = &rx_ring->buf[rx_ring->head];
228
229 buf->skb = netdev_alloc_skb_ip_align(rx_ring->ess->netdev,
230 IPQESS_RX_HEAD_BUFF_SIZE);
231 if (!buf->skb)
232 return -ENOMEM;
233
234 return ipqess_rx_buf_prepare(buf, rx_ring);
235 }
236
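/* Deferred RX refill: when the NAPI poll path fails to allocate
 * replacement RX buffers, the outstanding deficit is tracked in
 * refill_count and this worker retries the allocations with RX NAPI
 * temporarily disabled.
 */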
237 static void ipqess_refill_work(struct work_struct *work)
238 {
239 struct ipqess_rx_ring_refill *rx_refill = container_of(work,
240 struct ipqess_rx_ring_refill, refill_work);
241 struct ipqess_rx_ring *rx_ring = rx_refill->rx_ring;
242 int refill = 0;
243
244 /* Don't let this refill loop run forever by accident. */
245 while (atomic_dec_and_test(&rx_ring->refill_count)) {
246 napi_disable(&rx_ring->napi_rx);
247 if (ipqess_rx_buf_alloc(rx_ring)) {
248 refill++;
249 dev_dbg(rx_ring->ppdev,
250 "Not all buffers were reallocated");
251 }
252 napi_enable(&rx_ring->napi_rx);
253 }
254
255 if (atomic_add_return(refill, &rx_ring->refill_count))
256 schedule_work(&rx_refill->refill_work);
257 }
258
259
260 static int ipqess_rx_ring_alloc(struct ipqess *ess)
261 {
262 int i;
263
264 for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
265 int j;
266
267 ess->rx_ring[i].ess = ess;
268 ess->rx_ring[i].ppdev = &ess->pdev->dev;
269 ess->rx_ring[i].ring_id = i;
270 ess->rx_ring[i].idx = i * 2;
271
272 ess->rx_ring[i].buf = devm_kzalloc(&ess->pdev->dev,
273 sizeof(struct ipqess_buf) * IPQESS_RX_RING_SIZE,
274 GFP_KERNEL);
275 if (!ess->rx_ring[i].buf)
276 return -ENOMEM;
277
278 ess->rx_ring[i].hw_desc = dmam_alloc_coherent(&ess->pdev->dev,
279 sizeof(struct ipqess_rx_desc) * IPQESS_RX_RING_SIZE,
280 &ess->rx_ring[i].dma, GFP_KERNEL);
281 if (!ess->rx_ring[i].hw_desc)
282 return -ENOMEM;
283
284 for (j = 0; j < IPQESS_RX_RING_SIZE; j++)
285 if (ipqess_rx_buf_alloc(&ess->rx_ring[i]) < 0)
286 return -ENOMEM;
287
288 ess->rx_refill[i].rx_ring = &ess->rx_ring[i];
289 INIT_WORK(&ess->rx_refill[i].refill_work, ipqess_refill_work);
290
291 ipqess_w32(ess, IPQESS_REG_RFD_BASE_ADDR_Q(ess->rx_ring[i].idx),
292 (u32)(ess->rx_ring[i].dma));
293 }
294
295 ipqess_w32(ess, IPQESS_REG_RX_DESC0,
296 (IPQESS_RX_HEAD_BUFF_SIZE << IPQESS_RX_BUF_SIZE_SHIFT) |
297 (IPQESS_RX_RING_SIZE << IPQESS_RFD_RING_SIZE_SHIFT));
298
299 return 0;
300 }
301
302 static void ipqess_rx_ring_free(struct ipqess *ess)
303 {
304 int i;
305
306 for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
307 int j;
308
309 atomic_set(&ess->rx_ring[i].refill_count, 0);
310 cancel_work_sync(&ess->rx_refill[i].refill_work);
311
312 for (j = 0; j < IPQESS_RX_RING_SIZE; j++) {
313 dma_unmap_single(&ess->pdev->dev,
314 ess->rx_ring[i].buf[j].dma,
315 ess->rx_ring[i].buf[j].length,
316 DMA_FROM_DEVICE);
317 dev_kfree_skb_any(ess->rx_ring[i].buf[j].skb);
318 }
319 }
320 }
321
322 static struct net_device_stats *ipqess_get_stats(struct net_device *netdev)
323 {
324 struct ipqess *ess = netdev_priv(netdev);
325
326 spin_lock(&ess->stats_lock);
327 ipqess_update_hw_stats(ess);
328 spin_unlock(&ess->stats_lock);
329
330 return &ess->stats;
331 }
332
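/* RX poll body: walk the ring from the software tail up to the hardware
 * consumer index, strip the 16-byte RRD from each completed buffer,
 * chain multi-descriptor frames via the skb frag_list, hand packets to
 * GRO and refill the consumed RFDs.
 */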
333 static int ipqess_rx_poll(struct ipqess_rx_ring *rx_ring, int budget)
334 {
335 u32 length = 0, num_desc, tail, rx_ring_tail;
336 int done = 0;
337
338 rx_ring_tail = rx_ring->tail;
339
340 tail = ipqess_r32(rx_ring->ess, IPQESS_REG_RFD_IDX_Q(rx_ring->idx));
341 tail >>= IPQESS_RFD_CONS_IDX_SHIFT;
342 tail &= IPQESS_RFD_CONS_IDX_MASK;
343
344 while (done < budget) {
345 struct sk_buff *skb;
346 struct ipqess_rx_desc *rd;
347
348 if (rx_ring_tail == tail)
349 break;
350
351 dma_unmap_single(rx_ring->ppdev,
352 rx_ring->buf[rx_ring_tail].dma,
353 rx_ring->buf[rx_ring_tail].length,
354 DMA_FROM_DEVICE);
355
356 skb = xchg(&rx_ring->buf[rx_ring_tail].skb, NULL);
357 rd = (struct ipqess_rx_desc *)skb->data;
358 rx_ring_tail = IPQESS_NEXT_IDX(rx_ring_tail, IPQESS_RX_RING_SIZE);
359
360 /* Check if RRD is valid */
361 if (!(rd->rrd7 & IPQESS_RRD_DESC_VALID)) {
362 num_desc = 1;
363 dev_kfree_skb_any(skb);
364 goto skip;
365 }
366
367 num_desc = rd->rrd1 & IPQESS_RRD_NUM_RFD_MASK;
368 length = rd->rrd6 & IPQESS_RRD_PKT_SIZE_MASK;
369
370 skb_reserve(skb, IPQESS_RRD_SIZE);
371 if (num_desc > 1) {
372 /* can we use build_skb here? */
373 struct sk_buff *skb_prev = NULL;
374 int size_remaining;
375 int i;
376
377 skb->data_len = 0;
378 skb->tail += (IPQESS_RX_HEAD_BUFF_SIZE - IPQESS_RRD_SIZE);
379 skb->len = skb->truesize = length;
380 size_remaining = length - (IPQESS_RX_HEAD_BUFF_SIZE - IPQESS_RRD_SIZE);
381
382 for (i = 1; i < num_desc; i++) {
383 /* TODO: use build_skb ? */
384 struct sk_buff *skb_temp = rx_ring->buf[rx_ring_tail].skb;
385
386 dma_unmap_single(rx_ring->ppdev,
387 rx_ring->buf[rx_ring_tail].dma,
388 rx_ring->buf[rx_ring_tail].length,
389 DMA_FROM_DEVICE);
390
391 skb_put(skb_temp, min(size_remaining, IPQESS_RX_HEAD_BUFF_SIZE));
392 if (skb_prev)
393 skb_prev->next = rx_ring->buf[rx_ring_tail].skb;
394 else
395 skb_shinfo(skb)->frag_list = rx_ring->buf[rx_ring_tail].skb;
396 skb_prev = rx_ring->buf[rx_ring_tail].skb;
397 rx_ring->buf[rx_ring_tail].skb->next = NULL;
398
399 skb->data_len += rx_ring->buf[rx_ring_tail].skb->len;
400 size_remaining -= rx_ring->buf[rx_ring_tail].skb->len;
401
402 rx_ring_tail = IPQESS_NEXT_IDX(rx_ring_tail, IPQESS_RX_RING_SIZE);
403 }
404
405 } else {
406 skb_put(skb, length);
407 }
408
409 skb->dev = rx_ring->ess->netdev;
410 skb->protocol = eth_type_trans(skb, rx_ring->ess->netdev);
411 skb_record_rx_queue(skb, rx_ring->ring_id);
412
413 if (rd->rrd6 & IPQESS_RRD_CSUM_FAIL_MASK)
414 skb_checksum_none_assert(skb);
415 else
416 skb->ip_summed = CHECKSUM_UNNECESSARY;
417
418 if (rd->rrd7 & IPQESS_RRD_CVLAN) {
419 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rd->rrd4);
420 } else if (rd->rrd1 & IPQESS_RRD_SVLAN) {
421 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), rd->rrd4);
422 }
423 napi_gro_receive(&rx_ring->napi_rx, skb);
424
425 /* TODO: do we need to have these here? */
426 rx_ring->ess->stats.rx_packets++;
427 rx_ring->ess->stats.rx_bytes += length;
428
429 done++;
430 skip:
431
432 num_desc += atomic_xchg(&rx_ring->refill_count, 0);
433 while (num_desc) {
434 if (ipqess_rx_buf_alloc_napi(rx_ring)) {
435 num_desc = atomic_add_return(num_desc,
436 &rx_ring->refill_count);
437 if (num_desc >= ((4 * IPQESS_RX_RING_SIZE + 6) / 7))
438 schedule_work(&rx_ring->ess->rx_refill[rx_ring->ring_id].refill_work);
439 break;
440 }
441 num_desc--;
442 }
443 }
444
445 ipqess_w32(rx_ring->ess, IPQESS_REG_RX_SW_CONS_IDX_Q(rx_ring->idx),
446 rx_ring_tail);
447 rx_ring->tail = rx_ring_tail;
448
449 return done;
450 }
451
452 static int ipqess_tx_complete(struct ipqess_tx_ring *tx_ring, int budget)
453 {
454 u32 tail;
455 int done = 0;
456 int total = 0, ret;
457
458 tail = ipqess_r32(tx_ring->ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
459 tail >>= IPQESS_TPD_CONS_IDX_SHIFT;
460 tail &= IPQESS_TPD_CONS_IDX_MASK;
461
462 while ((tx_ring->tail != tail) && (done < budget)) {
463 //pr_info("freeing txq:%d tail:%d tailbuf:%p\n", tx_ring->idx, tx_ring->tail, &tx_ring->buf[tx_ring->tail]);
464 ret = ipqess_tx_unmap_and_free(&tx_ring->ess->pdev->dev,
465 &tx_ring->buf[tx_ring->tail]);
466 tx_ring->tail = IPQESS_NEXT_IDX(tx_ring->tail, tx_ring->count);
467 if (ret) {
468 total += ret;
469 done++;
470 }
471 }
472
473 ipqess_w32(tx_ring->ess,
474 IPQESS_REG_TX_SW_CONS_IDX_Q(tx_ring->idx),
475 tx_ring->tail);
476
477 if (netif_tx_queue_stopped(tx_ring->nq)) {
478 netdev_dbg(tx_ring->ess->netdev, "waking up tx queue %d\n",
479 tx_ring->idx);
480 netif_tx_wake_queue(tx_ring->nq);
481 }
482
483 netdev_tx_completed_queue(tx_ring->nq, done, total);
484
485 return done;
486 }
487
488 static int ipqess_tx_napi(struct napi_struct *napi, int budget)
489 {
490 struct ipqess_tx_ring *tx_ring = container_of(napi, struct ipqess_tx_ring,
491 napi_tx);
492 u32 tx_status;
493 int work_done = 0;
494
495 tx_status = ipqess_r32(tx_ring->ess, IPQESS_REG_TX_ISR);
496 tx_status &= BIT(tx_ring->idx);
497
498 work_done = ipqess_tx_complete(tx_ring, budget);
499
500 ipqess_w32(tx_ring->ess, IPQESS_REG_TX_ISR, tx_status);
501
502 if (likely(work_done < budget)) {
503 if (napi_complete_done(napi, work_done))
504 ipqess_w32(tx_ring->ess,
505 IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx), 0x1);
506 }
507
508 return work_done;
509 }
510
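/* RX NAPI handler: ack the ring's interrupt bit, poll, and keep polling
 * while new work shows up in the ISR and budget remains; the per-queue
 * interrupt is unmasked again only once the poll completes.
 */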
511 static int ipqess_rx_napi(struct napi_struct *napi, int budget)
512 {
513 struct ipqess_rx_ring *rx_ring = container_of(napi, struct ipqess_rx_ring,
514 napi_rx);
515 struct ipqess *ess = rx_ring->ess;
516 int remain_budget = budget;
517 int rx_done;
518 u32 rx_mask = BIT(rx_ring->idx);
519 u32 status;
520
521 poll_again:
522 ipqess_w32(ess, IPQESS_REG_RX_ISR, rx_mask);
523 rx_done = ipqess_rx_poll(rx_ring, remain_budget);
524
525 if (rx_done == remain_budget)
526 return budget;
527
528 status = ipqess_r32(ess, IPQESS_REG_RX_ISR);
529 if (status & rx_mask) {
530 remain_budget -= rx_done;
531 goto poll_again;
532 }
533
534 if (napi_complete_done(napi, rx_done + budget - remain_budget))
535 ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(rx_ring->idx), 0x1);
536
537 return rx_done + budget - remain_budget;
538 }
539
540 static irqreturn_t ipqess_interrupt_tx(int irq, void *priv)
541 {
542 struct ipqess_tx_ring *tx_ring = (struct ipqess_tx_ring *) priv;
543
544 if (likely(napi_schedule_prep(&tx_ring->napi_tx))) {
545 ipqess_w32(tx_ring->ess,
546 IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx),
547 0x0);
548 __napi_schedule(&tx_ring->napi_tx);
549 }
550
551 return IRQ_HANDLED;
552 }
553
554 static irqreturn_t ipqess_interrupt_rx(int irq, void *priv)
555 {
556 struct ipqess_rx_ring *rx_ring = (struct ipqess_rx_ring *) priv;
557
558 if (likely(napi_schedule_prep(&rx_ring->napi_rx))) {
559 ipqess_w32(rx_ring->ess,
560 IPQESS_REG_RX_INT_MASK_Q(rx_ring->idx),
561 0x0);
562 __napi_schedule(&rx_ring->napi_rx);
563 }
564
565 return IRQ_HANDLED;
566 }
567
568 static void ipqess_irq_enable(struct ipqess *ess)
569 {
570 int i;
571
572 ipqess_w32(ess, IPQESS_REG_RX_ISR, 0xff);
573 ipqess_w32(ess, IPQESS_REG_TX_ISR, 0xffff);
574 for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
575 ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(ess->rx_ring[i].idx), 1);
576 ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(ess->tx_ring[i].idx), 1);
577 }
578 }
579
580 static void ipqess_irq_disable(struct ipqess *ess)
581 {
582 int i;
583
584 for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
585 ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(ess->rx_ring[i].idx), 0);
586 ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(ess->tx_ring[i].idx), 0);
587 }
588 }
589
590 static int ipqess_init(struct net_device *netdev)
591 {
592 struct ipqess *ess = netdev_priv(netdev);
593 struct device_node *of_node = ess->pdev->dev.of_node;
594 return phylink_of_phy_connect(ess->phylink, of_node, 0);
595 }
596
597 static void ipqess_uninit(struct net_device *netdev)
598 {
599 struct ipqess *ess = netdev_priv(netdev);
600
601 phylink_disconnect_phy(ess->phylink);
602 }
603
604 static int ipqess_open(struct net_device *netdev)
605 {
606 struct ipqess *ess = netdev_priv(netdev);
607 int i;
608
609 for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
610 napi_enable(&ess->tx_ring[i].napi_tx);
611 napi_enable(&ess->rx_ring[i].napi_rx);
612 }
613 ipqess_irq_enable(ess);
614 phylink_start(ess->phylink);
615 netif_tx_start_all_queues(netdev);
616
617 return 0;
618 }
619
620 static int ipqess_stop(struct net_device *netdev)
621 {
622 struct ipqess *ess = netdev_priv(netdev);
623 int i;
624
625 netif_tx_stop_all_queues(netdev);
626 phylink_stop(ess->phylink);
627 ipqess_irq_disable(ess);
628 for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
629 napi_disable(&ess->tx_ring[i].napi_tx);
630 napi_disable(&ess->rx_ring[i].napi_rx);
631 }
632
633 return 0;
634 }
635
636 static int ipqess_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
637 {
638 struct ipqess *ess = netdev_priv(netdev);
639
640 switch (cmd) {
641 case SIOCGMIIPHY:
642 case SIOCGMIIREG:
643 case SIOCSMIIREG:
644 return phylink_mii_ioctl(ess->phylink, ifr, cmd);
645 default:
646 break;
647 }
648
649 return -EOPNOTSUPP;
650 }
651
652
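/* Number of free TX descriptors between the producer (head) and the
 * consumer (tail); one slot is always kept unused so that a full ring
 * can be told apart from an empty one.
 */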
653 static inline u16 ipqess_tx_desc_available(struct ipqess_tx_ring *tx_ring)
654 {
655 u16 count = 0;
656
657 if (tx_ring->tail <= tx_ring->head)
658 count = IPQESS_TX_RING_SIZE;
659
660 count += tx_ring->tail - tx_ring->head - 1;
661
662 return count;
663 }
664
665 static inline int ipqess_cal_txd_req(struct sk_buff *skb)
666 {
667 int tpds;
668
669 /* one TPD for the header, and one for each fragment */
670 tpds = 1 + skb_shinfo(skb)->nr_frags;
671 if (skb_is_gso(skb) && skb_is_gso_v6(skb)) {
672 /* for LSOv2 one extra TPD is needed */
673 tpds++;
674 }
675
676 return tpds;
677 }
678
679 static struct ipqess_buf *ipqess_get_tx_buffer(struct ipqess_tx_ring *tx_ring,
680 struct ipqess_tx_desc *desc)
681 {
682 return &tx_ring->buf[desc - tx_ring->hw_desc];
683 }
684
685 static struct ipqess_tx_desc *ipqess_tx_desc_next(struct ipqess_tx_ring *tx_ring)
686 {
687 struct ipqess_tx_desc *desc;
688
689 desc = &tx_ring->hw_desc[tx_ring->head];
690 tx_ring->head = IPQESS_NEXT_IDX(tx_ring->head, tx_ring->count);
691
692 return desc;
693 }
694
695 static void ipqess_rollback_tx(struct ipqess *eth,
696 struct ipqess_tx_desc *first_desc, int ring_id)
697 {
698 struct ipqess_tx_ring *tx_ring = &eth->tx_ring[ring_id];
699 struct ipqess_buf *buf;
700 struct ipqess_tx_desc *desc = NULL;
701 u16 start_index, index;
702
703 start_index = first_desc - tx_ring->hw_desc;
704
705 index = start_index;
706 while (index != tx_ring->head) {
707 desc = &tx_ring->hw_desc[index];
708 buf = &tx_ring->buf[index];
709 ipqess_tx_unmap_and_free(&eth->pdev->dev, buf);
710 memset(desc, 0, sizeof(struct ipqess_tx_desc));
711 if (++index == tx_ring->count)
712 index = 0;
713 }
714 tx_ring->head = start_index;
715 }
716
717 static bool ipqess_process_dsa_tag_sh(struct sk_buff *skb, u32 *word3)
718 {
719 struct skb_shared_info *shinfo = skb_shinfo(skb);
720 struct ipq40xx_dsa_tag_data *tag_data;
721
722 if (shinfo->dsa_tag_proto != DSA_TAG_PROTO_IPQ4019)
723 return false;
724
725 tag_data = (struct ipq40xx_dsa_tag_data *)shinfo->dsa_tag_data;
726
727 pr_debug("SH tag @ %08x, dp:%02x from_cpu:%u\n",
728 (u32)tag_data, tag_data->dp, tag_data->from_cpu);
729
730 *word3 |= tag_data->dp << IPQESS_TPD_PORT_BITMAP_SHIFT;
731 if (tag_data->from_cpu)
732 *word3 |= BIT(IPQESS_TPD_FROM_CPU_SHIFT);
733
734 return true;
735 }
736
737 static void ipqess_get_dp_info(struct ipqess *ess, struct sk_buff *skb,
738 u32 *word3)
739 {
740 if (netdev_uses_dsa(ess->netdev)) {
741
742 if (ipqess_process_dsa_tag_sh(skb, word3))
743 return;
744 }
745
746 *word3 |= 0x3e << IPQESS_TPD_PORT_BITMAP_SHIFT;
747 }
748
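/* Build the TPD chain for one skb: resolve the destination port bitmap
 * (word3) from the DSA tag, set up TSO/checksum offload and VLAN
 * insertion flags, DMA-map the linear head and every fragment, and mark
 * the final descriptor with EOP so completion knows where the packet
 * ends.
 */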
749 static int ipqess_tx_map_and_fill(struct ipqess_tx_ring *tx_ring, struct sk_buff *skb)
750 {
751 struct ipqess_buf *buf = NULL;
752 struct platform_device *pdev = tx_ring->ess->pdev;
753 struct ipqess_tx_desc *desc = NULL, *first_desc = NULL;
754 u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0;
755 u16 len;
756 int i;
757
758 ipqess_get_dp_info(tx_ring->ess, skb, &word3);
759
760 if (skb_is_gso(skb)) {
761 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
762 lso_word1 |= IPQESS_TPD_IPV4_EN;
763 ip_hdr(skb)->check = 0;
764 tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
765 ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
766 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
767 lso_word1 |= IPQESS_TPD_LSO_V2_EN;
768 ipv6_hdr(skb)->payload_len = 0;
769 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
770 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
771 }
772
773 lso_word1 |= IPQESS_TPD_LSO_EN |
774 ((skb_shinfo(skb)->gso_size & IPQESS_TPD_MSS_MASK) << IPQESS_TPD_MSS_SHIFT) |
775 (skb_transport_offset(skb) << IPQESS_TPD_HDR_SHIFT);
776 } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
777 u8 css, cso;
778 cso = skb_checksum_start_offset(skb);
779 css = cso + skb->csum_offset;
780
781 word1 |= (IPQESS_TPD_CUSTOM_CSUM_EN);
782 word1 |= (cso >> 1) << IPQESS_TPD_HDR_SHIFT;
783 word1 |= ((css >> 1) << IPQESS_TPD_CUSTOM_CSUM_SHIFT);
784 }
785
786 if (skb_vlan_tag_present(skb)) {
787 switch (skb->vlan_proto) {
788 case htons(ETH_P_8021Q):
789 word3 |= BIT(IPQESS_TX_INS_CVLAN);
790 word3 |= skb_vlan_tag_get(skb) << IPQESS_TX_CVLAN_TAG_SHIFT;
791 break;
792 case htons(ETH_P_8021AD):
793 word1 |= BIT(IPQESS_TX_INS_SVLAN);
794 svlan_tag = skb_vlan_tag_get(skb);
795 break;
796 default:
797 dev_err(&pdev->dev, "no ctag or stag present\n");
798 goto vlan_tag_error;
799 }
800 }
801
802 if (eth_type_vlan(skb->protocol))
803 word1 |= IPQESS_TPD_VLAN_TAGGED;
804
805 if (skb->protocol == htons(ETH_P_PPP_SES))
806 word1 |= IPQESS_TPD_PPPOE_EN;
807
808 len = skb_headlen(skb);
809
810 first_desc = desc = ipqess_tx_desc_next(tx_ring);
811 if (lso_word1 & IPQESS_TPD_LSO_V2_EN) {
812 desc->addr = cpu_to_le32(skb->len);
813 desc->word1 = word1 | lso_word1;
814 desc->svlan_tag = svlan_tag;
815 desc->word3 = word3;
816 desc = ipqess_tx_desc_next(tx_ring);
817 }
818
819 buf = ipqess_get_tx_buffer(tx_ring, desc);
820 buf->length = len;
821 buf->dma = dma_map_single(&pdev->dev,
822 skb->data, len, DMA_TO_DEVICE);
823 if (dma_mapping_error(&pdev->dev, buf->dma))
824 goto dma_error;
825
826 desc->addr = cpu_to_le32(buf->dma);
827 desc->len = cpu_to_le16(len);
828
829 buf->flags |= IPQESS_DESC_SINGLE;
830 desc->word1 = word1 | lso_word1;
831 desc->svlan_tag = svlan_tag;
832 desc->word3 = word3;
833
834 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
835 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
836 len = skb_frag_size(frag);
837 desc = ipqess_tx_desc_next(tx_ring);
838 buf = ipqess_get_tx_buffer(tx_ring, desc);
839 buf->length = len;
840 buf->flags |= IPQESS_DESC_PAGE;
841 buf->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, DMA_TO_DEVICE);
842 if (dma_mapping_error(&pdev->dev, buf->dma))
843 goto dma_error;
844
845 desc->addr = cpu_to_le32(buf->dma);
846 desc->len = cpu_to_le16(len);
847 desc->svlan_tag = svlan_tag;
848 desc->word1 = word1 | lso_word1;
849 desc->word3 = word3;
850 }
851 desc->word1 |= 1 << IPQESS_TPD_EOP_SHIFT;
852 buf->skb = skb;
853 buf->flags |= IPQESS_DESC_LAST;
854
855 return 0;
856
857 dma_error:
858 ipqess_rollback_tx(tx_ring->ess, first_desc, tx_ring->ring_id);
859 dev_err(&pdev->dev, "TX DMA map failed\n");
860
861 vlan_tag_error:
862 return -ENOMEM;
863 }
864
865 static inline void ipqess_kick_tx(struct ipqess_tx_ring *tx_ring)
866 {
867 /* Ensure that all TPDs have been written completely */
868 dma_wmb();
869
870 /* update software producer index */
871 ipqess_w32(tx_ring->ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx),
872 tx_ring->head);
873 }
874
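/* ndo_start_xmit: verify that enough descriptors are free for the worst
 * case (linear head plus fragments, plus one extra TPD for LSOv2); if
 * not, stop the queue, unmask the TX completion interrupt and report
 * busy. Otherwise map the skb and kick the hardware producer index
 * unless more packets are about to follow.
 */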
875 static netdev_tx_t ipqess_xmit(struct sk_buff *skb,
876 struct net_device *netdev)
877 {
878 struct ipqess *ess = netdev_priv(netdev);
879 struct ipqess_tx_ring *tx_ring;
880 int avail;
881 int tx_num;
882 int ret;
883
884 tx_ring = &ess->tx_ring[skb_get_queue_mapping(skb)];
885 tx_num = ipqess_cal_txd_req(skb);
886 avail = ipqess_tx_desc_available(tx_ring);
887 if (avail < tx_num) {
888 netdev_dbg(netdev,
889 "stopping tx queue %d, avail=%d req=%d im=%x\n",
890 tx_ring->idx, avail, tx_num,
891 ipqess_r32(tx_ring->ess,
892 IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx)));
893 netif_tx_stop_queue(tx_ring->nq);
894 ipqess_w32(tx_ring->ess, IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx), 0x1);
895 ipqess_kick_tx(tx_ring);
896 return NETDEV_TX_BUSY;
897 }
898
899 ret = ipqess_tx_map_and_fill(tx_ring, skb);
900 if (ret) {
901 dev_kfree_skb_any(skb);
902 ess->stats.tx_errors++;
903 goto err_out;
904 }
905
906 ess->stats.tx_packets++;
907 ess->stats.tx_bytes += skb->len;
908 netdev_tx_sent_queue(tx_ring->nq, skb->len);
909
910 if (!netdev_xmit_more() || netif_xmit_stopped(tx_ring->nq))
911 ipqess_kick_tx(tx_ring);
912
913 err_out:
914 return NETDEV_TX_OK;
915 }
916
917 static int ipqess_set_mac_address(struct net_device *netdev, void *p)
918 {
919 int ret = eth_mac_addr(netdev, p);
920 struct ipqess *ess = netdev_priv(netdev);
921 const char *macaddr = netdev->dev_addr;
922
923 if (ret)
924 return ret;
925
926 // spin_lock_bh(&mac->hw->page_lock);
927 ipqess_w32(ess, IPQESS_REG_MAC_CTRL1,
928 (macaddr[0] << 8) | macaddr[1]);
929 ipqess_w32(ess, IPQESS_REG_MAC_CTRL0,
930 (macaddr[2] << 24) | (macaddr[3] << 16) |
931 (macaddr[4] << 8) | macaddr[5]);
932 // spin_unlock_bh(&mac->hw->page_lock);
933
934 return 0;
935 }
936
937 static void ipqess_tx_timeout(struct net_device *netdev, unsigned int txq_id)
938 {
939 struct ipqess *ess = netdev_priv(netdev);
940 struct ipqess_tx_ring *tr = &ess->tx_ring[txq_id];
941
942 netdev_warn(netdev, "hardware queue %d appears to be stuck\n",
943 tr->idx);
944
945 /* TODO: dump hardware queue */
946 }
947
948 static const struct net_device_ops ipqess_axi_netdev_ops = {
949 .ndo_init = ipqess_init,
950 .ndo_uninit = ipqess_uninit,
951 .ndo_open = ipqess_open,
952 .ndo_stop = ipqess_stop,
953 .ndo_do_ioctl = ipqess_do_ioctl,
954 .ndo_start_xmit = ipqess_xmit,
955 .ndo_get_stats = ipqess_get_stats,
956 .ndo_set_mac_address = ipqess_set_mac_address,
957 .ndo_tx_timeout = ipqess_tx_timeout,
958 };
959
960 static void ipqess_hw_stop(struct ipqess *ess)
961 {
962 int i;
963
964 /* disable all RX queue IRQs */
965 for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++)
966 ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(i), 0);
967
968 /* disable all TX queue IRQs */
969 for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++)
970 ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(i), 0);
971
972 /* disable all other IRQs */
973 ipqess_w32(ess, IPQESS_REG_MISC_IMR, 0);
974 ipqess_w32(ess, IPQESS_REG_WOL_IMR, 0);
975
976 /* clear the IRQ status registers */
977 ipqess_w32(ess, IPQESS_REG_RX_ISR, 0xff);
978 ipqess_w32(ess, IPQESS_REG_TX_ISR, 0xffff);
979 ipqess_w32(ess, IPQESS_REG_MISC_ISR, 0x1fff);
980 ipqess_w32(ess, IPQESS_REG_WOL_ISR, 0x1);
981 ipqess_w32(ess, IPQESS_REG_WOL_CTRL, 0);
982
983 /* disable RX and TX queues */
984 ipqess_m32(ess, IPQESS_RXQ_CTRL_EN_MASK, 0, IPQESS_REG_RXQ_CTRL);
985 ipqess_m32(ess, IPQESS_TXQ_CTRL_TXQ_EN, 0, IPQESS_REG_TXQ_CTRL);
986 }
987
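/* One-time hardware bring-up: quiesce the engine, program interrupt
 * moderation, VLAN TPIDs, burst sizes and RSS, allocate and publish the
 * RX/TX rings, then enable the TX queues and the RX queues that are
 * actually in use.
 */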
988 static int ipqess_hw_init(struct ipqess *ess)
989 {
990 u32 tmp;
991 int i, err;
992
993 ipqess_hw_stop(ess);
994
995 ipqess_m32(ess, BIT(IPQESS_INTR_SW_IDX_W_TYP_SHIFT),
996 IPQESS_INTR_SW_IDX_W_TYPE << IPQESS_INTR_SW_IDX_W_TYP_SHIFT,
997 IPQESS_REG_INTR_CTRL);
998
999 /* enable IRQ delay slot */
1000 ipqess_w32(ess, IPQESS_REG_IRQ_MODRT_TIMER_INIT,
1001 (IPQESS_TX_IMT << IPQESS_IRQ_MODRT_TX_TIMER_SHIFT) |
1002 (IPQESS_RX_IMT << IPQESS_IRQ_MODRT_RX_TIMER_SHIFT));
1003
1004 /* Set Customer and Service VLAN TPIDs */
1005 ipqess_w32(ess, IPQESS_REG_VLAN_CFG,
1006 (ETH_P_8021Q << IPQESS_VLAN_CFG_CVLAN_TPID_SHIFT) |
1007 (ETH_P_8021AD << IPQESS_VLAN_CFG_SVLAN_TPID_SHIFT));
1008
1009 /* Configure the TX Queue bursting */
1010 ipqess_w32(ess, IPQESS_REG_TXQ_CTRL,
1011 (IPQESS_TPD_BURST << IPQESS_TXQ_NUM_TPD_BURST_SHIFT) |
1012 (IPQESS_TXF_BURST << IPQESS_TXQ_TXF_BURST_NUM_SHIFT) |
1013 IPQESS_TXQ_CTRL_TPD_BURST_EN);
1014
1015 /* Set RSS type */
1016 ipqess_w32(ess, IPQESS_REG_RSS_TYPE,
1017 IPQESS_RSS_TYPE_IPV4TCP | IPQESS_RSS_TYPE_IPV6_TCP |
1018 IPQESS_RSS_TYPE_IPV4_UDP | IPQESS_RSS_TYPE_IPV6UDP |
1019 IPQESS_RSS_TYPE_IPV4 | IPQESS_RSS_TYPE_IPV6);
1020
1021 /* Set RFD ring burst and threshold */
1022 ipqess_w32(ess, IPQESS_REG_RX_DESC1,
1023 (IPQESS_RFD_BURST << IPQESS_RXQ_RFD_BURST_NUM_SHIFT) |
1024 (IPQESS_RFD_THR << IPQESS_RXQ_RFD_PF_THRESH_SHIFT) |
1025 (IPQESS_RFD_LTHR << IPQESS_RXQ_RFD_LOW_THRESH_SHIFT));
1026
1027 /* Set Rx FIFO
1028 * - threshold at which to start DMAing data to the host
1029 */
1030 ipqess_w32(ess, IPQESS_REG_RXQ_CTRL,
1031 IPQESS_FIFO_THRESH_128_BYTE | IPQESS_RXQ_CTRL_RMV_VLAN);
1032
1033 err = ipqess_rx_ring_alloc(ess);
1034 if (err)
1035 return err;
1036
1037 err = ipqess_tx_ring_alloc(ess);
1038 if (err)
1039 return err;
1040
1041 /* Load all of the ring base addresses above into the DMA engine */
1042 ipqess_m32(ess, 0, BIT(IPQESS_LOAD_PTR_SHIFT),
1043 IPQESS_REG_TX_SRAM_PART);
1044
1045 /* Disable TX FIFO low watermark and high watermark */
1046 ipqess_w32(ess, IPQESS_REG_TXF_WATER_MARK, 0);
1047
1048 /* Configure RSS indirection table.
1049 * All 128 hash values are configured in the following
1050 * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively,
1051 * and so on.
1052 */
1053 for (i = 0; i < IPQESS_NUM_IDT; i++)
1054 ipqess_w32(ess, IPQESS_REG_RSS_IDT(i), IPQESS_RSS_IDT_VALUE);
1055
1056 /* Configure load balance mapping table.
1057 * The 4 table entries are configured according to the
1058 * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4}
1059 * respectively.
1060 */
1061 ipqess_w32(ess, IPQESS_REG_LB_RING, IPQESS_LB_REG_VALUE);
1062
1063 /* Configure Virtual queue for Tx rings */
1064 ipqess_w32(ess, IPQESS_REG_VQ_CTRL0, IPQESS_VQ_REG_VALUE);
1065 ipqess_w32(ess, IPQESS_REG_VQ_CTRL1, IPQESS_VQ_REG_VALUE);
1066
1067 /* Configure max AXI burst write size to 128 bytes */
1068 ipqess_w32(ess, IPQESS_REG_AXIW_CTRL_MAXWRSIZE,
1069 IPQESS_AXIW_MAXWRSIZE_VALUE);
1070
1071 /* Enable TX queues */
1072 ipqess_m32(ess, 0, IPQESS_TXQ_CTRL_TXQ_EN, IPQESS_REG_TXQ_CTRL);
1073
1074 /* Enable RX queues */
1075 tmp = 0;
1076 for (i = 0; i < IPQESS_NETDEV_QUEUES; i++)
1077 tmp |= IPQESS_RXQ_CTRL_EN(ess->rx_ring[i].idx);
1078
1079 ipqess_m32(ess, IPQESS_RXQ_CTRL_EN_MASK, tmp, IPQESS_REG_RXQ_CTRL);
1080
1081 return 0;
1082 }
1083
1084 static void ipqess_validate(struct phylink_config *config,
1085 unsigned long *supported,
1086 struct phylink_link_state *state)
1087 {
1088 struct ipqess *ess = container_of(config, struct ipqess, phylink_config);
1089 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1090
1091 if (state->interface != PHY_INTERFACE_MODE_INTERNAL) {
1092 dev_err(&ess->pdev->dev, "unsupported interface mode: %d\n",
1093 state->interface);
1094 linkmode_zero(supported);
1095 return;
1096 }
1097
1098 phylink_set_port_modes(mask);
1099 phylink_set(mask, 1000baseT_Full);
1100 phylink_set(mask, Pause);
1101 phylink_set(mask, Asym_Pause);
1102
1103 linkmode_and(supported, supported, mask);
1104 linkmode_and(state->advertising, state->advertising, mask);
1105 }
1106
1107 static void ipqess_mac_config(struct phylink_config *config, unsigned int mode,
1108 const struct phylink_link_state *state)
1109 {
1110 /* TODO */
1111 }
1112
1113 static void ipqess_mac_link_down(struct phylink_config *config,
1114 unsigned int mode,
1115 phy_interface_t interface)
1116 {
1117 /* TODO */
1118 }
1119
1120 static void ipqess_mac_link_up(struct phylink_config *config,
1121 struct phy_device *phy, unsigned int mode,
1122 phy_interface_t interface,
1123 int speed, int duplex,
1124 bool tx_pause, bool rx_pause)
1125 {
1126 /* TODO */
1127 }
1128
1129 static struct phylink_mac_ops ipqess_phylink_mac_ops = {
1130 .validate = ipqess_validate,
1131 .mac_config = ipqess_mac_config,
1132 .mac_link_up = ipqess_mac_link_up,
1133 .mac_link_down = ipqess_mac_link_down,
1134 };
1135
1136 static void ipqess_cleanup(struct ipqess *ess)
1137 {
1138 ipqess_hw_stop(ess);
1139 unregister_netdev(ess->netdev);
1140
1141 ipqess_tx_ring_free(ess);
1142 ipqess_rx_ring_free(ess);
1143
1144 if (!IS_ERR_OR_NULL(ess->phylink))
1145 phylink_destroy(ess->phylink);
1146 }
1147
1148 static void ess_reset(struct ipqess *ess)
1149 {
1150 reset_control_assert(ess->ess_rst);
1151
1152 mdelay(10);
1153
1154 reset_control_deassert(ess->ess_rst);
1155
1156 /* Waiting for all inner tables to be flushed and reinitialized.
1157 * This takes between 5 and 10ms.
1158 */
1159 mdelay(10);
1160 }
1161
1162 static int ipqess_axi_probe(struct platform_device *pdev)
1163 {
1164 struct device_node *np = pdev->dev.of_node;
1165 struct ipqess *ess;
1166 struct net_device *netdev;
1167 struct resource *res;
1168 int i, err = 0;
1169
1170 netdev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct ipqess),
1171 IPQESS_NETDEV_QUEUES,
1172 IPQESS_NETDEV_QUEUES);
1173 if (!netdev)
1174 return -ENOMEM;
1175
1176 ess = netdev_priv(netdev);
1177 ess->netdev = netdev;
1178 ess->pdev = pdev;
1179 spin_lock_init(&ess->stats_lock);
1180 SET_NETDEV_DEV(netdev, &pdev->dev);
1181 platform_set_drvdata(pdev, netdev);
1182
1183 err = of_get_mac_address(np, netdev->dev_addr);
1184 if (err == -EPROBE_DEFER)
1185 return -EPROBE_DEFER;
1186
1187 if (err) {
1188
1189 random_ether_addr(netdev->dev_addr);
1190 dev_info(&ess->pdev->dev, "generated random MAC address %pM\n",
1191 netdev->dev_addr);
1192 netdev->addr_assign_type = NET_ADDR_RANDOM;
1193 }
1194
1195 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1196 ess->hw_addr = devm_ioremap_resource(&pdev->dev, res);
1197 if (IS_ERR(ess->hw_addr)) {
1198 err = PTR_ERR(ess->hw_addr);
1199 goto err_out;
1200 }
1201
1202 ess->ess_clk = of_clk_get_by_name(np, "ess_clk");
1203 if (IS_ERR(ess->ess_clk)) {
1204 dev_err(&pdev->dev, "Failed to get ess_clk\n");
1205 return PTR_ERR(ess->ess_clk);
1206 }
1207
1208 ess->ess_rst = devm_reset_control_get(&pdev->dev, "ess_rst");
1209 if (IS_ERR(ess->ess_rst)) {
1210 dev_err(&pdev->dev, "Failed to get ess_rst control!\n");
1211 return PTR_ERR(ess->ess_rst);
1212 }
1213
1214 clk_prepare_enable(ess->ess_clk);
1215
1216 ess_reset(ess);
1217
1218 ess->phylink_config.dev = &netdev->dev;
1219 ess->phylink_config.type = PHYLINK_NETDEV;
1220 ess->phylink_config.pcs_poll = true;
1221
1222 ess->phylink = phylink_create(&ess->phylink_config,
1223 of_fwnode_handle(np),
1224 PHY_INTERFACE_MODE_INTERNAL,
1225 &ipqess_phylink_mac_ops);
1226 if (IS_ERR(ess->phylink)) {
1227 err = PTR_ERR(ess->phylink);
1228 goto err_out;
1229 }
1230
1231 for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
1232 ess->tx_irq[i] = platform_get_irq(pdev, i);
1233 scnprintf(ess->tx_irq_names[i], sizeof(ess->tx_irq_names[i]),
1234 "%s:txq%d", pdev->name, i);
1235 }
1236
1237 for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
1238 ess->rx_irq[i] = platform_get_irq(pdev, i + IPQESS_MAX_TX_QUEUE);
1239 scnprintf(ess->rx_irq_names[i], sizeof(ess->rx_irq_names[i]),
1240 "%s:rxq%d", pdev->name, i);
1241 }
1242
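/* NETIF_F_TSO6 is forced to 0 here so that TSO over IPv6 is effectively
 * left out of the feature masks assembled below.
 */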
1243 #undef NETIF_F_TSO6
1244 #define NETIF_F_TSO6 0
1245
1246 netdev->netdev_ops = &ipqess_axi_netdev_ops;
1247 netdev->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
1248 NETIF_F_HW_VLAN_CTAG_RX |
1249 NETIF_F_HW_VLAN_CTAG_TX |
1250 NETIF_F_TSO | NETIF_F_TSO6 |
1251 NETIF_F_GRO | NETIF_F_SG;
1252 /* feature change is not supported yet */
1253 netdev->hw_features = 0;
1254 netdev->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |
1255 NETIF_F_TSO | NETIF_F_TSO6 |
1256 NETIF_F_GRO;
1257 netdev->watchdog_timeo = 5 * HZ;
1258 netdev->base_addr = (u32) ess->hw_addr;
1259 netdev->max_mtu = 9000;
1260 netdev->gso_max_segs = IPQESS_TX_RING_SIZE / 2;
1261
1262 ipqess_set_ethtool_ops(netdev);
1263
1264 err = register_netdev(netdev);
1265 if (err)
1266 goto err_out;
1267
1268 err = ipqess_hw_init(ess);
1269 if (err)
1270 goto err_out;
1271
1272 dev_set_threaded(netdev, true);
1273
1274 for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
1275 int qid;
1276
1277 netif_tx_napi_add(netdev, &ess->tx_ring[i].napi_tx,
1278 ipqess_tx_napi, 64);
1279 netif_napi_add(netdev,
1280 &ess->rx_ring[i].napi_rx,
1281 ipqess_rx_napi, 64);
1282
1283 qid = ess->tx_ring[i].idx;
1284 err = devm_request_irq(&ess->netdev->dev, ess->tx_irq[qid],
1285 ipqess_interrupt_tx, 0, ess->tx_irq_names[qid],
1286 &ess->tx_ring[i]);
1287 if (err)
1288 goto err_out;
1289
1290 qid = ess->rx_ring[i].idx;
1291 err = devm_request_irq(&ess->netdev->dev, ess->rx_irq[qid],
1292 ipqess_interrupt_rx, 0, ess->rx_irq_names[qid],
1293 &ess->rx_ring[i]);
1294 if (err)
1295 goto err_out;
1296 }
1297
1298 return 0;
1299
1300 err_out:
1301 ipqess_cleanup(ess);
1302 return err;
1303 }
1304
1305 static int ipqess_axi_remove(struct platform_device *pdev)
1306 {
1307 const struct net_device *netdev = platform_get_drvdata(pdev);
1308 struct ipqess *ess = netdev_priv(netdev);
1309
1310 ipqess_cleanup(ess);
1311
1312 return 0;
1313 }
1314
1315 static const struct of_device_id ipqess_of_mtable[] = {
1316 {.compatible = "qcom,ipq4019-ess-edma" },
1317 {}
1318 };
1319 MODULE_DEVICE_TABLE(of, ipqess_of_mtable);
1320
1321 static struct platform_driver ipqess_axi_driver = {
1322 .driver = {
1323 .name = "ipqess-edma",
1324 .of_match_table = ipqess_of_mtable,
1325 },
1326 .probe = ipqess_axi_probe,
1327 .remove = ipqess_axi_remove,
1328 };
1329
1330 module_platform_driver(ipqess_axi_driver);
1331
1332 MODULE_AUTHOR("Qualcomm Atheros Inc");
1333 MODULE_AUTHOR("John Crispin <john@phrozen.org>");
1334 MODULE_AUTHOR("Christian Lamparter <chunkeey@gmail.com>");
1335 MODULE_AUTHOR("Gabor Juhos <j4g8y7@gmail.com>");
1336 MODULE_LICENSE("GPL");