1 From: Lorenzo Bianconi <lorenzo@kernel.org>
2 Date: Sat, 5 Nov 2022 23:36:19 +0100
3 Subject: [PATCH] net: ethernet: mtk_wed: introduce wed wo support
4
5 Introduce WO chip support to the mtk_wed driver. MTK WED WO is used to
6 implement RX Wireless Ethernet Dispatch and to offload traffic received
7 by the WLAN NIC to the wired interface.
8
9 Tested-by: Daniel Golle <daniel@makrotopia.org>
10 Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
11 Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
12 Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
13 Signed-off-by: David S. Miller <davem@davemloft.net>
14 ---
15 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.c
16
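Note (not part of the patch): the WO message path added below works on a
simple producer/consumer ring. mtk_wed_wo_queue_tx_skb() picks the next
free slot as (head + 1) % n_desc, copies the command payload into that
slot's pre-mapped buffer, advances head, and "kicks" the firmware through
the CCIF doorbell. The following standalone C sketch illustrates that
index arithmetic only; all names (wo_ring, wo_ring_tx, RING_SIZE,
SLOT_LEN) are hypothetical stand-ins and do not exist in the driver.

/* Illustrative sketch of the WO TX ring logic, assuming a single
 * producer (driver) and a single consumer (firmware). It mirrors the
 * head/tail handling and -ENOMEM checks of mtk_wed_wo_queue_tx_skb()
 * without any DMA or register access.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 4	/* stands in for MTK_WED_WO_RING_SIZE (256) */
#define SLOT_LEN  64	/* stands in for MTK_WED_WO_CMD_LEN (1504) */

struct wo_ring {
	uint8_t buf[RING_SIZE][SLOT_LEN];
	uint32_t head;	/* last slot filled by the driver */
	uint32_t tail;	/* consumer index, read back from the firmware */
};

/* Returns 0 on success, -1 if the ring is full or the message does not
 * fit in a slot, matching the -ENOMEM paths in the real function.
 */
static int wo_ring_tx(struct wo_ring *q, const void *data, size_t len)
{
	uint32_t index = (q->head + 1) % RING_SIZE;

	if (index == q->tail || len > SLOT_LEN)
		return -1;

	memcpy(q->buf[index], data, len);
	q->head = index;
	/* the real driver now writes q->head to the cpu_idx register and
	 * rings the CCIF doorbell (mtk_wed_wo_queue_kick() + kickout)
	 */
	printf("kick: slot %u, %zu bytes\n", index, len);
	return 0;
}

int main(void)
{
	struct wo_ring q = { .tail = 0 };
	const char msg[] = "wo command";

	return wo_ring_tx(&q, msg, sizeof(msg)) ? 1 : 0;
}
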
17 --- a/drivers/net/ethernet/mediatek/Makefile
18 +++ b/drivers/net/ethernet/mediatek/Makefile
19 @@ -5,7 +5,7 @@
20
21 obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
22 mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
23 -mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o
24 +mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o mtk_wed_wo.o
25 ifdef CONFIG_DEBUG_FS
26 mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
27 endif
28 --- a/drivers/net/ethernet/mediatek/mtk_wed.c
29 +++ b/drivers/net/ethernet/mediatek/mtk_wed.c
30 @@ -16,6 +16,7 @@
31 #include "mtk_wed_regs.h"
32 #include "mtk_wed.h"
33 #include "mtk_ppe.h"
34 +#include "mtk_wed_wo.h"
35
36 #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
37
38 @@ -355,6 +356,8 @@ mtk_wed_detach(struct mtk_wed_device *de
39
40 mtk_wed_free_buffer(dev);
41 mtk_wed_free_tx_rings(dev);
42 + if (hw->version != 1)
43 + mtk_wed_wo_deinit(hw);
44
45 if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
46 struct device_node *wlan_node;
47 @@ -885,9 +888,11 @@ mtk_wed_attach(struct mtk_wed_device *de
48 }
49
50 mtk_wed_hw_init_early(dev);
51 - if (hw->hifsys)
52 + if (hw->version == 1)
53 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
54 BIT(hw->index), 0);
55 + else
56 + ret = mtk_wed_wo_init(hw);
57
58 out:
59 mutex_unlock(&hw_lock);
60 --- a/drivers/net/ethernet/mediatek/mtk_wed.h
61 +++ b/drivers/net/ethernet/mediatek/mtk_wed.h
62 @@ -10,6 +10,7 @@
63 #include <linux/netdevice.h>
64
65 struct mtk_eth;
66 +struct mtk_wed_wo;
67
68 struct mtk_wed_hw {
69 struct device_node *node;
70 @@ -22,6 +23,7 @@ struct mtk_wed_hw {
71 struct regmap *mirror;
72 struct dentry *debugfs_dir;
73 struct mtk_wed_device *wed_dev;
74 + struct mtk_wed_wo *wed_wo;
75 u32 debugfs_reg;
76 u32 num_flows;
77 u8 version;
78 --- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
79 +++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
80 @@ -122,8 +122,7 @@ mtk_wed_mcu_skb_send_msg(struct mtk_wed_
81 if (id == MTK_WED_MODULE_ID_WO)
82 hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_FROM_TO_WO);
83
84 - dev_kfree_skb(skb);
85 - return 0;
86 + return mtk_wed_wo_queue_tx_skb(wo, &wo->q_tx, skb);
87 }
88
89 static int
90 --- /dev/null
91 +++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
92 @@ -0,0 +1,508 @@
93 +// SPDX-License-Identifier: GPL-2.0-only
94 +/* Copyright (C) 2022 MediaTek Inc.
95 + *
96 + * Author: Lorenzo Bianconi <lorenzo@kernel.org>
97 + * Sujuan Chen <sujuan.chen@mediatek.com>
98 + */
99 +
100 +#include <linux/kernel.h>
101 +#include <linux/dma-mapping.h>
102 +#include <linux/of_platform.h>
103 +#include <linux/interrupt.h>
104 +#include <linux/of_address.h>
105 +#include <linux/mfd/syscon.h>
106 +#include <linux/of_irq.h>
107 +#include <linux/bitfield.h>
108 +
109 +#include "mtk_wed.h"
110 +#include "mtk_wed_regs.h"
111 +#include "mtk_wed_wo.h"
112 +
113 +static u32
114 +mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
115 +{
116 + u32 val;
117 +
118 + if (regmap_read(wo->mmio.regs, reg, &val))
119 + val = ~0;
120 +
121 + return val;
122 +}
123 +
124 +static void
125 +mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
126 +{
127 + regmap_write(wo->mmio.regs, reg, val);
128 +}
129 +
130 +static u32
131 +mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
132 +{
133 + u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
134 +
135 + return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
136 +}
137 +
138 +static void
139 +mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
140 +{
141 + mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
142 +}
143 +
144 +static void
145 +mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
146 +{
147 + mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
148 +}
149 +
150 +static void
151 +mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
152 +{
153 + unsigned long flags;
154 +
155 + spin_lock_irqsave(&wo->mmio.lock, flags);
156 + wo->mmio.irq_mask &= ~mask;
157 + wo->mmio.irq_mask |= val;
158 + if (set)
159 + mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
160 + spin_unlock_irqrestore(&wo->mmio.lock, flags);
161 +}
162 +
163 +static void
164 +mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
165 +{
166 + mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
167 + tasklet_schedule(&wo->mmio.irq_tasklet);
168 +}
169 +
170 +static void
171 +mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
172 +{
173 + mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
174 +}
175 +
176 +static void
177 +mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
178 +{
179 + mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
180 + mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
181 +}
182 +
183 +static void
184 +mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
185 + u32 val)
186 +{
187 + wmb();
188 + mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
189 +}
190 +
191 +static void *
192 +mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
193 + bool flush)
194 +{
195 + int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
196 + int index = (q->tail + 1) % q->n_desc;
197 + struct mtk_wed_wo_queue_entry *entry;
198 + struct mtk_wed_wo_queue_desc *desc;
199 + void *buf;
200 +
201 + if (!q->queued)
202 + return NULL;
203 +
204 + if (flush)
205 + q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
206 + else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
207 + return NULL;
208 +
209 + q->tail = index;
210 + q->queued--;
211 +
212 + desc = &q->desc[index];
213 + entry = &q->entry[index];
214 + buf = entry->buf;
215 + if (len)
216 + *len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
217 + le32_to_cpu(READ_ONCE(desc->ctrl)));
218 + if (buf)
219 + dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
220 + DMA_FROM_DEVICE);
221 + entry->buf = NULL;
222 +
223 + return buf;
224 +}
225 +
226 +static int
227 +mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
228 + gfp_t gfp, bool rx)
229 +{
230 + enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
231 + int n_buf = 0;
232 +
233 + spin_lock_bh(&q->lock);
234 + while (q->queued < q->n_desc) {
235 + void *buf = page_frag_alloc(&q->cache, q->buf_size, gfp);
236 + struct mtk_wed_wo_queue_entry *entry;
237 + dma_addr_t addr;
238 +
239 + if (!buf)
240 + break;
241 +
242 + addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
243 + if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
244 + skb_free_frag(buf);
245 + break;
246 + }
247 +
248 + q->head = (q->head + 1) % q->n_desc;
249 + entry = &q->entry[q->head];
250 + entry->addr = addr;
251 + entry->len = q->buf_size;
252 + q->entry[q->head].buf = buf;
253 +
254 + if (rx) {
255 + struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
256 + u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
257 + FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
258 + entry->len);
259 +
260 + WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
261 + WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
262 + }
263 + q->queued++;
264 + n_buf++;
265 + }
266 + spin_unlock_bh(&q->lock);
267 +
268 + return n_buf;
269 +}
270 +
271 +static void
272 +mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
273 +{
274 + mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
275 + mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
276 +}
277 +
278 +static void
279 +mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
280 +{
281 + for (;;) {
282 + struct mtk_wed_mcu_hdr *hdr;
283 + struct sk_buff *skb;
284 + void *data;
285 + u32 len;
286 +
287 + data = mtk_wed_wo_dequeue(wo, q, &len, false);
288 + if (!data)
289 + break;
290 +
291 + skb = build_skb(data, q->buf_size);
292 + if (!skb) {
293 + skb_free_frag(data);
294 + continue;
295 + }
296 +
297 + __skb_put(skb, len);
298 + if (mtk_wed_mcu_check_msg(wo, skb)) {
299 + dev_kfree_skb(skb);
300 + continue;
301 + }
302 +
303 + hdr = (struct mtk_wed_mcu_hdr *)skb->data;
304 + if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
305 + mtk_wed_mcu_rx_event(wo, skb);
306 + else
307 + mtk_wed_mcu_rx_unsolicited_event(wo, skb);
308 + }
309 +
310 + if (mtk_wed_wo_queue_refill(wo, q, GFP_ATOMIC, true)) {
311 + u32 index = (q->head - 1) % q->n_desc;
312 +
313 + mtk_wed_wo_queue_kick(wo, q, index);
314 + }
315 +}
316 +
317 +static irqreturn_t
318 +mtk_wed_wo_irq_handler(int irq, void *data)
319 +{
320 + struct mtk_wed_wo *wo = data;
321 +
322 + mtk_wed_wo_set_isr(wo, 0);
323 + tasklet_schedule(&wo->mmio.irq_tasklet);
324 +
325 + return IRQ_HANDLED;
326 +}
327 +
328 +static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
329 +{
330 + struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
331 + u32 intr, mask;
332 +
333 + /* disable interrupts */
334 + mtk_wed_wo_set_isr(wo, 0);
335 +
336 + intr = mtk_wed_wo_get_isr(wo);
337 + intr &= wo->mmio.irq_mask;
338 + mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
339 + mtk_wed_wo_irq_disable(wo, mask);
340 +
341 + if (intr & MTK_WED_WO_RXCH_INT_MASK) {
342 + mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
343 + mtk_wed_wo_rx_complete(wo);
344 + }
345 +}
346 +
347 +/* mtk wed wo hw queues */
348 +
349 +static int
350 +mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
351 + int n_desc, int buf_size, int index,
352 + struct mtk_wed_wo_queue_regs *regs)
353 +{
354 + spin_lock_init(&q->lock);
355 + q->regs = *regs;
356 + q->n_desc = n_desc;
357 + q->buf_size = buf_size;
358 +
359 + q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
360 + &q->desc_dma, GFP_KERNEL);
361 + if (!q->desc)
362 + return -ENOMEM;
363 +
364 + q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
365 + GFP_KERNEL);
366 + if (!q->entry)
367 + return -ENOMEM;
368 +
369 + return 0;
370 +}
371 +
372 +static void
373 +mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
374 +{
375 + mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
376 + dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
377 + q->desc_dma);
378 +}
379 +
380 +static void
381 +mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
382 +{
383 + struct page *page;
384 + int i;
385 +
386 + spin_lock_bh(&q->lock);
387 + for (i = 0; i < q->n_desc; i++) {
388 + struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
389 +
390 + dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
391 + DMA_TO_DEVICE);
392 + skb_free_frag(entry->buf);
393 + entry->buf = NULL;
394 + }
395 + spin_unlock_bh(&q->lock);
396 +
397 + if (!q->cache.va)
398 + return;
399 +
400 + page = virt_to_page(q->cache.va);
401 + __page_frag_cache_drain(page, q->cache.pagecnt_bias);
402 + memset(&q->cache, 0, sizeof(q->cache));
403 +}
404 +
405 +static void
406 +mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
407 +{
408 + struct page *page;
409 +
410 + spin_lock_bh(&q->lock);
411 + for (;;) {
412 + void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
413 +
414 + if (!buf)
415 + break;
416 +
417 + skb_free_frag(buf);
418 + }
419 + spin_unlock_bh(&q->lock);
420 +
421 + if (!q->cache.va)
422 + return;
423 +
424 + page = virt_to_page(q->cache.va);
425 + __page_frag_cache_drain(page, q->cache.pagecnt_bias);
426 + memset(&q->cache, 0, sizeof(q->cache));
427 +}
428 +
429 +static void
430 +mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
431 +{
432 + mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
433 + mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
434 + mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
435 +}
436 +
437 +int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
438 + struct sk_buff *skb)
439 +{
440 + struct mtk_wed_wo_queue_entry *entry;
441 + struct mtk_wed_wo_queue_desc *desc;
442 + int ret = 0, index;
443 + u32 ctrl;
444 +
445 + spin_lock_bh(&q->lock);
446 +
447 + q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
448 + index = (q->head + 1) % q->n_desc;
449 + if (q->tail == index) {
450 + ret = -ENOMEM;
451 + goto out;
452 + }
453 +
454 + entry = &q->entry[index];
455 + if (skb->len > entry->len) {
456 + ret = -ENOMEM;
457 + goto out;
458 + }
459 +
460 + desc = &q->desc[index];
461 + q->head = index;
462 +
463 + dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
464 + DMA_TO_DEVICE);
465 + memcpy(entry->buf, skb->data, skb->len);
466 + dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
467 + DMA_TO_DEVICE);
468 +
469 + ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
470 + MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
471 + WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
472 + WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
473 +
474 + mtk_wed_wo_queue_kick(wo, q, q->head);
475 + mtk_wed_wo_kickout(wo);
476 +out:
477 + spin_unlock_bh(&q->lock);
478 +
479 + dev_kfree_skb(skb);
480 +
481 + return ret;
482 +}
483 +
484 +static int
485 +mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
486 +{
487 + return 0;
488 +}
489 +
490 +static int
491 +mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
492 +{
493 + struct mtk_wed_wo_queue_regs regs;
494 + struct device_node *np;
495 + int ret;
496 +
497 + np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
498 + if (!np)
499 + return -ENODEV;
500 +
501 + wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
502 + if (IS_ERR_OR_NULL(wo->mmio.regs))
503 + return PTR_ERR(wo->mmio.regs);
504 +
505 + wo->mmio.irq = irq_of_parse_and_map(np, 0);
506 + wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
507 + spin_lock_init(&wo->mmio.lock);
508 + tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);
509 +
510 + ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
511 + mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
512 + KBUILD_MODNAME, wo);
513 + if (ret)
514 + goto error;
515 +
516 + regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
517 + regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
518 + regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
519 + regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
520 +
521 + ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
522 + MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
523 + &regs);
524 + if (ret)
525 + goto error;
526 +
527 + mtk_wed_wo_queue_refill(wo, &wo->q_tx, GFP_KERNEL, false);
528 + mtk_wed_wo_queue_reset(wo, &wo->q_tx);
529 +
530 + regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
531 + regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
532 + regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
533 + regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
534 +
535 + ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
536 + MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
537 + &regs);
538 + if (ret)
539 + goto error;
540 +
541 + mtk_wed_wo_queue_refill(wo, &wo->q_rx, GFP_KERNEL, true);
542 + mtk_wed_wo_queue_reset(wo, &wo->q_rx);
543 +
544 + /* rx queue irqmask */
545 + mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
546 +
547 + return 0;
548 +
549 +error:
550 + devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
551 +
552 + return ret;
553 +}
554 +
555 +static void
556 +mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
557 +{
558 + /* disable interrupts */
559 + mtk_wed_wo_set_isr(wo, 0);
560 +
561 + tasklet_disable(&wo->mmio.irq_tasklet);
562 +
563 + disable_irq(wo->mmio.irq);
564 + devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
565 +
566 + mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
567 + mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
568 + mtk_wed_wo_queue_free(wo, &wo->q_tx);
569 + mtk_wed_wo_queue_free(wo, &wo->q_rx);
570 +}
571 +
572 +int mtk_wed_wo_init(struct mtk_wed_hw *hw)
573 +{
574 + struct mtk_wed_wo *wo;
575 + int ret;
576 +
577 + wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
578 + if (!wo)
579 + return -ENOMEM;
580 +
581 + hw->wed_wo = wo;
582 + wo->hw = hw;
583 +
584 + ret = mtk_wed_wo_hardware_init(wo);
585 + if (ret)
586 + return ret;
587 +
588 + ret = mtk_wed_mcu_init(wo);
589 + if (ret)
590 + return ret;
591 +
592 + return mtk_wed_wo_exception_init(wo);
593 +}
594 +
595 +void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
596 +{
597 + struct mtk_wed_wo *wo = hw->wed_wo;
598 +
599 + mtk_wed_wo_hw_deinit(wo);
600 +}
601 --- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
602 +++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
603 @@ -80,6 +80,54 @@ enum mtk_wed_dummy_cr_idx {
604 #define MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK BIT(5)
605 #define MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK BIT(0)
606
607 +#define MTK_WED_WO_RING_SIZE 256
608 +#define MTK_WED_WO_CMD_LEN 1504
609 +
610 +#define MTK_WED_WO_TXCH_NUM 0
611 +#define MTK_WED_WO_RXCH_NUM 1
612 +#define MTK_WED_WO_RXCH_WO_EXCEPTION 7
613 +
614 +#define MTK_WED_WO_TXCH_INT_MASK BIT(0)
615 +#define MTK_WED_WO_RXCH_INT_MASK BIT(1)
616 +#define MTK_WED_WO_EXCEPTION_INT_MASK BIT(7)
617 +#define MTK_WED_WO_ALL_INT_MASK (MTK_WED_WO_RXCH_INT_MASK | \
618 + MTK_WED_WO_EXCEPTION_INT_MASK)
619 +
620 +#define MTK_WED_WO_CCIF_BUSY 0x004
621 +#define MTK_WED_WO_CCIF_START 0x008
622 +#define MTK_WED_WO_CCIF_TCHNUM 0x00c
623 +#define MTK_WED_WO_CCIF_RCHNUM 0x010
624 +#define MTK_WED_WO_CCIF_RCHNUM_MASK GENMASK(7, 0)
625 +
626 +#define MTK_WED_WO_CCIF_ACK 0x014
627 +#define MTK_WED_WO_CCIF_IRQ0_MASK 0x018
628 +#define MTK_WED_WO_CCIF_IRQ1_MASK 0x01c
629 +#define MTK_WED_WO_CCIF_DUMMY1 0x020
630 +#define MTK_WED_WO_CCIF_DUMMY2 0x024
631 +#define MTK_WED_WO_CCIF_DUMMY3 0x028
632 +#define MTK_WED_WO_CCIF_DUMMY4 0x02c
633 +#define MTK_WED_WO_CCIF_SHADOW1 0x030
634 +#define MTK_WED_WO_CCIF_SHADOW2 0x034
635 +#define MTK_WED_WO_CCIF_SHADOW3 0x038
636 +#define MTK_WED_WO_CCIF_SHADOW4 0x03c
637 +#define MTK_WED_WO_CCIF_DUMMY5 0x050
638 +#define MTK_WED_WO_CCIF_DUMMY6 0x054
639 +#define MTK_WED_WO_CCIF_DUMMY7 0x058
640 +#define MTK_WED_WO_CCIF_DUMMY8 0x05c
641 +#define MTK_WED_WO_CCIF_SHADOW5 0x060
642 +#define MTK_WED_WO_CCIF_SHADOW6 0x064
643 +#define MTK_WED_WO_CCIF_SHADOW7 0x068
644 +#define MTK_WED_WO_CCIF_SHADOW8 0x06c
645 +
646 +#define MTK_WED_WO_CTL_SD_LEN1 GENMASK(13, 0)
647 +#define MTK_WED_WO_CTL_LAST_SEC1 BIT(14)
648 +#define MTK_WED_WO_CTL_BURST BIT(15)
649 +#define MTK_WED_WO_CTL_SD_LEN0_SHIFT 16
650 +#define MTK_WED_WO_CTL_SD_LEN0 GENMASK(29, 16)
651 +#define MTK_WED_WO_CTL_LAST_SEC0 BIT(30)
652 +#define MTK_WED_WO_CTL_DMA_DONE BIT(31)
653 +#define MTK_WED_WO_INFO_WINFO GENMASK(15, 0)
654 +
655 struct mtk_wed_wo_memory_region {
656 const char *name;
657 void __iomem *addr;
658 @@ -112,10 +160,53 @@ struct mtk_wed_fw_trailer {
659 u32 crc;
660 };
661
662 +struct mtk_wed_wo_queue_regs {
663 + u32 desc_base;
664 + u32 ring_size;
665 + u32 cpu_idx;
666 + u32 dma_idx;
667 +};
668 +
669 +struct mtk_wed_wo_queue_desc {
670 + __le32 buf0;
671 + __le32 ctrl;
672 + __le32 buf1;
673 + __le32 info;
674 + __le32 reserved[4];
675 +} __packed __aligned(32);
676 +
677 +struct mtk_wed_wo_queue_entry {
678 + dma_addr_t addr;
679 + void *buf;
680 + u32 len;
681 +};
682 +
683 +struct mtk_wed_wo_queue {
684 + struct mtk_wed_wo_queue_regs regs;
685 +
686 + struct page_frag_cache cache;
687 + spinlock_t lock;
688 +
689 + struct mtk_wed_wo_queue_desc *desc;
690 + dma_addr_t desc_dma;
691 +
692 + struct mtk_wed_wo_queue_entry *entry;
693 +
694 + u16 head;
695 + u16 tail;
696 + int n_desc;
697 + int queued;
698 + int buf_size;
699 +
700 +};
701 +
702 struct mtk_wed_wo {
703 struct mtk_wed_hw *hw;
704 struct mtk_wed_wo_memory_region boot;
705
706 + struct mtk_wed_wo_queue q_tx;
707 + struct mtk_wed_wo_queue q_rx;
708 +
709 struct {
710 struct mutex mutex;
711 int timeout;
712 @@ -124,6 +215,15 @@ struct mtk_wed_wo {
713 struct sk_buff_head res_q;
714 wait_queue_head_t wait;
715 } mcu;
716 +
717 + struct {
718 + struct regmap *regs;
719 +
720 + spinlock_t lock;
721 + struct tasklet_struct irq_tasklet;
722 + int irq;
723 + u32 irq_mask;
724 + } mmio;
725 };
726
727 static inline int
728 @@ -146,5 +246,9 @@ void mtk_wed_mcu_rx_unsolicited_event(st
729 int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd,
730 const void *data, int len, bool wait_resp);
731 int mtk_wed_mcu_init(struct mtk_wed_wo *wo);
732 +int mtk_wed_wo_init(struct mtk_wed_hw *hw);
733 +void mtk_wed_wo_deinit(struct mtk_wed_hw *hw);
734 +int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *dev, struct mtk_wed_wo_queue *q,
735 + struct sk_buff *skb);
736
737 #endif /* __MTK_WED_WO_H */