From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Sat, 5 Nov 2022 23:36:19 +0100
Subject: [PATCH] net: ethernet: mtk_wed: introduce wed wo support

Introduce WO chip support to the mtk_wed driver. The MTK WED WO module is
used to implement RX Wireless Ethernet Dispatch and to offload traffic
received by the WLAN NIC onto the wired interface.

Tested-by: Daniel Golle <daniel@makrotopia.org>
Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.c
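
Note for reviewers (not part of the patch): with this change a WO command
built by the MCU layer ends up on the new TX message ring instead of being
dropped. A minimal usage sketch; the command id and payload below are
hypothetical, and mtk_wed_mcu_send_msg()/MTK_WED_MODULE_ID_WO come from the
existing WED MCU support:

/* Hypothetical example: push a one-word configuration command to the WO
 * MCU and wait for its reply. mtk_wed_mcu_send_msg() wraps the payload in
 * a mtk_wed_mcu_hdr and, after this patch, hands the resulting skb to
 * mtk_wed_wo_queue_tx_skb() on wo->q_tx.
 */
static int wo_send_cfg_example(struct mtk_wed_wo *wo)
{
	__le32 val = cpu_to_le32(1);	/* hypothetical payload */

	return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
				    MTK_WED_WO_CMD_WED_CFG, /* assumed cmd id */
				    &val, sizeof(val), true);
}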
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
 obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
 mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
-mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o mtk_wed_wo.o
 mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
 #include "mtk_wed_regs.h"
+#include "mtk_wed_wo.h"
 
 #define MTK_PCIE_BASE(n)		(0x1a143000 + (n) * 0x2000)
 
@@ -355,6 +356,8 @@ mtk_wed_detach(struct mtk_wed_device *dev)
 	mtk_wed_free_buffer(dev);
 	mtk_wed_free_tx_rings(dev);
+	if (hw->version != 1)
+		mtk_wed_wo_deinit(hw);
 
 	if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
 		struct device_node *wlan_node;
 
@@ -885,9 +888,11 @@ mtk_wed_attach(struct mtk_wed_device *dev)
 	mtk_wed_hw_init_early(dev);
-	regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
-			   BIT(hw->index), 0);
+	if (hw->version == 1)
+		regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+				   BIT(hw->index), 0);
+	else
+		ret = mtk_wed_wo_init(hw);
 
 	mutex_unlock(&hw_lock);
--- a/drivers/net/ethernet/mediatek/mtk_wed.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
 #include <linux/netdevice.h>
 
 struct mtk_eth;
+struct mtk_wed_wo;
 
 struct mtk_wed_hw {
 	struct device_node *node;
@@ -22,6 +23,7 @@ struct mtk_wed_hw {
 	struct regmap *mirror;
 	struct dentry *debugfs_dir;
 	struct mtk_wed_device *wed_dev;
+	struct mtk_wed_wo *wed_wo;
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
@@ -122,8 +122,7 @@ mtk_wed_mcu_skb_send_msg(struct mtk_wed_wo *wo,
 	if (id == MTK_WED_MODULE_ID_WO)
 		hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_FROM_TO_WO);
 
-	dev_kfree_skb(skb);
-
-	return 0;
+	return mtk_wed_wo_queue_tx_skb(wo, &wo->q_tx, skb);
 }
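
The helper this now calls copies the message into a buffer that was already
allocated and DMA-mapped by mtk_wed_wo_queue_refill(), then publishes it
through the descriptor control word. A simplified sketch of that fill step
(locking, ring-full checks and the CCIF doorbell elided; see
mtk_wed_wo_queue_tx_skb() in the new file below for the real thing):

/* Simplified restatement of the TX descriptor fill; not part of the patch. */
static void wo_tx_fill_sketch(struct mtk_wed_wo_queue *q, int index,
			      struct sk_buff *skb)
{
	struct mtk_wed_wo_queue_entry *entry = &q->entry[index];
	struct mtk_wed_wo_queue_desc *desc = &q->desc[index];
	u32 ctrl;

	/* bounce the message into the preallocated, DMA-mapped ring buffer */
	memcpy(entry->buf, skb->data, skb->len);

	/* length, last-segment and DMA_DONE handed over in one control word */
	ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
	       MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
	WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
}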
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2022 MediaTek Inc.
+ *
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ *	   Sujuan Chen <sujuan.chen@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_irq.h>
+#include <linux/bitfield.h>
+
+#include "mtk_wed.h"
+#include "mtk_wed_regs.h"
+#include "mtk_wed_wo.h"
+
+static u32
+mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
+{
+	u32 val;
+
+	if (regmap_read(wo->mmio.regs, reg, &val))
+		val = ~0;
+
+	return val;
+}
+
+static void
+mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
+{
+	regmap_write(wo->mmio.regs, reg, val);
+}
+
+static u32
+mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
+{
+	u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
+
+	return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
+}
+
+static void
+mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
+{
+	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
+}
+
+static void
+mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
+{
+	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
+}
+
+static void
+mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wo->mmio.lock, flags);
+	wo->mmio.irq_mask &= ~mask;
+	wo->mmio.irq_mask |= val;
+	if (set)
+		mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
+	spin_unlock_irqrestore(&wo->mmio.lock, flags);
+}
+
+static void
+mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
+{
+	mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
+	tasklet_schedule(&wo->mmio.irq_tasklet);
+}
+
+static void
+mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
+{
+	mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
+}
+
+static void
+mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
+{
+	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
+	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
+}
+
+static void
+mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+		      u32 val)
+{
+	wmb();
+	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
+}
+
+static void *
+mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
+		   bool flush)
+{
+	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
+	int index = (q->tail + 1) % q->n_desc;
+	struct mtk_wed_wo_queue_entry *entry;
+	struct mtk_wed_wo_queue_desc *desc;
+	void *buf;
+
+	if (!q->queued)
+		return NULL;
+
+	if (flush)
+		q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
+	else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
+		return NULL;
+
+	q->tail = index;
+	q->queued--;
+
+	desc = &q->desc[index];
+	entry = &q->entry[index];
+	buf = entry->buf;
+	if (buf && len)
+		*len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
+				 le32_to_cpu(READ_ONCE(desc->ctrl)));
+	if (buf)
+		dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
+				 DMA_FROM_DEVICE);
+	entry->buf = NULL;
+
+	return buf;
+}
+
+static int
+mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+			gfp_t gfp, bool rx)
+{
+	enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+	int n_buf = 0;
+
+	spin_lock_bh(&q->lock);
+	while (q->queued < q->n_desc) {
+		void *buf = page_frag_alloc(&q->cache, q->buf_size, gfp);
+		struct mtk_wed_wo_queue_entry *entry;
+		dma_addr_t addr;
+
+		if (!buf)
+			break;
+
+		addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
+		if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
+			skb_free_frag(buf);
+			break;
+		}
+
+		q->head = (q->head + 1) % q->n_desc;
+		entry = &q->entry[q->head];
+		entry->addr = addr;
+		entry->len = q->buf_size;
+		q->entry[q->head].buf = buf;
+
+		if (rx) {
+			struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
+			u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
+				   FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
+					      entry->len);
+
+			WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
+			WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+		}
+		q->queued++;
+		n_buf++;
+	}
+	spin_unlock_bh(&q->lock);
+
+	return n_buf;
+}
+
+static void
+mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
+{
+	mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
+	mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
+}
+
+static void
+mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	for (;;) {
+		struct mtk_wed_mcu_hdr *hdr;
+		struct sk_buff *skb;
+		void *data;
+		u32 len;
+
+		data = mtk_wed_wo_dequeue(wo, q, &len, false);
+		if (!data)
+			break;
+
+		skb = build_skb(data, q->buf_size);
+		if (!skb) {
+			skb_free_frag(data);
+			continue;
+		}
+
+		__skb_put(skb, len);
+		if (mtk_wed_mcu_check_msg(wo, skb)) {
+			dev_kfree_skb(skb);
+			continue;
+		}
+
+		hdr = (struct mtk_wed_mcu_hdr *)skb->data;
+		if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
+			mtk_wed_mcu_rx_event(wo, skb);
+		else
+			mtk_wed_mcu_rx_unsolicited_event(wo, skb);
+	}
+
+	if (mtk_wed_wo_queue_refill(wo, q, GFP_ATOMIC, true)) {
+		u32 index = (q->head - 1) % q->n_desc;
+
+		mtk_wed_wo_queue_kick(wo, q, index);
+	}
+}
+
+static irqreturn_t
+mtk_wed_wo_irq_handler(int irq, void *data)
+{
+	struct mtk_wed_wo *wo = data;
+
+	mtk_wed_wo_set_isr(wo, 0);
+	tasklet_schedule(&wo->mmio.irq_tasklet);
+
+	return IRQ_HANDLED;
+}
+
+static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
+{
+	struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
+	u32 intr, mask;
+
+	/* disable interrupts */
+	mtk_wed_wo_set_isr(wo, 0);
+
+	intr = mtk_wed_wo_get_isr(wo);
+	intr &= wo->mmio.irq_mask;
+	mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
+	mtk_wed_wo_irq_disable(wo, mask);
+
+	if (intr & MTK_WED_WO_RXCH_INT_MASK) {
+		mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
+		mtk_wed_wo_rx_complete(wo);
+	}
+}
+
+/* mtk wed wo hw queues */
+static int
+mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+		       int n_desc, int buf_size, int index,
+		       struct mtk_wed_wo_queue_regs *regs)
+{
+	spin_lock_init(&q->lock);
+	q->regs = *regs;
+	q->n_desc = n_desc;
+	q->buf_size = buf_size;
+
+	q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
+				      &q->desc_dma, GFP_KERNEL);
+	if (!q->desc)
+		return -ENOMEM;
+
+	q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
+				GFP_KERNEL);
+	if (!q->entry)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void
+mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
+	dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
+			  q->desc_dma);
+}
+
+static void
+mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	struct page *page;
+	int i;
+
+	spin_lock_bh(&q->lock);
+	for (i = 0; i < q->n_desc; i++) {
+		struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
+
+		dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
+				 DMA_TO_DEVICE);
+		skb_free_frag(entry->buf);
+		entry->buf = NULL;
+	}
+	spin_unlock_bh(&q->lock);
+
+	if (!q->cache.va)
+		return;
+
+	page = virt_to_page(q->cache.va);
+	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
+	memset(&q->cache, 0, sizeof(q->cache));
+}
+
+static void
+mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	struct page *page;
+
+	spin_lock_bh(&q->lock);
+	for (;;) {
+		void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
+
+		if (!buf)
+			break;
+
+		skb_free_frag(buf);
+	}
+	spin_unlock_bh(&q->lock);
+
+	if (!q->cache.va)
+		return;
+
+	page = virt_to_page(q->cache.va);
+	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
+	memset(&q->cache, 0, sizeof(q->cache));
+}
+
+static void
+mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
+	mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
+	mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
+}
+
+int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+			    struct sk_buff *skb)
+{
+	struct mtk_wed_wo_queue_entry *entry;
+	struct mtk_wed_wo_queue_desc *desc;
+	int ret = 0, index;
+	u32 ctrl;
+
+	spin_lock_bh(&q->lock);
+
+	q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
+	index = (q->head + 1) % q->n_desc;
+	if (q->tail == index) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	entry = &q->entry[index];
+	if (skb->len > entry->len) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	desc = &q->desc[index];
+	q->head = index;
+
+	dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
+				DMA_TO_DEVICE);
+	memcpy(entry->buf, skb->data, skb->len);
+	dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
+				   DMA_TO_DEVICE);
+
+	ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
+	       MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
+	WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
+	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+
+	mtk_wed_wo_queue_kick(wo, q, q->head);
+	mtk_wed_wo_kickout(wo);
+out:
+	spin_unlock_bh(&q->lock);
+
+	dev_kfree_skb(skb);
+
+	return ret;
+}
+
+static int
+mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
+{
+	return 0;
+}
+
+static int
+mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
+{
+	struct mtk_wed_wo_queue_regs regs;
+	struct device_node *np;
+	int ret;
+
+	np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
+	if (!np)
+		return -ENODEV;
+
+	wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
+	if (IS_ERR_OR_NULL(wo->mmio.regs))
+		return PTR_ERR(wo->mmio.regs);
+
+	wo->mmio.irq = irq_of_parse_and_map(np, 0);
+	wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
+	spin_lock_init(&wo->mmio.lock);
+	tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);
+
+	ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
+			       mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
+			       KBUILD_MODNAME, wo);
+	if (ret)
+		goto error;
+
+	regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
+	regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
+	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
+	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
+
+	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
+				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
+				     &regs);
+	if (ret)
+		goto error;
+
+	mtk_wed_wo_queue_refill(wo, &wo->q_tx, GFP_KERNEL, false);
+	mtk_wed_wo_queue_reset(wo, &wo->q_tx);
+
+	regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
+	regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
+	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
+	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
+
+	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
+				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
+				     &regs);
+	if (ret)
+		goto error;
+
+	mtk_wed_wo_queue_refill(wo, &wo->q_rx, GFP_KERNEL, true);
+	mtk_wed_wo_queue_reset(wo, &wo->q_rx);
+
+	/* rx queue irqmask */
+	mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
+
+	return 0;
+
+error:
+	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
+
+	return ret;
+}
+
+static void
+mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
+{
+	/* disable interrupts */
+	mtk_wed_wo_set_isr(wo, 0);
+
+	tasklet_disable(&wo->mmio.irq_tasklet);
+
+	disable_irq(wo->mmio.irq);
+	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
+
+	mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
+	mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
+	mtk_wed_wo_queue_free(wo, &wo->q_tx);
+	mtk_wed_wo_queue_free(wo, &wo->q_rx);
+}
+
+int mtk_wed_wo_init(struct mtk_wed_hw *hw)
+{
+	struct mtk_wed_wo *wo;
+	int ret;
+
+	wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
+	if (!wo)
+		return -ENOMEM;
+
+	hw->wed_wo = wo;
+	wo->hw = hw;
+
+	ret = mtk_wed_wo_hardware_init(wo);
+	if (ret)
+		return ret;
+
+	ret = mtk_wed_mcu_init(wo);
+	if (ret)
+		return ret;
+
+	return mtk_wed_wo_exception_init(wo);
+}
+
+void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
+{
+	struct mtk_wed_wo *wo = hw->wed_wo;
+
+	mtk_wed_wo_hw_deinit(wo);
+}
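
To summarize the RX half implemented above: the hard irq only masks the CCIF
sources and defers to the tasklet, which drains the ring, dispatches each
message to the MCU layer, then refills and re-kicks the ring. A compressed
sketch of that bottom half (error handling and the exception channel elided,
helpers as defined above; not part of the patch):

/* Compressed restatement of mtk_wed_wo_irq_handler()/_irq_tasklet(). */
static void wo_rx_bottom_half_sketch(struct mtk_wed_wo *wo)
{
	u32 intr;

	mtk_wed_wo_set_isr(wo, 0);		/* mask all CCIF sources */
	intr = mtk_wed_wo_get_isr(wo) & wo->mmio.irq_mask;

	if (intr & MTK_WED_WO_RXCH_INT_MASK) {
		/* build_skb() each completed buffer, route responses to
		 * mtk_wed_mcu_rx_event() and everything else to
		 * mtk_wed_mcu_rx_unsolicited_event(), then refill + kick
		 */
		mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
		/* ack the CCIF channel and unmask the RX interrupt again */
		mtk_wed_wo_rx_complete(wo);
	}
}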
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
@@ -80,6 +80,54 @@ enum mtk_wed_dummy_cr_idx {
 #define MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK	BIT(5)
 #define MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK	BIT(0)
 
+#define MTK_WED_WO_RING_SIZE	256
+#define MTK_WED_WO_CMD_LEN	1504
+
+#define MTK_WED_WO_TXCH_NUM		0
+#define MTK_WED_WO_RXCH_NUM		1
+#define MTK_WED_WO_RXCH_WO_EXCEPTION	7
+
+#define MTK_WED_WO_TXCH_INT_MASK	BIT(0)
+#define MTK_WED_WO_RXCH_INT_MASK	BIT(1)
+#define MTK_WED_WO_EXCEPTION_INT_MASK	BIT(7)
+#define MTK_WED_WO_ALL_INT_MASK		(MTK_WED_WO_RXCH_INT_MASK | \
+					 MTK_WED_WO_EXCEPTION_INT_MASK)
+
+#define MTK_WED_WO_CCIF_BUSY		0x004
+#define MTK_WED_WO_CCIF_START		0x008
+#define MTK_WED_WO_CCIF_TCHNUM		0x00c
+#define MTK_WED_WO_CCIF_RCHNUM		0x010
+#define MTK_WED_WO_CCIF_RCHNUM_MASK	GENMASK(7, 0)
+
+#define MTK_WED_WO_CCIF_ACK		0x014
+#define MTK_WED_WO_CCIF_IRQ0_MASK	0x018
+#define MTK_WED_WO_CCIF_IRQ1_MASK	0x01c
+#define MTK_WED_WO_CCIF_DUMMY1		0x020
+#define MTK_WED_WO_CCIF_DUMMY2		0x024
+#define MTK_WED_WO_CCIF_DUMMY3		0x028
+#define MTK_WED_WO_CCIF_DUMMY4		0x02c
+#define MTK_WED_WO_CCIF_SHADOW1		0x030
+#define MTK_WED_WO_CCIF_SHADOW2		0x034
+#define MTK_WED_WO_CCIF_SHADOW3		0x038
+#define MTK_WED_WO_CCIF_SHADOW4		0x03c
+#define MTK_WED_WO_CCIF_DUMMY5		0x050
+#define MTK_WED_WO_CCIF_DUMMY6		0x054
+#define MTK_WED_WO_CCIF_DUMMY7		0x058
+#define MTK_WED_WO_CCIF_DUMMY8		0x05c
+#define MTK_WED_WO_CCIF_SHADOW5		0x060
+#define MTK_WED_WO_CCIF_SHADOW6		0x064
+#define MTK_WED_WO_CCIF_SHADOW7		0x068
+#define MTK_WED_WO_CCIF_SHADOW8		0x06c
+
+#define MTK_WED_WO_CTL_SD_LEN1		GENMASK(13, 0)
+#define MTK_WED_WO_CTL_LAST_SEC1	BIT(14)
+#define MTK_WED_WO_CTL_BURST		BIT(15)
+#define MTK_WED_WO_CTL_SD_LEN0_SHIFT	16
+#define MTK_WED_WO_CTL_SD_LEN0		GENMASK(29, 16)
+#define MTK_WED_WO_CTL_LAST_SEC0	BIT(30)
+#define MTK_WED_WO_CTL_DMA_DONE		BIT(31)
+#define MTK_WED_WO_INFO_WINFO		GENMASK(15, 0)
+
 struct mtk_wed_wo_memory_region {
@@ -112,10 +160,53 @@ struct mtk_wed_fw_trailer {
+struct mtk_wed_wo_queue_regs {
+	u32 desc_base;
+	u32 ring_size;
+	u32 cpu_idx;
+	u32 dma_idx;
+};
+
+struct mtk_wed_wo_queue_desc {
+	__le32 buf0;
+	__le32 ctrl;
+	__le32 buf1;
+	__le32 info;
+	__le32 reserved[4];
+} __packed __aligned(32);
+
+struct mtk_wed_wo_queue_entry {
+	union {
+		struct sk_buff *skb;
+		void *buf;
+	};
+	dma_addr_t addr;
+	int len;
+};
+
+struct mtk_wed_wo_queue {
+	struct mtk_wed_wo_queue_regs regs;
+
+	struct page_frag_cache cache;
+	spinlock_t lock;
+
+	struct mtk_wed_wo_queue_desc *desc;
+	dma_addr_t desc_dma;
+
+	struct mtk_wed_wo_queue_entry *entry;
+
+	int n_desc;
+	int queued;
+	int head;
+	int tail;
+};
+
 struct mtk_wed_wo {
 	struct mtk_wed_hw *hw;
 	struct mtk_wed_wo_memory_region boot;
+
+	struct mtk_wed_wo_queue q_tx;
+	struct mtk_wed_wo_queue q_rx;
 
@@ -124,6 +215,15 @@ struct mtk_wed_wo {
 		struct sk_buff_head res_q;
 		wait_queue_head_t wait;
 	} mcu;
+
+	struct {
+		struct regmap *regs;
+
+		spinlock_t lock;
+		struct tasklet_struct irq_tasklet;
+		u32 irq;
+		u32 irq_mask;
+	} mmio;
 
@@ -146,5 +246,9 @@ void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo,
 int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd,
 			 const void *data, int len, bool wait_resp);
 int mtk_wed_mcu_init(struct mtk_wed_wo *wo);
+int mtk_wed_wo_init(struct mtk_wed_hw *hw);
+void mtk_wed_wo_deinit(struct mtk_wed_hw *hw);
+int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *dev, struct mtk_wed_wo_queue *q,
+			    struct sk_buff *skb);
 
 #endif /* __MTK_WED_WO_H */
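
For reference, a worked example (not part of the patch) of how the
MTK_WED_WO_CTL_* fields defined above pack into the 32-bit descriptor control
word via the bitfield macros:

#include <linux/bitfield.h>

static void wo_ctl_word_example(void)
{
	/* a 128-byte, single-segment buffer handed over to the DMA engine */
	u32 ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, 128) | /* bits 29:16 */
		   MTK_WED_WO_CTL_LAST_SEC0 |		     /* bit 30 */
		   MTK_WED_WO_CTL_DMA_DONE;		     /* bit 31 */

	/* on completion, the length comes back out the same way */
	WARN_ON(FIELD_GET(MTK_WED_WO_CTL_SD_LEN0, ctrl) != 128);
}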