kernel: pick patches for MediaTek Ethernet from linux-next
[openwrt/staging/dedeckeh.git] target/linux/generic/pending-5.15/790-net-ethernet-mtk_eth_soc-add-the-capability-to-run-m.patch

From patchwork Thu Sep 8 19:33:39 2022
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Lorenzo Bianconi <lorenzo@kernel.org>
X-Patchwork-Id: 12970559
X-Patchwork-Delegate: kuba@kernel.org
Return-Path: <netdev-owner@kernel.org>
From: Lorenzo Bianconi <lorenzo@kernel.org>
To: netdev@vger.kernel.org
Cc: nbd@nbd.name, john@phrozen.org, sean.wang@mediatek.com,
 Mark-MC.Lee@mediatek.com, davem@davemloft.net, edumazet@google.com,
 kuba@kernel.org, pabeni@redhat.com, matthias.bgg@gmail.com,
 linux-mediatek@lists.infradead.org, lorenzo.bianconi@redhat.com,
 Bo.Jiao@mediatek.com, sujuan.chen@mediatek.com,
 ryder.Lee@mediatek.com, evelyn.tsai@mediatek.com,
 devicetree@vger.kernel.org, robh@kernel.org
Subject: [PATCH net-next 05/12] net: ethernet: mtk_eth_soc: add the capability
 to run multiple ppe
Date: Thu, 8 Sep 2022 21:33:39 +0200
Message-Id:
 <dd0254775390eb031c67c448df8b19e87df58558.1662661555.git.lorenzo@kernel.org>
X-Mailer: git-send-email 2.37.3
In-Reply-To: <cover.1662661555.git.lorenzo@kernel.org>
References: <cover.1662661555.git.lorenzo@kernel.org>
MIME-Version: 1.0
Precedence: bulk
List-ID: <netdev.vger.kernel.org>
X-Mailing-List: netdev@vger.kernel.org
X-Patchwork-Delegate: kuba@kernel.org

The mt7986 chipset supports multiple packet engines for wlan <-> eth
packet forwarding.

Co-developed-by: Bo Jiao <Bo.Jiao@mediatek.com>
Signed-off-by: Bo Jiao <Bo.Jiao@mediatek.com>
Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c  | 35 ++++++++++++-------
 drivers/net/ethernet/mediatek/mtk_eth_soc.h  |  2 +-
 drivers/net/ethernet/mediatek/mtk_ppe.c      | 14 +++++---
 drivers/net/ethernet/mediatek/mtk_ppe.h      |  9 +++--
 .../net/ethernet/mediatek/mtk_ppe_debugfs.c  |  8 ++---
 .../net/ethernet/mediatek/mtk_ppe_offload.c  | 13 +++----
 6 files changed, 48 insertions(+), 33 deletions(-)

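Editor's note (not part of the upstream submission): the hunks below size
eth->ppe[] at two entries, probe one PPE on older SoCs and two on
MTK_NETSYS_V2 (mt7986) at ppe_base + i * 0x400, and record a ppe_index in
each flow entry so the offload paths address the engine a flow was committed
to. The standalone C sketch below only models that bookkeeping; MAX_PPE,
PPE_BASE, MTK_PPE_STRIDE, struct ppe, struct flow_entry and pick_ppe() are
invented illustration names, not driver symbols.

/* Standalone illustration of the multi-PPE bookkeeping added by this
 * patch; plain userspace C, not kernel code.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define MAX_PPE        2         /* size of eth->ppe[] in the patch */
#define PPE_BASE       0x2200UL  /* example offset, stands in for reg_map->ppe_base */
#define MTK_PPE_STRIDE 0x400UL   /* per-instance register stride used by the patch */

struct ppe {
        unsigned long io_base;   /* models ppe->base */
        int index;               /* models the new "index" argument of mtk_ppe_init() */
};

struct flow_entry {
        uint8_t ppe_index;       /* models the new entry->ppe_index field */
};

/* Mirrors how the offload hunks index eth->ppe[entry->ppe_index] when
 * committing, clearing or polling a flow; clamps a bogus index to 0. */
static struct ppe *pick_ppe(struct ppe *ppe, size_t num_ppe,
                            const struct flow_entry *e)
{
        return &ppe[e->ppe_index < num_ppe ? e->ppe_index : 0];
}

int main(void)
{
        int netsys_v2 = 1;                   /* mt7986: two engines, older SoCs: one */
        size_t num_ppe = netsys_v2 ? 2 : 1;
        struct ppe ppe[MAX_PPE];
        struct flow_entry flow = { .ppe_index = 1 };

        for (size_t i = 0; i < num_ppe; i++) {
                ppe[i].index = (int)i;
                ppe[i].io_base = PPE_BASE + i * MTK_PPE_STRIDE;
                printf("ppe%zu at 0x%lx\n", i, ppe[i].io_base);
        }

        printf("flow handled via ppe%d\n",
               pick_ppe(ppe, num_ppe, &flow)->index);
        return 0;
}

Built with any C99 compiler this prints the two per-instance bases and the
engine selected for the example flow; it is only meant to make the indexing
in the hunks below easier to follow.
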
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1871,7 +1871,7 @@ static int mtk_poll_rx(struct napi_struc
 
 		reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
 		if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
-			mtk_ppe_check_skb(eth->ppe, skb, hash);
+			mtk_ppe_check_skb(eth->ppe[0], skb, hash);
 
 		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
@@ -2929,15 +2929,19 @@ static int mtk_open(struct net_device *d
 	/* we run 2 netdevs on the same dma ring so we only bring it up once */
 	if (!refcount_read(&eth->dma_refcnt)) {
 		const struct mtk_soc_data *soc = eth->soc;
-		u32 gdm_config = MTK_GDMA_TO_PDMA;
+		u32 gdm_config;
+		int i;
 		int err;
 
 		err = mtk_start_dma(eth);
 		if (err)
 			return err;
 
-		if (soc->offload_version && mtk_ppe_start(eth->ppe) == 0)
-			gdm_config = soc->reg_map->gdma_to_ppe0;
+		for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+			mtk_ppe_start(eth->ppe[i]);
+
+		gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe0
+						  : MTK_GDMA_TO_PDMA;
 
 		mtk_gdm_config(eth, gdm_config);
 
@@ -2982,6 +2986,7 @@ static int mtk_stop(struct net_device *d
 {
 	struct mtk_mac *mac = netdev_priv(dev);
 	struct mtk_eth *eth = mac->hw;
+	int i;
 
 	phylink_stop(mac->phylink);
 
@@ -3009,8 +3014,8 @@ static int mtk_stop(struct net_device *d
 
 	mtk_dma_free(eth);
 
-	if (eth->soc->offload_version)
-		mtk_ppe_stop(eth->ppe);
+	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+		mtk_ppe_stop(eth->ppe[i]);
 
 	return 0;
 }
@@ -4050,12 +4055,19 @@ static int mtk_probe(struct platform_dev
 	}
 
 	if (eth->soc->offload_version) {
-		u32 ppe_addr = eth->soc->reg_map->ppe_base;
+		u32 num_ppe;
 
-		eth->ppe = mtk_ppe_init(eth, eth->base + ppe_addr, 2);
-		if (!eth->ppe) {
-			err = -ENOMEM;
-			goto err_free_dev;
+		num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
+		num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
+		for (i = 0; i < num_ppe; i++) {
+			u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
+
+			eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr,
+						   eth->soc->offload_version, i);
+			if (!eth->ppe[i]) {
+				err = -ENOMEM;
+				goto err_free_dev;
+			}
 		}
 
 		err = mtk_eth_offload_init(eth);
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -1111,7 +1111,7 @@ struct mtk_eth {
 
 	int				ip_align;
 
-	struct mtk_ppe			*ppe;
+	struct mtk_ppe			*ppe[2];
 	struct rhashtable		flow_table;
 
 	struct bpf_prog			__rcu *prog;
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -682,7 +682,7 @@ int mtk_foe_entry_idle_time(struct mtk_p
 }
 
 struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
-			     int version)
+			     int version, int index)
 {
 	const struct mtk_soc_data *soc = eth->soc;
 	struct device *dev = eth->dev;
@@ -717,7 +717,7 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_
 	if (!ppe->foe_flow)
 		return NULL;
 
-	mtk_ppe_debugfs_init(ppe);
+	mtk_ppe_debugfs_init(ppe, index);
 
 	return ppe;
 }
@@ -738,10 +738,13 @@ static void mtk_ppe_init_foe_table(struc
 			ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
 }
 
-int mtk_ppe_start(struct mtk_ppe *ppe)
+void mtk_ppe_start(struct mtk_ppe *ppe)
 {
 	u32 val;
 
+	if (!ppe)
+		return;
+
 	mtk_ppe_init_foe_table(ppe);
 	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
 
@@ -809,8 +812,6 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
 	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);
 
 	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
-
-	return 0;
 }
 
 int mtk_ppe_stop(struct mtk_ppe *ppe)
@@ -818,6 +819,9 @@ int mtk_ppe_stop(struct mtk_ppe *ppe)
 	u32 val;
 	int i;
 
+	if (!ppe)
+		return 0;
+
 	for (i = 0; i < MTK_PPE_ENTRIES; i++)
 		ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
 						   MTK_FOE_STATE_INVALID);
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -247,6 +247,7 @@ struct mtk_flow_entry {
 	};
 	u8 type;
 	s8 wed_index;
+	u8 ppe_index;
 	u16 hash;
 	union {
 		struct mtk_foe_entry data;
@@ -265,6 +266,7 @@ struct mtk_ppe {
 	struct device *dev;
 	void __iomem *base;
 	int version;
+	char dirname[5];
 
 	struct mtk_foe_entry *foe_table;
 	dma_addr_t foe_phys;
@@ -277,8 +279,9 @@ struct mtk_ppe {
 	void *acct_table;
 };
 
-struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version);
-int mtk_ppe_start(struct mtk_ppe *ppe);
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
+			     int version, int index);
+void mtk_ppe_start(struct mtk_ppe *ppe);
 int mtk_ppe_stop(struct mtk_ppe *ppe);
 
 void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
@@ -317,6 +320,6 @@ int mtk_foe_entry_set_wdma(struct mtk_fo
 int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
 void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
 int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
-int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index);
 
 #endif
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -187,7 +187,7 @@ mtk_ppe_debugfs_foe_open_bind(struct ino
 			   inode->i_private);
 }
 
-int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index)
 {
 	static const struct file_operations fops_all = {
 		.open = mtk_ppe_debugfs_foe_open_all,
@@ -195,17 +195,17 @@ int mtk_ppe_debugfs_init(struct mtk_ppe
 		.llseek = seq_lseek,
 		.release = single_release,
 	};
-
 	static const struct file_operations fops_bind = {
 		.open = mtk_ppe_debugfs_foe_open_bind,
 		.read = seq_read,
 		.llseek = seq_lseek,
 		.release = single_release,
 	};
-
 	struct dentry *root;
 
-	root = debugfs_create_dir("mtk_ppe", NULL);
+	snprintf(ppe->dirname, sizeof(ppe->dirname), "ppe%d", index);
+
+	root = debugfs_create_dir(ppe->dirname, NULL);
 	if (!root)
 		return -ENOMEM;
 
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -434,7 +434,7 @@ mtk_flow_offload_replace(struct mtk_eth
 	memcpy(&entry->data, &foe, sizeof(entry->data));
 	entry->wed_index = wed_index;
 
-	err = mtk_foe_entry_commit(eth->ppe, entry);
+	err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
 	if (err < 0)
 		goto free;
 
@@ -446,7 +446,7 @@ mtk_flow_offload_replace(struct mtk_eth
 	return 0;
 
 clear:
-	mtk_foe_entry_clear(eth->ppe, entry);
+	mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
 free:
 	kfree(entry);
 	if (wed_index >= 0)
@@ -464,7 +464,7 @@ mtk_flow_offload_destroy(struct mtk_eth
 	if (!entry)
 		return -ENOENT;
 
-	mtk_foe_entry_clear(eth->ppe, entry);
+	mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
 	rhashtable_remove_fast(&eth->flow_table, &entry->node,
 			       mtk_flow_ht_params);
 	if (entry->wed_index >= 0)
@@ -485,7 +485,7 @@ mtk_flow_offload_stats(struct mtk_eth *e
 	if (!entry)
 		return -ENOENT;
 
-	idle = mtk_foe_entry_idle_time(eth->ppe, entry);
+	idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
 	f->stats.lastused = jiffies - idle * HZ;
 
 	return 0;
@@ -537,7 +537,7 @@ mtk_eth_setup_tc_block(struct net_device
 	struct flow_block_cb *block_cb;
 	flow_setup_cb_t *cb;
 
-	if (!eth->ppe || !eth->ppe->foe_table)
+	if (!eth->soc->offload_version)
 		return -EOPNOTSUPP;
 
 	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
@@ -589,8 +589,5 @@ int mtk_eth_setup_tc(struct net_device *
 
 int mtk_eth_offload_init(struct mtk_eth *eth)
 {
-	if (!eth->ppe || !eth->ppe->foe_table)
-		return 0;
-
 	return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
 }