/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2018 John Crispin <john@phrozen.org>
 */

#include "mtk_offload.h"

/* FOE entry states (bfib1.state) */
#define INVALID 0
#define UNBIND 1
#define BIND 2
#define FIN 3

/* FOE entry packet types (bfib1.pkt_type) */
#define IPV4_HNAPT 0
#define IPV4_HNAT 1

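/*
 * Hash an IPv4 tuple into a FOE table index. The 4K-entry table is
 * organised as 2K buckets of two consecutive slots; this returns the even
 * (first) slot, and mtk_flow_offload() falls back to the odd slot when
 * the first one is already bound.
 */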
static u32
mtk_flow_hash_v4(struct flow_offload_tuple *tuple)
{
	u32 ports = ntohs(tuple->src_port) << 16 | ntohs(tuple->dst_port);
	u32 src = ntohl(tuple->dst_v4.s_addr);
	u32 dst = ntohl(tuple->src_v4.s_addr);
	u32 hash = (ports & src) | ((~ports) & dst);
	u32 hash_23_0 = hash & 0xffffff;
	u32 hash_31_24 = hash & 0xff000000;

	hash = ports ^ src ^ dst ^ ((hash_23_0 << 8) | (hash_31_24 >> 24));
	hash = ((hash & 0xffff0000) >> 16) ^ (hash & 0xffff);
	hash &= 0x7ff;
	hash *= 2;

	return hash;
}

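/*
 * Fill a hardware FOE entry for one direction of an IPv4 NAPT flow:
 * packets are matched on @tuple and rewritten to the reverse of
 * @dest_tuple, with VLAN and PPPoE encapsulation taken from @dest.
 */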
static int
mtk_foe_prepare_v4(struct mtk_foe_entry *entry,
		   struct flow_offload_tuple *tuple,
		   struct flow_offload_tuple *dest_tuple,
		   struct flow_offload_hw_path *src,
		   struct flow_offload_hw_path *dest)
{
	int is_mcast = !!is_multicast_ether_addr(dest->eth_dest);

	if (tuple->l4proto == IPPROTO_UDP)
		entry->ipv4_hnapt.bfib1.udp = 1;

	entry->ipv4_hnapt.etype = htons(ETH_P_IP);
	entry->ipv4_hnapt.bfib1.pkt_type = IPV4_HNAPT;
	entry->ipv4_hnapt.iblk2.fqos = 0;
	entry->ipv4_hnapt.bfib1.ttl = 1;
	entry->ipv4_hnapt.bfib1.cah = 1;
	entry->ipv4_hnapt.bfib1.ka = 1;
	entry->ipv4_hnapt.iblk2.mcast = is_mcast;
	entry->ipv4_hnapt.iblk2.dscp = 0;
	entry->ipv4_hnapt.iblk2.port_mg = 0x3f;
	entry->ipv4_hnapt.iblk2.port_ag = 0x1f;
#ifdef CONFIG_NET_MEDIATEK_HW_QOS
	entry->ipv4_hnapt.iblk2.qid = 1;
	entry->ipv4_hnapt.iblk2.fqos = 1;
#endif
#ifdef CONFIG_RALINK
	entry->ipv4_hnapt.iblk2.dp = 1;
	if ((dest->flags & FLOW_OFFLOAD_PATH_VLAN) && (dest->vlan_id > 1))
		entry->ipv4_hnapt.iblk2.qid += 8;
#else
	/* derive the destination port from the trailing digit of the
	 * egress netdev name, assuming an ethN-style name
	 */
	entry->ipv4_hnapt.iblk2.dp = (dest->dev->name[3] - '0') + 1;
#endif

	entry->ipv4_hnapt.sip = ntohl(tuple->src_v4.s_addr);
	entry->ipv4_hnapt.dip = ntohl(tuple->dst_v4.s_addr);
	entry->ipv4_hnapt.sport = ntohs(tuple->src_port);
	entry->ipv4_hnapt.dport = ntohs(tuple->dst_port);

	entry->ipv4_hnapt.new_sip = ntohl(dest_tuple->dst_v4.s_addr);
	entry->ipv4_hnapt.new_dip = ntohl(dest_tuple->src_v4.s_addr);
	entry->ipv4_hnapt.new_sport = ntohs(dest_tuple->dst_port);
	entry->ipv4_hnapt.new_dport = ntohs(dest_tuple->src_port);

	entry->bfib1.state = BIND;

	if (dest->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		entry->bfib1.psn = 1;
		entry->ipv4_hnapt.etype = htons(ETH_P_PPP_SES);
		entry->ipv4_hnapt.pppoe_id = dest->pppoe_sid;
	}

	if (dest->flags & FLOW_OFFLOAD_PATH_VLAN) {
		entry->ipv4_hnapt.vlan1 = dest->vlan_id;
		entry->bfib1.vlan_layer = 1;

		switch (dest->vlan_proto) {
		case htons(ETH_P_8021Q):
			entry->ipv4_hnapt.bfib1.vpm = 1;
			break;
		case htons(ETH_P_8021AD):
			entry->ipv4_hnapt.bfib1.vpm = 2;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

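/* MAC addresses are stored byte-swapped in the FOE entry fields */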
static void
mtk_foe_set_mac(struct mtk_foe_entry *entry, u8 *smac, u8 *dmac)
{
	entry->ipv4_hnapt.dmac_hi = swab32(*((u32 *) dmac));
	entry->ipv4_hnapt.dmac_lo = swab16(*((u16 *) &dmac[4]));
	entry->ipv4_hnapt.smac_hi = swab32(*((u32 *) smac));
	entry->ipv4_hnapt.smac_lo = swab16(*((u16 *) &smac[4]));
}

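/* returns true if the FOE slot at @hash is already bound to another flow */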
static int
mtk_check_hashcollision(struct mtk_eth *eth, u32 hash)
{
	struct mtk_foe_entry *entry = &((struct mtk_foe_entry *)eth->foe_table)[hash];

	return entry->bfib1.state == BIND;
}

static void
mtk_foe_write(struct mtk_eth *eth, u32 hash,
	      struct mtk_foe_entry *entry)
{
	struct mtk_foe_entry *table = (struct mtk_foe_entry *)eth->foe_table;

	memcpy(&table[hash], entry, sizeof(*entry));
}

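/*
 * Program both directions of a flow into the FOE table. For
 * FLOW_OFFLOAD_DEL the entries are written back in INVALID state and the
 * software flow table slots are cleared.
 */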
int mtk_flow_offload(struct mtk_eth *eth,
		     enum flow_offload_type type,
		     struct flow_offload *flow,
		     struct flow_offload_hw_path *src,
		     struct flow_offload_hw_path *dest)
{
	struct flow_offload_tuple *otuple = &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple;
	struct flow_offload_tuple *rtuple = &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple;
	u32 time_stamp = mtk_r32(eth, 0x0010) & 0x7fff;
	u32 ohash, rhash;
	struct mtk_foe_entry orig = {
		.bfib1.time_stamp = time_stamp,
		.bfib1.psn = 0,
	};
	struct mtk_foe_entry reply = {
		.bfib1.time_stamp = time_stamp,
		.bfib1.psn = 0,
	};

	if (otuple->l4proto != IPPROTO_TCP && otuple->l4proto != IPPROTO_UDP)
		return -EINVAL;

	switch (otuple->l3proto) {
	case AF_INET:
		if (mtk_foe_prepare_v4(&orig, otuple, rtuple, src, dest) ||
		    mtk_foe_prepare_v4(&reply, rtuple, otuple, dest, src))
			return -EINVAL;

		ohash = mtk_flow_hash_v4(otuple);
		rhash = mtk_flow_hash_v4(rtuple);
		break;

	case AF_INET6:
		return -EINVAL;

	default:
		return -EINVAL;
	}

	if (type == FLOW_OFFLOAD_DEL) {
		orig.bfib1.state = INVALID;
		reply.bfib1.state = INVALID;
		flow = NULL;
		goto write;
	}

	/* each hash bucket has two slots: on a collision, fall back to the
	 * second slot of the bucket
	 */
	if (mtk_check_hashcollision(eth, ohash))
		ohash += 1;
	if (mtk_check_hashcollision(eth, rhash))
		rhash += 1;
	mtk_foe_set_mac(&orig, dest->eth_src, dest->eth_dest);
	mtk_foe_set_mac(&reply, src->eth_src, src->eth_dest);

write:
	mtk_foe_write(eth, ohash, &orig);
	mtk_foe_write(eth, rhash, &reply);
	rcu_assign_pointer(eth->foe_flow_table[ohash], flow);
	rcu_assign_pointer(eth->foe_flow_table[rhash], flow);

	if (type == FLOW_OFFLOAD_DEL)
		synchronize_rcu();

	return 0;
}

#ifdef CONFIG_NET_MEDIATEK_HW_QOS

#define QDMA_TX_SCH_TX 0x1a14

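/*
 * Configure one of the two QDMA TX schedulers. The rate is encoded as a
 * 7-bit mantissa and a 4-bit exponent (rate ~ mantissa * 10^exp); each
 * scheduler occupies one 16-bit half of the QDMA_TX_SCH_TX register.
 */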
static void mtk_ppe_scheduler(struct mtk_eth *eth, int id, u32 rate)
{
	int exp = 0, shift = 0;
	u32 reg = mtk_r32(eth, QDMA_TX_SCH_TX);
	u32 val = 0;

	if (rate)
		val = BIT(11);

	while (rate > 127) {
		rate /= 10;
		exp++;
	}

	val |= (rate & 0x7f) << 4;
	val |= exp & 0xf;
	if (id)
		shift = 16;
	reg &= ~(0xffff << shift);
	reg |= val << shift;
	/* write back the merged value so the other scheduler's half of the
	 * register is preserved
	 */
	mtk_w32(eth, reg, QDMA_TX_SCH_TX);
}

#define QTX_CFG(x) (0x1800 + ((x) * 0x10))
#define QTX_SCH(x) (0x1804 + ((x) * 0x10))

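/*
 * Configure one of the 16 QDMA TX queues: scheduler binding, weight,
 * reserved buffer count and min/max rates, using the same mantissa and
 * exponent rate encoding as the scheduler.
 */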
static void mtk_ppe_queue(struct mtk_eth *eth, int id, int sched, int weight,
			  int resv, u32 min_rate, u32 max_rate)
{
	int max_exp = 0, min_exp = 0;
	u32 reg;

	if (id >= 16)
		return;

	reg = mtk_r32(eth, QTX_SCH(id));
	reg &= 0x70000000;

	if (sched)
		reg |= BIT(31);

	if (min_rate)
		reg |= BIT(27);

	if (max_rate)
		reg |= BIT(11);

	while (max_rate > 127) {
		max_rate /= 10;
		max_exp++;
	}

	while (min_rate > 127) {
		min_rate /= 10;
		min_exp++;
	}

	reg |= (min_rate & 0x7f) << 20;
	reg |= (min_exp & 0xf) << 16;
	reg |= (weight & 0xf) << 12;
	reg |= (max_rate & 0x7f) << 4;
	reg |= max_exp & 0xf;
	mtk_w32(eth, reg, QTX_SCH(id));

	resv &= 0xff;
	reg = mtk_r32(eth, QTX_CFG(id));
	reg &= 0xffff0000;
	reg |= (resv << 8) | resv;
	mtk_w32(eth, reg, QTX_CFG(id));
}
#endif

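/*
 * Allocate the software flow table (RCU pointers back to the offloaded
 * flows) and the DMA-coherent hardware FOE table shared with the PPE.
 */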
static int mtk_init_foe_table(struct mtk_eth *eth)
{
	if (eth->foe_table)
		return 0;

	eth->foe_flow_table = devm_kcalloc(eth->dev, MTK_PPE_ENTRY_CNT,
					   sizeof(*eth->foe_flow_table),
					   GFP_KERNEL);
	if (!eth->foe_flow_table)
		return -ENOMEM;

	/* map the FOE table */
	eth->foe_table = dmam_alloc_coherent(eth->dev, MTK_PPE_TBL_SZ,
					     &eth->foe_table_phys, GFP_KERNEL);
	if (!eth->foe_table) {
		dev_err(eth->dev, "failed to allocate foe table\n");
		devm_kfree(eth->dev, eth->foe_flow_table);
		return -ENOMEM;
	}

	return 0;
}

static int mtk_ppe_start(struct mtk_eth *eth)
{
	int ret;

	ret = mtk_init_foe_table(eth);
	if (ret)
		return ret;

	/* tell the PPE about the table's base address */
	mtk_w32(eth, eth->foe_table_phys, MTK_REG_PPE_TB_BASE);

	/* flush the table */
	memset(eth->foe_table, 0, MTK_PPE_TBL_SZ);

	/* setup hashing */
	mtk_m32(eth,
		MTK_PPE_TB_CFG_HASH_MODE_MASK | MTK_PPE_TB_CFG_TBL_SZ_MASK,
		MTK_PPE_TB_CFG_HASH_MODE1 | MTK_PPE_TB_CFG_TBL_SZ_4K,
		MTK_REG_PPE_TB_CFG);

	/* set the default hashing seed */
	mtk_w32(eth, MTK_PPE_HASH_SEED, MTK_REG_PPE_HASH_SEED);

	/* each FOE entry is 64 bytes and is set up by CPU forwarding */
	mtk_m32(eth, MTK_PPE_CAH_CTRL_X_MODE | MTK_PPE_TB_CFG_ENTRY_SZ_MASK |
		MTK_PPE_TB_CFG_SMA_MASK,
		MTK_PPE_TB_CFG_ENTRY_SZ_64B | MTK_PPE_TB_CFG_SMA_FWD_CPU,
		MTK_REG_PPE_TB_CFG);

	/* set ip proto */
	mtk_w32(eth, 0xFFFFFFFF, MTK_REG_PPE_IP_PROT_CHK);

	/* setup caching */
	mtk_m32(eth, 0, MTK_PPE_CAH_CTRL_X_MODE, MTK_REG_PPE_CAH_CTRL);
	mtk_m32(eth, MTK_PPE_CAH_CTRL_X_MODE, MTK_PPE_CAH_CTRL_EN,
		MTK_REG_PPE_CAH_CTRL);

	/* enable FOE */
	mtk_m32(eth, 0, MTK_PPE_FLOW_CFG_IPV4_NAT_FRAG_EN |
		MTK_PPE_FLOW_CFG_IPV4_NAPT_EN | MTK_PPE_FLOW_CFG_IPV4_NAT_EN |
		MTK_PPE_FLOW_CFG_IPV4_GREK_EN,
		MTK_REG_PPE_FLOW_CFG);

	/* setup flow entry un/bind aging */
	mtk_m32(eth, 0,
		MTK_PPE_TB_CFG_UNBD_AGE | MTK_PPE_TB_CFG_NTU_AGE |
		MTK_PPE_TB_CFG_FIN_AGE | MTK_PPE_TB_CFG_UDP_AGE |
		MTK_PPE_TB_CFG_TCP_AGE,
		MTK_REG_PPE_TB_CFG);

	mtk_m32(eth, MTK_PPE_UNB_AGE_MNP_MASK | MTK_PPE_UNB_AGE_DLTA_MASK,
		MTK_PPE_UNB_AGE_MNP | MTK_PPE_UNB_AGE_DLTA,
		MTK_REG_PPE_UNB_AGE);
	mtk_m32(eth, MTK_PPE_BND_AGE0_NTU_DLTA_MASK |
		MTK_PPE_BND_AGE0_UDP_DLTA_MASK,
		MTK_PPE_BND_AGE0_NTU_DLTA | MTK_PPE_BND_AGE0_UDP_DLTA,
		MTK_REG_PPE_BND_AGE0);
	mtk_m32(eth, MTK_PPE_BND_AGE1_FIN_DLTA_MASK |
		MTK_PPE_BND_AGE1_TCP_DLTA_MASK,
		MTK_PPE_BND_AGE1_FIN_DLTA | MTK_PPE_BND_AGE1_TCP_DLTA,
		MTK_REG_PPE_BND_AGE1);

	/* setup flow entry keep alive */
	mtk_m32(eth, MTK_PPE_TB_CFG_KA_MASK, MTK_PPE_TB_CFG_KA,
		MTK_REG_PPE_TB_CFG);
	mtk_w32(eth, MTK_PPE_KA_UDP | MTK_PPE_KA_TCP | MTK_PPE_KA_T,
		MTK_REG_PPE_KA);

	/* setup flow entry rate limit */
	mtk_w32(eth, (0x3fff << 16) | 0x3fff, MTK_REG_PPE_BIND_LMT_0);
	mtk_w32(eth, MTK_PPE_NTU_KA | 0x3fff, MTK_REG_PPE_BIND_LMT_1);
	mtk_m32(eth, MTK_PPE_BNDR_RATE_MASK, 1, MTK_REG_PPE_BNDR);

	/* enable the PPE */
	mtk_m32(eth, 0, MTK_PPE_GLO_CFG_EN, MTK_REG_PPE_GLO_CFG);

#ifdef CONFIG_RALINK
	/* set the default forwarding port */
	mtk_w32(eth, 0x0, MTK_REG_PPE_DFT_CPORT);
#else
	/* set the default forwarding port to QDMA */
	mtk_w32(eth, 0x55555555, MTK_REG_PPE_DFT_CPORT);
#endif

	/* drop packets with TTL=0 */
	mtk_m32(eth, 0, MTK_PPE_GLO_CFG_TTL0_DROP, MTK_REG_PPE_GLO_CFG);

	/* send all traffic from gmac to the ppe */
	mtk_m32(eth, 0xffff, 0x4444, MTK_GDMA_FWD_CFG(0));
	mtk_m32(eth, 0xffff, 0x4444, MTK_GDMA_FWD_CFG(1));

	dev_info(eth->dev, "PPE started\n");

#ifdef CONFIG_NET_MEDIATEK_HW_QOS
	mtk_ppe_scheduler(eth, 0, 500000);
	mtk_ppe_scheduler(eth, 1, 500000);
	mtk_ppe_queue(eth, 0, 0, 7, 32, 250000, 0);
	mtk_ppe_queue(eth, 1, 0, 7, 32, 250000, 0);
	mtk_ppe_queue(eth, 8, 1, 7, 32, 250000, 0);
	mtk_ppe_queue(eth, 9, 1, 7, 32, 250000, 0);
#endif

	return 0;
}

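/*
 * Poll the PPE busy bit for up to one second; if the hardware does not
 * become idle, reset the PPE block and report a timeout.
 */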
static int mtk_ppe_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;
	u32 r = 0;

	while (1) {
		r = mtk_r32(eth, MTK_REG_PPE_GLO_CFG);
		if (!(r & MTK_PPE_GLO_CFG_BUSY))
			return 0;
		if (time_after(jiffies, t_start + HZ))
			break;
		usleep_range(10, 20);
	}

	dev_err(eth->dev, "ppe: table busy timeout - resetting\n");
	reset_control_reset(eth->rst_ppe);

	return -ETIMEDOUT;
}

static int mtk_ppe_stop(struct mtk_eth *eth)
{
	u32 r1 = 0, r2 = 0;
	int i;

	/* discard all traffic while we disable the PPE */
	mtk_m32(eth, 0xffff, 0x7777, MTK_GDMA_FWD_CFG(0));
	mtk_m32(eth, 0xffff, 0x7777, MTK_GDMA_FWD_CFG(1));

	if (mtk_ppe_busy_wait(eth))
		return -ETIMEDOUT;

	/* invalidate all flow table entries */
	for (i = 0; i < MTK_PPE_ENTRY_CNT; i++)
		eth->foe_table[i].bfib1.state = FOE_STATE_INVALID;

	/* disable caching */
	mtk_m32(eth, 0, MTK_PPE_CAH_CTRL_X_MODE, MTK_REG_PPE_CAH_CTRL);
	mtk_m32(eth, MTK_PPE_CAH_CTRL_X_MODE | MTK_PPE_CAH_CTRL_EN, 0,
		MTK_REG_PPE_CAH_CTRL);

	/* the cache flush has to happen before HNAT is disabled */
	mtk_m32(eth, MTK_PPE_GLO_CFG_EN, 0, MTK_REG_PPE_GLO_CFG);

	/* disable FOE */
	mtk_m32(eth,
		MTK_PPE_FLOW_CFG_IPV4_NAT_FRAG_EN |
		MTK_PPE_FLOW_CFG_IPV4_NAPT_EN | MTK_PPE_FLOW_CFG_IPV4_NAT_EN |
		MTK_PPE_FLOW_CFG_FUC_FOE | MTK_PPE_FLOW_CFG_FMC_FOE,
		0, MTK_REG_PPE_FLOW_CFG);

	/* disable FOE aging */
	mtk_m32(eth,
		MTK_PPE_TB_CFG_FIN_AGE | MTK_PPE_TB_CFG_UDP_AGE |
		MTK_PPE_TB_CFG_TCP_AGE | MTK_PPE_TB_CFG_UNBD_AGE |
		MTK_PPE_TB_CFG_NTU_AGE, 0, MTK_REG_PPE_TB_CFG);

	r1 = mtk_r32(eth, 0x100);
	r2 = mtk_r32(eth, 0x10c);

	dev_info(eth->dev, "0x100 = 0x%x, 0x10c = 0x%x\n", r1, r2);

	if (((r1 & 0xff00) >> 0x8) >= (r1 & 0xff) ||
	    ((r1 & 0xff00) >> 0x8) >= (r2 & 0xff)) {
		dev_info(eth->dev, "reset pse\n");
		mtk_w32(eth, 0x1, 0x4);
	}

	/* set the foe entry base address to 0 */
	mtk_w32(eth, 0, MTK_REG_PPE_TB_BASE);

	if (mtk_ppe_busy_wait(eth))
		return -ETIMEDOUT;

	/* send all traffic back to the DMA engine */
#ifdef CONFIG_RALINK
	mtk_m32(eth, 0xffff, 0x0, MTK_GDMA_FWD_CFG(0));
	mtk_m32(eth, 0xffff, 0x0, MTK_GDMA_FWD_CFG(1));
#else
	mtk_m32(eth, 0xffff, 0x5555, MTK_GDMA_FWD_CFG(0));
	mtk_m32(eth, 0xffff, 0x5555, MTK_GDMA_FWD_CFG(1));
#endif
	return 0;
}

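/*
 * The PPE sends a keepalive packet to the CPU for each active flow;
 * use it to refresh the timeout of the corresponding software flow.
 */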
static void mtk_offload_keepalive(struct fe_priv *eth, unsigned int hash)
{
	struct flow_offload *flow;

	rcu_read_lock();
	flow = rcu_dereference(eth->foe_flow_table[hash]);
	if (flow)
		flow->timeout = jiffies + 30 * HZ;
	rcu_read_unlock();
}

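/*
 * Inspect the RX descriptor CPU reason: keepalive and sampling packets
 * are consumed here (return -1), everything else is passed up the stack.
 */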
int mtk_offload_check_rx(struct fe_priv *eth, struct sk_buff *skb, u32 rxd4)
{
	unsigned int hash;

	switch (FIELD_GET(MTK_RXD4_CPU_REASON, rxd4)) {
	case MTK_CPU_REASON_KEEPALIVE_UC_OLD_HDR:
	case MTK_CPU_REASON_KEEPALIVE_MC_NEW_HDR:
	case MTK_CPU_REASON_KEEPALIVE_DUP_OLD_HDR:
		hash = FIELD_GET(MTK_RXD4_FOE_ENTRY, rxd4);
		mtk_offload_keepalive(eth, hash);
		return -1;
	case MTK_CPU_REASON_PACKET_SAMPLING:
		return -1;
	default:
		return 0;
	}
}

int mtk_ppe_probe(struct mtk_eth *eth)
{
	int err;

	err = mtk_ppe_start(eth);
	if (err)
		return err;

	err = mtk_ppe_debugfs_init(eth);
	if (err)
		return err;

	return 0;
}

void mtk_ppe_remove(struct mtk_eth *eth)
{
	mtk_ppe_stop(eth);
}