/*   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; version 2 of the License
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *   GNU General Public License for more details.
 *
 *   Copyright (C) 2018 John Crispin <john@phrozen.org>
 */

#include "mtk_offload.h"
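
/* Compute the FOE table index the PPE uses for an IPv4 tuple: the port pair
 * and both addresses are mixed and folded down to an even index, so each
 * flow owns a pair of adjacent slots. The +1 collision fallback in
 * mtk_flow_offload() relies on that pairing. */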
static u32
mtk_flow_hash_v4(struct flow_offload_tuple *tuple)
{
	u32 ports = ntohs(tuple->src_port) << 16 | ntohs(tuple->dst_port);
	u32 src = ntohl(tuple->dst_v4.s_addr);
	u32 dst = ntohl(tuple->src_v4.s_addr);
	u32 hash = (ports & src) | ((~ports) & dst);
	u32 hash_23_0 = hash & 0xffffff;
	u32 hash_31_24 = hash & 0xff000000;

	hash = ports ^ src ^ dst ^ ((hash_23_0 << 8) | (hash_31_24 >> 24));
	hash = ((hash & 0xffff0000) >> 16) ^ (hash & 0xffff);

	/* fold the result into the 4k table; entries are used in pairs */
	hash &= 0x7ff;
	hash *= 2;

	return hash;
}
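
/* Populate a FOE entry for a bound IPv4 HNAPT flow: the match half comes
 * from @tuple, the NATed replacement addresses and ports come from
 * @dest_tuple, and the egress VLAN/PPPoE metadata comes from the hardware
 * path descriptors. */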
static int
mtk_foe_prepare_v4(struct mtk_foe_entry *entry,
		   struct flow_offload_tuple *tuple,
		   struct flow_offload_tuple *dest_tuple,
		   struct flow_offload_hw_path *src,
		   struct flow_offload_hw_path *dest)
{
	int is_mcast = !!is_multicast_ether_addr(dest->eth_dest);

	if (tuple->l4proto == IPPROTO_UDP)
		entry->ipv4_hnapt.bfib1.udp = 1;

	entry->ipv4_hnapt.etype = htons(ETH_P_IP);
	entry->ipv4_hnapt.bfib1.pkt_type = IPV4_HNAPT;
	entry->ipv4_hnapt.iblk2.fqos = 0;
	entry->ipv4_hnapt.bfib1.ttl = 1;
	entry->ipv4_hnapt.bfib1.cah = 1;
	entry->ipv4_hnapt.bfib1.ka = 1;
	entry->ipv4_hnapt.iblk2.mcast = is_mcast;
	entry->ipv4_hnapt.iblk2.dscp = 0;
	entry->ipv4_hnapt.iblk2.port_mg = 0x3f;
	entry->ipv4_hnapt.iblk2.port_ag = 0x1f;
#ifdef CONFIG_NET_MEDIATEK_HW_QOS
	entry->ipv4_hnapt.iblk2.qid = 1;
	entry->ipv4_hnapt.iblk2.fqos = 1;
#endif

#ifdef CONFIG_RALINK
	entry->ipv4_hnapt.iblk2.dp = 1;
	if ((dest->flags & FLOW_OFFLOAD_PATH_VLAN) && (dest->vlan_id > 1))
		entry->ipv4_hnapt.iblk2.qid += 8;
#else
	entry->ipv4_hnapt.iblk2.dp = (dest->dev->name[3] - '0') + 1;
#endif
	entry->ipv4_hnapt.sip = ntohl(tuple->src_v4.s_addr);
	entry->ipv4_hnapt.dip = ntohl(tuple->dst_v4.s_addr);
	entry->ipv4_hnapt.sport = ntohs(tuple->src_port);
	entry->ipv4_hnapt.dport = ntohs(tuple->dst_port);

	entry->ipv4_hnapt.new_sip = ntohl(dest_tuple->dst_v4.s_addr);
	entry->ipv4_hnapt.new_dip = ntohl(dest_tuple->src_v4.s_addr);
	entry->ipv4_hnapt.new_sport = ntohs(dest_tuple->dst_port);
	entry->ipv4_hnapt.new_dport = ntohs(dest_tuple->src_port);

	entry->bfib1.state = BIND;
	if (dest->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		entry->bfib1.psn = 1;
		entry->ipv4_hnapt.etype = htons(ETH_P_PPP_SES);
		entry->ipv4_hnapt.pppoe_id = dest->pppoe_sid;
	}
	if (dest->flags & FLOW_OFFLOAD_PATH_VLAN) {
		entry->ipv4_hnapt.vlan1 = dest->vlan_id;
		entry->bfib1.vlan_layer = 1;

		switch (dest->vlan_proto) {
		case htons(ETH_P_8021Q):
			entry->ipv4_hnapt.bfib1.vpm = 1;
			break;
		case htons(ETH_P_8021AD):
			entry->ipv4_hnapt.bfib1.vpm = 2;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}
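
/* The FOE entry stores each MAC address as byte-swapped hi/lo halves rather
 * than a flat byte array, hence the swab32/swab16 shuffling below. */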
static void
mtk_foe_set_mac(struct mtk_foe_entry *entry, u8 *smac, u8 *dmac)
{
	entry->ipv4_hnapt.dmac_hi = swab32(*((u32 *)dmac));
	entry->ipv4_hnapt.dmac_lo = swab16(*((u16 *)&dmac[4]));
	entry->ipv4_hnapt.smac_hi = swab32(*((u32 *)smac));
	entry->ipv4_hnapt.smac_lo = swab16(*((u16 *)&smac[4]));
}
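
/* A table slot can be (re)used unless it currently holds an entry in the
 * BIND (actively offloaded) state. */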
static int
mtk_check_entry_available(struct mtk_eth *eth, u32 hash)
{
	struct mtk_foe_entry entry = ((struct mtk_foe_entry *)eth->foe_table)[hash];

	return (entry.bfib1.state == BIND) ? 0 : 1;
}
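
/* The FOE table is DMA-coherent (see mtk_init_foe_table()), so a plain
 * memcpy is enough to publish the entry to the PPE. */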
static void
mtk_foe_write(struct mtk_eth *eth, u32 hash,
	      struct mtk_foe_entry *entry)
{
	struct mtk_foe_entry *table = (struct mtk_foe_entry *)eth->foe_table;

	memcpy(&table[hash], entry, sizeof(*entry));
}
int mtk_flow_offload(struct mtk_eth *eth,
		     enum flow_offload_type type,
		     struct flow_offload *flow,
		     struct flow_offload_hw_path *src,
		     struct flow_offload_hw_path *dest)
{
	struct flow_offload_tuple *otuple = &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple;
	struct flow_offload_tuple *rtuple = &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple;
	u32 time_stamp = mtk_r32(eth, 0x0010) & (0x7fff);
	u32 ohash, rhash;
	struct mtk_foe_entry orig = {
		.bfib1.time_stamp = time_stamp,
		.bfib1.psn = 0,
	};
	struct mtk_foe_entry reply = {
		.bfib1.time_stamp = time_stamp,
		.bfib1.psn = 0,
	};
	if (otuple->l4proto != IPPROTO_TCP && otuple->l4proto != IPPROTO_UDP)
		return -EINVAL;

	if (type == FLOW_OFFLOAD_DEL) {
		flow = NULL;
		synchronize_rcu();
		return 0;
	}
	switch (otuple->l3proto) {
	case AF_INET:
		if (mtk_foe_prepare_v4(&orig, otuple, rtuple, src, dest) ||
		    mtk_foe_prepare_v4(&reply, rtuple, otuple, dest, src))
			return -EINVAL;

		ohash = mtk_flow_hash_v4(otuple);
		rhash = mtk_flow_hash_v4(rtuple);
		break;

	default:
		return -EINVAL;
	}
	/* Two-way hash: on a collision, fall back to the adjacent entry */
	if (!mtk_check_entry_available(eth, ohash)) {
		if (!mtk_check_entry_available(eth, ohash + 1))
			return -EINVAL;
		ohash += 1;
	}
	if (!mtk_check_entry_available(eth, rhash)) {
		if (!mtk_check_entry_available(eth, rhash + 1))
			return -EINVAL;
		rhash += 1;
	}
	mtk_foe_set_mac(&orig, dest->eth_src, dest->eth_dest);
	mtk_foe_set_mac(&reply, src->eth_src, src->eth_dest);
	mtk_foe_write(eth, ohash, &orig);
	mtk_foe_write(eth, rhash, &reply);
	rcu_assign_pointer(eth->foe_flow_table[ohash], flow);
	rcu_assign_pointer(eth->foe_flow_table[rhash], flow);

	return 0;
}
#ifdef CONFIG_NET_MEDIATEK_HW_QOS

#define QDMA_TX_SCH_TX		0x1a14
static void mtk_ppe_scheduler(struct mtk_eth *eth, int id, u32 rate)
{
	int exp = 0, shift = 0;
	u32 reg = mtk_r32(eth, QDMA_TX_SCH_TX);
	u32 val = 0;

	/* scheduler 1 is configured in the upper 16 bits of the register */
	if (id)
		shift = 16;

	/* encode the rate as a 7 bit mantissa plus a decimal exponent */
	while (rate > 127) {
		rate /= 10;
		exp++;
	}

	val |= (rate & 0x7f) << 4;
	val |= exp & 0xf;

	reg &= ~(0xffff << shift);
	reg |= val << shift;
	mtk_w32(eth, reg, QDMA_TX_SCH_TX);
}
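
/* Each QDMA TX queue has a config/scheduling register pair spaced 0x10
 * apart, selected by the queue id. */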
#define QTX_CFG(x)	(0x1800 + (x * 0x10))
#define QTX_SCH(x)	(0x1804 + (x * 0x10))
static void mtk_ppe_queue(struct mtk_eth *eth, int id, int sched, int weight,
			  int resv, u32 min_rate, u32 max_rate)
{
	int max_exp = 0, min_exp = 0;
	u32 reg;

	reg = mtk_r32(eth, QTX_SCH(id));

	/* both rates use the 7 bit mantissa / decimal exponent encoding */
	while (max_rate > 127) {
		max_rate /= 10;
		max_exp++;
	}
	while (min_rate > 127) {
		min_rate /= 10;
		min_exp++;
	}

	reg |= (min_rate & 0x7f) << 20;
	reg |= (min_exp & 0xf) << 16;
	reg |= (weight & 0xf) << 12;
	reg |= (max_rate & 0x7f) << 4;
	reg |= max_exp & 0xf;
	mtk_w32(eth, reg, QTX_SCH(id));

	reg = mtk_r32(eth, QTX_CFG(id));
	reg |= (resv << 8) | resv;
	mtk_w32(eth, reg, QTX_CFG(id));
}
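
/* Allocate the software RCU lookup table and the DMA-coherent FOE table
 * that is shared with the PPE. */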
static int mtk_init_foe_table(struct mtk_eth *eth)
{
	eth->foe_flow_table = devm_kcalloc(eth->dev, MTK_PPE_ENTRY_CNT,
					   sizeof(*eth->foe_flow_table),
					   GFP_KERNEL);
	if (!eth->foe_flow_table)
		return -ENOMEM;

	/* map the FOE table */
	eth->foe_table = dmam_alloc_coherent(eth->dev, MTK_PPE_TBL_SZ,
					     &eth->foe_table_phys, GFP_KERNEL);
	if (!eth->foe_table) {
		dev_err(eth->dev, "failed to allocate foe table\n");
		devm_kfree(eth->dev, eth->foe_flow_table);
		return -ENOMEM;
	}

	return 0;
}
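
/* Bring up the PPE: point it at the FOE table, configure hashing, entry
 * size, aging, keep-alive and binding rate limits, then enable it and steer
 * GMAC traffic into it. */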
static int mtk_ppe_start(struct mtk_eth *eth)
{
	int ret;

	ret = mtk_init_foe_table(eth);
	if (ret)
		return ret;

	/* tell the PPE about the table's base address */
	mtk_w32(eth, eth->foe_table_phys, MTK_REG_PPE_TB_BASE);
	/* flush the table */
	memset(eth->foe_table, 0, MTK_PPE_TBL_SZ);
	/* setup the hash mode and table size */
	mtk_m32(eth,
		MTK_PPE_TB_CFG_HASH_MODE_MASK | MTK_PPE_TB_CFG_TBL_SZ_MASK,
		MTK_PPE_TB_CFG_HASH_MODE1 | MTK_PPE_TB_CFG_TBL_SZ_4K,
		MTK_REG_PPE_TB_CFG);
	/* set the default hashing seed */
	mtk_w32(eth, MTK_PPE_HASH_SEED, MTK_REG_PPE_HASH_SEED);
	/* each FOE entry is 64 bytes and is set up by CPU forwarding */
	mtk_m32(eth, MTK_PPE_CAH_CTRL_X_MODE | MTK_PPE_TB_CFG_ENTRY_SZ_MASK |
		MTK_PPE_TB_CFG_SMA_MASK,
		MTK_PPE_TB_CFG_ENTRY_SZ_64B | MTK_PPE_TB_CFG_SMA_FWD_CPU,
		MTK_REG_PPE_TB_CFG);
	mtk_w32(eth, 0xFFFFFFFF, MTK_REG_PPE_IP_PROT_CHK);
	mtk_m32(eth, 0, MTK_PPE_CAH_CTRL_X_MODE, MTK_REG_PPE_CAH_CTRL);
	mtk_m32(eth, MTK_PPE_CAH_CTRL_X_MODE, MTK_PPE_CAH_CTRL_EN,
		MTK_REG_PPE_CAH_CTRL);
	mtk_m32(eth, 0, MTK_PPE_FLOW_CFG_IPV4_NAT_FRAG_EN |
		MTK_PPE_FLOW_CFG_IPV4_NAPT_EN | MTK_PPE_FLOW_CFG_IPV4_NAT_EN |
		MTK_PPE_FLOW_CFG_IPV4_GREK_EN,
		MTK_REG_PPE_FLOW_CFG);
	/* setup flow entry un/bind aging */
	mtk_m32(eth, 0,
		MTK_PPE_TB_CFG_UNBD_AGE | MTK_PPE_TB_CFG_NTU_AGE |
		MTK_PPE_TB_CFG_FIN_AGE | MTK_PPE_TB_CFG_UDP_AGE |
		MTK_PPE_TB_CFG_TCP_AGE,
		MTK_REG_PPE_TB_CFG);
	mtk_m32(eth, MTK_PPE_UNB_AGE_MNP_MASK | MTK_PPE_UNB_AGE_DLTA_MASK,
		MTK_PPE_UNB_AGE_MNP | MTK_PPE_UNB_AGE_DLTA,
		MTK_REG_PPE_UNB_AGE);
	mtk_m32(eth, MTK_PPE_BND_AGE0_NTU_DLTA_MASK |
		MTK_PPE_BND_AGE0_UDP_DLTA_MASK,
		MTK_PPE_BND_AGE0_NTU_DLTA | MTK_PPE_BND_AGE0_UDP_DLTA,
		MTK_REG_PPE_BND_AGE0);
	mtk_m32(eth, MTK_PPE_BND_AGE1_FIN_DLTA_MASK |
		MTK_PPE_BND_AGE1_TCP_DLTA_MASK,
		MTK_PPE_BND_AGE1_FIN_DLTA | MTK_PPE_BND_AGE1_TCP_DLTA,
		MTK_REG_PPE_BND_AGE1);
	/* setup flow entry keep alive */
	mtk_m32(eth, MTK_PPE_TB_CFG_KA_MASK, MTK_PPE_TB_CFG_KA,
		MTK_REG_PPE_TB_CFG);
	mtk_w32(eth, MTK_PPE_KA_UDP | MTK_PPE_KA_TCP | MTK_PPE_KA_T, MTK_REG_PPE_KA);
	/* setup flow entry rate limit */
	mtk_w32(eth, (0x3fff << 16) | 0x3fff, MTK_REG_PPE_BIND_LMT_0);
	mtk_w32(eth, MTK_PPE_NTU_KA | 0x3fff, MTK_REG_PPE_BIND_LMT_1);
	mtk_m32(eth, MTK_PPE_BNDR_RATE_MASK, 1, MTK_REG_PPE_BNDR);
	/* enable the PPE */
	mtk_m32(eth, 0, MTK_PPE_GLO_CFG_EN, MTK_REG_PPE_GLO_CFG);
#ifdef CONFIG_RALINK
	/* set the default forwarding port to QDMA */
	mtk_w32(eth, 0x0, MTK_REG_PPE_DFT_CPORT);
#else
	/* set the default forwarding port to QDMA */
	mtk_w32(eth, 0x55555555, MTK_REG_PPE_DFT_CPORT);
#endif
	/* allow packets with TTL=0 */
	mtk_m32(eth, MTK_PPE_GLO_CFG_TTL0_DROP, 0, MTK_REG_PPE_GLO_CFG);
	/* send all traffic from the GMACs to the PPE */
	mtk_m32(eth, 0xffff, 0x4444, MTK_GDMA_FWD_CFG(0));
	mtk_m32(eth, 0xffff, 0x4444, MTK_GDMA_FWD_CFG(1));
	dev_info(eth->dev, "PPE started\n");
#ifdef CONFIG_NET_MEDIATEK_HW_QOS
	mtk_ppe_scheduler(eth, 0, 500000);
	mtk_ppe_scheduler(eth, 1, 500000);
	mtk_ppe_queue(eth, 0, 0, 7, 32, 250000, 0);
	mtk_ppe_queue(eth, 1, 0, 7, 32, 250000, 0);
	mtk_ppe_queue(eth, 8, 1, 7, 32, 250000, 0);
	mtk_ppe_queue(eth, 9, 1, 7, 32, 250000, 0);
#endif

	return 0;
}
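
/* Poll the busy bit in the global config register for up to one second;
 * if the PPE never goes idle, reset the whole block. */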
static int mtk_ppe_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;
	u32 r = 0;

	while (1) {
		r = mtk_r32(eth, MTK_REG_PPE_GLO_CFG);
		if (!(r & MTK_PPE_GLO_CFG_BUSY))
			return 0;
		if (time_after(jiffies, t_start + HZ))
			break;
		usleep_range(10, 20);
	}

	dev_err(eth->dev, "ppe: table busy timeout - resetting\n");
	reset_control_reset(eth->rst_ppe);

	return -ETIMEDOUT;
}
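
/* Tear down offloading: park all traffic, wait for the PPE to idle,
 * invalidate every FOE entry and hand the GDMA forwarding configuration
 * back to the plain DMA path. */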
static int mtk_ppe_stop(struct mtk_eth *eth)
{
	u32 r1 = 0, r2 = 0;
	int i;

	/* discard all traffic while we disable the PPE */
	mtk_m32(eth, 0xffff, 0x7777, MTK_GDMA_FWD_CFG(0));
	mtk_m32(eth, 0xffff, 0x7777, MTK_GDMA_FWD_CFG(1));

	if (mtk_ppe_busy_wait(eth))
		return -ETIMEDOUT;
	/* invalidate all flow table entries */
	for (i = 0; i < MTK_PPE_ENTRY_CNT; i++)
		eth->foe_table[i].bfib1.state = FOE_STATE_INVALID;
	/* disable caching */
	mtk_m32(eth, 0, MTK_PPE_CAH_CTRL_X_MODE, MTK_REG_PPE_CAH_CTRL);
	mtk_m32(eth, MTK_PPE_CAH_CTRL_X_MODE | MTK_PPE_CAH_CTRL_EN, 0,
		MTK_REG_PPE_CAH_CTRL);
	/* the cache flush has to happen before HNAT is disabled */
	mtk_m32(eth, MTK_PPE_GLO_CFG_EN, 0, MTK_REG_PPE_GLO_CFG);
	/* disable the PPE flow offload types */
	mtk_m32(eth,
		MTK_PPE_FLOW_CFG_IPV4_NAT_FRAG_EN |
		MTK_PPE_FLOW_CFG_IPV4_NAPT_EN | MTK_PPE_FLOW_CFG_IPV4_NAT_EN |
		MTK_PPE_FLOW_CFG_FUC_FOE | MTK_PPE_FLOW_CFG_FMC_FOE,
		0, MTK_REG_PPE_FLOW_CFG);
	/* disable FOE aging */
	mtk_m32(eth,
		MTK_PPE_TB_CFG_FIN_AGE | MTK_PPE_TB_CFG_UDP_AGE |
		MTK_PPE_TB_CFG_TCP_AGE | MTK_PPE_TB_CFG_UNBD_AGE |
		MTK_PPE_TB_CFG_NTU_AGE, 0, MTK_REG_PPE_TB_CFG);
	r1 = mtk_r32(eth, 0x100);
	r2 = mtk_r32(eth, 0x10c);

	dev_info(eth->dev, "0x100 = 0x%x, 0x10c = 0x%x\n", r1, r2);

	if (((r1 & 0xff00) >> 0x8) >= (r1 & 0xff) ||
	    ((r1 & 0xff00) >> 0x8) >= (r2 & 0xff)) {
		dev_info(eth->dev, "reset pse\n");
		mtk_w32(eth, 0x1, 0x4);
	}
	/* set the FOE entry base address back to 0 */
	mtk_w32(eth, 0, MTK_REG_PPE_TB_BASE);

	if (mtk_ppe_busy_wait(eth))
		return -ETIMEDOUT;
	/* send all traffic back to the DMA engine */
#ifdef CONFIG_RALINK
	mtk_m32(eth, 0xffff, 0x0, MTK_GDMA_FWD_CFG(0));
	mtk_m32(eth, 0xffff, 0x0, MTK_GDMA_FWD_CFG(1));
#else
	mtk_m32(eth, 0xffff, 0x5555, MTK_GDMA_FWD_CFG(0));
	mtk_m32(eth, 0xffff, 0x5555, MTK_GDMA_FWD_CFG(1));
#endif

	return 0;
}
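
/* Keep-alive packets trapped to the CPU refresh the software flow timeout
 * so that the flow_offload core does not expire a flow the PPE is still
 * accelerating. */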
static void mtk_offload_keepalive(struct fe_priv *eth, unsigned int hash)
{
	struct flow_offload *flow;

	rcu_read_lock();
	flow = rcu_dereference(eth->foe_flow_table[hash]);
	if (flow)
		flow->timeout = jiffies + 30 * HZ;
	rcu_read_unlock();
}
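
/* Map the RXD4 CPU reason to an action: keep-alive hits refresh the flow
 * and are consumed (non-zero return), sampled packets are consumed, and
 * anything else continues up the normal RX path. */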
int mtk_offload_check_rx(struct fe_priv *eth, struct sk_buff *skb, u32 rxd4)
{
	unsigned int hash;

	switch (FIELD_GET(MTK_RXD4_CPU_REASON, rxd4)) {
	case MTK_CPU_REASON_KEEPALIVE_UC_OLD_HDR:
	case MTK_CPU_REASON_KEEPALIVE_MC_NEW_HDR:
	case MTK_CPU_REASON_KEEPALIVE_DUP_OLD_HDR:
		hash = FIELD_GET(MTK_RXD4_FOE_ENTRY, rxd4);
		mtk_offload_keepalive(eth, hash);
		return -1;
	case MTK_CPU_REASON_PACKET_SAMPLING:
		return -1;
	default:
		return 0;
	}
}
int mtk_ppe_probe(struct mtk_eth *eth)
{
	int err;

	err = mtk_ppe_start(eth);
	if (err)
		return err;

	err = mtk_ppe_debugfs_init(eth);
	if (err)
		return err;

	return 0;
}
void mtk_ppe_remove(struct mtk_eth *eth)
{
	mtk_ppe_stop(eth);
}