1 /* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
10 * Copyright (C) 2018 John Crispin <john@phrozen.org>
13 #include "mtk_offload.h"
/* Compute the PPE FOE-table hash for an IPv4 flow tuple.
 *
 * NOTE(review): this fragment ends after the final 16-bit fold; the
 * masking to table size and the return of the hash presumably follow —
 * confirm against the complete file.
 */
mtk_flow_hash_v4(struct flow_offload_tuple *tuple)
	/* pack the L4 ports into one word: src port high, dst port low */
	u32 ports = ntohs(tuple->src_port) << 16 | ntohs(tuple->dst_port);
	/* NOTE(review): 'src' loads dst_v4 and 'dst' loads src_v4 — the
	 * swap looks deliberate (hardware hash input order); confirm. */
	u32 src = ntohl(tuple->dst_v4.s_addr);
	u32 dst = ntohl(tuple->src_v4.s_addr);
	/* bitwise mux: each bit of 'ports' selects src or dst */
	u32 hash = (ports & src) | ((~ports) & dst);
	u32 hash_23_0 = hash & 0xffffff;	/* low 24 bits */
	u32 hash_31_24 = hash & 0xff000000;	/* high 8 bits */

	/* rotate-left by 8 and mix in all three inputs */
	hash = ports ^ src ^ dst ^ ((hash_23_0 << 8) | (hash_31_24 >> 24));
	/* fold high half onto low half; low mask is 20 bits (0xfffff),
	 * not 16 — matches the vendor reference implementation */
	hash = ((hash & 0xffff0000) >> 16 ) ^ (hash & 0xfffff);
/* Fill a hardware FOE (flow offload entry) for an IPv4 NAPT flow.
 *
 * @entry:      hardware entry to populate
 * @tuple:      tuple in the packet's original direction
 * @dest_tuple: tuple of the reply direction (provides the NAT'ed addrs)
 * @src:        hw path the flow arrives on
 * @dest:       hw path the flow leaves on (egress dev, VLAN, PPPoE)
 *
 * NOTE(review): extraction dropped several lines here (closing braces,
 * `#endif`s and the `break`s of the VLAN-protocol switch) — the visible
 * text is not the complete function.
 */
mtk_foe_prepare_v4(struct mtk_foe_entry *entry,
		   struct flow_offload_tuple *tuple,
		   struct flow_offload_tuple *dest_tuple,
		   struct flow_offload_hw_path *src,
		   struct flow_offload_hw_path *dest)
	/* multicast egress MAC -> tell the PPE to treat it as mcast */
	int is_mcast = !!is_multicast_ether_addr(dest->eth_dest);

	if (tuple->l4proto == IPPROTO_UDP)
		entry->ipv4_hnapt.bfib1.udp = 1;

	entry->ipv4_hnapt.etype = htons(ETH_P_IP);
	entry->ipv4_hnapt.bfib1.pkt_type = IPV4_HNAPT;
	entry->ipv4_hnapt.iblk2.fqos = 0;	/* no QoS forcing by default */
	entry->ipv4_hnapt.bfib1.ttl = 1;	/* let hw decrement TTL */
	entry->ipv4_hnapt.bfib1.cah = 1;	/* cacheable entry */
	entry->ipv4_hnapt.bfib1.ka = 1;		/* keep-alive to CPU */
	entry->ipv4_hnapt.iblk2.mcast = is_mcast;
	entry->ipv4_hnapt.iblk2.dscp = 0;
	entry->ipv4_hnapt.iblk2.port_mg = 0x3f;	/* all member ports */
	entry->ipv4_hnapt.iblk2.port_ag = 0x1f;	/* account group */
#ifdef CONFIG_NET_MEDIATEK_HW_QOS
	entry->ipv4_hnapt.iblk2.qid = 1;
	entry->ipv4_hnapt.iblk2.fqos = 1;
	/* NOTE(review): an `#endif` and a platform `#ifdef` around the
	 * two 'dp' assignments below were lost in extraction — the two
	 * assignments are presumably alternative branches, not both run */
	entry->ipv4_hnapt.iblk2.dp = 1;
	if ((dest->flags & FLOW_OFFLOAD_PATH_VLAN) && (dest->vlan_id > 1))
		entry->ipv4_hnapt.iblk2.qid += 8;
	/* derive destination port from the netdev name digit, e.g. eth1 */
	entry->ipv4_hnapt.iblk2.dp = (dest->dev->name[3] - '0') + 1;

	/* original 5-tuple as seen on ingress */
	entry->ipv4_hnapt.sip = ntohl(tuple->src_v4.s_addr);
	entry->ipv4_hnapt.dip = ntohl(tuple->dst_v4.s_addr);
	entry->ipv4_hnapt.sport = ntohs(tuple->src_port);
	entry->ipv4_hnapt.dport = ntohs(tuple->dst_port);

	/* NAT translation: the new tuple is the reply tuple reversed */
	entry->ipv4_hnapt.new_sip = ntohl(dest_tuple->dst_v4.s_addr);
	entry->ipv4_hnapt.new_dip = ntohl(dest_tuple->src_v4.s_addr);
	entry->ipv4_hnapt.new_sport = ntohs(dest_tuple->dst_port);
	entry->ipv4_hnapt.new_dport = ntohs(dest_tuple->src_port);

	entry->bfib1.state = BIND;

	if (dest->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		/* egress is PPPoE: rewrite ethertype and insert session id */
		entry->ipv4_hnapt.etype = htons(ETH_P_PPP_SES);
		entry->ipv4_hnapt.pppoe_id = dest->pppoe_sid;

	if (dest->flags & FLOW_OFFLOAD_PATH_VLAN) {
		entry->ipv4_hnapt.vlan1 = dest->vlan_id;
		entry->bfib1.vlan_layer = 1;

		/* vpm selects the tag protocol the hw inserts */
		switch (dest->vlan_proto) {
		case htons(ETH_P_8021Q):
			entry->ipv4_hnapt.bfib1.vpm = 1;
			/* NOTE(review): break presumably lost in extraction */
		case htons(ETH_P_8021AD):
			entry->ipv4_hnapt.bfib1.vpm = 2;
/* Program source and destination MAC addresses into a FOE entry.
 *
 * The hardware stores each MAC as a byte-swapped 32-bit high part plus a
 * byte-swapped 16-bit low part.
 *
 * NOTE(review): the u8* -> u32*/u16* casts perform potentially unaligned,
 * type-punned loads; callers presumably pass suitably aligned buffers —
 * confirm (get_unaligned_be32/be16 would be the robust form).
 */
mtk_foe_set_mac(struct mtk_foe_entry *entry, u8 *smac, u8 *dmac)
	entry->ipv4_hnapt.dmac_hi = swab32(*((u32 *) dmac));
	entry->ipv4_hnapt.dmac_lo = swab16(*((u16 *) &dmac[4]));
	entry->ipv4_hnapt.smac_hi = swab32(*((u32 *) smac));
	entry->ipv4_hnapt.smac_lo = swab16(*((u16 *) &smac[4]));
122 mtk_check_hashcollision(struct mtk_eth
*eth
, u32 hash
)
124 struct mtk_foe_entry entry
= ((struct mtk_foe_entry
*)eth
->foe_table
)[hash
];
125 return (entry
.bfib1
.state
!= BIND
)? 0:1;
/* Write one prepared FOE entry into the hardware table slot @hash.
 *
 * The table lives in DMA-coherent memory (eth->foe_table), so a plain
 * memcpy is visible to the PPE without an explicit flush.
 */
mtk_foe_write(struct mtk_eth *eth, u32 hash,
	      struct mtk_foe_entry *entry)
	struct mtk_foe_entry *table = (struct mtk_foe_entry *)eth->foe_table;

	memcpy(&table[hash], entry, sizeof(*entry));
/* Install (or mark for removal) a flow in the PPE hardware table.
 *
 * Builds one FOE entry per direction (original and reply), hashes each
 * tuple to a table slot, and writes both entries plus the RCU-published
 * software shadow pointers used for keepalive lookups.
 *
 * NOTE(review): extraction dropped several control-flow lines (early
 * returns for unsupported protocols, the v4/v6 switch arms, and the
 * collision-shift bodies) — hedged comments mark the gaps.
 */
int mtk_flow_offload(struct mtk_eth *eth,
		     enum flow_offload_type type,
		     struct flow_offload *flow,
		     struct flow_offload_hw_path *src,
		     struct flow_offload_hw_path *dest)
	struct flow_offload_tuple *otuple = &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple;
	struct flow_offload_tuple *rtuple = &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple;
	/* 15-bit hardware timestamp used for entry aging */
	u32 time_stamp = mtk_r32(eth, 0x0010) & (0x7fff);

	struct mtk_foe_entry orig = {
		.bfib1.time_stamp = time_stamp,
	struct mtk_foe_entry reply = {
		.bfib1.time_stamp = time_stamp,

	/* only TCP and UDP flows can be offloaded */
	if (otuple->l4proto != IPPROTO_TCP && otuple->l4proto != IPPROTO_UDP)

	switch (otuple->l3proto) {
	if (mtk_foe_prepare_v4(&orig, otuple, rtuple, src, dest) ||
	    mtk_foe_prepare_v4(&reply, rtuple, otuple, dest, src))

	ohash = mtk_flow_hash_v4(otuple);
	rhash = mtk_flow_hash_v4(rtuple);

	/* deletion: invalidate both directions before the table write */
	if (type == FLOW_OFFLOAD_DEL) {
		orig.bfib1.state = INVALID;
		reply.bfib1.state = INVALID;

	/* Two-way hash: on a collision the hash value is shifted to the
	 * next slot (shift bodies lost in extraction — confirm). */
	if(mtk_check_hashcollision(eth, ohash))
	if(mtk_check_hashcollision(eth, rhash))

	mtk_foe_set_mac(&orig, dest->eth_src, dest->eth_dest);
	mtk_foe_set_mac(&reply, src->eth_src, src->eth_dest);

	/* commit hw entries and publish the sw shadow for keepalives */
	mtk_foe_write(eth, ohash, &orig);
	mtk_foe_write(eth, rhash, &reply);
	rcu_assign_pointer(eth->foe_flow_table[ohash], flow);
	rcu_assign_pointer(eth->foe_flow_table[rhash], flow);

	if (type == FLOW_OFFLOAD_DEL)
202 #ifdef CONFIG_NET_MEDIATEK_HW_QOS
/* QDMA TX scheduler rate-control register */
#define QDMA_TX_SCH_TX 0x1a14

/* Configure the rate limit of QDMA TX scheduler @id.
 *
 * The hardware encodes the rate as mantissa (7 bits) + exponent; the
 * mantissa/exponent reduction loop and the per-id shift selection were
 * lost in extraction — only the register read-modify-write survives.
 * NOTE(review): confirm the missing computation against the full file.
 */
static void mtk_ppe_scheduler(struct mtk_eth *eth, int id, u32 rate)
	int exp = 0, shift = 0;
	u32 reg = mtk_r32(eth, QDMA_TX_SCH_TX);

	/* mantissa into bits [10:4] of the per-scheduler field */
	val |= (rate & 0x7f) << 4;

	/* clear this scheduler's 16-bit field before merging */
	reg &= ~(0xffff << shift);

	mtk_w32(eth, val, QDMA_TX_SCH_TX);
/* Per-queue QDMA config/scheduling registers, 0x10 apart */
#define QTX_CFG(x) (0x1800 + (x * 0x10))
#define QTX_SCH(x) (0x1804 + (x * 0x10))

/* Configure QDMA queue @id: scheduler binding, weight, reserved buffer
 * count and min/max rate limits.
 *
 * Rates are encoded as 7-bit mantissa + 4-bit exponent; each while loop
 * divides the rate down until it fits the mantissa (loop bodies lost in
 * extraction — presumably rate /= 10; exp++; confirm).
 */
static void mtk_ppe_queue(struct mtk_eth *eth, int id, int sched, int weight, int resv, u32 min_rate, u32 max_rate)
	int max_exp = 0, min_exp = 0;

	reg = mtk_r32(eth, QTX_SCH(id));

	/* reduce rates to mantissa range */
	while (max_rate > 127) {
	while (min_rate > 127) {

	/* assemble the scheduling word:
	 * [26:20] min mantissa, [19:16] min exp, [15:12] weight,
	 * [10:4]  max mantissa, [3:0]  max exp */
	reg |= (min_rate & 0x7f) << 20;
	reg |= (min_exp & 0xf) << 16;
	reg |= (weight & 0xf) << 12;
	reg |= (max_rate & 0x7f) << 4;
	reg |= max_exp & 0xf;
	mtk_w32(eth, reg, QTX_SCH(id));

	/* reserved buffer pages: same value for both thresholds */
	reg = mtk_r32(eth, QTX_CFG(id));
	reg |= (resv << 8) | resv;
	mtk_w32(eth, reg, QTX_CFG(id));
277 static int mtk_init_foe_table(struct mtk_eth
*eth
)
282 eth
->foe_flow_table
= devm_kcalloc(eth
->dev
, MTK_PPE_ENTRY_CNT
,
283 sizeof(*eth
->foe_flow_table
),
285 if (!eth
->foe_flow_table
)
288 /* map the FOE table */
289 eth
->foe_table
= dmam_alloc_coherent(eth
->dev
, MTK_PPE_TBL_SZ
,
290 ð
->foe_table_phys
, GFP_KERNEL
);
291 if (!eth
->foe_table
) {
292 dev_err(eth
->dev
, "failed to allocate foe table\n");
293 kfree(eth
->foe_flow_table
);
/* Bring up the PPE: allocate tables, program hashing/aging/keepalive
 * parameters, enable the engine and steer GMAC traffic into it.
 *
 * NOTE(review): extraction dropped the error-return checks, several
 * `mtk_m32(eth, ...` call heads and the `#else`/`#endif` lines of the
 * platform-conditional sections — hedged comments mark the gaps.
 */
static int mtk_ppe_start(struct mtk_eth *eth)
	ret = mtk_init_foe_table(eth);

	/* tell the PPE about the tables base address */
	mtk_w32(eth, eth->foe_table_phys, MTK_REG_PPE_TB_BASE);

	/* flush the table */
	memset(eth->foe_table, 0, MTK_PPE_TBL_SZ);

	/* hash mode 1, 4K-entry table (call head lost in extraction) */
	MTK_PPE_TB_CFG_HASH_MODE_MASK | MTK_PPE_TB_CFG_TBL_SZ_MASK,
	MTK_PPE_TB_CFG_HASH_MODE1 | MTK_PPE_TB_CFG_TBL_SZ_4K,

	/* set the default hashing seed */
	mtk_w32(eth, MTK_PPE_HASH_SEED, MTK_REG_PPE_HASH_SEED);

	/* each foe entry is 64bytes and is setup by cpu forwarding */
	mtk_m32(eth, MTK_PPE_CAH_CTRL_X_MODE | MTK_PPE_TB_CFG_ENTRY_SZ_MASK |
		MTK_PPE_TB_CFG_SMA_MASK,
		MTK_PPE_TB_CFG_ENTRY_SZ_64B | MTK_PPE_TB_CFG_SMA_FWD_CPU,

	/* allow offload of all recognised IP protocols */
	mtk_w32(eth, 0xFFFFFFFF, MTK_REG_PPE_IP_PROT_CHK);

	/* clear cache X-mode, then enable the cache */
	mtk_m32(eth, 0, MTK_PPE_CAH_CTRL_X_MODE, MTK_REG_PPE_CAH_CTRL);
	mtk_m32(eth, MTK_PPE_CAH_CTRL_X_MODE, MTK_PPE_CAH_CTRL_EN,
		MTK_REG_PPE_CAH_CTRL);

	/* enable IPv4 NAT/NAPT/fragment/GRE-key flow handling */
	mtk_m32(eth, 0, MTK_PPE_FLOW_CFG_IPV4_NAT_FRAG_EN |
		MTK_PPE_FLOW_CFG_IPV4_NAPT_EN | MTK_PPE_FLOW_CFG_IPV4_NAT_EN |
		MTK_PPE_FLOW_CFG_IPV4_GREK_EN,
		MTK_REG_PPE_FLOW_CFG);

	/* setup flow entry un/bind aging (call head lost in extraction) */
	MTK_PPE_TB_CFG_UNBD_AGE | MTK_PPE_TB_CFG_NTU_AGE |
	MTK_PPE_TB_CFG_FIN_AGE | MTK_PPE_TB_CFG_UDP_AGE |
	MTK_PPE_TB_CFG_TCP_AGE,

	/* unbound-entry aging parameters */
	mtk_m32(eth, MTK_PPE_UNB_AGE_MNP_MASK | MTK_PPE_UNB_AGE_DLTA_MASK,
		MTK_PPE_UNB_AGE_MNP | MTK_PPE_UNB_AGE_DLTA,
		MTK_REG_PPE_UNB_AGE);
	/* bound-entry aging deltas: non-TCP/UDP and UDP */
	mtk_m32(eth, MTK_PPE_BND_AGE0_NTU_DLTA_MASK |
		MTK_PPE_BND_AGE0_UDP_DLTA_MASK,
		MTK_PPE_BND_AGE0_NTU_DLTA | MTK_PPE_BND_AGE0_UDP_DLTA,
		MTK_REG_PPE_BND_AGE0);
	/* bound-entry aging deltas: FIN and TCP */
	mtk_m32(eth, MTK_PPE_BND_AGE1_FIN_DLTA_MASK |
		MTK_PPE_BND_AGE1_TCP_DLTA_MASK,
		MTK_PPE_BND_AGE1_FIN_DLTA | MTK_PPE_BND_AGE1_TCP_DLTA,
		MTK_REG_PPE_BND_AGE1);

	/* setup flow entry keep alive */
	mtk_m32(eth, MTK_PPE_TB_CFG_KA_MASK, MTK_PPE_TB_CFG_KA,
	mtk_w32(eth, MTK_PPE_KA_UDP | MTK_PPE_KA_TCP | MTK_PPE_KA_T, MTK_REG_PPE_KA);

	/* setup flow entry rate limit */
	mtk_w32(eth, (0x3fff << 16) | 0x3fff, MTK_REG_PPE_BIND_LMT_0);
	mtk_w32(eth, MTK_PPE_NTU_KA | 0x3fff, MTK_REG_PPE_BIND_LMT_1);
	mtk_m32(eth, MTK_PPE_BNDR_RATE_MASK, 1, MTK_REG_PPE_BNDR);

	/* enable the PPE */
	mtk_m32(eth, 0, MTK_PPE_GLO_CFG_EN, MTK_REG_PPE_GLO_CFG);

	/* set the default forwarding port to QDMA
	 * NOTE(review): the 0x0 and 0x55555555 writes are presumably
	 * alternative `#ifdef` branches collapsed by extraction */
	mtk_w32(eth, 0x0, MTK_REG_PPE_DFT_CPORT);
	mtk_w32(eth, 0x55555555, MTK_REG_PPE_DFT_CPORT);

	/* drop packets with TTL=0 */
	mtk_m32(eth, 0, MTK_PPE_GLO_CFG_TTL0_DROP, MTK_REG_PPE_GLO_CFG);

	/* send all traffic from gmac to the ppe */
	mtk_m32(eth, 0xffff, 0x4444, MTK_GDMA_FWD_CFG(0));
	mtk_m32(eth, 0xffff, 0x4444, MTK_GDMA_FWD_CFG(1));

	dev_info(eth->dev, "PPE started\n");

#ifdef CONFIG_NET_MEDIATEK_HW_QOS
	/* two schedulers at 500 Mbit, four queues split across them */
	mtk_ppe_scheduler(eth, 0, 500000);
	mtk_ppe_scheduler(eth, 1, 500000);
	mtk_ppe_queue(eth, 0, 0, 7, 32, 250000, 0);
	mtk_ppe_queue(eth, 1, 0, 7, 32, 250000, 0);
	mtk_ppe_queue(eth, 8, 1, 7, 32, 250000, 0);
	mtk_ppe_queue(eth, 9, 1, 7, 32, 250000, 0);
/* Poll the PPE busy bit until it clears, for at most one second.
 *
 * Returns 0 when the engine goes idle; on timeout, logs an error and
 * resets the PPE block (timeout return value lost in extraction —
 * presumably a negative errno).
 */
static int mtk_ppe_busy_wait(struct mtk_eth *eth)
	unsigned long t_start = jiffies;

	r = mtk_r32(eth, MTK_REG_PPE_GLO_CFG);
	if (!(r & MTK_PPE_GLO_CFG_BUSY))
	if (time_after(jiffies, t_start + HZ))
	usleep_range(10, 20);	/* back off between polls */

	dev_err(eth->dev, "ppe: table busy timeout - resetting\n");
	reset_control_reset(eth->rst_ppe);
/* Tear down the PPE: quiesce traffic, invalidate all entries, disable
 * caching/aging, and hand forwarding back to the DMA engine.
 *
 * NOTE(review): extraction dropped the early-return error values and
 * the heads of two `mtk_m32(eth, ...` calls — hedged comments mark them.
 */
static int mtk_ppe_stop(struct mtk_eth *eth)
	/* discard all traffic while we disable the PPE */
	mtk_m32(eth, 0xffff, 0x7777, MTK_GDMA_FWD_CFG(0));
	mtk_m32(eth, 0xffff, 0x7777, MTK_GDMA_FWD_CFG(1));

	if (mtk_ppe_busy_wait(eth))

	/* invalidate all flow table entries */
	for (i = 0; i < MTK_PPE_ENTRY_CNT; i++)
		eth->foe_table[i].bfib1.state = FOE_STATE_INVALID;

	/* disable caching */
	mtk_m32(eth, 0, MTK_PPE_CAH_CTRL_X_MODE, MTK_REG_PPE_CAH_CTRL);
	mtk_m32(eth, MTK_PPE_CAH_CTRL_X_MODE | MTK_PPE_CAH_CTRL_EN, 0,
		MTK_REG_PPE_CAH_CTRL);

	/* flush cache has to be ahead of hnat disable */
	mtk_m32(eth, MTK_PPE_GLO_CFG_EN, 0, MTK_REG_PPE_GLO_CFG);

	/* disable IPv4 flow handling (call head lost in extraction) */
	MTK_PPE_FLOW_CFG_IPV4_NAT_FRAG_EN |
	MTK_PPE_FLOW_CFG_IPV4_NAPT_EN | MTK_PPE_FLOW_CFG_IPV4_NAT_EN |
	MTK_PPE_FLOW_CFG_FUC_FOE | MTK_PPE_FLOW_CFG_FMC_FOE,
	0, MTK_REG_PPE_FLOW_CFG);

	/* disable FOE aging (call head lost in extraction) */
	MTK_PPE_TB_CFG_FIN_AGE | MTK_PPE_TB_CFG_UDP_AGE |
	MTK_PPE_TB_CFG_TCP_AGE | MTK_PPE_TB_CFG_UNBD_AGE |
	MTK_PPE_TB_CFG_NTU_AGE, MTK_REG_PPE_TB_CFG);

	/* diagnostic read of PSE FIFO counters */
	r1 = mtk_r32(eth, 0x100);
	r2 = mtk_r32(eth, 0x10c);

	dev_info(eth->dev, "0x100 = 0x%x, 0x10c = 0x%x\n", r1, r2);

	/* if the PSE free-page counters look wedged, reset the PSE */
	if (((r1 & 0xff00) >> 0x8) >= (r1 & 0xff) ||
	    ((r1 & 0xff00) >> 0x8) >= (r2 & 0xff)) {
		dev_info(eth->dev, "reset pse\n");
		mtk_w32(eth, 0x1, 0x4);

	/* set the foe entry base address to 0 */
	mtk_w32(eth, 0, MTK_REG_PPE_TB_BASE);

	if (mtk_ppe_busy_wait(eth))

	/* send all traffic back to the DMA engine
	 * NOTE(review): the 0x0 and 0x5555 writes are presumably
	 * alternative `#ifdef` branches collapsed by extraction */
	mtk_m32(eth, 0xffff, 0x0, MTK_GDMA_FWD_CFG(0));
	mtk_m32(eth, 0xffff, 0x0, MTK_GDMA_FWD_CFG(1));
	mtk_m32(eth, 0xffff, 0x5555, MTK_GDMA_FWD_CFG(0));
	mtk_m32(eth, 0xffff, 0x5555, MTK_GDMA_FWD_CFG(1));
490 static void mtk_offload_keepalive(struct fe_priv
*eth
, unsigned int hash
)
492 struct flow_offload
*flow
;
495 flow
= rcu_dereference(eth
->foe_flow_table
[hash
]);
497 flow
->timeout
= jiffies
+ 30 * HZ
;
/* Inspect the CPU reason field of RX descriptor word 4 and handle
 * PPE-generated packets (keepalives, sampling).
 *
 * NOTE(review): the per-case return values and breaks were lost in
 * extraction — confirm what the caller receives for each reason.
 */
int mtk_offload_check_rx(struct fe_priv *eth, struct sk_buff *skb, u32 rxd4)
	switch (FIELD_GET(MTK_RXD4_CPU_REASON, rxd4)) {
	/* all three keepalive flavours refresh the flow's sw timeout */
	case MTK_CPU_REASON_KEEPALIVE_UC_OLD_HDR:
	case MTK_CPU_REASON_KEEPALIVE_MC_NEW_HDR:
	case MTK_CPU_REASON_KEEPALIVE_DUP_OLD_HDR:
		hash = FIELD_GET(MTK_RXD4_FOE_ENTRY, rxd4);
		mtk_offload_keepalive(eth, hash);
	case MTK_CPU_REASON_PACKET_SAMPLING:
/* Probe entry point: start the PPE and register its debugfs interface.
 *
 * NOTE(review): the error checks after each call were lost in
 * extraction — presumably each `err` is tested and propagated.
 */
int mtk_ppe_probe(struct mtk_eth *eth)
	err = mtk_ppe_start(eth);

	err = mtk_ppe_debugfs_init(eth);
534 void mtk_ppe_remove(struct mtk_eth
*eth
)