1 /* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
10 * Copyright (C) 2018 John Crispin <john@phrozen.org>
13 #include "mtk_offload.h"
/* Compute the FOE (Flow Offload Entry) table index for an IPv4 flow tuple.
 * NOTE(review): the tail of this function (final masking of the hash to the
 * table size and the return statement) is not visible in this chunk — confirm
 * against the full file before relying on the exact range of the result. */
24 mtk_flow_hash_v4(struct flow_offload_tuple
*tuple
)
/* Pack both L4 ports into one word: src_port in the high 16 bits,
 * dst_port in the low 16 bits (both converted to host order first). */
26 u32 ports
= ntohs(tuple
->src_port
) << 16 | ntohs(tuple
->dst_port
);
/* Deliberate swap: "src" is loaded from dst_v4 and "dst" from src_v4.
 * NOTE(review): presumably this mirrors the hardware's hashing convention —
 * verify against the PPE hash specification. */
27 u32 src
= ntohl(tuple
->dst_v4
.s_addr
);
28 u32 dst
= ntohl(tuple
->src_v4
.s_addr
);
/* Bitwise mux: take bits of src where ports has a 1, bits of dst elsewhere. */
29 u32 hash
= (ports
& src
) | ((~ports
) & dst
);
/* Split the intermediate hash into its low 24 bits and high 8 bits. */
30 u32 hash_23_0
= hash
& 0xffffff;
31 u32 hash_31_24
= hash
& 0xff000000;
/* Mix everything together; the split halves are rotated left by 8 bits
 * ((hash_23_0 << 8) | (hash_31_24 >> 24)) before being XORed in. */
33 hash
= ports
^ src
^ dst
^ ((hash_23_0
<< 8) | (hash_31_24
>> 24));
/* Fold the upper half onto the lower half.
 * NOTE(review): the second mask is 0xfffff (20 bits), not 0xffff — this looks
 * asymmetric with the 16-bit shift; confirm it is intentional. */
34 hash
= ((hash
& 0xffff0000) >> 16 ) ^ (hash
& 0xfffff);
/* Populate a hardware FOE entry for one direction of an IPv4 NAPT flow.
 *
 * entry      - FOE entry to fill in (ipv4_hnapt view of the union)
 * tuple      - the flow tuple for the direction being programmed
 * dest_tuple - the tuple of the opposite direction (used for the NATed
 *              addresses/ports: new_* fields below)
 * src/dest   - hardware paths (MACs, VLAN, PPPoE, egress device) for the
 *              ingress and egress side respectively
 *
 * NOTE(review): several original lines (#else/#endif for the QoS/platform
 * ifdefs, switch "break"s, the default case and the final return) are not
 * visible in this chunk. */
42 mtk_foe_prepare_v4(struct mtk_foe_entry
*entry
,
43 struct flow_offload_tuple
*tuple
,
44 struct flow_offload_tuple
*dest_tuple
,
45 struct flow_offload_hw_path
*src
,
46 struct flow_offload_hw_path
*dest
)
/* Normalize the multicast predicate to 0/1 for the iblk2.mcast bitfield. */
48 int is_mcast
= !!is_multicast_ether_addr(dest
->eth_dest
);
/* Mark UDP flows so the PPE applies UDP (rather than TCP) handling. */
50 if (tuple
->l4proto
== IPPROTO_UDP
)
51 entry
->ipv4_hnapt
.bfib1
.udp
= 1;
/* Base entry setup: plain IPv4 ethertype, IPv4 HNAPT packet type,
 * TTL decrement, cacheable, keep-alive enabled, no QoS/DSCP override. */
53 entry
->ipv4_hnapt
.etype
= htons(ETH_P_IP
);
54 entry
->ipv4_hnapt
.bfib1
.pkt_type
= IPV4_HNAPT
;
55 entry
->ipv4_hnapt
.iblk2
.fqos
= 0;
56 entry
->ipv4_hnapt
.bfib1
.ttl
= 1;
57 entry
->ipv4_hnapt
.bfib1
.cah
= 1;
58 entry
->ipv4_hnapt
.bfib1
.ka
= 1;
59 entry
->ipv4_hnapt
.iblk2
.mcast
= is_mcast
;
60 entry
->ipv4_hnapt
.iblk2
.dscp
= 0;
/* Port meter/account groups: magic masks 0x3f / 0x1f.
 * NOTE(review): meaning not derivable from this chunk — check the PPE docs. */
61 entry
->ipv4_hnapt
.iblk2
.port_mg
= 0x3f;
62 entry
->ipv4_hnapt
.iblk2
.port_ag
= 0x1f;
63 #ifdef CONFIG_NET_MEDIATEK_HW_QOS
/* HW QoS enabled: put the flow on queue 1 and turn on forced QoS. */
64 entry
->ipv4_hnapt
.iblk2
.qid
= 1;
65 entry
->ipv4_hnapt
.iblk2
.fqos
= 1;
/* Destination-port selection. NOTE(review): the #else/#endif lines between
 * the two "dp" assignments are missing from this view; the two branches are
 * presumably platform-conditional alternatives, not sequential code. */
68 entry
->ipv4_hnapt
.iblk2
.dp
= 1;
/* VLAN-tagged egress with a real (non-default) VLAN id gets the upper
 * queue bank (+8). */
69 if ((dest
->flags
& FLOW_OFFLOAD_PATH_VLAN
) && (dest
->vlan_id
> 1))
70 entry
->ipv4_hnapt
.iblk2
.qid
+= 8;
/* Alternative: derive the PPE destination port from the netdev name's 4th
 * character (e.g. "eth0" -> port 1). NOTE(review): fragile naming assumption. */
72 entry
->ipv4_hnapt
.iblk2
.dp
= (dest
->dev
->name
[3] - '0') + 1;
/* Pre-NAT 5-tuple of this direction (host byte order as the HW expects). */
75 entry
->ipv4_hnapt
.sip
= ntohl(tuple
->src_v4
.s_addr
);
76 entry
->ipv4_hnapt
.dip
= ntohl(tuple
->dst_v4
.s_addr
);
77 entry
->ipv4_hnapt
.sport
= ntohs(tuple
->src_port
);
78 entry
->ipv4_hnapt
.dport
= ntohs(tuple
->dst_port
);
/* Post-NAT addresses/ports come from the reverse tuple, crossed over:
 * new source = reply destination, new destination = reply source. */
80 entry
->ipv4_hnapt
.new_sip
= ntohl(dest_tuple
->dst_v4
.s_addr
);
81 entry
->ipv4_hnapt
.new_dip
= ntohl(dest_tuple
->src_v4
.s_addr
);
82 entry
->ipv4_hnapt
.new_sport
= ntohs(dest_tuple
->dst_port
);
83 entry
->ipv4_hnapt
.new_dport
= ntohs(dest_tuple
->src_port
);
/* Entry becomes active (BIND state) once written to the table. */
85 entry
->bfib1
.state
= BIND
;
/* PPPoE egress: switch the ethertype to PPPoE session and record the
 * session id so the HW can rebuild the PPPoE header. */
87 if (dest
->flags
& FLOW_OFFLOAD_PATH_PPPOE
) {
89 entry
->ipv4_hnapt
.etype
= htons(ETH_P_PPP_SES
);
90 entry
->ipv4_hnapt
.pppoe_id
= dest
->pppoe_sid
;
/* VLAN egress: program one VLAN tag and select the tag protocol. */
93 if (dest
->flags
& FLOW_OFFLOAD_PATH_VLAN
) {
94 entry
->ipv4_hnapt
.vlan1
= dest
->vlan_id
;
95 entry
->bfib1
.vlan_layer
= 1;
/* vpm: 1 = 802.1Q tagging, 2 = 802.1AD (QinQ outer tag).
 * NOTE(review): the break statements and default case are missing from
 * this view — confirm there is no unintended fallthrough. */
97 switch (dest
->vlan_proto
) {
98 case htons(ETH_P_8021Q
):
99 entry
->ipv4_hnapt
.bfib1
.vpm
= 1;
101 case htons(ETH_P_8021AD
):
102 entry
->ipv4_hnapt
.bfib1
.vpm
= 2;
/* Program source and destination MAC addresses into a FOE entry.
 * The HW stores each MAC as a byte-swapped 32-bit high part (first 4 bytes)
 * and a byte-swapped 16-bit low part (last 2 bytes).
 * NOTE(review): casting u8* to u32*/u16* relies on suitable alignment of the
 * MAC buffers and is a strict-aliasing violation in ISO C; common in kernel
 * code but worth confirming the buffers are at least 2-byte aligned. */
113 mtk_foe_set_mac(struct mtk_foe_entry
*entry
, u8
*smac
, u8
*dmac
)
/* Destination MAC: bytes 0-3 swapped into dmac_hi, bytes 4-5 into dmac_lo. */
115 entry
->ipv4_hnapt
.dmac_hi
= swab32(*((u32
*) dmac
));
116 entry
->ipv4_hnapt
.dmac_lo
= swab16(*((u16
*) &dmac
[4]));
/* Source MAC: same layout as above. */
117 entry
->ipv4_hnapt
.smac_hi
= swab32(*((u32
*) smac
));
118 entry
->ipv4_hnapt
.smac_lo
= swab16(*((u16
*) &smac
[4]));
/* Copy a prepared FOE entry into the DMA-coherent hardware table at the
 * slot selected by 'hash'. No locking or cache flush is visible here —
 * the table was allocated coherent in mtk_init_foe_table(). */
122 mtk_foe_write(struct mtk_eth
*eth
, u32 hash
,
123 struct mtk_foe_entry
*entry
)
/* foe_table is stored as an opaque pointer on mtk_eth; view it as an
 * array of entries. */
125 struct mtk_foe_entry
*table
= (struct mtk_foe_entry
*)eth
->foe_table
;
127 memcpy(&table
[hash
], entry
, sizeof(*entry
));
/* Add or delete a hardware-offloaded flow: build FOE entries for both the
 * original and reply direction, hash each tuple to a table slot, write the
 * entries and publish the software flow in the RCU lookup table.
 * NOTE(review): declarations of ohash/rhash, the hash-collision handling,
 * error returns and the function's tail are not visible in this chunk. */
130 int mtk_flow_offload(struct mtk_eth
*eth
,
131 enum flow_offload_type type
,
132 struct flow_offload
*flow
,
133 struct flow_offload_hw_path
*src
,
134 struct flow_offload_hw_path
*dest
)
/* The two directions of the connection as seen by the flow table core. */
136 struct flow_offload_tuple
*otuple
= &flow
->tuplehash
[FLOW_OFFLOAD_DIR_ORIGINAL
].tuple
;
137 struct flow_offload_tuple
*rtuple
= &flow
->tuplehash
[FLOW_OFFLOAD_DIR_REPLY
].tuple
;
/* Current 15-bit PPE timestamp, read from register 0x0010; stamped into
 * both entries so HW aging starts from "now". */
138 u32 time_stamp
= mtk_r32(eth
, 0x0010) & (0x7fff);
/* On-stack FOE entries for each direction, pre-seeded with the timestamp. */
140 struct mtk_foe_entry orig
= {
141 .bfib1
.time_stamp
= time_stamp
,
144 struct mtk_foe_entry reply
= {
145 .bfib1
.time_stamp
= time_stamp
,
/* Only TCP and UDP flows can be offloaded by this engine. */
149 if (otuple
->l4proto
!= IPPROTO_TCP
&& otuple
->l4proto
!= IPPROTO_UDP
)
/* Dispatch on L3 protocol; only the IPv4 case is visible in this chunk. */
152 switch (otuple
->l3proto
) {
/* Fill both directions; either failure aborts the offload. */
154 if (mtk_foe_prepare_v4(&orig
, otuple
, rtuple
, src
, dest
) ||
155 mtk_foe_prepare_v4(&reply
, rtuple
, otuple
, dest
, src
))
/* Table slots for each direction. */
158 ohash
= mtk_flow_hash_v4(otuple
);
159 rhash
= mtk_flow_hash_v4(rtuple
);
/* Deletion: overwrite both entries with INVALID state instead of BIND. */
169 if (type
== FLOW_OFFLOAD_DEL
) {
170 orig
.bfib1
.state
= INVALID
;
171 reply
.bfib1
.state
= INVALID
;
/* MAC rewrite: each direction egresses via the opposite hw path. */
176 mtk_foe_set_mac(&orig
, dest
->eth_src
, dest
->eth_dest
);
177 mtk_foe_set_mac(&reply
, src
->eth_src
, src
->eth_dest
);
/* Commit to hardware, then publish the software flow for keepalive
 * lookups (readers use rcu_dereference in mtk_offload_keepalive). */
180 mtk_foe_write(eth
, ohash
, &orig
);
181 mtk_foe_write(eth
, rhash
, &reply
);
182 rcu_assign_pointer(eth
->foe_flow_table
[ohash
], flow
);
183 rcu_assign_pointer(eth
->foe_flow_table
[rhash
], flow
);
185 if (type
== FLOW_OFFLOAD_DEL
)
191 #ifdef CONFIG_NET_MEDIATEK_HW_QOS
193 #define QDMA_TX_SCH_TX 0x1a14
/* Configure one of the two QDMA TX schedulers with a rate limit.
 * The rate is encoded mantissa/exponent style: a 7-bit mantissa with a
 * separate exponent field (computation not visible in this chunk).
 * NOTE(review): the declaration and assembly of 'val', the exp/shift loop
 * and the merge of 'val' into 'reg' (lines 199-214) are missing from this
 * view; 'reg' is masked below but only 'val' is visibly written back —
 * confirm the missing lines combine the two. */
195 static void mtk_ppe_scheduler(struct mtk_eth
*eth
, int id
, u32 rate
)
197 int exp
= 0, shift
= 0;
/* Read-modify-write of the shared scheduler register. */
198 u32 reg
= mtk_r32(eth
, QDMA_TX_SCH_TX
);
/* Rate mantissa (7 bits) goes into bits [10:4] of the per-scheduler field. */
209 val
|= (rate
& 0x7f) << 4;
/* Clear this scheduler's 16-bit field (shift selects scheduler 0 or 1). */
213 reg
&= ~(0xffff << shift
);
215 mtk_w32(eth
, val
, QDMA_TX_SCH_TX
);
218 #define QTX_CFG(x) (0x1800 + (x * 0x10))
219 #define QTX_SCH(x) (0x1804 + (x * 0x10))
/* Configure one QDMA TX queue: scheduler binding, WRR weight, reserved
 * buffer count and min/max rate limits (mantissa/exponent encoded).
 * NOTE(review): the declaration of 'reg', the bodies of the two
 * normalization loops, and the initial field-clearing of QTX_SCH/QTX_CFG
 * are not visible in this chunk. */
221 static void mtk_ppe_queue(struct mtk_eth
*eth
, int id
, int sched
, int weight
, int resv
, u32 min_rate
, u32 max_rate
)
223 int max_exp
= 0, min_exp
= 0;
/* Read-modify-write of this queue's scheduler register. */
229 reg
= mtk_r32(eth
, QTX_SCH(id
));
/* Normalize both rates to a 7-bit mantissa, bumping the exponent each
 * iteration (loop bodies not visible in this view). */
241 while (max_rate
> 127) {
246 while (min_rate
> 127) {
/* Field layout of QTX_SCH: min mantissa [26:20], min exponent [19:16],
 * weight [15:12], max mantissa [10:4], max exponent [3:0]. */
251 reg
|= (min_rate
& 0x7f) << 20;
252 reg
|= (min_exp
& 0xf) << 16;
253 reg
|= (weight
& 0xf) << 12;
254 reg
|= (max_rate
& 0x7f) << 4;
255 reg
|= max_exp
& 0xf;
256 mtk_w32(eth
, reg
, QTX_SCH(id
));
/* Reserved-buffer config: same value for both the SW (bits [15:8]) and
 * HW (bits [7:0]) reserve fields of QTX_CFG. */
259 reg
= mtk_r32(eth
, QTX_CFG(id
));
261 reg
|= (resv
<< 8) | resv
;
262 mtk_w32(eth
, reg
, QTX_CFG(id
));
/* Allocate the software RCU flow-lookup table and the DMA-coherent
 * hardware FOE table. Both are device-managed (devm_*/dmam_*) so they are
 * released automatically when the device is unbound.
 * NOTE(review): return statements and the GFP flags argument line of the
 * devm_kcalloc call are not visible in this chunk. */
266 static int mtk_init_foe_table(struct mtk_eth
*eth
)
/* One software flow pointer per hardware FOE entry. */
271 eth
->foe_flow_table
= devm_kcalloc(eth
->dev
, MTK_PPE_ENTRY_CNT
,
272 sizeof(*eth
->foe_flow_table
),
274 if (!eth
->foe_flow_table
)
277 /* map the FOE table */
/* NOTE(review): "ð" below is a mojibake of "&eth" (HTML entity
 * corruption of "&eth->foe_table_phys") — must be restored before this
 * compiles. */
278 eth
->foe_table
= dmam_alloc_coherent(eth
->dev
, MTK_PPE_TBL_SZ
,
279 ð
->foe_table_phys
, GFP_KERNEL
);
280 if (!eth
->foe_table
) {
281 dev_err(eth
->dev
, "failed to allocate foe table\n");
/* NOTE(review): BUG — foe_flow_table was allocated with devm_kcalloc, so
 * kfree() here is wrong: devm will free it again on detach (double free).
 * Either drop this kfree or use devm_kfree(eth->dev, ...). */
282 kfree(eth
->foe_flow_table
);
/* Bring up the PPE: allocate tables, program the table base/geometry,
 * hashing, aging, keep-alive and rate-limit registers, enable the engine
 * and steer GMAC traffic into it. Returns 0 on success (error paths and
 * several #ifdef lines are not visible in this chunk). */
290 static int mtk_ppe_start(struct mtk_eth
*eth
)
294 ret
= mtk_init_foe_table(eth
);
298 /* tell the PPE about the tables base address */
299 mtk_w32(eth
, eth
->foe_table_phys
, MTK_REG_PPE_TB_BASE
);
301 /* flush the table */
302 memset(eth
->foe_table
, 0, MTK_PPE_TBL_SZ
);
/* Table geometry: hash mode 1, 4K entries (mtk_m32 masked-write; the
 * opening line of this call is not visible in this chunk). */
306 MTK_PPE_TB_CFG_HASH_MODE_MASK
| MTK_PPE_TB_CFG_TBL_SZ_MASK
,
307 MTK_PPE_TB_CFG_HASH_MODE1
| MTK_PPE_TB_CFG_TBL_SZ_4K
,
310 /* set the default hashing seed */
311 mtk_w32(eth
, MTK_PPE_HASH_SEED
, MTK_REG_PPE_HASH_SEED
);
313 /* each foe entry is 64bytes and is setup by cpu forwarding*/
314 mtk_m32(eth
, MTK_PPE_CAH_CTRL_X_MODE
| MTK_PPE_TB_CFG_ENTRY_SZ_MASK
|
315 MTK_PPE_TB_CFG_SMA_MASK
,
316 MTK_PPE_TB_CFG_ENTRY_SZ_64B
| MTK_PPE_TB_CFG_SMA_FWD_CPU
,
/* Accept every IP protocol number for offload checking. */
320 mtk_w32(eth
, 0xFFFFFFFF, MTK_REG_PPE_IP_PROT_CHK
);
/* Cache control: pulse X_MODE (set then clear) to flush, then enable. */
323 mtk_m32(eth
, 0, MTK_PPE_CAH_CTRL_X_MODE
, MTK_REG_PPE_CAH_CTRL
);
324 mtk_m32(eth
, MTK_PPE_CAH_CTRL_X_MODE
, MTK_PPE_CAH_CTRL_EN
,
325 MTK_REG_PPE_CAH_CTRL
);
/* Enable IPv4 NAT/NAPT/fragment/GRE-keyed flow offload. */
328 mtk_m32(eth
, 0, MTK_PPE_FLOW_CFG_IPV4_NAT_FRAG_EN
|
329 MTK_PPE_FLOW_CFG_IPV4_NAPT_EN
| MTK_PPE_FLOW_CFG_IPV4_NAT_EN
|
330 MTK_PPE_FLOW_CFG_IPV4_GREK_EN
,
331 MTK_REG_PPE_FLOW_CFG
);
333 /* setup flow entry un/bind aging */
/* Enable aging for unbound, non-TCP/UDP, FIN, UDP and TCP entries
 * (the opening line of this mtk_m32 call is not visible here). */
335 MTK_PPE_TB_CFG_UNBD_AGE
| MTK_PPE_TB_CFG_NTU_AGE
|
336 MTK_PPE_TB_CFG_FIN_AGE
| MTK_PPE_TB_CFG_UDP_AGE
|
337 MTK_PPE_TB_CFG_TCP_AGE
,
/* Aging deltas: unbound entries, then bound NTU/UDP, then FIN/TCP. */
340 mtk_m32(eth
, MTK_PPE_UNB_AGE_MNP_MASK
| MTK_PPE_UNB_AGE_DLTA_MASK
,
341 MTK_PPE_UNB_AGE_MNP
| MTK_PPE_UNB_AGE_DLTA
,
342 MTK_REG_PPE_UNB_AGE
);
343 mtk_m32(eth
, MTK_PPE_BND_AGE0_NTU_DLTA_MASK
|
344 MTK_PPE_BND_AGE0_UDP_DLTA_MASK
,
345 MTK_PPE_BND_AGE0_NTU_DLTA
| MTK_PPE_BND_AGE0_UDP_DLTA
,
346 MTK_REG_PPE_BND_AGE0
);
347 mtk_m32(eth
, MTK_PPE_BND_AGE1_FIN_DLTA_MASK
|
348 MTK_PPE_BND_AGE1_TCP_DLTA_MASK
,
349 MTK_PPE_BND_AGE1_FIN_DLTA
| MTK_PPE_BND_AGE1_TCP_DLTA
,
350 MTK_REG_PPE_BND_AGE1
);
352 /* setup flow entry keep alive */
353 mtk_m32(eth
, MTK_PPE_TB_CFG_KA_MASK
, MTK_PPE_TB_CFG_KA
,
355 mtk_w32(eth
, MTK_PPE_KA_UDP
| MTK_PPE_KA_TCP
| MTK_PPE_KA_T
, MTK_REG_PPE_KA
);
357 /* setup flow entry rate limit */
358 mtk_w32(eth
, (0x3fff << 16) | 0x3fff, MTK_REG_PPE_BIND_LMT_0
);
359 mtk_w32(eth
, MTK_PPE_NTU_KA
| 0x3fff, MTK_REG_PPE_BIND_LMT_1
);
360 mtk_m32(eth
, MTK_PPE_BNDR_RATE_MASK
, 1, MTK_REG_PPE_BNDR
);
/* Turn the PPE on. */
363 mtk_m32(eth
, 0, MTK_PPE_GLO_CFG_EN
, MTK_REG_PPE_GLO_CFG
);
366 /* set the default forwarding port to QDMA */
/* NOTE(review): two writes with different values to the same register —
 * the #ifdef/#else lines between them are missing from this view; these
 * are presumably platform-conditional alternatives, not sequential. */
367 mtk_w32(eth
, 0x0, MTK_REG_PPE_DFT_CPORT
);
369 /* set the default forwarding port to QDMA */
370 mtk_w32(eth
, 0x55555555, MTK_REG_PPE_DFT_CPORT
);
373 /* allow packets with TTL=0 */
374 mtk_m32(eth
, MTK_PPE_GLO_CFG_TTL0_DROP
, 0, MTK_REG_PPE_GLO_CFG
);
376 /* send all traffic from gmac to the ppe */
377 mtk_m32(eth
, 0xffff, 0x4444, MTK_GDMA_FWD_CFG(0));
378 mtk_m32(eth
, 0xffff, 0x4444, MTK_GDMA_FWD_CFG(1));
380 dev_info(eth
->dev
, "PPE started\n");
382 #ifdef CONFIG_NET_MEDIATEK_HW_QOS
/* Default QoS: two 500 Mbit schedulers, queues 0/1 on scheduler 0 and
 * 8/9 on scheduler 1, weight 7, 32 reserved buffers, 250 Mbit min rate. */
383 mtk_ppe_scheduler(eth
, 0, 500000);
384 mtk_ppe_scheduler(eth
, 1, 500000);
385 mtk_ppe_queue(eth
, 0, 0, 7, 32, 250000, 0);
386 mtk_ppe_queue(eth
, 1, 0, 7, 32, 250000, 0);
387 mtk_ppe_queue(eth
, 8, 1, 7, 32, 250000, 0);
388 mtk_ppe_queue(eth
, 9, 1, 7, 32, 250000, 0);
/* Poll the PPE global-config BUSY bit until it clears, for at most one
 * second (HZ jiffies). On timeout, log and hard-reset the PPE block.
 * Returns 0 when idle; the timeout return value and the surrounding loop
 * construct are not visible in this chunk. */
394 static int mtk_ppe_busy_wait(struct mtk_eth
*eth
)
396 unsigned long t_start
= jiffies
;
/* Re-read the status each iteration; exit as soon as BUSY drops. */
400 r
= mtk_r32(eth
, MTK_REG_PPE_GLO_CFG
);
401 if (!(r
& MTK_PPE_GLO_CFG_BUSY
))
/* Give up after one second of polling. */
403 if (time_after(jiffies
, t_start
+ HZ
))
/* Sleep between polls rather than busy-spinning. */
405 usleep_range(10, 20);
/* Timeout path: report and reset the PPE via its reset controller. */
408 dev_err(eth
->dev
, "ppe: table busy timeout - resetting\n");
409 reset_control_reset(eth
->rst_ppe
);
/* Tear down the PPE: quiesce traffic, invalidate every FOE entry, disable
 * caching/offload/aging, optionally reset the PSE, and restore GDMA
 * forwarding. Loop variable and several return/#ifdef lines are not
 * visible in this chunk. */
414 static int mtk_ppe_stop(struct mtk_eth
*eth
)
419 /* discard all traffic while we disable the PPE */
420 mtk_m32(eth
, 0xffff, 0x7777, MTK_GDMA_FWD_CFG(0));
421 mtk_m32(eth
, 0xffff, 0x7777, MTK_GDMA_FWD_CFG(1));
/* Wait for any in-flight table operation to finish first. */
423 if (mtk_ppe_busy_wait(eth
))
426 /* invalidate all flow table entries */
427 for (i
= 0; i
< MTK_PPE_ENTRY_CNT
; i
++)
428 eth
->foe_table
[i
].bfib1
.state
= FOE_STATE_INVALID
;
430 /* disable caching */
/* Pulse X_MODE to flush the cache, then clear both X_MODE and EN. */
431 mtk_m32(eth
, 0, MTK_PPE_CAH_CTRL_X_MODE
, MTK_REG_PPE_CAH_CTRL
);
432 mtk_m32(eth
, MTK_PPE_CAH_CTRL_X_MODE
| MTK_PPE_CAH_CTRL_EN
, 0,
433 MTK_REG_PPE_CAH_CTRL
);
435 /* cache flush has to happen before hnat is disabled */
436 mtk_m32(eth
, MTK_PPE_GLO_CFG_EN
, 0, MTK_REG_PPE_GLO_CFG
);
/* Disable all IPv4 offload features (the opening mtk_m32 line of this
 * call is not visible in this chunk). */
440 MTK_PPE_FLOW_CFG_IPV4_NAT_FRAG_EN
|
441 MTK_PPE_FLOW_CFG_IPV4_NAPT_EN
| MTK_PPE_FLOW_CFG_IPV4_NAT_EN
|
442 MTK_PPE_FLOW_CFG_FUC_FOE
| MTK_PPE_FLOW_CFG_FMC_FOE
,
443 0, MTK_REG_PPE_FLOW_CFG
);
445 /* disable FOE aging */
447 MTK_PPE_TB_CFG_FIN_AGE
| MTK_PPE_TB_CFG_UDP_AGE
|
448 MTK_PPE_TB_CFG_TCP_AGE
| MTK_PPE_TB_CFG_UNBD_AGE
|
449 MTK_PPE_TB_CFG_NTU_AGE
, MTK_REG_PPE_TB_CFG
);
/* Diagnostic reads of PSE fill-level registers 0x100/0x10c.
 * NOTE(review): the magic offsets and the comparison below are not
 * explained anywhere visible — presumably a check for stuck PSE FIFOs. */
451 r1
= mtk_r32(eth
, 0x100);
452 r2
= mtk_r32(eth
, 0x10c);
454 dev_info(eth
->dev
, "0x100 = 0x%x, 0x10c = 0x%x\n", r1
, r2
);
456 if (((r1
& 0xff00) >> 0x8) >= (r1
& 0xff) ||
457 ((r1
& 0xff00) >> 0x8) >= (r2
& 0xff)) {
458 dev_info(eth
->dev
, "reset pse\n");
459 mtk_w32(eth
, 0x1, 0x4);
462 /* set the foe entry base address to 0 */
463 mtk_w32(eth
, 0, MTK_REG_PPE_TB_BASE
);
465 if (mtk_ppe_busy_wait(eth
))
468 /* send all traffic back to the DMA engine */
/* NOTE(review): two pairs of GDMA writes with different values (0x0 vs
 * 0x5555) — the #ifdef/#else lines between them are missing from this
 * view; presumably platform-conditional alternatives. */
470 mtk_m32(eth
, 0xffff, 0x0, MTK_GDMA_FWD_CFG(0));
471 mtk_m32(eth
, 0xffff, 0x0, MTK_GDMA_FWD_CFG(1));
473 mtk_m32(eth
, 0xffff, 0x5555, MTK_GDMA_FWD_CFG(0));
474 mtk_m32(eth
, 0xffff, 0x5555, MTK_GDMA_FWD_CFG(1));
/* Refresh the software timeout of an offloaded flow when the hardware
 * reports a keep-alive event for FOE slot 'hash'. Must run inside an RCU
 * read-side section (rcu_read_lock/unlock lines are not visible here).
 * NOTE(review): no NULL check on 'flow' is visible before the dereference
 * below — confirm the missing line 485 guards it, otherwise a keepalive
 * for an empty slot would oops. */
479 static void mtk_offload_keepalive(struct fe_priv
*eth
, unsigned int hash
)
481 struct flow_offload
*flow
;
/* RCU-protected lookup of the flow published by mtk_flow_offload(). */
484 flow
= rcu_dereference(eth
->foe_flow_table
[hash
]);
/* Push the software expiry 30 seconds into the future. */
486 flow
->timeout
= jiffies
+ 30 * HZ
;
/* Inspect the RX DMA descriptor word 4 and handle PPE-to-CPU reasons:
 * keep-alive variants refresh the matching flow's timeout; packet-sampling
 * handling and the function's return values are not visible in this chunk. */
490 int mtk_offload_check_rx(struct fe_priv
*eth
, struct sk_buff
*skb
, u32 rxd4
)
/* The CPU reason field says why the PPE punted this packet to us. */
494 switch (FIELD_GET(MTK_RXD4_CPU_REASON
, rxd4
)) {
/* All three keep-alive flavors are handled identically. */
495 case MTK_CPU_REASON_KEEPALIVE_UC_OLD_HDR
:
496 case MTK_CPU_REASON_KEEPALIVE_MC_NEW_HDR
:
497 case MTK_CPU_REASON_KEEPALIVE_DUP_OLD_HDR
:
/* The descriptor also carries the FOE entry index of the flow. */
498 hash
= FIELD_GET(MTK_RXD4_FOE_ENTRY
, rxd4
);
499 mtk_offload_keepalive(eth
, hash
);
501 case MTK_CPU_REASON_PACKET_SAMPLING
:
/* Driver entry point: start the PPE engine, then register its debugfs
 * interface. Error checks between the calls and the final return are not
 * visible in this chunk. */
508 int mtk_ppe_probe(struct mtk_eth
*eth
)
512 err
= mtk_ppe_start(eth
);
516 err
= mtk_ppe_debugfs_init(eth
);
523 void mtk_ppe_remove(struct mtk_eth
*eth
)