/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2018 John Crispin <john@phrozen.org>
 */

#include "mtk_offload.h"

/* FOE entry states used in bfib1.state */
#define INVALID		0
#define UNBIND		1
#define BIND		2
#define FIN		3

/* FOE entry packet types */
#define IPV4_HNAPT	0
#define IPV4_HNAT	1
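
/* The PPE (Packet Processing Engine) forwards flows in hardware using a
 * table of FOE ("Flow Offload Engine") entries kept in DMA-coherent memory.
 * The helpers below hash a flow tuple to a table slot, prepare an entry and
 * write it into the table that the engine walks.
 */
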
static u32
mtk_flow_hash_v4(struct flow_offload_tuple *tuple)
{
	u32 ports = ntohs(tuple->src_port) << 16 | ntohs(tuple->dst_port);
	u32 src = ntohl(tuple->dst_v4.s_addr);
	u32 dst = ntohl(tuple->src_v4.s_addr);
	u32 hash = (ports & src) | ((~ports) & dst);
	u32 hash_23_0 = hash & 0xffffff;
	u32 hash_31_24 = hash & 0xff000000;

	hash = ports ^ src ^ dst ^ ((hash_23_0 << 8) | (hash_31_24 >> 24));
	hash = ((hash & 0xffff0000) >> 16) ^ (hash & 0xfffff);

	/* two entries per bucket: bound the hash and point it at the
	 * first slot of the bucket
	 */
	hash &= 0x7ff;
	hash *= 2;

	return hash;
}
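
/* Fill a HNAPT (NAT + port translation) entry: the pre-NAT addresses and
 * ports come from @tuple, the post-NAT ones from the reverse direction in
 * @dest_tuple. Illustrative example: for a LAN->WAN flow
 * 192.168.1.10:5000 -> 8.8.8.8:53, the reply tuple is
 * 8.8.8.8:53 -> <wan-ip>:<nat-port>, so new_sip/new_sport become the WAN
 * address and the NAT source port.
 */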
static int
mtk_foe_prepare_v4(struct mtk_foe_entry *entry,
		   struct flow_offload_tuple *tuple,
		   struct flow_offload_tuple *dest_tuple,
		   struct flow_offload_hw_path *src,
		   struct flow_offload_hw_path *dest)
{
	int is_mcast = !!is_multicast_ether_addr(dest->eth_dest);

	if (tuple->l4proto == IPPROTO_UDP)
		entry->ipv4_hnapt.bfib1.udp = 1;

	entry->ipv4_hnapt.etype = htons(ETH_P_IP);
	entry->ipv4_hnapt.bfib1.pkt_type = IPV4_HNAPT;
	entry->ipv4_hnapt.iblk2.fqos = 0;
	entry->ipv4_hnapt.bfib1.ttl = 1;
	entry->ipv4_hnapt.bfib1.cah = 1;
	entry->ipv4_hnapt.bfib1.ka = 1;
	entry->ipv4_hnapt.iblk2.mcast = is_mcast;
	entry->ipv4_hnapt.iblk2.dscp = 0;
	entry->ipv4_hnapt.iblk2.port_mg = 0x3f;
	entry->ipv4_hnapt.iblk2.port_ag = 0x1f;
#ifdef CONFIG_NET_MEDIATEK_HW_QOS
	entry->ipv4_hnapt.iblk2.qid = 1;
	entry->ipv4_hnapt.iblk2.fqos = 1;
#endif
#ifdef CONFIG_RALINK
	entry->ipv4_hnapt.iblk2.dp = 1;
	if ((dest->flags & FLOW_OFFLOAD_PATH_VLAN) && (dest->vlan_id > 1))
		entry->ipv4_hnapt.iblk2.qid += 8;
#else
	/* the destination port index is derived from the netdev name (ethX) */
	entry->ipv4_hnapt.iblk2.dp = (dest->dev->name[3] - '0') + 1;
#endif

	entry->ipv4_hnapt.sip = ntohl(tuple->src_v4.s_addr);
	entry->ipv4_hnapt.dip = ntohl(tuple->dst_v4.s_addr);
	entry->ipv4_hnapt.sport = ntohs(tuple->src_port);
	entry->ipv4_hnapt.dport = ntohs(tuple->dst_port);

	entry->ipv4_hnapt.new_sip = ntohl(dest_tuple->dst_v4.s_addr);
	entry->ipv4_hnapt.new_dip = ntohl(dest_tuple->src_v4.s_addr);
	entry->ipv4_hnapt.new_sport = ntohs(dest_tuple->dst_port);
	entry->ipv4_hnapt.new_dport = ntohs(dest_tuple->src_port);

	entry->bfib1.state = BIND;

	if (dest->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		entry->bfib1.psn = 1;
		entry->ipv4_hnapt.etype = htons(ETH_P_PPP_SES);
		entry->ipv4_hnapt.pppoe_id = dest->pppoe_sid;
	}

	if (dest->flags & FLOW_OFFLOAD_PATH_VLAN) {
		entry->ipv4_hnapt.vlan1 = dest->vlan_id;
		entry->bfib1.vlan_layer = 1;

		switch (dest->vlan_proto) {
		case htons(ETH_P_8021Q):
			entry->ipv4_hnapt.bfib1.vpm = 1;
			break;
		case htons(ETH_P_8021AD):
			entry->ipv4_hnapt.bfib1.vpm = 2;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}
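
/* MAC addresses are stored byte-swapped in the FOE entry, hence the
 * swab32/swab16 on the 4-byte and 2-byte halves of each address.
 */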
static void
mtk_foe_set_mac(struct mtk_foe_entry *entry, u8 *smac, u8 *dmac)
{
	entry->ipv4_hnapt.dmac_hi = swab32(*((u32 *) dmac));
	entry->ipv4_hnapt.dmac_lo = swab16(*((u16 *) &dmac[4]));
	entry->ipv4_hnapt.smac_hi = swab32(*((u32 *) smac));
	entry->ipv4_hnapt.smac_lo = swab16(*((u16 *) &smac[4]));
}
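
/* Returns 1 if the slot at @hash is already bound to a flow, i.e. writing
 * here would collide with a live entry.
 */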
static int
mtk_check_hashcollision(struct mtk_eth *eth, u32 hash)
{
	struct mtk_foe_entry entry = ((struct mtk_foe_entry *)eth->foe_table)[hash];

	return entry.bfib1.state == BIND;
}
static void
mtk_foe_write(struct mtk_eth *eth, u32 hash,
	      struct mtk_foe_entry *entry)
{
	struct mtk_foe_entry *table = (struct mtk_foe_entry *)eth->foe_table;

	memcpy(&table[hash], entry, sizeof(*entry));
}
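
/* Install (or, on FLOW_OFFLOAD_DEL, invalidate) the hardware entries for a
 * flow. Each flow occupies two slots, one per direction, located by hashing
 * the original and the reply tuple independently.
 */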
int mtk_flow_offload(struct mtk_eth *eth,
		     enum flow_offload_type type,
		     struct flow_offload *flow,
		     struct flow_offload_hw_path *src,
		     struct flow_offload_hw_path *dest)
{
	struct flow_offload_tuple *otuple = &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple;
	struct flow_offload_tuple *rtuple = &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple;
	u32 time_stamp = mtk_r32(eth, 0x0010) & 0x7fff;
	u32 ohash, rhash;
	struct mtk_foe_entry orig = {
		.bfib1.time_stamp = time_stamp,
	};
	struct mtk_foe_entry reply = {
		.bfib1.time_stamp = time_stamp,
	};

	if (otuple->l4proto != IPPROTO_TCP && otuple->l4proto != IPPROTO_UDP)
		return -EINVAL;

	switch (otuple->l3proto) {
	case AF_INET:
		if (mtk_foe_prepare_v4(&orig, otuple, rtuple, src, dest) ||
		    mtk_foe_prepare_v4(&reply, rtuple, otuple, dest, src))
			return -EINVAL;

		ohash = mtk_flow_hash_v4(otuple);
		rhash = mtk_flow_hash_v4(rtuple);
		break;

	default:
		return -EINVAL;
	}

	if (type == FLOW_OFFLOAD_DEL) {
		orig.bfib1.state = INVALID;
		reply.bfib1.state = INVALID;
	}

	/* two-way hash: if the first slot of a bucket is already bound,
	 * shift the entry to the adjacent slot
	 */
	if (mtk_check_hashcollision(eth, ohash))
		ohash += 1;
	if (mtk_check_hashcollision(eth, rhash))
		rhash += 1;

	mtk_foe_set_mac(&orig, dest->eth_src, dest->eth_dest);
	mtk_foe_set_mac(&reply, src->eth_src, src->eth_dest);
	mtk_foe_write(eth, ohash, &orig);
	mtk_foe_write(eth, rhash, &reply);
	rcu_assign_pointer(eth->foe_flow_table[ohash], flow);
	rcu_assign_pointer(eth->foe_flow_table[rhash], flow);

	if (type == FLOW_OFFLOAD_DEL) {
		/* drop the software mapping so the keepalive path cannot
		 * dereference a freed flow
		 */
		rcu_assign_pointer(eth->foe_flow_table[ohash], NULL);
		rcu_assign_pointer(eth->foe_flow_table[rhash], NULL);
	}

	return 0;
}

#ifdef CONFIG_NET_MEDIATEK_HW_QOS

#define QDMA_TX_SCH_TX		0x1a14
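
/* Program one of the two QDMA TX schedulers with a maximum rate. The rate
 * is encoded as a 7-bit mantissa and a 4-bit power-of-ten exponent, e.g.
 * 500000 becomes 50 * 10^4 after four divisions by 10.
 */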
static void mtk_ppe_scheduler(struct mtk_eth *eth, int id, u32 rate)
{
	int exp = 0, shift = 0;
	u32 reg = mtk_r32(eth, QDMA_TX_SCH_TX);
	u32 val = 0;

	if (id)
		shift = 16;

	/* normalise the rate into a 7-bit mantissa and a base-10 exponent */
	while (rate > 127) {
		rate /= 10;
		exp++;
	}

	val |= (rate & 0x7f) << 4;
	val |= exp & 0xf;

	reg &= ~(0xffff << shift);
	reg |= val << shift;
	mtk_w32(eth, reg, QDMA_TX_SCH_TX);
}

#define QTX_CFG(x)	(0x1800 + ((x) * 0x10))
#define QTX_SCH(x)	(0x1804 + ((x) * 0x10))
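
/* Configure one QDMA TX queue: attach it to scheduler @sched and set its
 * weight, reserved buffer count and min/max rates (same mantissa-times-
 * power-of-ten encoding as the scheduler above).
 */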
static void mtk_ppe_queue(struct mtk_eth *eth, int id, int sched, int weight,
			  int resv, u32 min_rate, u32 max_rate)
{
	int max_exp = 0, min_exp = 0;
	u32 reg;

	reg = mtk_r32(eth, QTX_SCH(id));
	reg &= 0x70000000;

	/* select the scheduler and enable min/max rate limiting */
	if (sched)
		reg |= BIT(31);
	if (min_rate)
		reg |= BIT(27);
	if (max_rate)
		reg |= BIT(11);

	while (max_rate > 127) {
		max_rate /= 10;
		max_exp++;
	}

	while (min_rate > 127) {
		min_rate /= 10;
		min_exp++;
	}

	reg |= (min_rate & 0x7f) << 20;
	reg |= (min_exp & 0xf) << 16;
	reg |= (weight & 0xf) << 12;
	reg |= (max_rate & 0x7f) << 4;
	reg |= max_exp & 0xf;
	mtk_w32(eth, reg, QTX_SCH(id));

	reg = mtk_r32(eth, QTX_CFG(id));
	reg &= ~0xffff;
	reg |= (resv << 8) | resv;
	mtk_w32(eth, reg, QTX_CFG(id));
}

static int mtk_init_foe_table(struct mtk_eth *eth)
{
	eth->foe_flow_table = devm_kcalloc(eth->dev, MTK_PPE_ENTRY_CNT,
					   sizeof(*eth->foe_flow_table),
					   GFP_KERNEL);
	if (!eth->foe_flow_table)
		return -ENOMEM;

	/* map the FOE table */
	eth->foe_table = dmam_alloc_coherent(eth->dev, MTK_PPE_TBL_SZ,
					     &eth->foe_table_phys, GFP_KERNEL);
	if (!eth->foe_table) {
		dev_err(eth->dev, "failed to allocate foe table\n");
		devm_kfree(eth->dev, eth->foe_flow_table);
		return -ENOMEM;
	}

	return 0;
}
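
/* Bring up the PPE: allocate and zero the FOE table, point the engine at
 * it, then configure hashing, entry format, aging, keep-alive and binding
 * rate limits before finally enabling the engine.
 */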
static int mtk_ppe_start(struct mtk_eth *eth)
{
	int ret;

	ret = mtk_init_foe_table(eth);
	if (ret)
		return ret;

	/* tell the PPE about the tables base address */
	mtk_w32(eth, eth->foe_table_phys, MTK_REG_PPE_TB_BASE);

	/* flush the table */
	memset(eth->foe_table, 0, MTK_PPE_TBL_SZ);

	/* setup hashing */
	mtk_m32(eth,
		MTK_PPE_TB_CFG_HASH_MODE_MASK | MTK_PPE_TB_CFG_TBL_SZ_MASK,
		MTK_PPE_TB_CFG_HASH_MODE1 | MTK_PPE_TB_CFG_TBL_SZ_4K,
		MTK_REG_PPE_TB_CFG);

	/* set the default hashing seed */
	mtk_w32(eth, MTK_PPE_HASH_SEED, MTK_REG_PPE_HASH_SEED);

	/* each foe entry is 64 bytes and is setup by cpu forwarding */
	mtk_m32(eth, MTK_PPE_CAH_CTRL_X_MODE | MTK_PPE_TB_CFG_ENTRY_SZ_MASK |
		MTK_PPE_TB_CFG_SMA_MASK,
		MTK_PPE_TB_CFG_ENTRY_SZ_64B | MTK_PPE_TB_CFG_SMA_FWD_CPU,
		MTK_REG_PPE_TB_CFG);

	/* check all IP protocols */
	mtk_w32(eth, 0xFFFFFFFF, MTK_REG_PPE_IP_PROT_CHK);

	/* flush the FOE cache, then re-enable it */
	mtk_m32(eth, 0, MTK_PPE_CAH_CTRL_X_MODE, MTK_REG_PPE_CAH_CTRL);
	mtk_m32(eth, MTK_PPE_CAH_CTRL_X_MODE, MTK_PPE_CAH_CTRL_EN,
		MTK_REG_PPE_CAH_CTRL);

	/* enable IPv4 NAT, NAPT, fragment and GRE keying */
	mtk_m32(eth, 0, MTK_PPE_FLOW_CFG_IPV4_NAT_FRAG_EN |
		MTK_PPE_FLOW_CFG_IPV4_NAPT_EN | MTK_PPE_FLOW_CFG_IPV4_NAT_EN |
		MTK_PPE_FLOW_CFG_IPV4_GREK_EN,
		MTK_REG_PPE_FLOW_CFG);

	/* setup flow entry un/bind aging */
	mtk_m32(eth, 0,
		MTK_PPE_TB_CFG_UNBD_AGE | MTK_PPE_TB_CFG_NTU_AGE |
		MTK_PPE_TB_CFG_FIN_AGE | MTK_PPE_TB_CFG_UDP_AGE |
		MTK_PPE_TB_CFG_TCP_AGE,
		MTK_REG_PPE_TB_CFG);

	mtk_m32(eth, MTK_PPE_UNB_AGE_MNP_MASK | MTK_PPE_UNB_AGE_DLTA_MASK,
		MTK_PPE_UNB_AGE_MNP | MTK_PPE_UNB_AGE_DLTA,
		MTK_REG_PPE_UNB_AGE);
	mtk_m32(eth, MTK_PPE_BND_AGE0_NTU_DLTA_MASK |
		MTK_PPE_BND_AGE0_UDP_DLTA_MASK,
		MTK_PPE_BND_AGE0_NTU_DLTA | MTK_PPE_BND_AGE0_UDP_DLTA,
		MTK_REG_PPE_BND_AGE0);
	mtk_m32(eth, MTK_PPE_BND_AGE1_FIN_DLTA_MASK |
		MTK_PPE_BND_AGE1_TCP_DLTA_MASK,
		MTK_PPE_BND_AGE1_FIN_DLTA | MTK_PPE_BND_AGE1_TCP_DLTA,
		MTK_REG_PPE_BND_AGE1);

	/* setup flow entry keep alive */
	mtk_m32(eth, MTK_PPE_TB_CFG_KA_MASK, MTK_PPE_TB_CFG_KA,
		MTK_REG_PPE_TB_CFG);
	mtk_w32(eth, MTK_PPE_KA_UDP | MTK_PPE_KA_TCP | MTK_PPE_KA_T,
		MTK_REG_PPE_KA);

	/* setup flow entry rate limit */
	mtk_w32(eth, (0x3fff << 16) | 0x3fff, MTK_REG_PPE_BIND_LMT_0);
	mtk_w32(eth, MTK_PPE_NTU_KA | 0x3fff, MTK_REG_PPE_BIND_LMT_1);
	mtk_m32(eth, MTK_PPE_BNDR_RATE_MASK, 1, MTK_REG_PPE_BNDR);

	/* enable the PPE */
	mtk_m32(eth, 0, MTK_PPE_GLO_CFG_EN, MTK_REG_PPE_GLO_CFG);

#ifdef CONFIG_RALINK
	/* set the default forwarding port to PDMA */
	mtk_w32(eth, 0x0, MTK_REG_PPE_DFT_CPORT);
#else
	/* set the default forwarding port to QDMA */
	mtk_w32(eth, 0x55555555, MTK_REG_PPE_DFT_CPORT);
#endif

	/* drop packets with TTL=0 */
	mtk_m32(eth, 0, MTK_PPE_GLO_CFG_TTL0_DROP, MTK_REG_PPE_GLO_CFG);

	/* send all traffic from gmac to the ppe */
	mtk_m32(eth, 0xffff, 0x4444, MTK_GDMA_FWD_CFG(0));
	mtk_m32(eth, 0xffff, 0x4444, MTK_GDMA_FWD_CFG(1));

	dev_info(eth->dev, "PPE started\n");

#ifdef CONFIG_NET_MEDIATEK_HW_QOS
	mtk_ppe_scheduler(eth, 0, 500000);
	mtk_ppe_scheduler(eth, 1, 500000);
	mtk_ppe_queue(eth, 0, 0, 7, 32, 250000, 0);
	mtk_ppe_queue(eth, 1, 0, 7, 32, 250000, 0);
	mtk_ppe_queue(eth, 8, 1, 7, 32, 250000, 0);
	mtk_ppe_queue(eth, 9, 1, 7, 32, 250000, 0);
#endif

	return 0;
}
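
/* Poll the global config register until the PPE leaves its busy state;
 * give up after one second and reset the block.
 */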
static int mtk_ppe_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;
	u32 r = 0;

	while (1) {
		r = mtk_r32(eth, MTK_REG_PPE_GLO_CFG);
		if (!(r & MTK_PPE_GLO_CFG_BUSY))
			return 0;
		if (time_after(jiffies, t_start + HZ))
			break;
		usleep_range(10, 20);
	}

	dev_err(eth->dev, "ppe: table busy timeout - resetting\n");
	reset_control_reset(eth->rst_ppe);

	return -ETIMEDOUT;
}

static int mtk_ppe_stop(struct mtk_eth *eth)
{
	u32 r1 = 0, r2 = 0;
	int i;

	/* discard all traffic while we disable the PPE */
	mtk_m32(eth, 0xffff, 0x7777, MTK_GDMA_FWD_CFG(0));
	mtk_m32(eth, 0xffff, 0x7777, MTK_GDMA_FWD_CFG(1));

	if (mtk_ppe_busy_wait(eth))
		return -ETIMEDOUT;

	/* invalidate all flow table entries */
	for (i = 0; i < MTK_PPE_ENTRY_CNT; i++)
		eth->foe_table[i].bfib1.state = FOE_STATE_INVALID;

	/* disable caching */
	mtk_m32(eth, 0, MTK_PPE_CAH_CTRL_X_MODE, MTK_REG_PPE_CAH_CTRL);
	mtk_m32(eth, MTK_PPE_CAH_CTRL_X_MODE | MTK_PPE_CAH_CTRL_EN, 0,
		MTK_REG_PPE_CAH_CTRL);

	/* the cache has to be flushed before hnat is disabled */
	mtk_m32(eth, MTK_PPE_GLO_CFG_EN, 0, MTK_REG_PPE_GLO_CFG);

	/* disable IPv4 NAT flow handling */
	mtk_m32(eth,
		MTK_PPE_FLOW_CFG_IPV4_NAT_FRAG_EN |
		MTK_PPE_FLOW_CFG_IPV4_NAPT_EN | MTK_PPE_FLOW_CFG_IPV4_NAT_EN |
		MTK_PPE_FLOW_CFG_FUC_FOE | MTK_PPE_FLOW_CFG_FMC_FOE,
		0, MTK_REG_PPE_FLOW_CFG);

	/* disable FOE aging */
	mtk_m32(eth,
		MTK_PPE_TB_CFG_FIN_AGE | MTK_PPE_TB_CFG_UDP_AGE |
		MTK_PPE_TB_CFG_TCP_AGE | MTK_PPE_TB_CFG_UNBD_AGE |
		MTK_PPE_TB_CFG_NTU_AGE, 0, MTK_REG_PPE_TB_CFG);

	r1 = mtk_r32(eth, 0x100);
	r2 = mtk_r32(eth, 0x10c);

	dev_info(eth->dev, "0x100 = 0x%x, 0x10c = 0x%x\n", r1, r2);

	if (((r1 & 0xff00) >> 0x8) >= (r1 & 0xff) ||
	    ((r1 & 0xff00) >> 0x8) >= (r2 & 0xff)) {
		dev_info(eth->dev, "reset pse\n");
		mtk_w32(eth, 0x1, 0x4);
	}

	/* set the foe entry base address to 0 */
	mtk_w32(eth, 0, MTK_REG_PPE_TB_BASE);

	if (mtk_ppe_busy_wait(eth))
		return -ETIMEDOUT;

	/* send all traffic back to the DMA engine */
#ifdef CONFIG_RALINK
	mtk_m32(eth, 0xffff, 0x0, MTK_GDMA_FWD_CFG(0));
	mtk_m32(eth, 0xffff, 0x0, MTK_GDMA_FWD_CFG(1));
#else
	mtk_m32(eth, 0xffff, 0x5555, MTK_GDMA_FWD_CFG(0));
	mtk_m32(eth, 0xffff, 0x5555, MTK_GDMA_FWD_CFG(1));
#endif

	return 0;
}
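
/* A hardware keep-alive was seen for the FOE entry at @hash: push the
 * software flow timeout out so conntrack does not expire an offloaded flow
 * that is still passing traffic.
 */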
static void mtk_offload_keepalive(struct fe_priv *eth, unsigned int hash)
{
	struct flow_offload *flow;

	rcu_read_lock();
	flow = rcu_dereference(eth->foe_flow_table[hash]);
	if (flow)
		flow->timeout = jiffies + 30 * HZ;
	rcu_read_unlock();
}
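
/* Inspect the CPU reason field of RX descriptor word 4. Keep-alive and
 * sampled packets are consumed here (a non-zero return tells the caller
 * to drop the skb); everything else takes the normal RX path.
 */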
int mtk_offload_check_rx(struct fe_priv *eth, struct sk_buff *skb, u32 rxd4)
{
	unsigned int hash;

	switch (FIELD_GET(MTK_RXD4_CPU_REASON, rxd4)) {
	case MTK_CPU_REASON_KEEPALIVE_UC_OLD_HDR:
	case MTK_CPU_REASON_KEEPALIVE_MC_NEW_HDR:
	case MTK_CPU_REASON_KEEPALIVE_DUP_OLD_HDR:
		hash = FIELD_GET(MTK_RXD4_FOE_ENTRY, rxd4);
		mtk_offload_keepalive(eth, hash);
		return -1;
	case MTK_CPU_REASON_PACKET_SAMPLING:
		return -1;
	default:
		return 0;
	}
}

int mtk_ppe_probe(struct mtk_eth *eth)
{
	int err;

	err = mtk_ppe_start(eth);
	if (err)
		return err;

	err = mtk_ppe_debugfs_init(eth);
	if (err)
		return err;

	return 0;
}

void mtk_ppe_remove(struct mtk_eth *eth)
{
	mtk_ppe_stop(eth);
}