1 From: Felix Fietkau <nbd@nbd.name>
2 Date: Thu, 23 Mar 2023 11:05:22 +0100
3 Subject: [PATCH] net: ethernet: mediatek: fix ppe flow accounting for L2 flows
6 For L2 flows, the packet/byte counters should report the sum of the
7 counters of their subflows, both current and expired.
8 In order to make this work, change the way that accounting data is tracked.
9 Reset counters when a flow enters bind. Once it expires (or enters unbind),
10 store the last counter value in struct mtk_flow_entry.
12 Signed-off-by: Felix Fietkau <nbd@nbd.name>
15 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c
16 +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
17 @@ -80,9 +80,9 @@ static int mtk_ppe_mib_wait_busy(struct
21 - ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val,
22 - !(val & MTK_PPE_MIB_SER_CR_ST),
23 - 20, MTK_PPE_WAIT_TIMEOUT_US);
24 + ret = readl_poll_timeout_atomic(ppe->base + MTK_PPE_MIB_SER_CR, val,
25 + !(val & MTK_PPE_MIB_SER_CR_ST),
26 + 20, MTK_PPE_WAIT_TIMEOUT_US);
29 dev_err(ppe->dev, "MIB table busy");
30 @@ -90,18 +90,32 @@ static int mtk_ppe_mib_wait_busy(struct
34 -static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets)
35 +static inline struct mtk_foe_accounting *
36 +mtk_ppe_acct_data(struct mtk_ppe *ppe, u16 index)
38 + if (!ppe->acct_table)
41 + return ppe->acct_table + index * sizeof(struct mtk_foe_accounting);
44 +struct mtk_foe_accounting *mtk_ppe_mib_entry_read(struct mtk_ppe *ppe, u16 index)
46 u32 byte_cnt_low, byte_cnt_high, pkt_cnt_low, pkt_cnt_high;
47 u32 val, cnt_r0, cnt_r1, cnt_r2;
48 + struct mtk_foe_accounting *acct;
51 val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST;
52 ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val);
54 + acct = mtk_ppe_acct_data(ppe, index);
58 ret = mtk_ppe_mib_wait_busy(ppe);
63 cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
64 cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
65 @@ -111,10 +125,11 @@ static int mtk_mib_entry_read(struct mtk
66 byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
67 pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
68 pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
69 - *bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
70 - *packets = (pkt_cnt_high << 16) | pkt_cnt_low;
73 + acct->bytes += ((u64)byte_cnt_high << 32) | byte_cnt_low;
74 + acct->packets += (pkt_cnt_high << 16) | pkt_cnt_low;
79 static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
80 @@ -510,13 +525,6 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
81 hwe->ib1 &= ~MTK_FOE_IB1_STATE;
82 hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
84 - if (ppe->accounting) {
85 - struct mtk_foe_accounting *acct;
87 - acct = ppe->acct_table + entry->hash * sizeof(*acct);
94 @@ -540,8 +548,10 @@ static int __mtk_foe_entry_idle_time(str
98 -mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
99 +mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
100 + u64 *packets, u64 *bytes)
102 + struct mtk_foe_accounting *acct;
103 struct mtk_foe_entry foe = {};
104 struct mtk_foe_entry *hwe;
105 u16 hash = entry->hash;
106 @@ -555,16 +565,29 @@ mtk_flow_entry_update(struct mtk_ppe *pp
107 memcpy(&foe, hwe, len);
109 if (!mtk_flow_entry_match(ppe->eth, entry, &foe, len) ||
110 - FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND)
111 + FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND) {
112 + acct = mtk_ppe_acct_data(ppe, hash);
114 + entry->packets += acct->packets;
115 + entry->bytes += acct->bytes;
121 entry->data.ib1 = foe.ib1;
122 + acct = mtk_ppe_mib_entry_read(ppe, hash);
124 + *packets += acct->packets;
125 + *bytes += acct->bytes;
132 -mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
133 +mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
134 + u64 *packets, u64 *bytes)
136 u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
137 struct mtk_flow_entry *cur;
138 @@ -575,7 +598,9 @@ mtk_flow_entry_update_l2(struct mtk_ppe
139 hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, list) {
142 - if (!mtk_flow_entry_update(ppe, cur)) {
143 + if (!mtk_flow_entry_update(ppe, cur, packets, bytes)) {
144 + entry->packets += cur->packets;
145 + entry->bytes += cur->bytes;
146 __mtk_foe_entry_clear(ppe, entry, false);
149 @@ -590,10 +615,31 @@ mtk_flow_entry_update_l2(struct mtk_ppe
153 +void mtk_foe_entry_get_stats(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
154 + int *idle, u64 *packets, u64 *bytes)
159 + spin_lock_bh(&ppe_lock);
161 + if (entry->type == MTK_FLOW_TYPE_L2)
162 + mtk_flow_entry_update_l2(ppe, entry, packets, bytes);
164 + mtk_flow_entry_update(ppe, entry, packets, bytes);
166 + *packets += entry->packets;
167 + *bytes += entry->bytes;
168 + *idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
170 + spin_unlock_bh(&ppe_lock);
174 __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
177 + struct mtk_foe_accounting *acct;
178 struct mtk_eth *eth = ppe->eth;
179 u16 timestamp = mtk_eth_timestamp(eth);
180 struct mtk_foe_entry *hwe;
181 @@ -618,6 +664,12 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
185 + acct = mtk_ppe_mib_entry_read(ppe, hash);
191 mtk_ppe_cache_clear(ppe);
194 @@ -782,21 +834,6 @@ out:
195 spin_unlock_bh(&ppe_lock);
198 -int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
202 - spin_lock_bh(&ppe_lock);
203 - if (entry->type == MTK_FLOW_TYPE_L2)
204 - mtk_flow_entry_update_l2(ppe, entry);
206 - mtk_flow_entry_update(ppe, entry);
207 - idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
208 - spin_unlock_bh(&ppe_lock);
213 int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
216 @@ -824,32 +861,6 @@ int mtk_ppe_prepare_reset(struct mtk_ppe
217 return mtk_ppe_wait_busy(ppe);
220 -struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
221 - struct mtk_foe_accounting *diff)
223 - struct mtk_foe_accounting *acct;
224 - int size = sizeof(struct mtk_foe_accounting);
225 - u64 bytes, packets;
227 - if (!ppe->accounting)
230 - if (mtk_mib_entry_read(ppe, index, &bytes, &packets))
233 - acct = ppe->acct_table + index * size;
235 - acct->bytes += bytes;
236 - acct->packets += packets;
239 - diff->bytes = bytes;
240 - diff->packets = packets;
246 struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index)
248 bool accounting = eth->soc->has_accounting;
249 --- a/drivers/net/ethernet/mediatek/mtk_ppe.h
250 +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
251 @@ -278,6 +278,8 @@ struct mtk_flow_entry {
252 struct mtk_foe_entry data;
253 struct rhash_head node;
254 unsigned long cookie;
259 struct mtk_mib_entry {
260 @@ -320,6 +322,7 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_
261 void mtk_ppe_start(struct mtk_ppe *ppe);
262 int mtk_ppe_stop(struct mtk_ppe *ppe);
263 int mtk_ppe_prepare_reset(struct mtk_ppe *ppe);
264 +struct mtk_foe_accounting *mtk_ppe_mib_entry_read(struct mtk_ppe *ppe, u16 index);
266 void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
268 @@ -368,9 +371,8 @@ int mtk_foe_entry_set_queue(struct mtk_e
270 int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
271 void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
272 -int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
273 int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index);
274 -struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
275 - struct mtk_foe_accounting *diff);
276 +void mtk_foe_entry_get_stats(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
277 + int *idle, u64 *packets, u64 *bytes);
280 --- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
281 +++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
282 @@ -96,7 +96,7 @@ mtk_ppe_debugfs_foe_show(struct seq_file
283 if (bind && state != MTK_FOE_STATE_BIND)
286 - acct = mtk_foe_entry_get_mib(ppe, i, NULL);
287 + acct = mtk_ppe_mib_entry_read(ppe, i);
289 type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
290 seq_printf(m, "%05x %s %7s", i,
291 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
292 +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
293 @@ -499,24 +499,17 @@ static int
294 mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
296 struct mtk_flow_entry *entry;
297 - struct mtk_foe_accounting diff;
301 entry = rhashtable_lookup(ð->flow_table, &f->cookie,
306 - idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
307 + mtk_foe_entry_get_stats(eth->ppe[entry->ppe_index], entry, &idle,
308 + &f->stats.pkts, &f->stats.bytes);
309 f->stats.lastused = jiffies - idle * HZ;
311 - if (entry->hash != 0xFFFF &&
312 - mtk_foe_entry_get_mib(eth->ppe[entry->ppe_index], entry->hash,
314 - f->stats.pkts += diff.packets;
315 - f->stats.bytes += diff.bytes;