1 // SPDX-License-Identifier: GPL-2.0-only
2
3 #include <linux/of_mdio.h>
4 #include <linux/of_platform.h>
5 #include <net/arp.h>
6 #include <net/nexthop.h>
7 #include <net/neighbour.h>
8 #include <net/netevent.h>
9 #include <linux/inetdevice.h>
10 #include <linux/rhashtable.h>
11 #include <linux/of_net.h>
12
13 #include <asm/mach-rtl838x/mach-rtl83xx.h>
14 #include "rtl83xx.h"
15
16 extern struct rtl83xx_soc_info soc_info;
17
18 extern const struct rtl838x_reg rtl838x_reg;
19 extern const struct rtl838x_reg rtl839x_reg;
20 extern const struct rtl838x_reg rtl930x_reg;
21 extern const struct rtl838x_reg rtl931x_reg;
22
23 extern const struct dsa_switch_ops rtl83xx_switch_ops;
24 extern const struct dsa_switch_ops rtl930x_switch_ops;
25
26 DEFINE_MUTEX(smi_lock);
27
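/*
 * Look up the 2-bit STP state of a port (MSTI 0) from the packed port-state
 * field. States are packed 16 ports to a 32-bit word, with the words stored
 * most-significant first; the per-family offsets below skip unused leading
 * bits. Illustrative example (derived from the arithmetic below, not from a
 * datasheet): on RTL8390, port 0 maps to pos 12, so index = 4 - 0 - 1 = 3
 * and bit = 24, i.e. bits 25:24 of port_state[3].
 * Returns the state (0..3), or -1 for the CPU port and above.
 */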
28 int rtl83xx_port_get_stp_state(struct rtl838x_switch_priv *priv, int port)
29 {
30 u32 msti = 0;
31 u32 port_state[4];
32 int index, bit;
33 int pos = port;
34 int n = priv->port_width << 1;
35
36 /* Ports at or above the CPU port can never be configured */
37 if (port >= priv->cpu_port)
38 return -1;
39
40 mutex_lock(&priv->reg_mutex);
41
42 /* For the RTL839x and following, the bits are left-aligned in the 64/128 bit field */
43 if (priv->family_id == RTL8390_FAMILY_ID)
44 pos += 12;
45 if (priv->family_id == RTL9300_FAMILY_ID)
46 pos += 3;
47 if (priv->family_id == RTL9310_FAMILY_ID)
48 pos += 8;
49
50 index = n - (pos >> 4) - 1;
51 bit = (pos << 1) % 32;
52
53 priv->r->stp_get(priv, msti, port_state);
54
55 mutex_unlock(&priv->reg_mutex);
56
57 return (port_state[index] >> bit) & 3;
58 }
59
60 static struct table_reg rtl838x_tbl_regs[] = {
61 TBL_DESC(0x6900, 0x6908, 3, 15, 13, 1), // RTL8380_TBL_L2
62 TBL_DESC(0x6914, 0x6918, 18, 14, 12, 1), // RTL8380_TBL_0
63 TBL_DESC(0xA4C8, 0xA4CC, 6, 14, 12, 1), // RTL8380_TBL_1
64
65 TBL_DESC(0x1180, 0x1184, 3, 16, 14, 0), // RTL8390_TBL_L2
66 TBL_DESC(0x1190, 0x1194, 17, 15, 12, 0), // RTL8390_TBL_0
67 TBL_DESC(0x6B80, 0x6B84, 4, 14, 12, 0), // RTL8390_TBL_1
68 TBL_DESC(0x611C, 0x6120, 9, 8, 6, 0), // RTL8390_TBL_2
69
70 TBL_DESC(0xB320, 0xB334, 3, 18, 16, 0), // RTL9300_TBL_L2
71 TBL_DESC(0xB340, 0xB344, 19, 16, 12, 0), // RTL9300_TBL_0
72 TBL_DESC(0xB3A0, 0xB3A4, 20, 16, 13, 0), // RTL9300_TBL_1
73 TBL_DESC(0xCE04, 0xCE08, 6, 14, 12, 0), // RTL9300_TBL_2
74 TBL_DESC(0xD600, 0xD604, 30, 7, 6, 0), // RTL9300_TBL_HSB
75 TBL_DESC(0x7880, 0x7884, 22, 9, 8, 0), // RTL9300_TBL_HSA
76
77 TBL_DESC(0x8500, 0x8508, 8, 19, 15, 0), // RTL9310_TBL_0
78 TBL_DESC(0x40C0, 0x40C4, 22, 16, 14, 0), // RTL9310_TBL_1
79 TBL_DESC(0x8528, 0x852C, 6, 18, 14, 0), // RTL9310_TBL_2
80 TBL_DESC(0x0200, 0x0204, 9, 15, 12, 0), // RTL9310_TBL_3
81 TBL_DESC(0x20dc, 0x20e0, 29, 7, 6, 0), // RTL9310_TBL_4
82 TBL_DESC(0x7e1c, 0x7e20, 53, 8, 6, 0), // RTL9310_TBL_5
83 };
84
85 void rtl_table_init(void)
86 {
87 int i;
88
89 for (i = 0; i < RTL_TBL_END; i++)
90 mutex_init(&rtl838x_tbl_regs[i].lock);
91 }
92
93 /*
94 * Request access to table t in table access register r
95 * Returns a handle to a lock for that table
96 */
97 struct table_reg *rtl_table_get(rtl838x_tbl_reg_t r, int t)
98 {
99 if (r >= RTL_TBL_END)
100 return NULL;
101
102 if (t >= BIT(rtl838x_tbl_regs[r].c_bit-rtl838x_tbl_regs[r].t_bit))
103 return NULL;
104
105 mutex_lock(&rtl838x_tbl_regs[r].lock);
106 rtl838x_tbl_regs[r].tbl = t;
107
108 return &rtl838x_tbl_regs[r];
109 }
110
111 /*
112 * Release a table r, unlock the corresponding lock
113 */
114 void rtl_table_release(struct table_reg *r)
115 {
116 if (!r)
117 return;
118
119 // pr_info("Unlocking %08x\n", (u32)r);
120 mutex_unlock(&r->lock);
121 // pr_info("Unlock done\n");
122 }
123
124 /*
125 * Reads table index idx into the data registers of the table
126 */
127 void rtl_table_read(struct table_reg *r, int idx)
128 {
129 u32 cmd = r->rmode ? BIT(r->c_bit) : 0;
130
131 cmd |= BIT(r->c_bit + 1) | (r->tbl << r->t_bit) | (idx & (BIT(r->t_bit) - 1));
132 sw_w32(cmd, r->addr);
133 do { } while (sw_r32(r->addr) & BIT(r->c_bit + 1));
134 }
135
136 /*
137 * Writes the content of the table data registers into the table at index idx
138 */
139 void rtl_table_write(struct table_reg *r, int idx)
140 {
141 u32 cmd = r->rmode ? 0 : BIT(r->c_bit);
142
143 cmd |= BIT(r->c_bit + 1) | (r->tbl << r->t_bit) | (idx & (BIT(r->t_bit) - 1));
144 sw_w32(cmd, r->addr);
145 do { } while (sw_r32(r->addr) & BIT(r->c_bit + 1));
146 }
147
148 /*
149 * Returns the address of the ith data register of table register r
150 * the address is relative to the beginning of the Switch-IO block at 0xbb000000
151 */
152 inline u16 rtl_table_data(struct table_reg *r, int i)
153 {
154 if (i >= r->max_data)
155 i = r->max_data - 1;
156 return r->data + i * 4;
157 }
158
159 inline u32 rtl_table_data_r(struct table_reg *r, int i)
160 {
161 return sw_r32(rtl_table_data(r, i));
162 }
163
164 inline void rtl_table_data_w(struct table_reg *r, u32 v, int i)
165 {
166 sw_w32(v, rtl_table_data(r, i));
167 }
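
/*
 * Illustrative usage of the table access helpers above (a sketch only, with
 * error handling omitted; the table enum value and the sub-table/index
 * numbers are made up for the example):
 *
 *	struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 0);
 *	u32 v;
 *
 *	rtl_table_read(q, 12);		// arbitrary example index
 *	v = rtl_table_data_r(q, 0);
 *	rtl_table_release(q);
 */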
168
169 /* Port register accessor functions for the RTL838x and RTL930X SoCs */
170 void rtl838x_mask_port_reg(u64 clear, u64 set, int reg)
171 {
172 sw_w32_mask((u32)clear, (u32)set, reg);
173 }
174
175 void rtl838x_set_port_reg(u64 set, int reg)
176 {
177 sw_w32((u32)set, reg);
178 }
179
180 u64 rtl838x_get_port_reg(int reg)
181 {
182 return ((u64) sw_r32(reg));
183 }
184
185 /* Port register accessor functions for the RTL839x and RTL931X SoCs */
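/*
 * Port masks on these SoCs are 64 bit wide and split over two 32-bit
 * registers: the _be variants keep the upper 32 bits at reg and the lower
 * 32 bits at reg + 4, the _le variants use the opposite layout.
 */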
186 void rtl839x_mask_port_reg_be(u64 clear, u64 set, int reg)
187 {
188 sw_w32_mask((u32)(clear >> 32), (u32)(set >> 32), reg);
189 sw_w32_mask((u32)(clear & 0xffffffff), (u32)(set & 0xffffffff), reg + 4);
190 }
191
192 u64 rtl839x_get_port_reg_be(int reg)
193 {
194 u64 v = sw_r32(reg);
195
196 v <<= 32;
197 v |= sw_r32(reg + 4);
198 return v;
199 }
200
201 void rtl839x_set_port_reg_be(u64 set, int reg)
202 {
203 sw_w32(set >> 32, reg);
204 sw_w32(set & 0xffffffff, reg + 4);
205 }
206
207 void rtl839x_mask_port_reg_le(u64 clear, u64 set, int reg)
208 {
209 sw_w32_mask((u32)clear, (u32)set, reg);
210 sw_w32_mask((u32)(clear >> 32), (u32)(set >> 32), reg + 4);
211 }
212
213 void rtl839x_set_port_reg_le(u64 set, int reg)
214 {
215 sw_w32(set, reg);
216 sw_w32(set >> 32, reg + 4);
217 }
218
219 u64 rtl839x_get_port_reg_le(int reg)
220 {
221 u64 v = sw_r32(reg + 4);
222
223 v <<= 32;
224 v |= sw_r32(reg);
225 return v;
226 }
227
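/*
 * Dispatch paged PHY register accesses to the SoC-family specific SMI
 * implementation. Returns the family driver's result, or -1 if the family
 * is unknown.
 */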
228 int read_phy(u32 port, u32 page, u32 reg, u32 *val)
229 {
230 switch (soc_info.family) {
231 case RTL8380_FAMILY_ID:
232 return rtl838x_read_phy(port, page, reg, val);
233 case RTL8390_FAMILY_ID:
234 return rtl839x_read_phy(port, page, reg, val);
235 case RTL9300_FAMILY_ID:
236 return rtl930x_read_phy(port, page, reg, val);
237 case RTL9310_FAMILY_ID:
238 return rtl931x_read_phy(port, page, reg, val);
239 }
240 return -1;
241 }
242
243 int write_phy(u32 port, u32 page, u32 reg, u32 val)
244 {
245 switch (soc_info.family) {
246 case RTL8380_FAMILY_ID:
247 return rtl838x_write_phy(port, page, reg, val);
248 case RTL8390_FAMILY_ID:
249 return rtl839x_write_phy(port, page, reg, val);
250 case RTL9300_FAMILY_ID:
251 return rtl930x_write_phy(port, page, reg, val);
252 case RTL9310_FAMILY_ID:
253 return rtl931x_write_phy(port, page, reg, val);
254 }
255 return -1;
256 }
257
258 static int __init rtl83xx_mdio_probe(struct rtl838x_switch_priv *priv)
259 {
260 struct device *dev = priv->dev;
261 struct device_node *dn, *phy_node, *mii_np = dev->of_node;
262 struct mii_bus *bus;
263 int ret;
264 u32 pn;
265
266 pr_debug("In %s\n", __func__);
267 mii_np = of_find_compatible_node(NULL, NULL, "realtek,rtl838x-mdio");
268 if (mii_np) {
269 pr_debug("Found compatible MDIO node!\n");
270 } else {
271 dev_err(priv->dev, "no %s child node found", "mdio-bus");
272 return -ENODEV;
273 }
274
275 priv->mii_bus = of_mdio_find_bus(mii_np);
276 if (!priv->mii_bus) {
277 pr_debug("Deferring probe of mdio bus\n");
278 return -EPROBE_DEFER;
279 }
280 if (!of_device_is_available(mii_np))
281 return -ENODEV;
282
283 bus = devm_mdiobus_alloc(priv->ds->dev);
284 if (!bus)
285 return -ENOMEM;
286
287 bus->name = "rtl838x slave mii";
288
289 /*
290 * Since the NIC driver is loaded first, we can use the mdio rw functions
291 * assigned there.
292 */
293 bus->read = priv->mii_bus->read;
294 bus->write = priv->mii_bus->write;
295 bus->read_paged = priv->mii_bus->read_paged;
296 bus->write_paged = priv->mii_bus->write_paged;
297 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", bus->name, dev->id);
298
299 bus->parent = dev;
300 priv->ds->slave_mii_bus = bus;
301 priv->ds->slave_mii_bus->priv = priv->mii_bus->priv;
302 priv->ds->slave_mii_bus->access_capabilities = priv->mii_bus->access_capabilities;
303
304 ret = mdiobus_register(priv->ds->slave_mii_bus);
305 if (ret && mii_np) {
306 of_node_put(mii_np);
307 return ret;
308 }
309
310 dn = of_find_compatible_node(NULL, NULL, "realtek,rtl83xx-switch");
311 if (!dn) {
312 dev_err(priv->dev, "No RTL switch node in DTS\n");
313 return -ENODEV;
314 }
315
316 for_each_node_by_name(dn, "port") {
317 phy_interface_t interface;
318 u32 led_set;
319
320 if (!of_device_is_available(dn))
321 continue;
322
323 if (of_property_read_u32(dn, "reg", &pn))
324 continue;
325
326 pr_info("%s found port %d\n", __func__, pn);
327 phy_node = of_parse_phandle(dn, "phy-handle", 0);
328 if (!phy_node) {
329 if (pn != priv->cpu_port)
330 dev_err(priv->dev, "Port node %d misses phy-handle\n", pn);
331 continue;
332 }
333
334 pr_info("%s port %d has phandle\n", __func__, pn);
335 if (of_property_read_u32(phy_node, "sds", &priv->ports[pn].sds_num))
336 priv->ports[pn].sds_num = -1;
337 else {
338 pr_info("%s sds port %d is %d\n", __func__, pn,
339 priv->ports[pn].sds_num);
340 }
341 pr_info("%s port %d has SDS\n", __func__, priv->ports[pn].sds_num);
342
343 if (of_get_phy_mode(dn, &interface))
344 interface = PHY_INTERFACE_MODE_NA;
345 if (interface == PHY_INTERFACE_MODE_HSGMII)
346 priv->ports[pn].is2G5 = true;
347 if (interface == PHY_INTERFACE_MODE_USXGMII)
348 priv->ports[pn].is2G5 = priv->ports[pn].is10G = true;
349 if (interface == PHY_INTERFACE_MODE_10GBASER)
350 priv->ports[pn].is10G = true;
351
352 if (of_property_read_u32(dn, "led-set", &led_set))
353 led_set = 0;
354 priv->ports[pn].led_set = led_set;
355
356 // Check for the integrated SerDes of the RTL8380M first
357 if (of_property_read_bool(phy_node, "phy-is-integrated")
358 && priv->id == 0x8380 && pn >= 24) {
359 pr_debug("----> FÓUND A SERDES\n");
360 priv->ports[pn].phy = PHY_RTL838X_SDS;
361 continue;
362 }
363
364 if (priv->id >= 0x9300) {
365 priv->ports[pn].phy_is_integrated = false;
366 if (of_property_read_bool(phy_node, "phy-is-integrated")) {
367 priv->ports[pn].phy_is_integrated = true;
368 priv->ports[pn].phy = PHY_RTL930X_SDS;
369 }
370 } else {
371 if (of_property_read_bool(phy_node, "phy-is-integrated")
372 && !of_property_read_bool(phy_node, "sfp")) {
373 priv->ports[pn].phy = PHY_RTL8218B_INT;
374 continue;
375 }
376 }
377
378 if (!of_property_read_bool(phy_node, "phy-is-integrated")
379 && of_property_read_bool(phy_node, "sfp")) {
380 priv->ports[pn].phy = PHY_RTL8214FC;
381 continue;
382 }
383
384 if (!of_property_read_bool(phy_node, "phy-is-integrated")
385 && !of_property_read_bool(phy_node, "sfp")) {
386 priv->ports[pn].phy = PHY_RTL8218B_EXT;
387 continue;
388 }
389 }
390
391 /* Disable MAC polling the PHY so that we can start configuration */
392 priv->r->set_port_reg_le(0ULL, priv->r->smi_poll_ctrl);
393
394 /* Enable PHY control via SoC */
395 if (priv->family_id == RTL8380_FAMILY_ID) {
396 /* Enable SerDes NWAY and PHY control via SoC */
397 sw_w32_mask(BIT(7), BIT(15), RTL838X_SMI_GLB_CTRL);
398 } else if (priv->family_id == RTL8390_FAMILY_ID) {
399 /* Disable PHY polling via SoC */
400 sw_w32_mask(BIT(7), 0, RTL839X_SMI_GLB_CTRL);
401 }
402
403 /* Power on fibre ports and reset them if necessary */
404 if (priv->ports[24].phy == PHY_RTL838X_SDS) {
405 pr_debug("Powering on fibre ports & reset\n");
406 rtl8380_sds_power(24, 1);
407 rtl8380_sds_power(26, 1);
408 }
409
410 pr_debug("%s done\n", __func__);
411 return 0;
412 }
413
414 static int __init rtl83xx_get_l2aging(struct rtl838x_switch_priv *priv)
415 {
416 int t = sw_r32(priv->r->l2_ctrl_1);
417
418 t &= priv->family_id == RTL8380_FAMILY_ID ? 0x7fffff : 0x1FFFFF;
419
420 if (priv->family_id == RTL8380_FAMILY_ID)
421 t = t * 128 / 625; /* Aging time in seconds. 0: L2 aging disabled */
422 else
423 t = (t * 3) / 5;
424
425 pr_debug("L2 AGING time: %d sec\n", t);
426 pr_debug("Dynamic aging for ports: %x\n", sw_r32(priv->r->l2_port_aging_out));
427 return t;
428 }
429
430 /* Caller must hold priv->reg_mutex */
431 int rtl83xx_lag_add(struct dsa_switch *ds, int group, int port, struct netdev_lag_upper_info *info)
432 {
433 struct rtl838x_switch_priv *priv = ds->priv;
434 int i;
435 u32 algomsk = 0;
436 u32 algoidx = 0;
437
438 if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
439 pr_err("%s: Only mode LACP 802.3ad (4) allowed.\n", __func__);
440 return -EINVAL;
441 }
442
443 if (group >= priv->n_lags) {
444 pr_err("%s: LAG %d invalid.\n", __func__, group);
445 return -EINVAL;
446 }
447
448 if (port >= priv->cpu_port) {
449 pr_err("%s: Port %d invalid.\n", __func__, port);
450 return -EINVAL;
451 }
452
453 for (i = 0; i < priv->n_lags; i++) {
454 if (priv->lags_port_members[i] & BIT_ULL(port))
455 break;
456 }
457 if (i != priv->n_lags) {
458 pr_err("%s: Port %d already member of LAG %d.\n", __func__, port, i);
459 return -ENOSPC;
460 }
461 switch(info->hash_type) {
462 case NETDEV_LAG_HASH_L2:
463 algomsk |= TRUNK_DISTRIBUTION_ALGO_DMAC_BIT;
464 algomsk |= TRUNK_DISTRIBUTION_ALGO_SMAC_BIT;
465 break;
466 case NETDEV_LAG_HASH_L23:
467 algomsk |= TRUNK_DISTRIBUTION_ALGO_DMAC_BIT;
468 algomsk |= TRUNK_DISTRIBUTION_ALGO_SMAC_BIT;
469 algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; //source ip
470 algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; //dest ip
471 algoidx = 1;
472 break;
473 case NETDEV_LAG_HASH_L34:
474 algomsk |= TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT; //sport
475 algomsk |= TRUNK_DISTRIBUTION_ALGO_DST_L4PORT_BIT; //dport
476 algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; //source ip
477 algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; //dest ip
478 algoidx = 2;
479 break;
480 default:
481 algomsk |= 0x7f;
482 }
483 priv->r->set_distribution_algorithm(group, algoidx, algomsk);
484 priv->r->mask_port_reg_be(0, BIT_ULL(port), priv->r->trk_mbr_ctr(group));
485 priv->lags_port_members[group] |= BIT_ULL(port);
486
487 pr_info("%s: Added port %d to LAG %d. Members now %016llx.\n",
488 __func__, port, group, priv->lags_port_members[group]);
489 return 0;
490 }
491
492 /* Caller must hold priv->reg_mutex */
493 int rtl83xx_lag_del(struct dsa_switch *ds, int group, int port)
494 {
495 struct rtl838x_switch_priv *priv = ds->priv;
496
497 if (group >= priv->n_lags) {
498 pr_err("%s: LAG %d invalid.\n", __func__, group);
499 return -EINVAL;
500 }
501
502 if (port >= priv->cpu_port) {
503 pr_err("%s: Port %d invalid.\n", __func__, port);
504 return -EINVAL;
505 }
506
507 if (!(priv->lags_port_members[group] & BIT_ULL(port))) {
508 pr_err("%s: Port %d not member of LAG %d.\n", __func__, port, group);
509 return -ENOSPC;
510 }
511
512 // 0x7f algo mask all
513 priv->r->mask_port_reg_be(BIT_ULL(port), 0, priv->r->trk_mbr_ctr(group));
514 priv->lags_port_members[group] &= ~BIT_ULL(port);
515
516 pr_info("%s: Removed port %d from LAG %d. Members now %016llx.\n",
517 __func__, port, group, priv->lags_port_members[group]);
518 return 0;
519 }
520
521 /*
522 * Allocate a 64 bit octet counter located in the LOG HW table
523 */
524 static int rtl83xx_octet_cntr_alloc(struct rtl838x_switch_priv *priv)
525 {
526 int idx;
527
528 mutex_lock(&priv->reg_mutex);
529
530 idx = find_first_zero_bit(priv->octet_cntr_use_bm, MAX_COUNTERS);
531 if (idx >= priv->n_counters) {
532 mutex_unlock(&priv->reg_mutex);
533 return -1;
534 }
535
536 set_bit(idx, priv->octet_cntr_use_bm);
537 mutex_unlock(&priv->reg_mutex);
538
539 return idx;
540 }
541
542 /*
543 * Allocate a 32-bit packet counter
544 * 2 32-bit packet counters share the location of a 64-bit octet counter
545 * Initially there are no free packet counters; allocating octet counter j
546 * makes the two packet counters 2*j and 2*j+1 available
547 */
548 int rtl83xx_packet_cntr_alloc(struct rtl838x_switch_priv *priv)
549 {
550 int idx, j;
551
552 mutex_lock(&priv->reg_mutex);
553
554 /* Because initially no packet counters are free, the logic is reversed:
555 * a 0-bit means the counter is already allocated (for octets)
556 */
557 idx = find_first_bit(priv->packet_cntr_use_bm, MAX_COUNTERS * 2);
558 if (idx >= priv->n_counters * 2) {
559 j = find_first_zero_bit(priv->octet_cntr_use_bm, MAX_COUNTERS);
560 if (j >= priv->n_counters) {
561 mutex_unlock(&priv->reg_mutex);
562 return -1;
563 }
564 set_bit(j, priv->octet_cntr_use_bm);
565 idx = j * 2;
566 set_bit(j * 2 + 1, priv->packet_cntr_use_bm);
567
568 } else {
569 clear_bit(idx, priv->packet_cntr_use_bm);
570 }
571
572 mutex_unlock(&priv->reg_mutex);
573
574 return idx;
575 }
576
577 /*
578 * Add an L2 nexthop entry for the L3 routing system / PIE forwarding in the SoC
579 * Use VID and MAC in rtl838x_l2_entry to identify either a free slot in the L2 hash table
580 * or mark an existing entry as a nexthop by setting its nexthop bit
581 * Called from the L3 layer
582 * The index in the L2 hash table is filled into nh->l2_id;
583 */
584 int rtl83xx_l2_nexthop_add(struct rtl838x_switch_priv *priv, struct rtl83xx_nexthop *nh)
585 {
586 struct rtl838x_l2_entry e;
587 u64 seed = priv->r->l2_hash_seed(nh->mac, nh->rvid);
588 u32 key = priv->r->l2_hash_key(priv, seed);
589 int i, idx = -1;
590 u64 entry;
591
592 pr_debug("%s searching for %08llx vid %d with key %d, seed: %016llx\n",
593 __func__, nh->mac, nh->rvid, key, seed);
594
595 e.type = L2_UNICAST;
596 u64_to_ether_addr(nh->mac, &e.mac[0]);
597 e.port = nh->port;
598
599 // Loop over all entries in the hash-bucket and over the second block on 93xx SoCs
600 for (i = 0; i < priv->l2_bucket_size; i++) {
601 entry = priv->r->read_l2_entry_using_hash(key, i, &e);
602
603 if (!e.valid || ((entry & 0x0fffffffffffffffULL) == seed)) {
604 idx = i > 3 ? ((key >> 14) & 0xffff) | i >> 1
605 : ((key << 2) | i) & 0xffff;
606 break;
607 }
608 }
609
610 if (idx < 0) {
611 pr_err("%s: No more L2 forwarding entries available\n", __func__);
612 return -1;
613 }
614
615 // Found an existing (e->valid is true) or empty entry, make it a nexthop entry
616 nh->l2_id = idx;
617 if (e.valid) {
618 nh->port = e.port;
619 nh->vid = e.vid; // Save VID
620 nh->rvid = e.rvid;
621 nh->dev_id = e.stack_dev;
622 // If the entry is already a valid next hop entry, don't change it
623 if (e.next_hop)
624 return 0;
625 } else {
626 e.valid = true;
627 e.is_static = true;
628 e.rvid = nh->rvid;
629 e.is_ip_mc = false;
630 e.is_ipv6_mc = false;
631 e.block_da = false;
632 e.block_sa = false;
633 e.suspended = false;
634 e.age = 0; // With port-ignore
635 e.port = priv->port_ignore;
636 u64_to_ether_addr(nh->mac, &e.mac[0]);
637 }
638 e.next_hop = true;
639 e.nh_route_id = nh->id; // NH route ID takes the place of the VID
640 e.nh_vlan_target = false;
641
642 priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e);
643
644 return 0;
645 }
646
647 /*
648 * Removes a Layer 2 next hop entry in the forwarding database
649 * If it was static, the entire entry is removed, otherwise the nexthop bit is cleared
650 * and we wait until the entry ages out
651 */
652 int rtl83xx_l2_nexthop_rm(struct rtl838x_switch_priv *priv, struct rtl83xx_nexthop *nh)
653 {
654 struct rtl838x_l2_entry e;
655 u32 key = nh->l2_id >> 2;
656 int i = nh->l2_id & 0x3;
657 priv->r->read_l2_entry_using_hash(key, i, &e);
658
659 pr_debug("%s: id %d, key %d, index %d\n", __func__, nh->l2_id, key, i);
660 if (!e.valid) {
661 dev_err(priv->dev, "unknown nexthop, id %x\n", nh->l2_id);
662 return -1;
663 }
664
665 if (e.is_static)
666 e.valid = false;
667 e.next_hop = false;
668 e.vid = nh->vid; // Restore VID
669 e.rvid = nh->rvid;
670
671 priv->r->write_l2_entry_using_hash(key, i, &e);
672
673 return 0;
674 }
675
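/*
 * Handle NETDEV_CHANGEUPPER events for LAG masters: map the upper device to
 * one of the hardware trunk groups (claiming a free one on first use) and
 * add or remove the switch port behind ndev accordingly.
 */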
676 static int rtl83xx_handle_changeupper(struct rtl838x_switch_priv *priv,
677 struct net_device *ndev,
678 struct netdev_notifier_changeupper_info *info)
679 {
680 struct net_device *upper = info->upper_dev;
681 struct netdev_lag_upper_info *lag_upper_info = NULL;
682 int i, j, err;
683
684 if (!netif_is_lag_master(upper))
685 return 0;
686
687 mutex_lock(&priv->reg_mutex);
688
689 for (i = 0; i < priv->n_lags; i++) {
690 if ((!priv->lag_devs[i]) || (priv->lag_devs[i] == upper))
691 break;
692 }
693 for (j = 0; j < priv->cpu_port; j++) {
694 if (priv->ports[j].dp->slave == ndev)
695 break;
696 }
697 if (j >= priv->cpu_port) {
698 err = -EINVAL;
699 goto out;
700 }
701
702 if (info->linking) {
703 lag_upper_info = info->upper_info;
704 if (!priv->lag_devs[i])
705 priv->lag_devs[i] = upper;
706 err = rtl83xx_lag_add(priv->ds, i, priv->ports[j].dp->index, lag_upper_info);
707 if (err) {
708 err = -EINVAL;
709 goto out;
710 }
711 } else {
712 if (!priv->lag_devs[i])
713 goto out;
714 err = rtl83xx_lag_del(priv->ds, i, priv->ports[j].dp->index);
715 if (err) {
716 err = -EINVAL;
717 goto out;
718 }
719 if (!priv->lags_port_members[i])
720 priv->lag_devs[i] = NULL;
721 }
722
723 out:
724 mutex_unlock(&priv->reg_mutex);
725 return 0;
726 }
727
728 /*
729 * Is the lower network device a DSA slave network device of our switch?
730 * Unfortunately we cannot just follow dev->dsa_ptr as this is only set for the
731 * DSA master device.
732 */
733 int rtl83xx_port_is_under(const struct net_device * dev, struct rtl838x_switch_priv *priv)
734 {
735 int i;
736
737 // TODO: On 5.12:
738 // if(!dsa_slave_dev_check(dev)) {
739 // netdev_info(dev, "%s: not a DSA device.\n", __func__);
740 // return -EINVAL;
741 // }
742
743 for (i = 0; i < priv->cpu_port; i++) {
744 if (!priv->ports[i].dp)
745 continue;
746 if (priv->ports[i].dp->slave == dev)
747 return i;
748 }
749 return -EINVAL;
750 }
751
752 static int rtl83xx_netdevice_event(struct notifier_block *this,
753 unsigned long event, void *ptr)
754 {
755 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
756 struct rtl838x_switch_priv *priv;
757 int err = 0;
758
759 pr_debug("In: %s, event: %lu\n", __func__, event);
760
761 if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
762 return NOTIFY_DONE;
763
764 priv = container_of(this, struct rtl838x_switch_priv, nb);
765 switch (event) {
766 case NETDEV_CHANGEUPPER:
767 err = rtl83xx_handle_changeupper(priv, ndev, ptr);
768 break;
769 }
770
771 if (err)
772 return err;
773
774 return NOTIFY_DONE;
775 }
776
777 static const struct rhashtable_params route_ht_params = {
778 .key_len = sizeof(u32),
779 .key_offset = offsetof(struct rtl83xx_route, gw_ip),
780 .head_offset = offsetof(struct rtl83xx_route, linkage),
781 };
782
783 /*
784 * Updates an L3 next hop entry in the ROUTING table
785 */
786 static int rtl83xx_l3_nexthop_update(struct rtl838x_switch_priv *priv, __be32 ip_addr, u64 mac)
787 {
788 struct rtl83xx_route *r;
789 struct rhlist_head *tmp, *list;
790
791 rcu_read_lock();
792 list = rhltable_lookup(&priv->routes, &ip_addr, route_ht_params);
793 if (!list) {
794 rcu_read_unlock();
795 return -ENOENT;
796 }
797
798 rhl_for_each_entry_rcu(r, tmp, list, linkage) {
799 pr_info("%s: Setting up fwding: ip %pI4, GW mac %016llx\n",
800 __func__, &ip_addr, mac);
801
802 // Reads the ROUTING table entry associated with the route
803 priv->r->route_read(r->id, r);
804 pr_info("Route with id %d to %pI4 / %d\n", r->id, &r->dst_ip, r->prefix_len);
805
806 r->nh.mac = r->nh.gw = mac;
807 r->nh.port = priv->port_ignore;
808 r->nh.id = r->id;
809
810 // Do we need to explicitly add a DMAC entry with the route's nh index?
811 if (priv->r->set_l3_egress_mac)
812 priv->r->set_l3_egress_mac(r->id, mac);
813
814 // Update ROUTING table: map gateway-mac and switch-mac id to route id
815 rtl83xx_l2_nexthop_add(priv, &r->nh);
816
817 r->attr.valid = true;
818 r->attr.action = ROUTE_ACT_FORWARD;
819 r->attr.type = 0;
820 r->attr.hit = false; // Reset route-used indicator
821
822 // Add PIE entry with dst_ip and prefix_len
823 r->pr.dip = r->dst_ip;
824 r->pr.dip_m = inet_make_mask(r->prefix_len);
825
826 if (r->is_host_route) {
827 int slot = priv->r->find_l3_slot(r, false);
828
829 pr_info("%s: Got slot for route: %d\n", __func__, slot);
830 priv->r->host_route_write(slot, r);
831 } else {
832 priv->r->route_write(r->id, r);
833 r->pr.fwd_sel = true;
834 r->pr.fwd_data = r->nh.l2_id;
835 r->pr.fwd_act = PIE_ACT_ROUTE_UC;
836 }
837
838 if (priv->r->set_l3_nexthop)
839 priv->r->set_l3_nexthop(r->nh.id, r->nh.l2_id, r->nh.if_id);
840
841 if (r->pr.id < 0) {
842 r->pr.packet_cntr = rtl83xx_packet_cntr_alloc(priv);
843 if (r->pr.packet_cntr >= 0) {
844 pr_info("Using packet counter %d\n", r->pr.packet_cntr);
845 r->pr.log_sel = true;
846 r->pr.log_data = r->pr.packet_cntr;
847 }
848 priv->r->pie_rule_add(priv, &r->pr);
849 } else {
850 int pkts = priv->r->packet_cntr_read(r->pr.packet_cntr);
851 pr_info("%s: total packets: %d\n", __func__, pkts);
852
853 priv->r->pie_rule_write(priv, r->pr.id, &r->pr);
854 }
855 }
856 rcu_read_unlock();
857 return 0;
858 }
859
860 static int rtl83xx_port_ipv4_resolve(struct rtl838x_switch_priv *priv,
861 struct net_device *dev, __be32 ip_addr)
862 {
863 struct neighbour *n = neigh_lookup(&arp_tbl, &ip_addr, dev);
864 int err = 0;
865 u64 mac;
866
867 if (!n) {
868 n = neigh_create(&arp_tbl, &ip_addr, dev);
869 if (IS_ERR(n))
870 return PTR_ERR(n);
871 }
872
873 /* If the neigh is already resolved, then go ahead and
874 * install the entry, otherwise start the ARP process to
875 * resolve the neigh.
876 */
877 if (n->nud_state & NUD_VALID) {
878 mac = ether_addr_to_u64(n->ha);
879 pr_info("%s: resolved mac: %016llx\n", __func__, mac);
880 rtl83xx_l3_nexthop_update(priv, ip_addr, mac);
881 } else {
882 pr_info("%s: need to wait\n", __func__);
883 neigh_event_send(n, NULL);
884 }
885
886 neigh_release(n);
887 return err;
888 }
889
890 struct rtl83xx_walk_data {
891 struct rtl838x_switch_priv *priv;
892 int port;
893 };
894
895 static int rtl83xx_port_lower_walk(struct net_device *lower, struct netdev_nested_priv *_priv)
896 {
897 struct rtl83xx_walk_data *data = (struct rtl83xx_walk_data *)_priv->data;
898 struct rtl838x_switch_priv *priv = data->priv;
899 int ret = 0;
900 int index;
901
902 index = rtl83xx_port_is_under(lower, priv);
903 data->port = index;
904 if (index >= 0) {
905 pr_debug("Found DSA-port, index %d\n", index);
906 ret = 1;
907 }
908
909 return ret;
910 }
911
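/*
 * Walk all lower devices of dev (e.g. bridge or LAG members) and return the
 * index of the first one that is a front-panel port of this switch. A
 * negative value means no port was found; note that 0 is also returned when
 * dev has no lower devices at all.
 */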
912 int rtl83xx_port_dev_lower_find(struct net_device *dev, struct rtl838x_switch_priv *priv)
913 {
914 struct rtl83xx_walk_data data;
915 struct netdev_nested_priv _priv;
916
917 data.priv = priv;
918 data.port = 0;
919 _priv.data = (void *)&data;
920
921 netdev_walk_all_lower_dev(dev, rtl83xx_port_lower_walk, &_priv);
922
923 return data.port;
924 }
925
926 static struct rtl83xx_route *rtl83xx_route_alloc(struct rtl838x_switch_priv *priv, u32 ip)
927 {
928 struct rtl83xx_route *r;
929 int idx = 0, err;
930
931 mutex_lock(&priv->reg_mutex);
932
933 idx = find_first_zero_bit(priv->route_use_bm, MAX_ROUTES);
934 pr_debug("%s id: %d, ip %pI4\n", __func__, idx, &ip);
935
936 r = kzalloc(sizeof(*r), GFP_KERNEL);
937 if (!r) {
938 mutex_unlock(&priv->reg_mutex);
939 return r;
940 }
941
942 r->id = idx;
943 r->gw_ip = ip;
944 r->pr.id = -1; // We still need to allocate a rule in HW
945 r->is_host_route = false;
946
947 err = rhltable_insert(&priv->routes, &r->linkage, route_ht_params);
948 if (err) {
949 pr_err("Could not insert new rule\n");
950 mutex_unlock(&priv->reg_mutex);
951 goto out_free;
952 }
953
954 set_bit(idx, priv->route_use_bm);
955
956 mutex_unlock(&priv->reg_mutex);
957
958 return r;
959
960 out_free:
961 kfree(r);
962 return NULL;
963 }
964
965
966 static struct rtl83xx_route *rtl83xx_host_route_alloc(struct rtl838x_switch_priv *priv, u32 ip)
967 {
968 struct rtl83xx_route *r;
969 int idx = 0, err;
970
971 mutex_lock(&priv->reg_mutex);
972
973 idx = find_first_zero_bit(priv->host_route_use_bm, MAX_HOST_ROUTES);
974 pr_debug("%s id: %d, ip %pI4\n", __func__, idx, &ip);
975
976 r = kzalloc(sizeof(*r), GFP_KERNEL);
977 if (!r) {
978 mutex_unlock(&priv->reg_mutex);
979 return r;
980 }
981
982 /* We require a unique route ID irrespective of whether it is a prefix or host
983 * route (on RTL93xx) as we use this ID to associate a DMAC and next-hop entry */
984 r->id = idx + MAX_ROUTES;
985
986 r->gw_ip = ip;
987 r->pr.id = -1; // We still need to allocate a rule in HW
988 r->is_host_route = true;
989
990 err = rhltable_insert(&priv->routes, &r->linkage, route_ht_params);
991 if (err) {
992 pr_err("Could not insert new rule\n");
993 mutex_unlock(&priv->reg_mutex);
994 goto out_free;
995 }
996
997 set_bit(idx, priv->host_route_use_bm);
998
999 mutex_unlock(&priv->reg_mutex);
1000
1001 return r;
1002
1003 out_free:
1004 kfree(r);
1005 return NULL;
1006 }
1007
1008
1009
1010 static void rtl83xx_route_rm(struct rtl838x_switch_priv *priv, struct rtl83xx_route *r)
1011 {
1012 int id;
1013
1014 if (rhltable_remove(&priv->routes, &r->linkage, route_ht_params))
1015 dev_warn(priv->dev, "Could not remove route\n");
1016
1017 if (r->is_host_route) {
1018 id = priv->r->find_l3_slot(r, false);
1019 pr_debug("%s: Got id for host route: %d\n", __func__, id);
1020 r->attr.valid = false;
1021 priv->r->host_route_write(id, r);
1022 clear_bit(r->id - MAX_ROUTES, priv->host_route_use_bm);
1023 } else {
1024 // If there is a HW representation of the route, delete it
1025 if (priv->r->route_lookup_hw) {
1026 id = priv->r->route_lookup_hw(r);
1027 pr_info("%s: Got id for prefix route: %d\n", __func__, id);
1028 r->attr.valid = false;
1029 priv->r->route_write(id, r);
1030 }
1031 clear_bit(r->id, priv->route_use_bm);
1032 }
1033
1034 kfree(r);
1035 }
1036
1037 static int rtl83xx_fib4_del(struct rtl838x_switch_priv *priv,
1038 struct fib_entry_notifier_info *info)
1039 {
1040 struct fib_nh *nh = fib_info_nh(info->fi, 0);
1041 struct rtl83xx_route *r;
1042 struct rhlist_head *tmp, *list;
1043
1044 pr_debug("In %s, ip %pI4, len %d\n", __func__, &info->dst, info->dst_len);
1045 rcu_read_lock();
1046 list = rhltable_lookup(&priv->routes, &nh->fib_nh_gw4, route_ht_params);
1047 if (!list) {
1048 rcu_read_unlock();
1049 pr_err("%s: no such gateway: %pI4\n", __func__, &nh->fib_nh_gw4);
1050 return -ENOENT;
1051 }
1052 rhl_for_each_entry_rcu(r, tmp, list, linkage) {
1053 if (r->dst_ip == info->dst && r->prefix_len == info->dst_len) {
1054 pr_info("%s: found a route with id %d, nh-id %d\n",
1055 __func__, r->id, r->nh.id);
1056 break;
1057 }
1058 }
1059 rcu_read_unlock();
1060
1061 rtl83xx_l2_nexthop_rm(priv, &r->nh);
1062
1063 pr_debug("%s: Releasing packet counter %d\n", __func__, r->pr.packet_cntr);
1064 set_bit(r->pr.packet_cntr, priv->packet_cntr_use_bm);
1065 priv->r->pie_rule_rm(priv, &r->pr);
1066
1067 rtl83xx_route_rm(priv, r);
1068
1069 nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
1070
1071 return 0;
1072 }
1073
1074 /*
1075 * On the RTL93xx, an L3 termination endpoint MAC address on which the router waits
1076 * for packets to be routed needs to be allocated.
1077 */
1078 static int rtl83xx_alloc_router_mac(struct rtl838x_switch_priv *priv, u64 mac)
1079 {
1080 int i, free_mac = -1;
1081 struct rtl93xx_rt_mac m;
1082
1083 mutex_lock(&priv->reg_mutex);
1084 for (i = 0; i < MAX_ROUTER_MACS; i++) {
1085 priv->r->get_l3_router_mac(i, &m);
1086 if (free_mac < 0 && !m.valid) {
1087 free_mac = i;
1088 continue;
1089 }
1090 if (m.valid && m.mac == mac) {
1091 free_mac = i;
1092 break;
1093 }
1094 }
1095
1096 if (free_mac < 0) {
1097 pr_err("No free router MACs, cannot offload\n");
1098 mutex_unlock(&priv->reg_mutex);
1099 return -1;
1100 }
1101
1102 m.valid = true;
1103 m.mac = mac;
1104 m.p_type = 0; // An individual port, not a trunk port
1105 m.p_id = 0x3f; // Listen on any port
1106 m.p_id_mask = 0;
1107 m.vid = 0; // Listen on any VLAN...
1108 m.vid_mask = 0; // ... so mask needs to be 0
1109 m.mac_mask = 0xffffffffffffULL; // We want an exact match of the interface MAC
1110 m.action = L3_FORWARD; // Route the packet
1111 priv->r->set_l3_router_mac(free_mac, &m);
1112
1113 mutex_unlock(&priv->reg_mutex);
1114
1115 return 0;
1116 }
1117
1118 static int rtl83xx_alloc_egress_intf(struct rtl838x_switch_priv *priv, u64 mac, int vlan)
1119 {
1120 int i, free_mac = -1;
1121 struct rtl838x_l3_intf intf;
1122 u64 m;
1123
1124 mutex_lock(&priv->reg_mutex);
1125 for (i = 0; i < MAX_SMACS; i++) {
1126 m = priv->r->get_l3_egress_mac(L3_EGRESS_DMACS + i);
1127 if (free_mac < 0 && !m) {
1128 free_mac = i;
1129 continue;
1130 }
1131 if (m == mac) {
1132 mutex_unlock(&priv->reg_mutex);
1133 return i;
1134 }
1135 }
1136
1137 if (free_mac < 0) {
1138 pr_err("No free egress interface, cannot offload\n");
mutex_unlock(&priv->reg_mutex);
1139 return -1;
1140 }
1141
1142 // Set up default egress interface 1
1143 intf.vid = vlan;
1144 intf.smac_idx = free_mac;
1145 intf.ip4_mtu_id = 1;
1146 intf.ip6_mtu_id = 1;
1147 intf.ttl_scope = 1; // TTL
1148 intf.hl_scope = 1; // Hop Limit
1149 intf.ip4_icmp_redirect = intf.ip6_icmp_redirect = 2; // FORWARD
1150 intf.ip4_pbr_icmp_redirect = intf.ip6_pbr_icmp_redirect = 2; // FORWARD;
1151 priv->r->set_l3_egress_intf(free_mac, &intf);
1152
1153 priv->r->set_l3_egress_mac(L3_EGRESS_DMACS + free_mac, mac);
1154
1155 mutex_unlock(&priv->reg_mutex);
1156
1157 return free_mac;
1158 }
1159
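/*
 * Offload an IPv4 unicast route: allocate a route or host-route entry,
 * program the router MAC and egress L3 interface where the SoC supports it,
 * and trigger ARP resolution of the gateway so that the nexthop can be
 * completed later via rtl83xx_l3_nexthop_update().
 */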
1160 static int rtl83xx_fib4_add(struct rtl838x_switch_priv *priv,
1161 struct fib_entry_notifier_info *info)
1162 {
1163 struct fib_nh *nh = fib_info_nh(info->fi, 0);
1164 struct net_device *dev = fib_info_nh(info->fi, 0)->fib_nh_dev;
1165 int port;
1166 struct rtl83xx_route *r;
1167 bool to_localhost;
1168 int vlan = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 0;
1169
1170 pr_debug("In %s, ip %pI4, len %d\n", __func__, &info->dst, info->dst_len);
1171 if (!info->dst) {
1172 pr_info("Not offloading default route for now\n");
1173 return 0;
1174 }
1175
1176 pr_debug("GW: %pI4, interface name %s, mac %016llx, vlan %d\n", &nh->fib_nh_gw4, dev->name,
1177 ether_addr_to_u64(dev->dev_addr), vlan
1178 );
1179
1180 port = rtl83xx_port_dev_lower_find(dev, priv);
1181 if (port < 0)
1182 return -1;
1183
1184 // For now we only work with routes that have a gateway and are not ourself
1185 // if ((!nh->fib_nh_gw4) && (info->dst_len != 32))
1186 // return 0;
1187
1188 if ((info->dst & 0xff) == 0xff)
1189 return 0;
1190
1191 // Do not offload routes to 192.168.100.x
1192 if ((info->dst & 0xffffff00) == 0xc0a86400)
1193 return 0;
1194
1195 // Do not offload routes to 127.x.x.x
1196 if ((info->dst & 0xff000000) == 0x7f000000)
1197 return 0;
1198
1199 // Allocate route or host-route (entry if hardware supports this)
1200 if (info->dst_len == 32 && priv->r->host_route_write)
1201 r = rtl83xx_host_route_alloc(priv, nh->fib_nh_gw4);
1202 else
1203 r = rtl83xx_route_alloc(priv, nh->fib_nh_gw4);
1204
1205 if (!r) {
1206 pr_err("%s: No more free route entries\n", __func__);
1207 return -1;
1208 }
1209
1210 r->dst_ip = info->dst;
1211 r->prefix_len = info->dst_len;
1212 r->nh.rvid = vlan;
1213 to_localhost = !nh->fib_nh_gw4;
1214
1215 if (priv->r->set_l3_router_mac) {
1216 u64 mac = ether_addr_to_u64(dev->dev_addr);
1217
1218 pr_debug("Local route and router mac %016llx\n", mac);
1219
1220 if (rtl83xx_alloc_router_mac(priv, mac))
1221 goto out_free_rt;
1222
1223 // vid = 0: Do not care about VID
1224 r->nh.if_id = rtl83xx_alloc_egress_intf(priv, mac, vlan);
1225 if (r->nh.if_id < 0)
1226 goto out_free_rmac;
1227
1228 if (to_localhost) {
1229 int slot;
1230
1231 r->nh.mac = mac;
1232 r->nh.port = priv->port_ignore;
1233 r->attr.valid = true;
1234 r->attr.action = ROUTE_ACT_TRAP2CPU;
1235 r->attr.type = 0;
1236
1237 slot = priv->r->find_l3_slot(r, false);
1238 pr_debug("%s: Got slot for route: %d\n", __func__, slot);
1239 priv->r->host_route_write(slot, r);
1240 }
1241 }
1242
1243 // We need to resolve the mac address of the GW
1244 if (!to_localhost)
1245 rtl83xx_port_ipv4_resolve(priv, dev, nh->fib_nh_gw4);
1246
1247 nh->fib_nh_flags |= RTNH_F_OFFLOAD;
1248
1249 return 0;
1250
1251 out_free_rmac:
1252 out_free_rt:
1253 return 0;
1254 }
1255
1256 static int rtl83xx_fib6_add(struct rtl838x_switch_priv *priv,
1257 struct fib6_entry_notifier_info *info)
1258 {
1259 pr_debug("In %s\n", __func__);
1260 // nh->fib_nh_flags |= RTNH_F_OFFLOAD;
1261 return 0;
1262 }
1263
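/*
 * Neighbour updates are reported in atomic context (hence the GFP_ATOMIC
 * allocation below), so the actual nexthop update is deferred to process
 * context through this work item.
 */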
1264 struct net_event_work {
1265 struct work_struct work;
1266 struct rtl838x_switch_priv *priv;
1267 u64 mac;
1268 u32 gw_addr;
1269 };
1270
1271 static void rtl83xx_net_event_work_do(struct work_struct *work)
1272 {
1273 struct net_event_work *net_work =
1274 container_of(work, struct net_event_work, work);
1275 struct rtl838x_switch_priv *priv = net_work->priv;
1276
1277 rtl83xx_l3_nexthop_update(priv, net_work->gw_addr, net_work->mac);
kfree(net_work);
1278 }
1279
1280 static int rtl83xx_netevent_event(struct notifier_block *this,
1281 unsigned long event, void *ptr)
1282 {
1283 struct rtl838x_switch_priv *priv;
1284 struct net_device *dev;
1285 struct neighbour *n = ptr;
1286 int port;
1287 struct net_event_work *net_work;
1288
1289 priv = container_of(this, struct rtl838x_switch_priv, ne_nb);
1290
1291 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
1292 if (!net_work)
1293 return NOTIFY_BAD;
1294
1295 INIT_WORK(&net_work->work, rtl83xx_net_event_work_do);
1296 net_work->priv = priv;
1297
1298 switch (event) {
1299 case NETEVENT_NEIGH_UPDATE:
1300 if (n->tbl != &arp_tbl) {
kfree(net_work);
1301 return NOTIFY_DONE;
}
1302 dev = n->dev;
1303 port = rtl83xx_port_dev_lower_find(dev, priv);
1304 if (port < 0 || !(n->nud_state & NUD_VALID)) {
1305 pr_debug("%s: Neigbour invalid, not updating\n", __func__);
1306 kfree(net_work);
1307 return NOTIFY_DONE;
1308 }
1309
1310 net_work->mac = ether_addr_to_u64(n->ha);
1311 net_work->gw_addr = *(__be32 *) n->primary_key;
1312
1313 pr_debug("%s: updating neighbour on port %d, mac %016llx\n",
1314 __func__, port, net_work->mac);
1315 schedule_work(&net_work->work);
1318 break;
default:
kfree(net_work);
break;
1319 }
1320
1321 return NOTIFY_DONE;
1322 }
1323
1324 struct rtl83xx_fib_event_work {
1325 struct work_struct work;
1326 union {
1327 struct fib_entry_notifier_info fen_info;
1328 struct fib6_entry_notifier_info fen6_info;
1329 struct fib_rule_notifier_info fr_info;
1330 };
1331 struct rtl838x_switch_priv *priv;
1332 bool is_fib6;
1333 unsigned long event;
1334 };
1335
1336 static void rtl83xx_fib_event_work_do(struct work_struct *work)
1337 {
1338 struct rtl83xx_fib_event_work *fib_work =
1339 container_of(work, struct rtl83xx_fib_event_work, work);
1340 struct rtl838x_switch_priv *priv = fib_work->priv;
1341 struct fib_rule *rule;
1342 int err;
1343
1344 /* Protect internal structures from changes */
1345 rtnl_lock();
1346 pr_debug("%s: doing work, event %ld\n", __func__, fib_work->event);
1347 switch (fib_work->event) {
1348 case FIB_EVENT_ENTRY_ADD:
1349 case FIB_EVENT_ENTRY_REPLACE:
1350 case FIB_EVENT_ENTRY_APPEND:
1351 if (fib_work->is_fib6) {
1352 err = rtl83xx_fib6_add(priv, &fib_work->fen6_info);
1353 } else {
1354 err = rtl83xx_fib4_add(priv, &fib_work->fen_info);
1355 fib_info_put(fib_work->fen_info.fi);
1356 }
1357 if (err)
1358 pr_err("%s: FIB4 failed\n", __func__);
1359 break;
1360 case FIB_EVENT_ENTRY_DEL:
1361 rtl83xx_fib4_del(priv, &fib_work->fen_info);
1362 fib_info_put(fib_work->fen_info.fi);
1363 break;
1364 case FIB_EVENT_RULE_ADD:
1365 case FIB_EVENT_RULE_DEL:
1366 rule = fib_work->fr_info.rule;
1367 if (!fib4_rule_default(rule))
1368 pr_err("%s: FIB4 default rule failed\n", __func__);
1369 fib_rule_put(rule);
1370 break;
1371 }
1372 rtnl_unlock();
1373 kfree(fib_work);
1374 }
1375
1376 /* Called with rcu_read_lock() */
1377 static int rtl83xx_fib_event(struct notifier_block *this, unsigned long event, void *ptr)
1378 {
1379 struct fib_notifier_info *info = ptr;
1380 struct rtl838x_switch_priv *priv;
1381 struct rtl83xx_fib_event_work *fib_work;
1382
1383 if ((info->family != AF_INET && info->family != AF_INET6 &&
1384 info->family != RTNL_FAMILY_IPMR &&
1385 info->family != RTNL_FAMILY_IP6MR))
1386 return NOTIFY_DONE;
1387
1388 priv = container_of(this, struct rtl838x_switch_priv, fib_nb);
1389
1390 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
1391 if (!fib_work)
1392 return NOTIFY_BAD;
1393
1394 INIT_WORK(&fib_work->work, rtl83xx_fib_event_work_do);
1395 fib_work->priv = priv;
1396 fib_work->event = event;
1397 fib_work->is_fib6 = false;
1398
1399 switch (event) {
1400 case FIB_EVENT_ENTRY_ADD:
1401 case FIB_EVENT_ENTRY_REPLACE:
1402 case FIB_EVENT_ENTRY_APPEND:
1403 case FIB_EVENT_ENTRY_DEL:
1404 pr_debug("%s: FIB_ENTRY ADD/DELL, event %ld\n", __func__, event);
1405 if (info->family == AF_INET) {
1406 struct fib_entry_notifier_info *fen_info = ptr;
1407
1408 if (fen_info->fi->fib_nh_is_v6) {
1409 NL_SET_ERR_MSG_MOD(info->extack,
1410 "IPv6 gateway with IPv4 route is not supported");
1411 kfree(fib_work);
1412 return notifier_from_errno(-EINVAL);
1413 }
1414
1415 memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
1416 /* Take reference on fib_info to prevent it from being
1417 * freed while work is queued. Release it afterwards.
1418 */
1419 fib_info_hold(fib_work->fen_info.fi);
1420
1421 } else if (info->family == AF_INET6) {
1422 struct fib6_entry_notifier_info *fen6_info = ptr;
1423 pr_warn("%s: FIB_RULE ADD/DELL for IPv6 not supported\n", __func__);
1424 kfree(fib_work);
1425 return NOTIFY_DONE;
1426 }
1427 break;
1428
1429 case FIB_EVENT_RULE_ADD:
1430 case FIB_EVENT_RULE_DEL:
1431 pr_debug("%s: FIB_RULE ADD/DELL, event: %ld\n", __func__, event);
1432 memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
1433 fib_rule_get(fib_work->fr_info.rule);
1434 break;
1435 }
1436
1437 schedule_work(&fib_work->work);
1438
1439 return NOTIFY_DONE;
1440 }
1441
1442 static int __init rtl83xx_sw_probe(struct platform_device *pdev)
1443 {
1444 int err = 0, i;
1445 struct rtl838x_switch_priv *priv;
1446 struct device *dev = &pdev->dev;
1447 u64 bpdu_mask;
1448
1449 pr_debug("Probing RTL838X switch device\n");
1450 if (!pdev->dev.of_node) {
1451 dev_err(dev, "No DT found\n");
1452 return -EINVAL;
1453 }
1454
1455 // Initialize access to RTL switch tables
1456 rtl_table_init();
1457
1458 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1459 if (!priv)
1460 return -ENOMEM;
1461
1462 priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
1463
1464 if (!priv->ds)
1465 return -ENOMEM;
1466 priv->ds->dev = dev;
1467 priv->ds->priv = priv;
1468 priv->ds->ops = &rtl83xx_switch_ops;
1469 priv->dev = dev;
1470
1471 mutex_init(&priv->reg_mutex);
1472
1473 priv->family_id = soc_info.family;
1474 priv->id = soc_info.id;
1475 switch(soc_info.family) {
1476 case RTL8380_FAMILY_ID:
1477 priv->ds->ops = &rtl83xx_switch_ops;
1478 priv->cpu_port = RTL838X_CPU_PORT;
1479 priv->port_mask = 0x1f;
1480 priv->port_width = 1;
1481 priv->irq_mask = 0x0FFFFFFF;
1482 priv->r = &rtl838x_reg;
1483 priv->ds->num_ports = 29;
1484 priv->fib_entries = 8192;
1485 rtl8380_get_version(priv);
1486 priv->n_lags = 8;
1487 priv->l2_bucket_size = 4;
1488 priv->n_pie_blocks = 12;
1489 priv->port_ignore = 0x1f;
1490 priv->n_counters = 128;
1491 break;
1492 case RTL8390_FAMILY_ID:
1493 priv->ds->ops = &rtl83xx_switch_ops;
1494 priv->cpu_port = RTL839X_CPU_PORT;
1495 priv->port_mask = 0x3f;
1496 priv->port_width = 2;
1497 priv->irq_mask = 0xFFFFFFFFFFFFFULL;
1498 priv->r = &rtl839x_reg;
1499 priv->ds->num_ports = 53;
1500 priv->fib_entries = 16384;
1501 rtl8390_get_version(priv);
1502 priv->n_lags = 16;
1503 priv->l2_bucket_size = 4;
1504 priv->n_pie_blocks = 18;
1505 priv->port_ignore = 0x3f;
1506 priv->n_counters = 1024;
1507 break;
1508 case RTL9300_FAMILY_ID:
1509 priv->ds->ops = &rtl930x_switch_ops;
1510 priv->cpu_port = RTL930X_CPU_PORT;
1511 priv->port_mask = 0x1f;
1512 priv->port_width = 1;
1513 priv->irq_mask = 0x0FFFFFFF;
1514 priv->r = &rtl930x_reg;
1515 priv->ds->num_ports = 29;
1516 priv->fib_entries = 16384;
1517 priv->version = RTL8390_VERSION_A;
1518 priv->n_lags = 16;
1519 sw_w32(1, RTL930X_ST_CTRL);
1520 priv->l2_bucket_size = 8;
1521 priv->n_pie_blocks = 16;
1522 priv->port_ignore = 0x3f;
1523 priv->n_counters = 2048;
1524 break;
1525 case RTL9310_FAMILY_ID:
1526 priv->ds->ops = &rtl930x_switch_ops;
1527 priv->cpu_port = RTL931X_CPU_PORT;
1528 priv->port_mask = 0x3f;
1529 priv->port_width = 2;
1530 priv->irq_mask = 0xFFFFFFFFFFFFFULL;
1531 priv->r = &rtl931x_reg;
1532 priv->ds->num_ports = 57;
1533 priv->fib_entries = 16384;
1534 priv->version = RTL8390_VERSION_A;
1535 priv->n_lags = 16;
1536 priv->l2_bucket_size = 8;
1537 break;
1538 }
1539 pr_debug("Chip version %c\n", priv->version);
1540
1541 err = rtl83xx_mdio_probe(priv);
1542 if (err) {
1543 /* Probing fails the 1st time because of missing ethernet driver
1544 * initialization. Use this to disable traffic in case the bootloader left it on
1545 */
1546 return err;
1547 }
1548 err = dsa_register_switch(priv->ds);
1549 if (err) {
1550 dev_err(dev, "Error registering switch: %d\n", err);
1551 return err;
1552 }
1553
1554 /*
1555 * dsa_to_port returns dsa_port from the port list in
1556 * dsa_switch_tree, the tree is built when the switch
1557 * is registered by dsa_register_switch
1558 */
1559 for (i = 0; i <= priv->cpu_port; i++)
1560 priv->ports[i].dp = dsa_to_port(priv->ds, i);
1561
1562 /* Enable link and media change interrupts. Are the SERDES masks needed? */
1563 sw_w32_mask(0, 3, priv->r->isr_glb_src);
1564
1565 priv->r->set_port_reg_le(priv->irq_mask, priv->r->isr_port_link_sts_chg);
1566 priv->r->set_port_reg_le(priv->irq_mask, priv->r->imr_port_link_sts_chg);
1567
1568 priv->link_state_irq = platform_get_irq(pdev, 0);
1569 pr_info("LINK state irq: %d\n", priv->link_state_irq);
1570 switch (priv->family_id) {
1571 case RTL8380_FAMILY_ID:
1572 err = request_irq(priv->link_state_irq, rtl838x_switch_irq,
1573 IRQF_SHARED, "rtl838x-link-state", priv->ds);
1574 break;
1575 case RTL8390_FAMILY_ID:
1576 err = request_irq(priv->link_state_irq, rtl839x_switch_irq,
1577 IRQF_SHARED, "rtl839x-link-state", priv->ds);
1578 break;
1579 case RTL9300_FAMILY_ID:
1580 err = request_irq(priv->link_state_irq, rtl930x_switch_irq,
1581 IRQF_SHARED, "rtl930x-link-state", priv->ds);
1582 break;
1583 case RTL9310_FAMILY_ID:
1584 err = request_irq(priv->link_state_irq, rtl931x_switch_irq,
1585 IRQF_SHARED, "rtl931x-link-state", priv->ds);
1586 break;
1587 }
1588 if (err) {
1589 dev_err(dev, "Error setting up switch interrupt.\n");
1590 /* Need to free allocated switch here */
1591 }
1592
1593 /* Enable interrupts for switch, on RTL931x, the IRQ is always on globally */
1594 if (soc_info.family != RTL9310_FAMILY_ID)
1595 sw_w32(0x1, priv->r->imr_glb);
1596
1597 rtl83xx_get_l2aging(priv);
1598
1599 rtl83xx_setup_qos(priv);
1600
1601 priv->r->l3_setup(priv);
1602
1603 /* Clear all destination ports for mirror groups */
1604 for (i = 0; i < 4; i++)
1605 priv->mirror_group_ports[i] = -1;
1606
1607 /*
1608 * Register netdevice event callback to catch changes in link aggregation groups
1609 */
1610 priv->nb.notifier_call = rtl83xx_netdevice_event;
1611 err = register_netdevice_notifier(&priv->nb);
if (err) {
1612 priv->nb.notifier_call = NULL;
1613 dev_err(dev, "Failed to register LAG netdev notifier\n");
1614 goto err_register_nb;
1615 }
1616
1617 // Initialize hash table for L3 routing
1618 rhltable_init(&priv->routes, &route_ht_params);
1619
1620 /*
1621 * Register netevent notifier callback to catch notifications about neighboring
1622 * changes to update nexthop entries for L3 routing.
1623 */
1624 priv->ne_nb.notifier_call = rtl83xx_netevent_event;
1625 err = register_netevent_notifier(&priv->ne_nb);
if (err) {
1626 priv->ne_nb.notifier_call = NULL;
1627 dev_err(dev, "Failed to register netevent notifier\n");
1628 goto err_register_ne_nb;
1629 }
1630
1631 priv->fib_nb.notifier_call = rtl83xx_fib_event;
1632
1633 /*
1634 * Register Forwarding Information Base notifier to offload routes where
1635 * possible.
1636 * Only FIBs pointing to our own netdevs are programmed into
1637 * the device, so no need to pass a callback.
1638 */
1639 err = register_fib_notifier(&init_net, &priv->fib_nb, NULL, NULL);
1640 if (err)
1641 goto err_register_fib_nb;
1642
1643 // TODO: put this into l2_setup()
1644 // Flood BPDUs to all ports including cpu-port
1645 if (soc_info.family != RTL9300_FAMILY_ID) {
1646 bpdu_mask = soc_info.family == RTL8380_FAMILY_ID ? 0x1FFFFFFF : 0x1FFFFFFFFFFFFF;
1647 priv->r->set_port_reg_be(bpdu_mask, priv->r->rma_bpdu_fld_pmask);
1648
1649 // TRAP 802.1X frames (EAPOL) to the CPU-Port, bypass STP and VLANs
1650 sw_w32(7, priv->r->spcl_trap_eapol_ctrl);
1651
1652 rtl838x_dbgfs_init(priv);
1653 } else {
1654 rtl930x_dbgfs_init(priv);
1655 }
1656
1657 return 0;
1658
1659 err_register_fib_nb:
1660 unregister_netevent_notifier(&priv->ne_nb);
1661 err_register_ne_nb:
1662 unregister_netdevice_notifier(&priv->nb);
1663 err_register_nb:
1664 return err;
1665 }
1666
1667 static int rtl83xx_sw_remove(struct platform_device *pdev)
1668 {
1669 // TODO:
1670 pr_debug("Removing platform driver for rtl83xx-sw\n");
1671 return 0;
1672 }
1673
1674 static const struct of_device_id rtl83xx_switch_of_ids[] = {
1675 { .compatible = "realtek,rtl83xx-switch"},
1676 { /* sentinel */ }
1677 };
1678
1679
1680 MODULE_DEVICE_TABLE(of, rtl83xx_switch_of_ids);
1681
1682 static struct platform_driver rtl83xx_switch_driver = {
1683 .probe = rtl83xx_sw_probe,
1684 .remove = rtl83xx_sw_remove,
1685 .driver = {
1686 .name = "rtl83xx-switch",
1687 .pm = NULL,
1688 .of_match_table = rtl83xx_switch_of_ids,
1689 },
1690 };
1691
1692 module_platform_driver(rtl83xx_switch_driver);
1693
1694 MODULE_AUTHOR("B. Koblitz");
1695 MODULE_DESCRIPTION("RTL83XX SoC Switch Driver");
1696 MODULE_LICENSE("GPL");