rtl930x: Rework per port LED configuration
[openwrt/staging/jow.git] / target / linux / realtek / files-5.15 / drivers / net / dsa / rtl83xx / common.c
1 // SPDX-License-Identifier: GPL-2.0-only
2
3 #include <linux/of_mdio.h>
4 #include <linux/of_platform.h>
5 #include <net/arp.h>
6 #include <net/nexthop.h>
7 #include <net/neighbour.h>
8 #include <net/netevent.h>
9 #include <linux/etherdevice.h>
10 #include <linux/if_vlan.h>
11 #include <linux/inetdevice.h>
12 #include <linux/rhashtable.h>
13 #include <linux/of_net.h>
14 #include <asm/mach-rtl838x/mach-rtl83xx.h>
15
16 #include "rtl83xx.h"
17
18 extern struct rtl83xx_soc_info soc_info;
19
20 extern const struct rtl838x_reg rtl838x_reg;
21 extern const struct rtl838x_reg rtl839x_reg;
22 extern const struct rtl838x_reg rtl930x_reg;
23 extern const struct rtl838x_reg rtl931x_reg;
24
25 extern const struct dsa_switch_ops rtl83xx_switch_ops;
26 extern const struct dsa_switch_ops rtl930x_switch_ops;
27
28 DEFINE_MUTEX(smi_lock);
29
30 int rtl83xx_port_get_stp_state(struct rtl838x_switch_priv *priv, int port)
31 {
32 u32 msti = 0;
33 u32 port_state[4];
34 int index, bit;
35 int pos = port;
36 int n = priv->port_width << 1;
37
38 /* Ports at or above the CPU port can never be configured */
39 if (port >= priv->cpu_port)
40 return -1;
41
42 mutex_lock(&priv->reg_mutex);
43
44 /* For the RTL839x and following, the bits are left-aligned in the 64/128 bit field */
45 if (priv->family_id == RTL8390_FAMILY_ID)
46 pos += 12;
47 if (priv->family_id == RTL9300_FAMILY_ID)
48 pos += 3;
49 if (priv->family_id == RTL9310_FAMILY_ID)
50 pos += 8;
51
52 index = n - (pos >> 4) - 1;
53 bit = (pos << 1) % 32;
54
55 priv->r->stp_get(priv, msti, port_state);
56
57 mutex_unlock(&priv->reg_mutex);
58
59 return (port_state[index] >> bit) & 3;
60 }
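
/* Worked example of the bit arithmetic above (illustrative only): on RTL8380
 * (port_width = 1, so n = 2, and no family offset) every port occupies two bits.
 * For port 5: pos = 5, index = 2 - (5 >> 4) - 1 = 1, bit = (5 << 1) % 32 = 10,
 * so the STP state is (port_state[1] >> 10) & 3.
 */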
61
62 static struct table_reg rtl838x_tbl_regs[] = {
63 TBL_DESC(0x6900, 0x6908, 3, 15, 13, 1), /* RTL8380_TBL_L2 */
64 TBL_DESC(0x6914, 0x6918, 18, 14, 12, 1), /* RTL8380_TBL_0 */
65 TBL_DESC(0xA4C8, 0xA4CC, 6, 14, 12, 1), /* RTL8380_TBL_1 */
66
67 TBL_DESC(0x1180, 0x1184, 3, 16, 14, 0), /* RTL8390_TBL_L2 */
68 TBL_DESC(0x1190, 0x1194, 17, 15, 12, 0), /* RTL8390_TBL_0 */
69 TBL_DESC(0x6B80, 0x6B84, 4, 14, 12, 0), /* RTL8390_TBL_1 */
70 TBL_DESC(0x611C, 0x6120, 9, 8, 6, 0), /* RTL8390_TBL_2 */
71
72 TBL_DESC(0xB320, 0xB334, 3, 18, 16, 0), /* RTL9300_TBL_L2 */
73 TBL_DESC(0xB340, 0xB344, 19, 16, 12, 0), /* RTL9300_TBL_0 */
74 TBL_DESC(0xB3A0, 0xB3A4, 20, 16, 13, 0), /* RTL9300_TBL_1 */
75 TBL_DESC(0xCE04, 0xCE08, 6, 14, 12, 0), /* RTL9300_TBL_2 */
76 TBL_DESC(0xD600, 0xD604, 30, 7, 6, 0), /* RTL9300_TBL_HSB */
77 TBL_DESC(0x7880, 0x7884, 22, 9, 8, 0), /* RTL9300_TBL_HSA */
78
79 TBL_DESC(0x8500, 0x8508, 8, 19, 15, 0), /* RTL9310_TBL_0 */
80 TBL_DESC(0x40C0, 0x40C4, 22, 16, 14, 0), /* RTL9310_TBL_1 */
81 TBL_DESC(0x8528, 0x852C, 6, 18, 14, 0), /* RTL9310_TBL_2 */
82 TBL_DESC(0x0200, 0x0204, 9, 15, 12, 0), /* RTL9310_TBL_3 */
83 TBL_DESC(0x20dc, 0x20e0, 29, 7, 6, 0), /* RTL9310_TBL_4 */
84 TBL_DESC(0x7e1c, 0x7e20, 53, 8, 6, 0), /* RTL9310_TBL_5 */
85 };
86
87 void rtl_table_init(void)
88 {
89 for (int i = 0; i < RTL_TBL_END; i++)
90 mutex_init(&rtl838x_tbl_regs[i].lock);
91 }
92
93 /* Request access to table t in table access register r
94 * Returns a handle to a lock for that table
95 */
96 struct table_reg *rtl_table_get(rtl838x_tbl_reg_t r, int t)
97 {
98 if (r >= RTL_TBL_END)
99 return NULL;
100
101 if (t >= BIT(rtl838x_tbl_regs[r].c_bit - rtl838x_tbl_regs[r].t_bit))
102 return NULL;
103
104 mutex_lock(&rtl838x_tbl_regs[r].lock);
105 rtl838x_tbl_regs[r].tbl = t;
106
107 return &rtl838x_tbl_regs[r];
108 }
109
110 /* Release a table r, unlock the corresponding lock */
111 void rtl_table_release(struct table_reg *r)
112 {
113 if (!r)
114 return;
115
116 /* pr_info("Unlocking %08x\n", (u32)r); */
117 mutex_unlock(&r->lock);
118 /* pr_info("Unlock done\n"); */
119 }
120
121 static int rtl_table_exec(struct table_reg *r, bool is_write, int idx)
122 {
123 int ret = 0;
124 u32 cmd, val;
125
126 /* Read/write bit has inverted meaning on RTL838x */
127 if (r->rmode)
128 cmd = is_write ? 0 : BIT(r->c_bit);
129 else
130 cmd = is_write ? BIT(r->c_bit) : 0;
131
132 cmd |= BIT(r->c_bit + 1); /* Execute bit */
133 cmd |= r->tbl << r->t_bit; /* Table type */
134 cmd |= idx & (BIT(r->t_bit) - 1); /* Index */
135
136 sw_w32(cmd, r->addr);
137
138 ret = readx_poll_timeout(sw_r32, r->addr, val,
139 !(val & BIT(r->c_bit + 1)), 20, 10000);
140 if (ret)
141 pr_err("%s: timeout\n", __func__);
142
143 return ret;
144 }
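
/* Worked example (illustrative, assuming the TBL_DESC fields above are ordered
 * (addr, data, max_data, c_bit, t_bit, rmode)): reading index 5 of table type 0
 * via RTL8380_TBL_L2 (c_bit = 15, t_bit = 13, rmode = 1) writes
 * BIT(16) | BIT(15) | (0 << 13) | 5 = 0x18005 to register 0x6900 and then polls
 * until the execute bit BIT(16) clears.
 */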
145
146 /* Reads table index idx into the data registers of the table */
147 int rtl_table_read(struct table_reg *r, int idx)
148 {
149 return rtl_table_exec(r, false, idx);
150 }
151
152 /* Writes the content of the table data registers into the table at index idx */
153 int rtl_table_write(struct table_reg *r, int idx)
154 {
155 return rtl_table_exec(r, true, idx);
156 }
157
158 /* Returns the address of the ith data register of table register r.
159  * The address is relative to the beginning of the Switch-IO block at 0xbb000000.
160 */
161 inline u16 rtl_table_data(struct table_reg *r, int i)
162 {
163 if (i >= r->max_data)
164 i = r->max_data - 1;
165 return r->data + i * 4;
166 }
167
168 inline u32 rtl_table_data_r(struct table_reg *r, int i)
169 {
170 return sw_r32(rtl_table_data(r, i));
171 }
172
173 inline void rtl_table_data_w(struct table_reg *r, u32 v, int i)
174 {
175 sw_w32(v, rtl_table_data(r, i));
176 }
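
/* Typical access pattern for the table helpers above (illustrative sketch only,
 * not part of the driver; idx and first_word are placeholders):
 *
 *	struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 0);
 *
 *	if (q) {
 *		if (!rtl_table_read(q, idx))
 *			first_word = rtl_table_data_r(q, 0);
 *		rtl_table_release(q);
 *	}
 */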
177
178 /* Port register accessor functions for the RTL838x and RTL930X SoCs */
179 void rtl838x_mask_port_reg(u64 clear, u64 set, int reg)
180 {
181 sw_w32_mask((u32)clear, (u32)set, reg);
182 }
183
184 void rtl838x_set_port_reg(u64 set, int reg)
185 {
186 sw_w32((u32)set, reg);
187 }
188
189 u64 rtl838x_get_port_reg(int reg)
190 {
191 return ((u64)sw_r32(reg));
192 }
193
194 /* Port register accessor functions for the RTL839x and RTL931X SoCs */
195 void rtl839x_mask_port_reg_be(u64 clear, u64 set, int reg)
196 {
197 sw_w32_mask((u32)(clear >> 32), (u32)(set >> 32), reg);
198 sw_w32_mask((u32)(clear & 0xffffffff), (u32)(set & 0xffffffff), reg + 4);
199 }
200
201 u64 rtl839x_get_port_reg_be(int reg)
202 {
203 u64 v = sw_r32(reg);
204
205 v <<= 32;
206 v |= sw_r32(reg + 4);
207
208 return v;
209 }
210
211 void rtl839x_set_port_reg_be(u64 set, int reg)
212 {
213 sw_w32(set >> 32, reg);
214 sw_w32(set & 0xffffffff, reg + 4);
215 }
216
217 void rtl839x_mask_port_reg_le(u64 clear, u64 set, int reg)
218 {
219 sw_w32_mask((u32)clear, (u32)set, reg);
220 sw_w32_mask((u32)(clear >> 32), (u32)(set >> 32), reg + 4);
221 }
222
223 void rtl839x_set_port_reg_le(u64 set, int reg)
224 {
225 sw_w32(set, reg);
226 sw_w32(set >> 32, reg + 4);
227 }
228
229 u64 rtl839x_get_port_reg_le(int reg)
230 {
231 u64 v = sw_r32(reg + 4);
232
233 v <<= 32;
234 v |= sw_r32(reg);
235
236 return v;
237 }
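
/* Illustration of the two layouts implemented above: the _be accessors place
 * bits 63..32 of the 64-bit port mask at reg and bits 31..0 at reg + 4, the _le
 * accessors do the opposite. For example, rtl839x_set_port_reg_be(BIT_ULL(35), reg)
 * writes 0x8 to reg and 0x0 to reg + 4, while rtl839x_set_port_reg_le(BIT_ULL(35), reg)
 * writes 0x0 to reg and 0x8 to reg + 4.
 */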
238
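/* Family-independent PHY register access for read_phy()/write_phy() below:
 * dispatch to the SoC-specific SMI implementation based on the detected family.
 */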
239 int read_phy(u32 port, u32 page, u32 reg, u32 *val)
240 {
241 switch (soc_info.family) {
242 case RTL8380_FAMILY_ID:
243 return rtl838x_read_phy(port, page, reg, val);
244 case RTL8390_FAMILY_ID:
245 return rtl839x_read_phy(port, page, reg, val);
246 case RTL9300_FAMILY_ID:
247 return rtl930x_read_phy(port, page, reg, val);
248 case RTL9310_FAMILY_ID:
249 return rtl931x_read_phy(port, page, reg, val);
250 }
251
252 return -1;
253 }
254
255 int write_phy(u32 port, u32 page, u32 reg, u32 val)
256 {
257 switch (soc_info.family) {
258 case RTL8380_FAMILY_ID:
259 return rtl838x_write_phy(port, page, reg, val);
260 case RTL8390_FAMILY_ID:
261 return rtl839x_write_phy(port, page, reg, val);
262 case RTL9300_FAMILY_ID:
263 return rtl930x_write_phy(port, page, reg, val);
264 case RTL9310_FAMILY_ID:
265 return rtl931x_write_phy(port, page, reg, val);
266 }
267
268 return -1;
269 }
270
271 static int __init rtl83xx_mdio_probe(struct rtl838x_switch_priv *priv)
272 {
273 struct device *dev = priv->dev;
274 struct device_node *dn, *phy_node, *led_node, *mii_np = dev->of_node;
275 struct mii_bus *bus;
276 int ret;
277 u32 pn;
278
279 pr_debug("In %s\n", __func__);
280 mii_np = of_find_compatible_node(NULL, NULL, "realtek,rtl838x-mdio");
281 if (mii_np) {
282 pr_debug("Found compatible MDIO node!\n");
283 } else {
284 dev_err(priv->dev, "no %s child node found\n", "mdio-bus");
285 return -ENODEV;
286 }
287
288 priv->mii_bus = of_mdio_find_bus(mii_np);
289 if (!priv->mii_bus) {
290 pr_debug("Deferring probe of mdio bus\n");
291 return -EPROBE_DEFER;
292 }
293 if (!of_device_is_available(mii_np))
294 return -ENODEV;
295
296 bus = devm_mdiobus_alloc(priv->ds->dev);
297 if (!bus)
298 return -ENOMEM;
299
300 bus->name = "rtl838x slave mii";
301
302 /* Since the NIC driver is loaded first, we can use the mdio rw functions
303 * assigned there.
304 */
305 bus->read = priv->mii_bus->read;
306 bus->write = priv->mii_bus->write;
307 bus->read_paged = priv->mii_bus->read_paged;
308 bus->write_paged = priv->mii_bus->write_paged;
309 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", bus->name, dev->id);
310
311 bus->parent = dev;
312 priv->ds->slave_mii_bus = bus;
313 priv->ds->slave_mii_bus->priv = priv->mii_bus->priv;
314 priv->ds->slave_mii_bus->access_capabilities = priv->mii_bus->access_capabilities;
315
316 ret = mdiobus_register(priv->ds->slave_mii_bus);
317 if (ret) {
318 of_node_put(mii_np);
319 return ret;
320 }
321
322 dn = of_find_compatible_node(NULL, NULL, "realtek,rtl83xx-switch");
323 if (!dn) {
324 dev_err(priv->dev, "No RTL switch node in DTS\n");
325 return -ENODEV;
326 }
327
328 led_node = of_find_compatible_node(NULL, NULL, "realtek,rtl9300-leds");
329
330 for_each_node_by_name(dn, "port") {
331 phy_interface_t interface;
332 u32 led_set;
333 char led_set_str[16] = {0};
334
335 if (!of_device_is_available(dn))
336 continue;
337
338 if (of_property_read_u32(dn, "reg", &pn))
339 continue;
340
341 phy_node = of_parse_phandle(dn, "phy-handle", 0);
342 if (!phy_node) {
343 if (pn != priv->cpu_port)
344 dev_err(priv->dev, "Port node %d is missing a phy-handle\n", pn);
345 continue;
346 }
347
348 if (of_property_read_u32(phy_node, "sds", &priv->ports[pn].sds_num))
349 priv->ports[pn].sds_num = -1;
350 pr_debug("%s port %d has SDS %d\n", __func__, pn, priv->ports[pn].sds_num);
351
352 if (of_get_phy_mode(dn, &interface))
353 interface = PHY_INTERFACE_MODE_NA;
354 if (interface == PHY_INTERFACE_MODE_HSGMII)
355 priv->ports[pn].is2G5 = true;
356 if (interface == PHY_INTERFACE_MODE_USXGMII)
357 priv->ports[pn].is2G5 = priv->ports[pn].is10G = true;
358 if (interface == PHY_INTERFACE_MODE_10GBASER)
359 priv->ports[pn].is10G = true;
360
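/* Per-port LED configuration: a port may reference one of the led_set<N>
 * groups of the realtek,rtl9300-leds node via its "led-set" property; the
 * number of u32 entries in that group gives the number of LEDs on the port
 * (at most 4).
 */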
361 priv->ports[pn].leds_on_this_port = 0;
362 if (led_node) {
363 if (of_property_read_u32(dn, "led-set", &led_set))
364 led_set = 0;
365 priv->ports[pn].led_set = led_set;
366 sprintf(led_set_str, "led_set%d", led_set);
367 priv->ports[pn].leds_on_this_port = of_property_count_u32_elems(led_node, led_set_str);
368 if (priv->ports[pn].leds_on_this_port > 4) {
369 dev_err(priv->dev, "invalid led_set %d configuration for port %d\n", led_set, pn);
370 return -ENODEV;
371 }
372 }
373
374 /* Check for the integrated SerDes of the RTL8380M first */
375 if (of_property_read_bool(phy_node, "phy-is-integrated")
376 && priv->id == 0x8380 && pn >= 24) {
377 pr_debug("----> FÓUND A SERDES\n");
378 priv->ports[pn].phy = PHY_RTL838X_SDS;
379 continue;
380 }
381
382 if (priv->id >= 0x9300) {
383 priv->ports[pn].phy_is_integrated = false;
384 if (of_property_read_bool(phy_node, "phy-is-integrated")) {
385 priv->ports[pn].phy_is_integrated = true;
386 priv->ports[pn].phy = PHY_RTL930X_SDS;
387 }
388 } else {
389 if (of_property_read_bool(phy_node, "phy-is-integrated") &&
390 !of_property_read_bool(phy_node, "sfp")) {
391 priv->ports[pn].phy = PHY_RTL8218B_INT;
392 continue;
393 }
394 }
395
396 if (!of_property_read_bool(phy_node, "phy-is-integrated") &&
397 of_property_read_bool(phy_node, "sfp")) {
398 priv->ports[pn].phy = PHY_RTL8214FC;
399 continue;
400 }
401
402 if (!of_property_read_bool(phy_node, "phy-is-integrated") &&
403 !of_property_read_bool(phy_node, "sfp")) {
404 priv->ports[pn].phy = PHY_RTL8218B_EXT;
405 continue;
406 }
407 }
408
409 /* Disable MAC polling of the PHYs so that we can start configuration */
410 priv->r->set_port_reg_le(0ULL, priv->r->smi_poll_ctrl);
411
412 /* Enable PHY control via SoC */
413 if (priv->family_id == RTL8380_FAMILY_ID) {
414 /* Enable SerDes NWAY and PHY control via SoC */
415 sw_w32_mask(BIT(7), BIT(15), RTL838X_SMI_GLB_CTRL);
416 } else if (priv->family_id == RTL8390_FAMILY_ID) {
417 /* Disable PHY polling via SoC */
418 sw_w32_mask(BIT(7), 0, RTL839X_SMI_GLB_CTRL);
419 }
420
421 /* Power on fibre ports and reset them if necessary */
422 if (priv->ports[24].phy == PHY_RTL838X_SDS) {
423 pr_debug("Powering on fibre ports & reset\n");
424 rtl8380_sds_power(24, 1);
425 rtl8380_sds_power(26, 1);
426 }
427
428 pr_debug("%s done\n", __func__);
429
430 return 0;
431 }
432
433 static int __init rtl83xx_get_l2aging(struct rtl838x_switch_priv *priv)
434 {
435 int t = sw_r32(priv->r->l2_ctrl_1);
436
437 t &= priv->family_id == RTL8380_FAMILY_ID ? 0x7fffff : 0x1FFFFF;
438
439 if (priv->family_id == RTL8380_FAMILY_ID)
440 t = t * 128 / 625; /* Aging time in seconds. 0: L2 aging disabled */
441 else
442 t = (t * 3) / 5;
443
444 pr_debug("L2 AGING time: %d sec\n", t);
445 pr_debug("Dynamic aging for ports: %x\n", sw_r32(priv->r->l2_port_aging_out));
446
447 return t;
448 }
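
/* Example of the conversion above (plain arithmetic, for illustration): on the
 * RTL838x a raw value of 625 corresponds to 625 * 128 / 625 = 128 seconds, on
 * the other families a raw value of 500 corresponds to 500 * 3 / 5 = 300 seconds.
 */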
449
450 /* Caller must hold priv->reg_mutex */
451 int rtl83xx_lag_add(struct dsa_switch *ds, int group, int port, struct netdev_lag_upper_info *info)
452 {
453 struct rtl838x_switch_priv *priv = ds->priv;
454 int i;
455 u32 algomsk = 0;
456 u32 algoidx = 0;
457
458 if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
459 pr_err("%s: Only mode LACP 802.3ad (4) allowed.\n", __func__);
460 return -EINVAL;
461 }
462
463 if (group >= priv->n_lags) {
464 pr_err("%s: LAG %d invalid.\n", __func__, group);
465 return -EINVAL;
466 }
467
468 if (port >= priv->cpu_port) {
469 pr_err("%s: Port %d invalid.\n", __func__, port);
470 return -EINVAL;
471 }
472
473 for (i = 0; i < priv->n_lags; i++) {
474 if (priv->lags_port_members[i] & BIT_ULL(port))
475 break;
476 }
477 if (i != priv->n_lags) {
478 pr_err("%s: Port %d already member of LAG %d.\n", __func__, port, i);
479 return -ENOSPC;
480 }
481
482 switch (info->hash_type) {
483 case NETDEV_LAG_HASH_L2:
484 algomsk |= TRUNK_DISTRIBUTION_ALGO_DMAC_BIT;
485 algomsk |= TRUNK_DISTRIBUTION_ALGO_SMAC_BIT;
486 break;
487 case NETDEV_LAG_HASH_L23:
488 algomsk |= TRUNK_DISTRIBUTION_ALGO_DMAC_BIT;
489 algomsk |= TRUNK_DISTRIBUTION_ALGO_SMAC_BIT;
490 algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; /* source ip */
491 algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; /* dest ip */
492 algoidx = 1;
493 break;
494 case NETDEV_LAG_HASH_L34:
495 algomsk |= TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT; /* sport */
496 algomsk |= TRUNK_DISTRIBUTION_ALGO_DST_L4PORT_BIT; /* dport */
497 algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; /* source ip */
498 algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; /* dest ip */
499 algoidx = 2;
500 break;
501 default:
502 algomsk |= 0x7f;
503 }
504 priv->r->set_distribution_algorithm(group, algoidx, algomsk);
505 priv->r->mask_port_reg_be(0, BIT_ULL(port), priv->r->trk_mbr_ctr(group));
506 priv->lags_port_members[group] |= BIT_ULL(port);
507
508 pr_info("%s: Added port %d to LAG %d. Members now %016llx.\n",
509 __func__, port, group, priv->lags_port_members[group]);
510
511 return 0;
512 }
513
514 /* Caller must hold priv->reg_mutex */
515 int rtl83xx_lag_del(struct dsa_switch *ds, int group, int port)
516 {
517 struct rtl838x_switch_priv *priv = ds->priv;
518
519 if (group >= priv->n_lags) {
520 pr_err("%s: LAG %d invalid.\n", __func__, group);
521 return -EINVAL;
522 }
523
524 if (port >= priv->cpu_port) {
525 pr_err("%s: Port %d invalid.\n", __func__, port);
526 return -EINVAL;
527 }
528
529 if (!(priv->lags_port_members[group] & BIT_ULL(port))) {
530 pr_err("%s: Port %d not member of LAG %d.\n", __func__, port, group);
531 return -ENOSPC;
532 }
533
534 /* 0x7f algo mask all */
535 priv->r->mask_port_reg_be(BIT_ULL(port), 0, priv->r->trk_mbr_ctr(group));
536 priv->lags_port_members[group] &= ~BIT_ULL(port);
537
538 pr_info("%s: Removed port %d from LAG %d. Members now %016llx.\n",
539 __func__, port, group, priv->lags_port_members[group]);
540
541 return 0;
542 }
543
544 // Currently Unused
545 // /* Allocate a 64 bit octet counter located in the LOG HW table */
546 // static int rtl83xx_octet_cntr_alloc(struct rtl838x_switch_priv *priv)
547 // {
548 // int idx;
549
550 // mutex_lock(&priv->reg_mutex);
551
552 // idx = find_first_zero_bit(priv->octet_cntr_use_bm, MAX_COUNTERS);
553 // if (idx >= priv->n_counters) {
554 // mutex_unlock(&priv->reg_mutex);
555 // return -1;
556 // }
557
558 // set_bit(idx, priv->octet_cntr_use_bm);
559 // mutex_unlock(&priv->reg_mutex);
560
561 // return idx;
562 // }
563
564 /* Allocate a 32-bit packet counter
565  * Two 32-bit packet counters share the location of a 64-bit octet counter.
566  * Initially no packet counters are free; two of them become available each time
567  * the corresponding octet counter is allocated.
568 */
569 int rtl83xx_packet_cntr_alloc(struct rtl838x_switch_priv *priv)
570 {
571 int idx, j;
572
573 mutex_lock(&priv->reg_mutex);
574
575 /* Because initially no packet counters are free, the logic is reversed:
576 * a 0-bit means the counter is already allocated (for octets)
577 */
578 idx = find_first_bit(priv->packet_cntr_use_bm, MAX_COUNTERS * 2);
579 if (idx >= priv->n_counters * 2) {
580 j = find_first_zero_bit(priv->octet_cntr_use_bm, MAX_COUNTERS);
581 if (j >= priv->n_counters) {
582 mutex_unlock(&priv->reg_mutex);
583 return -1;
584 }
585 set_bit(j, priv->octet_cntr_use_bm);
586 idx = j * 2;
587 set_bit(j * 2 + 1, priv->packet_cntr_use_bm);
588
589 } else {
590 clear_bit(idx, priv->packet_cntr_use_bm);
591 }
592
593 mutex_unlock(&priv->reg_mutex);
594
595 return idx;
596 }
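
/* Illustration of the sharing scheme above: octet counter j overlays packet
 * counters 2 * j and 2 * j + 1. With empty bitmaps, the first call allocates
 * octet counter 0, returns packet counter 0 and marks packet counter 1 as free;
 * the second call then simply returns packet counter 1.
 */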
597
598 /* Add an L2 nexthop entry for the L3 routing system / PIE forwarding in the SoC
599 * Use VID and MAC in rtl838x_l2_entry to identify either a free slot in the L2 hash table
600  * or mark an existing entry as a nexthop by setting its nexthop bit.
601  * Called from the L3 layer.
602  * The index in the L2 hash table is filled into nh->l2_id.
603 */
604 int rtl83xx_l2_nexthop_add(struct rtl838x_switch_priv *priv, struct rtl83xx_nexthop *nh)
605 {
606 struct rtl838x_l2_entry e;
607 u64 seed = priv->r->l2_hash_seed(nh->mac, nh->rvid);
608 u32 key = priv->r->l2_hash_key(priv, seed);
609 int idx = -1;
610 u64 entry;
611
612 pr_debug("%s searching for %08llx vid %d with key %d, seed: %016llx\n",
613 __func__, nh->mac, nh->rvid, key, seed);
614
615 e.type = L2_UNICAST;
616 u64_to_ether_addr(nh->mac, &e.mac[0]);
617 e.port = nh->port;
618
619 /* Loop over all entries in the hash-bucket and over the second block on 93xx SoCs */
620 for (int i = 0; i < priv->l2_bucket_size; i++) {
621 entry = priv->r->read_l2_entry_using_hash(key, i, &e);
622
623 if (!e.valid || ((entry & 0x0fffffffffffffffULL) == seed)) {
624 idx = i > 3 ? ((key >> 14) & 0xffff) | i >> 1
625 : ((key << 2) | i) & 0xffff;
626 break;
627 }
628 }
629
630 if (idx < 0) {
631 pr_err("%s: No more L2 forwarding entries available\n", __func__);
632 return -1;
633 }
634
635 /* Found an existing (e->valid is true) or empty entry, make it a nexthop entry */
636 nh->l2_id = idx;
637 if (e.valid) {
638 nh->port = e.port;
639 nh->vid = e.vid; /* Save VID */
640 nh->rvid = e.rvid;
641 nh->dev_id = e.stack_dev;
642 /* If the entry is already a valid next hop entry, don't change it */
643 if (e.next_hop)
644 return 0;
645 } else {
646 e.valid = true;
647 e.is_static = true;
648 e.rvid = nh->rvid;
649 e.is_ip_mc = false;
650 e.is_ipv6_mc = false;
651 e.block_da = false;
652 e.block_sa = false;
653 e.suspended = false;
654 e.age = 0; /* With port-ignore */
655 e.port = priv->port_ignore;
656 u64_to_ether_addr(nh->mac, &e.mac[0]);
657 }
658 e.next_hop = true;
659 e.nh_route_id = nh->id; /* NH route ID takes the place of the VID */
660 e.nh_vlan_target = false;
661
662 priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e);
663
664 return 0;
665 }
666
667 /* Removes a Layer 2 next hop entry in the forwarding database
668 * If it was static, the entire entry is removed, otherwise the nexthop bit is cleared
669 * and we wait until the entry ages out
670 */
671 int rtl83xx_l2_nexthop_rm(struct rtl838x_switch_priv *priv, struct rtl83xx_nexthop *nh)
672 {
673 struct rtl838x_l2_entry e;
674 u32 key = nh->l2_id >> 2;
675 int i = nh->l2_id & 0x3;
676 u64 entry = priv->r->read_l2_entry_using_hash(key, i, &e);
677
678 pr_debug("%s: id %d, key %d, index %d\n", __func__, nh->l2_id, key, i);
679 if (!e.valid) {
680 dev_err(priv->dev, "unknown nexthop, id %x\n", nh->l2_id);
681 return -1;
682 }
683
684 if (e.is_static)
685 e.valid = false;
686 e.next_hop = false;
687 e.vid = nh->vid; /* Restore VID */
688 e.rvid = nh->rvid;
689
690 priv->r->write_l2_entry_using_hash(key, i, &e);
691
692 return 0;
693 }
694
695 static int rtl83xx_handle_changeupper(struct rtl838x_switch_priv *priv,
696 struct net_device *ndev,
697 struct netdev_notifier_changeupper_info *info)
698 {
699 struct net_device *upper = info->upper_dev;
700 struct netdev_lag_upper_info *lag_upper_info = NULL;
701 int i, j, err;
702
703 if (!netif_is_lag_master(upper))
704 return 0;
705
706 mutex_lock(&priv->reg_mutex);
707
708 for (i = 0; i < priv->n_lags; i++) {
709 if ((!priv->lag_devs[i]) || (priv->lag_devs[i] == upper))
710 break;
711 }
712 for (j = 0; j < priv->cpu_port; j++) {
713 if (priv->ports[j].dp->slave == ndev)
714 break;
715 }
716 if (j >= priv->cpu_port) {
717 err = -EINVAL;
718 goto out;
719 }
720
721 if (info->linking) {
722 lag_upper_info = info->upper_info;
723 if (!priv->lag_devs[i])
724 priv->lag_devs[i] = upper;
725 err = rtl83xx_lag_add(priv->ds, i, priv->ports[j].dp->index, lag_upper_info);
726 if (err) {
727 err = -EINVAL;
728 goto out;
729 }
730 } else {
731 if (!priv->lag_devs[i]) {
732 err = -EINVAL;
goto out;
}
733 err = rtl83xx_lag_del(priv->ds, i, priv->ports[j].dp->index);
734 if (err) {
735 err = -EINVAL;
736 goto out;
737 }
738 if (!priv->lags_port_members[i])
739 priv->lag_devs[i] = NULL;
740 }
741
742 out:
743 mutex_unlock(&priv->reg_mutex);
744
745 return 0;
746 }
747
748 /* Is the lower network device a DSA slave network device of our switch?
749  * Unfortunately we cannot just follow dev->dsa_ptr as this is only set for the
750 * DSA master device.
751 */
752 int rtl83xx_port_is_under(const struct net_device * dev, struct rtl838x_switch_priv *priv)
753 {
754 /* TODO: On 5.12:
755 * if(!dsa_slave_dev_check(dev)) {
756 * netdev_info(dev, "%s: not a DSA device.\n", __func__);
757 * return -EINVAL;
758 * }
759 */
760
761 for (int i = 0; i < priv->cpu_port; i++) {
762 if (!priv->ports[i].dp)
763 continue;
764 if (priv->ports[i].dp->slave == dev)
765 return i;
766 }
767
768 return -EINVAL;
769 }
770
771 static int rtl83xx_netdevice_event(struct notifier_block *this,
772 unsigned long event, void *ptr)
773 {
774 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
775 struct rtl838x_switch_priv *priv;
776 int err = 0;
777
778 pr_debug("In: %s, event: %lu\n", __func__, event);
779
780 if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
781 return NOTIFY_DONE;
782
783 priv = container_of(this, struct rtl838x_switch_priv, nb);
784 switch (event) {
785 case NETDEV_CHANGEUPPER:
786 err = rtl83xx_handle_changeupper(priv, ndev, ptr);
787 break;
788 }
789
790 if (err)
791 return err;
792
793 return NOTIFY_DONE;
794 }
795
796 static const struct rhashtable_params route_ht_params = {
797 .key_len = sizeof(u32),
798 .key_offset = offsetof(struct rtl83xx_route, gw_ip),
799 .head_offset = offsetof(struct rtl83xx_route, linkage),
800 };
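
/* Routes are kept in an rhltable (a list-valued rhashtable) keyed by the gateway
 * IP, since several prefixes may share the same gateway. Lookup and iteration
 * follow the usual pattern (sketch, assuming rcu_read_lock() is held):
 *
 *	list = rhltable_lookup(&priv->routes, &gw_ip, route_ht_params);
 *	rhl_for_each_entry_rcu(r, tmp, list, linkage)
 *		...;
 */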
801
802 /* Updates an L3 next hop entry in the ROUTING table */
803 static int rtl83xx_l3_nexthop_update(struct rtl838x_switch_priv *priv, __be32 ip_addr, u64 mac)
804 {
805 struct rtl83xx_route *r;
806 struct rhlist_head *tmp, *list;
807
808 rcu_read_lock();
809 list = rhltable_lookup(&priv->routes, &ip_addr, route_ht_params);
810 if (!list) {
811 rcu_read_unlock();
812 return -ENOENT;
813 }
814
815 rhl_for_each_entry_rcu(r, tmp, list, linkage) {
816 pr_info("%s: Setting up fwding: ip %pI4, GW mac %016llx\n",
817 __func__, &ip_addr, mac);
818
819 /* Reads the ROUTING table entry associated with the route */
820 priv->r->route_read(r->id, r);
821 pr_info("Route with id %d to %pI4 / %d\n", r->id, &r->dst_ip, r->prefix_len);
822
823 r->nh.mac = r->nh.gw = mac;
824 r->nh.port = priv->port_ignore;
825 r->nh.id = r->id;
826
827 /* Do we need to explicitly add a DMAC entry with the route's nh index? */
828 if (priv->r->set_l3_egress_mac)
829 priv->r->set_l3_egress_mac(r->id, mac);
830
831 /* Update ROUTING table: map gateway-mac and switch-mac id to route id */
832 rtl83xx_l2_nexthop_add(priv, &r->nh);
833
834 r->attr.valid = true;
835 r->attr.action = ROUTE_ACT_FORWARD;
836 r->attr.type = 0;
837 r->attr.hit = false; /* Reset route-used indicator */
838
839 /* Add PIE entry with dst_ip and prefix_len */
840 r->pr.dip = r->dst_ip;
841 r->pr.dip_m = inet_make_mask(r->prefix_len);
842
843 if (r->is_host_route) {
844 int slot = priv->r->find_l3_slot(r, false);
845
846 pr_info("%s: Got slot for route: %d\n", __func__, slot);
847 priv->r->host_route_write(slot, r);
848 } else {
849 priv->r->route_write(r->id, r);
850 r->pr.fwd_sel = true;
851 r->pr.fwd_data = r->nh.l2_id;
852 r->pr.fwd_act = PIE_ACT_ROUTE_UC;
853 }
854
855 if (priv->r->set_l3_nexthop)
856 priv->r->set_l3_nexthop(r->nh.id, r->nh.l2_id, r->nh.if_id);
857
858 if (r->pr.id < 0) {
859 r->pr.packet_cntr = rtl83xx_packet_cntr_alloc(priv);
860 if (r->pr.packet_cntr >= 0) {
861 pr_info("Using packet counter %d\n", r->pr.packet_cntr);
862 r->pr.log_sel = true;
863 r->pr.log_data = r->pr.packet_cntr;
864 }
865 priv->r->pie_rule_add(priv, &r->pr);
866 } else {
867 int pkts = priv->r->packet_cntr_read(r->pr.packet_cntr);
868 pr_info("%s: total packets: %d\n", __func__, pkts);
869
870 priv->r->pie_rule_write(priv, r->pr.id, &r->pr);
871 }
872 }
873 rcu_read_unlock();
874
875 return 0;
876 }
877
878 static int rtl83xx_port_ipv4_resolve(struct rtl838x_switch_priv *priv,
879 struct net_device *dev, __be32 ip_addr)
880 {
881 struct neighbour *n = neigh_lookup(&arp_tbl, &ip_addr, dev);
882 int err = 0;
883 u64 mac;
884
885 if (!n) {
886 n = neigh_create(&arp_tbl, &ip_addr, dev);
887 if (IS_ERR(n))
888 return PTR_ERR(n);
889 }
890
891 /* If the neigh is already resolved, then go ahead and
892 * install the entry, otherwise start the ARP process to
893 * resolve the neigh.
894 */
895 if (n->nud_state & NUD_VALID) {
896 mac = ether_addr_to_u64(n->ha);
897 pr_info("%s: resolved mac: %016llx\n", __func__, mac);
898 rtl83xx_l3_nexthop_update(priv, ip_addr, mac);
899 } else {
900 pr_info("%s: need to wait\n", __func__);
901 neigh_event_send(n, NULL);
902 }
903
904 neigh_release(n);
905
906 return err;
907 }
908
909 struct rtl83xx_walk_data {
910 struct rtl838x_switch_priv *priv;
911 int port;
912 };
913
914 static int rtl83xx_port_lower_walk(struct net_device *lower, struct netdev_nested_priv *_priv)
915 {
916 struct rtl83xx_walk_data *data = (struct rtl83xx_walk_data *)_priv->data;
917 struct rtl838x_switch_priv *priv = data->priv;
918 int ret = 0;
919 int index;
920
921 index = rtl83xx_port_is_under(lower, priv);
922 data->port = index;
923 if (index >= 0) {
924 pr_debug("Found DSA-port, index %d\n", index);
925 ret = 1;
926 }
927
928 return ret;
929 }
930
931 int rtl83xx_port_dev_lower_find(struct net_device *dev, struct rtl838x_switch_priv *priv)
932 {
933 struct rtl83xx_walk_data data;
934 struct netdev_nested_priv _priv;
935
936 data.priv = priv;
937 data.port = 0;
938 _priv.data = (void *)&data;
939
940 netdev_walk_all_lower_dev(dev, rtl83xx_port_lower_walk, &_priv);
941
942 return data.port;
943 }
944
945 static struct rtl83xx_route *rtl83xx_route_alloc(struct rtl838x_switch_priv *priv, u32 ip)
946 {
947 struct rtl83xx_route *r;
948 int idx = 0, err;
949
950 mutex_lock(&priv->reg_mutex);
951
952 idx = find_first_zero_bit(priv->route_use_bm, MAX_ROUTES);
953 pr_debug("%s id: %d, ip %pI4\n", __func__, idx, &ip);
954
955 r = kzalloc(sizeof(*r), GFP_KERNEL);
956 if (!r) {
957 mutex_unlock(&priv->reg_mutex);
958 return r;
959 }
960
961 r->id = idx;
962 r->gw_ip = ip;
963 r->pr.id = -1; /* We still need to allocate a rule in HW */
964 r->is_host_route = false;
965
966 err = rhltable_insert(&priv->routes, &r->linkage, route_ht_params);
967 if (err) {
968 pr_err("Could not insert new rule\n");
969 mutex_unlock(&priv->reg_mutex);
970 goto out_free;
971 }
972
973 set_bit(idx, priv->route_use_bm);
974
975 mutex_unlock(&priv->reg_mutex);
976
977 return r;
978
979 out_free:
980 kfree(r);
981
982 return NULL;
983 }
984
985
986 static struct rtl83xx_route *rtl83xx_host_route_alloc(struct rtl838x_switch_priv *priv, u32 ip)
987 {
988 struct rtl83xx_route *r;
989 int idx = 0, err;
990
991 mutex_lock(&priv->reg_mutex);
992
993 idx = find_first_zero_bit(priv->host_route_use_bm, MAX_HOST_ROUTES);
994 pr_debug("%s id: %d, ip %pI4\n", __func__, idx, &ip);
995
996 r = kzalloc(sizeof(*r), GFP_KERNEL);
997 if (!r) {
998 mutex_unlock(&priv->reg_mutex);
999 return r;
1000 }
1001
1002 /* We require a unique route ID irrespective of whether it is a prefix or host
1003 * route (on RTL93xx) as we use this ID to associate a DMAC and next-hop entry
1004 */
1005 r->id = idx + MAX_ROUTES;
1006
1007 r->gw_ip = ip;
1008 r->pr.id = -1; /* We still need to allocate a rule in HW */
1009 r->is_host_route = true;
1010
1011 err = rhltable_insert(&priv->routes, &r->linkage, route_ht_params);
1012 if (err) {
1013 pr_err("Could not insert new rule\n");
1014 mutex_unlock(&priv->reg_mutex);
1015 goto out_free;
1016 }
1017
1018 set_bit(idx, priv->host_route_use_bm);
1019
1020 mutex_unlock(&priv->reg_mutex);
1021
1022 return r;
1023
1024 out_free:
1025 kfree(r);
1026
1027 return NULL;
1028 }
1029
1030
1031
1032 static void rtl83xx_route_rm(struct rtl838x_switch_priv *priv, struct rtl83xx_route *r)
1033 {
1034 int id;
1035
1036 if (rhltable_remove(&priv->routes, &r->linkage, route_ht_params))
1037 dev_warn(priv->dev, "Could not remove route\n");
1038
1039 if (r->is_host_route) {
1040 id = priv->r->find_l3_slot(r, false);
1041 pr_debug("%s: Got id for host route: %d\n", __func__, id);
1042 r->attr.valid = false;
1043 priv->r->host_route_write(id, r);
1044 clear_bit(r->id - MAX_ROUTES, priv->host_route_use_bm);
1045 } else {
1046 /* If there is a HW representation of the route, delete it */
1047 if (priv->r->route_lookup_hw) {
1048 id = priv->r->route_lookup_hw(r);
1049 pr_info("%s: Got id for prefix route: %d\n", __func__, id);
1050 r->attr.valid = false;
1051 priv->r->route_write(id, r);
1052 }
1053 clear_bit(r->id, priv->route_use_bm);
1054 }
1055
1056 kfree(r);
1057 }
1058
1059 static int rtl83xx_fib4_del(struct rtl838x_switch_priv *priv,
1060 struct fib_entry_notifier_info *info)
1061 {
1062 struct fib_nh *nh = fib_info_nh(info->fi, 0);
1063 struct rtl83xx_route *r;
1064 struct rhlist_head *tmp, *list;
1065
1066 pr_debug("In %s, ip %pI4, len %d\n", __func__, &info->dst, info->dst_len);
1067 rcu_read_lock();
1068 list = rhltable_lookup(&priv->routes, &nh->fib_nh_gw4, route_ht_params);
1069 if (!list) {
1070 rcu_read_unlock();
1071 pr_err("%s: no such gateway: %pI4\n", __func__, &nh->fib_nh_gw4);
1072 return -ENOENT;
1073 }
1074 rhl_for_each_entry_rcu(r, tmp, list, linkage) {
1075 if (r->dst_ip == info->dst && r->prefix_len == info->dst_len) {
1076 pr_info("%s: found a route with id %d, nh-id %d\n",
1077 __func__, r->id, r->nh.id);
1078 break;
1079 }
1080 }
1081 rcu_read_unlock();
1082
1083 rtl83xx_l2_nexthop_rm(priv, &r->nh);
1084
1085 pr_debug("%s: Releasing packet counter %d\n", __func__, r->pr.packet_cntr);
1086 set_bit(r->pr.packet_cntr, priv->packet_cntr_use_bm);
1087 priv->r->pie_rule_rm(priv, &r->pr);
1088
1089 rtl83xx_route_rm(priv, r);
1090
1091 nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
1092
1093 return 0;
1094 }
1095
1096 /* On the RTL93xx, an L3 termination endpoint MAC address, on which the router
1097  * accepts packets to be routed, needs to be allocated.
1098 */
1099 static int rtl83xx_alloc_router_mac(struct rtl838x_switch_priv *priv, u64 mac)
1100 {
1101 int free_mac = -1;
1102 struct rtl93xx_rt_mac m;
1103
1104 mutex_lock(&priv->reg_mutex);
1105 for (int i = 0; i < MAX_ROUTER_MACS; i++) {
1106 priv->r->get_l3_router_mac(i, &m);
1107 if (free_mac < 0 && !m.valid) {
1108 free_mac = i;
1109 continue;
1110 }
1111 if (m.valid && m.mac == mac) {
1112 free_mac = i;
1113 break;
1114 }
1115 }
1116
1117 if (free_mac < 0) {
1118 pr_err("No free router MACs, cannot offload\n");
1119 mutex_unlock(&priv->reg_mutex);
1120 return -1;
1121 }
1122
1123 m.valid = true;
1124 m.mac = mac;
1125 m.p_type = 0; /* An individual port, not a trunk port */
1126 m.p_id = 0x3f; /* Listen on any port */
1127 m.p_id_mask = 0;
1128 m.vid = 0; /* Listen on any VLAN... */
1129 m.vid_mask = 0; /* ... so mask needs to be 0 */
1130 m.mac_mask = 0xffffffffffffULL; /* We want an exact match of the interface MAC */
1131 m.action = L3_FORWARD; /* Route the packet */
1132 priv->r->set_l3_router_mac(free_mac, &m);
1133
1134 mutex_unlock(&priv->reg_mutex);
1135
1136 return 0;
1137 }
1138
1139 static int rtl83xx_alloc_egress_intf(struct rtl838x_switch_priv *priv, u64 mac, int vlan)
1140 {
1141 int free_mac = -1;
1142 struct rtl838x_l3_intf intf;
1143 u64 m;
1144
1145 mutex_lock(&priv->reg_mutex);
1146 for (int i = 0; i < MAX_SMACS; i++) {
1147 m = priv->r->get_l3_egress_mac(L3_EGRESS_DMACS + i);
1148 if (free_mac < 0 && !m) {
1149 free_mac = i;
1150 continue;
1151 }
1152 if (m == mac) {
1153 mutex_unlock(&priv->reg_mutex);
1154 return i;
1155 }
1156 }
1157
1158 if (free_mac < 0) {
1159 pr_err("No free egress interface, cannot offload\n");
mutex_unlock(&priv->reg_mutex);
1160 return -1;
1161 }
1162
1163 /* Set up default egress interface 1 */
1164 intf.vid = vlan;
1165 intf.smac_idx = free_mac;
1166 intf.ip4_mtu_id = 1;
1167 intf.ip6_mtu_id = 1;
1168 intf.ttl_scope = 1; /* TTL */
1169 intf.hl_scope = 1; /* Hop Limit */
1170 intf.ip4_icmp_redirect = intf.ip6_icmp_redirect = 2; /* FORWARD */
1171 intf.ip4_pbr_icmp_redirect = intf.ip6_pbr_icmp_redirect = 2; /* FORWARD; */
1172 priv->r->set_l3_egress_intf(free_mac, &intf);
1173
1174 priv->r->set_l3_egress_mac(L3_EGRESS_DMACS + free_mac, mac);
1175
1176 mutex_unlock(&priv->reg_mutex);
1177
1178 return free_mac;
1179 }
1180
1181 static int rtl83xx_fib4_add(struct rtl838x_switch_priv *priv,
1182 struct fib_entry_notifier_info *info)
1183 {
1184 struct fib_nh *nh = fib_info_nh(info->fi, 0);
1185 struct net_device *dev = nh->fib_nh_dev;
1186 int port;
1187 struct rtl83xx_route *r;
1188 bool to_localhost;
1189 int vlan = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 0;
1190
1191 pr_debug("In %s, ip %pI4, len %d\n", __func__, &info->dst, info->dst_len);
1192 if (!info->dst) {
1193 pr_info("Not offloading default route for now\n");
1194 return 0;
1195 }
1196
1197 pr_debug("GW: %pI4, interface name %s, mac %016llx, vlan %d\n", &nh->fib_nh_gw4, dev->name,
1198 ether_addr_to_u64(dev->dev_addr), vlan
1199 );
1200
1201 port = rtl83xx_port_dev_lower_find(dev, priv);
1202 if (port < 0)
1203 return -1;
1204
1205 /* For now we only work with routes that have a gateway and are not ourself */
1206 /* if ((!nh->fib_nh_gw4) && (info->dst_len != 32)) */
1207 /* return 0; */
1208
1209 if ((info->dst & 0xff) == 0xff)
1210 return 0;
1211
1212 /* Do not offload routes to 192.168.100.x */
1213 if ((info->dst & 0xffffff00) == 0xc0a86400)
1214 return 0;
1215
1216 /* Do not offload routes to 127.x.x.x */
1217 if ((info->dst & 0xff000000) == 0x7f000000)
1218 return 0;
1219
1220 /* Allocate a route or host-route entry (if the hardware supports the latter) */
1221 if (info->dst_len == 32 && priv->r->host_route_write)
1222 r = rtl83xx_host_route_alloc(priv, nh->fib_nh_gw4);
1223 else
1224 r = rtl83xx_route_alloc(priv, nh->fib_nh_gw4);
1225
1226 if (!r) {
1227 pr_err("%s: No more free route entries\n", __func__);
1228 return -1;
1229 }
1230
1231 r->dst_ip = info->dst;
1232 r->prefix_len = info->dst_len;
1233 r->nh.rvid = vlan;
1234 to_localhost = !nh->fib_nh_gw4;
1235
1236 if (priv->r->set_l3_router_mac) {
1237 u64 mac = ether_addr_to_u64(dev->dev_addr);
1238
1239 pr_debug("Local route and router mac %016llx\n", mac);
1240
1241 if (rtl83xx_alloc_router_mac(priv, mac))
1242 goto out_free_rt;
1243
1244 /* vid = 0: Do not care about VID */
1245 r->nh.if_id = rtl83xx_alloc_egress_intf(priv, mac, vlan);
1246 if (r->nh.if_id < 0)
1247 goto out_free_rmac;
1248
1249 if (to_localhost) {
1250 int slot;
1251
1252 r->nh.mac = mac;
1253 r->nh.port = priv->port_ignore;
1254 r->attr.valid = true;
1255 r->attr.action = ROUTE_ACT_TRAP2CPU;
1256 r->attr.type = 0;
1257
1258 slot = priv->r->find_l3_slot(r, false);
1259 pr_debug("%s: Got slot for route: %d\n", __func__, slot);
1260 priv->r->host_route_write(slot, r);
1261 }
1262 }
1263
1264 /* We need to resolve the mac address of the GW */
1265 if (!to_localhost)
1266 rtl83xx_port_ipv4_resolve(priv, dev, nh->fib_nh_gw4);
1267
1268 nh->fib_nh_flags |= RTNH_F_OFFLOAD;
1269
1270 return 0;
1271
1272 out_free_rmac:
1273 out_free_rt:
1274 return 0;
1275 }
1276
1277 static int rtl83xx_fib6_add(struct rtl838x_switch_priv *priv,
1278 struct fib6_entry_notifier_info *info)
1279 {
1280 pr_debug("In %s\n", __func__);
1281 /* nh->fib_nh_flags |= RTNH_F_OFFLOAD; */
1282
1283 return 0;
1284 }
1285
1286 struct net_event_work {
1287 struct work_struct work;
1288 struct rtl838x_switch_priv *priv;
1289 u64 mac;
1290 u32 gw_addr;
1291 };
1292
1293 static void rtl83xx_net_event_work_do(struct work_struct *work)
1294 {
1295 struct net_event_work *net_work =
1296 container_of(work, struct net_event_work, work);
1297 struct rtl838x_switch_priv *priv = net_work->priv;
1298
1299 rtl83xx_l3_nexthop_update(priv, net_work->gw_addr, net_work->mac);
1300
1301 kfree(net_work);
1302 }
1303
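/* Neighbour updates arrive in atomic context, so the actual nexthop update is
 * deferred to the work item above: the notifier only copies the MAC and gateway
 * address and schedules the work.
 */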
1304 static int rtl83xx_netevent_event(struct notifier_block *this,
1305 unsigned long event, void *ptr)
1306 {
1307 struct rtl838x_switch_priv *priv;
1308 struct net_device *dev;
1309 struct neighbour *n = ptr;
1310 int err = 0, port;
1311 struct net_event_work *net_work;
1312
1313 priv = container_of(this, struct rtl838x_switch_priv, ne_nb);
1314
1315 switch (event) {
1316 case NETEVENT_NEIGH_UPDATE:
1317 if (n->tbl != &arp_tbl)
1318 return NOTIFY_DONE;
1319 dev = n->dev;
1320 port = rtl83xx_port_dev_lower_find(dev, priv);
1321 if (port < 0 || !(n->nud_state & NUD_VALID)) {
1322 pr_debug("%s: Neigbour invalid, not updating\n", __func__);
1323 return NOTIFY_DONE;
1324 }
1325
1326 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
1327 if (!net_work)
1328 return NOTIFY_BAD;
1329
1330 INIT_WORK(&net_work->work, rtl83xx_net_event_work_do);
1331 net_work->priv = priv;
1332
1333 net_work->mac = ether_addr_to_u64(n->ha);
1334 net_work->gw_addr = *(__be32 *) n->primary_key;
1335
1336 pr_debug("%s: updating neighbour on port %d, mac %016llx\n",
1337 __func__, port, net_work->mac);
1338 schedule_work(&net_work->work);
1339 if (err)
1340 netdev_warn(dev, "failed to handle neigh update (err %d)\n", err);
1341 break;
1342 }
1343
1344 return NOTIFY_DONE;
1345 }
1346
1347 struct rtl83xx_fib_event_work {
1348 struct work_struct work;
1349 union {
1350 struct fib_entry_notifier_info fen_info;
1351 struct fib6_entry_notifier_info fen6_info;
1352 struct fib_rule_notifier_info fr_info;
1353 };
1354 struct rtl838x_switch_priv *priv;
1355 bool is_fib6;
1356 unsigned long event;
1357 };
1358
1359 static void rtl83xx_fib_event_work_do(struct work_struct *work)
1360 {
1361 struct rtl83xx_fib_event_work *fib_work =
1362 container_of(work, struct rtl83xx_fib_event_work, work);
1363 struct rtl838x_switch_priv *priv = fib_work->priv;
1364 struct fib_rule *rule;
1365 int err;
1366
1367 /* Protect internal structures from changes */
1368 rtnl_lock();
1369 pr_debug("%s: doing work, event %ld\n", __func__, fib_work->event);
1370 switch (fib_work->event) {
1371 case FIB_EVENT_ENTRY_ADD:
1372 case FIB_EVENT_ENTRY_REPLACE:
1373 case FIB_EVENT_ENTRY_APPEND:
1374 if (fib_work->is_fib6) {
1375 err = rtl83xx_fib6_add(priv, &fib_work->fen6_info);
1376 } else {
1377 err = rtl83xx_fib4_add(priv, &fib_work->fen_info);
1378 fib_info_put(fib_work->fen_info.fi);
1379 }
1380 if (err)
1381 pr_err("%s: FIB4 failed\n", __func__);
1382 break;
1383 case FIB_EVENT_ENTRY_DEL:
1384 rtl83xx_fib4_del(priv, &fib_work->fen_info);
1385 fib_info_put(fib_work->fen_info.fi);
1386 break;
1387 case FIB_EVENT_RULE_ADD:
1388 case FIB_EVENT_RULE_DEL:
1389 rule = fib_work->fr_info.rule;
1390 if (!fib4_rule_default(rule))
1391 pr_err("%s: FIB4 default rule failed\n", __func__);
1392 fib_rule_put(rule);
1393 break;
1394 }
1395 rtnl_unlock();
1396 kfree(fib_work);
1397 }
1398
1399 /* Called with rcu_read_lock() */
1400 static int rtl83xx_fib_event(struct notifier_block *this, unsigned long event, void *ptr)
1401 {
1402 struct fib_notifier_info *info = ptr;
1403 struct rtl838x_switch_priv *priv;
1404 struct rtl83xx_fib_event_work *fib_work;
1405
1406 if ((info->family != AF_INET && info->family != AF_INET6 &&
1407 info->family != RTNL_FAMILY_IPMR &&
1408 info->family != RTNL_FAMILY_IP6MR))
1409 return NOTIFY_DONE;
1410
1411 priv = container_of(this, struct rtl838x_switch_priv, fib_nb);
1412
1413 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
1414 if (!fib_work)
1415 return NOTIFY_BAD;
1416
1417 INIT_WORK(&fib_work->work, rtl83xx_fib_event_work_do);
1418 fib_work->priv = priv;
1419 fib_work->event = event;
1420 fib_work->is_fib6 = false;
1421
1422 switch (event) {
1423 case FIB_EVENT_ENTRY_ADD:
1424 case FIB_EVENT_ENTRY_REPLACE:
1425 case FIB_EVENT_ENTRY_APPEND:
1426 case FIB_EVENT_ENTRY_DEL:
1427 pr_debug("%s: FIB_ENTRY ADD/DEL, event %ld\n", __func__, event);
1428 if (info->family == AF_INET) {
1429 struct fib_entry_notifier_info *fen_info = ptr;
1430
1431 if (fen_info->fi->fib_nh_is_v6) {
1432 NL_SET_ERR_MSG_MOD(info->extack,
1433 "IPv6 gateway with IPv4 route is not supported");
1434 kfree(fib_work);
1435 return notifier_from_errno(-EINVAL);
1436 }
1437
1438 memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
1439 /* Take a reference on fib_info to prevent it from being
1440 * freed while work is queued. Release it afterwards.
1441 */
1442 fib_info_hold(fib_work->fen_info.fi);
1443
1444 } else if (info->family == AF_INET6) {
1445 //struct fib6_entry_notifier_info *fen6_info = ptr;
1446 pr_warn("%s: FIB_ENTRY ADD/DEL for IPv6 not supported\n", __func__);
1447 kfree(fib_work);
1448 return NOTIFY_DONE;
1449 }
1450 break;
1451
1452 case FIB_EVENT_RULE_ADD:
1453 case FIB_EVENT_RULE_DEL:
1454 pr_debug("%s: FIB_RULE ADD/DEL, event: %ld\n", __func__, event);
1455 memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
1456 fib_rule_get(fib_work->fr_info.rule);
1457 break;
1458 }
1459
1460 schedule_work(&fib_work->work);
1461
1462 return NOTIFY_DONE;
1463 }
1464
1465 static int __init rtl83xx_sw_probe(struct platform_device *pdev)
1466 {
1467 int err = 0;
1468 struct rtl838x_switch_priv *priv;
1469 struct device *dev = &pdev->dev;
1470 u64 bpdu_mask;
1471
1472 pr_debug("Probing RTL838X switch device\n");
1473 if (!pdev->dev.of_node) {
1474 dev_err(dev, "No DT found\n");
1475 return -EINVAL;
1476 }
1477
1478 /* Initialize access to RTL switch tables */
1479 rtl_table_init();
1480
1481 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1482 if (!priv)
1483 return -ENOMEM;
1484
1485 priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
1486
1487 if (!priv->ds)
1488 return -ENOMEM;
1489 priv->ds->dev = dev;
1490 priv->ds->priv = priv;
1491 priv->ds->ops = &rtl83xx_switch_ops;
1492 priv->ds->needs_standalone_vlan_filtering = true;
1493 priv->dev = dev;
1494
1495 mutex_init(&priv->reg_mutex);
1496
1497 priv->family_id = soc_info.family;
1498 priv->id = soc_info.id;
1499 switch (soc_info.family) {
1500 case RTL8380_FAMILY_ID:
1501 priv->ds->ops = &rtl83xx_switch_ops;
1502 priv->cpu_port = RTL838X_CPU_PORT;
1503 priv->port_mask = 0x1f;
1504 priv->port_width = 1;
1505 priv->irq_mask = 0x0FFFFFFF;
1506 priv->r = &rtl838x_reg;
1507 priv->ds->num_ports = 29;
1508 priv->fib_entries = 8192;
1509 rtl8380_get_version(priv);
1510 priv->n_lags = 8;
1511 priv->l2_bucket_size = 4;
1512 priv->n_pie_blocks = 12;
1513 priv->port_ignore = 0x1f;
1514 priv->n_counters = 128;
1515 break;
1516 case RTL8390_FAMILY_ID:
1517 priv->ds->ops = &rtl83xx_switch_ops;
1518 priv->cpu_port = RTL839X_CPU_PORT;
1519 priv->port_mask = 0x3f;
1520 priv->port_width = 2;
1521 priv->irq_mask = 0xFFFFFFFFFFFFFULL;
1522 priv->r = &rtl839x_reg;
1523 priv->ds->num_ports = 53;
1524 priv->fib_entries = 16384;
1525 rtl8390_get_version(priv);
1526 priv->n_lags = 16;
1527 priv->l2_bucket_size = 4;
1528 priv->n_pie_blocks = 18;
1529 priv->port_ignore = 0x3f;
1530 priv->n_counters = 1024;
1531 break;
1532 case RTL9300_FAMILY_ID:
1533 priv->ds->ops = &rtl930x_switch_ops;
1534 priv->cpu_port = RTL930X_CPU_PORT;
1535 priv->port_mask = 0x1f;
1536 priv->port_width = 1;
1537 priv->irq_mask = 0x0FFFFFFF;
1538 priv->r = &rtl930x_reg;
1539 priv->ds->num_ports = 29;
1540 priv->fib_entries = 16384;
1541 priv->version = RTL8390_VERSION_A;
1542 priv->n_lags = 16;
1543 sw_w32(1, RTL930X_ST_CTRL);
1544 priv->l2_bucket_size = 8;
1545 priv->n_pie_blocks = 16;
1546 priv->port_ignore = 0x3f;
1547 priv->n_counters = 2048;
1548 break;
1549 case RTL9310_FAMILY_ID:
1550 priv->ds->ops = &rtl930x_switch_ops;
1551 priv->cpu_port = RTL931X_CPU_PORT;
1552 priv->port_mask = 0x3f;
1553 priv->port_width = 2;
1554 priv->irq_mask = 0xFFFFFFFFFFFFFULL;
1555 priv->r = &rtl931x_reg;
1556 priv->ds->num_ports = 57;
1557 priv->fib_entries = 16384;
1558 priv->version = RTL8390_VERSION_A;
1559 priv->n_lags = 16;
1560 priv->l2_bucket_size = 8;
1561 break;
1562 }
1563 pr_debug("Chip version %c\n", priv->version);
1564
1565 err = rtl83xx_mdio_probe(priv);
1566 if (err) {
1567 /* Probing fails the first time because the ethernet driver is not yet
1568 * initialized. Use this to disable traffic in case the bootloader left it on
1569 */
1570 return err;
1571 }
1572
1573 err = dsa_register_switch(priv->ds);
1574 if (err) {
1575 dev_err(dev, "Error registering switch: %d\n", err);
1576 return err;
1577 }
1578
1579 /* dsa_to_port returns the dsa_port from the port list in the
1580 * dsa_switch_tree; the tree is built when the switch is
1581 * registered by dsa_register_switch()
1582 */
1583 for (int i = 0; i <= priv->cpu_port; i++)
1584 priv->ports[i].dp = dsa_to_port(priv->ds, i);
1585
1586 /* Enable link and media change interrupts. Are the SERDES masks needed? */
1587 sw_w32_mask(0, 3, priv->r->isr_glb_src);
1588
1589 priv->r->set_port_reg_le(priv->irq_mask, priv->r->isr_port_link_sts_chg);
1590 priv->r->set_port_reg_le(priv->irq_mask, priv->r->imr_port_link_sts_chg);
1591
1592 priv->link_state_irq = platform_get_irq(pdev, 0);
1593 pr_info("LINK state irq: %d\n", priv->link_state_irq);
1594 switch (priv->family_id) {
1595 case RTL8380_FAMILY_ID:
1596 err = request_irq(priv->link_state_irq, rtl838x_switch_irq,
1597 IRQF_SHARED, "rtl838x-link-state", priv->ds);
1598 break;
1599 case RTL8390_FAMILY_ID:
1600 err = request_irq(priv->link_state_irq, rtl839x_switch_irq,
1601 IRQF_SHARED, "rtl839x-link-state", priv->ds);
1602 break;
1603 case RTL9300_FAMILY_ID:
1604 err = request_irq(priv->link_state_irq, rtl930x_switch_irq,
1605 IRQF_SHARED, "rtl930x-link-state", priv->ds);
1606 break;
1607 case RTL9310_FAMILY_ID:
1608 err = request_irq(priv->link_state_irq, rtl931x_switch_irq,
1609 IRQF_SHARED, "rtl931x-link-state", priv->ds);
1610 break;
1611 }
1612 if (err) {
1613 dev_err(dev, "Error setting up switch interrupt.\n");
1614 /* Need to free allocated switch here */
1615 }
1616
1617 /* Enable interrupts for the switch; on RTL931X the IRQ is always enabled globally */
1618 if (soc_info.family != RTL9310_FAMILY_ID)
1619 sw_w32(0x1, priv->r->imr_glb);
1620
1621 rtl83xx_get_l2aging(priv);
1622
1623 rtl83xx_setup_qos(priv);
1624
1625 priv->r->l3_setup(priv);
1626
1627 /* Clear all destination ports for mirror groups */
1628 for (int i = 0; i < 4; i++)
1629 priv->mirror_group_ports[i] = -1;
1630
1631 /* Register netdevice event callback to catch changes in link aggregation groups */
1632 priv->nb.notifier_call = rtl83xx_netdevice_event;
1633 if (register_netdevice_notifier(&priv->nb)) {
1634 priv->nb.notifier_call = NULL;
1635 dev_err(dev, "Failed to register LAG netdev notifier\n");
1636 goto err_register_nb;
1637 }
1638
1639 /* Initialize hash table for L3 routing */
1640 rhltable_init(&priv->routes, &route_ht_params);
1641
1642 /* Register netevent notifier callback to catch neighbour changes and update
1643 * nexthop entries for L3 routing accordingly.
1644 */
1645 priv->ne_nb.notifier_call = rtl83xx_netevent_event;
1646 if (register_netevent_notifier(&priv->ne_nb)) {
1647 priv->ne_nb.notifier_call = NULL;
1648 dev_err(dev, "Failed to register netevent notifier\n");
1649 goto err_register_ne_nb;
1650 }
1651
1652 priv->fib_nb.notifier_call = rtl83xx_fib_event;
1653
1654 /* Register Forwarding Information Base notifier to offload routes where
1655 * possible.
1656 * Only FIBs pointing to our own netdevs are programmed into
1657 * the device, so no need to pass a callback.
1658 */
1659 err = register_fib_notifier(&init_net, &priv->fib_nb, NULL, NULL);
1660 if (err)
1661 goto err_register_fib_nb;
1662
1663 /* TODO: put this into l2_setup() */
1664 /* Flood BPDUs to all ports including cpu-port */
1665 if (soc_info.family != RTL9300_FAMILY_ID) {
1666 bpdu_mask = soc_info.family == RTL8380_FAMILY_ID ? 0x1FFFFFFF : 0x1FFFFFFFFFFFFF;
1667 priv->r->set_port_reg_be(bpdu_mask, priv->r->rma_bpdu_fld_pmask);
1668
1669 /* TRAP 802.1X frames (EAPOL) to the CPU-Port, bypass STP and VLANs */
1670 sw_w32(7, priv->r->spcl_trap_eapol_ctrl);
1671
1672 rtl838x_dbgfs_init(priv);
1673 } else {
1674 rtl930x_dbgfs_init(priv);
1675 }
1676
1677 return 0;
1678
1679 err_register_fib_nb:
1680 unregister_netevent_notifier(&priv->ne_nb);
1681 err_register_ne_nb:
1682 unregister_netdevice_notifier(&priv->nb);
1683 err_register_nb:
1684 return err;
1685 }
1686
1687 static int rtl83xx_sw_remove(struct platform_device *pdev)
1688 {
1689 /* TODO: */
1690 pr_debug("Removing platform driver for rtl83xx-sw\n");
1691
1692 return 0;
1693 }
1694
1695 static const struct of_device_id rtl83xx_switch_of_ids[] = {
1696 { .compatible = "realtek,rtl83xx-switch"},
1697 { /* sentinel */ }
1698 };
1699
1700
1701 MODULE_DEVICE_TABLE(of, rtl83xx_switch_of_ids);
1702
1703 static struct platform_driver rtl83xx_switch_driver = {
1704 .probe = rtl83xx_sw_probe,
1705 .remove = rtl83xx_sw_remove,
1706 .driver = {
1707 .name = "rtl83xx-switch",
1708 .pm = NULL,
1709 .of_match_table = rtl83xx_switch_of_ids,
1710 },
1711 };
1712
1713 module_platform_driver(rtl83xx_switch_driver);
1714
1715 MODULE_AUTHOR("B. Koblitz");
1716 MODULE_DESCRIPTION("RTL83XX SoC Switch Driver");
1717 MODULE_LICENSE("GPL");