layerscape: define ls-append function
[openwrt/openwrt.git] / target / linux / layerscape / patches-4.9 / 705-dpaa2-support-layerscape.patch
1 From 77cc39e936f87463f92f7fddaaf0de51eec3972f Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Fri, 6 Jul 2018 15:30:21 +0800
4 Subject: [PATCH] dpaa2: support layerscape
5
6 This is an integrated patch for layerscape dpaa2 support.
7
8 Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
9 Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
10 Signed-off-by: Razvan Stefanescu <razvan.stefanescu@nxp.com>
11 Signed-off-by: costi <constantin.tudor@freescale.com>
12 Signed-off-by: Catalin Horghidan <catalin.horghidan@nxp.com>
13 Signed-off-by: Mathew McBride <matt@traverse.com.au>
14 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
15 ---
16 drivers/soc/fsl/ls2-console/Kconfig | 4 +
17 drivers/soc/fsl/ls2-console/Makefile | 1 +
18 drivers/soc/fsl/ls2-console/ls2-console.c | 284 ++
19 drivers/staging/fsl-dpaa2/ethernet/Makefile | 12 +
20 drivers/staging/fsl-dpaa2/ethernet/README | 186 +
21 drivers/staging/fsl-dpaa2/ethernet/TODO | 18 +
22 .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c | 1253 ++++++
23 .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h | 182 +
24 .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 357 ++
25 .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 60 +
26 .../fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 185 +
27 .../staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 3734 +++++++++++++++++
28 .../staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 601 +++
29 .../fsl-dpaa2/ethernet/dpaa2-ethtool.c | 878 ++++
30 drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 176 +
31 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 719 ++++
32 drivers/staging/fsl-dpaa2/ethernet/dpni.c | 2112 ++++++++++
33 drivers/staging/fsl-dpaa2/ethernet/dpni.h | 1172 ++++++
34 drivers/staging/fsl-dpaa2/ethernet/net.h | 480 +++
35 drivers/staging/fsl-dpaa2/ethsw/Makefile | 10 +
36 drivers/staging/fsl-dpaa2/ethsw/README | 106 +
37 drivers/staging/fsl-dpaa2/ethsw/TODO | 14 +
38 drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 359 ++
39 drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 1165 +++++
40 drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 592 +++
41 .../staging/fsl-dpaa2/ethsw/ethsw-ethtool.c | 206 +
42 drivers/staging/fsl-dpaa2/ethsw/ethsw.c | 1438 +++++++
43 drivers/staging/fsl-dpaa2/ethsw/ethsw.h | 90 +
44 drivers/staging/fsl-dpaa2/evb/Kconfig | 7 +
45 drivers/staging/fsl-dpaa2/evb/Makefile | 10 +
46 drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h | 279 ++
47 drivers/staging/fsl-dpaa2/evb/dpdmux.c | 1111 +++++
48 drivers/staging/fsl-dpaa2/evb/dpdmux.h | 453 ++
49 drivers/staging/fsl-dpaa2/evb/evb.c | 1354 ++++++
50 drivers/staging/fsl-dpaa2/mac/Kconfig | 23 +
51 drivers/staging/fsl-dpaa2/mac/Makefile | 10 +
52 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 172 +
53 drivers/staging/fsl-dpaa2/mac/dpmac.c | 619 +++
54 drivers/staging/fsl-dpaa2/mac/dpmac.h | 342 ++
55 drivers/staging/fsl-dpaa2/mac/mac.c | 673 +++
56 drivers/staging/fsl-dpaa2/rtc/Makefile | 10 +
57 drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h | 160 +
58 drivers/staging/fsl-dpaa2/rtc/dprtc.c | 746 ++++
59 drivers/staging/fsl-dpaa2/rtc/dprtc.h | 172 +
60 drivers/staging/fsl-dpaa2/rtc/rtc.c | 242 ++
61 include/linux/filter.h | 3 +
62 46 files changed, 22780 insertions(+)
63 create mode 100644 drivers/soc/fsl/ls2-console/Kconfig
64 create mode 100644 drivers/soc/fsl/ls2-console/Makefile
65 create mode 100644 drivers/soc/fsl/ls2-console/ls2-console.c
66 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Makefile
67 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/README
68 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/TODO
69 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
70 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
71 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
72 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
73 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
74 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
75 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
76 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
77 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpkg.h
78 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
79 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.c
80 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.h
81 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/net.h
82 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Makefile
83 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/README
84 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/TODO
85 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
86 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.c
87 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.h
88 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
89 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw.c
90 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw.h
91 create mode 100644 drivers/staging/fsl-dpaa2/evb/Kconfig
92 create mode 100644 drivers/staging/fsl-dpaa2/evb/Makefile
93 create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
94 create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.c
95 create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.h
96 create mode 100644 drivers/staging/fsl-dpaa2/evb/evb.c
97 create mode 100644 drivers/staging/fsl-dpaa2/mac/Kconfig
98 create mode 100644 drivers/staging/fsl-dpaa2/mac/Makefile
99 create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
100 create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.c
101 create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.h
102 create mode 100644 drivers/staging/fsl-dpaa2/mac/mac.c
103 create mode 100644 drivers/staging/fsl-dpaa2/rtc/Makefile
104 create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
105 create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc.c
106 create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc.h
107 create mode 100644 drivers/staging/fsl-dpaa2/rtc/rtc.c
108
109 --- /dev/null
110 +++ b/drivers/soc/fsl/ls2-console/Kconfig
111 @@ -0,0 +1,4 @@
112 +config FSL_LS2_CONSOLE
113 + tristate "Layerscape MC and AIOP console support"
114 + depends on ARCH_LAYERSCAPE
115 + default y
116 --- /dev/null
117 +++ b/drivers/soc/fsl/ls2-console/Makefile
118 @@ -0,0 +1 @@
119 +obj-$(CONFIG_FSL_LS2_CONSOLE) += ls2-console.o
120 --- /dev/null
121 +++ b/drivers/soc/fsl/ls2-console/ls2-console.c
122 @@ -0,0 +1,284 @@
123 +/* Copyright 2015-2016 Freescale Semiconductor Inc.
124 + *
125 + * Redistribution and use in source and binary forms, with or without
126 + * modification, are permitted provided that the following conditions are met:
127 + * * Redistributions of source code must retain the above copyright
128 + * notice, this list of conditions and the following disclaimer.
129 + * * Redistributions in binary form must reproduce the above copyright
130 + * notice, this list of conditions and the following disclaimer in the
131 + * documentation and/or other materials provided with the distribution.
132 + * * Neither the name of the above-listed copyright holders nor the
133 + * names of any contributors may be used to endorse or promote products
134 + * derived from this software without specific prior written permission.
135 + *
136 + *
137 + * ALTERNATIVELY, this software may be distributed under the terms of the
138 + * GNU General Public License ("GPL") as published by the Free Software
139 + * Foundation, either version 2 of that License or (at your option) any
140 + * later version.
141 + *
142 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
143 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
144 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
145 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
146 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
147 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
148 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
149 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
150 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
151 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
152 + * POSSIBILITY OF SUCH DAMAGE.
153 + */
154 +
155 +#include <linux/miscdevice.h>
156 +#include <linux/uaccess.h>
157 +#include <linux/poll.h>
158 +#include <linux/compat.h>
159 +#include <linux/module.h>
160 +#include <linux/slab.h>
161 +#include <linux/io.h>
162 +
163 +/* SoC address for the MC firmware base low/high registers */
164 +#define SOC_CCSR_MC_FW_BASE_ADDR_REGS 0x8340020
165 +#define SOC_CCSR_MC_FW_BASE_ADDR_REGS_SIZE 2
166 +/* MC firmware base low/high registers indexes */
167 +#define MCFBALR_OFFSET 0
168 +#define MCFBAHR_OFFSET 1
169 +
170 +/* Bit mask used to obtain the most significant part of the MC base address */
171 +#define MC_FW_HIGH_ADDR_MASK 0x1FFFF
172 +/* Bit mask used to obtain the least significant part of the MC base address */
173 +#define MC_FW_LOW_ADDR_MASK 0xE0000000
174 +
175 +#define MC_BUFFER_OFFSET 0x01000000
176 +#define MC_BUFFER_SIZE (1024*1024*16)
177 +#define MC_OFFSET_DELTA (MC_BUFFER_OFFSET)
178 +
179 +#define AIOP_BUFFER_OFFSET 0x06000000
180 +#define AIOP_BUFFER_SIZE (1024*1024*16)
181 +#define AIOP_OFFSET_DELTA (0)
182 +
183 +struct log_header {
184 + char magic_word[8]; /* magic word */
185 + uint32_t buf_start; /* holds the 32-bit little-endian
186 + * offset of the start of the buffer
187 + */
188 + uint32_t buf_length; /* holds the 32-bit little-endian
189 + * length of the buffer
190 + */
191 + uint32_t last_byte; /* holds the 32-bit little-endian offset
192 + * of the byte after the last byte that
193 + * was written
194 + */
195 + char reserved[44];
196 +};
197 +
198 +#define LOG_HEADER_FLAG_BUFFER_WRAPAROUND 0x80000000
199 +#define LOG_VERSION_MAJOR 1
200 +#define LOG_VERSION_MINOR 0
201 +
202 +
203 +#define invalidate(p) { asm volatile("dc ivac, %0" : : "r" (p) : "memory"); }
204 +
205 +struct console_data {
206 + char *map_addr;
207 + struct log_header *hdr;
208 + char *start_addr; /* Start of buffer */
209 + char *end_addr; /* End of buffer */
210 + char *end_of_data; /* Current end of data */
211 + char *cur_ptr; /* Last data sent to console */
212 +};
213 +
214 +#define LAST_BYTE(a) ((a) & ~(LOG_HEADER_FLAG_BUFFER_WRAPAROUND))
215 +
216 +static inline void __adjust_end(struct console_data *cd)
217 +{
218 + cd->end_of_data = cd->start_addr
219 + + LAST_BYTE(le32_to_cpu(cd->hdr->last_byte));
220 +}
221 +
222 +static inline void adjust_end(struct console_data *cd)
223 +{
224 + invalidate(cd->hdr);
225 + __adjust_end(cd);
226 +}
227 +
228 +static inline uint64_t get_mc_fw_base_address(void)
229 +{
230 + u32 *mcfbaregs = (u32 *) ioremap(SOC_CCSR_MC_FW_BASE_ADDR_REGS,
231 + SOC_CCSR_MC_FW_BASE_ADDR_REGS_SIZE);
232 + u64 mcfwbase = 0ULL;
233 +
234 + mcfwbase = readl(mcfbaregs + MCFBAHR_OFFSET) & MC_FW_HIGH_ADDR_MASK;
235 + mcfwbase <<= 32;
236 + mcfwbase |= readl(mcfbaregs + MCFBALR_OFFSET) & MC_FW_LOW_ADDR_MASK;
237 + iounmap(mcfbaregs);
238 + pr_info("fsl-ls2-console: MC base address at 0x%016llx\n", mcfwbase);
239 + return mcfwbase;
240 +}
241 +
242 +static int fsl_ls2_generic_console_open(struct inode *node, struct file *fp,
243 + u64 offset, u64 size,
244 + uint8_t *emagic, uint8_t magic_len,
245 + u32 offset_delta)
246 +{
247 + struct console_data *cd;
248 + uint8_t *magic;
249 + uint32_t wrapped;
250 +
251 + cd = kmalloc(sizeof(*cd), GFP_KERNEL);
252 + if (cd == NULL)
253 + return -ENOMEM;
254 + fp->private_data = cd;
255 + cd->map_addr = ioremap(get_mc_fw_base_address() + offset, size);
256 +
257 + cd->hdr = (struct log_header *) cd->map_addr;
258 + invalidate(cd->hdr);
259 +
260 + magic = cd->hdr->magic_word;
261 + if (memcmp(magic, emagic, magic_len)) {
262 + pr_info("magic didn't match!\n");
263 + pr_info("expected: %02x %02x %02x %02x %02x %02x %02x %02x\n",
264 + emagic[0], emagic[1], emagic[2], emagic[3],
265 + emagic[4], emagic[5], emagic[6], emagic[7]);
266 + pr_info(" seen: %02x %02x %02x %02x %02x %02x %02x %02x\n",
267 + magic[0], magic[1], magic[2], magic[3],
268 + magic[4], magic[5], magic[6], magic[7]);
269 +		iounmap(cd->map_addr);
270 +		kfree(cd);
271 + return -EIO;
272 + }
273 +
274 + cd->start_addr = cd->map_addr
275 + + le32_to_cpu(cd->hdr->buf_start) - offset_delta;
276 + cd->end_addr = cd->start_addr + le32_to_cpu(cd->hdr->buf_length);
277 +
278 + wrapped = le32_to_cpu(cd->hdr->last_byte)
279 + & LOG_HEADER_FLAG_BUFFER_WRAPAROUND;
280 +
281 + __adjust_end(cd);
282 + if (wrapped && (cd->end_of_data != cd->end_addr))
283 + cd->cur_ptr = cd->end_of_data+1;
284 + else
285 + cd->cur_ptr = cd->start_addr;
286 +
287 + return 0;
288 +}
289 +
290 +static int fsl_ls2_mc_console_open(struct inode *node, struct file *fp)
291 +{
292 + uint8_t magic_word[] = { 0, 1, 'C', 'M' };
293 +
294 + return fsl_ls2_generic_console_open(node, fp,
295 + MC_BUFFER_OFFSET, MC_BUFFER_SIZE,
296 + magic_word, sizeof(magic_word),
297 + MC_OFFSET_DELTA);
298 +}
299 +
300 +static int fsl_ls2_aiop_console_open(struct inode *node, struct file *fp)
301 +{
302 + uint8_t magic_word[] = { 'P', 'O', 'I', 'A' };
303 +
304 + return fsl_ls2_generic_console_open(node, fp,
305 + AIOP_BUFFER_OFFSET, AIOP_BUFFER_SIZE,
306 + magic_word, sizeof(magic_word),
307 + AIOP_OFFSET_DELTA);
308 +}
309 +
310 +static int fsl_ls2_console_close(struct inode *node, struct file *fp)
311 +{
312 + struct console_data *cd = fp->private_data;
313 +
314 + iounmap(cd->map_addr);
315 + kfree(cd);
316 + return 0;
317 +}
318 +
319 +ssize_t fsl_ls2_console_read(struct file *fp, char __user *buf, size_t count,
320 + loff_t *f_pos)
321 +{
322 + struct console_data *cd = fp->private_data;
323 + size_t bytes = 0;
324 + char data;
325 +
326 + /* Check if we need to adjust the end of data addr */
327 + adjust_end(cd);
328 +
329 + while ((count != bytes) && (cd->end_of_data != cd->cur_ptr)) {
330 + if (((u64)cd->cur_ptr) % 64 == 0)
331 + invalidate(cd->cur_ptr);
332 +
333 + data = *(cd->cur_ptr);
334 + if (copy_to_user(&buf[bytes], &data, 1))
335 + return -EFAULT;
336 + cd->cur_ptr++;
337 + if (cd->cur_ptr >= cd->end_addr)
338 + cd->cur_ptr = cd->start_addr;
339 + ++bytes;
340 + }
341 + return bytes;
342 +}
343 +
344 +static const struct file_operations fsl_ls2_mc_console_fops = {
345 + .owner = THIS_MODULE,
346 + .open = fsl_ls2_mc_console_open,
347 + .release = fsl_ls2_console_close,
348 + .read = fsl_ls2_console_read,
349 +};
350 +
351 +static struct miscdevice fsl_ls2_mc_console_dev = {
352 + .minor = MISC_DYNAMIC_MINOR,
353 + .name = "fsl_mc_console",
354 + .fops = &fsl_ls2_mc_console_fops
355 +};
356 +
357 +static const struct file_operations fsl_ls2_aiop_console_fops = {
358 + .owner = THIS_MODULE,
359 + .open = fsl_ls2_aiop_console_open,
360 + .release = fsl_ls2_console_close,
361 + .read = fsl_ls2_console_read,
362 +};
363 +
364 +static struct miscdevice fsl_ls2_aiop_console_dev = {
365 + .minor = MISC_DYNAMIC_MINOR,
366 + .name = "fsl_aiop_console",
367 + .fops = &fsl_ls2_aiop_console_fops
368 +};
369 +
370 +static int __init fsl_ls2_console_init(void)
371 +{
372 + int err = 0;
373 +
374 + pr_info("Freescale LS2 console driver\n");
375 + err = misc_register(&fsl_ls2_mc_console_dev);
376 + if (err) {
377 + pr_err("fsl_mc_console: cannot register device\n");
378 + return err;
379 + }
380 + pr_info("fsl-ls2-console: device %s registered\n",
381 + fsl_ls2_mc_console_dev.name);
382 +
383 + err = misc_register(&fsl_ls2_aiop_console_dev);
384 + if (err) {
385 + pr_err("fsl_aiop_console: cannot register device\n");
386 + return err;
387 + }
388 + pr_info("fsl-ls2-console: device %s registered\n",
389 + fsl_ls2_aiop_console_dev.name);
390 +
391 + return 0;
392 +}
393 +
394 +static void __exit fsl_ls2_console_exit(void)
395 +{
396 + misc_deregister(&fsl_ls2_mc_console_dev);
397 +
398 + misc_deregister(&fsl_ls2_aiop_console_dev);
399 +}
400 +
401 +module_init(fsl_ls2_console_init);
402 +module_exit(fsl_ls2_console_exit);
403 +
404 +MODULE_AUTHOR("Roy Pledge <roy.pledge@freescale.com>");
405 +MODULE_LICENSE("Dual BSD/GPL");
406 +MODULE_DESCRIPTION("Freescale LS2 console driver");
407 --- /dev/null
408 +++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
409 @@ -0,0 +1,12 @@
410 +#
411 +# Makefile for the Freescale DPAA2 Ethernet controller
412 +#
413 +
414 +obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
415 +
416 +fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
417 +fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o
418 +fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_CEETM} += dpaa2-eth-ceetm.o
419 +
420 +# Needed by the tracing framework
421 +CFLAGS_dpaa2-eth.o := -I$(src)
422 --- /dev/null
423 +++ b/drivers/staging/fsl-dpaa2/ethernet/README
424 @@ -0,0 +1,186 @@
425 +Freescale DPAA2 Ethernet driver
426 +===============================
427 +
428 +This file provides documentation for the Freescale DPAA2 Ethernet driver.
429 +
430 +
431 +Contents
432 +========
433 + Supported Platforms
434 + Architecture Overview
435 + Creating a Network Interface
436 + Features & Offloads
437 +
438 +
439 +Supported Platforms
440 +===================
441 +This driver provides networking support for Freescale DPAA2 SoCs, e.g.
442 +LS2080A, LS2088A, LS1088A.
443 +
444 +
445 +Architecture Overview
446 +=====================
447 +Unlike regular NICs, in the DPAA2 architecture there is no single hardware block
448 +representing network interfaces; instead, several separate hardware resources
449 +concur to provide the networking functionality:
450 + - network interfaces
451 + - queues, channels
452 + - buffer pools
453 + - MAC/PHY
454 +
455 +All hardware resources are allocated and configured through the Management
456 +Complex (MC) portals. MC abstracts most of these resources as DPAA2 objects
457 +and exposes ABIs through which they can be configured and controlled. A few
458 +hardware resources, like queues, do not have a corresponding MC object and
459 +are treated as internal resources of other objects.
460 +
461 +For a more detailed description of the DPAA2 architecture and its object
462 +abstractions see:
463 + drivers/staging/fsl-mc/README.txt
464 +
465 +Each Linux net device is built on top of a Datapath Network Interface (DPNI)
466 +object and uses Buffer Pools (DPBPs), I/O Portals (DPIOs) and Concentrators
467 +(DPCONs).
468 +
469 +Configuration interface:
470 +
471 + -----------------------
472 + | DPAA2 Ethernet Driver |
473 + -----------------------
474 + . . .
475 + . . .
476 + . . . . . . . . . . . .
477 + . . .
478 + . . .
479 + ---------- ---------- -----------
480 + | DPBP API | | DPNI API | | DPCON API |
481 + ---------- ---------- -----------
482 + . . . software
483 +=========== . ========== . ============ . ===================
484 + . . . hardware
485 + ------------------------------------------
486 + | MC hardware portals |
487 + ------------------------------------------
488 + . . .
489 + . . .
490 + ------ ------ -------
491 + | DPBP | | DPNI | | DPCON |
492 + ------ ------ -------
493 +
494 +The DPNIs are network interfaces without a direct one-on-one mapping to PHYs.
495 +DPBPs represent hardware buffer pools. Packet I/O is performed in the context
496 +of DPCON objects, using DPIO portals for managing and communicating with the
497 +hardware resources.
498 +
499 +Datapath (I/O) interface:
500 +
501 + -----------------------------------------------
502 + | DPAA2 Ethernet Driver |
503 + -----------------------------------------------
504 + | ^ ^ | |
505 + | | | | |
506 + enqueue| dequeue| data | dequeue| seed |
507 + (Tx) | (Rx, TxC)| avail.| request| buffers|
508 + | | notify| | |
509 + | | | | |
510 + V | | V V
511 + -----------------------------------------------
512 + | DPIO Driver |
513 + -----------------------------------------------
514 + | | | | | software
515 + | | | | | ================
516 + | | | | | hardware
517 + -----------------------------------------------
518 + | I/O hardware portals |
519 + -----------------------------------------------
520 + | ^ ^ | |
521 + | | | | |
522 + | | | V |
523 + V | ================ V
524 + ---------------------- | -------------
525 + queues ---------------------- | | Buffer pool |
526 + ---------------------- | -------------
527 + =======================
528 + Channel
529 +
530 +Datapath I/O (DPIO) portals provide enqueue and dequeue services, data
531 +availability notifications and buffer pool management. DPIOs are shared between
532 +all DPAA2 objects (and implicitly all DPAA2 kernel drivers) that work with data
533 +frames, but must be affine to the CPUs for the purpose of traffic distribution.
534 +
535 +Frames are transmitted and received through hardware frame queues, which can be
536 +grouped in channels for the purpose of hardware scheduling. The Ethernet driver
537 +enqueues TX frames on egress queues and after transmission is complete a TX
538 +confirmation frame is sent back to the CPU.
539 +
540 +When frames are available on ingress queues, a data availability notification
541 +is sent to the CPU; notifications are raised per channel, so even if multiple
542 +queues in the same channel have available frames, only one notification is sent.
543 +After a channel fires a notification, it must be explicitly rearmed.
544 +
545 +Each network interface can have multiple Rx, Tx and confirmation queues affined
546 +to CPUs, and one channel (DPCON) for each CPU that services at least one queue.
547 +DPCONs are used to distribute ingress traffic to different CPUs via the cores'
548 +affine DPIOs.
549 +
550 +The role of hardware buffer pools is storage of ingress frame data. Each network
551 +interface has a privately owned buffer pool which it seeds with kernel allocated
552 +buffers.
553 +
554 +
555 +DPNIs are decoupled from PHYs; a DPNI can be connected to a PHY through a DPMAC
556 +object or to another DPNI through an internal link, but the connection is
557 +managed by MC and completely transparent to the Ethernet driver.
558 +
559 + --------- --------- ---------
560 + | eth if1 | | eth if2 | | eth ifn |
561 + --------- --------- ---------
562 + . . .
563 + . . .
564 + . . .
565 + ---------------------------
566 + | DPAA2 Ethernet Driver |
567 + ---------------------------
568 + . . .
569 + . . .
570 + . . .
571 + ------ ------ ------ -------
572 + | DPNI | | DPNI | | DPNI | | DPMAC |----+
573 + ------ ------ ------ ------- |
574 + | | | | |
575 + | | | | -----
576 + =========== ================== | PHY |
577 + -----
578 +
579 +Creating a Network Interface
580 +============================
581 +A net device is created for each DPNI object probed on the MC bus. Each DPNI has
582 +a number of properties which determine the network interface configuration
583 +options and associated hardware resources.
584 +
585 +DPNI objects (and the other DPAA2 objects needed for a network interface) can be
586 +added to a container on the MC bus in one of two ways: statically, through a
587 +Datapath Layout Binary file (DPL) that is parsed by MC at boot time; or created
588 +dynamically at runtime, via the DPAA2 objects APIs.
589 +
590 +
591 +Features & Offloads
592 +===================
593 +Hardware checksum offloading is supported for TCP and UDP over IPv4/6 frames.
594 +The checksum offloads can be independently configured on RX and TX through
595 +ethtool.
596 +
597 +Hardware offload of unicast and multicast MAC filtering is supported on the
598 +ingress path and permanently enabled.
599 +
600 +Scatter-gather frames are supported on both RX and TX paths. On TX, SG support
601 +is configurable via ethtool; on RX it is always enabled.
602 +
603 +The DPAA2 hardware can process jumbo Ethernet frames of up to 10K bytes.
604 +
605 +The Ethernet driver defines a static flow hashing scheme that distributes
606 +traffic based on a 5-tuple key: src IP, dst IP, IP proto, L4 src port,
607 +L4 dst port. No user configuration is supported for now.
608 +
609 +Hardware specific statistics for the network interface as well as some
610 +non-standard driver stats can be consulted through ethtool -S option.
611 --- /dev/null
612 +++ b/drivers/staging/fsl-dpaa2/ethernet/TODO
613 @@ -0,0 +1,18 @@
614 +* Add a DPAA2 MAC kernel driver in order to allow PHY management; currently
615 + the DPMAC objects and their link to DPNIs are handled by MC internally
616 + and all PHYs are seen as fixed-link
617 +* add more debug support: decide how to expose detailed debug statistics,
618 + add ingress error queue support
619 +* MC firmware uprev; the DPAA2 objects used by the Ethernet driver need to
620 + be kept in sync with binary interface changes in MC
621 +* refine README file
622 +* cleanup
623 +
624 +NOTE: None of the above is must-have before getting the DPAA2 Ethernet driver
625 +out of staging. The main requirement for that is to have the drivers it
626 +depends on, fsl-mc bus and DPIO driver, moved to drivers/bus and drivers/soc
627 +respectively.
628 +
629 + Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
630 + ruxandra.radulescu@nxp.com, devel@driverdev.osuosl.org,
631 + linux-kernel@vger.kernel.org
632 --- /dev/null
633 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
634 @@ -0,0 +1,1253 @@
635 +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
636 +/*
637 + * Copyright 2017 NXP
638 + *
639 + */
640 +
641 +#include <linux/init.h>
642 +#include <linux/module.h>
643 +
644 +#include "dpaa2-eth-ceetm.h"
645 +#include "dpaa2-eth.h"
646 +
647 +#define DPAA2_CEETM_DESCRIPTION "FSL DPAA2 CEETM qdisc"
648 +/* Conversion formula from userspace passed Bps to expected Mbit */
649 +#define dpaa2_eth_bps_to_mbit(rate) (rate >> 17)
650 +
651 +static const struct nla_policy dpaa2_ceetm_policy[DPAA2_CEETM_TCA_MAX] = {
652 + [DPAA2_CEETM_TCA_COPT] = { .len = sizeof(struct dpaa2_ceetm_tc_copt) },
653 + [DPAA2_CEETM_TCA_QOPS] = { .len = sizeof(struct dpaa2_ceetm_tc_qopt) },
654 +};
655 +
656 +struct Qdisc_ops dpaa2_ceetm_qdisc_ops;
657 +
658 +static inline int dpaa2_eth_set_ch_shaping(struct dpaa2_eth_priv *priv,
659 + struct dpni_tx_shaping_cfg *scfg,
660 + struct dpni_tx_shaping_cfg *ecfg,
661 + int coupled, int ch_id)
662 +{
663 + int err = 0;
664 +
665 + netdev_dbg(priv->net_dev, "%s: ch_id %d rate %d mbps\n", __func__,
666 + ch_id, scfg->rate_limit);
667 + err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, scfg,
668 + ecfg, coupled);
669 + if (err)
670 + netdev_err(priv->net_dev, "dpni_set_tx_shaping err\n");
671 +
672 + return err;
673 +}
674 +
675 +static inline int dpaa2_eth_reset_ch_shaping(struct dpaa2_eth_priv *priv,
676 + int ch_id)
677 +{
678 + struct dpni_tx_shaping_cfg cfg = { 0 };
679 +
680 + return dpaa2_eth_set_ch_shaping(priv, &cfg, &cfg, 0, ch_id);
681 +}
682 +
683 +static inline int
684 +dpaa2_eth_update_shaping_cfg(struct net_device *dev,
685 + struct dpaa2_ceetm_shaping_cfg cfg,
686 + struct dpni_tx_shaping_cfg *scfg,
687 + struct dpni_tx_shaping_cfg *ecfg)
688 +{
689 + scfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.cir);
690 + ecfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.eir);
691 +
692 + if (cfg.cbs > DPAA2_ETH_MAX_BURST_SIZE) {
693 + netdev_err(dev, "Committed burst size must be under %d\n",
694 + DPAA2_ETH_MAX_BURST_SIZE);
695 + return -EINVAL;
696 + }
697 +
698 + scfg->max_burst_size = cfg.cbs;
699 +
700 + if (cfg.ebs > DPAA2_ETH_MAX_BURST_SIZE) {
701 + netdev_err(dev, "Excess burst size must be under %d\n",
702 + DPAA2_ETH_MAX_BURST_SIZE);
703 + return -EINVAL;
704 + }
705 +
706 + ecfg->max_burst_size = cfg.ebs;
707 +
708 + if ((!cfg.cir || !cfg.eir) && cfg.coupled) {
709 + netdev_err(dev, "Coupling can be set when both CIR and EIR are finite\n");
710 + return -EINVAL;
711 + }
712 +
713 + return 0;
714 +}
715 +
716 +enum update_tx_prio {
717 + DPAA2_ETH_ADD_CQ,
718 + DPAA2_ETH_DEL_CQ,
719 +};
720 +
721 +/* Normalize weights based on max passed value */
722 +static inline int dpaa2_eth_normalize_tx_prio(struct dpaa2_ceetm_qdisc *priv)
723 +{
724 + struct dpni_tx_schedule_cfg *sched_cfg;
725 + struct dpaa2_ceetm_class *cl;
726 + u32 qpri;
727 + u16 weight_max = 0, increment;
728 + int i;
729 +
730 + /* Check the boundaries of the provided values */
731 + for (i = 0; i < priv->clhash.hashsize; i++)
732 + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
733 + weight_max = (weight_max == 0 ? cl->prio.weight :
734 + (weight_max < cl->prio.weight ?
735 + cl->prio.weight : weight_max));
736 +
737 + /* If there are no elements, there's nothing to do */
738 + if (weight_max == 0)
739 + return 0;
740 +
741 + increment = (DPAA2_CEETM_MAX_WEIGHT - DPAA2_CEETM_MIN_WEIGHT) /
742 + weight_max;
743 +
744 + for (i = 0; i < priv->clhash.hashsize; i++) {
745 + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
746 + if (cl->prio.mode == STRICT_PRIORITY)
747 + continue;
748 +
749 + qpri = cl->prio.qpri;
750 + sched_cfg = &priv->prio.tx_prio_cfg.tc_sched[qpri];
751 +
752 + sched_cfg->delta_bandwidth =
753 + DPAA2_CEETM_MIN_WEIGHT +
754 + (cl->prio.weight * increment);
755 +
756 + pr_debug("%s: Normalized CQ qpri %d weight to %d\n",
757 + __func__, qpri, sched_cfg->delta_bandwidth);
758 + }
759 + }
760 +
761 + return 0;
762 +}
763 +
764 +static inline int dpaa2_eth_update_tx_prio(struct dpaa2_eth_priv *priv,
765 + struct dpaa2_ceetm_class *cl,
766 + enum update_tx_prio type)
767 +{
768 + struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
769 + struct dpni_congestion_notification_cfg notif_cfg = {0};
770 + struct dpni_tx_schedule_cfg *sched_cfg;
771 + struct dpni_taildrop td = {0};
772 + u8 ch_id = 0, tc_id = 0;
773 + u32 qpri = 0;
774 + int err = 0;
775 +
776 + qpri = cl->prio.qpri;
777 + tc_id = DPNI_BUILD_CH_TC(ch_id, qpri);
778 +
779 + switch (type) {
780 + case DPAA2_ETH_ADD_CQ:
781 + /* Disable congestion notifications */
782 + notif_cfg.threshold_entry = 0;
783 + notif_cfg.threshold_exit = 0;
784 + err = dpni_set_congestion_notification(priv->mc_io, 0,
785 + priv->mc_token,
786 + DPNI_QUEUE_TX, tc_id,
787 + &notif_cfg);
788 + if (err) {
789 + netdev_err(priv->net_dev, "Error disabling congestion notifications %d\n",
790 + err);
791 + return err;
792 + }
793 + /* Enable taildrop */
794 + td.enable = 1;
795 + td.units = DPNI_CONGESTION_UNIT_FRAMES;
796 + td.threshold = DPAA2_CEETM_TD_THRESHOLD;
797 + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
798 + DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
799 + 0, &td);
800 + if (err) {
801 + netdev_err(priv->net_dev, "Error enabling Tx taildrop %d\n",
802 + err);
803 + return err;
804 + }
805 + break;
806 + case DPAA2_ETH_DEL_CQ:
807 + /* Disable taildrop */
808 + td.enable = 0;
809 + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
810 + DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
811 + 0, &td);
812 + if (err) {
813 + netdev_err(priv->net_dev, "Error disabling Tx taildrop %d\n",
814 + err);
815 + return err;
816 + }
817 + /* Enable congestion notifications */
818 + notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
819 + notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
820 + notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
821 + notif_cfg.message_ctx = (u64)priv;
822 + notif_cfg.message_iova = priv->cscn_dma;
823 + notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
824 + DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
825 + DPNI_CONG_OPT_COHERENT_WRITE;
826 + err = dpni_set_congestion_notification(priv->mc_io, 0,
827 + priv->mc_token,
828 + DPNI_QUEUE_TX, tc_id,
829 + &notif_cfg);
830 + if (err) {
831 + netdev_err(priv->net_dev, "Error enabling congestion notifications %d\n",
832 + err);
833 + return err;
834 + }
835 + break;
836 + }
837 +
838 + /* We can zero out the structure in the tx_prio_conf array */
839 + if (type == DPAA2_ETH_DEL_CQ) {
840 + sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[qpri];
841 + memset(sched_cfg, 0, sizeof(*sched_cfg));
842 + }
843 +
844 + /* Normalize priorities */
845 + err = dpaa2_eth_normalize_tx_prio(sch);
846 +
847 + /* Debug print goes here */
848 + print_hex_dump_debug("tx_prio: ", DUMP_PREFIX_OFFSET, 16, 1,
849 + &sch->prio.tx_prio_cfg,
850 + sizeof(sch->prio.tx_prio_cfg), 0);
851 +
852 + /* Call dpni_set_tx_priorities for the entire prio qdisc */
853 + err = dpni_set_tx_priorities(priv->mc_io, 0, priv->mc_token,
854 + &sch->prio.tx_prio_cfg);
855 + if (err)
856 + netdev_err(priv->net_dev, "dpni_set_tx_priorities err %d\n",
857 + err);
858 +
859 + return err;
860 +}
861 +
862 +static void dpaa2_eth_ceetm_enable(struct dpaa2_eth_priv *priv)
863 +{
864 + priv->ceetm_en = true;
865 +}
866 +
867 +static void dpaa2_eth_ceetm_disable(struct dpaa2_eth_priv *priv)
868 +{
869 + priv->ceetm_en = false;
870 +}
871 +
872 +/* Find class in qdisc hash table using given handle */
873 +static inline struct dpaa2_ceetm_class *dpaa2_ceetm_find(u32 handle,
874 + struct Qdisc *sch)
875 +{
876 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
877 + struct Qdisc_class_common *clc;
878 +
879 + pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n",
880 + __func__, handle, sch->handle);
881 +
882 + clc = qdisc_class_find(&priv->clhash, handle);
883 + return clc ? container_of(clc, struct dpaa2_ceetm_class, common) : NULL;
884 +}
885 +
886 +/* Insert a class in the qdisc's class hash */
887 +static void dpaa2_ceetm_link_class(struct Qdisc *sch,
888 + struct Qdisc_class_hash *clhash,
889 + struct Qdisc_class_common *common)
890 +{
891 + sch_tree_lock(sch);
892 + qdisc_class_hash_insert(clhash, common);
893 + sch_tree_unlock(sch);
894 + qdisc_class_hash_grow(sch, clhash);
895 +}
896 +
897 +/* Destroy a ceetm class */
898 +static void dpaa2_ceetm_cls_destroy(struct Qdisc *sch,
899 + struct dpaa2_ceetm_class *cl)
900 +{
901 + struct net_device *dev = qdisc_dev(sch);
902 + struct dpaa2_eth_priv *priv = netdev_priv(dev);
903 +
904 + if (!cl)
905 + return;
906 +
907 + pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n",
908 + __func__, cl->common.classid, sch->handle);
909 +
910 + /* Recurse into child first */
911 + if (cl->child) {
912 + qdisc_destroy(cl->child);
913 + cl->child = NULL;
914 + }
915 +
916 + switch (cl->type) {
917 + case CEETM_ROOT:
918 + if (dpaa2_eth_reset_ch_shaping(priv, cl->root.ch_id))
919 + netdev_err(dev, "Error resetting channel shaping\n");
920 +
921 + break;
922 +
923 + case CEETM_PRIO:
924 + if (dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_DEL_CQ))
925 + netdev_err(dev, "Error resetting tx_priorities\n");
926 +
927 + if (cl->prio.cstats)
928 + free_percpu(cl->prio.cstats);
929 +
930 + break;
931 + }
932 +
933 + tcf_destroy_chain(&cl->filter_list);
934 + kfree(cl);
935 +}
936 +
937 +/* Destroy a ceetm qdisc */
938 +static void dpaa2_ceetm_destroy(struct Qdisc *sch)
939 +{
940 + unsigned int i;
941 + struct hlist_node *next;
942 + struct dpaa2_ceetm_class *cl;
943 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
944 + struct net_device *dev = qdisc_dev(sch);
945 + struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
946 +
947 + pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n",
948 + __func__, sch->handle);
949 +
950 + /* All filters need to be removed before destroying the classes */
951 + tcf_destroy_chain(&priv->filter_list);
952 +
953 + for (i = 0; i < priv->clhash.hashsize; i++) {
954 + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
955 + tcf_destroy_chain(&cl->filter_list);
956 + }
957 +
958 + for (i = 0; i < priv->clhash.hashsize; i++) {
959 + hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i],
960 + common.hnode)
961 + dpaa2_ceetm_cls_destroy(sch, cl);
962 + }
963 +
964 + qdisc_class_hash_destroy(&priv->clhash);
965 +
966 + switch (priv->type) {
967 + case CEETM_ROOT:
968 + dpaa2_eth_ceetm_disable(priv_eth);
969 +
970 + if (priv->root.qstats)
971 + free_percpu(priv->root.qstats);
972 +
973 + if (!priv->root.qdiscs)
974 + break;
975 +
976 + /* Destroy the pfifo qdiscs in case they haven't been attached
977 + * to the netdev queues yet.
978 + */
979 + for (i = 0; i < dev->num_tx_queues; i++)
980 + if (priv->root.qdiscs[i])
981 + qdisc_destroy(priv->root.qdiscs[i]);
982 +
983 + kfree(priv->root.qdiscs);
984 + break;
985 +
986 + case CEETM_PRIO:
987 + if (priv->prio.parent)
988 + priv->prio.parent->child = NULL;
989 + break;
990 + }
991 +}
992 +
993 +static int dpaa2_ceetm_dump(struct Qdisc *sch, struct sk_buff *skb)
994 +{
995 + struct Qdisc *qdisc;
996 + unsigned int ntx, i;
997 + struct nlattr *nest;
998 + struct dpaa2_ceetm_tc_qopt qopt;
999 + struct dpaa2_ceetm_qdisc_stats *qstats;
1000 + struct net_device *dev = qdisc_dev(sch);
1001 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
1002 +
1003 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
1004 +
1005 + sch_tree_lock(sch);
1006 + memset(&qopt, 0, sizeof(qopt));
1007 + qopt.type = priv->type;
1008 + qopt.shaped = priv->shaped;
1009 +
1010 + switch (priv->type) {
1011 + case CEETM_ROOT:
1012 + /* Gather statistics from the underlying pfifo qdiscs */
1013 + sch->q.qlen = 0;
1014 + memset(&sch->bstats, 0, sizeof(sch->bstats));
1015 + memset(&sch->qstats, 0, sizeof(sch->qstats));
1016 +
1017 + for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
1018 + qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
1019 + sch->q.qlen += qdisc->q.qlen;
1020 + sch->bstats.bytes += qdisc->bstats.bytes;
1021 + sch->bstats.packets += qdisc->bstats.packets;
1022 + sch->qstats.qlen += qdisc->qstats.qlen;
1023 + sch->qstats.backlog += qdisc->qstats.backlog;
1024 + sch->qstats.drops += qdisc->qstats.drops;
1025 + sch->qstats.requeues += qdisc->qstats.requeues;
1026 + sch->qstats.overlimits += qdisc->qstats.overlimits;
1027 + }
1028 +
1029 + for_each_online_cpu(i) {
1030 + qstats = per_cpu_ptr(priv->root.qstats, i);
1031 + sch->qstats.drops += qstats->drops;
1032 + }
1033 +
1034 + break;
1035 +
1036 + case CEETM_PRIO:
1037 + qopt.prio_group_A = priv->prio.tx_prio_cfg.prio_group_A;
1038 + qopt.prio_group_B = priv->prio.tx_prio_cfg.prio_group_B;
1039 + qopt.separate_groups = priv->prio.tx_prio_cfg.separate_groups;
1040 + break;
1041 +
1042 + default:
1043 + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
1044 + sch_tree_unlock(sch);
1045 + return -EINVAL;
1046 + }
1047 +
1048 + nest = nla_nest_start(skb, TCA_OPTIONS);
1049 + if (!nest)
1050 + goto nla_put_failure;
1051 + if (nla_put(skb, DPAA2_CEETM_TCA_QOPS, sizeof(qopt), &qopt))
1052 + goto nla_put_failure;
1053 + nla_nest_end(skb, nest);
1054 +
1055 + sch_tree_unlock(sch);
1056 + return skb->len;
1057 +
1058 +nla_put_failure:
1059 + sch_tree_unlock(sch);
1060 + nla_nest_cancel(skb, nest);
1061 + return -EMSGSIZE;
1062 +}
1063 +
1064 +static int dpaa2_ceetm_change_prio(struct Qdisc *sch,
1065 + struct dpaa2_ceetm_qdisc *priv,
1066 + struct dpaa2_ceetm_tc_qopt *qopt)
1067 +{
1068 + /* TODO: Once LX2 support is added */
1069 + /* priv->shaped = parent_cl->shaped; */
1070 + priv->prio.tx_prio_cfg.prio_group_A = qopt->prio_group_A;
1071 + priv->prio.tx_prio_cfg.prio_group_B = qopt->prio_group_B;
1072 + priv->prio.tx_prio_cfg.separate_groups = qopt->separate_groups;
1073 +
1074 + return 0;
1075 +}
1076 +
1077 +/* Edit a ceetm qdisc */
1078 +static int dpaa2_ceetm_change(struct Qdisc *sch, struct nlattr *opt)
1079 +{
1080 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
1081 + struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
1082 + struct dpaa2_ceetm_tc_qopt *qopt;
1083 + int err;
1084 +
1085 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
1086 +
1087 + err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
1088 + dpaa2_ceetm_policy);
1089 + if (err < 0) {
1090 + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
1091 + "nla_parse_nested");
1092 + return err;
1093 + }
1094 +
1095 + if (!tb[DPAA2_CEETM_TCA_QOPS]) {
1096 + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
1097 + "tb");
1098 + return -EINVAL;
1099 + }
1100 +
1101 + if (TC_H_MIN(sch->handle)) {
1102 + pr_err("CEETM: a qdisc should not have a minor\n");
1103 + return -EINVAL;
1104 + }
1105 +
1106 + qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
1107 +
1108 + if (priv->type != qopt->type) {
1109 + pr_err("CEETM: qdisc %X is not of the provided type\n",
1110 + sch->handle);
1111 + return -EINVAL;
1112 + }
1113 +
1114 + switch (priv->type) {
1115 + case CEETM_PRIO:
1116 + err = dpaa2_ceetm_change_prio(sch, priv, qopt);
1117 + break;
1118 + default:
1119 + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
1120 + err = -EINVAL;
1121 + }
1122 +
1123 + return err;
1124 +}
1125 +
1126 +/* Configure a root ceetm qdisc */
1127 +static int dpaa2_ceetm_init_root(struct Qdisc *sch,
1128 + struct dpaa2_ceetm_qdisc *priv,
1129 + struct dpaa2_ceetm_tc_qopt *qopt)
1130 +{
1131 + struct net_device *dev = qdisc_dev(sch);
1132 + struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
1133 + struct netdev_queue *dev_queue;
1134 + unsigned int i, parent_id;
1135 + struct Qdisc *qdisc;
1136 + int err;
1137 +
1138 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
1139 +
1140 + /* Validate inputs */
1141 + if (sch->parent != TC_H_ROOT) {
1142 + pr_err("CEETM: a root ceetm qdisc can not be attached to a class\n");
1143 + tcf_destroy_chain(&priv->filter_list);
1144 + qdisc_class_hash_destroy(&priv->clhash);
1145 + return -EINVAL;
1146 + }
1147 +
1148 + /* Pre-allocate underlying pfifo qdiscs.
1149 + *
1150 + * We want to offload shaping and scheduling decisions to the hardware.
1151 + * The pfifo qdiscs will be attached to the netdev queues and will
1152 + * guide the traffic from the IP stack down to the driver with minimum
1153 + * interference.
1154 + *
1155 + * The CEETM qdiscs and classes will be crossed when the traffic
1156 + * reaches the driver.
1157 + */
1158 + priv->root.qdiscs = kcalloc(dev->num_tx_queues,
1159 + sizeof(priv->root.qdiscs[0]),
1160 + GFP_KERNEL);
1161 + if (!priv->root.qdiscs) {
1162 + err = -ENOMEM;
1163 + goto err_init_root;
1164 + }
1165 +
1166 + for (i = 0; i < dev->num_tx_queues; i++) {
1167 + dev_queue = netdev_get_tx_queue(dev, i);
1168 + parent_id = TC_H_MAKE(TC_H_MAJ(sch->handle),
1169 + TC_H_MIN(i + PFIFO_MIN_OFFSET));
1170 +
1171 + qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1172 + parent_id);
1173 + if (!qdisc) {
1174 + err = -ENOMEM;
1175 + goto err_init_root;
1176 + }
1177 +
1178 + priv->root.qdiscs[i] = qdisc;
1179 + qdisc->flags |= TCQ_F_ONETXQUEUE;
1180 + }
1181 +
1182 + sch->flags |= TCQ_F_MQROOT;
1183 +
1184 + priv->root.qstats = alloc_percpu(struct dpaa2_ceetm_qdisc_stats);
1185 + if (!priv->root.qstats) {
1186 + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
1187 + __func__);
1188 + err = -ENOMEM;
1189 + goto err_init_root;
1190 + }
1191 +
1192 + dpaa2_eth_ceetm_enable(priv_eth);
1193 + return 0;
1194 +
1195 +err_init_root:
1196 + dpaa2_ceetm_destroy(sch);
1197 + return err;
1198 +}
1199 +
1200 +/* Configure a prio ceetm qdisc */
1201 +static int dpaa2_ceetm_init_prio(struct Qdisc *sch,
1202 + struct dpaa2_ceetm_qdisc *priv,
1203 + struct dpaa2_ceetm_tc_qopt *qopt)
1204 +{
1205 + struct net_device *dev = qdisc_dev(sch);
1206 + struct dpaa2_ceetm_class *parent_cl;
1207 + struct Qdisc *parent_qdisc;
1208 + int err;
1209 +
1210 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
1211 +
1212 + if (sch->parent == TC_H_ROOT) {
1213 + pr_err("CEETM: a prio ceetm qdisc can not be root\n");
1214 + err = -EINVAL;
1215 + goto err_init_prio;
1216 + }
1217 +
1218 + parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
1219 + if (strcmp(parent_qdisc->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
1220 + pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
1221 + err = -EINVAL;
1222 + goto err_init_prio;
1223 + }
1224 +
1225 + /* Obtain the parent root ceetm_class */
1226 + parent_cl = dpaa2_ceetm_find(sch->parent, parent_qdisc);
1227 +
1228 + if (!parent_cl || parent_cl->type != CEETM_ROOT) {
1229 + pr_err("CEETM: a prio ceetm qdiscs can be added only under a root ceetm class\n");
1230 + err = -EINVAL;
1231 + goto err_init_prio;
1232 + }
1233 +
1234 + priv->prio.parent = parent_cl;
1235 + parent_cl->child = sch;
1236 +
1237 + err = dpaa2_ceetm_change_prio(sch, priv, qopt);
1238 +
1239 + return 0;
1240 +
1241 +err_init_prio:
1242 + dpaa2_ceetm_destroy(sch);
1243 + return err;
1244 +}
1245 +
1246 +/* Configure a generic ceetm qdisc */
1247 +static int dpaa2_ceetm_init(struct Qdisc *sch, struct nlattr *opt)
1248 +{
1249 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
1250 + struct net_device *dev = qdisc_dev(sch);
1251 + struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
1252 + struct dpaa2_ceetm_tc_qopt *qopt;
1253 + int err;
1254 +
1255 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
1256 +
1257 + if (!netif_is_multiqueue(dev))
1258 + return -EOPNOTSUPP;
1259 +
1260 + RCU_INIT_POINTER(priv->filter_list, NULL);
1261 +
1262 + if (!opt) {
1263 + pr_err(KBUILD_BASENAME " : %s : tc error - opt = NULL\n",
1264 + __func__);
1265 + return -EINVAL;
1266 + }
1267 +
1268 + err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
1269 + dpaa2_ceetm_policy);
1270 + if (err < 0) {
1271 + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
1272 + "nla_parse_nested");
1273 + return err;
1274 + }
1275 +
1276 + if (!tb[DPAA2_CEETM_TCA_QOPS]) {
1277 + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
1278 + "tb");
1279 + return -EINVAL;
1280 + }
1281 +
1282 + if (TC_H_MIN(sch->handle)) {
1283 + pr_err("CEETM: a qdisc should not have a minor\n");
1284 + return -EINVAL;
1285 + }
1286 +
1287 + qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
1288 +
1289 + /* Initialize the class hash list. Each qdisc has its own class hash */
1290 + err = qdisc_class_hash_init(&priv->clhash);
1291 + if (err < 0) {
1292 + pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init failed\n",
1293 + __func__);
1294 + return err;
1295 + }
1296 +
1297 + priv->type = qopt->type;
1298 + priv->shaped = qopt->shaped;
1299 +
1300 + switch (priv->type) {
1301 + case CEETM_ROOT:
1302 + err = dpaa2_ceetm_init_root(sch, priv, qopt);
1303 + break;
1304 + case CEETM_PRIO:
1305 + err = dpaa2_ceetm_init_prio(sch, priv, qopt);
1306 + break;
1307 + default:
1308 + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
1309 + dpaa2_ceetm_destroy(sch);
1310 + err = -EINVAL;
1311 + }
1312 +
1313 + return err;
1314 +}
1315 +
1316 +/* Attach the underlying pfifo qdiscs */
1317 +static void dpaa2_ceetm_attach(struct Qdisc *sch)
1318 +{
1319 + struct net_device *dev = qdisc_dev(sch);
1320 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
1321 + struct Qdisc *qdisc, *old_qdisc;
1322 + unsigned int i;
1323 +
1324 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
1325 +
1326 + for (i = 0; i < dev->num_tx_queues; i++) {
1327 + qdisc = priv->root.qdiscs[i];
1328 + old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
1329 + if (old_qdisc)
1330 + qdisc_destroy(old_qdisc);
1331 + }
1332 +
1333 + /* Remove the references to the pfifo qdiscs since the kernel will
1334 + * destroy them when needed. No cleanup from our part is required from
1335 + * this point on.
1336 + */
1337 + kfree(priv->root.qdiscs);
1338 + priv->root.qdiscs = NULL;
1339 +}
1340 +
1341 +static unsigned long dpaa2_ceetm_cls_get(struct Qdisc *sch, u32 classid)
1342 +{
1343 + struct dpaa2_ceetm_class *cl;
1344 +
1345 + pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
1346 + __func__, classid, sch->handle);
1347 + cl = dpaa2_ceetm_find(classid, sch);
1348 +
1349 + if (cl)
1350 + cl->refcnt++;
1351 +
1352 + return (unsigned long)cl;
1353 +}
1354 +
1355 +static void dpaa2_ceetm_cls_put(struct Qdisc *sch, unsigned long arg)
1356 +{
1357 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1358 + pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
1359 + __func__, cl->common.classid, sch->handle);
1360 + cl->refcnt--;
1361 +
1362 + if (cl->refcnt == 0)
1363 + dpaa2_ceetm_cls_destroy(sch, cl);
1364 +}
1365 +
1366 +static int dpaa2_ceetm_cls_change_root(struct dpaa2_ceetm_class *cl,
1367 + struct dpaa2_ceetm_tc_copt *copt,
1368 + struct net_device *dev)
1369 +{
1370 + struct dpaa2_eth_priv *priv = netdev_priv(dev);
1371 + struct dpni_tx_shaping_cfg scfg = { 0 }, ecfg = { 0 };
1372 + int err = 0;
1373 +
1374 + pr_debug(KBUILD_BASENAME " : %s : class %X\n", __func__,
1375 + cl->common.classid);
1376 +
1377 + if (!cl->shaped)
1378 + return 0;
1379 +
1380 + if (dpaa2_eth_update_shaping_cfg(dev, copt->shaping_cfg,
1381 + &scfg, &ecfg))
1382 + return -EINVAL;
1383 +
1384 + err = dpaa2_eth_set_ch_shaping(priv, &scfg, &ecfg,
1385 + copt->shaping_cfg.coupled,
1386 + cl->root.ch_id);
1387 + if (err)
1388 + return err;
1389 +
1390 + memcpy(&cl->root.shaping_cfg, &copt->shaping_cfg,
1391 + sizeof(struct dpaa2_ceetm_shaping_cfg));
1392 +
1393 + return err;
1394 +}
1395 +
1396 +static int dpaa2_ceetm_cls_change_prio(struct dpaa2_ceetm_class *cl,
1397 + struct dpaa2_ceetm_tc_copt *copt,
1398 + struct net_device *dev)
1399 +{
1400 + struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
1401 + struct dpni_tx_schedule_cfg *sched_cfg;
1402 + struct dpaa2_eth_priv *priv = netdev_priv(dev);
1403 + int err;
1404 +
1405 + pr_debug(KBUILD_BASENAME " : %s : class %X mode %d weight %d\n",
1406 + __func__, cl->common.classid, copt->mode, copt->weight);
1407 +
1408 + if (!cl->prio.cstats) {
1409 + cl->prio.cstats = alloc_percpu(struct dpaa2_ceetm_class_stats);
1410 + if (!cl->prio.cstats) {
1411 + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
1412 + __func__);
1413 + return -ENOMEM;
1414 + }
1415 + }
1416 +
1417 + cl->prio.mode = copt->mode;
1418 + cl->prio.weight = copt->weight;
1419 +
1420 + sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[cl->prio.qpri];
1421 +
1422 + switch (copt->mode) {
1423 + case STRICT_PRIORITY:
1424 + sched_cfg->mode = DPNI_TX_SCHED_STRICT_PRIORITY;
1425 + break;
1426 + case WEIGHTED_A:
1427 + sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_A;
1428 + break;
1429 + case WEIGHTED_B:
1430 + sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_B;
1431 + break;
1432 + }
1433 +
1434 + err = dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_ADD_CQ);
1435 +
1436 + return err;
1437 +}
1438 +
1439 +/* Add a new ceetm class */
1440 +static int dpaa2_ceetm_cls_add(struct Qdisc *sch, u32 classid,
1441 + struct dpaa2_ceetm_tc_copt *copt,
1442 + unsigned long *arg)
1443 +{
1444 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
1445 + struct net_device *dev = qdisc_dev(sch);
1446 + struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
1447 + struct dpaa2_ceetm_class *cl;
1448 + int err;
1449 +
1450 + if (copt->type == CEETM_ROOT &&
1451 + priv->clhash.hashelems == dpaa2_eth_ch_count(priv_eth)) {
1452 + pr_err("CEETM: only %d channel%s per DPNI allowed, sorry\n",
1453 + dpaa2_eth_ch_count(priv_eth),
1454 + dpaa2_eth_ch_count(priv_eth) == 1 ? "" : "s");
1455 + return -EINVAL;
1456 + }
1457 +
1458 + if (copt->type == CEETM_PRIO &&
1459 + priv->clhash.hashelems == dpaa2_eth_tc_count(priv_eth)) {
1460 + pr_err("CEETM: only %d queue%s per channel allowed, sorry\n",
1461 + dpaa2_eth_tc_count(priv_eth),
1462 + dpaa2_eth_tc_count(priv_eth) == 1 ? "" : "s");
1463 + return -EINVAL;
1464 + }
1465 +
1466 + cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1467 + if (!cl)
1468 + return -ENOMEM;
1469 +
1470 + RCU_INIT_POINTER(cl->filter_list, NULL);
1471 +
1472 + cl->common.classid = classid;
1473 + cl->refcnt = 1;
1474 + cl->parent = sch;
1475 + cl->child = NULL;
1476 +
1477 + /* Add class handle in Qdisc */
1478 + dpaa2_ceetm_link_class(sch, &priv->clhash, &cl->common);
1479 +
1480 + cl->shaped = copt->shaped;
1481 + cl->type = copt->type;
1482 +
1483 + /* Claim a CEETM channel / tc - DPAA2. will assume transition from
1484 + * classid to qdid/qpri, starting from qdid / qpri 0
1485 + */
1486 + switch (copt->type) {
1487 + case CEETM_ROOT:
1488 + cl->root.ch_id = classid - sch->handle - 1;
1489 + err = dpaa2_ceetm_cls_change_root(cl, copt, dev);
1490 + break;
1491 + case CEETM_PRIO:
1492 + cl->prio.qpri = classid - sch->handle - 1;
1493 + err = dpaa2_ceetm_cls_change_prio(cl, copt, dev);
1494 + break;
1495 + default:
1496 + err = -EINVAL;
1497 + break;
1498 + }
1499 +
1500 + if (err) {
1501 + pr_err("%s: Unable to set new %s class\n", __func__,
1502 + (copt->type == CEETM_ROOT ? "root" : "prio"));
1503 + goto out_free;
1504 + }
1505 +
1506 + switch (copt->type) {
1507 + case CEETM_ROOT:
1508 + pr_debug(KBUILD_BASENAME " : %s : configured root class %X associated with channel qdid %d\n",
1509 + __func__, classid, cl->root.ch_id);
1510 + break;
1511 + case CEETM_PRIO:
1512 + pr_debug(KBUILD_BASENAME " : %s : configured prio class %X associated with queue qpri %d\n",
1513 + __func__, classid, cl->prio.qpri);
1514 + break;
1515 + }
1516 +
1517 + *arg = (unsigned long)cl;
1518 + return 0;
1519 +
1520 +out_free:
1521 + kfree(cl);
1522 + return err;
1523 +}
1524 +
1525 +/* Add or configure a ceetm class */
1526 +static int dpaa2_ceetm_cls_change(struct Qdisc *sch, u32 classid, u32 parentid,
1527 + struct nlattr **tca, unsigned long *arg)
1528 +{
1529 + struct dpaa2_ceetm_qdisc *priv;
1530 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)*arg;
1531 + struct nlattr *opt = tca[TCA_OPTIONS];
1532 + struct nlattr *tb[DPAA2_CEETM_TCA_MAX];
1533 + struct dpaa2_ceetm_tc_copt *copt;
1534 + struct net_device *dev = qdisc_dev(sch);
1535 + int err;
1536 +
1537 + pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n",
1538 + __func__, classid, sch->handle);
1539 +
1540 + if (strcmp(sch->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
1541 + pr_err("CEETM: a ceetm class can not be attached to other qdisc/class types\n");
1542 + return -EINVAL;
1543 + }
1544 +
1545 + priv = qdisc_priv(sch);
1546 +
1547 + if (!opt) {
1548 + pr_err(KBUILD_BASENAME " : %s : tc error NULL opt\n", __func__);
1549 + return -EINVAL;
1550 + }
1551 +
1552 + err = nla_parse_nested(tb, DPAA2_CEETM_TCA_COPT, opt,
1553 + dpaa2_ceetm_policy);
1554 + if (err < 0) {
1555 + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
1556 + "nla_parse_nested");
1557 + return -EINVAL;
1558 + }
1559 +
1560 + if (!tb[DPAA2_CEETM_TCA_COPT]) {
1561 + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
1562 + "tb");
1563 + return -EINVAL;
1564 + }
1565 +
1566 + copt = nla_data(tb[DPAA2_CEETM_TCA_COPT]);
1567 +
1568 + /* Configure an existing ceetm class */
1569 + if (cl) {
1570 + if (copt->type != cl->type) {
1571 + pr_err("CEETM: class %X is not of the provided type\n",
1572 + cl->common.classid);
1573 + return -EINVAL;
1574 + }
1575 +
1576 + switch (copt->type) {
1577 + case CEETM_ROOT:
1578 + return dpaa2_ceetm_cls_change_root(cl, copt, dev);
1579 + case CEETM_PRIO:
1580 + return dpaa2_ceetm_cls_change_prio(cl, copt, dev);
1581 +
1582 + default:
1583 + pr_err(KBUILD_BASENAME " : %s : invalid class\n",
1584 + __func__);
1585 + return -EINVAL;
1586 + }
1587 + }
1588 +
1589 + return dpaa2_ceetm_cls_add(sch, classid, copt, arg);
1590 +}
1591 +
1592 +static void dpaa2_ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1593 +{
1594 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
1595 + struct dpaa2_ceetm_class *cl;
1596 + unsigned int i;
1597 +
1598 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
1599 +
1600 + if (arg->stop)
1601 + return;
1602 +
1603 + for (i = 0; i < priv->clhash.hashsize; i++) {
1604 + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
1605 + if (arg->count < arg->skip) {
1606 + arg->count++;
1607 + continue;
1608 + }
1609 + if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1610 + arg->stop = 1;
1611 + return;
1612 + }
1613 + arg->count++;
1614 + }
1615 + }
1616 +}
1617 +
1618 +static int dpaa2_ceetm_cls_dump(struct Qdisc *sch, unsigned long arg,
1619 + struct sk_buff *skb, struct tcmsg *tcm)
1620 +{
1621 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1622 + struct nlattr *nest;
1623 + struct dpaa2_ceetm_tc_copt copt;
1624 +
1625 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
1626 + __func__, cl->common.classid, sch->handle);
1627 +
1628 + sch_tree_lock(sch);
1629 +
1630 + tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle;
1631 + tcm->tcm_handle = cl->common.classid;
1632 +
1633 + memset(&copt, 0, sizeof(copt));
1634 +
1635 + copt.shaped = cl->shaped;
1636 + copt.type = cl->type;
1637 +
1638 + switch (cl->type) {
1639 + case CEETM_ROOT:
1640 + if (cl->child)
1641 + tcm->tcm_info = cl->child->handle;
1642 +
1643 + memcpy(&copt.shaping_cfg, &cl->root.shaping_cfg,
1644 + sizeof(struct dpaa2_ceetm_shaping_cfg));
1645 +
1646 + break;
1647 +
1648 + case CEETM_PRIO:
1649 + if (cl->child)
1650 + tcm->tcm_info = cl->child->handle;
1651 +
1652 + copt.mode = cl->prio.mode;
1653 + copt.weight = cl->prio.weight;
1654 +
1655 + break;
1656 + }
1657 +
1658 + nest = nla_nest_start(skb, TCA_OPTIONS);
1659 + if (!nest)
1660 + goto nla_put_failure;
1661 + if (nla_put(skb, DPAA2_CEETM_TCA_COPT, sizeof(copt), &copt))
1662 + goto nla_put_failure;
1663 + nla_nest_end(skb, nest);
1664 + sch_tree_unlock(sch);
1665 + return skb->len;
1666 +
1667 +nla_put_failure:
1668 + sch_tree_unlock(sch);
1669 + nla_nest_cancel(skb, nest);
1670 + return -EMSGSIZE;
1671 +}
1672 +
1673 +static int dpaa2_ceetm_cls_delete(struct Qdisc *sch, unsigned long arg)
1674 +{
1675 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
1676 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1677 +
1678 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
1679 + __func__, cl->common.classid, sch->handle);
1680 +
1681 + sch_tree_lock(sch);
1682 + qdisc_class_hash_remove(&priv->clhash, &cl->common);
1683 + cl->refcnt--;
1684 + WARN_ON(cl->refcnt == 0);
1685 + sch_tree_unlock(sch);
1686 + return 0;
1687 +}
1688 +
1689 +/* Get the class' child qdisc, if any */
1690 +static struct Qdisc *dpaa2_ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg)
1691 +{
1692 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1693 +
1694 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
1695 + __func__, cl->common.classid, sch->handle);
1696 +
1697 + switch (cl->type) {
1698 + case CEETM_ROOT:
1699 + case CEETM_PRIO:
1700 + return cl->child;
1701 + }
1702 +
1703 + return NULL;
1704 +}
1705 +
1706 +static int dpaa2_ceetm_cls_graft(struct Qdisc *sch, unsigned long arg,
1707 + struct Qdisc *new, struct Qdisc **old)
1708 +{
1709 + if (new && strcmp(new->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
1710 + pr_err("CEETM: only ceetm qdiscs can be attached to ceetm classes\n");
1711 + return -EOPNOTSUPP;
1712 + }
1713 +
1714 + return 0;
1715 +}
1716 +
1717 +static int dpaa2_ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg,
1718 + struct gnet_dump *d)
1719 +{
1720 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1721 + struct gnet_stats_basic_packed tmp_bstats;
1722 + struct dpaa2_ceetm_tc_xstats xstats;
1723 + union dpni_statistics dpni_stats;
1724 + struct net_device *dev = qdisc_dev(sch);
1725 + struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
1726 + u8 ch_id = 0;
1727 + int err;
1728 +
1729 + memset(&xstats, 0, sizeof(xstats));
1730 + memset(&tmp_bstats, 0, sizeof(tmp_bstats));
1731 +
1732 + if (cl->type == CEETM_ROOT)
1733 + return 0;
1734 +
1735 + err = dpni_get_statistics(priv_eth->mc_io, 0, priv_eth->mc_token, 3,
1736 + DPNI_BUILD_CH_TC(ch_id, cl->prio.qpri),
1737 + &dpni_stats);
1738 + if (err)
1739 + netdev_warn(dev, "dpni_get_stats(%d) failed - %d\n", 3, err);
1740 +
1741 + xstats.ceetm_dequeue_bytes = dpni_stats.page_3.ceetm_dequeue_bytes;
1742 + xstats.ceetm_dequeue_frames = dpni_stats.page_3.ceetm_dequeue_frames;
1743 + xstats.ceetm_reject_bytes = dpni_stats.page_3.ceetm_reject_bytes;
1744 + xstats.ceetm_reject_frames = dpni_stats.page_3.ceetm_reject_frames;
1745 +
1746 + return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
1747 +}
1748 +
1749 +static struct tcf_proto __rcu **dpaa2_ceetm_tcf_chain(struct Qdisc *sch,
1750 + unsigned long arg)
1751 +{
1752 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
1753 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1754 +
1755 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
1756 + cl ? cl->common.classid : 0, sch->handle);
1757 + return cl ? &cl->filter_list : &priv->filter_list;
1758 +}
1759 +
1760 +static unsigned long dpaa2_ceetm_tcf_bind(struct Qdisc *sch,
1761 + unsigned long parent,
1762 + u32 classid)
1763 +{
1764 + struct dpaa2_ceetm_class *cl = dpaa2_ceetm_find(classid, sch);
1765 +
1766 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
1767 + cl ? cl->common.classid : 0, sch->handle);
1768 + return (unsigned long)cl;
1769 +}
1770 +
1771 +static void dpaa2_ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg)
1772 +{
1773 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1774 +
1775 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
1776 + cl ? cl->common.classid : 0, sch->handle);
1777 +}
1778 +
1779 +const struct Qdisc_class_ops dpaa2_ceetm_cls_ops = {
1780 + .graft = dpaa2_ceetm_cls_graft,
1781 + .leaf = dpaa2_ceetm_cls_leaf,
1782 + .get = dpaa2_ceetm_cls_get,
1783 + .put = dpaa2_ceetm_cls_put,
1784 + .change = dpaa2_ceetm_cls_change,
1785 + .delete = dpaa2_ceetm_cls_delete,
1786 + .walk = dpaa2_ceetm_cls_walk,
1787 + .tcf_chain = dpaa2_ceetm_tcf_chain,
1788 + .bind_tcf = dpaa2_ceetm_tcf_bind,
1789 + .unbind_tcf = dpaa2_ceetm_tcf_unbind,
1790 + .dump = dpaa2_ceetm_cls_dump,
1791 + .dump_stats = dpaa2_ceetm_cls_dump_stats,
1792 +};
1793 +
1794 +struct Qdisc_ops dpaa2_ceetm_qdisc_ops __read_mostly = {
1795 + .id = "ceetm",
1796 + .priv_size = sizeof(struct dpaa2_ceetm_qdisc),
1797 + .cl_ops = &dpaa2_ceetm_cls_ops,
1798 + .init = dpaa2_ceetm_init,
1799 + .destroy = dpaa2_ceetm_destroy,
1800 + .change = dpaa2_ceetm_change,
1801 + .dump = dpaa2_ceetm_dump,
1802 + .attach = dpaa2_ceetm_attach,
1803 + .owner = THIS_MODULE,
1804 +};
1805 +
1806 +/* Run the filters and classifiers attached to the qdisc on the provided skb */
1807 +int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
1808 + int *qdid, int *qpri)
1809 +{
1810 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
1811 + struct dpaa2_ceetm_class *cl = NULL;
1812 + struct tcf_result res;
1813 + struct tcf_proto *tcf;
1814 + int result;
1815 +
1816 + tcf = rcu_dereference_bh(priv->filter_list);
1817 + while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
1818 +#ifdef CONFIG_NET_CLS_ACT
1819 + switch (result) {
1820 + case TC_ACT_QUEUED:
1821 + case TC_ACT_STOLEN:
1822 + case TC_ACT_SHOT:
1823 + /* No valid class found due to action */
1824 + return -1;
1825 + }
1826 +#endif
1827 + cl = (void *)res.class;
1828 + if (!cl) {
1829 + /* The filter leads to the qdisc */
1830 + if (res.classid == sch->handle)
1831 + return 0;
1832 +
1833 + cl = dpaa2_ceetm_find(res.classid, sch);
1834 + /* The filter leads to an invalid class */
1835 + if (!cl)
1836 + break;
1837 + }
1838 +
1839 + /* The class might have its own filters attached */
1840 + tcf = rcu_dereference_bh(cl->filter_list);
1841 + }
1842 +
1843 + /* No valid class found */
1844 + if (!cl)
1845 + return 0;
1846 +
1847 + switch (cl->type) {
1848 + case CEETM_ROOT:
1849 + *qdid = cl->root.ch_id;
1850 +
1851 + /* The root class does not have a child prio qdisc */
1852 + if (!cl->child)
1853 + return 0;
1854 +
1855 + /* Run the prio qdisc classifiers */
1856 + return dpaa2_ceetm_classify(skb, cl->child, qdid, qpri);
1857 +
1858 + case CEETM_PRIO:
1859 + *qpri = cl->prio.qpri;
1860 + break;
1861 + }
1862 +
1863 + return 0;
1864 +}
1865 +
1866 +int __init dpaa2_ceetm_register(void)
1867 +{
1868 + int err = 0;
1869 +
1870 + pr_debug(KBUILD_MODNAME ": " DPAA2_CEETM_DESCRIPTION "\n");
1871 +
1872 + err = register_qdisc(&dpaa2_ceetm_qdisc_ops);
1873 + if (unlikely(err))
1874 + pr_err(KBUILD_MODNAME
1875 + ": %s:%hu:%s(): register_qdisc() = %d\n",
1876 + KBUILD_BASENAME ".c", __LINE__, __func__, err);
1877 +
1878 + return err;
1879 +}
1880 +
1881 +void __exit dpaa2_ceetm_unregister(void)
1882 +{
1883 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
1884 + KBUILD_BASENAME ".c", __func__);
1885 +
1886 + unregister_qdisc(&dpaa2_ceetm_qdisc_ops);
1887 +}
1888 --- /dev/null
1889 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
1890 @@ -0,0 +1,182 @@
1891 +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
1892 +/*
1893 + * Copyright 2017 NXP
1894 + *
1895 + */
1896 +
1897 +#ifndef __DPAA2_ETH_CEETM_H
1898 +#define __DPAA2_ETH_CEETM_H
1899 +
1900 +#include <net/pkt_sched.h>
1901 +#include <net/pkt_cls.h>
1902 +#include <net/netlink.h>
1903 +
1904 +#include "dpaa2-eth.h"
1905 +
1906 +/* For functional purposes, there are num_tx_queues pfifo qdiscs through which
1907 + * frames reach the driver. Their handles start from 1:21. Handles 1:1 to 1:20
1908 + * are reserved for the maximum 32 CEETM channels (majors and minors are in
1909 + * hex).
1910 + */
1911 +#define PFIFO_MIN_OFFSET 0x21
1912 +
1913 +#define DPAA2_CEETM_MIN_WEIGHT 100
1914 +#define DPAA2_CEETM_MAX_WEIGHT 24800
1915 +
1916 +#define DPAA2_CEETM_TD_THRESHOLD 1000
1917 +
1918 +enum wbfs_group_type {
1919 + WBFS_GRP_A,
1920 + WBFS_GRP_B,
1921 + WBFS_GRP_LARGE
1922 +};
1923 +
1924 +enum {
1925 + DPAA2_CEETM_TCA_UNSPEC,
1926 + DPAA2_CEETM_TCA_COPT,
1927 + DPAA2_CEETM_TCA_QOPS,
1928 + DPAA2_CEETM_TCA_MAX,
1929 +};
1930 +
1931 +/* CEETM configuration types */
1932 +enum dpaa2_ceetm_type {
1933 + CEETM_ROOT = 1,
1934 + CEETM_PRIO,
1935 +};
1936 +
1937 +enum {
1938 + STRICT_PRIORITY = 0,
1939 + WEIGHTED_A,
1940 + WEIGHTED_B,
1941 +};
1942 +
1943 +struct dpaa2_ceetm_shaping_cfg {
1944 + __u64 cir; /* committed information rate */
1945 + __u64 eir; /* excess information rate */
1946 + __u16 cbs; /* committed burst size */
1947 + __u16 ebs; /* excess burst size */
1948 + __u8 coupled; /* shaper coupling */
1949 +};
1950 +
1951 +extern const struct nla_policy ceetm_policy[DPAA2_CEETM_TCA_MAX];
1952 +
1953 +struct dpaa2_ceetm_class;
1954 +struct dpaa2_ceetm_qdisc_stats;
1955 +struct dpaa2_ceetm_class_stats;
1956 +
1957 +/* corresponds to CEETM shaping at LNI level */
1958 +struct dpaa2_root_q {
1959 + struct Qdisc **qdiscs;
1960 + struct dpaa2_ceetm_qdisc_stats __percpu *qstats;
1961 +};
1962 +
1963 +/* corresponds to the number of priorities a channel serves */
1964 +struct dpaa2_prio_q {
1965 + struct dpaa2_ceetm_class *parent;
1966 + struct dpni_tx_priorities_cfg tx_prio_cfg;
1967 +};
1968 +
1969 +struct dpaa2_ceetm_qdisc {
1970 + struct Qdisc_class_hash clhash;
1971 + struct tcf_proto *filter_list; /* qdisc attached filters */
1972 +
1973 + enum dpaa2_ceetm_type type; /* ROOT/PRIO */
1974 + bool shaped;
1975 + union {
1976 + struct dpaa2_root_q root;
1977 + struct dpaa2_prio_q prio;
1978 + };
1979 +};
1980 +
1981 +/* CEETM Qdisc configuration parameters */
1982 +struct dpaa2_ceetm_tc_qopt {
1983 + enum dpaa2_ceetm_type type;
1984 + __u16 shaped;
1985 + __u8 prio_group_A;
1986 + __u8 prio_group_B;
1987 + __u8 separate_groups;
1988 +};
1989 +
1990 +/* root class - corresponds to a channel */
1991 +struct dpaa2_root_c {
1992 + struct dpaa2_ceetm_shaping_cfg shaping_cfg;
1993 + u32 ch_id;
1994 +};
1995 +
1996 +/* prio class - corresponds to a strict priority queue (group) */
1997 +struct dpaa2_prio_c {
1998 + struct dpaa2_ceetm_class_stats __percpu *cstats;
1999 + u32 qpri;
2000 + u8 mode;
2001 + u16 weight;
2002 +};
2003 +
2004 +struct dpaa2_ceetm_class {
2005 + struct Qdisc_class_common common;
2006 + int refcnt;
2007 + struct tcf_proto *filter_list; /* class attached filters */
2008 + struct Qdisc *parent;
2009 + struct Qdisc *child;
2010 +
2011 + enum dpaa2_ceetm_type type; /* ROOT/PRIO */
2012 + bool shaped;
2013 + union {
2014 + struct dpaa2_root_c root;
2015 + struct dpaa2_prio_c prio;
2016 + };
2017 +};
2018 +
2019 +/* CEETM Class configuration parameters */
2020 +struct dpaa2_ceetm_tc_copt {
2021 + enum dpaa2_ceetm_type type;
2022 + struct dpaa2_ceetm_shaping_cfg shaping_cfg;
2023 + __u16 shaped;
2024 + __u8 mode;
2025 + __u16 weight;
2026 +};
2027 +
2028 +/* CEETM stats */
2029 +struct dpaa2_ceetm_qdisc_stats {
2030 + __u32 drops;
2031 +};
2032 +
2033 +struct dpaa2_ceetm_class_stats {
2034 + /* Software counters */
2035 + struct gnet_stats_basic_packed bstats;
2036 + __u32 ern_drop_count;
2037 + __u32 congested_count;
2038 +};
2039 +
2040 +struct dpaa2_ceetm_tc_xstats {
2041 + __u64 ceetm_dequeue_bytes;
2042 + __u64 ceetm_dequeue_frames;
2043 + __u64 ceetm_reject_bytes;
2044 + __u64 ceetm_reject_frames;
2045 +};
2046 +
2047 +#ifdef CONFIG_FSL_DPAA2_ETH_CEETM
2048 +int __init dpaa2_ceetm_register(void);
2049 +void __exit dpaa2_ceetm_unregister(void);
2050 +int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
2051 + int *qdid, int *qpri);
2052 +#else
2053 +static inline int dpaa2_ceetm_register(void)
2054 +{
2055 + return 0;
2056 +}
2057 +
2058 +static inline void dpaa2_ceetm_unregister(void) {}
2059 +
2060 +static inline int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
2061 + int *qdid, int *qpri)
2062 +{
2063 + return 0;
2064 +}
2065 +#endif
2066 +
2067 +static inline bool dpaa2_eth_ceetm_is_enabled(struct dpaa2_eth_priv *priv)
2068 +{
2069 + return priv->ceetm_en;
2070 +}
2071 +
2072 +#endif
2073 --- /dev/null
2074 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
2075 @@ -0,0 +1,357 @@
2076 +
2077 +/* Copyright 2015 Freescale Semiconductor Inc.
2078 + *
2079 + * Redistribution and use in source and binary forms, with or without
2080 + * modification, are permitted provided that the following conditions are met:
2081 + * * Redistributions of source code must retain the above copyright
2082 + * notice, this list of conditions and the following disclaimer.
2083 + * * Redistributions in binary form must reproduce the above copyright
2084 + * notice, this list of conditions and the following disclaimer in the
2085 + * documentation and/or other materials provided with the distribution.
2086 + * * Neither the name of Freescale Semiconductor nor the
2087 + * names of its contributors may be used to endorse or promote products
2088 + * derived from this software without specific prior written permission.
2089 + *
2090 + *
2091 + * ALTERNATIVELY, this software may be distributed under the terms of the
2092 + * GNU General Public License ("GPL") as published by the Free Software
2093 + * Foundation, either version 2 of that License or (at your option) any
2094 + * later version.
2095 + *
2096 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
2097 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
2098 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
2099 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
2100 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
2101 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
2102 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
2103 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2104 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2105 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2106 + */
2107 +
2108 +#include <linux/module.h>
2109 +#include <linux/debugfs.h>
2110 +#include "dpaa2-eth.h"
2111 +#include "dpaa2-eth-debugfs.h"
2112 +
2113 +#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
2114 +
2115 +static struct dentry *dpaa2_dbg_root;
2116 +
2117 +static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
2118 +{
2119 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
2120 + struct rtnl_link_stats64 *stats;
2121 + struct dpaa2_eth_drv_stats *extras;
2122 + int i;
2123 +
2124 + seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
2125 + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n",
2126 + "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
2127 + "Tx SG", "Tx realloc", "Enq busy");
2128 +
2129 + for_each_online_cpu(i) {
2130 + stats = per_cpu_ptr(priv->percpu_stats, i);
2131 + extras = per_cpu_ptr(priv->percpu_extras, i);
2132 + seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
2133 + i,
2134 + stats->rx_packets,
2135 + stats->rx_errors,
2136 + extras->rx_sg_frames,
2137 + stats->tx_packets,
2138 + stats->tx_errors,
2139 + extras->tx_conf_frames,
2140 + extras->tx_sg_frames,
2141 + extras->tx_reallocs,
2142 + extras->tx_portal_busy);
2143 + }
2144 +
2145 + return 0;
2146 +}
2147 +
2148 +static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
2149 +{
2150 + int err;
2151 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
2152 +
2153 + err = single_open(file, dpaa2_dbg_cpu_show, priv);
2154 + if (err < 0)
2155 + netdev_err(priv->net_dev, "single_open() failed\n");
2156 +
2157 + return err;
2158 +}
2159 +
2160 +static const struct file_operations dpaa2_dbg_cpu_ops = {
2161 + .open = dpaa2_dbg_cpu_open,
2162 + .read = seq_read,
2163 + .llseek = seq_lseek,
2164 + .release = single_release,
2165 +};
2166 +
2167 +static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
2168 +{
2169 + switch (fq->type) {
2170 + case DPAA2_RX_FQ:
2171 + return "Rx";
2172 + case DPAA2_TX_CONF_FQ:
2173 + return "Tx conf";
2174 + case DPAA2_RX_ERR_FQ:
2175 + return "Rx err";
2176 + default:
2177 + return "N/A";
2178 + }
2179 +}
2180 +
2181 +static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
2182 +{
2183 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
2184 + struct dpaa2_eth_fq *fq;
2185 + u32 fcnt, bcnt;
2186 + int i, err;
2187 +
2188 + seq_printf(file, "non-zero FQ stats for %s:\n", priv->net_dev->name);
2189 + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
2190 + "VFQID", "CPU", "Traffic Class", "Type", "Frames",
2191 + "Pending frames", "Congestion");
2192 +
2193 + for (i = 0; i < priv->num_fqs; i++) {
2194 + fq = &priv->fq[i];
2195 + err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
2196 + if (err)
2197 + fcnt = 0;
2198 +
2199 + /* A lot of queues, no use displaying zero traffic ones */
2200 + if (!fq->stats.frames && !fcnt)
2201 + continue;
2202 +
2203 + seq_printf(file, "%5d%16d%16d%16s%16llu%16u%16llu\n",
2204 + fq->fqid,
2205 + fq->target_cpu,
2206 + fq->tc,
2207 + fq_type_to_str(fq),
2208 + fq->stats.frames,
2209 + fcnt,
2210 + fq->stats.congestion_entry);
2211 + }
2212 +
2213 + return 0;
2214 +}
2215 +
2216 +static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
2217 +{
2218 + int err;
2219 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
2220 +
2221 + err = single_open(file, dpaa2_dbg_fqs_show, priv);
2222 + if (err < 0)
2223 + netdev_err(priv->net_dev, "single_open() failed\n");
2224 +
2225 + return err;
2226 +}
2227 +
2228 +static const struct file_operations dpaa2_dbg_fq_ops = {
2229 + .open = dpaa2_dbg_fqs_open,
2230 + .read = seq_read,
2231 + .llseek = seq_lseek,
2232 + .release = single_release,
2233 +};
2234 +
2235 +static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
2236 +{
2237 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
2238 + struct dpaa2_eth_channel *ch;
2239 + int i;
2240 +
2241 + seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
2242 + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
2243 + "CHID", "CPU", "Deq busy", "Frames", "CDANs",
2244 + "Avg frm/CDAN", "Buf count");
2245 +
2246 + for (i = 0; i < priv->num_channels; i++) {
2247 + ch = priv->channel[i];
2248 + seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
2249 + ch->ch_id,
2250 + ch->nctx.desired_cpu,
2251 + ch->stats.dequeue_portal_busy,
2252 + ch->stats.frames,
2253 + ch->stats.cdan,
2254 +			   ch->stats.cdan ? ch->stats.frames / ch->stats.cdan : 0,
2255 + ch->buf_count);
2256 + }
2257 +
2258 + return 0;
2259 +}
2260 +
2261 +static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
2262 +{
2263 + int err;
2264 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
2265 +
2266 + err = single_open(file, dpaa2_dbg_ch_show, priv);
2267 + if (err < 0)
2268 + netdev_err(priv->net_dev, "single_open() failed\n");
2269 +
2270 + return err;
2271 +}
2272 +
2273 +static const struct file_operations dpaa2_dbg_ch_ops = {
2274 + .open = dpaa2_dbg_ch_open,
2275 + .read = seq_read,
2276 + .llseek = seq_lseek,
2277 + .release = single_release,
2278 +};
2279 +
2280 +static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf,
2281 + size_t count, loff_t *offset)
2282 +{
2283 + struct dpaa2_eth_priv *priv = file->private_data;
2284 + struct rtnl_link_stats64 *percpu_stats;
2285 + struct dpaa2_eth_drv_stats *percpu_extras;
2286 + struct dpaa2_eth_fq *fq;
2287 + struct dpaa2_eth_channel *ch;
2288 + int i;
2289 +
2290 + for_each_online_cpu(i) {
2291 + percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
2292 + memset(percpu_stats, 0, sizeof(*percpu_stats));
2293 +
2294 + percpu_extras = per_cpu_ptr(priv->percpu_extras, i);
2295 + memset(percpu_extras, 0, sizeof(*percpu_extras));
2296 + }
2297 +
2298 + for (i = 0; i < priv->num_fqs; i++) {
2299 + fq = &priv->fq[i];
2300 + memset(&fq->stats, 0, sizeof(fq->stats));
2301 + }
2302 +
2303 + for (i = 0; i < priv->num_channels; i++) {
2304 + ch = priv->channel[i];
2305 + memset(&ch->stats, 0, sizeof(ch->stats));
2306 + }
2307 +
2308 + return count;
2309 +}
2310 +
2311 +static const struct file_operations dpaa2_dbg_reset_ops = {
2312 + .open = simple_open,
2313 + .write = dpaa2_dbg_reset_write,
2314 +};
2315 +
2316 +static ssize_t dpaa2_dbg_reset_mc_write(struct file *file,
2317 + const char __user *buf,
2318 + size_t count, loff_t *offset)
2319 +{
2320 + struct dpaa2_eth_priv *priv = file->private_data;
2321 + int err;
2322 +
2323 + err = dpni_reset_statistics(priv->mc_io, 0, priv->mc_token);
2324 + if (err)
2325 + netdev_err(priv->net_dev,
2326 + "dpni_reset_statistics() failed %d\n", err);
2327 +
2328 + return count;
2329 +}
2330 +
2331 +static const struct file_operations dpaa2_dbg_reset_mc_ops = {
2332 + .open = simple_open,
2333 + .write = dpaa2_dbg_reset_mc_write,
2334 +};
2335 +
2336 +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
2337 +{
2338 + if (!dpaa2_dbg_root)
2339 + return;
2340 +
2341 + /* Create a directory for the interface */
2342 + priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
2343 + dpaa2_dbg_root);
2344 + if (!priv->dbg.dir) {
2345 + netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
2346 + return;
2347 + }
2348 +
2349 + /* per-cpu stats file */
2350 + priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
2351 + priv->dbg.dir, priv,
2352 + &dpaa2_dbg_cpu_ops);
2353 + if (!priv->dbg.cpu_stats) {
2354 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
2355 + goto err_cpu_stats;
2356 + }
2357 +
2358 + /* per-fq stats file */
2359 + priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
2360 + priv->dbg.dir, priv,
2361 + &dpaa2_dbg_fq_ops);
2362 + if (!priv->dbg.fq_stats) {
2363 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
2364 + goto err_fq_stats;
2365 + }
2366 +
2367 +	/* per-channel stats file */
2368 + priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
2369 + priv->dbg.dir, priv,
2370 + &dpaa2_dbg_ch_ops);
2371 +	if (!priv->dbg.ch_stats) {
2372 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
2373 + goto err_ch_stats;
2374 + }
2375 +
2376 + /* reset stats */
2377 + priv->dbg.reset_stats = debugfs_create_file("reset_stats", 0200,
2378 + priv->dbg.dir, priv,
2379 + &dpaa2_dbg_reset_ops);
2380 + if (!priv->dbg.reset_stats) {
2381 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
2382 + goto err_reset_stats;
2383 + }
2384 +
2385 + /* reset MC stats */
2386 + priv->dbg.reset_mc_stats = debugfs_create_file("reset_mc_stats",
2387 + 0222, priv->dbg.dir, priv,
2388 + &dpaa2_dbg_reset_mc_ops);
2389 + if (!priv->dbg.reset_mc_stats) {
2390 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
2391 + goto err_reset_mc_stats;
2392 + }
2393 +
2394 + return;
2395 +
2396 +err_reset_mc_stats:
2397 + debugfs_remove(priv->dbg.reset_stats);
2398 +err_reset_stats:
2399 + debugfs_remove(priv->dbg.ch_stats);
2400 +err_ch_stats:
2401 + debugfs_remove(priv->dbg.fq_stats);
2402 +err_fq_stats:
2403 + debugfs_remove(priv->dbg.cpu_stats);
2404 +err_cpu_stats:
2405 + debugfs_remove(priv->dbg.dir);
2406 +}
2407 +
2408 +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
2409 +{
2410 + debugfs_remove(priv->dbg.reset_mc_stats);
2411 + debugfs_remove(priv->dbg.reset_stats);
2412 + debugfs_remove(priv->dbg.fq_stats);
2413 + debugfs_remove(priv->dbg.ch_stats);
2414 + debugfs_remove(priv->dbg.cpu_stats);
2415 + debugfs_remove(priv->dbg.dir);
2416 +}
2417 +
2418 +void dpaa2_eth_dbg_init(void)
2419 +{
2420 + dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
2421 + if (!dpaa2_dbg_root) {
2422 + pr_err("DPAA2-ETH: debugfs create failed\n");
2423 + return;
2424 + }
2425 +
2426 + pr_info("DPAA2-ETH: debugfs created\n");
2427 +}
2428 +
2429 +void __exit dpaa2_eth_dbg_exit(void)
2430 +{
2431 + debugfs_remove(dpaa2_dbg_root);
2432 +}
2433 --- /dev/null
2434 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
2435 @@ -0,0 +1,60 @@
2436 +/* Copyright 2015 Freescale Semiconductor Inc.
2437 + *
2438 + * Redistribution and use in source and binary forms, with or without
2439 + * modification, are permitted provided that the following conditions are met:
2440 + * * Redistributions of source code must retain the above copyright
2441 + * notice, this list of conditions and the following disclaimer.
2442 + * * Redistributions in binary form must reproduce the above copyright
2443 + * notice, this list of conditions and the following disclaimer in the
2444 + * documentation and/or other materials provided with the distribution.
2445 + * * Neither the name of Freescale Semiconductor nor the
2446 + * names of its contributors may be used to endorse or promote products
2447 + * derived from this software without specific prior written permission.
2448 + *
2449 + *
2450 + * ALTERNATIVELY, this software may be distributed under the terms of the
2451 + * GNU General Public License ("GPL") as published by the Free Software
2452 + * Foundation, either version 2 of that License or (at your option) any
2453 + * later version.
2454 + *
2455 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
2456 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
2457 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
2458 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
2459 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
2460 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
2461 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
2462 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2463 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2464 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2465 + */
2466 +
2467 +#ifndef DPAA2_ETH_DEBUGFS_H
2468 +#define DPAA2_ETH_DEBUGFS_H
2469 +
2470 +#include <linux/dcache.h>
2471 +
2472 +struct dpaa2_eth_priv;
2473 +
2474 +struct dpaa2_debugfs {
2475 + struct dentry *dir;
2476 + struct dentry *fq_stats;
2477 + struct dentry *ch_stats;
2478 + struct dentry *cpu_stats;
2479 + struct dentry *reset_stats;
2480 + struct dentry *reset_mc_stats;
2481 +};
2482 +
2483 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
2484 +void dpaa2_eth_dbg_init(void);
2485 +void dpaa2_eth_dbg_exit(void);
2486 +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
2487 +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
2488 +#else
2489 +static inline void dpaa2_eth_dbg_init(void) {}
2490 +static inline void dpaa2_eth_dbg_exit(void) {}
2491 +static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
2492 +static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
2493 +#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */
2494 +
2495 +#endif /* DPAA2_ETH_DEBUGFS_H */
2496 --- /dev/null
2497 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
2498 @@ -0,0 +1,185 @@
2499 +/* Copyright 2014-2015 Freescale Semiconductor Inc.
2500 + *
2501 + * Redistribution and use in source and binary forms, with or without
2502 + * modification, are permitted provided that the following conditions are met:
2503 + * * Redistributions of source code must retain the above copyright
2504 + * notice, this list of conditions and the following disclaimer.
2505 + * * Redistributions in binary form must reproduce the above copyright
2506 + * notice, this list of conditions and the following disclaimer in the
2507 + * documentation and/or other materials provided with the distribution.
2508 + * * Neither the name of Freescale Semiconductor nor the
2509 + * names of its contributors may be used to endorse or promote products
2510 + * derived from this software without specific prior written permission.
2511 + *
2512 + *
2513 + * ALTERNATIVELY, this software may be distributed under the terms of the
2514 + * GNU General Public License ("GPL") as published by the Free Software
2515 + * Foundation, either version 2 of that License or (at your option) any
2516 + * later version.
2517 + *
2518 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
2519 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
2520 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
2521 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
2522 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
2523 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
2524 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
2525 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2526 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2527 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2528 + */
2529 +
2530 +#undef TRACE_SYSTEM
2531 +#define TRACE_SYSTEM dpaa2_eth
2532 +
2533 +#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
2534 +#define _DPAA2_ETH_TRACE_H
2535 +
2536 +#include <linux/skbuff.h>
2537 +#include <linux/netdevice.h>
2538 +#include "dpaa2-eth.h"
2539 +#include <linux/tracepoint.h>
2540 +
2541 +#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u"
2542 +/* trace_printk format for raw buffer event class */
2543 +#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d"
2544 +
2545 +/* This is used to declare a class of events.
2546 + * individual events of this type will be defined below.
2547 + */
2548 +
2549 +/* Store details about a frame descriptor */
2550 +DECLARE_EVENT_CLASS(dpaa2_eth_fd,
2551 + /* Trace function prototype */
2552 + TP_PROTO(struct net_device *netdev,
2553 + const struct dpaa2_fd *fd),
2554 +
2555 + /* Repeat argument list here */
2556 + TP_ARGS(netdev, fd),
2557 +
2558 + /* A structure containing the relevant information we want
2559 + * to record. Declare name and type for each normal element,
2560 + * name, type and size for arrays. Use __string for variable
2561 + * length strings.
2562 + */
2563 + TP_STRUCT__entry(
2564 + __field(u64, fd_addr)
2565 + __field(u32, fd_len)
2566 + __field(u16, fd_offset)
2567 + __string(name, netdev->name)
2568 + ),
2569 +
2570 + /* The function that assigns values to the above declared
2571 + * fields
2572 + */
2573 + TP_fast_assign(
2574 + __entry->fd_addr = dpaa2_fd_get_addr(fd);
2575 + __entry->fd_len = dpaa2_fd_get_len(fd);
2576 + __entry->fd_offset = dpaa2_fd_get_offset(fd);
2577 + __assign_str(name, netdev->name);
2578 + ),
2579 +
2580 + /* This is what gets printed when the trace event is
2581 + * triggered.
2582 + */
2583 + TP_printk(TR_FMT,
2584 + __get_str(name),
2585 + __entry->fd_addr,
2586 + __entry->fd_len,
2587 + __entry->fd_offset)
2588 +);
2589 +
2590 +/* Now declare events of the above type. Format is:
2591 + * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
2592 + */
2593 +
2594 +/* Tx (egress) fd */
2595 +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd,
2596 + TP_PROTO(struct net_device *netdev,
2597 + const struct dpaa2_fd *fd),
2598 +
2599 + TP_ARGS(netdev, fd)
2600 +);
2601 +
2602 +/* Rx fd */
2603 +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
2604 + TP_PROTO(struct net_device *netdev,
2605 + const struct dpaa2_fd *fd),
2606 +
2607 + TP_ARGS(netdev, fd)
2608 +);
2609 +
2610 +/* Tx confirmation fd */
2611 +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
2612 + TP_PROTO(struct net_device *netdev,
2613 + const struct dpaa2_fd *fd),
2614 +
2615 + TP_ARGS(netdev, fd)
2616 +);
2617 +
2618 +/* Log data about raw buffers. Useful for tracing DPBP content. */
2619 +TRACE_EVENT(dpaa2_eth_buf_seed,
2620 + /* Trace function prototype */
2621 + TP_PROTO(struct net_device *netdev,
2622 + /* virtual address and size */
2623 + void *vaddr,
2624 + size_t size,
2625 + /* dma map address and size */
2626 + dma_addr_t dma_addr,
2627 + size_t map_size,
2628 + /* buffer pool id, if relevant */
2629 + u16 bpid),
2630 +
2631 + /* Repeat argument list here */
2632 + TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
2633 +
2634 + /* A structure containing the relevant information we want
2635 + * to record. Declare name and type for each normal element,
2636 + * name, type and size for arrays. Use __string for variable
2637 + * length strings.
2638 + */
2639 + TP_STRUCT__entry(
2640 + __field(void *, vaddr)
2641 + __field(size_t, size)
2642 + __field(dma_addr_t, dma_addr)
2643 + __field(size_t, map_size)
2644 + __field(u16, bpid)
2645 + __string(name, netdev->name)
2646 + ),
2647 +
2648 + /* The function that assigns values to the above declared
2649 + * fields
2650 + */
2651 + TP_fast_assign(
2652 + __entry->vaddr = vaddr;
2653 + __entry->size = size;
2654 + __entry->dma_addr = dma_addr;
2655 + __entry->map_size = map_size;
2656 + __entry->bpid = bpid;
2657 + __assign_str(name, netdev->name);
2658 + ),
2659 +
2660 + /* This is what gets printed when the trace event is
2661 + * triggered.
2662 + */
2663 + TP_printk(TR_BUF_FMT,
2664 + __get_str(name),
2665 + __entry->vaddr,
2666 + __entry->size,
2667 + &__entry->dma_addr,
2668 + __entry->map_size,
2669 + __entry->bpid)
2670 +);
2671 +
2672 +/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
2673 + * The syntax is the same as for DECLARE_EVENT_CLASS().
2674 + */
2675 +
2676 +#endif /* _DPAA2_ETH_TRACE_H */
2677 +
2678 +/* This must be outside ifdef _DPAA2_ETH_TRACE_H */
2679 +#undef TRACE_INCLUDE_PATH
2680 +#define TRACE_INCLUDE_PATH .
2681 +#undef TRACE_INCLUDE_FILE
2682 +#define TRACE_INCLUDE_FILE dpaa2-eth-trace
2683 +#include <trace/define_trace.h>
2684 --- /dev/null
2685 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
2686 @@ -0,0 +1,3734 @@
2687 +/* Copyright 2014-2016 Freescale Semiconductor Inc.
2688 + * Copyright 2016-2017 NXP
2689 + *
2690 + * Redistribution and use in source and binary forms, with or without
2691 + * modification, are permitted provided that the following conditions are met:
2692 + * * Redistributions of source code must retain the above copyright
2693 + * notice, this list of conditions and the following disclaimer.
2694 + * * Redistributions in binary form must reproduce the above copyright
2695 + * notice, this list of conditions and the following disclaimer in the
2696 + * documentation and/or other materials provided with the distribution.
2697 + * * Neither the name of Freescale Semiconductor nor the
2698 + * names of its contributors may be used to endorse or promote products
2699 + * derived from this software without specific prior written permission.
2700 + *
2701 + *
2702 + * ALTERNATIVELY, this software may be distributed under the terms of the
2703 + * GNU General Public License ("GPL") as published by the Free Software
2704 + * Foundation, either version 2 of that License or (at your option) any
2705 + * later version.
2706 + *
2707 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
2708 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
2709 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
2710 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
2711 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
2712 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
2713 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
2714 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2715 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2716 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2717 + */
2718 +#include <linux/init.h>
2719 +#include <linux/module.h>
2720 +#include <linux/platform_device.h>
2721 +#include <linux/etherdevice.h>
2722 +#include <linux/of_net.h>
2723 +#include <linux/interrupt.h>
2724 +#include <linux/msi.h>
2725 +#include <linux/kthread.h>
2726 +#include <linux/iommu.h>
2727 +#include <linux/net_tstamp.h>
2728 +#include <linux/bpf.h>
2729 +#include <linux/filter.h>
2730 +#include <linux/atomic.h>
2731 +#include <net/sock.h>
2732 +#include <linux/fsl/mc.h>
2733 +#include "dpaa2-eth.h"
2734 +#include "dpaa2-eth-ceetm.h"
2735 +
2736 +/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
2737 + * using trace events only need to #include <trace/events/sched.h>
2738 + */
2739 +#define CREATE_TRACE_POINTS
2740 +#include "dpaa2-eth-trace.h"
2741 +
2742 +MODULE_LICENSE("Dual BSD/GPL");
2743 +MODULE_AUTHOR("Freescale Semiconductor, Inc");
2744 +MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
2745 +
2746 +const char dpaa2_eth_drv_version[] = "0.1";
2747 +
2748 +static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
2749 + dma_addr_t iova_addr)
2750 +{
2751 + phys_addr_t phys_addr;
2752 +
2753 + phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
2754 +
2755 + return phys_to_virt(phys_addr);
2756 +}
2757 +
2758 +static void validate_rx_csum(struct dpaa2_eth_priv *priv,
2759 + u32 fd_status,
2760 + struct sk_buff *skb)
2761 +{
2762 + skb_checksum_none_assert(skb);
2763 +
2764 + /* HW checksum validation is disabled, nothing to do here */
2765 + if (!(priv->net_dev->features & NETIF_F_RXCSUM))
2766 + return;
2767 +
2768 + /* Read checksum validation bits */
2769 + if (!((fd_status & DPAA2_FAS_L3CV) &&
2770 + (fd_status & DPAA2_FAS_L4CV)))
2771 + return;
2772 +
2773 + /* Inform the stack there's no need to compute L3/L4 csum anymore */
2774 + skb->ip_summed = CHECKSUM_UNNECESSARY;
2775 +}
2776 +
2777 +/* Free a received FD.
2778 + * Not to be used for Tx conf FDs or on any other paths.
2779 + */
2780 +static void free_rx_fd(struct dpaa2_eth_priv *priv,
2781 + const struct dpaa2_fd *fd,
2782 + void *vaddr)
2783 +{
2784 + struct device *dev = priv->net_dev->dev.parent;
2785 + dma_addr_t addr = dpaa2_fd_get_addr(fd);
2786 + u8 fd_format = dpaa2_fd_get_format(fd);
2787 + struct dpaa2_sg_entry *sgt;
2788 + void *sg_vaddr;
2789 + int i;
2790 +
2791 + /* If single buffer frame, just free the data buffer */
2792 + if (fd_format == dpaa2_fd_single)
2793 + goto free_buf;
2794 + else if (fd_format != dpaa2_fd_sg)
2795 + /* We don't support any other format */
2796 + return;
2797 +
2798 + /* For S/G frames, we first need to free all SG entries
2799 + * except the first one, which was taken care of already
2800 + */
2801 + sgt = vaddr + dpaa2_fd_get_offset(fd);
2802 + for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
2803 + addr = dpaa2_sg_get_addr(&sgt[i]);
2804 + sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
2805 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
2806 + DMA_BIDIRECTIONAL);
2807 +
2808 + skb_free_frag(sg_vaddr);
2809 + if (dpaa2_sg_is_final(&sgt[i]))
2810 + break;
2811 + }
2812 +
2813 +free_buf:
2814 + skb_free_frag(vaddr);
2815 +}
2816 +
2817 +/* Build a linear skb based on a single-buffer frame descriptor */
2818 +static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
2819 + struct dpaa2_eth_channel *ch,
2820 + const struct dpaa2_fd *fd,
2821 + void *fd_vaddr)
2822 +{
2823 + struct sk_buff *skb = NULL;
2824 + u16 fd_offset = dpaa2_fd_get_offset(fd);
2825 + u32 fd_length = dpaa2_fd_get_len(fd);
2826 +
2827 + ch->buf_count--;
2828 +
2829 + skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
2830 + if (unlikely(!skb))
2831 + return NULL;
2832 +
2833 + skb_reserve(skb, fd_offset);
2834 + skb_put(skb, fd_length);
2835 +
2836 + return skb;
2837 +}
2838 +
2839 +/* Build a non linear (fragmented) skb based on a S/G table */
2840 +static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
2841 + struct dpaa2_eth_channel *ch,
2842 + struct dpaa2_sg_entry *sgt)
2843 +{
2844 + struct sk_buff *skb = NULL;
2845 + struct device *dev = priv->net_dev->dev.parent;
2846 + void *sg_vaddr;
2847 + dma_addr_t sg_addr;
2848 + u16 sg_offset;
2849 + u32 sg_length;
2850 + struct page *page, *head_page;
2851 + int page_offset;
2852 + int i;
2853 +
2854 + for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
2855 + struct dpaa2_sg_entry *sge = &sgt[i];
2856 +
2857 + /* NOTE: We only support SG entries in dpaa2_sg_single format,
2858 + * but this is the only format we may receive from HW anyway
2859 + */
2860 +
2861 + /* Get the address and length from the S/G entry */
2862 + sg_addr = dpaa2_sg_get_addr(sge);
2863 + sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
2864 + dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
2865 + DMA_BIDIRECTIONAL);
2866 +
2867 + sg_length = dpaa2_sg_get_len(sge);
2868 +
2869 + if (i == 0) {
2870 + /* We build the skb around the first data buffer */
2871 + skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
2872 + if (unlikely(!skb)) {
2873 + /* Free the first SG entry now, since we already
2874 + * unmapped it and obtained the virtual address
2875 + */
2876 + skb_free_frag(sg_vaddr);
2877 +
2878 + /* We still need to subtract the buffers used
2879 + * by this FD from our software counter
2880 + */
2881 + while (!dpaa2_sg_is_final(&sgt[i]) &&
2882 + i < DPAA2_ETH_MAX_SG_ENTRIES)
2883 + i++;
2884 + break;
2885 + }
2886 +
2887 + sg_offset = dpaa2_sg_get_offset(sge);
2888 + skb_reserve(skb, sg_offset);
2889 + skb_put(skb, sg_length);
2890 + } else {
2891 + /* Rest of the data buffers are stored as skb frags */
2892 + page = virt_to_page(sg_vaddr);
2893 + head_page = virt_to_head_page(sg_vaddr);
2894 +
2895 + /* Offset in page (which may be compound).
2896 + * Data in subsequent SG entries is stored from the
2897 + * beginning of the buffer, so we don't need to add the
2898 + * sg_offset.
2899 + */
2900 + page_offset = ((unsigned long)sg_vaddr &
2901 + (PAGE_SIZE - 1)) +
2902 + (page_address(page) - page_address(head_page));
2903 +
2904 + skb_add_rx_frag(skb, i - 1, head_page, page_offset,
2905 + sg_length, DPAA2_ETH_RX_BUF_SIZE);
2906 + }
2907 +
2908 + if (dpaa2_sg_is_final(sge))
2909 + break;
2910 + }
2911 +
2912 + WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
2913 +
2914 + /* Count all data buffers + SG table buffer */
2915 + ch->buf_count -= i + 2;
2916 +
2917 + return skb;
2918 +}
2919 +
2920 +static int dpaa2_eth_xdp_tx(struct dpaa2_eth_priv *priv,
2921 + struct dpaa2_fd *fd,
2922 + void *buf_start,
2923 + u16 queue_id)
2924 +{
2925 + struct dpaa2_eth_fq *fq;
2926 + struct rtnl_link_stats64 *percpu_stats;
2927 + struct dpaa2_eth_drv_stats *percpu_extras;
2928 + struct dpaa2_faead *faead;
2929 + u32 ctrl, frc;
2930 + int i, err;
2931 +
2932 + /* Mark the egress frame annotation area as valid */
2933 + frc = dpaa2_fd_get_frc(fd);
2934 + dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
2935 + dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
2936 +
2937 + ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
2938 + faead = dpaa2_get_faead(buf_start, false);
2939 + faead->ctrl = cpu_to_le32(ctrl);
2940 + faead->conf_fqid = 0;
2941 +
2942 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
2943 + percpu_extras = this_cpu_ptr(priv->percpu_extras);
2944 +
2945 + fq = &priv->fq[queue_id];
2946 + for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
2947 + err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
2948 + priv->tx_qdid, 0,
2949 + fq->tx_qdbin, fd);
2950 + if (err != -EBUSY)
2951 + break;
2952 + }
2953 +
2954 + percpu_extras->tx_portal_busy += i;
2955 + if (unlikely(err)) {
2956 + percpu_stats->tx_errors++;
2957 + } else {
2958 + percpu_stats->tx_packets++;
2959 + percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
2960 + }
2961 +
2962 + return err;
2963 +}
2964 +
2965 +static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
2966 +{
2967 + struct device *dev = priv->net_dev->dev.parent;
2968 + void *vaddr;
2969 + int i;
2970 +
2971 + for (i = 0; i < count; i++) {
2972 + /* Same logic as on regular Rx path */
2973 + vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
2974 + dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
2975 + DMA_BIDIRECTIONAL);
2976 + skb_free_frag(vaddr);
2977 + }
2978 +}
2979 +
2980 +static void release_fd_buf(struct dpaa2_eth_priv *priv,
2981 + struct dpaa2_eth_channel *ch,
2982 + dma_addr_t addr)
2983 +{
2984 + int err;
2985 +
2986 + ch->rel_buf_array[ch->rel_buf_cnt++] = addr;
2987 + if (likely(ch->rel_buf_cnt < DPAA2_ETH_BUFS_PER_CMD))
2988 + return;
2989 +
2990 + while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
2991 + ch->rel_buf_array,
2992 + ch->rel_buf_cnt)) == -EBUSY)
2993 + cpu_relax();
2994 +
2995 + if (err)
2996 + free_bufs(priv, ch->rel_buf_array, ch->rel_buf_cnt);
2997 +
2998 + ch->rel_buf_cnt = 0;
2999 +}
3000 +
3001 +/* Main Rx frame processing routine */
3002 +static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
3003 + struct dpaa2_eth_channel *ch,
3004 + const struct dpaa2_fd *fd,
3005 + struct napi_struct *napi,
3006 + u16 queue_id)
3007 +{
3008 + dma_addr_t addr = dpaa2_fd_get_addr(fd);
3009 + u8 fd_format = dpaa2_fd_get_format(fd);
3010 + void *vaddr;
3011 + struct sk_buff *skb;
3012 + struct rtnl_link_stats64 *percpu_stats;
3013 + struct dpaa2_eth_drv_stats *percpu_extras;
3014 + struct device *dev = priv->net_dev->dev.parent;
3015 + struct dpaa2_fas *fas;
3016 + void *buf_data;
3017 + u32 status = 0;
3018 + struct bpf_prog *xdp_prog;
3019 + struct xdp_buff xdp;
3020 + u32 xdp_act;
3021 +
3022 + /* Tracing point */
3023 + trace_dpaa2_rx_fd(priv->net_dev, fd);
3024 +
3025 + vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
3026 + dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
3027 + DMA_BIDIRECTIONAL);
3028 +
3029 + fas = dpaa2_get_fas(vaddr, false);
3030 + prefetch(fas);
3031 + buf_data = vaddr + dpaa2_fd_get_offset(fd);
3032 + prefetch(buf_data);
3033 +
3034 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
3035 + percpu_extras = this_cpu_ptr(priv->percpu_extras);
3036 +
3037 + xdp_prog = READ_ONCE(ch->xdp_prog);
3038 +
3039 + if (fd_format == dpaa2_fd_single) {
3040 + if (xdp_prog) {
3041 + xdp.data = buf_data;
3042 + xdp.data_end = buf_data + dpaa2_fd_get_len(fd);
3043 + /* for now, we don't support changes in header size */
3044 + xdp.data_hard_start = buf_data;
3045 +
3046 + /* update stats here, as we won't reach the code
3047 + * that does that for standard frames
3048 + */
3049 + percpu_stats->rx_packets++;
3050 + percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
3051 +
3052 + xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
3053 + switch (xdp_act) {
3054 + case XDP_PASS:
3055 + break;
3056 + default:
3057 + bpf_warn_invalid_xdp_action(xdp_act);
3058 + case XDP_ABORTED:
3059 + case XDP_DROP:
3060 + release_fd_buf(priv, ch, addr);
3061 + goto drop_cnt;
3062 + case XDP_TX:
3063 + if (dpaa2_eth_xdp_tx(priv, (struct dpaa2_fd *)fd, vaddr,
3064 + queue_id)) {
3065 + dma_unmap_single(dev, addr,
3066 + DPAA2_ETH_RX_BUF_SIZE,
3067 + DMA_BIDIRECTIONAL);
3068 + free_rx_fd(priv, fd, vaddr);
3069 + ch->buf_count--;
3070 + }
3071 + return;
3072 + }
3073 + }
3074 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
3075 + DMA_BIDIRECTIONAL);
3076 + skb = build_linear_skb(priv, ch, fd, vaddr);
3077 + } else if (fd_format == dpaa2_fd_sg) {
3078 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
3079 + DMA_BIDIRECTIONAL);
3080 + skb = build_frag_skb(priv, ch, buf_data);
3081 + skb_free_frag(vaddr);
3082 + percpu_extras->rx_sg_frames++;
3083 + percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
3084 + } else {
3085 + /* We don't support any other format */
3086 + goto drop_cnt;
3087 + }
3088 +
3089 + if (unlikely(!skb))
3090 + goto drop_fd;
3091 +
3092 + prefetch(skb->data);
3093 +
3094 + /* Get the timestamp value */
3095 + if (priv->ts_rx_en) {
3096 + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
3097 + u64 *ns = dpaa2_get_ts(vaddr, false);
3098 +
3099 + *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
3100 + memset(shhwtstamps, 0, sizeof(*shhwtstamps));
3101 + shhwtstamps->hwtstamp = ns_to_ktime(*ns);
3102 + }
3103 +
3104 + /* Check if we need to validate the L4 csum */
3105 + if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
3106 + status = le32_to_cpu(fas->status);
3107 + validate_rx_csum(priv, status, skb);
3108 + }
3109 +
3110 + skb->protocol = eth_type_trans(skb, priv->net_dev);
3111 +
3112 + /* Record Rx queue - this will be used when picking a Tx queue to
3113 + * forward the frames. We're keeping flow affinity through the
3114 + * network stack.
3115 + */
3116 + skb_record_rx_queue(skb, queue_id);
3117 +
3118 + percpu_stats->rx_packets++;
3119 + percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
3120 +
3121 + napi_gro_receive(napi, skb);
3122 +
3123 + return;
3124 +
3125 +drop_fd:
3126 + free_rx_fd(priv, fd, vaddr);
3127 +drop_cnt:
3128 + percpu_stats->rx_dropped++;
3129 +}
3130 +
3131 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
3132 +/* Processing of Rx frames received on the error FQ
3133 + * We check and print the error bits and then free the frame
3134 + */
3135 +static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
3136 + struct dpaa2_eth_channel *ch,
3137 + const struct dpaa2_fd *fd,
3138 + struct napi_struct *napi __always_unused,
3139 + u16 queue_id __always_unused)
3140 +{
3141 + struct device *dev = priv->net_dev->dev.parent;
3142 + dma_addr_t addr = dpaa2_fd_get_addr(fd);
3143 + void *vaddr;
3144 + struct rtnl_link_stats64 *percpu_stats;
3145 + struct dpaa2_fas *fas;
3146 + u32 status = 0;
3147 + u32 fd_errors;
3148 + bool has_fas_errors = false;
3149 +
3150 + vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
3151 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
3152 +
3153 + /* check frame errors in the FD field */
3154 + fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_RX_ERR_MASK;
3155 + if (likely(fd_errors)) {
3156 + has_fas_errors = (fd_errors & FD_CTRL_FAERR) &&
3157 + !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
3158 + if (net_ratelimit())
3159 + netdev_dbg(priv->net_dev, "RX frame FD err: %08x\n",
3160 + fd_errors);
3161 + }
3162 +
3163 + /* check frame errors in the FAS field */
3164 + if (has_fas_errors) {
3165 + fas = dpaa2_get_fas(vaddr, false);
3166 + status = le32_to_cpu(fas->status);
3167 + if (net_ratelimit())
3168 + netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n",
3169 + status & DPAA2_FAS_RX_ERR_MASK);
3170 + }
3171 + free_rx_fd(priv, fd, vaddr);
3172 +
3173 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
3174 + percpu_stats->rx_errors++;
3175 + ch->buf_count--;
3176 +}
3177 +#endif
3178 +
3179 +/* Consume all frames pull-dequeued into the store. This is the simplest way to
3180 + * make sure we don't accidentally issue another volatile dequeue which would
3181 + * overwrite (leak) frames already in the store.
3182 + *
3183 + * The number of frames is returned using the last 2 output arguments,
3184 + * separately for Rx and Tx confirmations.
3185 + *
3186 + * Observance of NAPI budget is not our concern, leaving that to the caller.
3187 + */
3188 +static bool consume_frames(struct dpaa2_eth_channel *ch, int *rx_cleaned,
3189 + int *tx_conf_cleaned)
3190 +{
3191 + struct dpaa2_eth_priv *priv = ch->priv;
3192 + struct dpaa2_eth_fq *fq = NULL;
3193 + struct dpaa2_dq *dq;
3194 + const struct dpaa2_fd *fd;
3195 + int cleaned = 0;
3196 + int is_last;
3197 +
3198 + do {
3199 + dq = dpaa2_io_store_next(ch->store, &is_last);
3200 + if (unlikely(!dq)) {
3201 + /* If we're here, we *must* have placed a
3202 + * volatile dequeue command, so keep reading through
3203 + * the store until we get some sort of valid response
3204 + * token (either a valid frame or an "empty dequeue")
3205 + */
3206 + continue;
3207 + }
3208 +
3209 + fd = dpaa2_dq_fd(dq);
3210 + prefetch(fd);
3211 +
3212 + fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
3213 + fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
3214 + cleaned++;
3215 + } while (!is_last);
3216 +
3217 + if (!cleaned)
3218 + return false;
3219 +
3220 + /* All frames brought in store by a volatile dequeue
3221 + * come from the same queue
3222 + */
3223 + if (fq->type == DPAA2_TX_CONF_FQ)
3224 + *tx_conf_cleaned += cleaned;
3225 + else
3226 + *rx_cleaned += cleaned;
3227 +
3228 + fq->stats.frames += cleaned;
3229 + ch->stats.frames += cleaned;
3230 +
3231 + return true;
3232 +}
3233 +
3234 +/* Configure the egress frame annotation for timestamp update */
3235 +static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
3236 +{
3237 + struct dpaa2_faead *faead;
3238 + u32 ctrl, frc;
3239 +
3240 + /* Mark the egress frame annotation area as valid */
3241 + frc = dpaa2_fd_get_frc(fd);
3242 + dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
3243 +
3244 + /* Set hardware annotation size */
3245 + ctrl = dpaa2_fd_get_ctrl(fd);
3246 + dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
3247 +
3248 + /* enable UPD (update prepended data) bit in FAEAD field of
3249 + * hardware frame annotation area
3250 + */
3251 + ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
3252 + faead = dpaa2_get_faead(buf_start, true);
3253 + faead->ctrl = cpu_to_le32(ctrl);
3254 +}
3255 +
3256 +/* Create a frame descriptor based on a fragmented skb */
3257 +static int build_sg_fd(struct dpaa2_eth_priv *priv,
3258 + struct sk_buff *skb,
3259 + struct dpaa2_fd *fd)
3260 +{
3261 + struct device *dev = priv->net_dev->dev.parent;
3262 + void *sgt_buf = NULL;
3263 + dma_addr_t addr;
3264 + int nr_frags = skb_shinfo(skb)->nr_frags;
3265 + struct dpaa2_sg_entry *sgt;
3266 + int i, err;
3267 + int sgt_buf_size;
3268 + struct scatterlist *scl, *crt_scl;
3269 + int num_sg;
3270 + int num_dma_bufs;
3271 + struct dpaa2_eth_swa *swa;
3272 +
3273 + /* Create and map scatterlist.
3274 + * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
3275 + * to go beyond nr_frags+1.
3276 + * Note: We don't support chained scatterlists
3277 + */
3278 + if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
3279 + return -EINVAL;
3280 +
3281 + scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
3282 + if (unlikely(!scl))
3283 + return -ENOMEM;
3284 +
3285 + sg_init_table(scl, nr_frags + 1);
3286 + num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
3287 + num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
3288 + if (unlikely(!num_dma_bufs)) {
3289 + err = -ENOMEM;
3290 + goto dma_map_sg_failed;
3291 + }
3292 +
3293 + /* Prepare the HW SGT structure */
3294 + sgt_buf_size = priv->tx_data_offset +
3295 + sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
3296 + sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
3297 + if (unlikely(!sgt_buf)) {
3298 + err = -ENOMEM;
3299 + goto sgt_buf_alloc_failed;
3300 + }
3301 + sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
3302 + memset(sgt_buf, 0, sgt_buf_size);
3303 +
3304 + sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
3305 +
3306 + /* Fill in the HW SGT structure.
3307 + *
3308 + * sgt_buf is zeroed out, so the following fields are implicit
3309 + * in all sgt entries:
3310 + * - offset is 0
3311 + * - format is 'dpaa2_sg_single'
3312 + */
3313 + for_each_sg(scl, crt_scl, num_dma_bufs, i) {
3314 + dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
3315 + dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
3316 + }
3317 + dpaa2_sg_set_final(&sgt[i - 1], true);
3318 +
3319 + /* Store the skb backpointer in the SGT buffer.
3320 + * Fit the scatterlist and the number of buffers alongside the
3321 + * skb backpointer in the software annotation area. We'll need
3322 + * all of them on Tx Conf.
3323 + */
3324 + swa = (struct dpaa2_eth_swa *)sgt_buf;
3325 + swa->type = DPAA2_ETH_SWA_SG;
3326 + swa->sg.skb = skb;
3327 + swa->sg.scl = scl;
3328 + swa->sg.num_sg = num_sg;
3329 + swa->sg.sgt_size = sgt_buf_size;
3330 +
3331 + /* Separately map the SGT buffer */
3332 + addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
3333 + if (unlikely(dma_mapping_error(dev, addr))) {
3334 + err = -ENOMEM;
3335 + goto dma_map_single_failed;
3336 + }
3337 + dpaa2_fd_set_offset(fd, priv->tx_data_offset);
3338 + dpaa2_fd_set_format(fd, dpaa2_fd_sg);
3339 + dpaa2_fd_set_addr(fd, addr);
3340 + dpaa2_fd_set_len(fd, skb->len);
3341 + dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
3342 +
3343 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
3344 + enable_tx_tstamp(fd, sgt_buf);
3345 +
3346 + return 0;
3347 +
3348 +dma_map_single_failed:
3349 + skb_free_frag(sgt_buf);
3350 +sgt_buf_alloc_failed:
3351 + dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
3352 +dma_map_sg_failed:
3353 + kfree(scl);
3354 + return err;
3355 +}
3356 +
3357 +/* Create a frame descriptor based on a linear skb */
3358 +static int build_single_fd(struct dpaa2_eth_priv *priv,
3359 + struct sk_buff *skb,
3360 + struct dpaa2_fd *fd)
3361 +{
3362 + struct device *dev = priv->net_dev->dev.parent;
3363 + u8 *buffer_start, *aligned_start;
3364 + struct dpaa2_eth_swa *swa;
3365 + dma_addr_t addr;
3366 +
3367 + buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
3368 +
3369 + /* If there's enough room to align the FD address, do it.
3370 + * It will help hardware optimize accesses.
3371 + */
3372 + aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
3373 + DPAA2_ETH_TX_BUF_ALIGN);
3374 + if (aligned_start >= skb->head)
3375 + buffer_start = aligned_start;
3376 +
3377 + /* Store a backpointer to the skb at the beginning of the buffer
3378 + * (in the private data area) such that we can release it
3379 + * on Tx confirm
3380 + */
3381 + swa = (struct dpaa2_eth_swa *)buffer_start;
3382 + swa->type = DPAA2_ETH_SWA_SINGLE;
3383 + swa->single.skb = skb;
3384 +
3385 + addr = dma_map_single(dev, buffer_start,
3386 + skb_tail_pointer(skb) - buffer_start,
3387 + DMA_BIDIRECTIONAL);
3388 + if (unlikely(dma_mapping_error(dev, addr)))
3389 + return -ENOMEM;
3390 +
3391 + dpaa2_fd_set_addr(fd, addr);
3392 + dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
3393 + dpaa2_fd_set_len(fd, skb->len);
3394 + dpaa2_fd_set_format(fd, dpaa2_fd_single);
3395 + dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
3396 +
3397 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
3398 + enable_tx_tstamp(fd, buffer_start);
3399 +
3400 + return 0;
3401 +}
3402 +
3403 +/* FD freeing routine on the Tx path
3404 + *
3405 + * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
3406 + * back-pointed to is also freed.
3407 + * This can be called either from dpaa2_eth_tx_conf() or on the error path of
3408 + * dpaa2_eth_tx().
3409 + * Optionally, return the frame annotation status word (FAS), which needs
3410 + * to be checked if we're on the confirmation path.
3411 + */
3412 +static void free_tx_fd(struct dpaa2_eth_priv *priv,
3413 + const struct dpaa2_fd *fd,
3414 + bool in_napi)
3415 +{
3416 + struct device *dev = priv->net_dev->dev.parent;
3417 + dma_addr_t fd_addr;
3418 + struct sk_buff *skb = NULL;
3419 + unsigned char *buffer_start;
3420 + struct dpaa2_eth_swa *swa;
3421 + u8 fd_format = dpaa2_fd_get_format(fd);
3422 +
3423 + fd_addr = dpaa2_fd_get_addr(fd);
3424 + buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
3425 + swa = (struct dpaa2_eth_swa *)buffer_start;
3426 +
3427 + if (fd_format == dpaa2_fd_single) {
3428 + skb = swa->single.skb;
3429 + /* Accessing the skb buffer is safe before dma unmap, because
3430 + * we didn't map the actual skb shell.
3431 + */
3432 + dma_unmap_single(dev, fd_addr,
3433 + skb_tail_pointer(skb) - buffer_start,
3434 + DMA_BIDIRECTIONAL);
3435 + } else if (fd_format == dpaa2_fd_sg) {
3436 + skb = swa->sg.skb;
3437 +
3438 + /* Unmap the scatterlist */
3439 + dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg, DMA_BIDIRECTIONAL);
3440 + kfree(swa->sg.scl);
3441 +
3442 + /* Unmap the SGT buffer */
3443 + dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
3444 + DMA_BIDIRECTIONAL);
3445 + } else {
3446 + netdev_dbg(priv->net_dev, "Invalid FD format\n");
3447 + return;
3448 + }
3449 +
3450 + /* Get the timestamp value */
3451 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
3452 + struct skb_shared_hwtstamps shhwtstamps;
3453 + u64 *ns;
3454 +
3455 + memset(&shhwtstamps, 0, sizeof(shhwtstamps));
3456 +
3457 + ns = dpaa2_get_ts(buffer_start, true);
3458 + *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
3459 + shhwtstamps.hwtstamp = ns_to_ktime(*ns);
3460 + skb_tstamp_tx(skb, &shhwtstamps);
3461 + }
3462 +
3463 + /* Free SGT buffer allocated on tx */
3464 + if (fd_format != dpaa2_fd_single)
3465 + skb_free_frag(buffer_start);
3466 +
3467 + /* Move on with skb release */
3468 + napi_consume_skb(skb, in_napi);
3469 +}
3470 +
3471 +static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
3472 +{
3473 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3474 + struct device *dev = net_dev->dev.parent;
3475 + struct dpaa2_fd fd;
3476 + struct rtnl_link_stats64 *percpu_stats;
3477 + struct dpaa2_eth_drv_stats *percpu_extras;
3478 + unsigned int needed_headroom;
3479 + struct dpaa2_eth_fq *fq;
3480 + u16 queue_mapping;
3481 + int err, i, ch_id = 0, qpri = 0;
3482 +
3483 + queue_mapping = skb_get_queue_mapping(skb);
3484 + fq = &priv->fq[queue_mapping];
3485 +
3486 + /* If we're congested, stop this tx queue; transmission of
3487 + * the current skb happens regardless of congestion state
3488 + */
3489 + dma_sync_single_for_cpu(dev, priv->cscn_dma,
3490 + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
3491 + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
3492 + netif_stop_subqueue(net_dev, queue_mapping);
3493 + fq->stats.congestion_entry++;
3494 + }
3495 +
3496 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
3497 + percpu_extras = this_cpu_ptr(priv->percpu_extras);
3498 +
3499 + /* For non-linear skb we don't need a minimum headroom */
3500 + needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
3501 + if (skb_headroom(skb) < needed_headroom) {
3502 + struct sk_buff *ns;
3503 +
3504 + ns = skb_realloc_headroom(skb, needed_headroom);
3505 + if (unlikely(!ns)) {
3506 + percpu_stats->tx_dropped++;
3507 + goto err_alloc_headroom;
3508 + }
3509 + percpu_extras->tx_reallocs++;
3510 + if (skb->sk)
3511 + skb_set_owner_w(ns, skb->sk);
3512 + dev_kfree_skb(skb);
3513 + skb = ns;
3514 + }
3515 +
3516 + /* We'll be holding a back-reference to the skb until Tx Confirmation;
3517 + * we don't want that overwritten by a concurrent Tx with a cloned skb.
3518 + */
3519 + skb = skb_unshare(skb, GFP_ATOMIC);
3520 + if (unlikely(!skb)) {
3521 + /* skb_unshare() has already freed the skb */
3522 + percpu_stats->tx_dropped++;
3523 + return NETDEV_TX_OK;
3524 + }
3525 +
3526 + /* Setup the FD fields */
3527 + memset(&fd, 0, sizeof(fd));
3528 +
3529 + if (skb_is_nonlinear(skb)) {
3530 + err = build_sg_fd(priv, skb, &fd);
3531 + percpu_extras->tx_sg_frames++;
3532 + percpu_extras->tx_sg_bytes += skb->len;
3533 + } else {
3534 + err = build_single_fd(priv, skb, &fd);
3535 + }
3536 +
3537 + if (unlikely(err)) {
3538 + percpu_stats->tx_dropped++;
3539 + goto err_build_fd;
3540 + }
3541 +
3542 + /* Tracing point */
3543 + trace_dpaa2_tx_fd(net_dev, &fd);
3544 +
3545 + if (dpaa2_eth_ceetm_is_enabled(priv)) {
3546 + err = dpaa2_ceetm_classify(skb, net_dev->qdisc, &ch_id, &qpri);
3547 + if (err)
3548 + goto err_ceetm_classify;
3549 + }
3550 +
3551 + for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
3552 + err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
3553 + priv->tx_qdid, qpri,
3554 + fq->tx_qdbin, &fd);
3555 + if (err != -EBUSY)
3556 + break;
3557 + }
3558 + percpu_extras->tx_portal_busy += i;
3559 + if (unlikely(err < 0)) {
3560 + percpu_stats->tx_errors++;
3561 + /* Clean up everything, including freeing the skb */
3562 + free_tx_fd(priv, &fd, false);
3563 + } else {
3564 + percpu_stats->tx_packets++;
3565 + percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
3566 + }
3567 +
3568 + return NETDEV_TX_OK;
3569 +
3570 +err_ceetm_classify:
3571 + free_tx_fd(priv, &fd, false);
3572 +err_build_fd:
3573 +err_alloc_headroom:
3574 + dev_kfree_skb(skb);
3575 +
3576 + return NETDEV_TX_OK;
3577 +}
3578 +
3579 +/* Tx confirmation frame processing routine */
3580 +static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
3581 + struct dpaa2_eth_channel *ch,
3582 + const struct dpaa2_fd *fd,
3583 + struct napi_struct *napi __always_unused,
3584 + u16 queue_id)
3585 +{
3586 + struct device *dev = priv->net_dev->dev.parent;
3587 + struct rtnl_link_stats64 *percpu_stats;
3588 + struct dpaa2_eth_drv_stats *percpu_extras;
3589 + u32 fd_errors;
3590 +
3591 + /* Tracing point */
3592 + trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
3593 +
3594 + percpu_extras = this_cpu_ptr(priv->percpu_extras);
3595 + percpu_extras->tx_conf_frames++;
3596 + percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
3597 +
3598 + /* Check congestion state and wake all queues if necessary */
3599 + if (unlikely(__netif_subqueue_stopped(priv->net_dev, queue_id))) {
3600 + dma_sync_single_for_cpu(dev, priv->cscn_dma,
3601 + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
3602 + if (!dpaa2_cscn_state_congested(priv->cscn_mem))
3603 + netif_tx_wake_all_queues(priv->net_dev);
3604 + }
3605 +
3606 + /* Check frame errors in the FD field */
3607 + fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
3608 + free_tx_fd(priv, fd, true);
3609 +
3610 + if (likely(!fd_errors))
3611 + return;
3612 +
3613 + if (net_ratelimit())
3614 + netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
3615 + fd_errors);
3616 +
3617 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
3618 + /* Tx-conf logically pertains to the egress path. */
3619 + percpu_stats->tx_errors++;
3620 +}
3621 +
3622 +static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
3623 +{
3624 + int err;
3625 +
3626 + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
3627 + DPNI_OFF_RX_L3_CSUM, enable);
3628 + if (err) {
3629 + netdev_err(priv->net_dev,
3630 + "dpni_set_offload(RX_L3_CSUM) failed\n");
3631 + return err;
3632 + }
3633 +
3634 + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
3635 + DPNI_OFF_RX_L4_CSUM, enable);
3636 + if (err) {
3637 + netdev_err(priv->net_dev,
3638 + "dpni_set_offload(RX_L4_CSUM) failed\n");
3639 + return err;
3640 + }
3641 +
3642 + return 0;
3643 +}
3644 +
3645 +static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
3646 +{
3647 + int err;
3648 +
3649 + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
3650 + DPNI_OFF_TX_L3_CSUM, enable);
3651 + if (err) {
3652 + netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
3653 + return err;
3654 + }
3655 +
3656 + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
3657 + DPNI_OFF_TX_L4_CSUM, enable);
3658 + if (err) {
3659 + netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
3660 + return err;
3661 + }
3662 +
3663 + return 0;
3664 +}
3665 +
3666 +/* Perform a single release command to add buffers
3667 + * to the specified buffer pool
3668 + */
3669 +static int add_bufs(struct dpaa2_eth_priv *priv,
3670 + struct dpaa2_eth_channel *ch, u16 bpid)
3671 +{
3672 + struct device *dev = priv->net_dev->dev.parent;
3673 + u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
3674 + void *buf;
3675 + dma_addr_t addr;
3676 + int i, err;
3677 +
3678 + for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
3679 + /* Allocate buffer visible to WRIOP + skb shared info +
3680 + * alignment padding
3681 + */
3682 + buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv));
3683 + if (unlikely(!buf))
3684 + goto err_alloc;
3685 +
3686 + buf = PTR_ALIGN(buf, priv->rx_buf_align);
3687 +
3688 + addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
3689 + DMA_BIDIRECTIONAL);
3690 + if (unlikely(dma_mapping_error(dev, addr)))
3691 + goto err_map;
3692 +
3693 + buf_array[i] = addr;
3694 +
3695 + /* tracing point */
3696 + trace_dpaa2_eth_buf_seed(priv->net_dev,
3697 + buf, dpaa2_eth_buf_raw_size(priv),
3698 + addr, DPAA2_ETH_RX_BUF_SIZE,
3699 + bpid);
3700 + }
3701 +
3702 +release_bufs:
3703 + /* In case the portal is busy, retry until successful */
3704 + while ((err = dpaa2_io_service_release(ch->dpio, bpid,
3705 + buf_array, i)) == -EBUSY)
3706 + cpu_relax();
3707 +
3708 + /* If release command failed, clean up and bail out; not much
3709 + * else we can do about it
3710 + */
3711 + if (err) {
3712 + free_bufs(priv, buf_array, i);
3713 + return 0;
3714 + }
3715 +
3716 + return i;
3717 +
3718 +err_map:
3719 + skb_free_frag(buf);
3720 +err_alloc:
3721 + /* If we managed to allocate at least some buffers, release them */
3722 + if (i)
3723 + goto release_bufs;
3724 +
3725 + return 0;
3726 +}
3727 +
3728 +static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
3729 +{
3730 + int i, j;
3731 + int new_count;
3732 +
3733 + /* This is the lazy seeding of Rx buffer pools.
3734 + * add_bufs() is also used on the Rx hotpath and calls
3735 + * napi_alloc_frag(). The trouble with that is that it in turn ends up
3736 + * calling this_cpu_ptr(), which mandates execution in atomic context.
3737 + * Rather than splitting up the code, do a one-off preempt disable.
3738 + */
3739 + preempt_disable();
3740 + for (j = 0; j < priv->num_channels; j++) {
3741 + priv->channel[j]->buf_count = 0;
3742 + for (i = 0; i < priv->max_bufs_per_ch;
3743 + i += DPAA2_ETH_BUFS_PER_CMD) {
3744 + new_count = add_bufs(priv, priv->channel[j], bpid);
3745 + priv->channel[j]->buf_count += new_count;
3746 +
3747 + if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
3748 + preempt_enable();
3749 + return -ENOMEM;
3750 + }
3751 + }
3752 + }
3753 + preempt_enable();
3754 +
3755 + return 0;
3756 +}
3757 +
3758 +/**
3759 + * Drain the specified number of buffers from the DPNI's private buffer pool.
3760 + * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
3761 + */
3762 +static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
3763 +{
3764 + u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
3765 + int ret;
3766 +
3767 + do {
3768 + ret = dpaa2_io_service_acquire(NULL, priv->bpid,
3769 + buf_array, count);
3770 + if (ret < 0) {
3771 + netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
3772 + return;
3773 + }
3774 + free_bufs(priv, buf_array, ret);
3775 + } while (ret);
3776 +}
3777 +
3778 +static void drain_pool(struct dpaa2_eth_priv *priv)
3779 +{
3780 + preempt_disable();
3781 + drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
3782 + drain_bufs(priv, 1);
3783 + preempt_enable();
3784 +}
3785 +
3786 +/* Function is called from softirq context only, so we don't need to guard
3787 + * the access to percpu count
3788 + */
3789 +static int refill_pool(struct dpaa2_eth_priv *priv,
3790 + struct dpaa2_eth_channel *ch,
3791 + u16 bpid)
3792 +{
3793 + int new_count;
3794 +
3795 + if (likely(ch->buf_count >= priv->refill_thresh))
3796 + return 0;
3797 +
3798 + do {
3799 + new_count = add_bufs(priv, ch, bpid);
3800 + if (unlikely(!new_count)) {
3801 + /* Out of memory; abort for now, we'll try later on */
3802 + break;
3803 + }
3804 + ch->buf_count += new_count;
3805 + } while (ch->buf_count < priv->max_bufs_per_ch);
3806 +
3807 + if (unlikely(ch->buf_count < priv->max_bufs_per_ch))
3808 + return -ENOMEM;
3809 +
3810 + return 0;
3811 +}
3812 +
3813 +static int pull_channel(struct dpaa2_eth_channel *ch)
3814 +{
3815 + int err;
3816 + int dequeues = -1;
3817 +
3818 + /* Retry while portal is busy */
3819 + do {
3820 + err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
3821 + ch->store);
3822 + dequeues++;
3823 + cpu_relax();
3824 + } while (err == -EBUSY);
3825 +
3826 + ch->stats.dequeue_portal_busy += dequeues;
3827 + if (unlikely(err))
3828 + ch->stats.pull_err++;
3829 +
3830 + return err;
3831 +}
3832 +
3833 +/* NAPI poll routine
3834 + *
3835 + * Frames are dequeued from the QMan channel associated with this NAPI context.
3836 + * Rx and (if configured) Rx error frames count towards the NAPI budget. Tx
3837 + * confirmation frames are limited by a threshold per NAPI poll cycle.
3838 + */
3839 +static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
3840 +{
3841 + struct dpaa2_eth_channel *ch;
3842 + int rx_cleaned = 0, tx_conf_cleaned = 0;
3843 + bool store_cleaned;
3844 + struct dpaa2_eth_priv *priv;
3845 + int err;
3846 +
3847 + ch = container_of(napi, struct dpaa2_eth_channel, napi);
3848 + priv = ch->priv;
3849 +
3850 + do {
3851 + err = pull_channel(ch);
3852 + if (unlikely(err))
3853 + break;
3854 +
3855 + /* Refill pool if appropriate */
3856 + refill_pool(priv, ch, priv->bpid);
3857 +
3858 + store_cleaned = consume_frames(ch, &rx_cleaned,
3859 + &tx_conf_cleaned);
3860 +
3861 + /* If we've either consumed the budget with Rx frames,
3862 + * or reached the Tx conf threshold, we're done.
3863 + */
3864 + if (rx_cleaned >= budget ||
3865 + tx_conf_cleaned >= TX_CONF_PER_NAPI_POLL)
3866 + return budget;
3867 + } while (store_cleaned);
3868 +
3869 + /* We didn't consume the entire budget, finish napi and
3870 + * re-enable data availability notifications
3871 + */
3872 + napi_complete(napi);
3873 + do {
3874 + err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
3875 + cpu_relax();
3876 + } while (err == -EBUSY);
3877 + WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
3878 + ch->nctx.desired_cpu);
3879 +
3880 + return max(rx_cleaned, 1);
3881 +}
3882 +
3883 +static void enable_ch_napi(struct dpaa2_eth_priv *priv)
3884 +{
3885 + struct dpaa2_eth_channel *ch;
3886 + int i;
3887 +
3888 + for (i = 0; i < priv->num_channels; i++) {
3889 + ch = priv->channel[i];
3890 + napi_enable(&ch->napi);
3891 + }
3892 +}
3893 +
3894 +static void disable_ch_napi(struct dpaa2_eth_priv *priv)
3895 +{
3896 + struct dpaa2_eth_channel *ch;
3897 + int i;
3898 +
3899 + for (i = 0; i < priv->num_channels; i++) {
3900 + ch = priv->channel[i];
3901 + napi_disable(&ch->napi);
3902 + }
3903 +}
3904 +
3905 +static int link_state_update(struct dpaa2_eth_priv *priv)
3906 +{
3907 + struct dpni_link_state state;
3908 + int err;
3909 +
3910 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
3911 + if (unlikely(err)) {
3912 + netdev_err(priv->net_dev,
3913 + "dpni_get_link_state() failed\n");
3914 + return err;
3915 + }
3916 +
3917 + /* Check link state; speed / duplex changes are not treated yet */
3918 + if (priv->link_state.up == state.up)
3919 + return 0;
3920 +
3921 + priv->link_state = state;
3922 + if (state.up) {
3923 + netif_carrier_on(priv->net_dev);
3924 + netif_tx_start_all_queues(priv->net_dev);
3925 + } else {
3926 + netif_tx_stop_all_queues(priv->net_dev);
3927 + netif_carrier_off(priv->net_dev);
3928 + }
3929 +
3930 + netdev_info(priv->net_dev, "Link Event: state %s\n",
3931 + state.up ? "up" : "down");
3932 +
3933 + return 0;
3934 +}
3935 +
3936 +static int dpaa2_eth_open(struct net_device *net_dev)
3937 +{
3938 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3939 + int err;
3940 +
3941 + /* We'll only start the txqs when the link is actually ready; make sure
3942 + * we don't race against the link up notification, which may come
3943 + * immediately after dpni_enable();
3944 + */
3945 + netif_tx_stop_all_queues(net_dev);
3946 +
3947 + /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
3948 + * return true and cause 'ip link show' to report the LOWER_UP flag,
3949 + * even though the link notification wasn't even received.
3950 + */
3951 + netif_carrier_off(net_dev);
3952 +
3953 + err = seed_pool(priv, priv->bpid);
3954 + if (err) {
3955 + /* Not much to do; the buffer pool, though not filled up,
3956 + * may still contain some buffers which would enable us
3957 + * to limp on.
3958 + */
3959 + netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
3960 + priv->dpbp_dev->obj_desc.id, priv->bpid);
3961 + }
3962 +
3963 + priv->refill_thresh = DPAA2_ETH_REFILL_THRESH(priv);
3964 +
3965 + err = dpni_enable(priv->mc_io, 0, priv->mc_token);
3966 + if (err < 0) {
3967 + netdev_err(net_dev, "dpni_enable() failed\n");
3968 + goto enable_err;
3969 + }
3970 +
3971 + /* If the DPMAC object has already processed the link up interrupt,
3972 + * we have to learn the link state ourselves.
3973 + */
3974 + err = link_state_update(priv);
3975 + if (err < 0) {
3976 + netdev_err(net_dev, "Can't update link state\n");
3977 + goto link_state_err;
3978 + }
3979 +
3980 + return 0;
3981 +
3982 +link_state_err:
3983 +enable_err:
3984 + priv->refill_thresh = 0;
3985 + drain_pool(priv);
3986 + return err;
3987 +}
3988 +
3989 +static int dpaa2_eth_stop(struct net_device *net_dev)
3990 +{
3991 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3992 + int dpni_enabled;
3993 + int retries = 10, i;
3994 + int err = 0;
3995 +
3996 + netif_tx_stop_all_queues(net_dev);
3997 + netif_carrier_off(net_dev);
3998 +
3999 + /* Loop while dpni_disable() attempts to drain the egress FQs
4000 + * and confirm them back to us.
4001 + */
4002 + do {
4003 + dpni_disable(priv->mc_io, 0, priv->mc_token);
4004 + dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
4005 + if (dpni_enabled)
4006 + /* Allow the hardware some slack */
4007 + msleep(100);
4008 + } while (dpni_enabled && --retries);
4009 + if (!retries) {
4010 + netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
4011 + /* Must go on and finish processing pending frames, so we don't
4012 + * crash at the next "ifconfig up"
4013 + */
4014 + err = -ETIMEDOUT;
4015 + }
4016 +
4017 + priv->refill_thresh = 0;
4018 +
4019 + /* Wait for all running napi poll routines to finish, so that no
4020 + * new refill operations are started
4021 + */
4022 + for (i = 0; i < priv->num_channels; i++)
4023 + napi_synchronize(&priv->channel[i]->napi);
4024 +
4025 + /* Empty the buffer pool */
4026 + drain_pool(priv);
4027 +
4028 + return err;
4029 +}
4030 +
4031 +static int dpaa2_eth_init(struct net_device *net_dev)
4032 +{
4033 + u64 supported = 0;
4034 + u64 not_supported = 0;
4035 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4036 + u32 options = priv->dpni_attrs.options;
4037 +
4038 + /* Capabilities listing */
4039 + supported |= IFF_LIVE_ADDR_CHANGE;
4040 +
4041 + if (options & DPNI_OPT_NO_MAC_FILTER)
4042 + not_supported |= IFF_UNICAST_FLT;
4043 + else
4044 + supported |= IFF_UNICAST_FLT;
4045 +
4046 + net_dev->priv_flags |= supported;
4047 + net_dev->priv_flags &= ~not_supported;
4048 +
4049 + /* Features */
4050 + net_dev->features = NETIF_F_RXCSUM |
4051 + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4052 + NETIF_F_SG | NETIF_F_HIGHDMA |
4053 + NETIF_F_LLTX;
4054 + net_dev->hw_features = net_dev->features;
4055 +
4056 + return 0;
4057 +}
4058 +
4059 +static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
4060 +{
4061 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4062 + struct device *dev = net_dev->dev.parent;
4063 + int err;
4064 +
4065 + err = eth_mac_addr(net_dev, addr);
4066 + if (err < 0) {
4067 + dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
4068 + return err;
4069 + }
4070 +
4071 + err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
4072 + net_dev->dev_addr);
4073 + if (err) {
4074 + dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
4075 + return err;
4076 + }
4077 +
4078 + return 0;
4079 +}
4080 +
4081 +/** Fill in counters maintained by the GPP driver. These may be different from
4082 + * the hardware counters obtained by ethtool.
4083 + */
4084 +static struct rtnl_link_stats64 *dpaa2_eth_get_stats(struct net_device *net_dev,
4085 + struct rtnl_link_stats64 *stats)
4086 +{
4087 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4088 + struct rtnl_link_stats64 *percpu_stats;
4089 + u64 *cpustats;
4090 + u64 *netstats = (u64 *)stats;
4091 + int i, j;
4092 + int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
4093 +
4094 + for_each_possible_cpu(i) {
4095 + percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
4096 + cpustats = (u64 *)percpu_stats;
4097 + for (j = 0; j < num; j++)
4098 + netstats[j] += cpustats[j];
4099 + }
4100 + return stats;
4101 +}
4102 +
4103 +/* Copy mac unicast addresses from @net_dev to @priv.
4104 + * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
4105 + */
4106 +static void add_uc_hw_addr(const struct net_device *net_dev,
4107 + struct dpaa2_eth_priv *priv)
4108 +{
4109 + struct netdev_hw_addr *ha;
4110 + int err;
4111 +
4112 + netdev_for_each_uc_addr(ha, net_dev) {
4113 + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
4114 + ha->addr);
4115 + if (err)
4116 + netdev_warn(priv->net_dev,
4117 + "Could not add ucast MAC %pM to the filtering table (err %d)\n",
4118 + ha->addr, err);
4119 + }
4120 +}
4121 +
4122 +/* Copy mac multicast addresses from @net_dev to @priv
4123 + * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
4124 + */
4125 +static void add_mc_hw_addr(const struct net_device *net_dev,
4126 + struct dpaa2_eth_priv *priv)
4127 +{
4128 + struct netdev_hw_addr *ha;
4129 + int err;
4130 +
4131 + netdev_for_each_mc_addr(ha, net_dev) {
4132 + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
4133 + ha->addr);
4134 + if (err)
4135 + netdev_warn(priv->net_dev,
4136 + "Could not add mcast MAC %pM to the filtering table (err %d)\n",
4137 + ha->addr, err);
4138 + }
4139 +}
4140 +
4141 +static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
4142 +{
4143 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4144 + int uc_count = netdev_uc_count(net_dev);
4145 + int mc_count = netdev_mc_count(net_dev);
4146 + u8 max_mac = priv->dpni_attrs.mac_filter_entries;
4147 + u32 options = priv->dpni_attrs.options;
4148 + u16 mc_token = priv->mc_token;
4149 + struct fsl_mc_io *mc_io = priv->mc_io;
4150 + int err;
4151 +
4152 + /* Basic sanity checks; these probably indicate a misconfiguration */
4153 + if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
4154 + netdev_info(net_dev,
4155 + "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
4156 + max_mac);
4157 +
4158 + /* Force promiscuous if the uc or mc counts exceed our capabilities. */
4159 + if (uc_count > max_mac) {
4160 + netdev_info(net_dev,
4161 + "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
4162 + uc_count, max_mac);
4163 + goto force_promisc;
4164 + }
4165 + if (mc_count + uc_count > max_mac) {
4166 + netdev_info(net_dev,
4167 + "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
4168 + uc_count + mc_count, max_mac);
4169 + goto force_mc_promisc;
4170 + }
4171 +
4172 + /* Adjust promisc settings due to flag combinations */
4173 + if (net_dev->flags & IFF_PROMISC)
4174 + goto force_promisc;
4175 + if (net_dev->flags & IFF_ALLMULTI) {
4176 + /* First, rebuild unicast filtering table. This should be done
4177 + * in promisc mode, in order to avoid frame loss while we
4178 + * progressively add entries to the table.
4179 + * We don't know whether we had been in promisc already, and
4180 + * making an MC call to find out is expensive; so set uc promisc
4181 + * nonetheless.
4182 + */
4183 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
4184 + if (err)
4185 + netdev_warn(net_dev, "Can't set uc promisc\n");
4186 +
4187 + /* Actual uc table reconstruction. */
4188 + err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
4189 + if (err)
4190 + netdev_warn(net_dev, "Can't clear uc filters\n");
4191 + add_uc_hw_addr(net_dev, priv);
4192 +
4193 + /* Finally, clear uc promisc and set mc promisc as requested. */
4194 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
4195 + if (err)
4196 + netdev_warn(net_dev, "Can't clear uc promisc\n");
4197 + goto force_mc_promisc;
4198 + }
4199 +
4200 + /* Neither unicast, nor multicast promisc will be on... eventually.
4201 + * For now, rebuild mac filtering tables while forcing both of them on.
4202 + */
4203 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
4204 + if (err)
4205 + netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
4206 + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
4207 + if (err)
4208 + netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
4209 +
4210 + /* Actual mac filtering tables reconstruction */
4211 + err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
4212 + if (err)
4213 + netdev_warn(net_dev, "Can't clear mac filters\n");
4214 + add_mc_hw_addr(net_dev, priv);
4215 + add_uc_hw_addr(net_dev, priv);
4216 +
4217 + /* Now we can clear both ucast and mcast promisc, without risking
4218 + * to drop legitimate frames anymore.
4219 + */
4220 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
4221 + if (err)
4222 + netdev_warn(net_dev, "Can't clear ucast promisc\n");
4223 + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
4224 + if (err)
4225 + netdev_warn(net_dev, "Can't clear mcast promisc\n");
4226 +
4227 + return;
4228 +
4229 +force_promisc:
4230 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
4231 + if (err)
4232 + netdev_warn(net_dev, "Can't set ucast promisc\n");
4233 +force_mc_promisc:
4234 + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
4235 + if (err)
4236 + netdev_warn(net_dev, "Can't set mcast promisc\n");
4237 +}
4238 +
4239 +static int dpaa2_eth_set_features(struct net_device *net_dev,
4240 + netdev_features_t features)
4241 +{
4242 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4243 + netdev_features_t changed = features ^ net_dev->features;
4244 + bool enable;
4245 + int err;
4246 +
4247 + if (changed & NETIF_F_RXCSUM) {
4248 + enable = !!(features & NETIF_F_RXCSUM);
4249 + err = set_rx_csum(priv, enable);
4250 + if (err)
4251 + return err;
4252 + }
4253 +
4254 + if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
4255 + enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
4256 + err = set_tx_csum(priv, enable);
4257 + if (err)
4258 + return err;
4259 + }
4260 +
4261 + return 0;
4262 +}
4263 +
4264 +static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4265 +{
4266 + struct dpaa2_eth_priv *priv = netdev_priv(dev);
4267 + struct hwtstamp_config config;
4268 +
4269 + if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
4270 + return -EFAULT;
4271 +
4272 + switch (config.tx_type) {
4273 + case HWTSTAMP_TX_OFF:
4274 + priv->ts_tx_en = false;
4275 + break;
4276 + case HWTSTAMP_TX_ON:
4277 + priv->ts_tx_en = true;
4278 + break;
4279 + default:
4280 + return -ERANGE;
4281 + }
4282 +
4283 + if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
4284 + priv->ts_rx_en = false;
4285 + } else {
4286 + priv->ts_rx_en = true;
4287 + /* TS is set for all frame types, not only those requested */
4288 + config.rx_filter = HWTSTAMP_FILTER_ALL;
4289 + }
4290 +
4291 + return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
4292 + -EFAULT : 0;
4293 +}
4294 +
4295 +static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4296 +{
4297 + if (cmd == SIOCSHWTSTAMP)
4298 + return dpaa2_eth_ts_ioctl(dev, rq, cmd);
4299 +
4300 + return -EINVAL;
4301 +}
4302 +
4303 +static int set_buffer_layout(struct dpaa2_eth_priv *priv)
4304 +{
4305 + struct device *dev = priv->net_dev->dev.parent;
4306 + struct dpni_buffer_layout buf_layout = {0};
4307 + int err;
4308 +
4309 + /* We need to check for WRIOP version 1.0.0, but depending on the MC
4310 + * version, this number is not always provided correctly on rev1.
4311 + * We need to check for both alternatives in this situation.
4312 + */
4313 + if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
4314 + priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
4315 + priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
4316 + else
4317 + priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
4318 +
4319 + /* tx buffer */
4320 + buf_layout.pass_timestamp = true;
4321 + buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
4322 + buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
4323 + DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
4324 + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
4325 + DPNI_QUEUE_TX, &buf_layout);
4326 + if (err) {
4327 + dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
4328 + return err;
4329 + }
4330 +
4331 + /* tx-confirm buffer */
4332 + buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
4333 + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
4334 + DPNI_QUEUE_TX_CONFIRM, &buf_layout);
4335 + if (err) {
4336 + dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
4337 + return err;
4338 + }
4339 +
4340 + /* Now that we've set our tx buffer layout, retrieve the minimum
4341 + * required tx data offset.
4342 + */
4343 + err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
4344 + &priv->tx_data_offset);
4345 + if (err) {
4346 + dev_err(dev, "dpni_get_tx_data_offset() failed\n");
4347 + return err;
4348 + }
4349 +
4350 + if ((priv->tx_data_offset % 64) != 0)
4351 + dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
4352 + priv->tx_data_offset);
4353 +
4354 + /* rx buffer */
4355 + buf_layout.pass_frame_status = true;
4356 + buf_layout.pass_parser_result = true;
4357 + buf_layout.data_align = priv->rx_buf_align;
4358 + buf_layout.private_data_size = 0;
4359 + buf_layout.data_head_room = dpaa2_eth_rx_headroom(priv);
4360 + buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
4361 + DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
4362 + DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
4363 + DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
4364 + DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
4365 + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
4366 + DPNI_QUEUE_RX, &buf_layout);
4367 + if (err) {
4368 + dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
4369 + return err;
4370 + }
4371 +
4372 + return 0;
4373 +}
4374 +
4375 +static int dpaa2_eth_set_xdp(struct net_device *net_dev, struct bpf_prog *prog)
4376 +{
4377 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4378 + struct dpaa2_eth_channel *ch;
4379 + struct bpf_prog *old_prog;
4380 + int i, err;
4381 +
4382 + /* No support for SG frames */
4383 + if (DPAA2_ETH_L2_MAX_FRM(net_dev->mtu) > DPAA2_ETH_RX_BUF_SIZE)
4384 + return -EINVAL;
4385 +
4386 + if (netif_running(net_dev)) {
4387 + err = dpaa2_eth_stop(net_dev);
4388 + if (err)
4389 + return err;
4390 + }
4391 +
4392 + if (prog) {
4393 + prog = bpf_prog_add(prog, priv->num_channels - 1);
4394 + if (IS_ERR(prog))
4395 + return PTR_ERR(prog);
4396 + }
4397 +
4398 + priv->has_xdp_prog = !!prog;
4399 +
4400 + for (i = 0; i < priv->num_channels; i++) {
4401 + ch = priv->channel[i];
4402 + old_prog = xchg(&ch->xdp_prog, prog);
4403 + if (old_prog)
4404 + bpf_prog_put(old_prog);
4405 + }
4406 +
4407 + if (netif_running(net_dev)) {
4408 + err = dpaa2_eth_open(net_dev);
4409 + if (err)
4410 + return err;
4411 + }
4412 +
4413 + return 0;
4414 +}
4415 +
4416 +static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_xdp *xdp)
4417 +{
4418 + struct dpaa2_eth_priv *priv = netdev_priv(dev);
4419 +
4420 + switch (xdp->command) {
4421 + case XDP_SETUP_PROG:
4422 + return dpaa2_eth_set_xdp(dev, xdp->prog);
4423 + case XDP_QUERY_PROG:
4424 + xdp->prog_attached = priv->has_xdp_prog;
4425 + return 0;
4426 + default:
4427 + return -EINVAL;
4428 + }
4429 +}
4430 +
4431 +static const struct net_device_ops dpaa2_eth_ops = {
4432 + .ndo_open = dpaa2_eth_open,
4433 + .ndo_start_xmit = dpaa2_eth_tx,
4434 + .ndo_stop = dpaa2_eth_stop,
4435 + .ndo_init = dpaa2_eth_init,
4436 + .ndo_set_mac_address = dpaa2_eth_set_addr,
4437 + .ndo_get_stats64 = dpaa2_eth_get_stats,
4438 + .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
4439 + .ndo_set_features = dpaa2_eth_set_features,
4440 + .ndo_do_ioctl = dpaa2_eth_ioctl,
4441 + .ndo_xdp = dpaa2_eth_xdp,
4442 +};
4443 +
4444 +static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
4445 +{
4446 + struct dpaa2_eth_channel *ch;
4447 +
4448 + ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
4449 +
4450 + /* Update NAPI statistics */
4451 + ch->stats.cdan++;
4452 +
4453 + napi_schedule_irqoff(&ch->napi);
4454 +}
4455 +
4456 +/* Allocate and configure a DPCON object */
4457 +static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
4458 +{
4459 + struct fsl_mc_device *dpcon;
4460 + struct device *dev = priv->net_dev->dev.parent;
4461 + struct dpcon_attr attrs;
4462 + int err;
4463 +
4464 + err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
4465 + FSL_MC_POOL_DPCON, &dpcon);
4466 + if (err) {
4467 + dev_info(dev, "Not enough DPCONs, will go on as-is\n");
4468 + return NULL;
4469 + }
4470 +
4471 + err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
4472 + if (err) {
4473 + dev_err(dev, "dpcon_open() failed\n");
4474 + goto free;
4475 + }
4476 +
4477 + err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
4478 + if (err) {
4479 + dev_err(dev, "dpcon_reset() failed\n");
4480 + goto close;
4481 + }
4482 +
4483 + err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
4484 + if (err) {
4485 + dev_err(dev, "dpcon_get_attributes() failed\n");
4486 + goto close;
4487 + }
4488 +
4489 + err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
4490 + if (err) {
4491 + dev_err(dev, "dpcon_enable() failed\n");
4492 + goto close;
4493 + }
4494 +
4495 + return dpcon;
4496 +
4497 +close:
4498 + dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
4499 +free:
4500 + fsl_mc_object_free(dpcon);
4501 +
4502 + return NULL;
4503 +}
4504 +
4505 +static void free_dpcon(struct dpaa2_eth_priv *priv,
4506 + struct fsl_mc_device *dpcon)
4507 +{
4508 + dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
4509 + dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
4510 + fsl_mc_object_free(dpcon);
4511 +}
4512 +
4513 +static struct dpaa2_eth_channel *
4514 +alloc_channel(struct dpaa2_eth_priv *priv)
4515 +{
4516 + struct dpaa2_eth_channel *channel;
4517 + struct dpcon_attr attr;
4518 + struct device *dev = priv->net_dev->dev.parent;
4519 + int err;
4520 +
4521 + channel = kzalloc(sizeof(*channel), GFP_KERNEL);
4522 + if (!channel)
4523 + return NULL;
4524 +
4525 + channel->dpcon = setup_dpcon(priv);
4526 + if (!channel->dpcon)
4527 + goto err_setup;
4528 +
4529 + err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
4530 + &attr);
4531 + if (err) {
4532 + dev_err(dev, "dpcon_get_attributes() failed\n");
4533 + goto err_get_attr;
4534 + }
4535 +
4536 + channel->dpcon_id = attr.id;
4537 + channel->ch_id = attr.qbman_ch_id;
4538 + channel->priv = priv;
4539 +
4540 + return channel;
4541 +
4542 +err_get_attr:
4543 + free_dpcon(priv, channel->dpcon);
4544 +err_setup:
4545 + kfree(channel);
4546 + return NULL;
4547 +}
4548 +
4549 +static void free_channel(struct dpaa2_eth_priv *priv,
4550 + struct dpaa2_eth_channel *channel)
4551 +{
4552 + struct bpf_prog *prog;
4553 +
4554 + free_dpcon(priv, channel->dpcon);
4555 +
4556 + prog = READ_ONCE(channel->xdp_prog);
4557 + if (prog)
4558 + bpf_prog_put(prog);
4559 +
4560 + kfree(channel);
4561 +}
4562 +
4563 +/* DPIO setup: allocate and configure QBMan channels, setup core affinity
4564 + * and register data availability notifications
4565 + */
4566 +static int setup_dpio(struct dpaa2_eth_priv *priv)
4567 +{
4568 + struct dpaa2_io_notification_ctx *nctx;
4569 + struct dpaa2_eth_channel *channel;
4570 + struct dpcon_notification_cfg dpcon_notif_cfg;
4571 + struct device *dev = priv->net_dev->dev.parent;
4572 + int i, err;
4573 +
4574 + /* We want the ability to spread ingress traffic (RX, TX conf) to as
4575 + * many cores as possible, so we need one channel for each core
4576 + * (unless there's fewer queues than cores, in which case the extra
4577 + * channels would be wasted).
4578 + * Allocate one channel per core and register it to the core's
4579 + * affine DPIO. If not enough channels are available for all cores
4580 + * or if some cores don't have an affine DPIO, there will be no
4581 + * ingress frame processing on those cores.
4582 + */
4583 + cpumask_clear(&priv->dpio_cpumask);
4584 + for_each_online_cpu(i) {
4585 + /* Try to allocate a channel */
4586 + channel = alloc_channel(priv);
4587 + if (!channel) {
4588 + dev_info(dev,
4589 + "No affine channel for cpu %d and above\n", i);
4590 + err = -ENODEV;
4591 + goto err_alloc_ch;
4592 + }
4593 +
4594 + priv->channel[priv->num_channels] = channel;
4595 +
4596 + nctx = &channel->nctx;
4597 + nctx->is_cdan = 1;
4598 + nctx->cb = cdan_cb;
4599 + nctx->id = channel->ch_id;
4600 + nctx->desired_cpu = i;
4601 +
4602 + /* Register the new context */
4603 + channel->dpio = dpaa2_io_service_select(i);
4604 + err = dpaa2_io_service_register(channel->dpio, nctx);
4605 + if (err) {
4606 + dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
4607 + /* If no affine DPIO for this core, there's probably
4608 + * none available for next cores either. Signal we want
4609 + * to retry later, in case the DPIO devices weren't
4610 + * probed yet.
4611 + */
4612 + err = -EPROBE_DEFER;
4613 + goto err_service_reg;
4614 + }
4615 +
4616 + /* Register DPCON notification with MC */
4617 + dpcon_notif_cfg.dpio_id = nctx->dpio_id;
4618 + dpcon_notif_cfg.priority = 0;
4619 + dpcon_notif_cfg.user_ctx = nctx->qman64;
4620 + err = dpcon_set_notification(priv->mc_io, 0,
4621 + channel->dpcon->mc_handle,
4622 + &dpcon_notif_cfg);
4623 + if (err) {
4624 + dev_err(dev, "dpcon_set_notification failed()\n");
4625 + goto err_set_cdan;
4626 + }
4627 +
4628 + /* If we managed to allocate a channel and also found an affine
4629 + * DPIO for this core, add it to the final mask
4630 + */
4631 + cpumask_set_cpu(i, &priv->dpio_cpumask);
4632 + priv->num_channels++;
4633 +
4634 + /* Stop if we already have enough channels to accommodate all
4635 + * RX and TX conf queues
4636 + */
4637 + if (priv->num_channels == dpaa2_eth_queue_count(priv))
4638 + break;
4639 + }
4640 +
4641 + return 0;
4642 +
4643 +err_set_cdan:
4644 + dpaa2_io_service_deregister(channel->dpio, nctx);
4645 +err_service_reg:
4646 + free_channel(priv, channel);
4647 +err_alloc_ch:
4648 + if (cpumask_empty(&priv->dpio_cpumask)) {
4649 + dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
4650 + return err;
4651 + }
4652 +
4653 + dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
4654 + cpumask_pr_args(&priv->dpio_cpumask));
4655 +
4656 + return 0;
4657 +}
4658 +
4659 +static void free_dpio(struct dpaa2_eth_priv *priv)
4660 +{
4661 + int i;
4662 + struct dpaa2_eth_channel *ch;
4663 +
4664 + /* deregister CDAN notifications and free channels */
4665 + for (i = 0; i < priv->num_channels; i++) {
4666 + ch = priv->channel[i];
4667 + dpaa2_io_service_deregister(ch->dpio, &ch->nctx);
4668 + free_channel(priv, ch);
4669 + }
4670 +}
4671 +
4672 +static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
4673 + int cpu)
4674 +{
4675 + struct device *dev = priv->net_dev->dev.parent;
4676 + int i;
4677 +
4678 + for (i = 0; i < priv->num_channels; i++)
4679 + if (priv->channel[i]->nctx.desired_cpu == cpu)
4680 + return priv->channel[i];
4681 +
4682 + /* We should never get here. Issue a warning and return
4683 + * the first channel, because it's still better than nothing
4684 + */
4685 + dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
4686 +
4687 + return priv->channel[0];
4688 +}
4689 +
4690 +static void set_fq_affinity(struct dpaa2_eth_priv *priv)
4691 +{
4692 + struct device *dev = priv->net_dev->dev.parent;
4693 + struct cpumask xps_mask;
4694 + struct dpaa2_eth_fq *fq;
4695 + int rx_cpu, txc_cpu;
4696 + int i, err;
4697 +
4698 + /* For each FQ, pick one channel/CPU to deliver frames to.
4699 + * This may well change at runtime, either through irqbalance or
4700 + * through direct user intervention.
4701 + */
4702 + rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
4703 +
4704 + for (i = 0; i < priv->num_fqs; i++) {
4705 + fq = &priv->fq[i];
4706 + switch (fq->type) {
4707 + case DPAA2_RX_FQ:
4708 + case DPAA2_RX_ERR_FQ:
4709 + fq->target_cpu = rx_cpu;
4710 + rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
4711 + if (rx_cpu >= nr_cpu_ids)
4712 + rx_cpu = cpumask_first(&priv->dpio_cpumask);
4713 + break;
4714 + case DPAA2_TX_CONF_FQ:
4715 + fq->target_cpu = txc_cpu;
4716 +
4717 + /* Tell the stack to affine to txc_cpu the Tx queue
4718 + * associated with the confirmation one
4719 + */
4720 + cpumask_clear(&xps_mask);
4721 + cpumask_set_cpu(txc_cpu, &xps_mask);
4722 + err = netif_set_xps_queue(priv->net_dev, &xps_mask,
4723 + fq->flowid);
4724 + if (err)
4725 + dev_info_once(dev, "Error setting XPS queue\n");
4726 +
4727 + txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
4728 + if (txc_cpu >= nr_cpu_ids)
4729 + txc_cpu = cpumask_first(&priv->dpio_cpumask);
4730 + break;
4731 + default:
4732 + dev_err(dev, "Unknown FQ type: %d\n", fq->type);
4733 + }
4734 + fq->channel = get_affine_channel(priv, fq->target_cpu);
4735 + }
4736 +}
4737 +
4738 +static void setup_fqs(struct dpaa2_eth_priv *priv)
4739 +{
4740 + int i, j;
4741 +
4742 + /* We have one TxConf FQ per Tx flow.
4743 + * The number of Tx and Rx queues is the same.
4744 + * Tx queues come first in the fq array.
4745 + */
4746 + for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
4747 + priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
4748 + priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
4749 + priv->fq[priv->num_fqs++].flowid = (u16)i;
4750 + }
4751 +
4752 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++)
4753 + for (j = 0; j < dpaa2_eth_queue_count(priv); j++) {
4754 + priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
4755 + priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
4756 + priv->fq[priv->num_fqs].tc = (u8)i;
4757 + priv->fq[priv->num_fqs++].flowid = (u16)j;
4758 + }
4759 +
4760 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
4761 + /* We have exactly one Rx error queue per DPNI */
4762 + priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
4763 + priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
4764 +#endif
4765 +
4766 + /* For each FQ, decide on which core to process incoming frames */
4767 + set_fq_affinity(priv);
4768 +}
4769 +
4770 +/* Allocate and configure one buffer pool for each interface */
4771 +static int setup_dpbp(struct dpaa2_eth_priv *priv)
4772 +{
4773 + int err;
4774 + struct fsl_mc_device *dpbp_dev;
4775 + struct device *dev = priv->net_dev->dev.parent;
4776 + struct dpbp_attr dpbp_attrs;
4777 +
4778 + err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
4779 + &dpbp_dev);
4780 + if (err) {
4781 + dev_err(dev, "DPBP device allocation failed\n");
4782 + return err;
4783 + }
4784 +
4785 + priv->dpbp_dev = dpbp_dev;
4786 +
4787 + err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
4788 + &dpbp_dev->mc_handle);
4789 + if (err) {
4790 + dev_err(dev, "dpbp_open() failed\n");
4791 + goto err_open;
4792 + }
4793 +
4794 + err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
4795 + if (err) {
4796 + dev_err(dev, "dpbp_reset() failed\n");
4797 + goto err_reset;
4798 + }
4799 +
4800 + err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
4801 + if (err) {
4802 + dev_err(dev, "dpbp_enable() failed\n");
4803 + goto err_enable;
4804 + }
4805 +
4806 + err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
4807 + &dpbp_attrs);
4808 + if (err) {
4809 + dev_err(dev, "dpbp_get_attributes() failed\n");
4810 + goto err_get_attr;
4811 + }
4812 + priv->bpid = dpbp_attrs.bpid;
4813 +
4814 + /* By default we start with flow control enabled */
4815 + priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_FC / priv->num_channels;
4816 +
4817 + return 0;
4818 +
4819 +err_get_attr:
4820 + dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
4821 +err_enable:
4822 +err_reset:
4823 + dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
4824 +err_open:
4825 + fsl_mc_object_free(dpbp_dev);
4826 +
4827 + return err;
4828 +}
4829 +
4830 +static void free_dpbp(struct dpaa2_eth_priv *priv)
4831 +{
4832 + drain_pool(priv);
4833 + dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
4834 + dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
4835 + fsl_mc_object_free(priv->dpbp_dev);
4836 +}
4837 +
4838 +static int setup_tx_congestion(struct dpaa2_eth_priv *priv)
4839 +{
4840 + struct dpni_congestion_notification_cfg notif_cfg = {0};
4841 + struct device *dev = priv->net_dev->dev.parent;
4842 + int err;
4843 +
4844 + priv->cscn_unaligned = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
4845 + GFP_KERNEL);
4846 +
4847 + if (!priv->cscn_unaligned)
4848 + return -ENOMEM;
4849 +
4850 + priv->cscn_mem = PTR_ALIGN(priv->cscn_unaligned, DPAA2_CSCN_ALIGN);
4851 + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem, DPAA2_CSCN_SIZE,
4852 + DMA_FROM_DEVICE);
4853 + if (dma_mapping_error(dev, priv->cscn_dma)) {
4854 + dev_err(dev, "Error mapping CSCN memory area\n");
4855 + err = -ENOMEM;
4856 + goto err_dma_map;
4857 + }
4858 +
4859 + notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
4860 + notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
4861 + notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
4862 + notif_cfg.message_ctx = (u64)priv;
4863 + notif_cfg.message_iova = priv->cscn_dma;
4864 + notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
4865 + DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
4866 + DPNI_CONG_OPT_COHERENT_WRITE;
4867 + err = dpni_set_congestion_notification(priv->mc_io, 0, priv->mc_token,
4868 + DPNI_QUEUE_TX, 0, &notif_cfg);
4869 + if (err) {
4870 + dev_err(dev, "dpni_set_congestion_notification failed\n");
4871 + goto err_set_cong;
4872 + }
4873 +
4874 + return 0;
4875 +
4876 +err_set_cong:
4877 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4878 +err_dma_map:
4879 + kfree(priv->cscn_unaligned);
4880 +
4881 + return err;
4882 +}
4883 +
4884 +/* Configure the DPNI object this interface is associated with */
4885 +static int setup_dpni(struct fsl_mc_device *ls_dev)
4886 +{
4887 + struct device *dev = &ls_dev->dev;
4888 + struct dpaa2_eth_priv *priv;
4889 + struct net_device *net_dev;
4890 + struct dpni_link_cfg cfg = {0};
4891 + int err;
4892 +
4893 + net_dev = dev_get_drvdata(dev);
4894 + priv = netdev_priv(net_dev);
4895 +
4896 + /* get a handle for the DPNI object */
4897 + err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
4898 + if (err) {
4899 + dev_err(dev, "dpni_open() failed\n");
4900 + return err;
4901 + }
4902 +
4903 + /* Check if we can work with this DPNI object */
4904 + err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
4905 + &priv->dpni_ver_minor);
4906 + if (err) {
4907 + dev_err(dev, "dpni_get_api_version() failed\n");
4908 + goto close;
4909 + }
4910 + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
4911 + dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
4912 + priv->dpni_ver_major, priv->dpni_ver_minor,
4913 + DPNI_VER_MAJOR, DPNI_VER_MINOR);
4914 + err = -ENOTSUPP;
4915 + goto close;
4916 + }
4917 +
4918 + ls_dev->mc_io = priv->mc_io;
4919 + ls_dev->mc_handle = priv->mc_token;
4920 +
4921 + err = dpni_reset(priv->mc_io, 0, priv->mc_token);
4922 + if (err) {
4923 + dev_err(dev, "dpni_reset() failed\n");
4924 + goto close;
4925 + }
4926 +
4927 + err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
4928 + &priv->dpni_attrs);
4929 + if (err) {
4930 + dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
4931 + goto close;
4932 + }
4933 +
4934 + err = set_buffer_layout(priv);
4935 + if (err)
4936 + goto close;
4937 +
4938 + /* Enable congestion notifications for Tx queues */
4939 + err = setup_tx_congestion(priv);
4940 + if (err)
4941 + goto close;
4942 +
4943 + /* allocate classification rule space */
4944 + priv->cls_rule = kzalloc(sizeof(*priv->cls_rule) *
4945 + dpaa2_eth_fs_count(priv), GFP_KERNEL);
4946 + if (!priv->cls_rule)
4947 + goto close;
4948 +
4949 + /* Enable flow control */
4950 + cfg.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE;
4951 + priv->tx_pause_frames = true;
4952 + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
4953 + if (err) {
4954 + dev_err(dev, "dpni_set_link_cfg() failed\n");
4955 + goto cls_free;
4956 + }
4957 +
4958 + return 0;
4959 +
4960 +cls_free:
4961 + kfree(priv->cls_rule);
4962 +close:
4963 + dpni_close(priv->mc_io, 0, priv->mc_token);
4964 +
4965 + return err;
4966 +}
4967 +
4968 +static void free_dpni(struct dpaa2_eth_priv *priv)
4969 +{
4970 + struct device *dev = priv->net_dev->dev.parent;
4971 + int err;
4972 +
4973 + err = dpni_reset(priv->mc_io, 0, priv->mc_token);
4974 + if (err)
4975 + netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
4976 + err);
4977 +
4978 + dpni_close(priv->mc_io, 0, priv->mc_token);
4979 +
4980 + kfree(priv->cls_rule);
4981 +
4982 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4983 + kfree(priv->cscn_unaligned);
4984 +}
4985 +
4986 +static int setup_rx_flow(struct dpaa2_eth_priv *priv,
4987 + struct dpaa2_eth_fq *fq)
4988 +{
4989 + struct device *dev = priv->net_dev->dev.parent;
4990 + struct dpni_queue queue;
4991 + struct dpni_queue_id qid;
4992 + int err;
4993 +
4994 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
4995 + DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
4996 + if (err) {
4997 + dev_err(dev, "dpni_get_queue(RX) failed\n");
4998 + return err;
4999 + }
5000 +
5001 + fq->fqid = qid.fqid;
5002 +
5003 + queue.destination.id = fq->channel->dpcon_id;
5004 + queue.destination.type = DPNI_DEST_DPCON;
5005 + queue.destination.priority = 1;
5006 + queue.user_context = (u64)(uintptr_t)fq;
5007 + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
5008 + DPNI_QUEUE_RX, fq->tc, fq->flowid,
5009 + DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
5010 + &queue);
5011 + if (err) {
5012 + dev_err(dev, "dpni_set_queue(RX) failed\n");
5013 + return err;
5014 + }
5015 +
5016 + return 0;
5017 +}
5018 +
5019 +static int set_queue_taildrop(struct dpaa2_eth_priv *priv,
5020 + struct dpni_taildrop *td)
5021 +{
5022 + struct device *dev = priv->net_dev->dev.parent;
5023 + int i, err;
5024 +
5025 + for (i = 0; i < priv->num_fqs; i++) {
5026 + if (priv->fq[i].type != DPAA2_RX_FQ)
5027 + continue;
5028 +
5029 + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
5030 + DPNI_CP_QUEUE, DPNI_QUEUE_RX,
5031 + priv->fq[i].tc, priv->fq[i].flowid,
5032 + td);
5033 + if (err) {
5034 + dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
5035 + return err;
5036 + }
5037 +
5038 + dev_dbg(dev, "%s taildrop for Rx group tc %d\n",
5039 + (td->enable ? "Enabled" : "Disabled"),
5040 + i);
5041 + }
5042 +
5043 + return 0;
5044 +}
5045 +
5046 +static int set_group_taildrop(struct dpaa2_eth_priv *priv,
5047 + struct dpni_taildrop *td)
5048 +{
5049 + struct device *dev = priv->net_dev->dev.parent;
5050 + struct dpni_taildrop disable_td, *tc_td;
5051 + int i, err;
5052 +
5053 + memset(&disable_td, 0, sizeof(struct dpni_taildrop));
5054 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
5055 + if (td->enable && dpaa2_eth_is_pfc_enabled(priv, i))
5056 + /* Do not set taildrop thresholds for PFC-enabled
5057 + * traffic classes. We will enable congestion
5058 + * notifications for them.
5059 + */
5060 + tc_td = &disable_td;
5061 + else
5062 + tc_td = td;
5063 +
5064 + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
5065 + DPNI_CP_GROUP, DPNI_QUEUE_RX,
5066 + i, 0, tc_td);
5067 + if (err) {
5068 + dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
5069 + return err;
5070 + }
5071 +
5072 + dev_dbg(dev, "%s taildrop for Rx queue id %d tc %d\n",
5073 + (tc_td->enable ? "Enabled" : "Disabled"),
5074 + priv->fq[i].flowid, priv->fq[i].tc);
5075 + }
5076 +
5077 + return 0;
5078 +}
5079 +
5080 +/* Enable/disable Rx FQ taildrop
5081 + *
5082 + * Rx FQ taildrop is mutually exclusive with flow control and it only gets
5083 + * disabled when FC is active. Depending on FC status, we need to compute
5084 + * the maximum number of buffers in the pool differently, so use the
5085 + * opportunity to update max number of buffers as well.
5086 + */
5087 +int set_rx_taildrop(struct dpaa2_eth_priv *priv)
5088 +{
5089 + enum dpaa2_eth_td_cfg cfg = dpaa2_eth_get_td_type(priv);
5090 + struct dpni_taildrop td_queue, td_group;
5091 + int err = 0;
5092 +
5093 + switch (cfg) {
5094 + case DPAA2_ETH_TD_NONE:
5095 + memset(&td_queue, 0, sizeof(struct dpni_taildrop));
5096 + memset(&td_group, 0, sizeof(struct dpni_taildrop));
5097 + priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_FC /
5098 + priv->num_channels;
5099 + break;
5100 + case DPAA2_ETH_TD_QUEUE:
5101 + memset(&td_group, 0, sizeof(struct dpni_taildrop));
5102 + td_queue.enable = 1;
5103 + td_queue.units = DPNI_CONGESTION_UNIT_BYTES;
5104 + td_queue.threshold = DPAA2_ETH_TAILDROP_THRESH /
5105 + dpaa2_eth_tc_count(priv);
5106 + priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_PER_CH;
5107 + break;
5108 + case DPAA2_ETH_TD_GROUP:
5109 + memset(&td_queue, 0, sizeof(struct dpni_taildrop));
5110 + td_group.enable = 1;
5111 + td_group.units = DPNI_CONGESTION_UNIT_FRAMES;
5112 + td_group.threshold = NAPI_POLL_WEIGHT *
5113 + dpaa2_eth_queue_count(priv);
5114 + priv->max_bufs_per_ch = NAPI_POLL_WEIGHT *
5115 + dpaa2_eth_tc_count(priv);
5116 + break;
5117 + default:
5118 + break;
5119 + }
5120 +
5121 + err = set_queue_taildrop(priv, &td_queue);
5122 + if (err)
5123 + return err;
5124 +
5125 + err = set_group_taildrop(priv, &td_group);
5126 + if (err)
5127 + return err;
5128 +
5129 + priv->refill_thresh = DPAA2_ETH_REFILL_THRESH(priv);
5130 +
5131 + return 0;
5132 +}
5133 +
5134 +static int setup_tx_flow(struct dpaa2_eth_priv *priv,
5135 + struct dpaa2_eth_fq *fq)
5136 +{
5137 + struct device *dev = priv->net_dev->dev.parent;
5138 + struct dpni_queue queue;
5139 + struct dpni_queue_id qid;
5140 + int err;
5141 +
5142 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
5143 + DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
5144 + if (err) {
5145 + dev_err(dev, "dpni_get_queue(TX) failed\n");
5146 + return err;
5147 + }
5148 +
5149 + fq->tx_qdbin = qid.qdbin;
5150 +
5151 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
5152 + DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
5153 + &queue, &qid);
5154 + if (err) {
5155 + dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
5156 + return err;
5157 + }
5158 +
5159 + fq->fqid = qid.fqid;
5160 +
5161 + queue.destination.id = fq->channel->dpcon_id;
5162 + queue.destination.type = DPNI_DEST_DPCON;
5163 + queue.destination.priority = 0;
5164 + queue.user_context = (u64)(uintptr_t)fq;
5165 + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
5166 + DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
5167 + DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
5168 + &queue);
5169 + if (err) {
5170 + dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
5171 + return err;
5172 + }
5173 +
5174 + return 0;
5175 +}
5176 +
5177 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
5178 +static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
5179 + struct dpaa2_eth_fq *fq)
5180 +{
5181 + struct device *dev = priv->net_dev->dev.parent;
5182 + struct dpni_queue q = { { 0 } };
5183 + struct dpni_queue_id qid;
5184 + u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
5185 + int err;
5186 +
5187 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
5188 + DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
5189 + if (err) {
5190 + dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
5191 + return err;
5192 + }
5193 +
5194 + fq->fqid = qid.fqid;
5195 +
5196 + q.destination.id = fq->channel->dpcon_id;
5197 + q.destination.type = DPNI_DEST_DPCON;
5198 + q.destination.priority = 1;
5199 + q.user_context = (u64)fq;
5200 + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
5201 + DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
5202 + if (err) {
5203 + dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
5204 + return err;
5205 + }
5206 +
5207 + return 0;
5208 +}
5209 +#endif
5210 +
5211 +/* default hash key fields */
5212 +static struct dpaa2_eth_dist_fields default_dist_fields[] = {
5213 + {
5214 + /* L2 header */
5215 + .rxnfc_field = RXH_L2DA,
5216 + .cls_prot = NET_PROT_ETH,
5217 + .cls_field = NH_FLD_ETH_DA,
5218 + .id = DPAA2_ETH_DIST_ETHDST,
5219 + .size = 6,
5220 + }, {
5221 + .cls_prot = NET_PROT_ETH,
5222 + .cls_field = NH_FLD_ETH_SA,
5223 + .id = DPAA2_ETH_DIST_ETHSRC,
5224 + .size = 6,
5225 + }, {
5226 + /* This is the last ethertype field parsed:
5227 + * depending on frame format, it can be the MAC ethertype
5228 + * or the VLAN etype.
5229 + */
5230 + .cls_prot = NET_PROT_ETH,
5231 + .cls_field = NH_FLD_ETH_TYPE,
5232 + .id = DPAA2_ETH_DIST_ETHTYPE,
5233 + .size = 2,
5234 + }, {
5235 + /* VLAN header */
5236 + .rxnfc_field = RXH_VLAN,
5237 + .cls_prot = NET_PROT_VLAN,
5238 + .cls_field = NH_FLD_VLAN_TCI,
5239 + .id = DPAA2_ETH_DIST_VLAN,
5240 + .size = 2,
5241 + }, {
5242 + /* IP header */
5243 + .rxnfc_field = RXH_IP_SRC,
5244 + .cls_prot = NET_PROT_IP,
5245 + .cls_field = NH_FLD_IP_SRC,
5246 + .id = DPAA2_ETH_DIST_IPSRC,
5247 + .size = 4,
5248 + }, {
5249 + .rxnfc_field = RXH_IP_DST,
5250 + .cls_prot = NET_PROT_IP,
5251 + .cls_field = NH_FLD_IP_DST,
5252 + .id = DPAA2_ETH_DIST_IPDST,
5253 + .size = 4,
5254 + }, {
5255 + .rxnfc_field = RXH_L3_PROTO,
5256 + .cls_prot = NET_PROT_IP,
5257 + .cls_field = NH_FLD_IP_PROTO,
5258 + .id = DPAA2_ETH_DIST_IPPROTO,
5259 + .size = 1,
5260 + }, {
5261 + /* Using UDP ports, this is functionally equivalent to raw
5262 + * byte pairs from L4 header.
5263 + */
5264 + .rxnfc_field = RXH_L4_B_0_1,
5265 + .cls_prot = NET_PROT_UDP,
5266 + .cls_field = NH_FLD_UDP_PORT_SRC,
5267 + .id = DPAA2_ETH_DIST_L4SRC,
5268 + .size = 2,
5269 + }, {
5270 + .rxnfc_field = RXH_L4_B_2_3,
5271 + .cls_prot = NET_PROT_UDP,
5272 + .cls_field = NH_FLD_UDP_PORT_DST,
5273 + .id = DPAA2_ETH_DIST_L4DST,
5274 + .size = 2,
5275 + },
5276 +};
5277 +
5278 +static int legacy_config_dist_key(struct dpaa2_eth_priv *priv,
5279 + dma_addr_t key_iova)
5280 +{
5281 + struct device *dev = priv->net_dev->dev.parent;
5282 + struct dpni_rx_tc_dist_cfg dist_cfg;
5283 + int i, err;
5284 +
5285 + /* In legacy mode, we can't configure flow steering independently */
5286 + if (!dpaa2_eth_hash_enabled(priv))
5287 + return -EOPNOTSUPP;
5288 +
5289 + memset(&dist_cfg, 0, sizeof(dist_cfg));
5290 +
5291 + dist_cfg.key_cfg_iova = key_iova;
5292 + dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
5293 + if (dpaa2_eth_fs_enabled(priv)) {
5294 + dist_cfg.dist_mode = DPNI_DIST_MODE_FS;
5295 + dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH;
5296 + } else {
5297 + dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
5298 + }
5299 +
5300 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
5301 + err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, i,
5302 + &dist_cfg);
5303 + if (err) {
5304 + dev_err(dev, "dpni_set_rx_tc_dist failed\n");
5305 + return err;
5306 + }
5307 + }
5308 +
5309 + return 0;
5310 +}
5311 +
5312 +static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key_iova)
5313 +{
5314 + struct device *dev = priv->net_dev->dev.parent;
5315 + struct dpni_rx_dist_cfg dist_cfg;
5316 + int i, err;
5317 +
5318 + if (!dpaa2_eth_hash_enabled(priv))
5319 + return -EOPNOTSUPP;
5320 +
5321 + memset(&dist_cfg, 0, sizeof(dist_cfg));
5322 +
5323 + dist_cfg.key_cfg_iova = key_iova;
5324 + dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
5325 + dist_cfg.enable = true;
5326 +
5327 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
5328 + dist_cfg.tc = i;
5329 +
5330 + err = dpni_set_rx_hash_dist(priv->mc_io, 0,
5331 + priv->mc_token, &dist_cfg);
5332 + if (err) {
5333 + dev_err(dev, "dpni_set_rx_hash_dist failed\n");
5334 + return err;
5335 + }
5336 + }
5337 +
5338 + return 0;
5339 +}
5340 +
5341 +static int config_fs_key(struct dpaa2_eth_priv *priv, dma_addr_t key_iova)
5342 +{
5343 + struct device *dev = priv->net_dev->dev.parent;
5344 + struct dpni_rx_dist_cfg dist_cfg;
5345 + int i, err;
5346 +
5347 + if (!dpaa2_eth_fs_enabled(priv))
5348 + return -EOPNOTSUPP;
5349 +
5350 + memset(&dist_cfg, 0, sizeof(dist_cfg));
5351 +
5352 + dist_cfg.key_cfg_iova = key_iova;
5353 + dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
5354 + dist_cfg.enable = true;
5355 +
5356 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
5357 + dist_cfg.tc = i;
5358 +
5359 + err = dpni_set_rx_fs_dist(priv->mc_io, 0,
5360 + priv->mc_token, &dist_cfg);
5361 + if (err) {
5362 + dev_err(dev, "dpni_set_rx_fs_dist failed\n");
5363 + return err;
5364 + }
5365 + }
5366 +
5367 + return 0;
5368 +}
5369 +
5370 +int dpaa2_eth_set_dist_key(struct dpaa2_eth_priv *priv,
5371 + enum dpaa2_eth_rx_dist type, u32 key_fields)
5372 +{
5373 + struct device *dev = priv->net_dev->dev.parent;
5374 + struct dpkg_profile_cfg cls_cfg;
5375 + struct dpkg_extract *key;
5376 + u32 hash_fields = 0;
5377 + dma_addr_t key_iova;
5378 + u8 *key_mem;
5379 + int i, err;
5380 +
5381 + memset(&cls_cfg, 0, sizeof(cls_cfg));
5382 +
5383 + for (i = 0; i < priv->num_dist_fields; i++) {
5384 + if (!(key_fields & priv->dist_fields[i].id))
5385 + continue;
5386 +
5387 + key = &cls_cfg.extracts[cls_cfg.num_extracts];
5388 + key->type = DPKG_EXTRACT_FROM_HDR;
5389 + key->extract.from_hdr.prot = priv->dist_fields[i].cls_prot;
5390 + key->extract.from_hdr.type = DPKG_FULL_FIELD;
5391 + key->extract.from_hdr.field = priv->dist_fields[i].cls_field;
5392 + cls_cfg.num_extracts++;
5393 +
5394 + hash_fields |= priv->dist_fields[i].rxnfc_field;
5395 + }
5396 +
5397 + key_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
5398 + if (!key_mem)
5399 + return -ENOMEM;
5400 +
5401 + err = dpni_prepare_key_cfg(&cls_cfg, key_mem);
5402 + if (err) {
5403 + dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
5404 + goto free_key;
5405 + }
5406 +
5407 + key_iova = dma_map_single(dev, key_mem, DPAA2_CLASSIFIER_DMA_SIZE,
5408 + DMA_TO_DEVICE);
5409 + if (dma_mapping_error(dev, key_iova)) {
5410 + dev_err(dev, "DMA mapping failed\n");
5411 + err = -ENOMEM;
5412 + goto free_key;
5413 + }
5414 +
5415 + switch (type) {
5416 + case DPAA2_ETH_RX_DIST_LEGACY:
5417 + err = legacy_config_dist_key(priv, key_iova);
5418 + break;
5419 + case DPAA2_ETH_RX_DIST_HASH:
5420 + err = config_hash_key(priv, key_iova);
5421 + break;
5422 + case DPAA2_ETH_RX_DIST_FS:
5423 + err = config_fs_key(priv, key_iova);
5424 + break;
5425 + default:
5426 + err = -EINVAL;
5427 + break;
5428 + }
5429 +
5430 + dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
5431 + DMA_TO_DEVICE);
5432 + if (err) {
5433 + if (err != -EOPNOTSUPP)
5434 + dev_err(dev, "Distribution key config failed\n");
5435 + goto free_key;
5436 + }
5437 +
5438 + if (type != DPAA2_ETH_RX_DIST_FS)
5439 + priv->rx_hash_fields = hash_fields;
5440 +
5441 +free_key:
5442 + kfree(key_mem);
5443 + return err;
5444 +}
5445 +
5446 +/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
5447 + * frame queues and channels
5448 + */
5449 +static int bind_dpni(struct dpaa2_eth_priv *priv)
5450 +{
5451 + struct net_device *net_dev = priv->net_dev;
5452 + struct device *dev = net_dev->dev.parent;
5453 + struct dpni_pools_cfg pools_params;
5454 + struct dpni_error_cfg err_cfg;
5455 + int err = 0;
5456 + int i;
5457 +
5458 + pools_params.num_dpbp = 1;
5459 + pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
5460 + pools_params.pools[0].backup_pool = 0;
5461 + pools_params.pools[0].priority_mask = 0xff;
5462 + pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
5463 + err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
5464 + if (err) {
5465 + dev_err(dev, "dpni_set_pools() failed\n");
5466 + return err;
5467 + }
5468 +
5469 + /* Verify classification options and disable hashing and/or
5470 + * flow steering support in case of invalid configuration values
5471 + */
5472 + priv->dist_fields = default_dist_fields;
5473 + priv->num_dist_fields = ARRAY_SIZE(default_dist_fields);
5474 + check_cls_support(priv);
5475 +
5476 + /* have the interface implicitly distribute traffic based on
5477 + * a static hash key. Also configure flow steering key, if supported.
5478 + * Errors here are not blocking, so just let the called function
5479 + * print its error message and move along.
5480 + */
5481 + if (dpaa2_eth_has_legacy_dist(priv)) {
5482 + dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_LEGACY,
5483 + DPAA2_ETH_DIST_ALL);
5484 + } else {
5485 + dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_HASH,
5486 + DPAA2_ETH_DIST_DEFAULT_HASH);
5487 + dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_FS,
5488 + DPAA2_ETH_DIST_ALL);
5489 + }
5490 +
5491 + /* Configure handling of error frames */
5492 + err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
5493 + err_cfg.set_frame_annotation = 1;
5494 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
5495 + err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
5496 +#else
5497 + err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
5498 +#endif
5499 + err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
5500 + &err_cfg);
5501 + if (err) {
5502 + dev_err(dev, "dpni_set_errors_behavior failed\n");
5503 + return err;
5504 + }
5505 +
5506 + /* Configure Rx and Tx conf queues to generate CDANs */
5507 + for (i = 0; i < priv->num_fqs; i++) {
5508 + switch (priv->fq[i].type) {
5509 + case DPAA2_RX_FQ:
5510 + err = setup_rx_flow(priv, &priv->fq[i]);
5511 + break;
5512 + case DPAA2_TX_CONF_FQ:
5513 + err = setup_tx_flow(priv, &priv->fq[i]);
5514 + break;
5515 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
5516 + case DPAA2_RX_ERR_FQ:
5517 + err = setup_rx_err_flow(priv, &priv->fq[i]);
5518 + break;
5519 +#endif
5520 + default:
5521 + dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
5522 + return -EINVAL;
5523 + }
5524 + if (err)
5525 + return err;
5526 + }
5527 +
5528 + err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
5529 + DPNI_QUEUE_TX, &priv->tx_qdid);
5530 + if (err) {
5531 + dev_err(dev, "dpni_get_qdid() failed\n");
5532 + return err;
5533 + }
5534 +
5535 + return 0;
5536 +}
5537 +
5538 +/* Allocate rings for storing incoming frame descriptors */
5539 +static int alloc_rings(struct dpaa2_eth_priv *priv)
5540 +{
5541 + struct net_device *net_dev = priv->net_dev;
5542 + struct device *dev = net_dev->dev.parent;
5543 + int i;
5544 +
5545 + for (i = 0; i < priv->num_channels; i++) {
5546 + priv->channel[i]->store =
5547 + dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
5548 + if (!priv->channel[i]->store) {
5549 + netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
5550 + goto err_ring;
5551 + }
5552 + }
5553 +
5554 + return 0;
5555 +
5556 +err_ring:
5557 + for (i = 0; i < priv->num_channels; i++) {
5558 + if (!priv->channel[i]->store)
5559 + break;
5560 + dpaa2_io_store_destroy(priv->channel[i]->store);
5561 + }
5562 +
5563 + return -ENOMEM;
5564 +}
5565 +
5566 +static void free_rings(struct dpaa2_eth_priv *priv)
5567 +{
5568 + int i;
5569 +
5570 + for (i = 0; i < priv->num_channels; i++)
5571 + dpaa2_io_store_destroy(priv->channel[i]->store);
5572 +}
5573 +
5574 +static int set_mac_addr(struct dpaa2_eth_priv *priv)
5575 +{
5576 + struct net_device *net_dev = priv->net_dev;
5577 + struct device *dev = net_dev->dev.parent;
5578 + u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
5579 + int err;
5580 +
5581 + /* Get firmware address, if any */
5582 + err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
5583 + if (err) {
5584 + dev_err(dev, "dpni_get_port_mac_addr() failed\n");
5585 + return err;
5586 + }
5587 +
5588 + /* Get DPNI attributes address, if any */
5589 + err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
5590 + dpni_mac_addr);
5591 + if (err) {
5592 + dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
5593 + return err;
5594 + }
5595 +
5596 + /* First check if firmware has any address configured by bootloader */
5597 + if (!is_zero_ether_addr(mac_addr)) {
5598 + /* If the DPMAC addr != DPNI addr, update it */
5599 + if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
5600 + err = dpni_set_primary_mac_addr(priv->mc_io, 0,
5601 + priv->mc_token,
5602 + mac_addr);
5603 + if (err) {
5604 + dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
5605 + return err;
5606 + }
5607 + }
5608 + memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
5609 + } else if (is_zero_ether_addr(dpni_mac_addr)) {
5610 + /* No MAC address configured, fill in net_dev->dev_addr
5611 + * with a random one
5612 + */
5613 + eth_hw_addr_random(net_dev);
5614 + dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
5615 +
5616 + err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
5617 + net_dev->dev_addr);
5618 + if (err) {
5619 + dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
5620 + return err;
5621 + }
5622 +
5623 + /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
5624 + * practical purposes, this will be our "permanent" mac address,
5625 + * at least until the next reboot. This move will also permit
5626 + * register_netdevice() to properly fill up net_dev->perm_addr.
5627 + */
5628 + net_dev->addr_assign_type = NET_ADDR_PERM;
5629 + } else {
5630 + /* NET_ADDR_PERM is default, all we have to do is
5631 + * fill in the device addr.
5632 + */
5633 + memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
5634 + }
5635 +
5636 + return 0;
5637 +}
5638 +
5639 +static int netdev_init(struct net_device *net_dev)
5640 +{
5641 + struct device *dev = net_dev->dev.parent;
5642 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5643 + u8 bcast_addr[ETH_ALEN];
5644 + u8 num_queues;
5645 + int err;
5646 +
5647 + net_dev->netdev_ops = &dpaa2_eth_ops;
5648 +
5649 + err = set_mac_addr(priv);
5650 + if (err)
5651 + return err;
5652 +
5653 + /* Explicitly add the broadcast address to the MAC filtering table */
5654 + eth_broadcast_addr(bcast_addr);
5655 + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
5656 + if (err) {
5657 + dev_err(dev, "dpni_add_mac_addr() failed\n");
5658 + return err;
5659 + }
5660 +
5661 + /* Set MTU upper limit; lower limit is default (68B) */
5662 + net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
5663 + err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
5664 + (u16)DPAA2_ETH_MFL);
5665 + if (err) {
5666 + dev_err(dev, "dpni_set_max_frame_length() failed\n");
5667 + return err;
5668 + }
5669 +
5670 + /* Set actual number of queues in the net device */
5671 + num_queues = dpaa2_eth_queue_count(priv);
5672 + err = netif_set_real_num_tx_queues(net_dev, num_queues);
5673 + if (err) {
5674 + dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
5675 + return err;
5676 + }
5677 + err = netif_set_real_num_rx_queues(net_dev, num_queues);
5678 + if (err) {
5679 + dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
5680 + return err;
5681 + }
5682 +
5683 + /* Our .ndo_init will be called herein */
5684 + err = register_netdev(net_dev);
5685 + if (err < 0) {
5686 + dev_err(dev, "register_netdev() failed\n");
5687 + return err;
5688 + }
5689 +
5690 + return 0;
5691 +}
5692 +
5693 +static int poll_link_state(void *arg)
5694 +{
5695 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
5696 + int err;
5697 +
5698 + while (!kthread_should_stop()) {
5699 + err = link_state_update(priv);
5700 + if (unlikely(err))
5701 + return err;
5702 +
5703 + msleep(DPAA2_ETH_LINK_STATE_REFRESH);
5704 + }
5705 +
5706 + return 0;
5707 +}
5708 +
5709 +static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
5710 +{
5711 + u32 status = ~0;
5712 + struct device *dev = (struct device *)arg;
5713 + struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
5714 + struct net_device *net_dev = dev_get_drvdata(dev);
5715 + int err;
5716 +
5717 + err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
5718 + DPNI_IRQ_INDEX, &status);
5719 + if (unlikely(err)) {
5720 + netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
5721 + return IRQ_HANDLED;
5722 + }
5723 +
5724 + if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
5725 + link_state_update(netdev_priv(net_dev));
5726 +
5727 + return IRQ_HANDLED;
5728 +}
5729 +
5730 +static int setup_irqs(struct fsl_mc_device *ls_dev)
5731 +{
5732 + int err = 0;
5733 + struct fsl_mc_device_irq *irq;
5734 +
5735 + err = fsl_mc_allocate_irqs(ls_dev);
5736 + if (err) {
5737 + dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
5738 + return err;
5739 + }
5740 +
5741 + irq = ls_dev->irqs[0];
5742 + err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
5743 + NULL, dpni_irq0_handler_thread,
5744 + IRQF_NO_SUSPEND | IRQF_ONESHOT,
5745 + dev_name(&ls_dev->dev), &ls_dev->dev);
5746 + if (err < 0) {
5747 + dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
5748 + goto free_mc_irq;
5749 + }
5750 +
5751 + err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
5752 + DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
5753 + if (err < 0) {
5754 + dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
5755 + goto free_irq;
5756 + }
5757 +
5758 + err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
5759 + DPNI_IRQ_INDEX, 1);
5760 + if (err < 0) {
5761 + dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
5762 + goto free_irq;
5763 + }
5764 +
5765 + return 0;
5766 +
5767 +free_irq:
5768 + devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
5769 +free_mc_irq:
5770 + fsl_mc_free_irqs(ls_dev);
5771 +
5772 + return err;
5773 +}
5774 +
5775 +static void add_ch_napi(struct dpaa2_eth_priv *priv)
5776 +{
5777 + int i;
5778 + struct dpaa2_eth_channel *ch;
5779 +
5780 + for (i = 0; i < priv->num_channels; i++) {
5781 + ch = priv->channel[i];
5782 + /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
5783 + netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
5784 + NAPI_POLL_WEIGHT);
5785 + }
5786 +}
5787 +
5788 +static void del_ch_napi(struct dpaa2_eth_priv *priv)
5789 +{
5790 + int i;
5791 + struct dpaa2_eth_channel *ch;
5792 +
5793 + for (i = 0; i < priv->num_channels; i++) {
5794 + ch = priv->channel[i];
5795 + netif_napi_del(&ch->napi);
5796 + }
5797 +}
5798 +
5799 +/* SysFS support */
5800 +static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev,
5801 + struct device_attribute *attr,
5802 + char *buf)
5803 +{
5804 + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
5805 + /* No MC API for getting the shaping config. We're stateful. */
5806 + struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg;
5807 +
5808 + return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size);
5809 +}
5810 +
5811 +static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev,
5812 + struct device_attribute *attr,
5813 + const char *buf,
5814 + size_t count)
5815 +{
5816 + int err, items;
5817 + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
5818 + struct dpni_tx_shaping_cfg scfg, ercfg = { 0 };
5819 +
5820 + items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size);
5821 + if (items != 2) {
5822 + pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n");
5823 + return -EINVAL;
5824 + }
5825 + /* Size restriction as per MC API documentation */
5826 + if (scfg.max_burst_size > DPAA2_ETH_MAX_BURST_SIZE) {
5827 + pr_err("max_burst_size must be <= %d\n",
5828 + DPAA2_ETH_MAX_BURST_SIZE);
5829 + return -EINVAL;
5830 + }
5831 +
5832 + err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg,
5833 + &ercfg, 0);
5834 + if (err) {
5835 + dev_err(dev, "dpni_set_tx_shaping() failed\n");
5836 + return -EPERM;
5837 + }
5838 + /* If successful, save the current configuration for future inquiries */
5839 + priv->shaping_cfg = scfg;
5840 +
5841 + return count;
5842 +}
5843 +
5844 +static struct device_attribute dpaa2_eth_attrs[] = {
5845 + __ATTR(tx_shaping,
5846 + 0600,
5847 + dpaa2_eth_show_tx_shaping,
5848 + dpaa2_eth_write_tx_shaping),
5849 +};
5850 +
5851 +static void dpaa2_eth_sysfs_init(struct device *dev)
5852 +{
5853 + int i, err;
5854 +
5855 + for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) {
5856 + err = device_create_file(dev, &dpaa2_eth_attrs[i]);
5857 + if (err) {
5858 + dev_err(dev, "ERROR creating sysfs file\n");
5859 + goto undo;
5860 + }
5861 + }
5862 + return;
5863 +
5864 +undo:
5865 + while (i > 0)
5866 + device_remove_file(dev, &dpaa2_eth_attrs[--i]);
5867 +}
5868 +
5869 +static void dpaa2_eth_sysfs_remove(struct device *dev)
5870 +{
5871 + int i;
5872 +
5873 + for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++)
5874 + device_remove_file(dev, &dpaa2_eth_attrs[i]);
5875 +}
5876 +
5877 +#ifdef CONFIG_FSL_DPAA2_ETH_DCB
5878 +static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev,
5879 + struct ieee_pfc *pfc)
5880 +{
5881 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5882 + struct dpni_congestion_notification_cfg notification_cfg;
5883 + struct dpni_link_state state;
5884 + int err, i;
5885 +
5886 + priv->pfc.pfc_cap = dpaa2_eth_tc_count(priv);
5887 +
5888 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
5889 + if (err) {
5890 + netdev_err(net_dev, "ERROR %d getting link state", err);
5891 + return err;
5892 + }
5893 +
5894 + if (!(state.options & DPNI_LINK_OPT_PFC_PAUSE))
5895 + return 0;
5896 +
5897 + priv->pfc.pfc_en = 0;
5898 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
5899 + err = dpni_get_congestion_notification(priv->mc_io, 0,
5900 + priv->mc_token,
5901 + DPNI_QUEUE_RX,
5902 + i, &notification_cfg);
5903 + if (err) {
5904 + netdev_err(net_dev, "Error %d getting congestion notif",
5905 + err);
5906 + return err;
5907 + }
5908 +
5909 + if (notification_cfg.threshold_entry)
5910 + priv->pfc.pfc_en |= 1 << i;
5911 + }
5912 +
5913 + memcpy(pfc, &priv->pfc, sizeof(priv->pfc));
5914 +
5915 + return 0;
5916 +}
5917 +
5918 +/* Configure ingress classification based on VLAN PCP */
5919 +static int set_vlan_qos(struct dpaa2_eth_priv *priv)
5920 +{
5921 + struct device *dev = priv->net_dev->dev.parent;
5922 + struct dpkg_profile_cfg kg_cfg = {0};
5923 + struct dpni_qos_tbl_cfg qos_cfg = {0};
5924 + struct dpni_rule_cfg key_params;
5925 + u8 *params_iova, *key, *mask = NULL;
5926 + /* We only need the trailing 16 bits, without the TPID */
5927 + u8 key_size = VLAN_HLEN / 2;
5928 + int err = 0, i, j = 0;
5929 +
5930 + if (priv->vlan_clsf_set)
5931 + return 0;
5932 +
5933 + params_iova = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
5934 + if (!params_iova)
5935 + return -ENOMEM;
5936 +
5937 + kg_cfg.num_extracts = 1;
5938 + kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
5939 + kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
5940 + kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
5941 + kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
5942 +
5943 + err = dpni_prepare_key_cfg(&kg_cfg, params_iova);
5944 + if (err) {
5945 + dev_err(dev, "dpkg_prepare_key_cfg failed: %d\n", err);
5946 + goto out_free;
5947 + }
5948 +
5949 + /* Set QoS table */
5950 + qos_cfg.default_tc = 0;
5951 + qos_cfg.discard_on_miss = 0;
5952 + qos_cfg.key_cfg_iova = dma_map_single(dev, params_iova,
5953 + DPAA2_CLASSIFIER_DMA_SIZE,
5954 + DMA_TO_DEVICE);
5955 + if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
5956 + dev_err(dev, "%s: DMA mapping failed\n", __func__);
5957 + err = -ENOMEM;
5958 + goto out_free;
5959 + }
5960 + err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
5961 + dma_unmap_single(dev, qos_cfg.key_cfg_iova,
5962 + DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
5963 +
5964 + if (err) {
5965 + dev_err(dev, "dpni_set_qos_table failed: %d\n", err);
5966 + goto out_free;
5967 + }
5968 +
5969 + key_params.key_size = key_size;
5970 +
5971 + if (dpaa2_eth_fs_mask_enabled(priv)) {
5972 + mask = kzalloc(key_size, GFP_KERNEL);
5973 + if (!mask)
5974 + goto out_free;
5975 +
5976 + *mask = cpu_to_be16(VLAN_PRIO_MASK);
5977 +
5978 + key_params.mask_iova = dma_map_single(dev, mask, key_size,
5979 + DMA_TO_DEVICE);
5980 + if (dma_mapping_error(dev, key_params.mask_iova)) {
5981 + dev_err(dev, "DMA mapping failed %s\n", __func__);
5982 + err = -ENOMEM;
5983 + goto out_free_mask;
5984 + }
5985 + } else {
5986 + key_params.mask_iova = 0;
5987 + }
5988 +
5989 + key = kzalloc(key_size, GFP_KERNEL);
5990 + if (!key)
5991 + goto out_cleanup_mask;
5992 +
5993 + key_params.key_iova = dma_map_single(dev, key, key_size,
5994 + DMA_TO_DEVICE);
5995 + if (dma_mapping_error(dev, key_params.key_iova)) {
5996 + dev_err(dev, "%s: DMA mapping failed\n", __func__);
5997 + err = -ENOMEM;
5998 + goto out_free_key;
5999 + }
6000 +
6001 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
6002 + *key = cpu_to_be16(i << VLAN_PRIO_SHIFT);
6003 +
6004 + dma_sync_single_for_device(dev, key_params.key_iova,
6005 + key_size, DMA_TO_DEVICE);
6006 +
6007 + err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
6008 + &key_params, i, j++);
6009 + if (err) {
6010 + dev_err(dev, "dpni_add_qos_entry failed: %d\n", err);
6011 + goto out_remove;
6012 + }
6013 + }
6014 +
6015 + priv->vlan_clsf_set = true;
6016 + dev_dbg(dev, "Vlan PCP QoS classification set\n");
6017 + goto out_cleanup;
6018 +
6019 +out_remove:
6020 + for (j = 0; j < i; j++) {
6021 + *key = cpu_to_be16(j << VLAN_PRIO_SHIFT);
6022 +
6023 + dma_sync_single_for_device(dev, key_params.key_iova, key_size,
6024 + DMA_TO_DEVICE);
6025 +
6026 + err = dpni_remove_qos_entry(priv->mc_io, 0, priv->mc_token,
6027 + &key_params);
6028 + if (err)
6029 + dev_err(dev, "dpni_remove_qos_entry failed: %d\n", err);
6030 + }
6031 +
6032 +out_cleanup:
6033 + dma_unmap_single(dev, key_params.key_iova, key_size, DMA_TO_DEVICE);
6034 +out_free_key:
6035 + kfree(key);
6036 +out_cleanup_mask:
6037 + if (key_params.mask_iova)
6038 + dma_unmap_single(dev, key_params.mask_iova, key_size,
6039 + DMA_TO_DEVICE);
6040 +out_free_mask:
6041 + kfree(mask);
6042 +out_free:
6043 + kfree(params_iova);
6044 + return err;
6045 +}
6046 +
6047 +static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev,
6048 + struct ieee_pfc *pfc)
6049 +{
6050 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6051 + struct dpni_congestion_notification_cfg notification_cfg = {0};
6052 + struct dpni_link_state state = {0};
6053 + struct dpni_link_cfg cfg = {0};
6054 + struct ieee_pfc old_pfc;
6055 + int err = 0, i;
6056 +
6057 + if (dpaa2_eth_tc_count(priv) == 1) {
6058 + netdev_dbg(net_dev, "DPNI has 1 TC, PFC configuration N/A\n");
6059 + return 0;
6060 + }
6061 +
6062 + /* Zero out pfc_enabled prios greater than tc_count */
6063 + pfc->pfc_en &= (1 << dpaa2_eth_tc_count(priv)) - 1;
6064 +
6065 + if (priv->pfc.pfc_en == pfc->pfc_en)
6066 + /* Same enabled mask, nothing to be done */
6067 + return 0;
6068 +
6069 + err = set_vlan_qos(priv);
6070 + if (err)
6071 + return err;
6072 +
6073 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
6074 + if (err) {
6075 + netdev_err(net_dev, "ERROR %d getting link state", err);
6076 + return err;
6077 + }
6078 +
6079 + cfg.rate = state.rate;
6080 + cfg.options = state.options;
6081 + if (pfc->pfc_en)
6082 + cfg.options |= DPNI_LINK_OPT_PFC_PAUSE;
6083 + else
6084 + cfg.options &= ~DPNI_LINK_OPT_PFC_PAUSE;
6085 +
6086 + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
6087 + if (err) {
6088 + netdev_err(net_dev, "ERROR %d setting link cfg", err);
6089 + return err;
6090 + }
6091 +
6092 + memcpy(&old_pfc, &priv->pfc, sizeof(priv->pfc));
6093 + memcpy(&priv->pfc, pfc, sizeof(priv->pfc));
6094 +
6095 + err = set_rx_taildrop(priv);
6096 + if (err)
6097 + goto out_restore_config;
6098 +
6099 + /* configure congestion notifications */
6100 + notification_cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL;
6101 + notification_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
6102 + notification_cfg.message_iova = 0ULL;
6103 + notification_cfg.message_ctx = 0ULL;
6104 +
6105 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
6106 + if (dpaa2_eth_is_pfc_enabled(priv, i)) {
6107 + notification_cfg.threshold_entry = NAPI_POLL_WEIGHT;
6108 + notification_cfg.threshold_exit = NAPI_POLL_WEIGHT / 2;
6109 + } else {
6110 + notification_cfg.threshold_entry = 0;
6111 + notification_cfg.threshold_exit = 0;
6112 + }
6113 +
6114 + err = dpni_set_congestion_notification(priv->mc_io, 0,
6115 + priv->mc_token,
6116 + DPNI_QUEUE_RX,
6117 + i, &notification_cfg);
6118 + if (err) {
6119 + netdev_err(net_dev, "Error %d setting congestion notif",
6120 + err);
6121 + goto out_restore_config;
6122 + }
6123 +
6124 + netdev_dbg(net_dev, "%s congestion notifications for tc %d\n",
6125 + (notification_cfg.threshold_entry ?
6126 + "Enabled" : "Disabled"), i);
6127 + }
6128 +
6129 + return 0;
6130 +
6131 +out_restore_config:
6132 + memcpy(&priv->pfc, &old_pfc, sizeof(priv->pfc));
6133 + return err;
6134 +}
6135 +
6136 +static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev)
6137 +{
6138 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6139 +
6140 + return priv->dcbx_mode;
6141 +}
6142 +
6143 +static u8 dpaa2_eth_dcbnl_setdcbx(struct net_device *net_dev, u8 mode)
6144 +{
6145 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6146 +
6147 + priv->dcbx_mode = mode;
6148 + return 0;
6149 +}
6150 +
6151 +static u8 dpaa2_eth_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap)
6152 +{
6153 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6154 +
6155 + switch (capid) {
6156 + case DCB_CAP_ATTR_PFC:
6157 + *cap = true;
6158 + break;
6159 + case DCB_CAP_ATTR_PFC_TCS:
6160 + /* bitmap where each bit represents a number of traffic
6161 + * classes the device can be configured to use for Priority
6162 + * Flow Control
6163 + */
6164 + *cap = 1 << (dpaa2_eth_tc_count(priv) - 1);
6165 + break;
6166 + case DCB_CAP_ATTR_DCBX:
6167 + *cap = priv->dcbx_mode;
6168 + break;
6169 + default:
6170 + *cap = false;
6171 + break;
6172 + }
6173 +
6174 + return 0;
6175 +}
6176 +
6177 +const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops = {
6178 + .ieee_getpfc = dpaa2_eth_dcbnl_ieee_getpfc,
6179 + .ieee_setpfc = dpaa2_eth_dcbnl_ieee_setpfc,
6180 + .getdcbx = dpaa2_eth_dcbnl_getdcbx,
6181 + .setdcbx = dpaa2_eth_dcbnl_setdcbx,
6182 + .getcap = dpaa2_eth_dcbnl_getcap,
6183 +};
6184 +#endif
6185 +
6186 +static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
6187 +{
6188 + struct device *dev;
6189 + struct net_device *net_dev = NULL;
6190 + struct dpaa2_eth_priv *priv = NULL;
6191 + int err = 0;
6192 +
6193 + dev = &dpni_dev->dev;
6194 +
6195 + /* Net device */
6196 + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
6197 + if (!net_dev) {
6198 + dev_err(dev, "alloc_etherdev_mq() failed\n");
6199 + return -ENOMEM;
6200 + }
6201 +
6202 + SET_NETDEV_DEV(net_dev, dev);
6203 + dev_set_drvdata(dev, net_dev);
6204 +
6205 + priv = netdev_priv(net_dev);
6206 + priv->net_dev = net_dev;
6207 +
6208 + priv->iommu_domain = iommu_get_domain_for_dev(dev);
6209 +
6210 + /* Obtain a MC portal */
6211 + err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
6212 + &priv->mc_io);
6213 + if (err) {
6214 + if (err == -ENXIO)
6215 + err = -EPROBE_DEFER;
6216 + else
6217 + dev_err(dev, "MC portal allocation failed\n");
6218 + goto err_portal_alloc;
6219 + }
6220 +
6221 + /* MC objects initialization and configuration */
6222 + err = setup_dpni(dpni_dev);
6223 + if (err)
6224 + goto err_dpni_setup;
6225 +
6226 + err = setup_dpio(priv);
6227 + if (err)
6228 + goto err_dpio_setup;
6229 +
6230 + setup_fqs(priv);
6231 +
6232 + err = setup_dpbp(priv);
6233 + if (err)
6234 + goto err_dpbp_setup;
6235 +
6236 + err = bind_dpni(priv);
6237 + if (err)
6238 + goto err_bind;
6239 +
6240 + /* Percpu statistics */
6241 + priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
6242 + if (!priv->percpu_stats) {
6243 + dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
6244 + err = -ENOMEM;
6245 + goto err_alloc_percpu_stats;
6246 + }
6247 + priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
6248 + if (!priv->percpu_extras) {
6249 + dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
6250 + err = -ENOMEM;
6251 + goto err_alloc_percpu_extras;
6252 + }
6253 +
6254 + err = netdev_init(net_dev);
6255 + if (err)
6256 + goto err_netdev_init;
6257 +
6258 + /* Configure checksum offload based on current interface flags */
6259 + err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
6260 + if (err)
6261 + goto err_csum;
6262 +
6263 + err = set_tx_csum(priv, !!(net_dev->features &
6264 + (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
6265 + if (err)
6266 + goto err_csum;
6267 +
6268 + err = alloc_rings(priv);
6269 + if (err)
6270 + goto err_alloc_rings;
6271 +
6272 + net_dev->ethtool_ops = &dpaa2_ethtool_ops;
6273 +#ifdef CONFIG_FSL_DPAA2_ETH_DCB
6274 + net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
6275 + priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
6276 +#endif
6277 +
6278 + /* Add a NAPI context for each channel */
6279 + add_ch_napi(priv);
6280 + enable_ch_napi(priv);
6281 +
6282 + err = setup_irqs(dpni_dev);
6283 + if (err) {
6284 + netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
6285 + priv->poll_thread = kthread_run(poll_link_state, priv,
6286 + "%s_poll_link", net_dev->name);
6287 + if (IS_ERR(priv->poll_thread)) {
6288 + netdev_err(net_dev, "Error starting polling thread\n");
6289 + goto err_poll_thread;
6290 + }
6291 + priv->do_link_poll = true;
6292 + }
6293 +
6294 + dpaa2_eth_sysfs_init(&net_dev->dev);
6295 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
6296 + dpaa2_dbg_add(priv);
6297 +#endif
6298 +
6299 + dev_info(dev, "Probed interface %s\n", net_dev->name);
6300 + return 0;
6301 +
6302 +err_poll_thread:
6303 + free_rings(priv);
6304 +err_alloc_rings:
6305 +err_csum:
6306 + unregister_netdev(net_dev);
6307 +err_netdev_init:
6308 + free_percpu(priv->percpu_extras);
6309 +err_alloc_percpu_extras:
6310 + free_percpu(priv->percpu_stats);
6311 +err_alloc_percpu_stats:
6312 + disable_ch_napi(priv);
6313 + del_ch_napi(priv);
6314 +err_bind:
6315 + free_dpbp(priv);
6316 +err_dpbp_setup:
6317 + free_dpio(priv);
6318 +err_dpio_setup:
6319 + free_dpni(priv);
6320 +err_dpni_setup:
6321 + fsl_mc_portal_free(priv->mc_io);
6322 +err_portal_alloc:
6323 + dev_set_drvdata(dev, NULL);
6324 + free_netdev(net_dev);
6325 +
6326 + return err;
6327 +}
6328 +
6329 +static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
6330 +{
6331 + struct device *dev;
6332 + struct net_device *net_dev;
6333 + struct dpaa2_eth_priv *priv;
6334 +
6335 + dev = &ls_dev->dev;
6336 + net_dev = dev_get_drvdata(dev);
6337 + priv = netdev_priv(net_dev);
6338 +
6339 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
6340 + dpaa2_dbg_remove(priv);
6341 +#endif
6342 + dpaa2_eth_sysfs_remove(&net_dev->dev);
6343 +
6344 + unregister_netdev(net_dev);
6345 +
6346 + disable_ch_napi(priv);
6347 + del_ch_napi(priv);
6348 +
6349 + if (priv->do_link_poll)
6350 + kthread_stop(priv->poll_thread);
6351 + else
6352 + fsl_mc_free_irqs(ls_dev);
6353 +
6354 + free_rings(priv);
6355 + free_percpu(priv->percpu_stats);
6356 + free_percpu(priv->percpu_extras);
6357 + free_dpbp(priv);
6358 + free_dpio(priv);
6359 + free_dpni(priv);
6360 +
6361 + fsl_mc_portal_free(priv->mc_io);
6362 +
6363 + dev_set_drvdata(dev, NULL);
6364 + free_netdev(net_dev);
6365 +
6366 + dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
6367 +
6368 + return 0;
6369 +}
6370 +
6371 +static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
6372 + {
6373 + .vendor = FSL_MC_VENDOR_FREESCALE,
6374 + .obj_type = "dpni",
6375 + },
6376 + { .vendor = 0x0 }
6377 +};
6378 +MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
6379 +
6380 +static struct fsl_mc_driver dpaa2_eth_driver = {
6381 + .driver = {
6382 + .name = KBUILD_MODNAME,
6383 + .owner = THIS_MODULE,
6384 + },
6385 + .probe = dpaa2_eth_probe,
6386 + .remove = dpaa2_eth_remove,
6387 + .match_id_table = dpaa2_eth_match_id_table
6388 +};
6389 +
6390 +static int __init dpaa2_eth_driver_init(void)
6391 +{
6392 + int err;
6393 +
6394 + dpaa2_eth_dbg_init();
6395 + err = fsl_mc_driver_register(&dpaa2_eth_driver);
6396 + if (err)
6397 + goto out_debugfs_err;
6398 +
6399 + err = dpaa2_ceetm_register();
6400 + if (err)
6401 + goto out_ceetm_err;
6402 +
6403 + return 0;
6404 +
6405 +out_ceetm_err:
6406 + fsl_mc_driver_unregister(&dpaa2_eth_driver);
6407 +out_debugfs_err:
6408 + dpaa2_eth_dbg_exit();
6409 + return err;
6410 +}
6411 +
6412 +static void __exit dpaa2_eth_driver_exit(void)
6413 +{
6414 + dpaa2_ceetm_unregister();
6415 + fsl_mc_driver_unregister(&dpaa2_eth_driver);
6416 + dpaa2_eth_dbg_exit();
6417 +}
6418 +
6419 +module_init(dpaa2_eth_driver_init);
6420 +module_exit(dpaa2_eth_driver_exit);
6421 --- /dev/null
6422 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
6423 @@ -0,0 +1,601 @@
6424 +/* Copyright 2014-2016 Freescale Semiconductor Inc.
6425 + * Copyright 2016 NXP
6426 + *
6427 + * Redistribution and use in source and binary forms, with or without
6428 + * modification, are permitted provided that the following conditions are met:
6429 + * * Redistributions of source code must retain the above copyright
6430 + * notice, this list of conditions and the following disclaimer.
6431 + * * Redistributions in binary form must reproduce the above copyright
6432 + * notice, this list of conditions and the following disclaimer in the
6433 + * documentation and/or other materials provided with the distribution.
6434 + * * Neither the name of Freescale Semiconductor nor the
6435 + * names of its contributors may be used to endorse or promote products
6436 + * derived from this software without specific prior written permission.
6437 + *
6438 + *
6439 + * ALTERNATIVELY, this software may be distributed under the terms of the
6440 + * GNU General Public License ("GPL") as published by the Free Software
6441 + * Foundation, either version 2 of that License or (at your option) any
6442 + * later version.
6443 + *
6444 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
6445 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
6446 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
6447 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
6448 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
6449 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
6450 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
6451 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
6452 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6453 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6454 + */
6455 +
6456 +#ifndef __DPAA2_ETH_H
6457 +#define __DPAA2_ETH_H
6458 +
6459 +#include <linux/dcbnl.h>
6460 +#include <linux/netdevice.h>
6461 +#include <linux/if_vlan.h>
6462 +
6463 +#include "../../fsl-mc/include/dpaa2-io.h"
6464 +#include "../../fsl-mc/include/dpaa2-fd.h"
6465 +#include "dpni.h"
6466 +#include "dpni-cmd.h"
6467 +
6468 +#include "dpaa2-eth-trace.h"
6469 +#include "dpaa2-eth-debugfs.h"
6470 +
6471 +#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
6472 +
6473 +#define DPAA2_ETH_STORE_SIZE 16
6474 +
6475 +/* We set a max threshold for how many Tx confirmations we should process
6476 + * on a NAPI poll call, as they take less processing time.
6477 + */
6478 +#define TX_CONF_PER_NAPI_POLL 256
6479 +
6480 +/* Maximum number of scatter-gather entries in an ingress frame,
6481 + * considering the maximum receive frame size is 64K
6482 + */
6483 +#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE)
6484 +
6485 +/* Maximum acceptable MTU value. It is in direct relation with the hardware
6486 + * enforced Max Frame Length (currently 10k).
6487 + */
6488 +#define DPAA2_ETH_MFL (10 * 1024)
6489 +#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN)
6490 +/* Convert L3 MTU to L2 MFL */
6491 +#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN)
6492 +
6493 +/* Maximum burst size value for Tx shaping */
6494 +#define DPAA2_ETH_MAX_BURST_SIZE 0xF7FF
6495 +
6496 +/* Maximum number of buffers that can be acquired/released through a single
6497 + * QBMan command
6498 + */
6499 +#define DPAA2_ETH_BUFS_PER_CMD 7
6500 +
6501 +/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
6502 + * frames in the Rx queues (length of the current frame is not
6503 + * taken into account when making the taildrop decision)
6504 + */
6505 +#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024)
6506 +
6507 +/* Buffer quota per queue. Must be large enough such that for minimum sized
6508 + * frames taildrop kicks in before the bpool gets depleted, so we compute
6509 + * how many 64B frames fit inside the taildrop threshold and add a margin
6510 + * to accommodate the buffer refill delay.
6511 + */
6512 +#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
6513 +#define DPAA2_ETH_NUM_BUFS_PER_CH (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
6514 +#define DPAA2_ETH_REFILL_THRESH(priv) \
6515 + ((priv)->max_bufs_per_ch - DPAA2_ETH_BUFS_PER_CMD)
6516 +
6517 +/* Global buffer quota in case flow control is enabled */
6518 +#define DPAA2_ETH_NUM_BUFS_FC 256
6519 +
6520 +/* Hardware requires alignment for ingress/egress buffer addresses */
6521 +#define DPAA2_ETH_TX_BUF_ALIGN 64
6522 +
6523 +/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned
6524 + * to 256B. For newer revisions, the requirement is only for 64B alignment
6525 + */
6526 +#define DPAA2_ETH_RX_BUF_ALIGN_REV1 256
6527 +#define DPAA2_ETH_RX_BUF_ALIGN 64
6528 +
6529 +#define DPAA2_ETH_RX_BUF_SIZE 2048
6530 +#define DPAA2_ETH_SKB_SIZE \
6531 + (DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
6532 +
6533 +/* PTP nominal frequency 1GHz */
6534 +#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1
6535 +
6536 +/* Hardware annotation area in RX/TX buffers */
6537 +#define DPAA2_ETH_RX_HWA_SIZE 64
6538 +#define DPAA2_ETH_TX_HWA_SIZE 128
6539 +
6540 +/* We are accommodating a skb backpointer and some S/G info
6541 + * in the frame's software annotation. The hardware
6542 + * options are either 0 or 64, so we choose the latter.
6543 + */
6544 +#define DPAA2_ETH_SWA_SIZE 64
6545 +
6546 +/* We store different information in the software annotation area of a Tx frame
6547 + * based on what type of frame it is
6548 + */
6549 +enum dpaa2_eth_swa_type {
6550 + DPAA2_ETH_SWA_SINGLE,
6551 + DPAA2_ETH_SWA_SG,
6552 +};
6553 +
6554 +/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
6555 +struct dpaa2_eth_swa {
6556 + enum dpaa2_eth_swa_type type;
6557 + union {
6558 + struct {
6559 + struct sk_buff *skb;
6560 + } single;
6561 + struct {
6562 + struct sk_buff *skb;
6563 + struct scatterlist *scl;
6564 + int num_sg;
6565 + int sgt_size;
6566 + } sg;
6567 + };
6568 +};
6569 +
6570 +/* Annotation valid bits in FD FRC */
6571 +#define DPAA2_FD_FRC_FASV 0x8000
6572 +#define DPAA2_FD_FRC_FAEADV 0x4000
6573 +#define DPAA2_FD_FRC_FAPRV 0x2000
6574 +#define DPAA2_FD_FRC_FAIADV 0x1000
6575 +#define DPAA2_FD_FRC_FASWOV 0x0800
6576 +#define DPAA2_FD_FRC_FAICFDV 0x0400
6577 +
6578 +#define DPAA2_FD_RX_ERR_MASK (FD_CTRL_SBE | FD_CTRL_FAERR)
6579 +#define DPAA2_FD_TX_ERR_MASK (FD_CTRL_UFD | \
6580 + FD_CTRL_SBE | \
6581 + FD_CTRL_FSE | \
6582 + FD_CTRL_FAERR)
6583 +
6584 +/* Annotation bits in FD CTRL */
6585 +#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
6586 +
6587 +/* Frame annotation status */
6588 +struct dpaa2_fas {
6589 + u8 reserved;
6590 + u8 ppid;
6591 + __le16 ifpid;
6592 + __le32 status;
6593 +};
6594 +
6595 +/* Frame annotation status word is located in the first 8 bytes
6596 + * of the buffer's hardware annotation area
6597 + */
6598 +#define DPAA2_FAS_OFFSET 0
6599 +#define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas))
6600 +
6601 +/* Timestamp is located in the next 8 bytes of the buffer's
6602 + * hardware annotation area
6603 + */
6604 +#define DPAA2_TS_OFFSET 0x8
6605 +
6606 +/* Frame annotation egress action descriptor */
6607 +#define DPAA2_FAEAD_OFFSET 0x58
6608 +
6609 +struct dpaa2_faead {
6610 + __le32 conf_fqid;
6611 + __le32 ctrl;
6612 +};
6613 +
6614 +#define DPAA2_FAEAD_A2V 0x20000000
6615 +#define DPAA2_FAEAD_A4V 0x08000000
6616 +#define DPAA2_FAEAD_UPDV 0x00001000
6617 +#define DPAA2_FAEAD_EBDDV 0x00002000
6618 +#define DPAA2_FAEAD_UPD 0x00000010
6619 +
6620 +/* Accessors for the hardware annotation fields that we use */
6621 +static inline void *dpaa2_get_hwa(void *buf_addr, bool swa)
6622 +{
6623 + return buf_addr + (swa ? DPAA2_ETH_SWA_SIZE : 0);
6624 +}
6625 +
6626 +static inline struct dpaa2_fas *dpaa2_get_fas(void *buf_addr, bool swa)
6627 +{
6628 + return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAS_OFFSET;
6629 +}
6630 +
6631 +static inline u64 *dpaa2_get_ts(void *buf_addr, bool swa)
6632 +{
6633 + return dpaa2_get_hwa(buf_addr, swa) + DPAA2_TS_OFFSET;
6634 +}
6635 +
6636 +static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa)
6637 +{
6638 + return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAEAD_OFFSET;
6639 +}
6640 +
6641 +/* Error and status bits in the frame annotation status word */
6642 +/* Debug frame, otherwise supposed to be discarded */
6643 +#define DPAA2_FAS_DISC 0x80000000
6644 +/* MACSEC frame */
6645 +#define DPAA2_FAS_MS 0x40000000
6646 +#define DPAA2_FAS_PTP 0x08000000
6647 +/* Ethernet multicast frame */
6648 +#define DPAA2_FAS_MC 0x04000000
6649 +/* Ethernet broadcast frame */
6650 +#define DPAA2_FAS_BC 0x02000000
6651 +#define DPAA2_FAS_KSE 0x00040000
6652 +#define DPAA2_FAS_EOFHE 0x00020000
6653 +#define DPAA2_FAS_MNLE 0x00010000
6654 +#define DPAA2_FAS_TIDE 0x00008000
6655 +#define DPAA2_FAS_PIEE 0x00004000
6656 +/* Frame length error */
6657 +#define DPAA2_FAS_FLE 0x00002000
6658 +/* Frame physical error */
6659 +#define DPAA2_FAS_FPE 0x00001000
6660 +#define DPAA2_FAS_PTE 0x00000080
6661 +#define DPAA2_FAS_ISP 0x00000040
6662 +#define DPAA2_FAS_PHE 0x00000020
6663 +#define DPAA2_FAS_BLE 0x00000010
6664 +/* L3 csum validation performed */
6665 +#define DPAA2_FAS_L3CV 0x00000008
6666 +/* L3 csum error */
6667 +#define DPAA2_FAS_L3CE 0x00000004
6668 +/* L4 csum validation performed */
6669 +#define DPAA2_FAS_L4CV 0x00000002
6670 +/* L4 csum error */
6671 +#define DPAA2_FAS_L4CE 0x00000001
6672 +/* Possible errors on the ingress path */
6673 +#define DPAA2_FAS_RX_ERR_MASK (DPAA2_FAS_KSE | \
6674 + DPAA2_FAS_EOFHE | \
6675 + DPAA2_FAS_MNLE | \
6676 + DPAA2_FAS_TIDE | \
6677 + DPAA2_FAS_PIEE | \
6678 + DPAA2_FAS_FLE | \
6679 + DPAA2_FAS_FPE | \
6680 + DPAA2_FAS_PTE | \
6681 + DPAA2_FAS_ISP | \
6682 + DPAA2_FAS_PHE | \
6683 + DPAA2_FAS_BLE | \
6684 + DPAA2_FAS_L3CE | \
6685 + DPAA2_FAS_L4CE)
6686 +
6687 +/* Time in milliseconds between link state updates */
6688 +#define DPAA2_ETH_LINK_STATE_REFRESH 1000
6689 +
6690 +/* Number of times to retry a frame enqueue before giving up.
6691 + * Value determined empirically, in order to minimize the number
6692 + * of frames dropped on Tx
6693 + */
6694 +#define DPAA2_ETH_ENQUEUE_RETRIES 10
6695 +
6696 +/* Tx congestion entry & exit thresholds, in number of bytes.
6697 + * We allow a maximum of 512KB worth of frames pending processing on the Tx
6698 + * queues of an interface
6699 + */
6700 +#define DPAA2_ETH_TX_CONG_ENTRY_THRESH (512 * 1024)
6701 +#define DPAA2_ETH_TX_CONG_EXIT_THRESH \
6702 + (DPAA2_ETH_TX_CONG_ENTRY_THRESH * 9 / 10)
6703 +
6704 +/* Driver statistics, other than those in struct rtnl_link_stats64.
6705 + * These are usually collected per-CPU and aggregated by ethtool.
6706 + */
6707 +struct dpaa2_eth_drv_stats {
6708 + __u64 tx_conf_frames;
6709 + __u64 tx_conf_bytes;
6710 + __u64 tx_sg_frames;
6711 + __u64 tx_sg_bytes;
6712 + __u64 tx_reallocs;
6713 + __u64 rx_sg_frames;
6714 + __u64 rx_sg_bytes;
6715 + /* Enqueues retried due to portal busy */
6716 + __u64 tx_portal_busy;
6717 +};
6718 +
6719 +/* Per-FQ statistics */
6720 +struct dpaa2_eth_fq_stats {
6721 + /* Number of frames received on this queue */
6722 + __u64 frames;
6723 + /* Number of times this queue entered congestion */
6724 + __u64 congestion_entry;
6725 +};
6726 +
6727 +/* Per-channel statistics */
6728 +struct dpaa2_eth_ch_stats {
6729 + /* Volatile dequeues retried due to portal busy */
6730 + __u64 dequeue_portal_busy;
6731 + /* Number of CDANs; useful to estimate avg NAPI len */
6732 + __u64 cdan;
6733 + /* Number of frames received on queues from this channel */
6734 + __u64 frames;
6735 + /* Pull errors */
6736 + __u64 pull_err;
6737 +};
6738 +
6739 +#define DPAA2_ETH_MAX_TCS 8
6740 +
6741 +/* Maximum number of queues associated with a DPNI */
6742 +#define DPAA2_ETH_MAX_RX_QUEUES (DPNI_MAX_DIST_SIZE * DPAA2_ETH_MAX_TCS)
6743 +#define DPAA2_ETH_MAX_TX_QUEUES DPNI_MAX_SENDERS
6744 +#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1
6745 +#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
6746 + DPAA2_ETH_MAX_TX_QUEUES + \
6747 + DPAA2_ETH_MAX_RX_ERR_QUEUES)
6748 +
6749 +#define DPAA2_ETH_MAX_DPCONS 16
6750 +
6751 +enum dpaa2_eth_fq_type {
6752 + DPAA2_RX_FQ = 0,
6753 + DPAA2_TX_CONF_FQ,
6754 + DPAA2_RX_ERR_FQ
6755 +};
6756 +
6757 +struct dpaa2_eth_priv;
6758 +
6759 +struct dpaa2_eth_fq {
6760 + u32 fqid;
6761 + u32 tx_qdbin;
6762 + u16 flowid;
6763 + u8 tc;
6764 + int target_cpu;
6765 + struct dpaa2_eth_channel *channel;
6766 + enum dpaa2_eth_fq_type type;
6767 +
6768 + void (*consume)(struct dpaa2_eth_priv *,
6769 + struct dpaa2_eth_channel *,
6770 + const struct dpaa2_fd *,
6771 + struct napi_struct *,
6772 + u16 queue_id);
6773 + struct dpaa2_eth_fq_stats stats;
6774 +};
6775 +
6776 +struct dpaa2_eth_channel {
6777 + struct dpaa2_io_notification_ctx nctx;
6778 + struct fsl_mc_device *dpcon;
6779 + int dpcon_id;
6780 + int ch_id;
6781 + struct napi_struct napi;
6782 + struct dpaa2_io *dpio;
6783 + struct dpaa2_io_store *store;
6784 + struct dpaa2_eth_priv *priv;
6785 + int buf_count;
6786 + struct dpaa2_eth_ch_stats stats;
6787 + struct bpf_prog *xdp_prog;
6788 + u64 rel_buf_array[DPAA2_ETH_BUFS_PER_CMD];
6789 + u8 rel_buf_cnt;
6790 +};
6791 +
6792 +struct dpaa2_eth_cls_rule {
6793 + struct ethtool_rx_flow_spec fs;
6794 + bool in_use;
6795 +};
6796 +
6797 +struct dpaa2_eth_dist_fields {
6798 + u64 rxnfc_field;
6799 + enum net_prot cls_prot;
6800 + int cls_field;
6801 + int offset;
6802 + int size;
6803 + u32 id;
6804 +};
6805 +
6806 +/* Driver private data */
6807 +struct dpaa2_eth_priv {
6808 + struct net_device *net_dev;
6809 + /* Standard statistics */
6810 + struct rtnl_link_stats64 __percpu *percpu_stats;
6811 + /* Extra stats, in addition to the ones known by the kernel */
6812 + struct dpaa2_eth_drv_stats __percpu *percpu_extras;
6813 + bool ts_tx_en; /* Tx timestamping enabled */
6814 + bool ts_rx_en; /* Rx timestamping enabled */
6815 + u16 tx_data_offset;
6816 + u16 bpid;
6817 + u16 tx_qdid;
6818 + u16 rx_buf_align;
6819 + struct iommu_domain *iommu_domain;
6820 + int max_bufs_per_ch;
6821 + int refill_thresh;
6822 + bool has_xdp_prog;
6823 +
6824 + void *cscn_mem; /* Tx congestion notifications are written here */
6825 + void *cscn_unaligned;
6826 + dma_addr_t cscn_dma;
6827 +
6828 + u8 num_fqs;
6829 + struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
6830 +
6831 + u8 num_channels;
6832 + struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
6833 +
6834 + struct dpni_attr dpni_attrs;
6835 + u16 dpni_ver_major;
6836 + u16 dpni_ver_minor;
6837 + struct fsl_mc_device *dpbp_dev;
6838 +
6839 + struct fsl_mc_io *mc_io;
6840 + /* Cores which have an affine DPIO/DPCON.
6841 + * This is the cpu set on which Rx and Tx conf frames are processed
6842 + */
6843 + struct cpumask dpio_cpumask;
6844 +
6845 + u16 mc_token;
6846 +
6847 + struct dpni_link_state link_state;
6848 + bool do_link_poll;
6849 + struct task_struct *poll_thread;
6850 +
6851 + /* Rx distribution (hash and flow steering) header fields
6852 + * supported by the driver
6853 + */
6854 + struct dpaa2_eth_dist_fields *dist_fields;
6855 + u8 num_dist_fields;
6856 + /* enabled ethtool hashing bits */
6857 + u64 rx_hash_fields;
6858 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
6859 + struct dpaa2_debugfs dbg;
6860 +#endif
6861 + /* array of classification rules */
6862 + struct dpaa2_eth_cls_rule *cls_rule;
6863 + struct dpni_tx_shaping_cfg shaping_cfg;
6864 +
6865 + u8 dcbx_mode;
6866 + struct ieee_pfc pfc;
6867 + bool vlan_clsf_set;
6868 + bool tx_pause_frames;
6869 +
6870 + bool ceetm_en;
6871 +};
6872 +
6873 +enum dpaa2_eth_rx_dist {
6874 + DPAA2_ETH_RX_DIST_HASH,
6875 + DPAA2_ETH_RX_DIST_FS,
6876 + DPAA2_ETH_RX_DIST_LEGACY
6877 +};
6878 +
6879 +/* Supported Rx distribution field ids */
6880 +#define DPAA2_ETH_DIST_ETHSRC BIT(0)
6881 +#define DPAA2_ETH_DIST_ETHDST BIT(1)
6882 +#define DPAA2_ETH_DIST_ETHTYPE BIT(2)
6883 +#define DPAA2_ETH_DIST_VLAN BIT(3)
6884 +#define DPAA2_ETH_DIST_IPSRC BIT(4)
6885 +#define DPAA2_ETH_DIST_IPDST BIT(5)
6886 +#define DPAA2_ETH_DIST_IPPROTO BIT(6)
6887 +#define DPAA2_ETH_DIST_L4SRC BIT(7)
6888 +#define DPAA2_ETH_DIST_L4DST BIT(8)
6889 +#define DPAA2_ETH_DIST_ALL (~0U)
6890 +
6891 +/* Default Rx hash key */
6892 +#define DPAA2_ETH_DIST_DEFAULT_HASH \
6893 + (DPAA2_ETH_DIST_IPPROTO | \
6894 + DPAA2_ETH_DIST_IPSRC | DPAA2_ETH_DIST_IPDST | \
6895 + DPAA2_ETH_DIST_L4SRC | DPAA2_ETH_DIST_L4DST)
6896 +
6897 +#define dpaa2_eth_hash_enabled(priv) \
6898 + ((priv)->dpni_attrs.num_queues > 1)
6899 +
6900 +#define dpaa2_eth_fs_enabled(priv) \
6901 + (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
6902 +
6903 +#define dpaa2_eth_fs_mask_enabled(priv) \
6904 + ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
6905 +
6906 +#define dpaa2_eth_fs_count(priv) \
6907 + ((priv)->dpni_attrs.fs_entries)
6908 +
6909 +/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
6910 +#define DPAA2_CLASSIFIER_DMA_SIZE 256
6911 +
6912 +extern const struct ethtool_ops dpaa2_ethtool_ops;
6913 +extern const char dpaa2_eth_drv_version[];
6914 +
6915 +static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
6916 + u16 ver_major, u16 ver_minor)
6917 +{
6918 + if (priv->dpni_ver_major == ver_major)
6919 + return priv->dpni_ver_minor - ver_minor;
6920 + return priv->dpni_ver_major - ver_major;
6921 +}
6922 +
6923 +#define DPNI_DIST_KEY_VER_MAJOR 7
6924 +#define DPNI_DIST_KEY_VER_MINOR 5
6925 +
6926 +static inline bool dpaa2_eth_has_legacy_dist(struct dpaa2_eth_priv *priv)
6927 +{
6928 + return (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DIST_KEY_VER_MAJOR,
6929 + DPNI_DIST_KEY_VER_MINOR) < 0);
6930 +}
6931 +
6932 +/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around
6933 + * the buffer also needs space for its shared info struct, and we need
6934 + * to allocate enough to accommodate hardware alignment restrictions
6935 + */
6936 +static inline unsigned int dpaa2_eth_buf_raw_size(struct dpaa2_eth_priv *priv)
6937 +{
6938 + return DPAA2_ETH_SKB_SIZE + priv->rx_buf_align;
6939 +}
6940 +
6941 +/* Total headroom needed by the hardware in Tx frame buffers */
6942 +static inline unsigned int
6943 +dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv, struct sk_buff *skb)
6944 +{
6945 + unsigned int headroom = DPAA2_ETH_SWA_SIZE;
6946 +
6947 + /* If we don't have an skb (e.g. XDP buffer), we only need space for
6948 + * the software annotation area
6949 + */
6950 + if (!skb)
6951 + return headroom;
6952 +
6953 + /* For non-linear skbs we have no headroom requirement, as we build a
6954 + * SG frame with a newly allocated SGT buffer
6955 + */
6956 + if (skb_is_nonlinear(skb))
6957 + return 0;
6958 +
6959 + /* If we have Tx timestamping, need 128B hardware annotation */
6960 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
6961 + headroom += DPAA2_ETH_TX_HWA_SIZE;
6962 +
6963 + return headroom;
6964 +}
6965 +
6966 +/* Extra headroom space requested to hardware, in order to make sure there's
6967 + * no realloc'ing in forwarding scenarios. We need to reserve enough space
6968 + * such that we can accommodate the maximum required Tx offset and alignment
6969 + * in the ingress frame buffer
6970 + */
6971 +static inline unsigned int dpaa2_eth_rx_headroom(struct dpaa2_eth_priv *priv)
6972 +{
6973 + return priv->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN -
6974 + DPAA2_ETH_RX_HWA_SIZE;
6975 +}
6976 +
6977 +static inline int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
6978 +{
6979 + return priv->dpni_attrs.num_queues;
6980 +}
6981 +
6982 +static inline int dpaa2_eth_tc_count(struct dpaa2_eth_priv *priv)
6983 +{
6984 + return priv->dpni_attrs.num_tcs;
6985 +}
6986 +
6987 +static inline bool dpaa2_eth_is_pfc_enabled(struct dpaa2_eth_priv *priv,
6988 + int traffic_class)
6989 +{
6990 + return priv->pfc.pfc_en & (1 << traffic_class);
6991 +}
6992 +
6993 +enum dpaa2_eth_td_cfg {
6994 + DPAA2_ETH_TD_NONE,
6995 + DPAA2_ETH_TD_QUEUE,
6996 + DPAA2_ETH_TD_GROUP
6997 +};
6998 +
6999 +static inline enum dpaa2_eth_td_cfg
7000 +dpaa2_eth_get_td_type(struct dpaa2_eth_priv *priv)
7001 +{
7002 + bool pfc_enabled = !!(priv->pfc.pfc_en);
7003 +
7004 + if (pfc_enabled)
7005 + return DPAA2_ETH_TD_GROUP;
7006 + else if (priv->tx_pause_frames)
7007 + return DPAA2_ETH_TD_NONE;
7008 + else
7009 + return DPAA2_ETH_TD_QUEUE;
7010 +}
7011 +
7012 +static inline int dpaa2_eth_ch_count(struct dpaa2_eth_priv *priv)
7013 +{
7014 + return 1;
7015 +}
7016 +
7017 +void check_cls_support(struct dpaa2_eth_priv *priv);
7018 +
7019 +int set_rx_taildrop(struct dpaa2_eth_priv *priv);
7020 +
7021 +int dpaa2_eth_set_dist_key(struct dpaa2_eth_priv *priv,
7022 + enum dpaa2_eth_rx_dist type, u32 key_fields);
7023 +
7024 +#endif /* __DPAA2_H */
7025 --- /dev/null
7026 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
7027 @@ -0,0 +1,878 @@
7028 +/* Copyright 2014-2016 Freescale Semiconductor Inc.
7029 + * Copyright 2016-2017 NXP
7030 + *
7031 + * Redistribution and use in source and binary forms, with or without
7032 + * modification, are permitted provided that the following conditions are met:
7033 + * * Redistributions of source code must retain the above copyright
7034 + * notice, this list of conditions and the following disclaimer.
7035 + * * Redistributions in binary form must reproduce the above copyright
7036 + * notice, this list of conditions and the following disclaimer in the
7037 + * documentation and/or other materials provided with the distribution.
7038 + * * Neither the name of Freescale Semiconductor nor the
7039 + * names of its contributors may be used to endorse or promote products
7040 + * derived from this software without specific prior written permission.
7041 + *
7042 + *
7043 + * ALTERNATIVELY, this software may be distributed under the terms of the
7044 + * GNU General Public License ("GPL") as published by the Free Software
7045 + * Foundation, either version 2 of that License or (at your option) any
7046 + * later version.
7047 + *
7048 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
7049 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
7050 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
7051 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
7052 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
7053 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
7054 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
7055 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
7056 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
7057 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7058 + */
7059 +
7060 +#include "dpni.h" /* DPNI_LINK_OPT_* */
7061 +#include "dpaa2-eth.h"
7062 +
7063 +/* To be kept in sync with DPNI statistics */
7064 +static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
7065 + "[hw] rx frames",
7066 + "[hw] rx bytes",
7067 + "[hw] rx mcast frames",
7068 + "[hw] rx mcast bytes",
7069 + "[hw] rx bcast frames",
7070 + "[hw] rx bcast bytes",
7071 + "[hw] tx frames",
7072 + "[hw] tx bytes",
7073 + "[hw] tx mcast frames",
7074 + "[hw] tx mcast bytes",
7075 + "[hw] tx bcast frames",
7076 + "[hw] tx bcast bytes",
7077 + "[hw] rx filtered frames",
7078 + "[hw] rx discarded frames",
7079 + "[hw] rx nobuffer discards",
7080 + "[hw] tx discarded frames",
7081 + "[hw] tx confirmed frames",
7082 +};
7083 +
7084 +#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
7085 +
7086 +static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
7087 + /* per-cpu stats */
7088 + "[drv] tx conf frames",
7089 + "[drv] tx conf bytes",
7090 + "[drv] tx sg frames",
7091 + "[drv] tx sg bytes",
7092 + "[drv] tx realloc frames",
7093 + "[drv] rx sg frames",
7094 + "[drv] rx sg bytes",
7095 + "[drv] enqueue portal busy",
7096 + /* Channel stats */
7097 + "[drv] dequeue portal busy",
7098 + "[drv] channel pull errors",
7099 + "[drv] cdan",
7100 + "[drv] tx congestion state",
7101 +#ifdef CONFIG_FSL_QBMAN_DEBUG
7102 + /* FQ stats */
7103 + "rx pending frames",
7104 + "rx pending bytes",
7105 + "tx conf pending frames",
7106 + "tx conf pending bytes",
7107 + "buffer count"
7108 +#endif
7109 +};
7110 +
7111 +#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
7112 +
7113 +static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
7114 + struct ethtool_drvinfo *drvinfo)
7115 +{
7116 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
7117 +
7118 + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
7119 + strlcpy(drvinfo->version, dpaa2_eth_drv_version,
7120 + sizeof(drvinfo->version));
7121 +
7122 + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
7123 + "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);
7124 +
7125 + strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
7126 + sizeof(drvinfo->bus_info));
7127 +}
7128 +
7129 +static int
7130 +dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
7131 + struct ethtool_link_ksettings *link_settings)
7132 +{
7133 + struct dpni_link_state state = {0};
7134 + int err = 0;
7135 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
7136 +
7137 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
7138 + if (err) {
7139 + netdev_err(net_dev, "ERROR %d getting link state\n", err);
7140 + goto out;
7141 + }
7142 +
7143 + /* At the moment, we have no way of interrogating the DPMAC
7144 + * from the DPNI side - and for that matter there may exist
7145 + * no DPMAC at all. So for now we just don't report anything
7146 + * beyond the DPNI attributes.
7147 + */
7148 + if (state.options & DPNI_LINK_OPT_AUTONEG)
7149 + link_settings->base.autoneg = AUTONEG_ENABLE;
7150 + if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
7151 + link_settings->base.duplex = DUPLEX_FULL;
7152 + link_settings->base.speed = state.rate;
7153 +
7154 +out:
7155 + return err;
7156 +}
7157 +
7158 +#define DPNI_DYNAMIC_LINK_SET_VER_MAJOR 7
7159 +#define DPNI_DYNAMIC_LINK_SET_VER_MINOR 1
7160 +static int
7161 +dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
7162 + const struct ethtool_link_ksettings *link_settings)
7163 +{
7164 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
7165 + struct dpni_link_state state = {0};
7166 + struct dpni_link_cfg cfg = {0};
7167 + int err = 0;
7168 +
7169 + /* If using an older MC version, the DPNI must be down
7170 + * in order to be able to change link settings. Taking steps to let
7171 + * the user know that.
7172 + */
7173 + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DYNAMIC_LINK_SET_VER_MAJOR,
7174 + DPNI_DYNAMIC_LINK_SET_VER_MINOR) < 0) {
7175 + if (netif_running(net_dev)) {
7176 + netdev_info(net_dev, "Interface must be brought down first.\n");
7177 + return -EACCES;
7178 + }
7179 + }
7180 +
7181 + /* Need to interrogate link state to get flow control params */
7182 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
7183 + if (err) {
7184 + netdev_err(net_dev, "Error getting link state\n");
7185 + goto out;
7186 + }
7187 +
7188 + cfg.options = state.options;
7189 + cfg.rate = link_settings->base.speed;
7190 + if (link_settings->base.autoneg == AUTONEG_ENABLE)
7191 + cfg.options |= DPNI_LINK_OPT_AUTONEG;
7192 + else
7193 + cfg.options &= ~DPNI_LINK_OPT_AUTONEG;
7194 + if (link_settings->base.duplex == DUPLEX_HALF)
7195 + cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX;
7196 + else
7197 + cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
7198 +
7199 + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
7200 + if (err)
7201 + /* ethtool will be loud enough if we return an error; no point
7202 + * in putting our own error message on the console by default
7203 + */
7204 + netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err);
7205 +
7206 +out:
7207 + return err;
7208 +}
7209 +
7210 +static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
7211 + struct ethtool_pauseparam *pause)
7212 +{
7213 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
7214 + struct dpni_link_state state = {0};
7215 + int err;
7216 +
7217 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
7218 + if (err)
7219 + netdev_dbg(net_dev, "Error getting link state\n");
7220 +
7221 + /* Report general port autonegotiation status */
7222 + pause->autoneg = !!(state.options & DPNI_LINK_OPT_AUTONEG);
7223 + pause->rx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE);
7224 + pause->tx_pause = pause->rx_pause ^
7225 + !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
7226 +}
7227 +
7228 +static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
7229 + struct ethtool_pauseparam *pause)
7230 +{
7231 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
7232 + struct dpni_link_state state = {0};
7233 + struct dpni_link_cfg cfg = {0};
7234 + u32 current_tx_pause;
7235 + int err = 0;
7236 +
7237 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
7238 + if (err) {
7239 + netdev_dbg(net_dev, "Error getting link state\n");
7240 + goto out;
7241 + }
7242 +
7243 + cfg.rate = state.rate;
7244 + cfg.options = state.options;
7245 + current_tx_pause = !!(cfg.options & DPNI_LINK_OPT_PAUSE) ^
7246 + !!(cfg.options & DPNI_LINK_OPT_ASYM_PAUSE);
7247 +
7248 + /* We don't support changing pause frame autonegotiation separately
7249 + * from general port autoneg
7250 + */
7251 + if (pause->autoneg != !!(state.options & DPNI_LINK_OPT_AUTONEG))
7252 + netdev_warn(net_dev,
7253 + "Cannot change pause frame autoneg separately\n");
7254 +
7255 + if (pause->rx_pause)
7256 + cfg.options |= DPNI_LINK_OPT_PAUSE;
7257 + else
7258 + cfg.options &= ~DPNI_LINK_OPT_PAUSE;
7259 +
7260 + if (pause->rx_pause ^ pause->tx_pause)
7261 + cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
7262 + else
7263 + cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
7264 +
7265 + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
7266 + if (err) {
7267 + netdev_dbg(net_dev, "Error setting link\n");
7268 + goto out;
7269 + }
7270 +
7271 + /* Enable/disable Rx FQ taildrop if Tx pause frames have changed */
7272 + if (current_tx_pause == pause->tx_pause)
7273 + goto out;
7274 +
7275 + priv->tx_pause_frames = pause->tx_pause;
7276 + err = set_rx_taildrop(priv);
7277 + if (err)
7278 + netdev_dbg(net_dev, "Error configuring taildrop\n");
7279 +
7280 +out:
7281 + return err;
7282 +}
7283 +
7284 +static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
7285 + u8 *data)
7286 +{
7287 + u8 *p = data;
7288 + int i;
7289 +
7290 + switch (stringset) {
7291 + case ETH_SS_STATS:
7292 + for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
7293 + strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
7294 + p += ETH_GSTRING_LEN;
7295 + }
7296 + for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
7297 + strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
7298 + p += ETH_GSTRING_LEN;
7299 + }
7300 + break;
7301 + }
7302 +}
7303 +
7304 +static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
7305 +{
7306 + switch (sset) {
7307 + case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
7308 + return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
7309 + default:
7310 + return -EOPNOTSUPP;
7311 + }
7312 +}
7313 +
7314 +/** Fill in hardware counters, as returned by MC.
7315 + */
7316 +static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
7317 + struct ethtool_stats *stats,
7318 + u64 *data)
7319 +{
7320 + int i = 0;
7321 + int j, k, err;
7322 + int num_cnt;
7323 + union dpni_statistics dpni_stats;
7324 +
7325 +#ifdef CONFIG_FSL_QBMAN_DEBUG
7326 + u32 fcnt, bcnt;
7327 + u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
7328 + u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
7329 + u32 buf_cnt;
7330 +#endif
7331 + u64 cdan = 0;
7332 + u64 portal_busy = 0, pull_err = 0;
7333 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
7334 + struct dpaa2_eth_drv_stats *extras;
7335 + struct dpaa2_eth_ch_stats *ch_stats;
7336 +
7337 + memset(data, 0,
7338 + sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
7339 +
7340 + /* Print standard counters, from DPNI statistics */
7341 + for (j = 0; j <= 2; j++) {
7342 + err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
7343 + j, 0, &dpni_stats);
7344 + if (err != 0)
7345 + netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
7346 + switch (j) {
7347 + case 0:
7348 + num_cnt = sizeof(dpni_stats.page_0) / sizeof(u64);
7349 + break;
7350 + case 1:
7351 + num_cnt = sizeof(dpni_stats.page_1) / sizeof(u64);
7352 + break;
7353 + case 2:
7354 + num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64);
7355 + break;
7356 + }
7357 + for (k = 0; k < num_cnt; k++)
7358 + *(data + i++) = dpni_stats.raw.counter[k];
7359 + }
7360 +
7361 + /* Print per-cpu extra stats */
7362 + for_each_online_cpu(k) {
7363 + extras = per_cpu_ptr(priv->percpu_extras, k);
7364 + for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
7365 + *((__u64 *)data + i + j) += *((__u64 *)extras + j);
7366 + }
7367 + i += j;
7368 +
7369 + for (j = 0; j < priv->num_channels; j++) {
7370 + ch_stats = &priv->channel[j]->stats;
7371 + cdan += ch_stats->cdan;
7372 + portal_busy += ch_stats->dequeue_portal_busy;
7373 + pull_err += ch_stats->pull_err;
7374 + }
7375 +
7376 + *(data + i++) = portal_busy;
7377 + *(data + i++) = pull_err;
7378 + *(data + i++) = cdan;
7379 +
7380 + *(data + i++) = dpaa2_cscn_state_congested(priv->cscn_mem);
7381 +
7382 +#ifdef CONFIG_FSL_QBMAN_DEBUG
7383 + for (j = 0; j < priv->num_fqs; j++) {
7384 + /* Print FQ instantaneous counts */
7385 + err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
7386 + &fcnt, &bcnt);
7387 + if (err) {
7388 + netdev_warn(net_dev, "FQ query error %d", err);
7389 + return;
7390 + }
7391 +
7392 + if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
7393 + fcnt_tx_total += fcnt;
7394 + bcnt_tx_total += bcnt;
7395 + } else {
7396 + fcnt_rx_total += fcnt;
7397 + bcnt_rx_total += bcnt;
7398 + }
7399 + }
7400 +
7401 + *(data + i++) = fcnt_rx_total;
7402 + *(data + i++) = bcnt_rx_total;
7403 + *(data + i++) = fcnt_tx_total;
7404 + *(data + i++) = bcnt_tx_total;
7405 +
7406 + err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
7407 + if (err) {
7408 + netdev_warn(net_dev, "Buffer count query error %d\n", err);
7409 + return;
7410 + }
7411 + *(data + i++) = buf_cnt;
7412 +#endif
7413 +}
7414 +
7415 +static int cls_key_off(struct dpaa2_eth_priv *priv, int prot, int field)
7416 +{
7417 + int i, off = 0;
7418 +
7419 + for (i = 0; i < priv->num_dist_fields; i++) {
7420 + if (priv->dist_fields[i].cls_prot == prot &&
7421 + priv->dist_fields[i].cls_field == field)
7422 + return off;
7423 + off += priv->dist_fields[i].size;
7424 + }
7425 +
7426 + return -1;
7427 +}
7428 +
7429 +static u8 cls_key_size(struct dpaa2_eth_priv *priv)
7430 +{
7431 + u8 i, size = 0;
7432 +
7433 + for (i = 0; i < priv->num_dist_fields; i++)
7434 + size += priv->dist_fields[i].size;
7435 +
7436 + return size;
7437 +}
7438 +
7439 +void check_cls_support(struct dpaa2_eth_priv *priv)
7440 +{
7441 + u8 key_size = cls_key_size(priv);
7442 + struct device *dev = priv->net_dev->dev.parent;
7443 +
7444 + if (dpaa2_eth_hash_enabled(priv)) {
7445 + if (priv->dpni_attrs.fs_key_size < key_size) {
7446 + dev_info(dev, "max_dist_key_size = %d, expected %d. Hashing and steering are disabled\n",
7447 + priv->dpni_attrs.fs_key_size,
7448 + key_size);
7449 + goto disable_fs;
7450 + }
7451 + if (priv->num_dist_fields > DPKG_MAX_NUM_OF_EXTRACTS) {
7452 + dev_info(dev, "Too many key fields (max = %d). Hashing and steering are disabled\n",
7453 + DPKG_MAX_NUM_OF_EXTRACTS);
7454 + goto disable_fs;
7455 + }
7456 + }
7457 +
7458 + if (dpaa2_eth_fs_enabled(priv)) {
7459 + if (!dpaa2_eth_hash_enabled(priv)) {
7460 + dev_info(dev, "Insufficient queues. Steering is disabled\n");
7461 + goto disable_fs;
7462 + }
7463 +
7464 + if (!dpaa2_eth_fs_mask_enabled(priv)) {
7465 + dev_info(dev, "Key masks not supported. Steering is disabled\n");
7466 + goto disable_fs;
7467 + }
7468 + }
7469 +
7470 + return;
7471 +
7472 +disable_fs:
7473 + priv->dpni_attrs.options |= DPNI_OPT_NO_FS;
7474 + priv->dpni_attrs.options &= ~DPNI_OPT_HAS_KEY_MASKING;
7475 +}
7476 +
7477 +static int prep_l4_rule(struct dpaa2_eth_priv *priv,
7478 + struct ethtool_tcpip4_spec *l4_value,
7479 + struct ethtool_tcpip4_spec *l4_mask,
7480 + void *key, void *mask, u8 l4_proto)
7481 +{
7482 + int offset;
7483 +
7484 + if (l4_mask->tos) {
7485 + netdev_err(priv->net_dev, "ToS is not supported for IPv4 L4\n");
7486 + return -EOPNOTSUPP;
7487 + }
7488 +
7489 + if (l4_mask->ip4src) {
7490 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
7491 + *(u32 *)(key + offset) = l4_value->ip4src;
7492 + *(u32 *)(mask + offset) = l4_mask->ip4src;
7493 + }
7494 +
7495 + if (l4_mask->ip4dst) {
7496 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
7497 + *(u32 *)(key + offset) = l4_value->ip4dst;
7498 + *(u32 *)(mask + offset) = l4_mask->ip4dst;
7499 + }
7500 +
7501 + if (l4_mask->psrc) {
7502 + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
7503 + *(u32 *)(key + offset) = l4_value->psrc;
7504 + *(u32 *)(mask + offset) = l4_mask->psrc;
7505 + }
7506 +
7507 + if (l4_mask->pdst) {
7508 + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
7509 + *(u32 *)(key + offset) = l4_value->pdst;
7510 + *(u32 *)(mask + offset) = l4_mask->pdst;
7511 + }
7512 +
7513 + /* Only apply the rule for the user-specified L4 protocol
7514 + * and if ethertype matches IPv4
7515 + */
7516 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
7517 + *(u16 *)(key + offset) = htons(ETH_P_IP);
7518 + *(u16 *)(mask + offset) = 0xFFFF;
7519 +
7520 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
7521 + *(u8 *)(key + offset) = l4_proto;
7522 + *(u8 *)(mask + offset) = 0xFF;
7523 +
7524 + /* TODO: check IP version */
7525 +
7526 + return 0;
7527 +}
7528 +
7529 +static int prep_eth_rule(struct dpaa2_eth_priv *priv,
7530 + struct ethhdr *eth_value, struct ethhdr *eth_mask,
7531 + void *key, void *mask)
7532 +{
7533 + int offset;
7534 +
7535 + if (eth_mask->h_proto) {
7536 + netdev_err(priv->net_dev, "Ethertype is not supported!\n");
7537 + return -EOPNOTSUPP;
7538 + }
7539 +
7540 + if (!is_zero_ether_addr(eth_mask->h_source)) {
7541 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_SA);
7542 + ether_addr_copy(key + offset, eth_value->h_source);
7543 + ether_addr_copy(mask + offset, eth_mask->h_source);
7544 + }
7545 +
7546 + if (!is_zero_ether_addr(eth_mask->h_dest)) {
7547 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
7548 + ether_addr_copy(key + offset, eth_value->h_dest);
7549 + ether_addr_copy(mask + offset, eth_mask->h_dest);
7550 + }
7551 +
7552 + return 0;
7553 +}
7554 +
7555 +static int prep_user_ip_rule(struct dpaa2_eth_priv *priv,
7556 + struct ethtool_usrip4_spec *uip_value,
7557 + struct ethtool_usrip4_spec *uip_mask,
7558 + void *key, void *mask)
7559 +{
7560 + int offset;
7561 +
7562 + if (uip_mask->tos)
7563 + return -EOPNOTSUPP;
7564 +
7565 + if (uip_mask->ip4src) {
7566 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
7567 + *(u32 *)(key + offset) = uip_value->ip4src;
7568 + *(u32 *)(mask + offset) = uip_mask->ip4src;
7569 + }
7570 +
7571 + if (uip_mask->ip4dst) {
7572 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
7573 + *(u32 *)(key + offset) = uip_value->ip4dst;
7574 + *(u32 *)(mask + offset) = uip_mask->ip4dst;
7575 + }
7576 +
7577 + if (uip_mask->proto) {
7578 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
7579 + *(u32 *)(key + offset) = uip_value->proto;
7580 + *(u32 *)(mask + offset) = uip_mask->proto;
7581 + }
7582 + if (uip_mask->l4_4_bytes) {
7583 + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
7584 + *(u16 *)(key + offset) = uip_value->l4_4_bytes << 16;
7585 + *(u16 *)(mask + offset) = uip_mask->l4_4_bytes << 16;
7586 +
7587 + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
7588 + *(u16 *)(key + offset) = uip_value->l4_4_bytes & 0xFFFF;
7589 + *(u16 *)(mask + offset) = uip_mask->l4_4_bytes & 0xFFFF;
7590 + }
7591 +
7592 + /* Ethertype must be IP */
7593 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
7594 + *(u16 *)(key + offset) = htons(ETH_P_IP);
7595 + *(u16 *)(mask + offset) = 0xFFFF;
7596 +
7597 + return 0;
7598 +}
7599 +
7600 +static int prep_ext_rule(struct dpaa2_eth_priv *priv,
7601 + struct ethtool_flow_ext *ext_value,
7602 + struct ethtool_flow_ext *ext_mask,
7603 + void *key, void *mask)
7604 +{
7605 + int offset;
7606 +
7607 + if (ext_mask->vlan_etype)
7608 + return -EOPNOTSUPP;
7609 +
7610 + if (ext_mask->vlan_tci) {
7611 + offset = cls_key_off(priv, NET_PROT_VLAN, NH_FLD_VLAN_TCI);
7612 + *(u16 *)(key + offset) = ext_value->vlan_tci;
7613 + *(u16 *)(mask + offset) = ext_mask->vlan_tci;
7614 + }
7615 +
7616 + return 0;
7617 +}
7618 +
7619 +static int prep_mac_ext_rule(struct dpaa2_eth_priv *priv,
7620 + struct ethtool_flow_ext *ext_value,
7621 + struct ethtool_flow_ext *ext_mask,
7622 + void *key, void *mask)
7623 +{
7624 + int offset;
7625 +
7626 + if (!is_zero_ether_addr(ext_mask->h_dest)) {
7627 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
7628 + ether_addr_copy(key + offset, ext_value->h_dest);
7629 + ether_addr_copy(mask + offset, ext_mask->h_dest);
7630 + }
7631 +
7632 + return 0;
7633 +}
7634 +
7635 +static int prep_cls_rule(struct net_device *net_dev,
7636 + struct ethtool_rx_flow_spec *fs,
7637 + void *key)
7638 +{
7639 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
7640 + const u8 key_size = cls_key_size(priv);
7641 + void *msk = key + key_size;
7642 + int err;
7643 +
7644 + memset(key, 0, key_size * 2);
7645 +
7646 + switch (fs->flow_type & 0xff) {
7647 + case TCP_V4_FLOW:
7648 + err = prep_l4_rule(priv, &fs->h_u.tcp_ip4_spec,
7649 + &fs->m_u.tcp_ip4_spec, key, msk,
7650 + IPPROTO_TCP);
7651 + break;
7652 + case UDP_V4_FLOW:
7653 + err = prep_l4_rule(priv, &fs->h_u.udp_ip4_spec,
7654 + &fs->m_u.udp_ip4_spec, key, msk,
7655 + IPPROTO_UDP);
7656 + break;
7657 + case SCTP_V4_FLOW:
7658 + err = prep_l4_rule(priv, &fs->h_u.sctp_ip4_spec,
7659 + &fs->m_u.sctp_ip4_spec, key, msk,
7660 + IPPROTO_SCTP);
7661 + break;
7662 + case ETHER_FLOW:
7663 + err = prep_eth_rule(priv, &fs->h_u.ether_spec,
7664 + &fs->m_u.ether_spec, key, msk);
7665 + break;
7666 + case IP_USER_FLOW:
7667 + err = prep_user_ip_rule(priv, &fs->h_u.usr_ip4_spec,
7668 + &fs->m_u.usr_ip4_spec, key, msk);
7669 + break;
7670 + default:
7671 + /* TODO: AH, ESP */
7672 + return -EOPNOTSUPP;
7673 + }
7674 + if (err)
7675 + return err;
7676 +
7677 + if (fs->flow_type & FLOW_EXT) {
7678 + err = prep_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
7679 + if (err)
7680 + return err;
7681 + }
7682 +
7683 + if (fs->flow_type & FLOW_MAC_EXT) {
7684 + err = prep_mac_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
7685 + if (err)
7686 + return err;
7687 + }
7688 +
7689 + return 0;
7690 +}
7691 +
7692 +static int del_cls(struct net_device *net_dev, int location);
7693 +
7694 +static int do_cls(struct net_device *net_dev,
7695 + struct ethtool_rx_flow_spec *fs,
7696 + bool add)
7697 +{
7698 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
7699 + struct device *dev = net_dev->dev.parent;
7700 + const int rule_cnt = dpaa2_eth_fs_count(priv);
7701 + struct dpni_rule_cfg rule_cfg;
7702 + struct dpni_fs_action_cfg fs_act = { 0 };
7703 + void *dma_mem;
7704 + int err = 0, tc;
7705 +
7706 + if (!dpaa2_eth_fs_enabled(priv)) {
7707 + netdev_err(net_dev, "dev does not support steering!\n");
7708 + /* dev doesn't support steering */
7709 + return -EOPNOTSUPP;
7710 + }
7711 +
7712 + if ((fs->ring_cookie != RX_CLS_FLOW_DISC &&
7713 + fs->ring_cookie >= dpaa2_eth_queue_count(priv)) ||
7714 + fs->location >= rule_cnt)
7715 + return -EINVAL;
7716 +
7717 + /* When adding a new rule, check if location if available
7718 + * and if not, free the existing table entry before inserting
7719 + * the new one
7720 + */
7721 + if (add && (priv->cls_rule[fs->location].in_use == true))
7722 + del_cls(net_dev, fs->location);
7723 +
7724 + memset(&rule_cfg, 0, sizeof(rule_cfg));
7725 + rule_cfg.key_size = cls_key_size(priv);
7726 +
7727 + /* allocate twice the key size, for the actual key and for mask */
7728 + dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL);
7729 + if (!dma_mem)
7730 + return -ENOMEM;
7731 +
7732 + err = prep_cls_rule(net_dev, fs, dma_mem);
7733 + if (err)
7734 + goto err_free_mem;
7735 +
7736 + rule_cfg.key_iova = dma_map_single(dev, dma_mem,
7737 + rule_cfg.key_size * 2,
7738 + DMA_TO_DEVICE);
7739 +
7740 + rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size;
7741 +
7742 + if (fs->ring_cookie == RX_CLS_FLOW_DISC)
7743 + fs_act.options |= DPNI_FS_OPT_DISCARD;
7744 + else
7745 + fs_act.flow_id = fs->ring_cookie;
7746 +
7747 + for (tc = 0; tc < dpaa2_eth_tc_count(priv); tc++) {
7748 + if (add)
7749 + err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
7750 + tc, fs->location, &rule_cfg,
7751 + &fs_act);
7752 + else
7753 + err = dpni_remove_fs_entry(priv->mc_io, 0,
7754 + priv->mc_token, tc,
7755 + &rule_cfg);
7756 +
7757 + if (err)
7758 + break;
7759 + }
7760 +
7761 + dma_unmap_single(dev, rule_cfg.key_iova,
7762 + rule_cfg.key_size * 2, DMA_TO_DEVICE);
7763 +
7764 + if (err)
7765 + netdev_err(net_dev, "dpaa2_add/remove_cls() error %d\n", err);
7766 +
7767 +err_free_mem:
7768 + kfree(dma_mem);
7769 +
7770 + return err;
7771 +}
7772 +
7773 +static int add_cls(struct net_device *net_dev,
7774 + struct ethtool_rx_flow_spec *fs)
7775 +{
7776 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
7777 + int err;
7778 +
7779 + err = do_cls(net_dev, fs, true);
7780 + if (err)
7781 + return err;
7782 +
7783 + priv->cls_rule[fs->location].in_use = true;
7784 + priv->cls_rule[fs->location].fs = *fs;
7785 +
7786 + return 0;
7787 +}
7788 +
7789 +static int del_cls(struct net_device *net_dev, int location)
7790 +{
7791 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
7792 + int err;
7793 +
7794 + err = do_cls(net_dev, &priv->cls_rule[location].fs, false);
7795 + if (err)
7796 + return err;
7797 +
7798 + priv->cls_rule[location].in_use = false;
7799 +
7800 + return 0;
7801 +}
7802 +
7803 +static int set_hash(struct net_device *net_dev, u64 data)
7804 +{
7805 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
7806 + u32 key = 0;
7807 + int i;
7808 +
7809 + if (data & RXH_DISCARD)
7810 + return -EOPNOTSUPP;
7811 +
7812 + for (i = 0; i < priv->num_dist_fields; i++)
7813 + if (priv->dist_fields[i].rxnfc_field & data)
7814 + key |= priv->dist_fields[i].id;
7815 +
7816 + return dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_HASH, key);
7817 +}
7818 +
7819 +static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
7820 + struct ethtool_rxnfc *rxnfc)
7821 +{
7822 + int err = 0;
7823 +
7824 + switch (rxnfc->cmd) {
7825 + case ETHTOOL_SRXCLSRLINS:
7826 + err = add_cls(net_dev, &rxnfc->fs);
7827 + break;
7828 + case ETHTOOL_SRXCLSRLDEL:
7829 + err = del_cls(net_dev, rxnfc->fs.location);
7830 + break;
7831 + case ETHTOOL_SRXFH:
7832 + err = set_hash(net_dev, rxnfc->data);
7833 + break;
7834 + default:
7835 + err = -EOPNOTSUPP;
7836 + }
7837 +
7838 + return err;
7839 +}
7840 +
7841 +static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
7842 + struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
7843 +{
7844 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
7845 + const int rule_cnt = dpaa2_eth_fs_count(priv);
7846 + int i, j;
7847 +
7848 + switch (rxnfc->cmd) {
7849 + case ETHTOOL_GRXFH:
7850 +		/* we purposely ignore rxnfc->flow_type for now, because the
7851 + * classifier only supports a single set of fields for all
7852 + * protocols
7853 + */
7854 + rxnfc->data = priv->rx_hash_fields;
7855 + break;
7856 + case ETHTOOL_GRXRINGS:
7857 + rxnfc->data = dpaa2_eth_queue_count(priv);
7858 + break;
7859 +
7860 + case ETHTOOL_GRXCLSRLCNT:
7861 + for (i = 0, rxnfc->rule_cnt = 0; i < rule_cnt; i++)
7862 + if (priv->cls_rule[i].in_use)
7863 + rxnfc->rule_cnt++;
7864 + rxnfc->data = rule_cnt;
7865 + break;
7866 +
7867 + case ETHTOOL_GRXCLSRULE:
7868 + if (!priv->cls_rule[rxnfc->fs.location].in_use)
7869 + return -EINVAL;
7870 +
7871 + rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs;
7872 + break;
7873 +
7874 + case ETHTOOL_GRXCLSRLALL:
7875 + for (i = 0, j = 0; i < rule_cnt; i++) {
7876 + if (!priv->cls_rule[i].in_use)
7877 + continue;
7878 + if (j == rxnfc->rule_cnt)
7879 + return -EMSGSIZE;
7880 + rule_locs[j++] = i;
7881 + }
7882 + rxnfc->rule_cnt = j;
7883 + rxnfc->data = rule_cnt;
7884 + break;
7885 +
7886 + default:
7887 + return -EOPNOTSUPP;
7888 + }
7889 +
7890 + return 0;
7891 +}
7892 +
7893 +const struct ethtool_ops dpaa2_ethtool_ops = {
7894 + .get_drvinfo = dpaa2_eth_get_drvinfo,
7895 + .get_link = ethtool_op_get_link,
7896 + .get_link_ksettings = dpaa2_eth_get_link_ksettings,
7897 + .set_link_ksettings = dpaa2_eth_set_link_ksettings,
7898 + .get_pauseparam = dpaa2_eth_get_pauseparam,
7899 + .set_pauseparam = dpaa2_eth_set_pauseparam,
7900 + .get_sset_count = dpaa2_eth_get_sset_count,
7901 + .get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
7902 + .get_strings = dpaa2_eth_get_strings,
7903 + .get_rxnfc = dpaa2_eth_get_rxnfc,
7904 + .set_rxnfc = dpaa2_eth_set_rxnfc,
7905 +};
7906 --- /dev/null
7907 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
7908 @@ -0,0 +1,176 @@
7909 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
7910 + *
7911 + * Redistribution and use in source and binary forms, with or without
7912 + * modification, are permitted provided that the following conditions are met:
7913 + * * Redistributions of source code must retain the above copyright
7914 + * notice, this list of conditions and the following disclaimer.
7915 + * * Redistributions in binary form must reproduce the above copyright
7916 + * notice, this list of conditions and the following disclaimer in the
7917 + * documentation and/or other materials provided with the distribution.
7918 + * * Neither the name of the above-listed copyright holders nor the
7919 + * names of any contributors may be used to endorse or promote products
7920 + * derived from this software without specific prior written permission.
7921 + *
7922 + *
7923 + * ALTERNATIVELY, this software may be distributed under the terms of the
7924 + * GNU General Public License ("GPL") as published by the Free Software
7925 + * Foundation, either version 2 of that License or (at your option) any
7926 + * later version.
7927 + *
7928 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
7929 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
7930 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
7931 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
7932 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
7933 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
7934 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
7935 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
7936 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
7937 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
7938 + * POSSIBILITY OF SUCH DAMAGE.
7939 + */
7940 +#ifndef __FSL_DPKG_H_
7941 +#define __FSL_DPKG_H_
7942 +
7943 +#include <linux/types.h>
7944 +#include "net.h"
7945 +
7946 +/* Data Path Key Generator API
7947 + * Contains initialization APIs and runtime APIs for the Key Generator
7948 + */
7949 +
7950 +/** Key Generator properties */
7951 +
7952 +/**
7953 + * Number of masks per key extraction
7954 + */
7955 +#define DPKG_NUM_OF_MASKS 4
7956 +/**
7957 + * Number of extractions per key profile
7958 + */
7959 +#define DPKG_MAX_NUM_OF_EXTRACTS 10
7960 +
7961 +/**
7962 + * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
7963 + * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
7964 + * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
7965 + * @DPKG_FULL_FIELD: Extract a full field
7966 + */
7967 +enum dpkg_extract_from_hdr_type {
7968 + DPKG_FROM_HDR = 0,
7969 + DPKG_FROM_FIELD = 1,
7970 + DPKG_FULL_FIELD = 2
7971 +};
7972 +
7973 +/**
7974 + * enum dpkg_extract_type - Enumeration for selecting extraction type
7975 + * @DPKG_EXTRACT_FROM_HDR: Extract from the header
7976 + * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
7977 + * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result;
7978 + * e.g. can be used to extract header existence;
7979 + * please refer to 'Parse Result definition' section in the parser BG
7980 + */
7981 +enum dpkg_extract_type {
7982 + DPKG_EXTRACT_FROM_HDR = 0,
7983 + DPKG_EXTRACT_FROM_DATA = 1,
7984 + DPKG_EXTRACT_FROM_PARSE = 3
7985 +};
7986 +
7987 +/**
7988 + * struct dpkg_mask - A structure for defining a single extraction mask
7989 + * @mask: Byte mask for the extracted content
7990 + * @offset: Offset within the extracted content
7991 + */
7992 +struct dpkg_mask {
7993 + u8 mask;
7994 + u8 offset;
7995 +};
7996 +
7997 +/**
7998 + * struct dpkg_extract - A structure for defining a single extraction
7999 + * @type: Determines how the union below is interpreted:
8000 + * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
8001 + * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
8002 + * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
8003 + * @extract: Selects extraction method
8004 + * @num_of_byte_masks: Defines the number of valid entries in the array below;
8005 + * This is also the number of bytes to be used as masks
8006 + * @masks: Masks parameters
8007 + */
8008 +struct dpkg_extract {
8009 + enum dpkg_extract_type type;
8010 + /**
8011 + * union extract - Selects extraction method
8012 + * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
8013 + * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
8014 + * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
8015 + */
8016 + union {
8017 + /**
8018 + * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
8019 + * @prot: Any of the supported headers
8020 + * @type: Defines the type of header extraction:
8021 + * DPKG_FROM_HDR: use size & offset below;
8022 + * DPKG_FROM_FIELD: use field, size and offset below;
8023 + * DPKG_FULL_FIELD: use field below
8024 + * @field: One of the supported fields (NH_FLD_)
8025 + *
8026 + * @size: Size in bytes
8027 + * @offset: Byte offset
8028 + * @hdr_index: Clear for cases not listed below;
8029 + * Used for protocols that may have more than a single
8030 + * header, 0 indicates an outer header;
8031 + * Supported protocols (possible values):
8032 + * NET_PROT_VLAN (0, HDR_INDEX_LAST);
8033 + * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
8034 + * NET_PROT_IP(0, HDR_INDEX_LAST);
8035 + * NET_PROT_IPv4(0, HDR_INDEX_LAST);
8036 + * NET_PROT_IPv6(0, HDR_INDEX_LAST);
8037 + */
8038 +
8039 + struct {
8040 + enum net_prot prot;
8041 + enum dpkg_extract_from_hdr_type type;
8042 + u32 field;
8043 + u8 size;
8044 + u8 offset;
8045 + u8 hdr_index;
8046 + } from_hdr;
8047 + /**
8048 + * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
8049 + * @size: Size in bytes
8050 + * @offset: Byte offset
8051 + */
8052 + struct {
8053 + u8 size;
8054 + u8 offset;
8055 + } from_data;
8056 +
8057 + /**
8058 + * struct from_parse - Used when
8059 + * 'type = DPKG_EXTRACT_FROM_PARSE'
8060 + * @size: Size in bytes
8061 + * @offset: Byte offset
8062 + */
8063 + struct {
8064 + u8 size;
8065 + u8 offset;
8066 + } from_parse;
8067 + } extract;
8068 +
8069 + u8 num_of_byte_masks;
8070 + struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
8071 +};
8072 +
8073 +/**
8074 + * struct dpkg_profile_cfg - A structure for defining a full Key Generation
8075 + * profile (rule)
8076 + * @num_extracts: Defines the number of valid entries in the array below
8077 + * @extracts: Array of required extractions
8078 + */
8079 +struct dpkg_profile_cfg {
8080 + u8 num_extracts;
8081 + struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
8082 +};
8083 +
8084 +#endif /* __FSL_DPKG_H_ */
8085 --- /dev/null
8086 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
8087 @@ -0,0 +1,719 @@
8088 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
8089 + * Copyright 2016 NXP
8090 + *
8091 + * Redistribution and use in source and binary forms, with or without
8092 + * modification, are permitted provided that the following conditions are met:
8093 + * * Redistributions of source code must retain the above copyright
8094 + * notice, this list of conditions and the following disclaimer.
8095 + * * Redistributions in binary form must reproduce the above copyright
8096 + * notice, this list of conditions and the following disclaimer in the
8097 + * documentation and/or other materials provided with the distribution.
8098 + * * Neither the name of the above-listed copyright holders nor the
8099 + * names of any contributors may be used to endorse or promote products
8100 + * derived from this software without specific prior written permission.
8101 + *
8102 + *
8103 + * ALTERNATIVELY, this software may be distributed under the terms of the
8104 + * GNU General Public License ("GPL") as published by the Free Software
8105 + * Foundation, either version 2 of that License or (at your option) any
8106 + * later version.
8107 + *
8108 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
8109 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8110 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8111 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
8112 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
8113 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
8114 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
8115 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
8116 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
8117 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
8118 + * POSSIBILITY OF SUCH DAMAGE.
8119 + */
8120 +#ifndef _FSL_DPNI_CMD_H
8121 +#define _FSL_DPNI_CMD_H
8122 +
8123 +#include "dpni.h"
8124 +
8125 +/* DPNI Version */
8126 +#define DPNI_VER_MAJOR 7
8127 +#define DPNI_VER_MINOR 0
8128 +#define DPNI_CMD_BASE_VERSION 1
8129 +#define DPNI_CMD_2ND_VERSION 2
8130 +#define DPNI_CMD_ID_OFFSET 4
8131 +
8132 +#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
8133 +#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION)
8134 +
8135 +#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
8136 +#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
8137 +#define DPNI_CMDID_CREATE DPNI_CMD(0x901)
8138 +#define DPNI_CMDID_DESTROY DPNI_CMD(0x900)
8139 +#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01)
8140 +
8141 +#define DPNI_CMDID_ENABLE DPNI_CMD(0x002)
8142 +#define DPNI_CMDID_DISABLE DPNI_CMD(0x003)
8143 +#define DPNI_CMDID_GET_ATTR DPNI_CMD(0x004)
8144 +#define DPNI_CMDID_RESET DPNI_CMD(0x005)
8145 +#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006)
8146 +
8147 +#define DPNI_CMDID_SET_IRQ DPNI_CMD(0x010)
8148 +#define DPNI_CMDID_GET_IRQ DPNI_CMD(0x011)
8149 +#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012)
8150 +#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013)
8151 +#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014)
8152 +#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015)
8153 +#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
8154 +#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
8155 +
8156 +#define DPNI_CMDID_SET_POOLS DPNI_CMD_V2(0x200)
8157 +#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
8158 +
8159 +#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
8160 +#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212)
8161 +#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215)
8162 +#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
8163 +#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
8164 +#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A)
8165 +#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V2(0x21B)
8166 +
8167 +#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
8168 +#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
8169 +#define DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222)
8170 +#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223)
8171 +#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224)
8172 +#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225)
8173 +#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD(0x226)
8174 +#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227)
8175 +#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228)
8176 +
8177 +#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
8178 +
8179 +#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240)
8180 +#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241)
8181 +#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242)
8182 +#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
8183 +#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
8184 +#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
8185 +
8186 +#define DPNI_CMDID_SET_TX_PRIORITIES DPNI_CMD_V2(0x250)
8187 +#define DPNI_CMDID_GET_STATISTICS DPNI_CMD_V2(0x25D)
8188 +#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E)
8189 +#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F)
8190 +#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260)
8191 +#define DPNI_CMDID_GET_TAILDROP DPNI_CMD(0x261)
8192 +#define DPNI_CMDID_SET_TAILDROP DPNI_CMD(0x262)
8193 +
8194 +#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263)
8195 +
8196 +#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD(0x264)
8197 +#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD(0x265)
8198 +
8199 +#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
8200 +#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD(0x267)
8201 +#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD(0x268)
8202 +#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD(0x269)
8203 +#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD(0x26A)
8204 +#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
8205 +#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
8206 +
8207 +#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273)
8208 +#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
8209 +
8210 +/* Macros for accessing command fields smaller than 1byte */
8211 +#define DPNI_MASK(field) \
8212 + GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
8213 + DPNI_##field##_SHIFT)
8214 +
8215 +#define dpni_set_field(var, field, val) \
8216 + ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field)))
8217 +#define dpni_get_field(var, field) \
8218 + (((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT)
8219 +
8220 +struct dpni_cmd_open {
8221 + __le32 dpni_id;
8222 +};
8223 +
8224 +#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
8225 +struct dpni_cmd_set_pools {
8226 + u8 num_dpbp;
8227 + u8 backup_pool_mask;
8228 + __le16 pad;
8229 + struct {
8230 + __le16 dpbp_id;
8231 + u8 priority_mask;
8232 + u8 pad;
8233 + } pool[DPNI_MAX_DPBP];
8234 + __le16 buffer_size[DPNI_MAX_DPBP];
8235 +};
8236 +
8237 +/* The enable indication is always the least significant bit */
8238 +#define DPNI_ENABLE_SHIFT 0
8239 +#define DPNI_ENABLE_SIZE 1
8240 +
8241 +struct dpni_rsp_is_enabled {
8242 + u8 enabled;
8243 +};
8244 +
8245 +struct dpni_rsp_get_irq {
8246 + /* response word 0 */
8247 + __le32 irq_val;
8248 + __le32 pad;
8249 + /* response word 1 */
8250 + __le64 irq_addr;
8251 + /* response word 2 */
8252 + __le32 irq_num;
8253 + __le32 type;
8254 +};
8255 +
8256 +struct dpni_cmd_set_irq_enable {
8257 + u8 enable;
8258 + u8 pad[3];
8259 + u8 irq_index;
8260 +};
8261 +
8262 +struct dpni_cmd_get_irq_enable {
8263 + __le32 pad;
8264 + u8 irq_index;
8265 +};
8266 +
8267 +struct dpni_rsp_get_irq_enable {
8268 + u8 enabled;
8269 +};
8270 +
8271 +struct dpni_cmd_set_irq_mask {
8272 + __le32 mask;
8273 + u8 irq_index;
8274 +};
8275 +
8276 +struct dpni_cmd_get_irq_mask {
8277 + __le32 pad;
8278 + u8 irq_index;
8279 +};
8280 +
8281 +struct dpni_rsp_get_irq_mask {
8282 + __le32 mask;
8283 +};
8284 +
8285 +struct dpni_cmd_get_irq_status {
8286 + __le32 status;
8287 + u8 irq_index;
8288 +};
8289 +
8290 +struct dpni_rsp_get_irq_status {
8291 + __le32 status;
8292 +};
8293 +
8294 +struct dpni_cmd_clear_irq_status {
8295 + __le32 status;
8296 + u8 irq_index;
8297 +};
8298 +
8299 +struct dpni_rsp_get_attr {
8300 + /* response word 0 */
8301 + __le32 options;
8302 + u8 num_queues;
8303 + u8 num_tcs;
8304 + u8 mac_filter_entries;
8305 + u8 pad0;
8306 + /* response word 1 */
8307 + u8 vlan_filter_entries;
8308 + u8 pad1;
8309 + u8 qos_entries;
8310 + u8 pad2;
8311 + __le16 fs_entries;
8312 + __le16 pad3;
8313 + /* response word 2 */
8314 + u8 qos_key_size;
8315 + u8 fs_key_size;
8316 + __le16 wriop_version;
8317 +};
8318 +
8319 +#define DPNI_ERROR_ACTION_SHIFT 0
8320 +#define DPNI_ERROR_ACTION_SIZE 4
8321 +#define DPNI_FRAME_ANN_SHIFT 4
8322 +#define DPNI_FRAME_ANN_SIZE 1
8323 +
8324 +struct dpni_cmd_set_errors_behavior {
8325 + __le32 errors;
8326 + /* from least significant bit: error_action:4, set_frame_annotation:1 */
8327 + u8 flags;
8328 +};
8329 +
8330 +/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation
8331 + * buffer layouts, but they all share the same parameters.
8332 + * If one of the functions changes, below structure needs to be split.
8333 + */
8334 +
8335 +#define DPNI_PASS_TS_SHIFT 0
8336 +#define DPNI_PASS_TS_SIZE 1
8337 +#define DPNI_PASS_PR_SHIFT 1
8338 +#define DPNI_PASS_PR_SIZE 1
8339 +#define DPNI_PASS_FS_SHIFT 2
8340 +#define DPNI_PASS_FS_SIZE 1
8341 +
8342 +struct dpni_cmd_get_buffer_layout {
8343 + u8 qtype;
8344 +};
8345 +
8346 +struct dpni_rsp_get_buffer_layout {
8347 + /* response word 0 */
8348 + u8 pad0[6];
8349 + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
8350 + u8 flags;
8351 + u8 pad1;
8352 + /* response word 1 */
8353 + __le16 private_data_size;
8354 + __le16 data_align;
8355 + __le16 head_room;
8356 + __le16 tail_room;
8357 +};
8358 +
8359 +struct dpni_cmd_set_buffer_layout {
8360 + /* cmd word 0 */
8361 + u8 qtype;
8362 + u8 pad0[3];
8363 + __le16 options;
8364 + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
8365 + u8 flags;
8366 + u8 pad1;
8367 + /* cmd word 1 */
8368 + __le16 private_data_size;
8369 + __le16 data_align;
8370 + __le16 head_room;
8371 + __le16 tail_room;
8372 +};
8373 +
8374 +struct dpni_cmd_set_offload {
8375 + u8 pad[3];
8376 + u8 dpni_offload;
8377 + __le32 config;
8378 +};
8379 +
8380 +struct dpni_cmd_get_offload {
8381 + u8 pad[3];
8382 + u8 dpni_offload;
8383 +};
8384 +
8385 +struct dpni_rsp_get_offload {
8386 + __le32 pad;
8387 + __le32 config;
8388 +};
8389 +
8390 +struct dpni_cmd_get_qdid {
8391 + u8 qtype;
8392 +};
8393 +
8394 +struct dpni_rsp_get_qdid {
8395 + __le16 qdid;
8396 +};
8397 +
8398 +struct dpni_rsp_get_tx_data_offset {
8399 + __le16 data_offset;
8400 +};
8401 +
8402 +struct dpni_cmd_get_statistics {
8403 + u8 page_number;
8404 + u8 param;
8405 +};
8406 +
8407 +struct dpni_rsp_get_statistics {
8408 + __le64 counter[DPNI_STATISTICS_CNT];
8409 +};
8410 +
8411 +struct dpni_cmd_set_link_cfg {
8412 + /* cmd word 0 */
8413 + __le64 pad0;
8414 + /* cmd word 1 */
8415 + __le32 rate;
8416 + __le32 pad1;
8417 + /* cmd word 2 */
8418 + __le64 options;
8419 +};
8420 +
8421 +#define DPNI_LINK_STATE_SHIFT 0
8422 +#define DPNI_LINK_STATE_SIZE 1
8423 +
8424 +struct dpni_rsp_get_link_state {
8425 + /* response word 0 */
8426 + __le32 pad0;
8427 + /* from LSB: up:1 */
8428 + u8 flags;
8429 + u8 pad1[3];
8430 + /* response word 1 */
8431 + __le32 rate;
8432 + __le32 pad2;
8433 + /* response word 2 */
8434 + __le64 options;
8435 +};
8436 +
8437 +#define DPNI_COUPLED_SHIFT 0
8438 +#define DPNI_COUPLED_SIZE 1
8439 +
8440 +struct dpni_cmd_set_tx_shaping {
8441 + /* cmd word 0 */
8442 + __le16 tx_cr_max_burst_size;
8443 + __le16 tx_er_max_burst_size;
8444 + __le32 pad;
8445 + /* cmd word 1 */
8446 + __le32 tx_cr_rate_limit;
8447 + __le32 tx_er_rate_limit;
8448 + /* cmd word 2 */
8449 + /* from LSB: coupled:1 */
8450 + u8 coupled;
8451 +};
8452 +
8453 +struct dpni_cmd_set_max_frame_length {
8454 + __le16 max_frame_length;
8455 +};
8456 +
8457 +struct dpni_rsp_get_max_frame_length {
8458 + __le16 max_frame_length;
8459 +};
8460 +
8461 +struct dpni_cmd_set_multicast_promisc {
8462 + u8 enable;
8463 +};
8464 +
8465 +struct dpni_rsp_get_multicast_promisc {
8466 + u8 enabled;
8467 +};
8468 +
8469 +struct dpni_cmd_set_unicast_promisc {
8470 + u8 enable;
8471 +};
8472 +
8473 +struct dpni_rsp_get_unicast_promisc {
8474 + u8 enabled;
8475 +};
8476 +
8477 +struct dpni_cmd_set_primary_mac_addr {
8478 + __le16 pad;
8479 + u8 mac_addr[6];
8480 +};
8481 +
8482 +struct dpni_rsp_get_primary_mac_addr {
8483 + __le16 pad;
8484 + u8 mac_addr[6];
8485 +};
8486 +
8487 +struct dpni_rsp_get_port_mac_addr {
8488 + __le16 pad;
8489 + u8 mac_addr[6];
8490 +};
8491 +
8492 +struct dpni_cmd_add_mac_addr {
8493 + __le16 pad;
8494 + u8 mac_addr[6];
8495 +};
8496 +
8497 +struct dpni_cmd_remove_mac_addr {
8498 + __le16 pad;
8499 + u8 mac_addr[6];
8500 +};
8501 +
8502 +#define DPNI_UNICAST_FILTERS_SHIFT 0
8503 +#define DPNI_UNICAST_FILTERS_SIZE 1
8504 +#define DPNI_MULTICAST_FILTERS_SHIFT 1
8505 +#define DPNI_MULTICAST_FILTERS_SIZE 1
8506 +
8507 +struct dpni_cmd_clear_mac_filters {
8508 + /* from LSB: unicast:1, multicast:1 */
8509 + u8 flags;
8510 +};
8511 +
8512 +#define DPNI_SEPARATE_GRP_SHIFT 0
8513 +#define DPNI_SEPARATE_GRP_SIZE 1
8514 +#define DPNI_MODE_1_SHIFT 0
8515 +#define DPNI_MODE_1_SIZE 4
8516 +#define DPNI_MODE_2_SHIFT 4
8517 +#define DPNI_MODE_2_SIZE 4
8518 +
8519 +struct dpni_cmd_set_tx_priorities {
8520 + __le16 flags;
8521 + u8 prio_group_A;
8522 + u8 prio_group_B;
8523 + __le32 pad0;
8524 + u8 modes[4];
8525 + __le32 pad1;
8526 + __le64 pad2;
8527 + __le16 delta_bandwidth[8];
8528 +};
8529 +
8530 +#define DPNI_DIST_MODE_SHIFT 0
8531 +#define DPNI_DIST_MODE_SIZE 4
8532 +#define DPNI_MISS_ACTION_SHIFT 4
8533 +#define DPNI_MISS_ACTION_SIZE 4
8534 +
8535 +struct dpni_cmd_set_rx_tc_dist {
8536 + /* cmd word 0 */
8537 + __le16 dist_size;
8538 + u8 tc_id;
8539 + /* from LSB: dist_mode:4, miss_action:4 */
8540 + u8 flags;
8541 + __le16 pad0;
8542 + __le16 default_flow_id;
8543 + /* cmd word 1..5 */
8544 + __le64 pad1[5];
8545 + /* cmd word 6 */
8546 + __le64 key_cfg_iova;
8547 +};
8548 +
8549 +/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at
8550 + * key_cfg_iova)
8551 + */
8552 +struct dpni_mask_cfg {
8553 + u8 mask;
8554 + u8 offset;
8555 +};
8556 +
8557 +#define DPNI_EFH_TYPE_SHIFT 0
8558 +#define DPNI_EFH_TYPE_SIZE 4
8559 +#define DPNI_EXTRACT_TYPE_SHIFT 0
8560 +#define DPNI_EXTRACT_TYPE_SIZE 4
8561 +
8562 +struct dpni_dist_extract {
8563 + /* word 0 */
8564 + u8 prot;
8565 + /* EFH type stored in the 4 least significant bits */
8566 + u8 efh_type;
8567 + u8 size;
8568 + u8 offset;
8569 + __le32 field;
8570 + /* word 1 */
8571 + u8 hdr_index;
8572 + u8 constant;
8573 + u8 num_of_repeats;
8574 + u8 num_of_byte_masks;
8575 + /* Extraction type is stored in the 4 LSBs */
8576 + u8 extract_type;
8577 + u8 pad[3];
8578 + /* word 2 */
8579 + struct dpni_mask_cfg masks[4];
8580 +};
8581 +
8582 +struct dpni_ext_set_rx_tc_dist {
8583 + /* extension word 0 */
8584 + u8 num_extracts;
8585 + u8 pad[7];
8586 + /* words 1..25 */
8587 + struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
8588 +};
8589 +
8590 +struct dpni_cmd_get_queue {
8591 + u8 qtype;
8592 + u8 tc;
8593 + u8 index;
8594 +};
8595 +
8596 +#define DPNI_DEST_TYPE_SHIFT 0
8597 +#define DPNI_DEST_TYPE_SIZE 4
8598 +#define DPNI_STASH_CTRL_SHIFT 6
8599 +#define DPNI_STASH_CTRL_SIZE 1
8600 +#define DPNI_HOLD_ACTIVE_SHIFT 7
8601 +#define DPNI_HOLD_ACTIVE_SIZE 1
8602 +
8603 +struct dpni_rsp_get_queue {
8604 + /* response word 0 */
8605 + __le64 pad0;
8606 + /* response word 1 */
8607 + __le32 dest_id;
8608 + __le16 pad1;
8609 + u8 dest_prio;
8610 + /* From LSB: dest_type:4, pad:2, flc_stash_ctrl:1, hold_active:1 */
8611 + u8 flags;
8612 + /* response word 2 */
8613 + __le64 flc;
8614 + /* response word 3 */
8615 + __le64 user_context;
8616 + /* response word 4 */
8617 + __le32 fqid;
8618 + __le16 qdbin;
8619 +};
8620 +
8621 +struct dpni_cmd_set_queue {
8622 + /* cmd word 0 */
8623 + u8 qtype;
8624 + u8 tc;
8625 + u8 index;
8626 + u8 options;
8627 + __le32 pad0;
8628 + /* cmd word 1 */
8629 + __le32 dest_id;
8630 + __le16 pad1;
8631 + u8 dest_prio;
8632 + u8 flags;
8633 + /* cmd word 2 */
8634 + __le64 flc;
8635 + /* cmd word 3 */
8636 + __le64 user_context;
8637 +};
8638 +
8639 +#define DPNI_DISCARD_ON_MISS_SHIFT 0
8640 +#define DPNI_DISCARD_ON_MISS_SIZE 1
8641 +
8642 +struct dpni_cmd_set_qos_table {
8643 + __le32 pad;
8644 + u8 default_tc;
8645 + /* only the LSB */
8646 + u8 discard_on_miss;
8647 + __le16 pad1[21];
8648 + __le64 key_cfg_iova;
8649 +};
8650 +
8651 +struct dpni_cmd_add_qos_entry {
8652 + __le16 pad;
8653 + u8 tc_id;
8654 + u8 key_size;
8655 + __le16 index;
8656 + __le16 pad2;
8657 + __le64 key_iova;
8658 + __le64 mask_iova;
8659 +};
8660 +
8661 +struct dpni_cmd_remove_qos_entry {
8662 + u8 pad1[3];
8663 + u8 key_size;
8664 + __le32 pad2;
8665 + __le64 key_iova;
8666 + __le64 mask_iova;
8667 +};
8668 +
8669 +struct dpni_cmd_add_fs_entry {
8670 + /* cmd word 0 */
8671 + __le16 options;
8672 + u8 tc_id;
8673 + u8 key_size;
8674 + __le16 index;
8675 + __le16 flow_id;
8676 + /* cmd word 1 */
8677 + __le64 key_iova;
8678 + /* cmd word 2 */
8679 + __le64 mask_iova;
8680 + /* cmd word 3 */
8681 + __le64 flc;
8682 +};
8683 +
8684 +struct dpni_cmd_remove_fs_entry {
8685 + /* cmd word 0 */
8686 + __le16 pad0;
8687 + u8 tc_id;
8688 + u8 key_size;
8689 + __le32 pad1;
8690 + /* cmd word 1 */
8691 + __le64 key_iova;
8692 + /* cmd word 2 */
8693 + __le64 mask_iova;
8694 +};
8695 +
8696 +struct dpni_cmd_set_taildrop {
8697 + /* cmd word 0 */
8698 + u8 congestion_point;
8699 + u8 qtype;
8700 + u8 tc;
8701 + u8 index;
8702 + __le32 pad0;
8703 + /* cmd word 1 */
8704 + /* Only least significant bit is relevant */
8705 + u8 enable;
8706 + u8 pad1;
8707 + u8 units;
8708 + u8 pad2;
8709 + __le32 threshold;
8710 +};
8711 +
8712 +struct dpni_cmd_get_taildrop {
8713 + u8 congestion_point;
8714 + u8 qtype;
8715 + u8 tc;
8716 + u8 index;
8717 +};
8718 +
8719 +struct dpni_rsp_get_taildrop {
8720 + /* cmd word 0 */
8721 + __le64 pad0;
8722 + /* cmd word 1 */
8723 + /* only least significant bit is relevant */
8724 + u8 enable;
8725 + u8 pad1;
8726 + u8 units;
8727 + u8 pad2;
8728 + __le32 threshold;
8729 +};
8730 +
8731 +struct dpni_rsp_get_api_version {
8732 + u16 major;
8733 + u16 minor;
8734 +};
8735 +
8736 +#define DPNI_DEST_TYPE_SHIFT 0
8737 +#define DPNI_DEST_TYPE_SIZE 4
8738 +#define DPNI_CONG_UNITS_SHIFT 4
8739 +#define DPNI_CONG_UNITS_SIZE 2
8740 +
8741 +struct dpni_cmd_set_congestion_notification {
8742 + /* cmd word 0 */
8743 + u8 qtype;
8744 + u8 tc;
8745 + u8 pad[6];
8746 + /* cmd word 1 */
8747 + __le32 dest_id;
8748 + __le16 notification_mode;
8749 + u8 dest_priority;
8750 + /* from LSB: dest_type: 4 units:2 */
8751 + u8 type_units;
8752 + /* cmd word 2 */
8753 + __le64 message_iova;
8754 + /* cmd word 3 */
8755 + __le64 message_ctx;
8756 + /* cmd word 4 */
8757 + __le32 threshold_entry;
8758 + __le32 threshold_exit;
8759 +};
8760 +
8761 +struct dpni_cmd_get_congestion_notification {
8762 + /* cmd word 0 */
8763 + u8 qtype;
8764 + u8 tc;
8765 +};
8766 +
8767 +struct dpni_rsp_get_congestion_notification {
8768 + /* cmd word 0 */
8769 + __le64 pad;
8770 + /* cmd word 1 */
8771 + __le32 dest_id;
8772 + __le16 notification_mode;
8773 + u8 dest_priority;
8774 + /* from LSB: dest_type: 4 units:2 */
8775 + u8 type_units;
8776 + /* cmd word 2 */
8777 + __le64 message_iova;
8778 + /* cmd word 3 */
8779 + __le64 message_ctx;
8780 + /* cmd word 4 */
8781 + __le32 threshold_entry;
8782 + __le32 threshold_exit;
8783 +};
8784 +
8785 +#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0
8786 +#define DPNI_RX_FS_DIST_ENABLE_SIZE 1
8787 +struct dpni_cmd_set_rx_fs_dist {
8788 + __le16 dist_size;
8789 + u8 enable;
8790 + u8 tc;
8791 + __le16 miss_flow_id;
8792 + __le16 pad;
8793 + __le64 key_cfg_iova;
8794 +};
8795 +
8796 +#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
8797 +#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1
8798 +struct dpni_cmd_set_rx_hash_dist {
8799 + __le16 dist_size;
8800 + u8 enable;
8801 + u8 tc;
8802 + __le32 pad;
8803 + __le64 key_cfg_iova;
8804 +};
8805 +
8806 +#endif /* _FSL_DPNI_CMD_H */
8807 --- /dev/null
8808 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
8809 @@ -0,0 +1,2112 @@
8810 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
8811 + * Copyright 2016 NXP
8812 + *
8813 + * Redistribution and use in source and binary forms, with or without
8814 + * modification, are permitted provided that the following conditions are met:
8815 + * * Redistributions of source code must retain the above copyright
8816 + * notice, this list of conditions and the following disclaimer.
8817 + * * Redistributions in binary form must reproduce the above copyright
8818 + * notice, this list of conditions and the following disclaimer in the
8819 + * documentation and/or other materials provided with the distribution.
8820 + * * Neither the name of the above-listed copyright holders nor the
8821 + * names of any contributors may be used to endorse or promote products
8822 + * derived from this software without specific prior written permission.
8823 + *
8824 + *
8825 + * ALTERNATIVELY, this software may be distributed under the terms of the
8826 + * GNU General Public License ("GPL") as published by the Free Software
8827 + * Foundation, either version 2 of that License or (at your option) any
8828 + * later version.
8829 + *
8830 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
8831 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8832 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8833 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
8834 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
8835 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
8836 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
8837 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
8838 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
8839 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
8840 + * POSSIBILITY OF SUCH DAMAGE.
8841 + */
8842 +#include <linux/kernel.h>
8843 +#include <linux/errno.h>
8844 +#include <linux/fsl/mc.h>
8845 +#include "dpni.h"
8846 +#include "dpni-cmd.h"
8847 +
8848 +/**
8849 + * dpni_prepare_key_cfg() - function that prepares the extract parameters
8850 + * @cfg: defining a full Key Generation profile (rule)
8851 + * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
8852 + *
8853 + * This function has to be called before the following functions:
8854 + * - dpni_set_rx_tc_dist()
8855 + * - dpni_set_qos_table()
8856 + */
8857 +int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf)
8858 +{
8859 + int i, j;
8860 + struct dpni_ext_set_rx_tc_dist *dpni_ext;
8861 + struct dpni_dist_extract *extr;
8862 +
8863 + if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
8864 + return -EINVAL;
8865 +
8866 + dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf;
8867 + dpni_ext->num_extracts = cfg->num_extracts;
8868 +
8869 + for (i = 0; i < cfg->num_extracts; i++) {
8870 + extr = &dpni_ext->extracts[i];
8871 +
8872 + switch (cfg->extracts[i].type) {
8873 + case DPKG_EXTRACT_FROM_HDR:
8874 + extr->prot = cfg->extracts[i].extract.from_hdr.prot;
8875 + dpni_set_field(extr->efh_type, EFH_TYPE,
8876 + cfg->extracts[i].extract.from_hdr.type);
8877 + extr->size = cfg->extracts[i].extract.from_hdr.size;
8878 + extr->offset = cfg->extracts[i].extract.from_hdr.offset;
8879 + extr->field = cpu_to_le32(
8880 + cfg->extracts[i].extract.from_hdr.field);
8881 + extr->hdr_index =
8882 + cfg->extracts[i].extract.from_hdr.hdr_index;
8883 + break;
8884 + case DPKG_EXTRACT_FROM_DATA:
8885 + extr->size = cfg->extracts[i].extract.from_data.size;
8886 + extr->offset =
8887 + cfg->extracts[i].extract.from_data.offset;
8888 + break;
8889 + case DPKG_EXTRACT_FROM_PARSE:
8890 + extr->size = cfg->extracts[i].extract.from_parse.size;
8891 + extr->offset =
8892 + cfg->extracts[i].extract.from_parse.offset;
8893 + break;
8894 + default:
8895 + return -EINVAL;
8896 + }
8897 +
8898 + extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
8899 + dpni_set_field(extr->extract_type, EXTRACT_TYPE,
8900 + cfg->extracts[i].type);
8901 +
8902 + for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
8903 + extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
8904 + extr->masks[j].offset =
8905 + cfg->extracts[i].masks[j].offset;
8906 + }
8907 + }
8908 +
8909 + return 0;
8910 +}
8911 +
8912 +/**
8913 + * dpni_open() - Open a control session for the specified object
8914 + * @mc_io: Pointer to MC portal's I/O object
8915 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8916 + * @dpni_id: DPNI unique ID
8917 + * @token: Returned token; use in subsequent API calls
8918 + *
8919 + * This function can be used to open a control session for an
8920 + * already created object; an object may have been declared in
8921 + * the DPL or by calling the dpni_create() function.
8922 + * This function returns a unique authentication token,
8923 + * associated with the specific object ID and the specific MC
8924 + * portal; this token must be used in all subsequent commands for
8925 + * this specific object.
8926 + *
8927 + * Return: '0' on Success; Error code otherwise.
8928 + */
8929 +int dpni_open(struct fsl_mc_io *mc_io,
8930 + u32 cmd_flags,
8931 + int dpni_id,
8932 + u16 *token)
8933 +{
8934 + struct fsl_mc_command cmd = { 0 };
8935 + struct dpni_cmd_open *cmd_params;
8936 +
8937 + int err;
8938 +
8939 + /* prepare command */
8940 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
8941 + cmd_flags,
8942 + 0);
8943 + cmd_params = (struct dpni_cmd_open *)cmd.params;
8944 + cmd_params->dpni_id = cpu_to_le32(dpni_id);
8945 +
8946 + /* send command to mc*/
8947 + err = mc_send_command(mc_io, &cmd);
8948 + if (err)
8949 + return err;
8950 +
8951 + /* retrieve response parameters */
8952 + *token = mc_cmd_hdr_read_token(&cmd);
8953 +
8954 + return 0;
8955 +}
8956 +
8957 +/**
8958 + * dpni_close() - Close the control session of the object
8959 + * @mc_io: Pointer to MC portal's I/O object
8960 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8961 + * @token: Token of DPNI object
8962 + *
8963 + * After this function is called, no further operations are
8964 + * allowed on the object without opening a new control session.
8965 + *
8966 + * Return: '0' on Success; Error code otherwise.
8967 + */
8968 +int dpni_close(struct fsl_mc_io *mc_io,
8969 + u32 cmd_flags,
8970 + u16 token)
8971 +{
8972 + struct fsl_mc_command cmd = { 0 };
8973 +
8974 + /* prepare command */
8975 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
8976 + cmd_flags,
8977 + token);
8978 +
8979 + /* send command to mc*/
8980 + return mc_send_command(mc_io, &cmd);
8981 +}
8982 +
8983 +/**
8984 + * dpni_set_pools() - Set buffer pools configuration
8985 + * @mc_io: Pointer to MC portal's I/O object
8986 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8987 + * @token: Token of DPNI object
8988 + * @cfg: Buffer pools configuration
8989 + *
8990 + * mandatory for DPNI operation
8991 + * warning:Allowed only when DPNI is disabled
8992 + *
8993 + * Return: '0' on Success; Error code otherwise.
8994 + */
8995 +int dpni_set_pools(struct fsl_mc_io *mc_io,
8996 + u32 cmd_flags,
8997 + u16 token,
8998 + const struct dpni_pools_cfg *cfg)
8999 +{
9000 + struct fsl_mc_command cmd = { 0 };
9001 + struct dpni_cmd_set_pools *cmd_params;
9002 + int i;
9003 +
9004 + /* prepare command */
9005 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
9006 + cmd_flags,
9007 + token);
9008 + cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
9009 + cmd_params->num_dpbp = cfg->num_dpbp;
9010 + for (i = 0; i < DPNI_MAX_DPBP; i++) {
9011 + cmd_params->pool[i].dpbp_id =
9012 + cpu_to_le16(cfg->pools[i].dpbp_id);
9013 + cmd_params->pool[i].priority_mask =
9014 + cfg->pools[i].priority_mask;
9015 + cmd_params->buffer_size[i] =
9016 + cpu_to_le16(cfg->pools[i].buffer_size);
9017 + cmd_params->backup_pool_mask |=
9018 + DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);
9019 + }
9020 +
9021 + /* send command to mc*/
9022 + return mc_send_command(mc_io, &cmd);
9023 +}
9024 +
9025 +/**
9026 + * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
9027 + * @mc_io: Pointer to MC portal's I/O object
9028 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9029 + * @token: Token of DPNI object
9030 + *
9031 + * Return: '0' on Success; Error code otherwise.
9032 + */
9033 +int dpni_enable(struct fsl_mc_io *mc_io,
9034 + u32 cmd_flags,
9035 + u16 token)
9036 +{
9037 + struct fsl_mc_command cmd = { 0 };
9038 +
9039 + /* prepare command */
9040 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
9041 + cmd_flags,
9042 + token);
9043 +
9044 + /* send command to mc*/
9045 + return mc_send_command(mc_io, &cmd);
9046 +}
9047 +
9048 +/**
9049 + * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
9050 + * @mc_io: Pointer to MC portal's I/O object
9051 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9052 + * @token: Token of DPNI object
9053 + *
9054 + * Return: '0' on Success; Error code otherwise.
9055 + */
9056 +int dpni_disable(struct fsl_mc_io *mc_io,
9057 + u32 cmd_flags,
9058 + u16 token)
9059 +{
9060 + struct fsl_mc_command cmd = { 0 };
9061 +
9062 + /* prepare command */
9063 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
9064 + cmd_flags,
9065 + token);
9066 +
9067 + /* send command to mc*/
9068 + return mc_send_command(mc_io, &cmd);
9069 +}
9070 +
9071 +/**
9072 + * dpni_is_enabled() - Check if the DPNI is enabled.
9073 + * @mc_io: Pointer to MC portal's I/O object
9074 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9075 + * @token: Token of DPNI object
9076 + * @en: Returns '1' if object is enabled; '0' otherwise
9077 + *
9078 + * Return: '0' on Success; Error code otherwise.
9079 + */
9080 +int dpni_is_enabled(struct fsl_mc_io *mc_io,
9081 + u32 cmd_flags,
9082 + u16 token,
9083 + int *en)
9084 +{
9085 + struct fsl_mc_command cmd = { 0 };
9086 + struct dpni_rsp_is_enabled *rsp_params;
9087 + int err;
9088 +
9089 + /* prepare command */
9090 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED,
9091 + cmd_flags,
9092 + token);
9093 +
9094 + /* send command to mc*/
9095 + err = mc_send_command(mc_io, &cmd);
9096 + if (err)
9097 + return err;
9098 +
9099 + /* retrieve response parameters */
9100 + rsp_params = (struct dpni_rsp_is_enabled *)cmd.params;
9101 + *en = dpni_get_field(rsp_params->enabled, ENABLE);
9102 +
9103 + return 0;
9104 +}
9105 +
9106 +/**
9107 + * dpni_reset() - Reset the DPNI, returns the object to initial state.
9108 + * @mc_io: Pointer to MC portal's I/O object
9109 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9110 + * @token: Token of DPNI object
9111 + *
9112 + * Return: '0' on Success; Error code otherwise.
9113 + */
9114 +int dpni_reset(struct fsl_mc_io *mc_io,
9115 + u32 cmd_flags,
9116 + u16 token)
9117 +{
9118 + struct fsl_mc_command cmd = { 0 };
9119 +
9120 + /* prepare command */
9121 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
9122 + cmd_flags,
9123 + token);
9124 +
9125 + /* send command to mc*/
9126 + return mc_send_command(mc_io, &cmd);
9127 +}
9128 +
9129 +/**
9130 + * dpni_set_irq_enable() - Set overall interrupt state.
9131 + * @mc_io: Pointer to MC portal's I/O object
9132 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9133 + * @token: Token of DPNI object
9134 + * @irq_index: The interrupt index to configure
9135 + * @en: Interrupt state: - enable = 1, disable = 0
9136 + *
9137 + * Allows GPP software to control when interrupts are generated.
9138 + * Each interrupt can have up to 32 causes. The enable/disable control's the
9139 + * overall interrupt state. if the interrupt is disabled no causes will cause
9140 + * an interrupt.
9141 + *
9142 + * Return: '0' on Success; Error code otherwise.
9143 + */
9144 +int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
9145 + u32 cmd_flags,
9146 + u16 token,
9147 + u8 irq_index,
9148 + u8 en)
9149 +{
9150 + struct fsl_mc_command cmd = { 0 };
9151 + struct dpni_cmd_set_irq_enable *cmd_params;
9152 +
9153 + /* prepare command */
9154 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE,
9155 + cmd_flags,
9156 + token);
9157 + cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params;
9158 + dpni_set_field(cmd_params->enable, ENABLE, en);
9159 + cmd_params->irq_index = irq_index;
9160 +
9161 + /* send command to mc*/
9162 + return mc_send_command(mc_io, &cmd);
9163 +}
9164 +
9165 +/**
9166 + * dpni_get_irq_enable() - Get overall interrupt state
9167 + * @mc_io: Pointer to MC portal's I/O object
9168 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9169 + * @token: Token of DPNI object
9170 + * @irq_index: The interrupt index to configure
9171 + * @en: Returned interrupt state - enable = 1, disable = 0
9172 + *
9173 + * Return: '0' on Success; Error code otherwise.
9174 + */
9175 +int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
9176 + u32 cmd_flags,
9177 + u16 token,
9178 + u8 irq_index,
9179 + u8 *en)
9180 +{
9181 + struct fsl_mc_command cmd = { 0 };
9182 + struct dpni_cmd_get_irq_enable *cmd_params;
9183 + struct dpni_rsp_get_irq_enable *rsp_params;
9184 +
9185 + int err;
9186 +
9187 + /* prepare command */
9188 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE,
9189 + cmd_flags,
9190 + token);
9191 + cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params;
9192 + cmd_params->irq_index = irq_index;
9193 +
9194 + /* send command to mc*/
9195 + err = mc_send_command(mc_io, &cmd);
9196 + if (err)
9197 + return err;
9198 +
9199 + /* retrieve response parameters */
9200 + rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params;
9201 + *en = dpni_get_field(rsp_params->enabled, ENABLE);
9202 +
9203 + return 0;
9204 +}
9205 +
9206 +/**
9207 + * dpni_set_irq_mask() - Set interrupt mask.
9208 + * @mc_io: Pointer to MC portal's I/O object
9209 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9210 + * @token: Token of DPNI object
9211 + * @irq_index: The interrupt index to configure
9212 + * @mask: event mask to trigger interrupt;
9213 + * each bit:
9214 + * 0 = ignore event
9215 + * 1 = consider event for asserting IRQ
9216 + *
9217 + * Every interrupt can have up to 32 causes and the interrupt model supports
9218 + * masking/unmasking each cause independently
9219 + *
9220 + * Return: '0' on Success; Error code otherwise.
9221 + */
9222 +int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
9223 + u32 cmd_flags,
9224 + u16 token,
9225 + u8 irq_index,
9226 + u32 mask)
9227 +{
9228 + struct fsl_mc_command cmd = { 0 };
9229 + struct dpni_cmd_set_irq_mask *cmd_params;
9230 +
9231 + /* prepare command */
9232 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK,
9233 + cmd_flags,
9234 + token);
9235 + cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params;
9236 + cmd_params->mask = cpu_to_le32(mask);
9237 + cmd_params->irq_index = irq_index;
9238 +
9239 + /* send command to mc*/
9240 + return mc_send_command(mc_io, &cmd);
9241 +}
9242 +
9243 +/**
9244 + * dpni_get_irq_mask() - Get interrupt mask.
9245 + * @mc_io: Pointer to MC portal's I/O object
9246 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9247 + * @token: Token of DPNI object
9248 + * @irq_index: The interrupt index to configure
9249 + * @mask: Returned event mask to trigger interrupt
9250 + *
9251 + * Every interrupt can have up to 32 causes and the interrupt model supports
9252 + * masking/unmasking each cause independently
9253 + *
9254 + * Return: '0' on Success; Error code otherwise.
9255 + */
9256 +int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
9257 + u32 cmd_flags,
9258 + u16 token,
9259 + u8 irq_index,
9260 + u32 *mask)
9261 +{
9262 + struct fsl_mc_command cmd = { 0 };
9263 + struct dpni_cmd_get_irq_mask *cmd_params;
9264 + struct dpni_rsp_get_irq_mask *rsp_params;
9265 + int err;
9266 +
9267 + /* prepare command */
9268 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK,
9269 + cmd_flags,
9270 + token);
9271 + cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params;
9272 + cmd_params->irq_index = irq_index;
9273 +
9274 + /* send command to mc*/
9275 + err = mc_send_command(mc_io, &cmd);
9276 + if (err)
9277 + return err;
9278 +
9279 + /* retrieve response parameters */
9280 + rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params;
9281 + *mask = le32_to_cpu(rsp_params->mask);
9282 +
9283 + return 0;
9284 +}
9285 +
9286 +/**
9287 + * dpni_get_irq_status() - Get the current status of any pending interrupts.
9288 + * @mc_io: Pointer to MC portal's I/O object
9289 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9290 + * @token: Token of DPNI object
9291 + * @irq_index: The interrupt index to configure
9292 + * @status: Returned interrupts status - one bit per cause:
9293 + * 0 = no interrupt pending
9294 + * 1 = interrupt pending
9295 + *
9296 + * Return: '0' on Success; Error code otherwise.
9297 + */
9298 +int dpni_get_irq_status(struct fsl_mc_io *mc_io,
9299 + u32 cmd_flags,
9300 + u16 token,
9301 + u8 irq_index,
9302 + u32 *status)
9303 +{
9304 + struct fsl_mc_command cmd = { 0 };
9305 + struct dpni_cmd_get_irq_status *cmd_params;
9306 + struct dpni_rsp_get_irq_status *rsp_params;
9307 + int err;
9308 +
9309 + /* prepare command */
9310 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS,
9311 + cmd_flags,
9312 + token);
9313 + cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params;
9314 + cmd_params->status = cpu_to_le32(*status);
9315 + cmd_params->irq_index = irq_index;
9316 +
9317 + /* send command to mc*/
9318 + err = mc_send_command(mc_io, &cmd);
9319 + if (err)
9320 + return err;
9321 +
9322 + /* retrieve response parameters */
9323 + rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params;
9324 + *status = le32_to_cpu(rsp_params->status);
9325 +
9326 + return 0;
9327 +}
9328 +
9329 +/**
9330 + * dpni_clear_irq_status() - Clear a pending interrupt's status
9331 + * @mc_io: Pointer to MC portal's I/O object
9332 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9333 + * @token: Token of DPNI object
9334 + * @irq_index: The interrupt index to configure
9335 + * @status: bits to clear (W1C) - one bit per cause:
9336 + * 0 = don't change
9337 + * 1 = clear status bit
9338 + *
9339 + * Return: '0' on Success; Error code otherwise.
9340 + */
9341 +int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
9342 + u32 cmd_flags,
9343 + u16 token,
9344 + u8 irq_index,
9345 + u32 status)
9346 +{
9347 + struct fsl_mc_command cmd = { 0 };
9348 + struct dpni_cmd_clear_irq_status *cmd_params;
9349 +
9350 + /* prepare command */
9351 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS,
9352 + cmd_flags,
9353 + token);
9354 + cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params;
9355 + cmd_params->irq_index = irq_index;
9356 + cmd_params->status = cpu_to_le32(status);
9357 +
9358 + /* send command to mc*/
9359 + return mc_send_command(mc_io, &cmd);
9360 +}
9361 +
9362 +/**
9363 + * dpni_get_attributes() - Retrieve DPNI attributes.
9364 + * @mc_io: Pointer to MC portal's I/O object
9365 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9366 + * @token: Token of DPNI object
9367 + * @attr: Object's attributes
9368 + *
9369 + * Return: '0' on Success; Error code otherwise.
9370 + */
9371 +int dpni_get_attributes(struct fsl_mc_io *mc_io,
9372 + u32 cmd_flags,
9373 + u16 token,
9374 + struct dpni_attr *attr)
9375 +{
9376 + struct fsl_mc_command cmd = { 0 };
9377 + struct dpni_rsp_get_attr *rsp_params;
9378 +
9379 + int err;
9380 +
9381 + /* prepare command */
9382 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR,
9383 + cmd_flags,
9384 + token);
9385 +
9386 + /* send command to mc*/
9387 + err = mc_send_command(mc_io, &cmd);
9388 + if (err)
9389 + return err;
9390 +
9391 + /* retrieve response parameters */
9392 + rsp_params = (struct dpni_rsp_get_attr *)cmd.params;
9393 + attr->options = le32_to_cpu(rsp_params->options);
9394 + attr->num_queues = rsp_params->num_queues;
9395 + attr->num_tcs = rsp_params->num_tcs;
9396 + attr->mac_filter_entries = rsp_params->mac_filter_entries;
9397 + attr->vlan_filter_entries = rsp_params->vlan_filter_entries;
9398 + attr->qos_entries = rsp_params->qos_entries;
9399 + attr->fs_entries = le16_to_cpu(rsp_params->fs_entries);
9400 + attr->qos_key_size = rsp_params->qos_key_size;
9401 + attr->fs_key_size = rsp_params->fs_key_size;
9402 + attr->wriop_version = le16_to_cpu(rsp_params->wriop_version);
9403 +
9404 + return 0;
9405 +}
9406 +
9407 +/**
9408 + * dpni_set_errors_behavior() - Set errors behavior
9409 + * @mc_io: Pointer to MC portal's I/O object
9410 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9411 + * @token: Token of DPNI object
9412 + * @cfg: Errors configuration
9413 + *
9414 + * this function may be called numerous times with different
9415 + * error masks
9416 + *
9417 + * Return: '0' on Success; Error code otherwise.
9418 + */
9419 +int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
9420 + u32 cmd_flags,
9421 + u16 token,
9422 + struct dpni_error_cfg *cfg)
9423 +{
9424 + struct fsl_mc_command cmd = { 0 };
9425 + struct dpni_cmd_set_errors_behavior *cmd_params;
9426 +
9427 + /* prepare command */
9428 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR,
9429 + cmd_flags,
9430 + token);
9431 + cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params;
9432 + cmd_params->errors = cpu_to_le32(cfg->errors);
9433 + dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action);
9434 + dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation);
9435 +
9436 + /* send command to mc*/
9437 + return mc_send_command(mc_io, &cmd);
9438 +}
9439 +
9440 +/**
9441 + * dpni_get_buffer_layout() - Retrieve buffer layout attributes.
9442 + * @mc_io: Pointer to MC portal's I/O object
9443 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9444 + * @token: Token of DPNI object
9445 + * @qtype: Type of queue to retrieve configuration for
9446 + * @layout: Returns buffer layout attributes
9447 + *
9448 + * Return: '0' on Success; Error code otherwise.
9449 + */
9450 +int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
9451 + u32 cmd_flags,
9452 + u16 token,
9453 + enum dpni_queue_type qtype,
9454 + struct dpni_buffer_layout *layout)
9455 +{
9456 + struct fsl_mc_command cmd = { 0 };
9457 + struct dpni_cmd_get_buffer_layout *cmd_params;
9458 + struct dpni_rsp_get_buffer_layout *rsp_params;
9459 + int err;
9460 +
9461 + /* prepare command */
9462 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT,
9463 + cmd_flags,
9464 + token);
9465 + cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params;
9466 + cmd_params->qtype = qtype;
9467 +
9468 + /* send command to mc*/
9469 + err = mc_send_command(mc_io, &cmd);
9470 + if (err)
9471 + return err;
9472 +
9473 + /* retrieve response parameters */
9474 + rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params;
9475 + layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS);
9476 + layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR);
9477 + layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS);
9478 + layout->private_data_size = le16_to_cpu(rsp_params->private_data_size);
9479 + layout->data_align = le16_to_cpu(rsp_params->data_align);
9480 + layout->data_head_room = le16_to_cpu(rsp_params->head_room);
9481 + layout->data_tail_room = le16_to_cpu(rsp_params->tail_room);
9482 +
9483 + return 0;
9484 +}
9485 +
9486 +/**
9487 + * dpni_set_buffer_layout() - Set buffer layout configuration.
9488 + * @mc_io: Pointer to MC portal's I/O object
9489 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9490 + * @token: Token of DPNI object
9491 + * @qtype: Type of queue this configuration applies to
9492 + * @layout: Buffer layout configuration
9493 + *
9494 + * Return: '0' on Success; Error code otherwise.
9495 + *
9496 + * @warning Allowed only when DPNI is disabled
9497 + */
9498 +int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
9499 + u32 cmd_flags,
9500 + u16 token,
9501 + enum dpni_queue_type qtype,
9502 + const struct dpni_buffer_layout *layout)
9503 +{
9504 + struct fsl_mc_command cmd = { 0 };
9505 + struct dpni_cmd_set_buffer_layout *cmd_params;
9506 +
9507 + /* prepare command */
9508 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT,
9509 + cmd_flags,
9510 + token);
9511 + cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params;
9512 + cmd_params->qtype = qtype;
9513 + cmd_params->options = cpu_to_le16(layout->options);
9514 + dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp);
9515 + dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result);
9516 + dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status);
9517 + cmd_params->private_data_size = cpu_to_le16(layout->private_data_size);
9518 + cmd_params->data_align = cpu_to_le16(layout->data_align);
9519 + cmd_params->head_room = cpu_to_le16(layout->data_head_room);
9520 + cmd_params->tail_room = cpu_to_le16(layout->data_tail_room);
9521 +
9522 + /* send command to mc*/
9523 + return mc_send_command(mc_io, &cmd);
9524 +}
9525 +
9526 +/**
9527 + * dpni_set_offload() - Set DPNI offload configuration.
9528 + * @mc_io: Pointer to MC portal's I/O object
9529 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9530 + * @token: Token of DPNI object
9531 + * @type: Type of DPNI offload
9532 + * @config: Offload configuration.
9533 + * For checksum offloads, non-zero value enables the offload
9534 + *
9535 + * Return: '0' on Success; Error code otherwise.
9536 + *
9537 + * @warning Allowed only when DPNI is disabled
9538 + */
9539 +
9540 +int dpni_set_offload(struct fsl_mc_io *mc_io,
9541 + u32 cmd_flags,
9542 + u16 token,
9543 + enum dpni_offload type,
9544 + u32 config)
9545 +{
9546 + struct fsl_mc_command cmd = { 0 };
9547 + struct dpni_cmd_set_offload *cmd_params;
9548 +
9549 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
9550 + cmd_flags,
9551 + token);
9552 + cmd_params = (struct dpni_cmd_set_offload *)cmd.params;
9553 + cmd_params->dpni_offload = type;
9554 + cmd_params->config = cpu_to_le32(config);
9555 +
9556 + return mc_send_command(mc_io, &cmd);
9557 +}
9558 +
9559 +int dpni_get_offload(struct fsl_mc_io *mc_io,
9560 + u32 cmd_flags,
9561 + u16 token,
9562 + enum dpni_offload type,
9563 + u32 *config)
9564 +{
9565 + struct fsl_mc_command cmd = { 0 };
9566 + struct dpni_cmd_get_offload *cmd_params;
9567 + struct dpni_rsp_get_offload *rsp_params;
9568 + int err;
9569 +
9570 + /* prepare command */
9571 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD,
9572 + cmd_flags,
9573 + token);
9574 + cmd_params = (struct dpni_cmd_get_offload *)cmd.params;
9575 + cmd_params->dpni_offload = type;
9576 +
9577 + /* send command to mc*/
9578 + err = mc_send_command(mc_io, &cmd);
9579 + if (err)
9580 + return err;
9581 +
9582 + /* retrieve response parameters */
9583 + rsp_params = (struct dpni_rsp_get_offload *)cmd.params;
9584 + *config = le32_to_cpu(rsp_params->config);
9585 +
9586 + return 0;
9587 +}
9588 +
9589 +/**
9590 + * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
9591 + * for enqueue operations
9592 + * @mc_io: Pointer to MC portal's I/O object
9593 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9594 + * @token: Token of DPNI object
9595 + * @qtype: Type of queue to receive QDID for
9596 + * @qdid: Returned virtual QDID value that should be used as an argument
9597 + * in all enqueue operations
9598 + *
9599 + * Return: '0' on Success; Error code otherwise.
9600 + */
9601 +int dpni_get_qdid(struct fsl_mc_io *mc_io,
9602 + u32 cmd_flags,
9603 + u16 token,
9604 + enum dpni_queue_type qtype,
9605 + u16 *qdid)
9606 +{
9607 + struct fsl_mc_command cmd = { 0 };
9608 + struct dpni_cmd_get_qdid *cmd_params;
9609 + struct dpni_rsp_get_qdid *rsp_params;
9610 + int err;
9611 +
9612 + /* prepare command */
9613 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
9614 + cmd_flags,
9615 + token);
9616 + cmd_params = (struct dpni_cmd_get_qdid *)cmd.params;
9617 + cmd_params->qtype = qtype;
9618 +
9619 + /* send command to mc*/
9620 + err = mc_send_command(mc_io, &cmd);
9621 + if (err)
9622 + return err;
9623 +
9624 + /* retrieve response parameters */
9625 + rsp_params = (struct dpni_rsp_get_qdid *)cmd.params;
9626 + *qdid = le16_to_cpu(rsp_params->qdid);
9627 +
9628 + return 0;
9629 +}
9630 +
9631 +/**
9632 + * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
9633 + * @mc_io: Pointer to MC portal's I/O object
9634 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9635 + * @token: Token of DPNI object
9636 + * @data_offset: Tx data offset (from start of buffer)
9637 + *
9638 + * Return: '0' on Success; Error code otherwise.
9639 + */
9640 +int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
9641 + u32 cmd_flags,
9642 + u16 token,
9643 + u16 *data_offset)
9644 +{
9645 + struct fsl_mc_command cmd = { 0 };
9646 + struct dpni_rsp_get_tx_data_offset *rsp_params;
9647 + int err;
9648 +
9649 + /* prepare command */
9650 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
9651 + cmd_flags,
9652 + token);
9653 +
9654 + /* send command to mc*/
9655 + err = mc_send_command(mc_io, &cmd);
9656 + if (err)
9657 + return err;
9658 +
9659 + /* retrieve response parameters */
9660 + rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params;
9661 + *data_offset = le16_to_cpu(rsp_params->data_offset);
9662 +
9663 + return 0;
9664 +}
9665 +
9666 +/**
9667 + * dpni_set_link_cfg() - set the link configuration.
9668 + * @mc_io: Pointer to MC portal's I/O object
9669 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9670 + * @token: Token of DPNI object
9671 + * @cfg: Link configuration
9672 + *
9673 + * Return: '0' on Success; Error code otherwise.
9674 + */
9675 +int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
9676 + u32 cmd_flags,
9677 + u16 token,
9678 + const struct dpni_link_cfg *cfg)
9679 +{
9680 + struct fsl_mc_command cmd = { 0 };
9681 + struct dpni_cmd_set_link_cfg *cmd_params;
9682 +
9683 + /* prepare command */
9684 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
9685 + cmd_flags,
9686 + token);
9687 + cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params;
9688 + cmd_params->rate = cpu_to_le32(cfg->rate);
9689 + cmd_params->options = cpu_to_le64(cfg->options);
9690 +
9691 + /* send command to mc*/
9692 + return mc_send_command(mc_io, &cmd);
9693 +}
9694 +
9695 +/**
9696 + * dpni_get_link_state() - Return the link state (either up or down)
9697 + * @mc_io: Pointer to MC portal's I/O object
9698 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9699 + * @token: Token of DPNI object
9700 + * @state: Returned link state;
9701 + *
9702 + * Return: '0' on Success; Error code otherwise.
9703 + */
9704 +int dpni_get_link_state(struct fsl_mc_io *mc_io,
9705 + u32 cmd_flags,
9706 + u16 token,
9707 + struct dpni_link_state *state)
9708 +{
9709 + struct fsl_mc_command cmd = { 0 };
9710 + struct dpni_rsp_get_link_state *rsp_params;
9711 + int err;
9712 +
9713 + /* prepare command */
9714 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
9715 + cmd_flags,
9716 + token);
9717 +
9718 + /* send command to mc*/
9719 + err = mc_send_command(mc_io, &cmd);
9720 + if (err)
9721 + return err;
9722 +
9723 + /* retrieve response parameters */
9724 + rsp_params = (struct dpni_rsp_get_link_state *)cmd.params;
9725 + state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
9726 + state->rate = le32_to_cpu(rsp_params->rate);
9727 + state->options = le64_to_cpu(rsp_params->options);
9728 +
9729 + return 0;
9730 +}
9731 +
9732 +/**
9733 + * dpni_set_tx_shaping() - Set the transmit shaping
9734 + * @mc_io: Pointer to MC portal's I/O object
9735 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9736 + * @token: Token of DPNI object
9737 + * @tx_cr_shaper: TX committed rate shaping configuration
9738 + * @tx_er_shaper: TX excess rate shaping configuration
9739 + * @coupled: Committed and excess rate shapers are coupled
9740 + *
9741 + * Return: '0' on Success; Error code otherwise.
9742 + */
9743 +int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
9744 + u32 cmd_flags,
9745 + u16 token,
9746 + const struct dpni_tx_shaping_cfg *tx_cr_shaper,
9747 + const struct dpni_tx_shaping_cfg *tx_er_shaper,
9748 + int coupled)
9749 +{
9750 + struct fsl_mc_command cmd = { 0 };
9751 + struct dpni_cmd_set_tx_shaping *cmd_params;
9752 +
9753 + /* prepare command */
9754 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
9755 + cmd_flags,
9756 + token);
9757 + cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params;
9758 + cmd_params->tx_cr_max_burst_size =
9759 + cpu_to_le16(tx_cr_shaper->max_burst_size);
9760 + cmd_params->tx_er_max_burst_size =
9761 + cpu_to_le16(tx_er_shaper->max_burst_size);
9762 + cmd_params->tx_cr_rate_limit = cpu_to_le32(tx_cr_shaper->rate_limit);
9763 + cmd_params->tx_er_rate_limit = cpu_to_le32(tx_er_shaper->rate_limit);
9764 + dpni_set_field(cmd_params->coupled, COUPLED, coupled);
9765 +
9766 + /* send command to mc*/
9767 + return mc_send_command(mc_io, &cmd);
9768 +}
9769 +
9770 +/**
9771 + * dpni_set_max_frame_length() - Set the maximum received frame length.
9772 + * @mc_io: Pointer to MC portal's I/O object
9773 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9774 + * @token: Token of DPNI object
9775 + * @max_frame_length: Maximum received frame length (in
9776 + * bytes); frame is discarded if its
9777 + * length exceeds this value
9778 + *
9779 + * Return: '0' on Success; Error code otherwise.
9780 + */
9781 +int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
9782 + u32 cmd_flags,
9783 + u16 token,
9784 + u16 max_frame_length)
9785 +{
9786 + struct fsl_mc_command cmd = { 0 };
9787 + struct dpni_cmd_set_max_frame_length *cmd_params;
9788 +
9789 + /* prepare command */
9790 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH,
9791 + cmd_flags,
9792 + token);
9793 + cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params;
9794 + cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
9795 +
9796 + /* send command to mc*/
9797 + return mc_send_command(mc_io, &cmd);
9798 +}
9799 +
9800 +/**
9801 + * dpni_get_max_frame_length() - Get the maximum received frame length.
9802 + * @mc_io: Pointer to MC portal's I/O object
9803 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9804 + * @token: Token of DPNI object
9805 + * @max_frame_length: Maximum received frame length (in
9806 + * bytes); frame is discarded if its
9807 + * length exceeds this value
9808 + *
9809 + * Return: '0' on Success; Error code otherwise.
9810 + */
9811 +int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
9812 + u32 cmd_flags,
9813 + u16 token,
9814 + u16 *max_frame_length)
9815 +{
9816 + struct fsl_mc_command cmd = { 0 };
9817 + struct dpni_rsp_get_max_frame_length *rsp_params;
9818 + int err;
9819 +
9820 + /* prepare command */
9821 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
9822 + cmd_flags,
9823 + token);
9824 +
9825 + /* send command to mc*/
9826 + err = mc_send_command(mc_io, &cmd);
9827 + if (err)
9828 + return err;
9829 +
9830 + /* retrieve response parameters */
9831 + rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params;
9832 + *max_frame_length = le16_to_cpu(rsp_params->max_frame_length);
9833 +
9834 + return 0;
9835 +}
9836 +
9837 +/**
9838 + * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
9839 + * @mc_io: Pointer to MC portal's I/O object
9840 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9841 + * @token: Token of DPNI object
9842 + * @en: Set to '1' to enable; '0' to disable
9843 + *
9844 + * Return: '0' on Success; Error code otherwise.
9845 + */
9846 +int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
9847 + u32 cmd_flags,
9848 + u16 token,
9849 + int en)
9850 +{
9851 + struct fsl_mc_command cmd = { 0 };
9852 + struct dpni_cmd_set_multicast_promisc *cmd_params;
9853 +
9854 + /* prepare command */
9855 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC,
9856 + cmd_flags,
9857 + token);
9858 + cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params;
9859 + dpni_set_field(cmd_params->enable, ENABLE, en);
9860 +
9861 + /* send command to mc*/
9862 + return mc_send_command(mc_io, &cmd);
9863 +}
9864 +
9865 +/**
9866 + * dpni_get_multicast_promisc() - Get multicast promiscuous mode
9867 + * @mc_io: Pointer to MC portal's I/O object
9868 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9869 + * @token: Token of DPNI object
9870 + * @en: Returns '1' if enabled; '0' otherwise
9871 + *
9872 + * Return: '0' on Success; Error code otherwise.
9873 + */
9874 +int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
9875 + u32 cmd_flags,
9876 + u16 token,
9877 + int *en)
9878 +{
9879 + struct fsl_mc_command cmd = { 0 };
9880 + struct dpni_rsp_get_multicast_promisc *rsp_params;
9881 + int err;
9882 +
9883 + /* prepare command */
9884 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
9885 + cmd_flags,
9886 + token);
9887 +
9888 + /* send command to mc*/
9889 + err = mc_send_command(mc_io, &cmd);
9890 + if (err)
9891 + return err;
9892 +
9893 + /* retrieve response parameters */
9894 + rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params;
9895 + *en = dpni_get_field(rsp_params->enabled, ENABLE);
9896 +
9897 + return 0;
9898 +}
9899 +
9900 +/**
9901 + * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
9902 + * @mc_io: Pointer to MC portal's I/O object
9903 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9904 + * @token: Token of DPNI object
9905 + * @en: Set to '1' to enable; '0' to disable
9906 + *
9907 + * Return: '0' on Success; Error code otherwise.
9908 + */
9909 +int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
9910 + u32 cmd_flags,
9911 + u16 token,
9912 + int en)
9913 +{
9914 + struct fsl_mc_command cmd = { 0 };
9915 + struct dpni_cmd_set_unicast_promisc *cmd_params;
9916 +
9917 + /* prepare command */
9918 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
9919 + cmd_flags,
9920 + token);
9921 + cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params;
9922 + dpni_set_field(cmd_params->enable, ENABLE, en);
9923 +
9924 + /* send command to mc*/
9925 + return mc_send_command(mc_io, &cmd);
9926 +}
9927 +
9928 +/**
9929 + * dpni_get_unicast_promisc() - Get unicast promiscuous mode
9930 + * @mc_io: Pointer to MC portal's I/O object
9931 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9932 + * @token: Token of DPNI object
9933 + * @en: Returns '1' if enabled; '0' otherwise
9934 + *
9935 + * Return: '0' on Success; Error code otherwise.
9936 + */
9937 +int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
9938 + u32 cmd_flags,
9939 + u16 token,
9940 + int *en)
9941 +{
9942 + struct fsl_mc_command cmd = { 0 };
9943 + struct dpni_rsp_get_unicast_promisc *rsp_params;
9944 + int err;
9945 +
9946 + /* prepare command */
9947 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
9948 + cmd_flags,
9949 + token);
9950 +
9951 + /* send command to mc*/
9952 + err = mc_send_command(mc_io, &cmd);
9953 + if (err)
9954 + return err;
9955 +
9956 + /* retrieve response parameters */
9957 + rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params;
9958 + *en = dpni_get_field(rsp_params->enabled, ENABLE);
9959 +
9960 + return 0;
9961 +}
9962 +
9963 +/**
9964 + * dpni_set_primary_mac_addr() - Set the primary MAC address
9965 + * @mc_io: Pointer to MC portal's I/O object
9966 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9967 + * @token: Token of DPNI object
9968 + * @mac_addr: MAC address to set as primary address
9969 + *
9970 + * Return: '0' on Success; Error code otherwise.
9971 + */
9972 +int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
9973 + u32 cmd_flags,
9974 + u16 token,
9975 + const u8 mac_addr[6])
9976 +{
9977 + struct fsl_mc_command cmd = { 0 };
9978 + struct dpni_cmd_set_primary_mac_addr *cmd_params;
9979 + int i;
9980 +
9981 + /* prepare command */
9982 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
9983 + cmd_flags,
9984 + token);
9985 + cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params;
9986 + for (i = 0; i < 6; i++)
9987 + cmd_params->mac_addr[i] = mac_addr[5 - i];
9988 +
9989 + /* send command to mc*/
9990 + return mc_send_command(mc_io, &cmd);
9991 +}
9992 +
9993 +/**
9994 + * dpni_get_primary_mac_addr() - Get the primary MAC address
9995 + * @mc_io: Pointer to MC portal's I/O object
9996 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
9997 + * @token: Token of DPNI object
9998 + * @mac_addr: Returned MAC address
9999 + *
10000 + * Return: '0' on Success; Error code otherwise.
10001 + */
10002 +int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
10003 + u32 cmd_flags,
10004 + u16 token,
10005 + u8 mac_addr[6])
10006 +{
10007 + struct fsl_mc_command cmd = { 0 };
10008 + struct dpni_rsp_get_primary_mac_addr *rsp_params;
10009 + int i, err;
10010 +
10011 + /* prepare command */
10012 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
10013 + cmd_flags,
10014 + token);
10015 +
10016 + /* send command to mc*/
10017 + err = mc_send_command(mc_io, &cmd);
10018 + if (err)
10019 + return err;
10020 +
10021 + /* retrieve response parameters */
10022 + rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params;
10023 + for (i = 0; i < 6; i++)
10024 + mac_addr[5 - i] = rsp_params->mac_addr[i];
10025 +
10026 + return 0;
10027 +}
10028 +
10029 +/**
10030 + * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical
10031 + * port the DPNI is attached to
10032 + * @mc_io: Pointer to MC portal's I/O object
10033 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10034 + * @token: Token of DPNI object
10035 + * @mac_addr: MAC address of the physical port, if any, otherwise 0
10036 + *
10037 + * The primary MAC address is not cleared by this operation.
10038 + *
10039 + * Return: '0' on Success; Error code otherwise.
10040 + */
10041 +int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
10042 + u32 cmd_flags,
10043 + u16 token,
10044 + u8 mac_addr[6])
10045 +{
10046 + struct fsl_mc_command cmd = { 0 };
10047 + struct dpni_rsp_get_port_mac_addr *rsp_params;
10048 + int i, err;
10049 +
10050 + /* prepare command */
10051 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR,
10052 + cmd_flags,
10053 + token);
10054 +
10055 + /* send command to mc*/
10056 + err = mc_send_command(mc_io, &cmd);
10057 + if (err)
10058 + return err;
10059 +
10060 + /* retrieve response parameters */
10061 + rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params;
10062 + for (i = 0; i < 6; i++)
10063 + mac_addr[5 - i] = rsp_params->mac_addr[i];
10064 +
10065 + return 0;
10066 +}
10067 +
10068 +/**
10069 + * dpni_add_mac_addr() - Add MAC address filter
10070 + * @mc_io: Pointer to MC portal's I/O object
10071 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10072 + * @token: Token of DPNI object
10073 + * @mac_addr: MAC address to add
10074 + *
10075 + * Return: '0' on Success; Error code otherwise.
10076 + */
10077 +int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
10078 + u32 cmd_flags,
10079 + u16 token,
10080 + const u8 mac_addr[6])
10081 +{
10082 + struct fsl_mc_command cmd = { 0 };
10083 + struct dpni_cmd_add_mac_addr *cmd_params;
10084 + int i;
10085 +
10086 + /* prepare command */
10087 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
10088 + cmd_flags,
10089 + token);
10090 + cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params;
10091 + for (i = 0; i < 6; i++)
10092 + cmd_params->mac_addr[i] = mac_addr[5 - i];
10093 +
10094 + /* send command to mc*/
10095 + return mc_send_command(mc_io, &cmd);
10096 +}
10097 +
10098 +/**
10099 + * dpni_remove_mac_addr() - Remove MAC address filter
10100 + * @mc_io: Pointer to MC portal's I/O object
10101 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10102 + * @token: Token of DPNI object
10103 + * @mac_addr: MAC address to remove
10104 + *
10105 + * Return: '0' on Success; Error code otherwise.
10106 + */
10107 +int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
10108 + u32 cmd_flags,
10109 + u16 token,
10110 + const u8 mac_addr[6])
10111 +{
10112 + struct fsl_mc_command cmd = { 0 };
10113 + struct dpni_cmd_remove_mac_addr *cmd_params;
10114 + int i;
10115 +
10116 + /* prepare command */
10117 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
10118 + cmd_flags,
10119 + token);
10120 + cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params;
10121 + for (i = 0; i < 6; i++)
10122 + cmd_params->mac_addr[i] = mac_addr[5 - i];
10123 +
10124 + /* send command to mc*/
10125 + return mc_send_command(mc_io, &cmd);
10126 +}
10127 +
10128 +/**
10129 + * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
10130 + * @mc_io: Pointer to MC portal's I/O object
10131 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10132 + * @token: Token of DPNI object
10133 + * @unicast: Set to '1' to clear unicast addresses
10134 + * @multicast: Set to '1' to clear multicast addresses
10135 + *
10136 + * The primary MAC address is not cleared by this operation.
10137 + *
10138 + * Return: '0' on Success; Error code otherwise.
10139 + */
10140 +int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
10141 + u32 cmd_flags,
10142 + u16 token,
10143 + int unicast,
10144 + int multicast)
10145 +{
10146 + struct fsl_mc_command cmd = { 0 };
10147 + struct dpni_cmd_clear_mac_filters *cmd_params;
10148 +
10149 + /* prepare command */
10150 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
10151 + cmd_flags,
10152 + token);
10153 + cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params;
10154 + dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast);
10155 + dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast);
10156 +
10157 + /* send command to mc*/
10158 + return mc_send_command(mc_io, &cmd);
10159 +}
10160 +
10161 +/**
10162 + * dpni_set_tx_priorities() - Set transmission TC priority configuration
10163 + * @mc_io: Pointer to MC portal's I/O object
10164 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10165 + * @token: Token of DPNI object
10166 + * @cfg: Transmission selection configuration
10167 + *
10168 + * warning: Allowed only when DPNI is disabled
10169 + *
10170 + * Return: '0' on Success; Error code otherwise.
10171 + */
10172 +int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
10173 + u32 cmd_flags,
10174 + u16 token,
10175 + const struct dpni_tx_priorities_cfg *cfg)
10176 +{
10177 + struct dpni_cmd_set_tx_priorities *cmd_params;
10178 + struct fsl_mc_command cmd = { 0 };
10179 + int i;
10180 +
10181 + /* prepare command */
10182 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_PRIORITIES,
10183 + cmd_flags,
10184 + token);
10185 + cmd_params = (struct dpni_cmd_set_tx_priorities *)cmd.params;
10186 + dpni_set_field(cmd_params->flags,
10187 + SEPARATE_GRP,
10188 + cfg->separate_groups);
10189 + cmd_params->prio_group_A = cfg->prio_group_A;
10190 + cmd_params->prio_group_B = cfg->prio_group_B;
10191 +
10192 + for (i = 0; i + 1 < DPNI_MAX_TC; i += 2) {
10193 + dpni_set_field(cmd_params->modes[i / 2],
10194 + MODE_1,
10195 + cfg->tc_sched[i].mode);
10196 + dpni_set_field(cmd_params->modes[i / 2],
10197 + MODE_2,
10198 + cfg->tc_sched[i + 1].mode);
10199 + }
10200 +
10201 + for (i = 0; i < DPNI_MAX_TC; i++) {
10202 + cmd_params->delta_bandwidth[i] =
10203 + cpu_to_le16(cfg->tc_sched[i].delta_bandwidth);
10204 + }
10205 +
10206 + /* send command to mc*/
10207 + return mc_send_command(mc_io, &cmd);
10208 +}
10209 +
10210 +/**
10211 + * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
10212 + * @mc_io: Pointer to MC portal's I/O object
10213 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10214 + * @token: Token of DPNI object
10215 + * @tc_id: Traffic class selection (0-7)
10216 + * @cfg: Traffic class distribution configuration
10217 + *
10218 + * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg()
10219 + * first to prepare the key_cfg_iova parameter
10220 + *
10221 + * Return: '0' on Success; error code otherwise.
10222 + */
10223 +int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
10224 + u32 cmd_flags,
10225 + u16 token,
10226 + u8 tc_id,
10227 + const struct dpni_rx_tc_dist_cfg *cfg)
10228 +{
10229 + struct fsl_mc_command cmd = { 0 };
10230 + struct dpni_cmd_set_rx_tc_dist *cmd_params;
10231 +
10232 + /* prepare command */
10233 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
10234 + cmd_flags,
10235 + token);
10236 + cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params;
10237 + cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
10238 + cmd_params->tc_id = tc_id;
10239 + dpni_set_field(cmd_params->flags, DIST_MODE, cfg->dist_mode);
10240 + dpni_set_field(cmd_params->flags, MISS_ACTION, cfg->fs_cfg.miss_action);
10241 + cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id);
10242 + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
10243 +
10244 + /* send command to mc*/
10245 + return mc_send_command(mc_io, &cmd);
10246 +}
10247 +
10248 +/**
10249 + * dpni_set_qos_table() - Set QoS mapping table
10250 + * @mc_io: Pointer to MC portal's I/O object
10251 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10252 + * @token: Token of DPNI object
10253 + * @cfg: QoS table configuration
10254 + *
10255 + * This function and all QoS-related functions require that
10256 + * 'max_tcs > 1' was set at DPNI creation.
10257 + *
10258 + * warning: Before calling this function, call dpkg_prepare_key_cfg() to
10259 + * prepare the key_cfg_iova parameter
10260 + *
10261 + * Return: '0' on Success; Error code otherwise.
10262 + */
10263 +int dpni_set_qos_table(struct fsl_mc_io *mc_io,
10264 + u32 cmd_flags,
10265 + u16 token,
10266 + const struct dpni_qos_tbl_cfg *cfg)
10267 +{
10268 + struct dpni_cmd_set_qos_table *cmd_params;
10269 + struct fsl_mc_command cmd = { 0 };
10270 +
10271 + /* prepare command */
10272 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
10273 + cmd_flags,
10274 + token);
10275 + cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params;
10276 + cmd_params->default_tc = cfg->default_tc;
10277 + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
10278 + dpni_set_field(cmd_params->discard_on_miss,
10279 + ENABLE,
10280 + cfg->discard_on_miss);
10281 +
10282 + /* send command to mc*/
10283 + return mc_send_command(mc_io, &cmd);
10284 +}
10285 +
10286 +/**
10287 + * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
10288 + * @mc_io: Pointer to MC portal's I/O object
10289 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10290 + * @token: Token of DPNI object
10291 + * @cfg: QoS rule to add
10292 + * @tc_id: Traffic class selection (0-7)
10293 + * @index: Location in the QoS table where to insert the entry.
10294 + * Only relevant if MASKING is enabled for QoS classification on
10295 + * this DPNI, it is ignored for exact match.
10296 + *
10297 + * Return: '0' on Success; Error code otherwise.
10298 + */
10299 +int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
10300 + u32 cmd_flags,
10301 + u16 token,
10302 + const struct dpni_rule_cfg *cfg,
10303 + u8 tc_id,
10304 + u16 index)
10305 +{
10306 + struct dpni_cmd_add_qos_entry *cmd_params;
10307 + struct fsl_mc_command cmd = { 0 };
10308 +
10309 + /* prepare command */
10310 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
10311 + cmd_flags,
10312 + token);
10313 + cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params;
10314 + cmd_params->tc_id = tc_id;
10315 + cmd_params->key_size = cfg->key_size;
10316 + cmd_params->index = cpu_to_le16(index);
10317 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
10318 + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
10319 +
10320 + /* send command to mc*/
10321 + return mc_send_command(mc_io, &cmd);
10322 +}
10323 +
10324 +/**
10325 + * dpni_remove_qos_entry() - Remove QoS mapping entry
10326 + * @mc_io: Pointer to MC portal's I/O object
10327 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10328 + * @token: Token of DPNI object
10329 + * @cfg: QoS rule to remove
10330 + *
10331 + * Return: '0' on Success; Error code otherwise.
10332 + */
10333 +int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
10334 + u32 cmd_flags,
10335 + u16 token,
10336 + const struct dpni_rule_cfg *cfg)
10337 +{
10338 + struct dpni_cmd_remove_qos_entry *cmd_params;
10339 + struct fsl_mc_command cmd = { 0 };
10340 +
10341 + /* prepare command */
10342 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT,
10343 + cmd_flags,
10344 + token);
10345 + cmd_params = (struct dpni_cmd_remove_qos_entry *)cmd.params;
10346 + cmd_params->key_size = cfg->key_size;
10347 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
10348 + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
10349 +
10350 + /* send command to mc*/
10351 + return mc_send_command(mc_io, &cmd);
10352 +}
10353 +
10354 +/**
10355 + * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
10356 + * (to select a flow ID)
10357 + * @mc_io: Pointer to MC portal's I/O object
10358 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10359 + * @token: Token of DPNI object
10360 + * @tc_id: Traffic class selection (0-7)
10361 + * @index:	Location in the FS table where to insert the entry.
10362 + *		Only relevant if MASKING is enabled for FS
10363 + *		classification on this DPNI, it is ignored for exact match.
10364 + * @cfg: Flow steering rule to add
10365 + * @action: Action to be taken as result of a classification hit
10366 + *
10367 + * Return: '0' on Success; Error code otherwise.
10368 + */
10369 +int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
10370 + u32 cmd_flags,
10371 + u16 token,
10372 + u8 tc_id,
10373 + u16 index,
10374 + const struct dpni_rule_cfg *cfg,
10375 + const struct dpni_fs_action_cfg *action)
10376 +{
10377 + struct dpni_cmd_add_fs_entry *cmd_params;
10378 + struct fsl_mc_command cmd = { 0 };
10379 +
10380 + /* prepare command */
10381 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
10382 + cmd_flags,
10383 + token);
10384 + cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
10385 + cmd_params->tc_id = tc_id;
10386 + cmd_params->key_size = cfg->key_size;
10387 + cmd_params->index = cpu_to_le16(index);
10388 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
10389 + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
10390 + cmd_params->options = cpu_to_le16(action->options);
10391 + cmd_params->flow_id = cpu_to_le16(action->flow_id);
10392 + cmd_params->flc = cpu_to_le64(action->flc);
10393 +
10394 + /* send command to mc*/
10395 + return mc_send_command(mc_io, &cmd);
10396 +}
10397 +
10398 +/**
10399 + * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
10400 + * traffic class
10401 + * @mc_io: Pointer to MC portal's I/O object
10402 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10403 + * @token: Token of DPNI object
10404 + * @tc_id: Traffic class selection (0-7)
10405 + * @cfg: Flow steering rule to remove
10406 + *
10407 + * Return: '0' on Success; Error code otherwise.
10408 + */
10409 +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
10410 + u32 cmd_flags,
10411 + u16 token,
10412 + u8 tc_id,
10413 + const struct dpni_rule_cfg *cfg)
10414 +{
10415 + struct dpni_cmd_remove_fs_entry *cmd_params;
10416 + struct fsl_mc_command cmd = { 0 };
10417 +
10418 + /* prepare command */
10419 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
10420 + cmd_flags,
10421 + token);
10422 + cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
10423 + cmd_params->tc_id = tc_id;
10424 + cmd_params->key_size = cfg->key_size;
10425 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
10426 + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
10427 +
10428 + /* send command to mc*/
10429 + return mc_send_command(mc_io, &cmd);
10430 +}
10431 +
10432 +/**
10433 + * dpni_set_congestion_notification() - Set traffic class congestion
10434 + * notification configuration
10435 + * @mc_io: Pointer to MC portal's I/O object
10436 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10437 + * @token: Token of DPNI object
10438 + * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
10439 + * @tc_id: Traffic class selection (0-7)
10440 + * @cfg: Congestion notification configuration
10441 + *
10442 + * Return: '0' on Success; error code otherwise.
10443 + */
10444 +int dpni_set_congestion_notification(
10445 + struct fsl_mc_io *mc_io,
10446 + u32 cmd_flags,
10447 + u16 token,
10448 + enum dpni_queue_type qtype,
10449 + u8 tc_id,
10450 + const struct dpni_congestion_notification_cfg *cfg)
10451 +{
10452 + struct dpni_cmd_set_congestion_notification *cmd_params;
10453 + struct fsl_mc_command cmd = { 0 };
10454 +
10455 + /* prepare command */
10456 + cmd.header = mc_encode_cmd_header(
10457 + DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
10458 + cmd_flags,
10459 + token);
10460 + cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
10461 + cmd_params->qtype = qtype;
10462 + cmd_params->tc = tc_id;
10463 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
10464 + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
10465 + cmd_params->dest_priority = cfg->dest_cfg.priority;
10466 + dpni_set_field(cmd_params->type_units, DEST_TYPE,
10467 + cfg->dest_cfg.dest_type);
10468 + dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units);
10469 + cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
10470 + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
10471 + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
10472 + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
10473 +
10474 + /* send command to mc*/
10475 + return mc_send_command(mc_io, &cmd);
10476 +}
10477 +
10478 +/**
10479 + * dpni_get_congestion_notification() - Get traffic class congestion
10480 + * notification configuration
10481 + * @mc_io: Pointer to MC portal's I/O object
10482 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10483 + * @token: Token of DPNI object
10484 + * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
10485 + * @tc_id: bits 7-4 contain ceetm channel index (valid only for TX);
10486 + * bits 3-0 contain traffic class.
10487 + * Use macro DPNI_BUILD_CH_TC() to build correct value for
10488 + * tc_id parameter.
10489 + * @cfg: congestion notification configuration
10490 + *
10491 + * Return: '0' on Success; error code otherwise.
10492 + */
10493 +int dpni_get_congestion_notification(
10494 + struct fsl_mc_io *mc_io,
10495 + u32 cmd_flags,
10496 + u16 token,
10497 + enum dpni_queue_type qtype,
10498 + u8 tc_id,
10499 + struct dpni_congestion_notification_cfg *cfg)
10500 +{
10501 + struct dpni_rsp_get_congestion_notification *rsp_params;
10502 + struct dpni_cmd_get_congestion_notification *cmd_params;
10503 + struct fsl_mc_command cmd = { 0 };
10504 + int err;
10505 +
10506 + /* prepare command */
10507 + cmd.header = mc_encode_cmd_header(
10508 + DPNI_CMDID_GET_CONGESTION_NOTIFICATION,
10509 + cmd_flags,
10510 + token);
10511 + cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params;
10512 + cmd_params->qtype = qtype;
10513 + cmd_params->tc = tc_id;
10514 +
10515 + /* send command to mc*/
10516 + err = mc_send_command(mc_io, &cmd);
10517 + if (err)
10518 + return err;
10519 +
10520 + rsp_params = (struct dpni_rsp_get_congestion_notification *)cmd.params;
10521 + cfg->units = dpni_get_field(rsp_params->type_units, CONG_UNITS);
10522 + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
10523 + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
10524 + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
10525 + cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
10526 + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
10527 + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
10528 + cfg->dest_cfg.priority = rsp_params->dest_priority;
10529 + cfg->dest_cfg.dest_type = dpni_get_field(rsp_params->type_units,
10530 + DEST_TYPE);
10531 +
10532 + return 0;
10533 +}
10534 +
10535 +/**
10536 + * dpni_set_queue() - Set queue parameters
10537 + * @mc_io: Pointer to MC portal's I/O object
10538 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10539 + * @token: Token of DPNI object
10540 + * @qtype: Type of queue - all queue types are supported, although
10541 + * the command is ignored for Tx
10542 + * @tc: Traffic class, in range 0 to NUM_TCS - 1
10543 + * @index: Selects the specific queue out of the set allocated for the
10544 + * same TC. Value must be in range 0 to NUM_QUEUES - 1
10545 + * @options: A combination of DPNI_QUEUE_OPT_ values that control what
10546 + * configuration options are set on the queue
10547 + * @queue: Queue structure
10548 + *
10549 + * Return: '0' on Success; Error code otherwise.
10550 + */
10551 +int dpni_set_queue(struct fsl_mc_io *mc_io,
10552 + u32 cmd_flags,
10553 + u16 token,
10554 + enum dpni_queue_type qtype,
10555 + u8 tc,
10556 + u8 index,
10557 + u8 options,
10558 + const struct dpni_queue *queue)
10559 +{
10560 + struct fsl_mc_command cmd = { 0 };
10561 + struct dpni_cmd_set_queue *cmd_params;
10562 +
10563 + /* prepare command */
10564 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE,
10565 + cmd_flags,
10566 + token);
10567 + cmd_params = (struct dpni_cmd_set_queue *)cmd.params;
10568 + cmd_params->qtype = qtype;
10569 + cmd_params->tc = tc;
10570 + cmd_params->index = index;
10571 + cmd_params->options = options;
10572 + cmd_params->dest_id = cpu_to_le32(queue->destination.id);
10573 + cmd_params->dest_prio = queue->destination.priority;
10574 + dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type);
10575 + dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control);
10576 + dpni_set_field(cmd_params->flags, HOLD_ACTIVE,
10577 + queue->destination.hold_active);
10578 + cmd_params->flc = cpu_to_le64(queue->flc.value);
10579 + cmd_params->user_context = cpu_to_le64(queue->user_context);
10580 +
10581 + /* send command to mc */
10582 + return mc_send_command(mc_io, &cmd);
10583 +}
10584 +
10585 +/**
10586 + * dpni_get_queue() - Get queue parameters
10587 + * @mc_io: Pointer to MC portal's I/O object
10588 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10589 + * @token: Token of DPNI object
10590 + * @qtype: Type of queue - all queue types are supported
10591 + * @tc: Traffic class, in range 0 to NUM_TCS - 1
10592 + * @index: Selects the specific queue out of the set allocated for the
10593 + * same TC. Value must be in range 0 to NUM_QUEUES - 1
10594 + * @queue: Queue configuration structure
10595 + * @qid: Queue identification
10596 + *
10597 + * Return: '0' on Success; Error code otherwise.
10598 + */
10599 +int dpni_get_queue(struct fsl_mc_io *mc_io,
10600 + u32 cmd_flags,
10601 + u16 token,
10602 + enum dpni_queue_type qtype,
10603 + u8 tc,
10604 + u8 index,
10605 + struct dpni_queue *queue,
10606 + struct dpni_queue_id *qid)
10607 +{
10608 + struct fsl_mc_command cmd = { 0 };
10609 + struct dpni_cmd_get_queue *cmd_params;
10610 + struct dpni_rsp_get_queue *rsp_params;
10611 + int err;
10612 +
10613 + /* prepare command */
10614 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE,
10615 + cmd_flags,
10616 + token);
10617 + cmd_params = (struct dpni_cmd_get_queue *)cmd.params;
10618 + cmd_params->qtype = qtype;
10619 + cmd_params->tc = tc;
10620 + cmd_params->index = index;
10621 +
10622 + /* send command to mc */
10623 + err = mc_send_command(mc_io, &cmd);
10624 + if (err)
10625 + return err;
10626 +
10627 + /* retrieve response parameters */
10628 + rsp_params = (struct dpni_rsp_get_queue *)cmd.params;
10629 + queue->destination.id = le32_to_cpu(rsp_params->dest_id);
10630 + queue->destination.priority = rsp_params->dest_prio;
10631 + queue->destination.type = dpni_get_field(rsp_params->flags,
10632 + DEST_TYPE);
10633 + queue->flc.stash_control = dpni_get_field(rsp_params->flags,
10634 + STASH_CTRL);
10635 + queue->destination.hold_active = dpni_get_field(rsp_params->flags,
10636 + HOLD_ACTIVE);
10637 + queue->flc.value = le64_to_cpu(rsp_params->flc);
10638 + queue->user_context = le64_to_cpu(rsp_params->user_context);
10639 + qid->fqid = le32_to_cpu(rsp_params->fqid);
10640 + qid->qdbin = le16_to_cpu(rsp_params->qdbin);
10641 +
10642 + return 0;
10643 +}
10644 +
10645 +/**
10646 + * dpni_get_statistics() - Get DPNI statistics
10647 + * @mc_io: Pointer to MC portal's I/O object
10648 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10649 + * @token: Token of DPNI object
10650 + * @page: Selects the statistics page to retrieve, see
10651 + * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2.
10652 + * @param: Custom parameter for some pages used to select a certain
10653 + * statistic source, for example the TC.
10654 + * @stat: Structure containing the statistics
10655 + *
10656 + * Return: '0' on Success; Error code otherwise.
10657 + */
10658 +int dpni_get_statistics(struct fsl_mc_io *mc_io,
10659 + u32 cmd_flags,
10660 + u16 token,
10661 + u8 page,
10662 + u8 param,
10663 + union dpni_statistics *stat)
10664 +{
10665 + struct fsl_mc_command cmd = { 0 };
10666 + struct dpni_cmd_get_statistics *cmd_params;
10667 + struct dpni_rsp_get_statistics *rsp_params;
10668 + int i, err;
10669 +
10670 + /* prepare command */
10671 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS,
10672 + cmd_flags,
10673 + token);
10674 + cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
10675 + cmd_params->page_number = page;
10676 + cmd_params->param = param;
10677 +
10678 + /* send command to mc */
10679 + err = mc_send_command(mc_io, &cmd);
10680 + if (err)
10681 + return err;
10682 +
10683 + /* retrieve response parameters */
10684 + rsp_params = (struct dpni_rsp_get_statistics *)cmd.params;
10685 + for (i = 0; i < DPNI_STATISTICS_CNT; i++)
10686 + stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]);
10687 +
10688 + return 0;
10689 +}
10690 +
10691 +/**
10692 + * dpni_reset_statistics() - Clears DPNI statistics
10693 + * @mc_io: Pointer to MC portal's I/O object
10694 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10695 + * @token: Token of DPNI object
10696 + *
10697 + * Return: '0' on Success; Error code otherwise.
10698 + */
10699 +int dpni_reset_statistics(struct fsl_mc_io *mc_io,
10700 + u32 cmd_flags,
10701 + u16 token)
10702 +{
10703 + struct fsl_mc_command cmd = { 0 };
10704 +
10705 + /* prepare command */
10706 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS,
10707 + cmd_flags,
10708 + token);
10709 +
10710 + /* send command to mc*/
10711 + return mc_send_command(mc_io, &cmd);
10712 +}
10713 +
10714 +/**
10715 + * dpni_set_taildrop() - Set taildrop per queue or TC
10716 + * @mc_io: Pointer to MC portal's I/O object
10717 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10718 + * @token: Token of DPNI object
10719 + * @cg_point: Congestion point
10720 + * @q_type: Queue type on which the taildrop is configured.
10721 + * Only Rx queues are supported for now
10722 + * @tc: bits 7-4 contain ceetm channel index (valid only for TX);
10723 + * bits 3-0 contain traffic class.
10724 + * Use macro DPNI_BUILD_CH_TC() to build correct value for
10725 + * tc parameter.
10726 + * @q_index: Index of the queue if the DPNI supports multiple queues for
10727 + * traffic distribution. Ignored if CONGESTION_POINT is not 0.
10728 + * @taildrop: Taildrop structure
10729 + *
10730 + * Return: '0' on Success; Error code otherwise.
10731 + */
10732 +int dpni_set_taildrop(struct fsl_mc_io *mc_io,
10733 + u32 cmd_flags,
10734 + u16 token,
10735 + enum dpni_congestion_point cg_point,
10736 + enum dpni_queue_type qtype,
10737 + u8 tc,
10738 + u8 index,
10739 + struct dpni_taildrop *taildrop)
10740 +{
10741 + struct fsl_mc_command cmd = { 0 };
10742 + struct dpni_cmd_set_taildrop *cmd_params;
10743 +
10744 + /* prepare command */
10745 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP,
10746 + cmd_flags,
10747 + token);
10748 + cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params;
10749 + cmd_params->congestion_point = cg_point;
10750 + cmd_params->qtype = qtype;
10751 + cmd_params->tc = tc;
10752 + cmd_params->index = index;
10753 + dpni_set_field(cmd_params->enable, ENABLE, taildrop->enable);
10754 + cmd_params->units = taildrop->units;
10755 + cmd_params->threshold = cpu_to_le32(taildrop->threshold);
10756 +
10757 + /* send command to mc */
10758 + return mc_send_command(mc_io, &cmd);
10759 +}
10760 +
10761 +/**
10762 + * dpni_get_taildrop() - Get taildrop information
10763 + * @mc_io: Pointer to MC portal's I/O object
10764 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10765 + * @token: Token of DPNI object
10766 + * @cg_point: Congestion point
10767 + * @q_type: Queue type on which the taildrop is configured.
10768 + * Only Rx queues are supported for now
10769 + * @tc: bits 7-4 contain ceetm channel index (valid only for TX);
10770 + * bits 3-0 contain traffic class.
10771 + * Use macro DPNI_BUILD_CH_TC() to build correct value for
10772 + * tc parameter.
10773 + * @q_index: Index of the queue if the DPNI supports multiple queues for
10774 + * traffic distribution. Ignored if CONGESTION_POINT is not 0.
10775 + * @taildrop: Taildrop structure
10776 + *
10777 + * Return: '0' on Success; Error code otherwise.
10778 + */
10779 +int dpni_get_taildrop(struct fsl_mc_io *mc_io,
10780 + u32 cmd_flags,
10781 + u16 token,
10782 + enum dpni_congestion_point cg_point,
10783 + enum dpni_queue_type qtype,
10784 + u8 tc,
10785 + u8 index,
10786 + struct dpni_taildrop *taildrop)
10787 +{
10788 + struct fsl_mc_command cmd = { 0 };
10789 + struct dpni_cmd_get_taildrop *cmd_params;
10790 + struct dpni_rsp_get_taildrop *rsp_params;
10791 + int err;
10792 +
10793 + /* prepare command */
10794 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP,
10795 + cmd_flags,
10796 + token);
10797 + cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params;
10798 + cmd_params->congestion_point = cg_point;
10799 + cmd_params->qtype = qtype;
10800 + cmd_params->tc = tc;
10801 + cmd_params->index = index;
10802 +
10803 + /* send command to mc */
10804 + err = mc_send_command(mc_io, &cmd);
10805 + if (err)
10806 + return err;
10807 +
10808 + /* retrieve response parameters */
10809 + rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params;
10810 + taildrop->enable = dpni_get_field(rsp_params->enable, ENABLE);
10811 + taildrop->units = rsp_params->units;
10812 + taildrop->threshold = le32_to_cpu(rsp_params->threshold);
10813 +
10814 + return 0;
10815 +}
10816 +
10817 +/**
10818 + * dpni_get_api_version() - Get Data Path Network Interface API version
10819 + * @mc_io: Pointer to MC portal's I/O object
10820 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10821 + * @major_ver: Major version of data path network interface API
10822 + * @minor_ver: Minor version of data path network interface API
10823 + *
10824 + * Return: '0' on Success; Error code otherwise.
10825 + */
10826 +int dpni_get_api_version(struct fsl_mc_io *mc_io,
10827 + u32 cmd_flags,
10828 + u16 *major_ver,
10829 + u16 *minor_ver)
10830 +{
10831 + struct dpni_rsp_get_api_version *rsp_params;
10832 + struct fsl_mc_command cmd = { 0 };
10833 + int err;
10834 +
10835 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION,
10836 + cmd_flags, 0);
10837 +
10838 + err = mc_send_command(mc_io, &cmd);
10839 + if (err)
10840 + return err;
10841 +
10842 + rsp_params = (struct dpni_rsp_get_api_version *)cmd.params;
10843 + *major_ver = le16_to_cpu(rsp_params->major);
10844 + *minor_ver = le16_to_cpu(rsp_params->minor);
10845 +
10846 + return 0;
10847 +}
10848 +
10849 +/**
10850 + * dpni_set_rx_fs_dist() - Set Rx traffic class FS distribution
10851 + * @mc_io: Pointer to MC portal's I/O object
10852 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10853 + * @token: Token of DPNI object
10854 + * @cfg: Distribution configuration
10855 + * If the FS is already enabled with a previous call the classification
10856 + * key will be changed but all the table rules are kept. If the
10857 + * existing rules do not match the key the results will not be
10858 + * predictable. It is the user responsibility to keep key integrity.
10859 + * If cfg.enable is set to 1 the command will create a flow steering table
10860 + * and will classify packets according to this table. The packets that
10861 + * miss all the table rules will be classified according to settings
10862 + * made in dpni_set_rx_hash_dist()
10863 + * If cfg.enable is set to 0 the command will clear flow steering table.
10864 + * The packets will be classified according to settings made in
10865 + * dpni_set_rx_hash_dist()
10866 + */
10867 +int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
10868 + u32 cmd_flags,
10869 + u16 token,
10870 + const struct dpni_rx_dist_cfg *cfg)
10871 +{
10872 + struct dpni_cmd_set_rx_fs_dist *cmd_params;
10873 + struct fsl_mc_command cmd = { 0 };
10874 +
10875 + /* prepare command */
10876 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST,
10877 + cmd_flags,
10878 + token);
10879 + cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params;
10880 + cmd_params->dist_size = le16_to_cpu(cfg->dist_size);
10881 + dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
10882 + cmd_params->tc = cfg->tc;
10883 + cmd_params->miss_flow_id = le16_to_cpu(cfg->fs_miss_flow_id);
10884 + cmd_params->key_cfg_iova = le64_to_cpu(cfg->key_cfg_iova);
10885 +
10886 + /* send command to mc*/
10887 + return mc_send_command(mc_io, &cmd);
10888 +}
10889 +
10890 +/**
10891 + * dpni_set_rx_hash_dist() - Set Rx traffic class HASH distribution
10892 + * @mc_io: Pointer to MC portal's I/O object
10893 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10894 + * @token: Token of DPNI object
10895 + * @cfg: Distribution configuration
10896 + * If cfg.enable is set to 1 the packets will be classified using a hash
10897 + * function based on the key received in cfg.key_cfg_iova parameter.
10898 + * If cfg.enable is set to 0 the packets will be sent to the queue configured
10899 + * in dpni_set_rx_dist_default_queue() call
10900 + */
10901 +int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
10902 + u32 cmd_flags,
10903 + u16 token,
10904 + const struct dpni_rx_dist_cfg *cfg)
10905 +{
10906 + struct dpni_cmd_set_rx_hash_dist *cmd_params;
10907 + struct fsl_mc_command cmd = { 0 };
10908 +
10909 + /* prepare command */
10910 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST,
10911 + cmd_flags,
10912 + token);
10913 + cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params;
10914 + cmd_params->dist_size = le16_to_cpu(cfg->dist_size);
10915 + dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
10916 + cmd_params->tc = cfg->tc;
10917 + cmd_params->key_cfg_iova = le64_to_cpu(cfg->key_cfg_iova);
10918 +
10919 + /* send command to mc*/
10920 + return mc_send_command(mc_io, &cmd);
10921 +}
10922 --- /dev/null
10923 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
10924 @@ -0,0 +1,1172 @@
10925 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
10926 + * Copyright 2016 NXP
10927 + *
10928 + * Redistribution and use in source and binary forms, with or without
10929 + * modification, are permitted provided that the following conditions are met:
10930 + * * Redistributions of source code must retain the above copyright
10931 + * notice, this list of conditions and the following disclaimer.
10932 + * * Redistributions in binary form must reproduce the above copyright
10933 + * notice, this list of conditions and the following disclaimer in the
10934 + * documentation and/or other materials provided with the distribution.
10935 + * * Neither the name of the above-listed copyright holders nor the
10936 + * names of any contributors may be used to endorse or promote products
10937 + * derived from this software without specific prior written permission.
10938 + *
10939 + *
10940 + * ALTERNATIVELY, this software may be distributed under the terms of the
10941 + * GNU General Public License ("GPL") as published by the Free Software
10942 + * Foundation, either version 2 of that License or (at your option) any
10943 + * later version.
10944 + *
10945 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
10946 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
10947 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
10948 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
10949 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
10950 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
10951 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
10952 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
10953 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
10954 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
10955 + * POSSIBILITY OF SUCH DAMAGE.
10956 + */
10957 +#ifndef __FSL_DPNI_H
10958 +#define __FSL_DPNI_H
10959 +
10960 +#include "dpkg.h"
10961 +
10962 +struct fsl_mc_io;
10963 +
10964 +/**
10965 + * Data Path Network Interface API
10966 + * Contains initialization APIs and runtime control APIs for DPNI
10967 + */
10968 +
10969 +/** General DPNI macros */
10970 +
10971 +/**
10972 + * Maximum number of traffic classes
10973 + */
10974 +#define DPNI_MAX_TC 8
10975 +/**
10976 + * Maximum number of buffer pools per DPNI
10977 + */
10978 +#define DPNI_MAX_DPBP 8
10979 +/**
10980 + * Maximum number of senders
10981 + */
10982 +#define DPNI_MAX_SENDERS 16
10983 +/**
10984 + * Maximum distribution size
10985 + */
10986 +#define DPNI_MAX_DIST_SIZE 16
10987 +
10988 +/**
10989 + * All traffic classes considered; see dpni_set_queue()
10990 + */
10991 +#define DPNI_ALL_TCS (u8)(-1)
10992 +/**
10993 + * All flows within traffic class considered; see dpni_set_queue()
10994 + */
10995 +#define DPNI_ALL_TC_FLOWS (u16)(-1)
10996 +/**
10997 + * Generate new flow ID; see dpni_set_queue()
10998 + */
10999 +#define DPNI_NEW_FLOW_ID (u16)(-1)
11000 +
11001 +/**
11002 + * Tx traffic is always released to a buffer pool on transmit, there are no
11003 + * resources allocated to have the frames confirmed back to the source after
11004 + * transmission.
11005 + */
11006 +#define DPNI_OPT_TX_FRM_RELEASE 0x000001
11007 +/**
11008 + * Disables support for MAC address filtering for addresses other than primary
11009 + * MAC address. This affects both unicast and multicast. Promiscuous mode can
11010 + * still be enabled/disabled for both unicast and multicast. If promiscuous mode
11011 + * is disabled, only traffic matching the primary MAC address will be accepted.
11012 + */
11013 +#define DPNI_OPT_NO_MAC_FILTER 0x000002
11014 +/**
11015 + * Allocate policers for this DPNI. They can be used to rate-limit traffic per
11016 + * traffic class (TC) basis.
11017 + */
11018 +#define DPNI_OPT_HAS_POLICING 0x000004
11019 +/**
11020 + * Congestion can be managed in several ways, allowing the buffer pool to
11021 + * deplete on ingress, taildrop on each queue or use congestion groups for sets
11022 + * of queues. If set, it configures a single congestion groups across all TCs.
11023 + * If reset, a congestion group is allocated for each TC. Only relevant if the
11024 + * DPNI has multiple traffic classes.
11025 + */
11026 +#define DPNI_OPT_SHARED_CONGESTION 0x000008
11027 +/**
11028 + * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all
11029 + * look-ups are exact match. Note that TCAM is not available on LS1088 and its
11030 + * variants. Setting this bit on these SoCs will trigger an error.
11031 + */
11032 +#define DPNI_OPT_HAS_KEY_MASKING 0x000010
11033 +/**
11034 + * Disables the flow steering table.
11035 + */
11036 +#define DPNI_OPT_NO_FS 0x000020
11037 +
11038 +int dpni_open(struct fsl_mc_io *mc_io,
11039 + u32 cmd_flags,
11040 + int dpni_id,
11041 + u16 *token);
11042 +
11043 +int dpni_close(struct fsl_mc_io *mc_io,
11044 + u32 cmd_flags,
11045 + u16 token);
11046 +
11047 +/**
11048 + * struct dpni_pools_cfg - Structure representing buffer pools configuration
11049 + * @num_dpbp: Number of DPBPs
11050 + * @pools: Array of buffer pools parameters; The number of valid entries
11051 + * must match 'num_dpbp' value
11052 + */
11053 +struct dpni_pools_cfg {
11054 + u8 num_dpbp;
11055 + /**
11056 + * struct pools - Buffer pools parameters
11057 + * @dpbp_id: DPBP object ID
11058 + * @priority_mask: priorities served by DPBP
11059 + * @buffer_size: Buffer size
11060 + * @backup_pool: Backup pool
11061 + */
11062 + struct {
11063 + u16 dpbp_id;
11064 + u8 priority_mask;
11065 + u16 buffer_size;
11066 + u8 backup_pool;
11067 + } pools[DPNI_MAX_DPBP];
11068 +};
11069 +
11070 +int dpni_set_pools(struct fsl_mc_io *mc_io,
11071 + u32 cmd_flags,
11072 + u16 token,
11073 + const struct dpni_pools_cfg *cfg);
11074 +
11075 +int dpni_enable(struct fsl_mc_io *mc_io,
11076 + u32 cmd_flags,
11077 + u16 token);
11078 +
11079 +int dpni_disable(struct fsl_mc_io *mc_io,
11080 + u32 cmd_flags,
11081 + u16 token);
11082 +
11083 +int dpni_is_enabled(struct fsl_mc_io *mc_io,
11084 + u32 cmd_flags,
11085 + u16 token,
11086 + int *en);
11087 +
11088 +int dpni_reset(struct fsl_mc_io *mc_io,
11089 + u32 cmd_flags,
11090 + u16 token);
11091 +
11092 +/**
11093 + * DPNI IRQ Index and Events
11094 + */
11095 +
11096 +/**
11097 + * IRQ index
11098 + */
11099 +#define DPNI_IRQ_INDEX 0
11100 +/**
11101 + * IRQ event - indicates a change in link state
11102 + */
11103 +#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
11104 +
11105 +int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
11106 + u32 cmd_flags,
11107 + u16 token,
11108 + u8 irq_index,
11109 + u8 en);
11110 +
11111 +int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
11112 + u32 cmd_flags,
11113 + u16 token,
11114 + u8 irq_index,
11115 + u8 *en);
11116 +
11117 +int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
11118 + u32 cmd_flags,
11119 + u16 token,
11120 + u8 irq_index,
11121 + u32 mask);
11122 +
11123 +int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
11124 + u32 cmd_flags,
11125 + u16 token,
11126 + u8 irq_index,
11127 + u32 *mask);
11128 +
11129 +int dpni_get_irq_status(struct fsl_mc_io *mc_io,
11130 + u32 cmd_flags,
11131 + u16 token,
11132 + u8 irq_index,
11133 + u32 *status);
11134 +
11135 +int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
11136 + u32 cmd_flags,
11137 + u16 token,
11138 + u8 irq_index,
11139 + u32 status);
11140 +
11141 +/**
11142 + * struct dpni_attr - Structure representing DPNI attributes
11143 + * @options: Any combination of the following options:
11144 + * DPNI_OPT_TX_FRM_RELEASE
11145 + * DPNI_OPT_NO_MAC_FILTER
11146 + * DPNI_OPT_HAS_POLICING
11147 + * DPNI_OPT_SHARED_CONGESTION
11148 + * DPNI_OPT_HAS_KEY_MASKING
11149 + * DPNI_OPT_NO_FS
11150 + * @num_queues: Number of Tx and Rx queues used for traffic distribution.
11151 + * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI.
11152 + * @mac_filter_entries: Number of entries in the MAC address filtering table.
11153 + * @vlan_filter_entries: Number of entries in the VLAN address filtering table.
11154 + * @qos_entries: Number of entries in the QoS classification table.
11155 + * @fs_entries: Number of entries in the flow steering table.
11156 + * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger
11157 + * than this when adding QoS entries will result in an error.
11158 + * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a
11159 + * key larger than this when composing the hash + FS key will
11160 + * result in an error.
11161 + * @wriop_version: Version of WRIOP HW block. The 3 version values are stored
11162 + * on 6, 5, 5 bits respectively.
11163 + */
11164 +struct dpni_attr {
11165 + u32 options;
11166 + u8 num_queues;
11167 + u8 num_tcs;
11168 + u8 mac_filter_entries;
11169 + u8 vlan_filter_entries;
11170 + u8 qos_entries;
11171 + u16 fs_entries;
11172 + u8 qos_key_size;
11173 + u8 fs_key_size;
11174 + u16 wriop_version;
11175 +};
11176 +
11177 +int dpni_get_attributes(struct fsl_mc_io *mc_io,
11178 + u32 cmd_flags,
11179 + u16 token,
11180 + struct dpni_attr *attr);
11181 +
11182 +/**
11183 + * DPNI errors
11184 + */
11185 +
11186 +/**
11187 + * Extract out of frame header error
11188 + */
11189 +#define DPNI_ERROR_EOFHE 0x00020000
11190 +/**
11191 + * Frame length error
11192 + */
11193 +#define DPNI_ERROR_FLE 0x00002000
11194 +/**
11195 + * Frame physical error
11196 + */
11197 +#define DPNI_ERROR_FPE 0x00001000
11198 +/**
11199 + * Parsing header error
11200 + */
11201 +#define DPNI_ERROR_PHE 0x00000020
11202 +/**
11203 + * Parser L3 checksum error
11204 + */
11205 +#define DPNI_ERROR_L3CE 0x00000004
11206 +/**
11207 + * Parser L3 checksum error
11208 + */
11209 +#define DPNI_ERROR_L4CE 0x00000001
11210 +
11211 +/**
11212 + * enum dpni_error_action - Defines DPNI behavior for errors
11213 + * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
11214 + * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
11215 + * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
11216 + */
11217 +enum dpni_error_action {
11218 + DPNI_ERROR_ACTION_DISCARD = 0,
11219 + DPNI_ERROR_ACTION_CONTINUE = 1,
11220 + DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
11221 +};
11222 +
11223 +/**
11224 + * struct dpni_error_cfg - Structure representing DPNI errors treatment
11225 + * @errors: Errors mask; use 'DPNI_ERROR__<X>
11226 + * @error_action: The desired action for the errors mask
11227 + * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
11228 + * status (FAS); relevant only for the non-discard action
11229 + */
11230 +struct dpni_error_cfg {
11231 + u32 errors;
11232 + enum dpni_error_action error_action;
11233 + int set_frame_annotation;
11234 +};
11235 +
11236 +int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
11237 + u32 cmd_flags,
11238 + u16 token,
11239 + struct dpni_error_cfg *cfg);
11240 +
11241 +/**
11242 + * DPNI buffer layout modification options
11243 + */
11244 +
11245 +/**
11246 + * Select to modify the time-stamp setting
11247 + */
11248 +#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
11249 +/**
11250 + * Select to modify the parser-result setting; not applicable for Tx
11251 + */
11252 +#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
11253 +/**
11254 + * Select to modify the frame-status setting
11255 + */
11256 +#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
11257 +/**
11258 + * Select to modify the private-data-size setting
11259 + */
11260 +#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
11261 +/**
11262 + * Select to modify the data-alignment setting
11263 + */
11264 +#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
11265 +/**
11266 + * Select to modify the data-head-room setting
11267 + */
11268 +#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
11269 +/**
11270 + * Select to modify the data-tail-room setting
11271 + */
11272 +#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
11273 +
11274 +/**
11275 + * struct dpni_buffer_layout - Structure representing DPNI buffer layout
11276 + * @options: Flags representing the suggested modifications to the buffer
11277 + * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
11278 + * @pass_timestamp: Pass timestamp value
11279 + * @pass_parser_result: Pass parser results
11280 + * @pass_frame_status: Pass frame status
11281 + * @private_data_size: Size kept for private data (in bytes)
11282 + * @data_align: Data alignment
11283 + * @data_head_room: Data head room
11284 + * @data_tail_room: Data tail room
11285 + */
11286 +struct dpni_buffer_layout {
11287 + u32 options;
11288 + int pass_timestamp;
11289 + int pass_parser_result;
11290 + int pass_frame_status;
11291 + u16 private_data_size;
11292 + u16 data_align;
11293 + u16 data_head_room;
11294 + u16 data_tail_room;
11295 +};
11296 +
11297 +/**
11298 + * enum dpni_queue_type - Identifies a type of queue targeted by the command
11299 + * @DPNI_QUEUE_RX: Rx queue
11300 + * @DPNI_QUEUE_TX: Tx queue
11301 + * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
11302 + * @DPNI_QUEUE_RX_ERR: Rx error queue
11303 + */enum dpni_queue_type {
11304 + DPNI_QUEUE_RX,
11305 + DPNI_QUEUE_TX,
11306 + DPNI_QUEUE_TX_CONFIRM,
11307 + DPNI_QUEUE_RX_ERR,
11308 +};
11309 +
11310 +int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
11311 + u32 cmd_flags,
11312 + u16 token,
11313 + enum dpni_queue_type qtype,
11314 + struct dpni_buffer_layout *layout);
11315 +
11316 +int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
11317 + u32 cmd_flags,
11318 + u16 token,
11319 + enum dpni_queue_type qtype,
11320 + const struct dpni_buffer_layout *layout);
11321 +
11322 +/**
11323 + * enum dpni_offload - Identifies a type of offload targeted by the command
11324 + * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation
11325 + * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation
11326 + * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation
11327 + * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation
11328 + */
11329 +enum dpni_offload {
11330 + DPNI_OFF_RX_L3_CSUM,
11331 + DPNI_OFF_RX_L4_CSUM,
11332 + DPNI_OFF_TX_L3_CSUM,
11333 + DPNI_OFF_TX_L4_CSUM,
11334 +};
11335 +
11336 +int dpni_set_offload(struct fsl_mc_io *mc_io,
11337 + u32 cmd_flags,
11338 + u16 token,
11339 + enum dpni_offload type,
11340 + u32 config);
11341 +
11342 +int dpni_get_offload(struct fsl_mc_io *mc_io,
11343 + u32 cmd_flags,
11344 + u16 token,
11345 + enum dpni_offload type,
11346 + u32 *config);
11347 +
11348 +int dpni_get_qdid(struct fsl_mc_io *mc_io,
11349 + u32 cmd_flags,
11350 + u16 token,
11351 + enum dpni_queue_type qtype,
11352 + u16 *qdid);
11353 +
11354 +int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
11355 + u32 cmd_flags,
11356 + u16 token,
11357 + u16 *data_offset);
11358 +
11359 +#define DPNI_STATISTICS_CNT 7
11360 +
11361 +union dpni_statistics {
11362 + /**
11363 + * struct page_0 - Page_0 statistics structure
11364 + * @ingress_all_frames: Ingress frame count
11365 + * @ingress_all_bytes: Ingress byte count
11366 + * @ingress_multicast_frames: Ingress multicast frame count
11367 + * @ingress_multicast_bytes: Ingress multicast byte count
11368 + * @ingress_broadcast_frames: Ingress broadcast frame count
11369 + * @ingress_broadcast_bytes: Ingress broadcast byte count
11370 + */
11371 + struct {
11372 + u64 ingress_all_frames;
11373 + u64 ingress_all_bytes;
11374 + u64 ingress_multicast_frames;
11375 + u64 ingress_multicast_bytes;
11376 + u64 ingress_broadcast_frames;
11377 + u64 ingress_broadcast_bytes;
11378 + } page_0;
11379 + /**
11380 + * struct page_1 - Page_1 statistics structure
11381 + * @egress_all_frames: Egress frame count
11382 + * @egress_all_bytes: Egress byte count
11383 + * @egress_multicast_frames: Egress multicast frame count
11384 + * @egress_multicast_bytes: Egress multicast byte count
11385 + * @egress_broadcast_frames: Egress broadcast frame count
11386 + * @egress_broadcast_bytes: Egress broadcast byte count
11387 + */
11388 + struct {
11389 + u64 egress_all_frames;
11390 + u64 egress_all_bytes;
11391 + u64 egress_multicast_frames;
11392 + u64 egress_multicast_bytes;
11393 + u64 egress_broadcast_frames;
11394 + u64 egress_broadcast_bytes;
11395 + } page_1;
11396 + /**
11397 + * struct page_2 - Page_2 statistics structure
11398 + * @ingress_filtered_frames: Ingress filtered frame count
11399 + * @ingress_discarded_frames: Ingress discarded frame count
11400 + * @ingress_nobuffer_discards: Ingress discarded frame count
11401 + * due to lack of buffers
11402 + * @egress_discarded_frames: Egress discarded frame count
11403 + * @egress_confirmed_frames: Egress confirmed frame count
11404 + */
11405 + struct {
11406 + u64 ingress_filtered_frames;
11407 + u64 ingress_discarded_frames;
11408 + u64 ingress_nobuffer_discards;
11409 + u64 egress_discarded_frames;
11410 + u64 egress_confirmed_frames;
11411 + } page_2;
11412 + /**
11413 + * struct page_3 - Page_3 statistics structure with values for the
11414 + * selected TC
11415 + * @ceetm_dequeue_bytes: Cumulative count of the number of bytes
11416 + * dequeued
11417 + * @ceetm_dequeue_frames: Cumulative count of the number of frames
11418 + * dequeued
11419 + * @ceetm_reject_bytes: Cumulative count of the number of bytes in all
11420 + * frames whose enqueue was rejected
11421 + * @ceetm_reject_frames: Cumulative count of all frame enqueues
11422 + * rejected
11423 + */
11424 + struct {
11425 + u64 ceetm_dequeue_bytes;
11426 + u64 ceetm_dequeue_frames;
11427 + u64 ceetm_reject_bytes;
11428 + u64 ceetm_reject_frames;
11429 + } page_3;
11430 + /**
11431 + * struct raw - raw statistics structure
11432 + */
11433 + struct {
11434 + u64 counter[DPNI_STATISTICS_CNT];
11435 + } raw;
11436 +};
11437 +
11438 +int dpni_get_statistics(struct fsl_mc_io *mc_io,
11439 + u32 cmd_flags,
11440 + u16 token,
11441 + u8 page,
11442 + u8 param,
11443 + union dpni_statistics *stat);
11444 +
11445 +int dpni_reset_statistics(struct fsl_mc_io *mc_io,
11446 + u32 cmd_flags,
11447 + u16 token);
11448 +
11449 +/**
11450 + * Enable auto-negotiation
11451 + */
11452 +#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL
11453 +/**
11454 + * Enable half-duplex mode
11455 + */
11456 +#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
11457 +/**
11458 + * Enable pause frames
11459 + */
11460 +#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL
11461 +/**
11462 + * Enable a-symmetric pause frames
11463 + */
11464 +#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
11465 +/**
11466 + * Enable priority flow control pause frames
11467 + */
11468 +#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL
11469 +
11470 +/**
11471 + * struct - Structure representing DPNI link configuration
11472 + * @rate: Rate
11473 + * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
11474 + */
11475 +struct dpni_link_cfg {
11476 + u32 rate;
11477 + u64 options;
11478 +};
11479 +
11480 +int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
11481 + u32 cmd_flags,
11482 + u16 token,
11483 + const struct dpni_link_cfg *cfg);
11484 +
11485 +/**
11486 + * struct dpni_link_state - Structure representing DPNI link state
11487 + * @rate: Rate
11488 + * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
11489 + * @up: Link state; '0' for down, '1' for up
11490 + */
11491 +struct dpni_link_state {
11492 + u32 rate;
11493 + u64 options;
11494 + int up;
11495 +};
11496 +
11497 +int dpni_get_link_state(struct fsl_mc_io *mc_io,
11498 + u32 cmd_flags,
11499 + u16 token,
11500 + struct dpni_link_state *state);
11501 +
11502 +/**
11503 + * struct dpni_tx_shaping - Structure representing DPNI tx shaping configuration
11504 + * @rate_limit: rate in Mbps
11505 + * @max_burst_size: burst size in bytes (up to 64KB)
11506 + */
11507 +struct dpni_tx_shaping_cfg {
11508 + u32 rate_limit;
11509 + u16 max_burst_size;
11510 +};
11511 +
11512 +int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
11513 + u32 cmd_flags,
11514 + u16 token,
11515 + const struct dpni_tx_shaping_cfg *tx_cr_shaper,
11516 + const struct dpni_tx_shaping_cfg *tx_er_shaper,
11517 + int coupled);
11518 +
11519 +int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
11520 + u32 cmd_flags,
11521 + u16 token,
11522 + u16 max_frame_length);
11523 +
11524 +int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
11525 + u32 cmd_flags,
11526 + u16 token,
11527 + u16 *max_frame_length);
11528 +
11529 +int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
11530 + u32 cmd_flags,
11531 + u16 token,
11532 + int en);
11533 +
11534 +int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
11535 + u32 cmd_flags,
11536 + u16 token,
11537 + int *en);
11538 +
11539 +int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
11540 + u32 cmd_flags,
11541 + u16 token,
11542 + int en);
11543 +
11544 +int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
11545 + u32 cmd_flags,
11546 + u16 token,
11547 + int *en);
11548 +
11549 +int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
11550 + u32 cmd_flags,
11551 + u16 token,
11552 + const u8 mac_addr[6]);
11553 +
11554 +int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
11555 + u32 cmd_flags,
11556 + u16 token,
11557 + u8 mac_addr[6]);
11558 +
11559 +int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
11560 + u32 cm_flags,
11561 + u16 token,
11562 + u8 mac_addr[6]);
11563 +
11564 +int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
11565 + u32 cmd_flags,
11566 + u16 token,
11567 + const u8 mac_addr[6]);
11568 +
11569 +int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
11570 + u32 cmd_flags,
11571 + u16 token,
11572 + const u8 mac_addr[6]);
11573 +
11574 +int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
11575 + u32 cmd_flags,
11576 + u16 token,
11577 + int unicast,
11578 + int multicast);
11579 +
11580 +/**
11581 + * enum dpni_dist_mode - DPNI distribution mode
11582 + * @DPNI_DIST_MODE_NONE: No distribution
11583 + * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
11584 + * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
11585 + * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
11586 + * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
11587 + */
11588 +enum dpni_dist_mode {
11589 + DPNI_DIST_MODE_NONE = 0,
11590 + DPNI_DIST_MODE_HASH = 1,
11591 + DPNI_DIST_MODE_FS = 2
11592 +};
11593 +
11594 +/**
11595 + * enum dpni_fs_miss_action - DPNI Flow Steering miss action
11596 + * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
11597 + * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
11598 + * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
11599 + */
11600 +enum dpni_fs_miss_action {
11601 + DPNI_FS_MISS_DROP = 0,
11602 + DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
11603 + DPNI_FS_MISS_HASH = 2
11604 +};
11605 +
11606 +/**
11607 + * struct dpni_fs_tbl_cfg - Flow Steering table configuration
11608 + * @miss_action: Miss action selection
11609 + * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
11610 + */
11611 +struct dpni_fs_tbl_cfg {
11612 + enum dpni_fs_miss_action miss_action;
11613 + u16 default_flow_id;
11614 +};
11615 +
11616 +int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
11617 + u8 *key_cfg_buf);
11618 +
11619 +/**
11620 + * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
11621 + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
11622 + * key extractions to be used as the QoS criteria by calling
11623 + * dpkg_prepare_key_cfg()
11624 + * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
11625 + * '0' to use the 'default_tc' in such cases
11626 + * @default_tc: Used in case of no-match and 'discard_on_miss'= 0
11627 + */
11628 +struct dpni_qos_tbl_cfg {
11629 + u64 key_cfg_iova;
11630 + int discard_on_miss;
11631 + u8 default_tc;
11632 +};
11633 +
11634 +int dpni_set_qos_table(struct fsl_mc_io *mc_io,
11635 + u32 cmd_flags,
11636 + u16 token,
11637 + const struct dpni_qos_tbl_cfg *cfg);
11638 +
11639 +/**
11640 + * enum dpni_tx_schedule_mode - DPNI Tx scheduling mode
11641 + * @DPNI_TX_SCHED_STRICT_PRIORITY: strict priority
11642 + * @DPNI_TX_SCHED_WEIGHTED_A: weighted based scheduling in group A
11643 + * @DPNI_TX_SCHED_WEIGHTED_B: weighted based scheduling in group B
11644 + */
11645 +enum dpni_tx_schedule_mode {
11646 + DPNI_TX_SCHED_STRICT_PRIORITY = 0,
11647 + DPNI_TX_SCHED_WEIGHTED_A,
11648 + DPNI_TX_SCHED_WEIGHTED_B,
11649 +};
11650 +
11651 +/**
11652 + * struct dpni_tx_schedule_cfg - Structure representing Tx scheduling conf
11653 + * @mode: Scheduling mode
11654 + * @delta_bandwidth: Bandwidth represented in weights from 100 to 10000;
11655 + * not applicable for 'strict-priority' mode;
11656 + */
11657 +struct dpni_tx_schedule_cfg {
11658 + enum dpni_tx_schedule_mode mode;
11659 + u16 delta_bandwidth;
11660 +};
11661 +
11662 +/**
11663 + * struct dpni_tx_priorities_cfg - Structure representing transmission
11664 + * priorities for DPNI TCs
11665 + * @tc_sched: An array of traffic-classes
11666 + * @prio_group_A: Priority of group A
11667 + * @prio_group_B: Priority of group B
11668 + * @separate_groups: Treat A and B groups as separate
11669 + * (ceetm channel selection is done via the DPNI_BUILD_CH_TC() macro)
11670 + */
11671 +struct dpni_tx_priorities_cfg {
11672 + struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC];
11673 + u8 prio_group_A;
11674 + u8 prio_group_B;
11675 + u8 separate_groups;
11676 +};
11677 +
11678 +int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
11679 + u32 cmd_flags,
11680 + u16 token,
11681 + const struct dpni_tx_priorities_cfg *cfg);
11682 +
11683 +/**
11684 + * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
11685 + * @dist_size: Set the distribution size;
11686 + * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
11687 + * 112,128,192,224,256,384,448,512,768,896,1024
11688 + * @dist_mode: Distribution mode
11689 + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
11690 + * the extractions to be used for the distribution key by calling
11691 + * dpni_prepare_key_cfg() relevant only when
11692 + * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
11693 + * @fs_cfg: Flow Steering table configuration; only relevant if
11694 + * 'dist_mode = DPNI_DIST_MODE_FS'
11695 + */
11696 +struct dpni_rx_tc_dist_cfg {
11697 + u16 dist_size;
11698 + enum dpni_dist_mode dist_mode;
11699 + u64 key_cfg_iova;
11700 + struct dpni_fs_tbl_cfg fs_cfg;
11701 +};
11702 +
11703 +int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
11704 + u32 cmd_flags,
11705 + u16 token,
11706 + u8 tc_id,
11707 + const struct dpni_rx_tc_dist_cfg *cfg);
11708 +
11709 +/**
11710 + * enum dpni_dest - DPNI destination types
11711 + * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
11712 + * does not generate FQDAN notifications; user is expected to
11713 + * dequeue from the queue based on polling or other user-defined
11714 + * method
11715 + * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
11716 + * notifications to the specified DPIO; user is expected to dequeue
11717 + * from the queue only after notification is received
11718 + * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
11719 + * FQDAN notifications, but is connected to the specified DPCON
11720 + * object; user is expected to dequeue from the DPCON channel
11721 + */
11722 +enum dpni_dest {
11723 + DPNI_DEST_NONE = 0,
11724 + DPNI_DEST_DPIO = 1,
11725 + DPNI_DEST_DPCON = 2
11726 +};
11727 +
11728 +/**
11729 + * struct dpni_queue - Queue structure
11730 + * @user_context: User data, presented to the user along with any frames from
11731 + * this queue. Not relevant for Tx queues.
11732 + */
11733 +struct dpni_queue {
11734 +/**
11735 + * struct destination - Destination structure
11736 + * @id: ID of the destination, only relevant if DEST_TYPE is > 0.
11737 + * Identifies either a DPIO or a DPCON object. Not relevant for
11738 + * Tx queues.
11739 + * @type: May be one of the following:
11740 + * 0 - No destination, queue can be manually queried, but will not
11741 + * push traffic or notifications to a DPIO;
11742 + * 1 - The destination is a DPIO. When traffic becomes available in
11743 + * the queue a FQDAN (FQ data available notification) will be
11744 + * generated to selected DPIO;
11745 + * 2 - The destination is a DPCON. The queue is associated with a
11746 + * DPCON object for the purpose of scheduling between multiple
11747 + * queues. The DPCON may be independently configured to
11748 + * generate notifications. Not relevant for Tx queues.
11749 + * @hold_active: Hold active, maintains a queue scheduled for longer
11750 + * in a DPIO during dequeue to reduce spread of traffic.
11751 + * Only relevant if queues are not affined to a single DPIO.
11752 + */
11753 + struct {
11754 + u16 id;
11755 + enum dpni_dest type;
11756 + char hold_active;
11757 + u8 priority;
11758 + } destination;
11759 + u64 user_context;
11760 + struct {
11761 + u64 value;
11762 + char stash_control;
11763 + } flc;
11764 +};
11765 +
11766 +/**
11767 + * struct dpni_queue_id - Queue identification, used for enqueue commands
11768 + * or queue control
11769 + * @fqid: FQID used for enqueueing to and/or configuration of this specific FQ
11770 + * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI. Only relevant
11771 + * for Tx queues.
11772 + */
11773 +struct dpni_queue_id {
11774 + u32 fqid;
11775 + u16 qdbin;
11776 +};
11777 +
11778 +/**
11779 + * OR-able options for dpni_set_queue(); select which dpni_queue fields apply
11780 + */
11781 +#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
11782 +#define DPNI_QUEUE_OPT_DEST 0x00000002
11783 +#define DPNI_QUEUE_OPT_FLC 0x00000004
11784 +#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008
11785 +
11786 +int dpni_set_queue(struct fsl_mc_io *mc_io,
11787 + u32 cmd_flags,
11788 + u16 token,
11789 + enum dpni_queue_type qtype,
11790 + u8 tc,
11791 + u8 index,
11792 + u8 options,
11793 + const struct dpni_queue *queue);
11794 +
11795 +int dpni_get_queue(struct fsl_mc_io *mc_io,
11796 + u32 cmd_flags,
11797 + u16 token,
11798 + enum dpni_queue_type qtype,
11799 + u8 tc,
11800 + u8 index,
11801 + struct dpni_queue *queue,
11802 + struct dpni_queue_id *qid);
11803 +
11804 +/**
11805 + * enum dpni_congestion_unit - DPNI congestion units
11806 + * @DPNI_CONGESTION_UNIT_BYTES: bytes units
11807 + * @DPNI_CONGESTION_UNIT_FRAMES: frames units
11808 + */
11809 +enum dpni_congestion_unit {
11810 + DPNI_CONGESTION_UNIT_BYTES = 0,
11811 + DPNI_CONGESTION_UNIT_FRAMES
11812 +};
11813 +
11814 +/**
11815 + * enum dpni_congestion_point - Structure representing congestion point
11816 + * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and
11817 + * QUEUE_INDEX
11818 + * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used to
11819 + * define the DPNI this can be either per TC (default) or per
11820 + * interface (DPNI_OPT_SHARED_CONGESTION set at DPNI create).
11821 + * QUEUE_INDEX is ignored if this type is used.
11822 + */
11823 +enum dpni_congestion_point {
11824 + DPNI_CP_QUEUE,
11825 + DPNI_CP_GROUP,
11826 +};
11827 +
11828 +/**
11829 + * struct dpni_dest_cfg - Structure representing DPNI destination parameters
11830 + * @dest_type: Destination type
11831 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
11832 + * @priority: Priority selection within the DPIO or DPCON channel; valid
11833 + * values are 0-1 or 0-7, depending on the number of priorities
11834 + * in that channel; not relevant for 'DPNI_DEST_NONE' option
11835 + */
11836 +struct dpni_dest_cfg {
11837 + enum dpni_dest dest_type;
11838 + int dest_id;
11839 + u8 priority;
11840 +};
11841 +
11842 +/* DPNI congestion options */
11843 +
11844 +/**
11845 + * CSCN message is written to message_iova once entering a
11846 + * congestion state (see 'threshold_entry')
11847 + */
11848 +#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001
11849 +/**
11850 + * CSCN message is written to message_iova once exiting a
11851 + * congestion state (see 'threshold_exit')
11852 + */
11853 +#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002
11854 +/**
11855 + * CSCN write will attempt to allocate into a cache (coherent write);
11856 + * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
11857 + */
11858 +#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004
11859 +/**
11860 + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
11861 + * DPIO/DPCON's WQ channel once entering a congestion state
11862 + * (see 'threshold_entry')
11863 + */
11864 +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008
11865 +/**
11866 + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
11867 + * DPIO/DPCON's WQ channel once exiting a congestion state
11868 + * (see 'threshold_exit')
11869 + */
11870 +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010
11871 +/**
11872 + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
11873 + * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
11874 + */
11875 +#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
11876 +/**
11877 + * This congestion will trigger flow control or priority flow control.
11878 + * This will have effect only if flow control is enabled with
11879 + * dpni_set_link_cfg().
11880 + */
11881 +#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040
11882 +
11883 +/**
11884 + * struct dpni_congestion_notification_cfg - congestion notification
11885 + * configuration
11886 + * @units: Units type
11887 + * @threshold_entry: Above this threshold we enter a congestion state.
11888 + * set it to '0' to disable it
11889 + * @threshold_exit: Below this threshold we exit the congestion state.
11890 + * @message_ctx: The context that will be part of the CSCN message
11891 + * @message_iova: I/O virtual address (must be in DMA-able memory),
11892 + * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>'
11893 + * is contained in 'options'
11894 + * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
11895 + * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
11896 + */
11897 +
11898 +struct dpni_congestion_notification_cfg {
11899 + enum dpni_congestion_unit units;
11900 + u32 threshold_entry;
11901 + u32 threshold_exit;
11902 + u64 message_ctx;
11903 + u64 message_iova;
11904 + struct dpni_dest_cfg dest_cfg;
11905 + u16 notification_mode;
11906 +};
11907 +
11908 +/** Compose TC parameter for function dpni_set_congestion_notification()
11909 + * and dpni_get_congestion_notification().
11910 + */
11911 +#define DPNI_BUILD_CH_TC(ceetm_ch_idx, tc) \
11912 + ((((ceetm_ch_idx) & 0x0F) << 4) | ((tc) & 0x0F))
11913 +
11914 +int dpni_set_congestion_notification(
11915 + struct fsl_mc_io *mc_io,
11916 + u32 cmd_flags,
11917 + u16 token,
11918 + enum dpni_queue_type qtype,
11919 + u8 tc_id,
11920 + const struct dpni_congestion_notification_cfg *cfg);
11921 +
11922 +int dpni_get_congestion_notification(
11923 + struct fsl_mc_io *mc_io,
11924 + u32 cmd_flags,
11925 + u16 token,
11926 + enum dpni_queue_type qtype,
11927 + u8 tc_id,
11928 + struct dpni_congestion_notification_cfg *cfg);
11929 +
11930 +/**
11931 + * struct dpni_taildrop - Structure representing the taildrop
11932 + * @enable: Indicates whether the taildrop is active or not.
11933 + * @units: Indicates the unit of THRESHOLD. Queue taildrop only supports
11934 + * byte units, this field is ignored and assumed = 0 if
11935 + * CONGESTION_POINT is 0.
11936 + * @threshold: Threshold value, in units identified by UNITS field. Value 0
11937 + * cannot be used as a valid taildrop threshold, THRESHOLD must
11938 + * be > 0 if the taildrop is enabled.
11939 + */
11940 +struct dpni_taildrop {
11941 + char enable;
11942 + enum dpni_congestion_unit units;
11943 + u32 threshold;
11944 +};
11945 +
11946 +int dpni_set_taildrop(struct fsl_mc_io *mc_io,
11947 + u32 cmd_flags,
11948 + u16 token,
11949 + enum dpni_congestion_point cg_point,
11950 + enum dpni_queue_type q_type,
11951 + u8 tc,
11952 + u8 q_index,
11953 + struct dpni_taildrop *taildrop);
11954 +
11955 +int dpni_get_taildrop(struct fsl_mc_io *mc_io,
11956 + u32 cmd_flags,
11957 + u16 token,
11958 + enum dpni_congestion_point cg_point,
11959 + enum dpni_queue_type q_type,
11960 + u8 tc,
11961 + u8 q_index,
11962 + struct dpni_taildrop *taildrop);
11963 +
11964 +/**
11965 + * struct dpni_rule_cfg - Rule configuration for table lookup
11966 + * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
11967 + * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
11968 + * @key_size: key and mask size (in bytes)
11969 + */
11970 +struct dpni_rule_cfg {
11971 + u64 key_iova;
11972 + u64 mask_iova;
11973 + u8 key_size;
11974 +};
11975 +
11976 +int dpni_get_api_version(struct fsl_mc_io *mc_io,
11977 + u32 cmd_flags,
11978 + u16 *major_ver,
11979 + u16 *minor_ver);
11980 +
11981 +int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
11982 + u32 cmd_flags,
11983 + u16 token,
11984 + const struct dpni_rule_cfg *cfg,
11985 + u8 tc_id,
11986 + u16 index);
11987 +
11988 +int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
11989 + u32 cmd_flags,
11990 + u16 token,
11991 + const struct dpni_rule_cfg *cfg);
11992 +
11993 +int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
11994 + u32 cmd_flags,
11995 + u16 token);
11996 +
11997 +/**
11998 + * Discard matching traffic. If set, this takes precedence over any other
11999 + * configuration and matching traffic is always discarded.
12000 + */
12001 +#define DPNI_FS_OPT_DISCARD 0x1
12002 +
12003 +/**
12004 + * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
12005 + * override the FLC value set per queue.
12006 + * For more details check the Frame Descriptor section in the hardware
12007 + * documentation.
12008 + */
12009 +#define DPNI_FS_OPT_SET_FLC 0x2
12010 +
12011 +/*
12012 + * Indicates whether the 6 lowest significant bits of FLC are used for stash
12013 + * control. If set, the 6 least significant bits in value are interpreted as
12014 + * follows:
12015 + * - bits 0-1: indicates the number of 64 byte units of context that are
12016 + * stashed. FLC value is interpreted as a memory address in this case,
12017 + * excluding the 6 LS bits.
12018 + * - bits 2-3: indicates the number of 64 byte units of frame annotation
12019 + * to be stashed. Annotation is placed at FD[ADDR].
12020 + * - bits 4-5: indicates the number of 64 byte units of frame data to be
12021 + * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
12022 + * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
12023 + */
12024 +#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4
12025 +
12026 +/**
12027 + * struct dpni_fs_action_cfg - Action configuration for table look-up
12028 + * @flc: FLC value for traffic matching this rule. Please check the
12029 + * Frame Descriptor section in the hardware documentation for
12030 + * more information.
12031 + * @flow_id: Identifies the Rx queue used for matching traffic. Supported
12032 + * values are in range 0 to num_queue-1.
12033 + * @options: Any combination of DPNI_FS_OPT_ values.
12034 + */
12035 +struct dpni_fs_action_cfg {
12036 + u64 flc;
12037 + u16 flow_id;
12038 + u16 options;
12039 +};
12040 +
12041 +int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
12042 + u32 cmd_flags,
12043 + u16 token,
12044 + u8 tc_id,
12045 + u16 index,
12046 + const struct dpni_rule_cfg *cfg,
12047 + const struct dpni_fs_action_cfg *action);
12048 +
12049 +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
12050 + u32 cmd_flags,
12051 + u16 token,
12052 + u8 tc_id,
12053 + const struct dpni_rule_cfg *cfg);
12054 +
12055 +/**
12056 + * When used as queue_idx in dpni_set_rx_dist_default_queue(), this value
12057 + * signals the DPNI to drop all unclassified frames. NOTE(review): this macro shadows the identically-named enum dpni_fs_miss_action value — confirm intended.
12058 + */
12059 +#define DPNI_FS_MISS_DROP ((uint16_t)-1)
12060 +
12061 +/**
12062 + * struct dpni_rx_dist_cfg - distribution configuration
12063 + * @dist_size: distribution size; supported values: 1,2,3,4,6,7,8,
12064 + * 12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448,
12065 + * 512,768,896,1024
12066 + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
12067 + * the extractions to be used for the distribution key by calling
12068 + * dpkg_prepare_key_cfg() relevant only when enable!=0 otherwise
12069 + * it can be '0'
12070 + * @enable: enable/disable the distribution.
12071 + * @tc: TC id for which distribution is set
12072 + * @fs_miss_flow_id: when packet misses all rules from flow steering table and
12073 + * hash is disabled it will be put into this queue id; use
12074 + * DPNI_FS_MISS_DROP to drop frames. The value of this field is
12075 + * used only when flow steering distribution is enabled and hash
12076 + * distribution is disabled
12077 + */
12078 +struct dpni_rx_dist_cfg {
12079 + u16 dist_size;
12080 + u64 key_cfg_iova;
12081 + u8 enable;
12082 + u8 tc;
12083 + u16 fs_miss_flow_id;
12084 +};
12085 +
12086 +int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
12087 + u32 cmd_flags,
12088 + u16 token,
12089 + const struct dpni_rx_dist_cfg *cfg);
12090 +
12091 +int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
12092 + u32 cmd_flags,
12093 + u16 token,
12094 + const struct dpni_rx_dist_cfg *cfg);
12095 +
12096 +#endif /* __FSL_DPNI_H */
12097 --- /dev/null
12098 +++ b/drivers/staging/fsl-dpaa2/ethernet/net.h
12099 @@ -0,0 +1,480 @@
12100 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
12101 + *
12102 + * Redistribution and use in source and binary forms, with or without
12103 + * modification, are permitted provided that the following conditions are met:
12104 + * * Redistributions of source code must retain the above copyright
12105 + * notice, this list of conditions and the following disclaimer.
12106 + * * Redistributions in binary form must reproduce the above copyright
12107 + * notice, this list of conditions and the following disclaimer in the
12108 + * documentation and/or other materials provided with the distribution.
12109 + * * Neither the name of the above-listed copyright holders nor the
12110 + * names of any contributors may be used to endorse or promote products
12111 + * derived from this software without specific prior written permission.
12112 + *
12113 + *
12114 + * ALTERNATIVELY, this software may be distributed under the terms of the
12115 + * GNU General Public License ("GPL") as published by the Free Software
12116 + * Foundation, either version 2 of that License or (at your option) any
12117 + * later version.
12118 + *
12119 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
12120 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
12121 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
12122 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
12123 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
12124 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
12125 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
12126 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
12127 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
12128 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
12129 + * POSSIBILITY OF SUCH DAMAGE.
12130 + */
12131 +#ifndef __FSL_NET_H
12132 +#define __FSL_NET_H
12133 +
12134 +#define LAST_HDR_INDEX 0xFFFFFFFF
12135 +
12136 +/*****************************************************************************/
12137 +/* Protocol fields */
12138 +/*****************************************************************************/
12139 +
12140 +/************************* Ethernet fields *********************************/
12141 +#define NH_FLD_ETH_DA (1)
12142 +#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1)
12143 +#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2)
12144 +#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3)
12145 +#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4)
12146 +#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5)
12147 +#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1)
12148 +
12149 +#define NH_FLD_ETH_ADDR_SIZE 6
12150 +
12151 +/*************************** VLAN fields ***********************************/
12152 +#define NH_FLD_VLAN_VPRI (1)
12153 +#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1)
12154 +#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2)
12155 +#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3)
12156 +#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4)
12157 +#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1)
12158 +
12159 +#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
12160 + NH_FLD_VLAN_CFI | \
12161 + NH_FLD_VLAN_VID)
12162 +
12163 +/************************ IP (generic) fields ******************************/
12164 +#define NH_FLD_IP_VER (1)
12165 +#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2)
12166 +#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3)
12167 +#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4)
12168 +#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5)
12169 +#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6)
12170 +#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7)
12171 +#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8)
12172 +#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1)
12173 +
12174 +#define NH_FLD_IP_PROTO_SIZE 1
12175 +
12176 +/***************************** IPV4 fields *********************************/
12177 +#define NH_FLD_IPV4_VER (1)
12178 +#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1)
12179 +#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2)
12180 +#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3)
12181 +#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4)
12182 +#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5)
12183 +#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6)
12184 +#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7)
12185 +#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8)
12186 +#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9)
12187 +#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10)
12188 +#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11)
12189 +#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12)
12190 +#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13)
12191 +#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14)
12192 +#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1)
12193 +
12194 +#define NH_FLD_IPV4_ADDR_SIZE 4
12195 +#define NH_FLD_IPV4_PROTO_SIZE 1
12196 +
12197 +/***************************** IPV6 fields *********************************/
12198 +#define NH_FLD_IPV6_VER (1)
12199 +#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1)
12200 +#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2)
12201 +#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3)
12202 +#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4)
12203 +#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5)
12204 +#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6)
12205 +#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7)
12206 +#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1)
12207 +
12208 +#define NH_FLD_IPV6_ADDR_SIZE 16
12209 +#define NH_FLD_IPV6_NEXT_HDR_SIZE 1
12210 +
12211 +/***************************** ICMP fields *********************************/
12212 +#define NH_FLD_ICMP_TYPE (1)
12213 +#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1)
12214 +#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2)
12215 +#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3)
12216 +#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4)
12217 +#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1)
12218 +
12219 +#define NH_FLD_ICMP_CODE_SIZE 1
12220 +#define NH_FLD_ICMP_TYPE_SIZE 1
12221 +
12222 +/***************************** IGMP fields *********************************/
12223 +#define NH_FLD_IGMP_VERSION (1)
12224 +#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1)
12225 +#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2)
12226 +#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3)
12227 +#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1)
12228 +
12229 +/***************************** TCP fields **********************************/
12230 +#define NH_FLD_TCP_PORT_SRC (1)
12231 +#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1)
12232 +#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2)
12233 +#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3)
12234 +#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4)
12235 +#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5)
12236 +#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6)
12237 +#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7)
12238 +#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8)
12239 +#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9)
12240 +#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10)
12241 +#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1)
12242 +
12243 +#define NH_FLD_TCP_PORT_SIZE 2
12244 +
12245 +/***************************** UDP fields **********************************/
12246 +#define NH_FLD_UDP_PORT_SRC (1)
12247 +#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1)
12248 +#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2)
12249 +#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3)
12250 +#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1)
12251 +
12252 +#define NH_FLD_UDP_PORT_SIZE 2
12253 +
12254 +/*************************** UDP-lite fields *******************************/
12255 +#define NH_FLD_UDP_LITE_PORT_SRC (1)
12256 +#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1)
12257 +#define NH_FLD_UDP_LITE_ALL_FIELDS \
12258 + ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1)
12259 +
12260 +#define NH_FLD_UDP_LITE_PORT_SIZE 2
12261 +
12262 +/*************************** UDP-encap-ESP fields **************************/
12263 +#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1)
12264 +#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1)
12265 +#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2)
12266 +#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3)
12267 +#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4)
12268 +#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5)
12269 +#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \
12270 + ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1)
12271 +
12272 +#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2
12273 +#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4
12274 +
12275 +/***************************** SCTP fields *********************************/
12276 +#define NH_FLD_SCTP_PORT_SRC (1)
12277 +#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1)
12278 +#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2)
12279 +#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3)
12280 +#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1)
12281 +
12282 +#define NH_FLD_SCTP_PORT_SIZE 2
12283 +
12284 +/***************************** DCCP fields *********************************/
12285 +#define NH_FLD_DCCP_PORT_SRC (1)
12286 +#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1)
12287 +#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1)
12288 +
12289 +#define NH_FLD_DCCP_PORT_SIZE 2
12290 +
12291 +/***************************** IPHC fields *********************************/
12292 +#define NH_FLD_IPHC_CID (1)
12293 +#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1)
12294 +#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2)
12295 +#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3)
12296 +#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4)
12297 +#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1)
12298 +
12299 +/***************************** SCTP fields *********************************/
12300 +#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1)
12301 +#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1)
12302 +#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2)
12303 +#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3)
12304 +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4)
12305 +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
12306 +#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
12307 +#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
12308 +#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
12309 +#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
12310 +#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
12311 + ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
12312 +
12313 +/*************************** L2TPV2 fields *********************************/
12314 +#define NH_FLD_L2TPV2_TYPE_BIT (1)
12315 +#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1)
12316 +#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2)
12317 +#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3)
12318 +#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4)
12319 +#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5)
12320 +#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6)
12321 +#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7)
12322 +#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8)
12323 +#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9)
12324 +#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10)
12325 +#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11)
12326 +#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12)
12327 +#define NH_FLD_L2TPV2_ALL_FIELDS \
12328 + ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1)
12329 +
12330 +/*************************** L2TPV3 fields *********************************/
12331 +#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1)
12332 +#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1)
12333 +#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2)
12334 +#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3)
12335 +#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4)
12336 +#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5)
12337 +#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6)
12338 +#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7)
12339 +#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8)
12340 +#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \
12341 + ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1)
12342 +
12343 +#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1)
12344 +#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1)
12345 +#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2)
12346 +#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3)
12347 +#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \
12348 + ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1)
12349 +
12350 +/**************************** PPP fields ***********************************/
12351 +#define NH_FLD_PPP_PID (1)
12352 +#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1)
12353 +#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1)
12354 +
12355 +/************************** PPPoE fields ***********************************/
12356 +#define NH_FLD_PPPOE_VER (1)
12357 +#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1)
12358 +#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2)
12359 +#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3)
12360 +#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4)
12361 +#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5)
12362 +#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6)
12363 +#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1)
12364 +
12365 +/************************* PPP-Mux fields **********************************/
12366 +#define NH_FLD_PPPMUX_PID (1)
12367 +#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1)
12368 +#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2)
12369 +#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1)
12370 +
12371 +/*********************** PPP-Mux sub-frame fields **************************/
12372 +#define NH_FLD_PPPMUX_SUBFRM_PFF (1)
12373 +#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1)
12374 +#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2)
12375 +#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3)
12376 +#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4)
12377 +#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \
12378 + ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1)
12379 +
12380 +/*************************** LLC fields ************************************/
12381 +#define NH_FLD_LLC_DSAP (1)
12382 +#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1)
12383 +#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2)
12384 +#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1)
12385 +
12386 +/*************************** NLPID fields **********************************/
12387 +#define NH_FLD_NLPID_NLPID (1)
12388 +#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1)
12389 +
12390 +/*************************** SNAP fields ***********************************/
12391 +#define NH_FLD_SNAP_OUI (1)
12392 +#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1)
12393 +#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1)
12394 +
12395 +/*************************** LLC SNAP fields *******************************/
12396 +#define NH_FLD_LLC_SNAP_TYPE (1)
12397 +#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1)
12398 +
12399 +#define NH_FLD_ARP_HTYPE (1)
12400 +#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1)
12401 +#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2)
12402 +#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3)
12403 +#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4)
12404 +#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5)
12405 +#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6)
12406 +#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7)
12407 +#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8)
12408 +#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1)
12409 +
12410 +/*************************** RFC2684 fields ********************************/
12411 +#define NH_FLD_RFC2684_LLC (1)
12412 +#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1)
12413 +#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2)
12414 +#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3)
12415 +#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4)
12416 +#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5)
12417 +#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1)
12418 +
12419 +/*************************** User defined fields ***************************/
12420 +#define NH_FLD_USER_DEFINED_SRCPORT (1)
12421 +#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1)
12422 +#define NH_FLD_USER_DEFINED_ALL_FIELDS \
12423 + ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1)
12424 +
12425 +/*************************** Payload fields ********************************/
12426 +#define NH_FLD_PAYLOAD_BUFFER (1)
12427 +#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1)
12428 +#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2)
12429 +#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3)
12430 +#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4)
12431 +#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5)
12432 +#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1)
12433 +
12434 +/*************************** GRE fields ************************************/
12435 +#define NH_FLD_GRE_TYPE (1)
12436 +#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1)
12437 +
12438 +/*************************** MINENCAP fields *******************************/
12439 +#define NH_FLD_MINENCAP_SRC_IP (1)
12440 +#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1)
12441 +#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2)
12442 +#define NH_FLD_MINENCAP_ALL_FIELDS \
12443 + ((NH_FLD_MINENCAP_SRC_IP << 3) - 1)
12444 +
12445 +/*************************** IPSEC AH fields *******************************/
12446 +#define NH_FLD_IPSEC_AH_SPI (1)
12447 +#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1)
12448 +#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1)
12449 +
12450 +/*************************** IPSEC ESP fields ******************************/
12451 +#define NH_FLD_IPSEC_ESP_SPI (1)
12452 +#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1)
12453 +#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1)
12454 +
12455 +#define NH_FLD_IPSEC_ESP_SPI_SIZE 4
12456 +
12457 +/*************************** MPLS fields ***********************************/
12458 +#define NH_FLD_MPLS_LABEL_STACK (1)
12459 +#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \
12460 + ((NH_FLD_MPLS_LABEL_STACK << 1) - 1)
12461 +
12462 +/*************************** MACSEC fields *********************************/
12463 +#define NH_FLD_MACSEC_SECTAG (1)
12464 +#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1)
12465 +
12466 +/*************************** GTP fields ************************************/
12467 +#define NH_FLD_GTP_TEID (1)
12468 +
12469 +/* Protocol options */
12470 +
12471 +/* Ethernet options */
12472 +#define NH_OPT_ETH_BROADCAST 1
12473 +#define NH_OPT_ETH_MULTICAST 2
12474 +#define NH_OPT_ETH_UNICAST 3
12475 +#define NH_OPT_ETH_BPDU 4
12476 +
12477 +#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01)
12478 +/* also applicable for broadcast */
12479 +
12480 +/* VLAN options */
12481 +#define NH_OPT_VLAN_CFI 1
12482 +
12483 +/* IPV4 options */
12484 +#define NH_OPT_IPV4_UNICAST 1
12485 +#define NH_OPT_IPV4_MULTICAST 2
12486 +#define NH_OPT_IPV4_BROADCAST 3
12487 +#define NH_OPT_IPV4_OPTION 4
12488 +#define NH_OPT_IPV4_FRAG 5
12489 +#define NH_OPT_IPV4_INITIAL_FRAG 6
12490 +
12491 +/* IPV6 options */
12492 +#define NH_OPT_IPV6_UNICAST 1
12493 +#define NH_OPT_IPV6_MULTICAST 2
12494 +#define NH_OPT_IPV6_OPTION 3
12495 +#define NH_OPT_IPV6_FRAG 4
12496 +#define NH_OPT_IPV6_INITIAL_FRAG 5
12497 +
12498 +/* General IP options (may be used for any version) */
12499 +#define NH_OPT_IP_FRAG 1
12500 +#define NH_OPT_IP_INITIAL_FRAG 2
12501 +#define NH_OPT_IP_OPTION 3
12502 +
12503 +/* Minenc. options */
12504 +#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1
12505 +
12506 +/* GRE. options */
12507 +#define NH_OPT_GRE_ROUTING_PRESENT 1
12508 +
12509 +/* TCP options */
12510 +#define NH_OPT_TCP_OPTIONS 1
12511 +#define NH_OPT_TCP_CONTROL_HIGH_BITS 2
12512 +#define NH_OPT_TCP_CONTROL_LOW_BITS 3
12513 +
12514 +/* CAPWAP options */
12515 +#define NH_OPT_CAPWAP_DTLS 1
12516 +
12517 +enum net_prot {
12518 + NET_PROT_NONE = 0,
12519 + NET_PROT_PAYLOAD,
12520 + NET_PROT_ETH,
12521 + NET_PROT_VLAN,
12522 + NET_PROT_IPV4,
12523 + NET_PROT_IPV6,
12524 + NET_PROT_IP,
12525 + NET_PROT_TCP,
12526 + NET_PROT_UDP,
12527 + NET_PROT_UDP_LITE,
12528 + NET_PROT_IPHC,
12529 + NET_PROT_SCTP,
12530 + NET_PROT_SCTP_CHUNK_DATA,
12531 + NET_PROT_PPPOE,
12532 + NET_PROT_PPP,
12533 + NET_PROT_PPPMUX,
12534 + NET_PROT_PPPMUX_SUBFRM,
12535 + NET_PROT_L2TPV2,
12536 + NET_PROT_L2TPV3_CTRL,
12537 + NET_PROT_L2TPV3_SESS,
12538 + NET_PROT_LLC,
12539 + NET_PROT_LLC_SNAP,
12540 + NET_PROT_NLPID,
12541 + NET_PROT_SNAP,
12542 + NET_PROT_MPLS,
12543 + NET_PROT_IPSEC_AH,
12544 + NET_PROT_IPSEC_ESP,
12545 + NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
12546 + NET_PROT_MACSEC,
12547 + NET_PROT_GRE,
12548 + NET_PROT_MINENCAP,
12549 + NET_PROT_DCCP,
12550 + NET_PROT_ICMP,
12551 + NET_PROT_IGMP,
12552 + NET_PROT_ARP,
12553 + NET_PROT_CAPWAP_DATA,
12554 + NET_PROT_CAPWAP_CTRL,
12555 + NET_PROT_RFC2684,
12556 + NET_PROT_ICMPV6,
12557 + NET_PROT_FCOE,
12558 + NET_PROT_FIP,
12559 + NET_PROT_ISCSI,
12560 + NET_PROT_GTP,
12561 + NET_PROT_USER_DEFINED_L2,
12562 + NET_PROT_USER_DEFINED_L3,
12563 + NET_PROT_USER_DEFINED_L4,
12564 + NET_PROT_USER_DEFINED_L5,
12565 + NET_PROT_USER_DEFINED_SHIM1,
12566 + NET_PROT_USER_DEFINED_SHIM2,
12567 +
12568 + NET_PROT_DUMMY_LAST
12569 +};
12570 +
12571 +/*! IEEE8021.Q */
12572 +#define NH_IEEE8021Q_ETYPE 0x8100
12573 +#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
12574 + ((((u32)((etype) & 0xFFFF)) << 16) | \
12575 + (((u32)((pcp) & 0x07)) << 13) | \
12576 + (((u32)((dei) & 0x01)) << 12) | \
12577 + (((u32)((vlan_id) & 0xFFF))))
12578 +
12579 +#endif /* __FSL_NET_H */
12580 --- /dev/null
12581 +++ b/drivers/staging/fsl-dpaa2/ethsw/Makefile
12582 @@ -0,0 +1,10 @@
12583 +# SPDX-License-Identifier: GPL-2.0
12584 +#
12585 +# Makefile for the Freescale DPAA2 Ethernet Switch
12586 +#
12587 +# Copyright 2014-2017 Freescale Semiconductor, Inc.
12588 +# Copyright 2017-2018 NXP
12589 +
12590 +obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o
12591 +
12592 +dpaa2-ethsw-objs := ethsw.o ethsw-ethtool.o dpsw.o
12593 --- /dev/null
12594 +++ b/drivers/staging/fsl-dpaa2/ethsw/README
12595 @@ -0,0 +1,106 @@
12596 +DPAA2 Ethernet Switch driver
12597 +============================
12598 +
12599 +This file provides documentation for the DPAA2 Ethernet Switch driver
12600 +
12601 +
12602 +Contents
12603 +========
12604 + Supported Platforms
12605 + Architecture Overview
12606 + Creating an Ethernet Switch
12607 + Features
12608 +
12609 +
12610 + Supported Platforms
12611 +===================
12612 +This driver provides networking support for Freescale LS2085A, LS2088A
12613 +DPAA2 SoCs.
12614 +
12615 +
12616 +Architecture Overview
12617 +=====================
12618 +The Ethernet Switch in the DPAA2 architecture consists of several hardware
12619 +resources that provide the functionality. These are allocated and
12620 +configured via the Management Complex (MC) portals. MC abstracts most of
12621 +these resources as DPAA2 objects and exposes ABIs through which they can
12622 +be configured and controlled.
12623 +
12624 +For a more detailed description of the DPAA2 architecture and its object
12625 +abstractions see:
12626 + drivers/staging/fsl-mc/README.txt
12627 +
12628 +The Ethernet Switch is built on top of a Datapath Switch (DPSW) object.
12629 +
12630 +Configuration interface:
12631 +
12632 + ---------------------
12633 + | DPAA2 Switch driver |
12634 + ---------------------
12635 + .
12636 + .
12637 + ----------
12638 + | DPSW API |
12639 + ----------
12640 + . software
12641 + ================= . ==============
12642 + . hardware
12643 + ---------------------
12644 + | MC hardware portals |
12645 + ---------------------
12646 + .
12647 + .
12648 + ------
12649 + | DPSW |
12650 + ------
12651 +
12652 +The driver uses the switch device driver model and exposes each switch port as
12653 +a network interface, which can be included in a bridge. Traffic switched
12654 +between ports is offloaded into the hardware. Exposed network interfaces
12655 +are not used for I/O, they are used just for configuration. This
12656 +limitation is going to be addressed in the future.
12657 +
12658 +The DPSW can have ports connected to DPNIs or to PHYs via DPMACs.
12659 +
12660 +
12661 + [ethA] [ethB] [ethC] [ethD] [ethE] [ethF]
12662 + : : : : : :
12663 + : : : : : :
12664 +[eth drv] [eth drv] [ ethsw drv ]
12665 + : : : : : : kernel
12666 +========================================================================
12667 + : : : : : : hardware
12668 + [DPNI] [DPNI] [============= DPSW =================]
12669 + | | | | | |
12670 + | ---------- | [DPMAC] [DPMAC]
12671 + ------------------------------- | |
12672 + | |
12673 + [PHY] [PHY]
12674 +
12675 +For a more detailed description of the Ethernet switch device driver model
12676 +see:
12677 + Documentation/networking/switchdev.txt
12678 +
12679 +Creating an Ethernet Switch
12680 +===========================
12681 +A device is created for the switch objects probed on the MC bus. Each DPSW
12682 +has a number of properties which determine the configuration options and
12683 +associated hardware resources.
12684 +
12685 +A DPSW object (and the other DPAA2 objects needed for a DPAA2 switch) can
12686 +be added to a container on the MC bus in one of two ways: statically,
12687 +through a Datapath Layout Binary file (DPL) that is parsed by MC at boot
12688 +time; or created dynamically at runtime, via the DPAA2 objects APIs.
12689 +
12690 +Features
12691 +========
12692 +Driver configures DPSW to perform hardware switching offload of
12693 +unicast/multicast/broadcast (VLAN tagged or untagged) traffic between its
12694 +ports.
12695 +
12696 +It allows configuration of hardware learning, flooding, multicast groups,
12697 +port VLAN configuration and STP state.
12698 +
12699 +Static entries can be added/removed from the FDB.
12700 +
12701 +Hardware statistics for each port are provided through the ethtool -S option.
12702 --- /dev/null
12703 +++ b/drivers/staging/fsl-dpaa2/ethsw/TODO
12704 @@ -0,0 +1,14 @@
12705 +* Add I/O capabilities on switch port netdevices. This will allow control
12706 +traffic to reach the CPU.
12707 +* Add ACL to redirect control traffic to CPU.
12708 +* Add support for displaying learned FDB entries
12709 +* MC firmware uprev; the DPAA2 objects used by the Ethernet Switch driver
12710 +need to be kept in sync with binary interface changes in MC
12711 +* refine README file
12712 +* cleanup
12713 +
12714 +NOTE: At least the first three of the above are required before getting the
12715 +DPAA2 Ethernet Switch driver out of staging. Another requirement is that
12716 +the fsl-mc bus driver is moved to drivers/bus and dpio driver is moved to
12717 +drivers/soc (this is required for I/O).
12718 +
12719 --- /dev/null
12720 +++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
12721 @@ -0,0 +1,359 @@
12722 +// SPDX-License-Identifier: GPL-2.0
12723 +/*
12724 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
12725 + * Copyright 2017-2018 NXP
12726 + *
12727 + */
12728 +
12729 +#ifndef __FSL_DPSW_CMD_H
12730 +#define __FSL_DPSW_CMD_H
12731 +
12732 +/* DPSW Version */
12733 +#define DPSW_VER_MAJOR 8
12734 +#define DPSW_VER_MINOR 0
12735 +
12736 +#define DPSW_CMD_BASE_VERSION 1
12737 +#define DPSW_CMD_ID_OFFSET 4
12738 +
12739 +#define DPSW_CMD_ID(id) (((id) << DPSW_CMD_ID_OFFSET) | DPSW_CMD_BASE_VERSION)
12740 +
12741 +/* Command IDs */
12742 +#define DPSW_CMDID_CLOSE DPSW_CMD_ID(0x800)
12743 +#define DPSW_CMDID_OPEN DPSW_CMD_ID(0x802)
12744 +
12745 +#define DPSW_CMDID_GET_API_VERSION DPSW_CMD_ID(0xa02)
12746 +
12747 +#define DPSW_CMDID_ENABLE DPSW_CMD_ID(0x002)
12748 +#define DPSW_CMDID_DISABLE DPSW_CMD_ID(0x003)
12749 +#define DPSW_CMDID_GET_ATTR DPSW_CMD_ID(0x004)
12750 +#define DPSW_CMDID_RESET DPSW_CMD_ID(0x005)
12751 +
12752 +#define DPSW_CMDID_SET_IRQ_ENABLE DPSW_CMD_ID(0x012)
12753 +
12754 +#define DPSW_CMDID_SET_IRQ_MASK DPSW_CMD_ID(0x014)
12755 +
12756 +#define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016)
12757 +#define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017)
12758 +
12759 +#define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030)
12760 +#define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031)
12761 +
12762 +#define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_ID(0x034)
12763 +
12764 +#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D)
12765 +#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E)
12766 +
12767 +#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x044)
12768 +
12769 +#define DPSW_CMDID_IF_GET_LINK_STATE DPSW_CMD_ID(0x046)
12770 +#define DPSW_CMDID_IF_SET_FLOODING DPSW_CMD_ID(0x047)
12771 +#define DPSW_CMDID_IF_SET_BROADCAST DPSW_CMD_ID(0x048)
12772 +
12773 +#define DPSW_CMDID_IF_GET_TCI DPSW_CMD_ID(0x04A)
12774 +
12775 +#define DPSW_CMDID_IF_SET_LINK_CFG DPSW_CMD_ID(0x04C)
12776 +
12777 +#define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060)
12778 +#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_ID(0x061)
12779 +#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED DPSW_CMD_ID(0x062)
12780 +
12781 +#define DPSW_CMDID_VLAN_REMOVE_IF DPSW_CMD_ID(0x064)
12782 +#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED DPSW_CMD_ID(0x065)
12783 +#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING DPSW_CMD_ID(0x066)
12784 +#define DPSW_CMDID_VLAN_REMOVE DPSW_CMD_ID(0x067)
12785 +
12786 +#define DPSW_CMDID_FDB_ADD_UNICAST DPSW_CMD_ID(0x084)
12787 +#define DPSW_CMDID_FDB_REMOVE_UNICAST DPSW_CMD_ID(0x085)
12788 +#define DPSW_CMDID_FDB_ADD_MULTICAST DPSW_CMD_ID(0x086)
12789 +#define DPSW_CMDID_FDB_REMOVE_MULTICAST DPSW_CMD_ID(0x087)
12790 +#define DPSW_CMDID_FDB_SET_LEARNING_MODE DPSW_CMD_ID(0x088)
12791 +
12792 +/* Macros for accessing command fields smaller than 1byte */
12793 +#define DPSW_MASK(field) \
12794 + GENMASK(DPSW_##field##_SHIFT + DPSW_##field##_SIZE - 1, \
12795 + DPSW_##field##_SHIFT)
12796 +#define dpsw_set_field(var, field, val) \
12797 + ((var) |= (((val) << DPSW_##field##_SHIFT) & DPSW_MASK(field)))
12798 +#define dpsw_get_field(var, field) \
12799 + (((var) & DPSW_MASK(field)) >> DPSW_##field##_SHIFT)
12800 +#define dpsw_get_bit(var, bit) \
12801 + (((var) >> (bit)) & GENMASK(0, 0))
12802 +
12803 +struct dpsw_cmd_open {
12804 + __le32 dpsw_id;
12805 +};
12806 +
12807 +#define DPSW_COMPONENT_TYPE_SHIFT 0
12808 +#define DPSW_COMPONENT_TYPE_SIZE 4
12809 +
12810 +struct dpsw_cmd_create {
12811 + /* cmd word 0 */
12812 + __le16 num_ifs;
12813 + u8 max_fdbs;
12814 + u8 max_meters_per_if;
12815 + /* from LSB: only the first 4 bits */
12816 + u8 component_type;
12817 + u8 pad[3];
12818 + /* cmd word 1 */
12819 + __le16 max_vlans;
12820 + __le16 max_fdb_entries;
12821 + __le16 fdb_aging_time;
12822 + __le16 max_fdb_mc_groups;
12823 + /* cmd word 2 */
12824 + __le64 options;
12825 +};
12826 +
12827 +struct dpsw_cmd_destroy {
12828 + __le32 dpsw_id;
12829 +};
12830 +
12831 +#define DPSW_ENABLE_SHIFT 0
12832 +#define DPSW_ENABLE_SIZE 1
12833 +
12834 +struct dpsw_rsp_is_enabled {
12835 + /* from LSB: enable:1 */
12836 + u8 enabled;
12837 +};
12838 +
12839 +struct dpsw_cmd_set_irq_enable {
12840 + u8 enable_state;
12841 + u8 pad[3];
12842 + u8 irq_index;
12843 +};
12844 +
12845 +struct dpsw_cmd_get_irq_enable {
12846 + __le32 pad;
12847 + u8 irq_index;
12848 +};
12849 +
12850 +struct dpsw_rsp_get_irq_enable {
12851 + u8 enable_state;
12852 +};
12853 +
12854 +struct dpsw_cmd_set_irq_mask {
12855 + __le32 mask;
12856 + u8 irq_index;
12857 +};
12858 +
12859 +struct dpsw_cmd_get_irq_mask {
12860 + __le32 pad;
12861 + u8 irq_index;
12862 +};
12863 +
12864 +struct dpsw_rsp_get_irq_mask {
12865 + __le32 mask;
12866 +};
12867 +
12868 +struct dpsw_cmd_get_irq_status {
12869 + __le32 status;
12870 + u8 irq_index;
12871 +};
12872 +
12873 +struct dpsw_rsp_get_irq_status {
12874 + __le32 status;
12875 +};
12876 +
12877 +struct dpsw_cmd_clear_irq_status {
12878 + __le32 status;
12879 + u8 irq_index;
12880 +};
12881 +
12882 +#define DPSW_COMPONENT_TYPE_SHIFT 0
12883 +#define DPSW_COMPONENT_TYPE_SIZE 4
12884 +
12885 +struct dpsw_rsp_get_attr {
12886 + /* cmd word 0 */
12887 + __le16 num_ifs;
12888 + u8 max_fdbs;
12889 + u8 num_fdbs;
12890 + __le16 max_vlans;
12891 + __le16 num_vlans;
12892 + /* cmd word 1 */
12893 + __le16 max_fdb_entries;
12894 + __le16 fdb_aging_time;
12895 + __le32 dpsw_id;
12896 + /* cmd word 2 */
12897 + __le16 mem_size;
12898 + __le16 max_fdb_mc_groups;
12899 + u8 max_meters_per_if;
12900 + /* from LSB only the first 4 bits */
12901 + u8 component_type;
12902 + __le16 pad;
12903 + /* cmd word 3 */
12904 + __le64 options;
12905 +};
12906 +
12907 +struct dpsw_cmd_if_set_flooding {
12908 + __le16 if_id;
12909 + /* from LSB: enable:1 */
12910 + u8 enable;
12911 +};
12912 +
12913 +struct dpsw_cmd_if_set_broadcast {
12914 + __le16 if_id;
12915 + /* from LSB: enable:1 */
12916 + u8 enable;
12917 +};
12918 +
12919 +#define DPSW_VLAN_ID_SHIFT 0
12920 +#define DPSW_VLAN_ID_SIZE 12
12921 +#define DPSW_DEI_SHIFT 12
12922 +#define DPSW_DEI_SIZE 1
12923 +#define DPSW_PCP_SHIFT 13
12924 +#define DPSW_PCP_SIZE 3
12925 +
12926 +struct dpsw_cmd_if_set_tci {
12927 + __le16 if_id;
12928 + /* from LSB: VLAN_ID:12 DEI:1 PCP:3 */
12929 + __le16 conf;
12930 +};
12931 +
12932 +struct dpsw_cmd_if_get_tci {
12933 + __le16 if_id;
12934 +};
12935 +
12936 +struct dpsw_rsp_if_get_tci {
12937 + __le16 pad;
12938 + __le16 vlan_id;
12939 + u8 dei;
12940 + u8 pcp;
12941 +};
12942 +
12943 +#define DPSW_STATE_SHIFT 0
12944 +#define DPSW_STATE_SIZE 4
12945 +
12946 +struct dpsw_cmd_if_set_stp {
12947 + __le16 if_id;
12948 + __le16 vlan_id;
12949 + /* only the first LSB 4 bits */
12950 + u8 state;
12951 +};
12952 +
12953 +#define DPSW_COUNTER_TYPE_SHIFT 0
12954 +#define DPSW_COUNTER_TYPE_SIZE 5
12955 +
12956 +struct dpsw_cmd_if_get_counter {
12957 + __le16 if_id;
12958 + /* from LSB: type:5 */
12959 + u8 type;
12960 +};
12961 +
12962 +struct dpsw_rsp_if_get_counter {
12963 + __le64 pad;
12964 + __le64 counter;
12965 +};
12966 +
12967 +struct dpsw_cmd_if {
12968 + __le16 if_id;
12969 +};
12970 +
12971 +struct dpsw_cmd_if_set_max_frame_length {
12972 + __le16 if_id;
12973 + __le16 frame_length;
12974 +};
12975 +
12976 +struct dpsw_cmd_if_set_link_cfg {
12977 + /* cmd word 0 */
12978 + __le16 if_id;
12979 + u8 pad[6];
12980 + /* cmd word 1 */
12981 + __le32 rate;
12982 + __le32 pad1;
12983 + /* cmd word 2 */
12984 + __le64 options;
12985 +};
12986 +
12987 +struct dpsw_cmd_if_get_link_state {
12988 + __le16 if_id;
12989 +};
12990 +
12991 +#define DPSW_UP_SHIFT 0
12992 +#define DPSW_UP_SIZE 1
12993 +
12994 +struct dpsw_rsp_if_get_link_state {
12995 + /* cmd word 0 */
12996 + __le32 pad0;
12997 + u8 up;
12998 + u8 pad1[3];
12999 + /* cmd word 1 */
13000 + __le32 rate;
13001 + __le32 pad2;
13002 + /* cmd word 2 */
13003 + __le64 options;
13004 +};
13005 +
13006 +struct dpsw_vlan_add {
13007 + __le16 fdb_id;
13008 + __le16 vlan_id;
13009 +};
13010 +
13011 +struct dpsw_cmd_vlan_manage_if {
13012 + /* cmd word 0 */
13013 + __le16 pad0;
13014 + __le16 vlan_id;
13015 + __le32 pad1;
13016 + /* cmd word 1-4 */
13017 + __le64 if_id[4];
13018 +};
13019 +
13020 +struct dpsw_cmd_vlan_remove {
13021 + __le16 pad;
13022 + __le16 vlan_id;
13023 +};
13024 +
13025 +struct dpsw_cmd_fdb_add {
13026 + __le32 pad;
13027 + __le16 fdb_aging_time;
13028 + __le16 num_fdb_entries;
13029 +};
13030 +
13031 +struct dpsw_rsp_fdb_add {
13032 + __le16 fdb_id;
13033 +};
13034 +
13035 +struct dpsw_cmd_fdb_remove {
13036 + __le16 fdb_id;
13037 +};
13038 +
13039 +#define DPSW_ENTRY_TYPE_SHIFT 0
13040 +#define DPSW_ENTRY_TYPE_SIZE 4
13041 +
13042 +struct dpsw_cmd_fdb_unicast_op {
13043 + /* cmd word 0 */
13044 + __le16 fdb_id;
13045 + u8 mac_addr[6];
13046 + /* cmd word 1 */
13047 + __le16 if_egress;
13048 + /* only the first 4 bits from LSB */
13049 + u8 type;
13050 +};
13051 +
13052 +struct dpsw_cmd_fdb_multicast_op {
13053 + /* cmd word 0 */
13054 + __le16 fdb_id;
13055 + __le16 num_ifs;
13056 + /* only the first 4 bits from LSB */
13057 + u8 type;
13058 + u8 pad[3];
13059 + /* cmd word 1 */
13060 + u8 mac_addr[6];
13061 + __le16 pad2;
13062 + /* cmd word 2-5 */
13063 + __le64 if_id[4];
13064 +};
13065 +
13066 +#define DPSW_LEARNING_MODE_SHIFT 0
13067 +#define DPSW_LEARNING_MODE_SIZE 4
13068 +
13069 +struct dpsw_cmd_fdb_set_learning_mode {
13070 + __le16 fdb_id;
13071 + /* only the first 4 bits from LSB */
13072 + u8 mode;
13073 +};
13074 +
13075 +struct dpsw_rsp_get_api_version {
13076 + __le16 version_major;
13077 + __le16 version_minor;
13078 +};
13079 +
13080 +#endif /* __FSL_DPSW_CMD_H */
13081 --- /dev/null
13082 +++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
13083 @@ -0,0 +1,1165 @@
13084 +// SPDX-License-Identifier: GPL-2.0
13085 +/*
13086 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
13087 + * Copyright 2017-2018 NXP
13088 + *
13089 + */
13090 +
13091 +#include <linux/fsl/mc.h>
13092 +#include "dpsw.h"
13093 +#include "dpsw-cmd.h"
13094 +
13095 +static void build_if_id_bitmap(__le64 *bmap,
13096 + const u16 *id,
13097 + const u16 num_ifs)
13098 +{
13099 + int i;
13100 +
13101 + for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++) {
13102 + if (id[i] < DPSW_MAX_IF)
13103 + bmap[id[i] / 64] |= cpu_to_le64(BIT_MASK(id[i] % 64));
13104 + }
13105 +}
13106 +
13107 +/**
13108 + * dpsw_open() - Open a control session for the specified object
13109 + * @mc_io: Pointer to MC portal's I/O object
13110 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13111 + * @dpsw_id: DPSW unique ID
13112 + * @token: Returned token; use in subsequent API calls
13113 + *
13114 + * This function can be used to open a control session for an
13115 + * already created object; an object may have been declared in
13116 + * the DPL or by calling the dpsw_create() function.
13117 + * This function returns a unique authentication token,
13118 + * associated with the specific object ID and the specific MC
13119 + * portal; this token must be used in all subsequent commands for
13120 + * this specific object
13121 + *
13122 + * Return: '0' on Success; Error code otherwise.
13123 + */
13124 +int dpsw_open(struct fsl_mc_io *mc_io,
13125 + u32 cmd_flags,
13126 + int dpsw_id,
13127 + u16 *token)
13128 +{
13129 + struct fsl_mc_command cmd = { 0 };
13130 + struct dpsw_cmd_open *cmd_params;
13131 + int err;
13132 +
13133 + /* prepare command */
13134 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN,
13135 + cmd_flags,
13136 + 0);
13137 + cmd_params = (struct dpsw_cmd_open *)cmd.params;
13138 + cmd_params->dpsw_id = cpu_to_le32(dpsw_id);
13139 +
13140 + /* send command to mc*/
13141 + err = mc_send_command(mc_io, &cmd);
13142 + if (err)
13143 + return err;
13144 +
13145 + /* retrieve response parameters */
13146 + *token = mc_cmd_hdr_read_token(&cmd);
13147 +
13148 + return 0;
13149 +}
13150 +
13151 +/**
13152 + * dpsw_close() - Close the control session of the object
13153 + * @mc_io: Pointer to MC portal's I/O object
13154 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13155 + * @token: Token of DPSW object
13156 + *
13157 + * After this function is called, no further operations are
13158 + * allowed on the object without opening a new control session.
13159 + *
13160 + * Return: '0' on Success; Error code otherwise.
13161 + */
13162 +int dpsw_close(struct fsl_mc_io *mc_io,
13163 + u32 cmd_flags,
13164 + u16 token)
13165 +{
13166 + struct fsl_mc_command cmd = { 0 };
13167 +
13168 + /* prepare command */
13169 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE,
13170 + cmd_flags,
13171 + token);
13172 +
13173 + /* send command to mc*/
13174 + return mc_send_command(mc_io, &cmd);
13175 +}
13176 +
13177 +/**
13178 + * dpsw_enable() - Enable DPSW functionality
13179 + * @mc_io: Pointer to MC portal's I/O object
13180 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13181 + * @token: Token of DPSW object
13182 + *
13183 + * Return: Completion status. '0' on Success; Error code otherwise.
13184 + */
13185 +int dpsw_enable(struct fsl_mc_io *mc_io,
13186 + u32 cmd_flags,
13187 + u16 token)
13188 +{
13189 + struct fsl_mc_command cmd = { 0 };
13190 +
13191 + /* prepare command */
13192 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE,
13193 + cmd_flags,
13194 + token);
13195 +
13196 + /* send command to mc*/
13197 + return mc_send_command(mc_io, &cmd);
13198 +}
13199 +
13200 +/**
13201 + * dpsw_disable() - Disable DPSW functionality
13202 + * @mc_io: Pointer to MC portal's I/O object
13203 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13204 + * @token: Token of DPSW object
13205 + *
13206 + * Return: Completion status. '0' on Success; Error code otherwise.
13207 + */
13208 +int dpsw_disable(struct fsl_mc_io *mc_io,
13209 + u32 cmd_flags,
13210 + u16 token)
13211 +{
13212 + struct fsl_mc_command cmd = { 0 };
13213 +
13214 + /* prepare command */
13215 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE,
13216 + cmd_flags,
13217 + token);
13218 +
13219 + /* send command to mc*/
13220 + return mc_send_command(mc_io, &cmd);
13221 +}
13222 +
13223 +/**
13224 + * dpsw_reset() - Reset the DPSW, returns the object to initial state.
13225 + * @mc_io: Pointer to MC portal's I/O object
13226 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13227 + * @token: Token of DPSW object
13228 + *
13229 + * Return: '0' on Success; Error code otherwise.
13230 + */
13231 +int dpsw_reset(struct fsl_mc_io *mc_io,
13232 + u32 cmd_flags,
13233 + u16 token)
13234 +{
13235 + struct fsl_mc_command cmd = { 0 };
13236 +
13237 + /* prepare command */
13238 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET,
13239 + cmd_flags,
13240 + token);
13241 +
13242 + /* send command to mc*/
13243 + return mc_send_command(mc_io, &cmd);
13244 +}
13245 +
13246 +/**
13247 + * dpsw_set_irq_enable() - Set overall interrupt state.
13248 + * @mc_io: Pointer to MC portal's I/O object
13249 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13250 + * @token: Token of DPCI object
13251 + * @irq_index: The interrupt index to configure
13252 + * @en: Interrupt state - enable = 1, disable = 0
13253 + *
13254 + * Allows GPP software to control when interrupts are generated.
13255 + * Each interrupt can have up to 32 causes. The enable/disable controls the
13256 + * overall interrupt state. If the interrupt is disabled, no causes will cause
13257 + * an interrupt
13258 + *
13259 + * Return: '0' on Success; Error code otherwise.
13260 + */
13261 +int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
13262 + u32 cmd_flags,
13263 + u16 token,
13264 + u8 irq_index,
13265 + u8 en)
13266 +{
13267 + struct fsl_mc_command cmd = { 0 };
13268 + struct dpsw_cmd_set_irq_enable *cmd_params;
13269 +
13270 + /* prepare command */
13271 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE,
13272 + cmd_flags,
13273 + token);
13274 + cmd_params = (struct dpsw_cmd_set_irq_enable *)cmd.params;
13275 + dpsw_set_field(cmd_params->enable_state, ENABLE, en);
13276 + cmd_params->irq_index = irq_index;
13277 +
13278 + /* send command to mc*/
13279 + return mc_send_command(mc_io, &cmd);
13280 +}
13281 +
13282 +/**
13283 + * dpsw_set_irq_mask() - Set interrupt mask.
13284 + * @mc_io: Pointer to MC portal's I/O object
13285 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13286 + * @token: Token of DPCI object
13287 + * @irq_index: The interrupt index to configure
13288 + * @mask: Event mask to trigger interrupt;
13289 + * each bit:
13290 + * 0 = ignore event
13291 + * 1 = consider event for asserting IRQ
13292 + *
13293 + * Every interrupt can have up to 32 causes and the interrupt model supports
13294 + * masking/unmasking each cause independently
13295 + *
13296 + * Return: '0' on Success; Error code otherwise.
13297 + */
13298 +int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
13299 + u32 cmd_flags,
13300 + u16 token,
13301 + u8 irq_index,
13302 + u32 mask)
13303 +{
13304 + struct fsl_mc_command cmd = { 0 };
13305 + struct dpsw_cmd_set_irq_mask *cmd_params;
13306 +
13307 + /* prepare command */
13308 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK,
13309 + cmd_flags,
13310 + token);
13311 + cmd_params = (struct dpsw_cmd_set_irq_mask *)cmd.params;
13312 + cmd_params->mask = cpu_to_le32(mask);
13313 + cmd_params->irq_index = irq_index;
13314 +
13315 + /* send command to mc*/
13316 + return mc_send_command(mc_io, &cmd);
13317 +}
13318 +
13319 +/**
13320 + * dpsw_get_irq_status() - Get the current status of any pending interrupts
13321 + * @mc_io: Pointer to MC portal's I/O object
13322 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13323 + * @token: Token of DPSW object
13324 + * @irq_index: The interrupt index to configure
13325 + * @status: Returned interrupts status - one bit per cause:
13326 + * 0 = no interrupt pending
13327 + * 1 = interrupt pending
13328 + *
13329 + * Return: '0' on Success; Error code otherwise.
13330 + */
13331 +int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
13332 + u32 cmd_flags,
13333 + u16 token,
13334 + u8 irq_index,
13335 + u32 *status)
13336 +{
13337 + struct fsl_mc_command cmd = { 0 };
13338 + struct dpsw_cmd_get_irq_status *cmd_params;
13339 + struct dpsw_rsp_get_irq_status *rsp_params;
13340 + int err;
13341 +
13342 + /* prepare command */
13343 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS,
13344 + cmd_flags,
13345 + token);
13346 + cmd_params = (struct dpsw_cmd_get_irq_status *)cmd.params;
13347 + cmd_params->status = cpu_to_le32(*status);
13348 + cmd_params->irq_index = irq_index;
13349 +
13350 + /* send command to mc*/
13351 + err = mc_send_command(mc_io, &cmd);
13352 + if (err)
13353 + return err;
13354 +
13355 + /* retrieve response parameters */
13356 + rsp_params = (struct dpsw_rsp_get_irq_status *)cmd.params;
13357 + *status = le32_to_cpu(rsp_params->status);
13358 +
13359 + return 0;
13360 +}
13361 +
13362 +/**
13363 + * dpsw_clear_irq_status() - Clear a pending interrupt's status
13364 + * @mc_io: Pointer to MC portal's I/O object
13365 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13366 + * @token: Token of DPSW object
13367 + * @irq_index: The interrupt index to configure
13368 + * @status: bits to clear (W1C) - one bit per cause:
13369 + * 0 = don't change
13370 + * 1 = clear status bit
13371 + *
13372 + * Return: '0' on Success; Error code otherwise.
13373 + */
13374 +int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
13375 + u32 cmd_flags,
13376 + u16 token,
13377 + u8 irq_index,
13378 + u32 status)
13379 +{
13380 + struct fsl_mc_command cmd = { 0 };
13381 + struct dpsw_cmd_clear_irq_status *cmd_params;
13382 +
13383 + /* prepare command */
13384 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS,
13385 + cmd_flags,
13386 + token);
13387 + cmd_params = (struct dpsw_cmd_clear_irq_status *)cmd.params;
13388 + cmd_params->status = cpu_to_le32(status);
13389 + cmd_params->irq_index = irq_index;
13390 +
13391 + /* send command to mc*/
13392 + return mc_send_command(mc_io, &cmd);
13393 +}
13394 +
13395 +/**
13396 + * dpsw_get_attributes() - Retrieve DPSW attributes
13397 + * @mc_io: Pointer to MC portal's I/O object
13398 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13399 + * @token: Token of DPSW object
13400 + * @attr: Returned DPSW attributes
13401 + *
13402 + * Return: Completion status. '0' on Success; Error code otherwise.
13403 + */
13404 +int dpsw_get_attributes(struct fsl_mc_io *mc_io,
13405 + u32 cmd_flags,
13406 + u16 token,
13407 + struct dpsw_attr *attr)
13408 +{
13409 + struct fsl_mc_command cmd = { 0 };
13410 + struct dpsw_rsp_get_attr *rsp_params;
13411 + int err;
13412 +
13413 + /* prepare command */
13414 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR,
13415 + cmd_flags,
13416 + token);
13417 +
13418 + /* send command to mc*/
13419 + err = mc_send_command(mc_io, &cmd);
13420 + if (err)
13421 + return err;
13422 +
13423 + /* retrieve response parameters */
13424 + rsp_params = (struct dpsw_rsp_get_attr *)cmd.params;
13425 + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
13426 + attr->max_fdbs = rsp_params->max_fdbs;
13427 + attr->num_fdbs = rsp_params->num_fdbs;
13428 + attr->max_vlans = le16_to_cpu(rsp_params->max_vlans);
13429 + attr->num_vlans = le16_to_cpu(rsp_params->num_vlans);
13430 + attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
13431 + attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
13432 + attr->id = le32_to_cpu(rsp_params->dpsw_id);
13433 + attr->mem_size = le16_to_cpu(rsp_params->mem_size);
13434 + attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
13435 + attr->max_meters_per_if = rsp_params->max_meters_per_if;
13436 + attr->options = le64_to_cpu(rsp_params->options);
13437 + attr->component_type = dpsw_get_field(rsp_params->component_type,
13438 + COMPONENT_TYPE);
13439 +
13440 + return 0;
13441 +}
13442 +
13443 +/**
13444 + * dpsw_if_set_link_cfg() - Set the link configuration.
13445 + * @mc_io: Pointer to MC portal's I/O object
13446 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13447 + * @token: Token of DPSW object
13448 + * @if_id: Interface id
13449 + * @cfg: Link configuration
13450 + *
13451 + * Return: '0' on Success; Error code otherwise.
13452 + */
13453 +int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
13454 + u32 cmd_flags,
13455 + u16 token,
13456 + u16 if_id,
13457 + struct dpsw_link_cfg *cfg)
13458 +{
13459 + struct fsl_mc_command cmd = { 0 };
13460 + struct dpsw_cmd_if_set_link_cfg *cmd_params;
13461 +
13462 + /* prepare command */
13463 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG,
13464 + cmd_flags,
13465 + token);
13466 + cmd_params = (struct dpsw_cmd_if_set_link_cfg *)cmd.params;
13467 + cmd_params->if_id = cpu_to_le16(if_id);
13468 + cmd_params->rate = cpu_to_le32(cfg->rate);
13469 + cmd_params->options = cpu_to_le64(cfg->options);
13470 +
13471 + /* send command to mc*/
13472 + return mc_send_command(mc_io, &cmd);
13473 +}
13474 +
13475 +/**
13476 + * dpsw_if_get_link_state() - Return the link state
13477 + * @mc_io: Pointer to MC portal's I/O object
13478 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13479 + * @token: Token of DPSW object
13480 + * @if_id: Interface id
13481 + * @state: Link state 1 - linkup, 0 - link down or disconnected
13482 + *
13483 + * Return: '0' on Success; Error code otherwise.
13484 + */
13485 +int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
13486 + u32 cmd_flags,
13487 + u16 token,
13488 + u16 if_id,
13489 + struct dpsw_link_state *state)
13490 +{
13491 + struct fsl_mc_command cmd = { 0 };
13492 + struct dpsw_cmd_if_get_link_state *cmd_params;
13493 + struct dpsw_rsp_if_get_link_state *rsp_params;
13494 + int err;
13495 +
13496 + /* prepare command */
13497 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE,
13498 + cmd_flags,
13499 + token);
13500 + cmd_params = (struct dpsw_cmd_if_get_link_state *)cmd.params;
13501 + cmd_params->if_id = cpu_to_le16(if_id);
13502 +
13503 + /* send command to mc*/
13504 + err = mc_send_command(mc_io, &cmd);
13505 + if (err)
13506 + return err;
13507 +
13508 + /* retrieve response parameters */
13509 + rsp_params = (struct dpsw_rsp_if_get_link_state *)cmd.params;
13510 + state->rate = le32_to_cpu(rsp_params->rate);
13511 + state->options = le64_to_cpu(rsp_params->options);
13512 + state->up = dpsw_get_field(rsp_params->up, UP);
13513 +
13514 + return 0;
13515 +}
13516 +
13517 +/**
13518 + * dpsw_if_set_flooding() - Enable Disable flooding for particular interface
13519 + * @mc_io: Pointer to MC portal's I/O object
13520 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13521 + * @token: Token of DPSW object
13522 + * @if_id: Interface Identifier
13523 + * @en: 1 - enable, 0 - disable
13524 + *
13525 + * Return: Completion status. '0' on Success; Error code otherwise.
13526 + */
13527 +int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
13528 + u32 cmd_flags,
13529 + u16 token,
13530 + u16 if_id,
13531 + u8 en)
13532 +{
13533 + struct fsl_mc_command cmd = { 0 };
13534 + struct dpsw_cmd_if_set_flooding *cmd_params;
13535 +
13536 + /* prepare command */
13537 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING,
13538 + cmd_flags,
13539 + token);
13540 + cmd_params = (struct dpsw_cmd_if_set_flooding *)cmd.params;
13541 + cmd_params->if_id = cpu_to_le16(if_id);
13542 + dpsw_set_field(cmd_params->enable, ENABLE, en);
13543 +
13544 + /* send command to mc*/
13545 + return mc_send_command(mc_io, &cmd);
13546 +}
13547 +
13548 +/**
13549 + * dpsw_if_set_broadcast() - Enable/disable broadcast for particular interface
13550 + * @mc_io: Pointer to MC portal's I/O object
13551 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13552 + * @token: Token of DPSW object
13553 + * @if_id: Interface Identifier
13554 + * @en: 1 - enable, 0 - disable
13555 + *
13556 + * Return: Completion status. '0' on Success; Error code otherwise.
13557 + */
13558 +int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
13559 + u32 cmd_flags,
13560 + u16 token,
13561 + u16 if_id,
13562 + u8 en)
13563 +{
13564 + struct fsl_mc_command cmd = { 0 };
13565 + struct dpsw_cmd_if_set_broadcast *cmd_params;
13566 +
13567 + /* prepare command */
13568 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST,
13569 + cmd_flags,
13570 + token);
13571 + cmd_params = (struct dpsw_cmd_if_set_broadcast *)cmd.params;
13572 + cmd_params->if_id = cpu_to_le16(if_id);
13573 + dpsw_set_field(cmd_params->enable, ENABLE, en);
13574 +
13575 + /* send command to mc*/
13576 + return mc_send_command(mc_io, &cmd);
13577 +}
13578 +
13579 +/**
13580 + * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI)
13581 + * @mc_io: Pointer to MC portal's I/O object
13582 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13583 + * @token: Token of DPSW object
13584 + * @if_id: Interface Identifier
13585 + * @cfg: Tag Control Information Configuration
13586 + *
13587 + * Return: Completion status. '0' on Success; Error code otherwise.
13588 + */
13589 +int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
13590 + u32 cmd_flags,
13591 + u16 token,
13592 + u16 if_id,
13593 + const struct dpsw_tci_cfg *cfg)
13594 +{
13595 + struct fsl_mc_command cmd = { 0 };
13596 + struct dpsw_cmd_if_set_tci *cmd_params;
13597 + u16 tmp_conf = 0;
13598 +
13599 + /* prepare command */
13600 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI,
13601 + cmd_flags,
13602 + token);
13603 + cmd_params = (struct dpsw_cmd_if_set_tci *)cmd.params;
13604 + cmd_params->if_id = cpu_to_le16(if_id);
13605 + dpsw_set_field(tmp_conf, VLAN_ID, cfg->vlan_id);
13606 + dpsw_set_field(tmp_conf, DEI, cfg->dei);
13607 + dpsw_set_field(tmp_conf, PCP, cfg->pcp);
13608 + cmd_params->conf = cpu_to_le16(tmp_conf);
13609 +
13610 + /* send command to mc*/
13611 + return mc_send_command(mc_io, &cmd);
13612 +}
13613 +
13614 +/**
13615 + * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI)
13616 + * @mc_io: Pointer to MC portal's I/O object
13617 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13618 + * @token: Token of DPSW object
13619 + * @if_id: Interface Identifier
13620 + * @cfg: Tag Control Information Configuration
13621 + *
13622 + * Return: Completion status. '0' on Success; Error code otherwise.
13623 + */
13624 +int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
13625 + u32 cmd_flags,
13626 + u16 token,
13627 + u16 if_id,
13628 + struct dpsw_tci_cfg *cfg)
13629 +{
13630 + struct fsl_mc_command cmd = { 0 };
13631 + struct dpsw_cmd_if_get_tci *cmd_params;
13632 + struct dpsw_rsp_if_get_tci *rsp_params;
13633 + int err;
13634 +
13635 + /* prepare command */
13636 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI,
13637 + cmd_flags,
13638 + token);
13639 + cmd_params = (struct dpsw_cmd_if_get_tci *)cmd.params;
13640 + cmd_params->if_id = cpu_to_le16(if_id);
13641 +
13642 + /* send command to mc*/
13643 + err = mc_send_command(mc_io, &cmd);
13644 + if (err)
13645 + return err;
13646 +
13647 + /* retrieve response parameters */
13648 + rsp_params = (struct dpsw_rsp_if_get_tci *)cmd.params;
13649 + cfg->pcp = rsp_params->pcp;
13650 + cfg->dei = rsp_params->dei;
13651 + cfg->vlan_id = le16_to_cpu(rsp_params->vlan_id);
13652 +
13653 + return 0;
13654 +}
13655 +
13656 +/**
13657 + * dpsw_if_set_stp() - Function sets Spanning Tree Protocol (STP) state.
13658 + * @mc_io: Pointer to MC portal's I/O object
13659 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13660 + * @token: Token of DPSW object
13661 + * @if_id: Interface Identifier
13662 + * @cfg: STP State configuration parameters
13663 + *
13664 + * The following STP states are supported -
13665 + * blocking, listening, learning, forwarding and disabled.
13666 + *
13667 + * Return: Completion status. '0' on Success; Error code otherwise.
13668 + */
13669 +int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
13670 + u32 cmd_flags,
13671 + u16 token,
13672 + u16 if_id,
13673 + const struct dpsw_stp_cfg *cfg)
13674 +{
13675 + struct fsl_mc_command cmd = { 0 };
13676 + struct dpsw_cmd_if_set_stp *cmd_params;
13677 +
13678 + /* prepare command */
13679 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP,
13680 + cmd_flags,
13681 + token);
13682 + cmd_params = (struct dpsw_cmd_if_set_stp *)cmd.params;
13683 + cmd_params->if_id = cpu_to_le16(if_id);
13684 + cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
13685 + dpsw_set_field(cmd_params->state, STATE, cfg->state);
13686 +
13687 + /* send command to mc*/
13688 + return mc_send_command(mc_io, &cmd);
13689 +}
13690 +
13691 +/**
13692 + * dpsw_if_get_counter() - Get specific counter of particular interface
13693 + * @mc_io: Pointer to MC portal's I/O object
13694 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13695 + * @token: Token of DPSW object
13696 + * @if_id: Interface Identifier
13697 + * @type: Counter type
13698 + * @counter: return value
13699 + *
13700 + * Return: Completion status. '0' on Success; Error code otherwise.
13701 + */
13702 +int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
13703 + u32 cmd_flags,
13704 + u16 token,
13705 + u16 if_id,
13706 + enum dpsw_counter type,
13707 + u64 *counter)
13708 +{
13709 + struct fsl_mc_command cmd = { 0 };
13710 + struct dpsw_cmd_if_get_counter *cmd_params;
13711 + struct dpsw_rsp_if_get_counter *rsp_params;
13712 + int err;
13713 +
13714 + /* prepare command */
13715 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER,
13716 + cmd_flags,
13717 + token);
13718 + cmd_params = (struct dpsw_cmd_if_get_counter *)cmd.params;
13719 + cmd_params->if_id = cpu_to_le16(if_id);
13720 + dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
13721 +
13722 + /* send command to mc*/
13723 + err = mc_send_command(mc_io, &cmd);
13724 + if (err)
13725 + return err;
13726 +
13727 + /* retrieve response parameters */
13728 + rsp_params = (struct dpsw_rsp_if_get_counter *)cmd.params;
13729 + *counter = le64_to_cpu(rsp_params->counter);
13730 +
13731 + return 0;
13732 +}
13733 +
13734 +/**
13735 + * dpsw_if_enable() - Enable Interface
13736 + * @mc_io: Pointer to MC portal's I/O object
13737 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13738 + * @token: Token of DPSW object
13739 + * @if_id: Interface Identifier
13740 + *
13741 + * Return: Completion status. '0' on Success; Error code otherwise.
13742 + */
13743 +int dpsw_if_enable(struct fsl_mc_io *mc_io,
13744 + u32 cmd_flags,
13745 + u16 token,
13746 + u16 if_id)
13747 +{
13748 + struct fsl_mc_command cmd = { 0 };
13749 + struct dpsw_cmd_if *cmd_params;
13750 +
13751 + /* prepare command */
13752 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE,
13753 + cmd_flags,
13754 + token);
13755 + cmd_params = (struct dpsw_cmd_if *)cmd.params;
13756 + cmd_params->if_id = cpu_to_le16(if_id);
13757 +
13758 + /* send command to mc*/
13759 + return mc_send_command(mc_io, &cmd);
13760 +}
13761 +
13762 +/**
13763 + * dpsw_if_disable() - Disable Interface
13764 + * @mc_io: Pointer to MC portal's I/O object
13765 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13766 + * @token: Token of DPSW object
13767 + * @if_id: Interface Identifier
13768 + *
13769 + * Return: Completion status. '0' on Success; Error code otherwise.
13770 + */
13771 +int dpsw_if_disable(struct fsl_mc_io *mc_io,
13772 + u32 cmd_flags,
13773 + u16 token,
13774 + u16 if_id)
13775 +{
13776 + struct fsl_mc_command cmd = { 0 };
13777 + struct dpsw_cmd_if *cmd_params;
13778 +
13779 + /* prepare command */
13780 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE,
13781 + cmd_flags,
13782 + token);
13783 + cmd_params = (struct dpsw_cmd_if *)cmd.params;
13784 + cmd_params->if_id = cpu_to_le16(if_id);
13785 +
13786 + /* send command to mc*/
13787 + return mc_send_command(mc_io, &cmd);
13788 +}
13789 +
13790 +/**
13791 + * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length.
13792 + * @mc_io: Pointer to MC portal's I/O object
13793 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13794 + * @token: Token of DPSW object
13795 + * @if_id: Interface Identifier
13796 + * @frame_length: Maximum Frame Length
13797 + *
13798 + * Return: Completion status. '0' on Success; Error code otherwise.
13799 + */
13800 +int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
13801 + u32 cmd_flags,
13802 + u16 token,
13803 + u16 if_id,
13804 + u16 frame_length)
13805 +{
13806 + struct fsl_mc_command cmd = { 0 };
13807 + struct dpsw_cmd_if_set_max_frame_length *cmd_params;
13808 +
13809 + /* prepare command */
13810 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH,
13811 + cmd_flags,
13812 + token);
13813 + cmd_params = (struct dpsw_cmd_if_set_max_frame_length *)cmd.params;
13814 + cmd_params->if_id = cpu_to_le16(if_id);
13815 + cmd_params->frame_length = cpu_to_le16(frame_length);
13816 +
13817 + /* send command to mc*/
13818 + return mc_send_command(mc_io, &cmd);
13819 +}
13820 +
13821 +/**
13822 + * dpsw_vlan_add() - Adding new VLAN to DPSW.
13823 + * @mc_io: Pointer to MC portal's I/O object
13824 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13825 + * @token: Token of DPSW object
13826 + * @vlan_id: VLAN Identifier
13827 + * @cfg: VLAN configuration
13828 + *
13829 + * Only VLAN ID and FDB ID are required parameters here.
13830 + * 12 bit VLAN ID is defined in IEEE802.1Q.
13831 + * Adding a duplicate VLAN ID is not allowed.
13832 + * FDB ID can be shared across multiple VLANs. Shared learning
13833 + * is obtained by calling dpsw_vlan_add for multiple VLAN IDs
13834 + * with same fdb_id
13835 + *
13836 + * Return: Completion status. '0' on Success; Error code otherwise.
13837 + */
13838 +int dpsw_vlan_add(struct fsl_mc_io *mc_io,
13839 + u32 cmd_flags,
13840 + u16 token,
13841 + u16 vlan_id,
13842 + const struct dpsw_vlan_cfg *cfg)
13843 +{
13844 + struct fsl_mc_command cmd = { 0 };
13845 + struct dpsw_vlan_add *cmd_params;
13846 +
13847 + /* prepare command */
13848 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD,
13849 + cmd_flags,
13850 + token);
13851 + cmd_params = (struct dpsw_vlan_add *)cmd.params;
13852 + cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
13853 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
13854 +
13855 + /* send command to mc*/
13856 + return mc_send_command(mc_io, &cmd);
13857 +}
13858 +
13859 +/**
13860 + * dpsw_vlan_add_if() - Adding a set of interfaces to an existing VLAN.
13861 + * @mc_io: Pointer to MC portal's I/O object
13862 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13863 + * @token: Token of DPSW object
13864 + * @vlan_id: VLAN Identifier
13865 + * @cfg: Set of interfaces to add
13866 + *
13867 + * It adds only interfaces not belonging to this VLAN yet,
13868 + * otherwise an error is generated and an entire command is
13869 + * ignored. This function can be called numerous times always
13870 + * providing required interfaces delta.
13871 + *
13872 + * Return: Completion status. '0' on Success; Error code otherwise.
13873 + */
13874 +int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
13875 + u32 cmd_flags,
13876 + u16 token,
13877 + u16 vlan_id,
13878 + const struct dpsw_vlan_if_cfg *cfg)
13879 +{
13880 + struct fsl_mc_command cmd = { 0 };
13881 + struct dpsw_cmd_vlan_manage_if *cmd_params;
13882 +
13883 + /* prepare command */
13884 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF,
13885 + cmd_flags,
13886 + token);
13887 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
13888 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
13889 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
13890 +
13891 + /* send command to mc*/
13892 + return mc_send_command(mc_io, &cmd);
13893 +}
13894 +
13895 +/**
13896 + * dpsw_vlan_add_if_untagged() - Defining a set of interfaces that should be
13897 + * transmitted as untagged.
13898 + * @mc_io: Pointer to MC portal's I/O object
13899 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13900 + * @token: Token of DPSW object
13901 + * @vlan_id: VLAN Identifier
13902 + * @cfg: Set of interfaces that should be transmitted as untagged
13903 + *
13904 + * These interfaces should already belong to this VLAN.
13905 + * By default all interfaces are transmitted as tagged.
13906 + * Providing a non-existing interface, or an interface that is
13907 + * already configured untagged, generates an error and the entire
13908 + * command is ignored.
13909 + *
13910 + * Return: Completion status. '0' on Success; Error code otherwise.
13911 + */
13912 +int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
13913 + u32 cmd_flags,
13914 + u16 token,
13915 + u16 vlan_id,
13916 + const struct dpsw_vlan_if_cfg *cfg)
13917 +{
13918 + struct fsl_mc_command cmd = { 0 };
13919 + struct dpsw_cmd_vlan_manage_if *cmd_params;
13920 +
13921 + /* prepare command */
13922 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED,
13923 + cmd_flags,
13924 + token);
13925 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
13926 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
13927 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
13928 +
13929 + /* send command to mc*/
13930 + return mc_send_command(mc_io, &cmd);
13931 +}
13932 +
13933 +/**
13934 + * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
13935 + * @mc_io: Pointer to MC portal's I/O object
13936 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13937 + * @token: Token of DPSW object
13938 + * @vlan_id: VLAN Identifier
13939 + * @cfg: Set of interfaces that should be removed
13940 + *
13941 + * Interfaces must belong to this VLAN, otherwise an error
13942 + * is returned and the command is ignored
13943 + *
13944 + * Return: Completion status. '0' on Success; Error code otherwise.
13945 + */
13946 +int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
13947 + u32 cmd_flags,
13948 + u16 token,
13949 + u16 vlan_id,
13950 + const struct dpsw_vlan_if_cfg *cfg)
13951 +{
13952 + struct fsl_mc_command cmd = { 0 };
13953 + struct dpsw_cmd_vlan_manage_if *cmd_params;
13954 +
13955 + /* prepare command */
13956 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF,
13957 + cmd_flags,
13958 + token);
13959 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
13960 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
13961 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
13962 +
13963 + /* send command to mc*/
13964 + return mc_send_command(mc_io, &cmd);
13965 +}
13966 +
13967 +/**
13968 + * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should be
13969 + * converted from transmitted as untagged to transmit as tagged.
13970 + * @mc_io: Pointer to MC portal's I/O object
13971 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13972 + * @token: Token of DPSW object
13973 + * @vlan_id: VLAN Identifier
13974 + * @cfg: Set of interfaces that should be removed
13975 + *
13976 + * Interfaces provided by API have to belong to this VLAN and
13977 + * configured untagged, otherwise an error is returned and the
13978 + * command is ignored
13979 + *
13980 + * Return: Completion status. '0' on Success; Error code otherwise.
13981 + */
13982 +int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
13983 + u32 cmd_flags,
13984 + u16 token,
13985 + u16 vlan_id,
13986 + const struct dpsw_vlan_if_cfg *cfg)
13987 +{
13988 + struct fsl_mc_command cmd = { 0 };
13989 + struct dpsw_cmd_vlan_manage_if *cmd_params;
13990 +
13991 + /* prepare command */
13992 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED,
13993 + cmd_flags,
13994 + token);
13995 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
13996 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
13997 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
13998 +
13999 + /* send command to mc*/
14000 + return mc_send_command(mc_io, &cmd);
14001 +}
14002 +
14003 +/**
14004 + * dpsw_vlan_remove() - Remove an entire VLAN
14005 + * @mc_io: Pointer to MC portal's I/O object
14006 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
14007 + * @token: Token of DPSW object
14008 + * @vlan_id: VLAN Identifier
14009 + *
14010 + * Return: Completion status. '0' on Success; Error code otherwise.
14011 + */
14012 +int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
14013 + u32 cmd_flags,
14014 + u16 token,
14015 + u16 vlan_id)
14016 +{
14017 + struct fsl_mc_command cmd = { 0 };
14018 + struct dpsw_cmd_vlan_remove *cmd_params;
14019 +
14020 + /* prepare command */
14021 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE,
14022 + cmd_flags,
14023 + token);
14024 + cmd_params = (struct dpsw_cmd_vlan_remove *)cmd.params;
14025 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
14026 +
14027 + /* send command to mc*/
14028 + return mc_send_command(mc_io, &cmd);
14029 +}
14030 +
14031 +/**
14032 + * dpsw_fdb_add_unicast() - Function adds a unicast entry into MAC lookup table
14033 + * @mc_io: Pointer to MC portal's I/O object
14034 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
14035 + * @token: Token of DPSW object
14036 + * @fdb_id: Forwarding Database Identifier
14037 + * @cfg: Unicast entry configuration
14038 + *
14039 + * Return: Completion status. '0' on Success; Error code otherwise.
14040 + */
14041 +int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
14042 + u32 cmd_flags,
14043 + u16 token,
14044 + u16 fdb_id,
14045 + const struct dpsw_fdb_unicast_cfg *cfg)
14046 +{
14047 + struct fsl_mc_command cmd = { 0 };
14048 + struct dpsw_cmd_fdb_unicast_op *cmd_params;
14049 + int i;
14050 +
14051 + /* prepare command */
14052 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST,
14053 + cmd_flags,
14054 + token);
14055 + cmd_params = (struct dpsw_cmd_fdb_unicast_op *)cmd.params;
14056 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
14057 + cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
14058 + for (i = 0; i < 6; i++)
14059 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
14060 + dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
14061 +
14062 + /* send command to mc*/
14063 + return mc_send_command(mc_io, &cmd);
14064 +}
14065 +
14066 +/**
14067 + * dpsw_fdb_remove_unicast() - removes an entry from MAC lookup table
14068 + * @mc_io: Pointer to MC portal's I/O object
14069 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
14070 + * @token: Token of DPSW object
14071 + * @fdb_id: Forwarding Database Identifier
14072 + * @cfg: Unicast entry configuration
14073 + *
14074 + * Return: Completion status. '0' on Success; Error code otherwise.
14075 + */
14076 +int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
14077 + u32 cmd_flags,
14078 + u16 token,
14079 + u16 fdb_id,
14080 + const struct dpsw_fdb_unicast_cfg *cfg)
14081 +{
14082 + struct fsl_mc_command cmd = { 0 };
14083 + struct dpsw_cmd_fdb_unicast_op *cmd_params;
14084 + int i;
14085 +
14086 + /* prepare command */
14087 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST,
14088 + cmd_flags,
14089 + token);
14090 + cmd_params = (struct dpsw_cmd_fdb_unicast_op *)cmd.params;
14091 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
14092 + for (i = 0; i < 6; i++)
14093 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
14094 + cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
14095 + dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
14096 +
14097 + /* send command to mc*/
14098 + return mc_send_command(mc_io, &cmd);
14099 +}
14100 +
14101 +/**
14102 + * dpsw_fdb_add_multicast() - Add a set of egress interfaces to multi-cast group
14103 + * @mc_io: Pointer to MC portal's I/O object
14104 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
14105 + * @token: Token of DPSW object
14106 + * @fdb_id: Forwarding Database Identifier
14107 + * @cfg: Multicast entry configuration
14108 + *
14109 + * If group doesn't exist, it will be created.
14110 + * It adds only interfaces not belonging to this multicast group
14111 + * yet, otherwise error will be generated and the command is
14112 + * ignored.
14113 + * This function may be called numerous times always providing
14114 + * required interfaces delta.
14115 + *
14116 + * Return: Completion status. '0' on Success; Error code otherwise.
14117 + */
14118 +int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
14119 + u32 cmd_flags,
14120 + u16 token,
14121 + u16 fdb_id,
14122 + const struct dpsw_fdb_multicast_cfg *cfg)
14123 +{
14124 + struct fsl_mc_command cmd = { 0 };
14125 + struct dpsw_cmd_fdb_multicast_op *cmd_params;
14126 + int i;
14127 +
14128 + /* prepare command */
14129 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST,
14130 + cmd_flags,
14131 + token);
14132 + cmd_params = (struct dpsw_cmd_fdb_multicast_op *)cmd.params;
14133 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
14134 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
14135 + dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
14136 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
14137 + for (i = 0; i < 6; i++)
14138 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
14139 +
14140 + /* send command to mc*/
14141 + return mc_send_command(mc_io, &cmd);
14142 +}
14143 +
14144 +/**
14145 + * dpsw_fdb_remove_multicast() - Removing interfaces from an existing multicast
14146 + * group.
14147 + * @mc_io: Pointer to MC portal's I/O object
14148 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
14149 + * @token: Token of DPSW object
14150 + * @fdb_id: Forwarding Database Identifier
14151 + * @cfg: Multicast entry configuration
14152 + *
14153 + * Interfaces provided by this API have to exist in the group,
14154 + * otherwise an error will be returned and an entire command
14155 + * ignored. If there is no interface left in the group,
14156 + * an entire group is deleted
14157 + *
14158 + * Return: Completion status. '0' on Success; Error code otherwise.
14159 + */
14160 +int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
14161 + u32 cmd_flags,
14162 + u16 token,
14163 + u16 fdb_id,
14164 + const struct dpsw_fdb_multicast_cfg *cfg)
14165 +{
14166 + struct fsl_mc_command cmd = { 0 };
14167 + struct dpsw_cmd_fdb_multicast_op *cmd_params;
14168 + int i;
14169 +
14170 + /* prepare command */
14171 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST,
14172 + cmd_flags,
14173 + token);
14174 + cmd_params = (struct dpsw_cmd_fdb_multicast_op *)cmd.params;
14175 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
14176 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
14177 + dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
14178 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
14179 + for (i = 0; i < 6; i++)
14180 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
14181 +
14182 + /* send command to mc*/
14183 + return mc_send_command(mc_io, &cmd);
14184 +}
14185 +
14186 +/**
14187 + * dpsw_fdb_set_learning_mode() - Define FDB learning mode
14188 + * @mc_io: Pointer to MC portal's I/O object
14189 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
14190 + * @token: Token of DPSW object
14191 + * @fdb_id: Forwarding Database Identifier
14192 + * @mode: Learning mode
14193 + *
14194 + * Return: Completion status. '0' on Success; Error code otherwise.
14195 + */
14196 +int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
14197 + u32 cmd_flags,
14198 + u16 token,
14199 + u16 fdb_id,
14200 + enum dpsw_fdb_learning_mode mode)
14201 +{
14202 + struct fsl_mc_command cmd = { 0 };
14203 + struct dpsw_cmd_fdb_set_learning_mode *cmd_params;
14204 +
14205 + /* prepare command */
14206 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE,
14207 + cmd_flags,
14208 + token);
14209 + cmd_params = (struct dpsw_cmd_fdb_set_learning_mode *)cmd.params;
14210 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
14211 + dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode);
14212 +
14213 + /* send command to mc*/
14214 + return mc_send_command(mc_io, &cmd);
14215 +}
14216 +
14217 +/**
14218 + * dpsw_get_api_version() - Get Data Path Switch API version
14219 + * @mc_io: Pointer to MC portal's I/O object
14220 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
14221 + * @major_ver: Major version of data path switch API
14222 + * @minor_ver: Minor version of data path switch API
14223 + *
14224 + * Return: '0' on Success; Error code otherwise.
14225 + */
14226 +int dpsw_get_api_version(struct fsl_mc_io *mc_io,
14227 + u32 cmd_flags,
14228 + u16 *major_ver,
14229 + u16 *minor_ver)
14230 +{
14231 + struct fsl_mc_command cmd = { 0 };
14232 + struct dpsw_rsp_get_api_version *rsp_params;
14233 + int err;
14234 +
14235 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_API_VERSION,
14236 + cmd_flags,
14237 + 0);
14238 +
14239 + err = mc_send_command(mc_io, &cmd);
14240 + if (err)
14241 + return err;
14242 +
14243 + rsp_params = (struct dpsw_rsp_get_api_version *)cmd.params;
14244 + *major_ver = le16_to_cpu(rsp_params->version_major);
14245 + *minor_ver = le16_to_cpu(rsp_params->version_minor);
14246 +
14247 + return 0;
14248 +}
14249 --- /dev/null
14250 +++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
14251 @@ -0,0 +1,592 @@
14252 +// SPDX-License-Identifier: GPL-2.0
14253 +/*
14254 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
14255 + * Copyright 2017-2018 NXP
14256 + *
14257 + */
14258 +
14259 +#ifndef __FSL_DPSW_H
14260 +#define __FSL_DPSW_H
14261 +
14262 +/* Data Path L2-Switch API
14263 + * Contains API for handling DPSW topology and functionality
14264 + */
14265 +
14266 +struct fsl_mc_io;
14267 +
14268 +/**
14269 + * DPSW general definitions
14270 + */
14271 +
14272 +/**
14273 + * Maximum number of traffic class priorities
14274 + */
14275 +#define DPSW_MAX_PRIORITIES 8
14276 +/**
14277 + * Maximum number of interfaces
14278 + */
14279 +#define DPSW_MAX_IF 64
14280 +
14281 +int dpsw_open(struct fsl_mc_io *mc_io,
14282 + u32 cmd_flags,
14283 + int dpsw_id,
14284 + u16 *token);
14285 +
14286 +int dpsw_close(struct fsl_mc_io *mc_io,
14287 + u32 cmd_flags,
14288 + u16 token);
14289 +
14290 +/**
14291 + * DPSW options
14292 + */
14293 +
14294 +/**
14295 + * Disable flooding
14296 + */
14297 +#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL
14298 +/**
14299 + * Disable Multicast
14300 + */
14301 +#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL
14302 +/**
14303 + * Support control interface
14304 + */
14305 +#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL
14306 +/**
14307 + * Disable flooding metering
14308 + */
14309 +#define DPSW_OPT_FLOODING_METERING_DIS 0x0000000000000020ULL
14310 +/**
14311 + * Enable metering
14312 + */
14313 +#define DPSW_OPT_METERING_EN 0x0000000000000040ULL
14314 +
14315 +/**
14316 + * enum dpsw_component_type - component type of a bridge
14317 + * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an
14318 + * enterprise VLAN bridge or of a Provider Bridge used
14319 + * to process C-tagged frames
14320 + * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a
14321 + * Provider Bridge
14322 + *
14323 + */
14324 +enum dpsw_component_type {
14325 + DPSW_COMPONENT_TYPE_C_VLAN = 0,
14326 + DPSW_COMPONENT_TYPE_S_VLAN
14327 +};
14328 +
14329 +/**
14330 + * struct dpsw_cfg - DPSW configuration
14331 + * @num_ifs: Number of external and internal interfaces
14332 + * @adv: Advanced parameters; default is all zeros;
14333 + * use this structure to change default settings
14334 + */
14335 +struct dpsw_cfg {
14336 + u16 num_ifs;
14337 + /**
14338 + * struct adv - Advanced parameters
14339 + * @options: Enable/Disable DPSW features (bitmap)
14340 + * @max_vlans: Maximum Number of VLAN's; 0 - indicates default 16
14341 + * @max_meters_per_if: Number of meters per interface
14342 + * @max_fdbs: Maximum Number of FDB's; 0 - indicates default 16
14343 + * @max_fdb_entries: Number of FDB entries for default FDB table;
14344 + * 0 - indicates default 1024 entries.
14345 + * @fdb_aging_time: Default FDB aging time for default FDB table;
14346 + * 0 - indicates default 300 seconds
14347 + * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
14348 + * 0 - indicates default 32
14349 + * @component_type: Indicates the component type of this bridge
14350 + */
14351 + struct {
14352 + u64 options;
14353 + u16 max_vlans;
14354 + u8 max_meters_per_if;
14355 + u8 max_fdbs;
14356 + u16 max_fdb_entries;
14357 + u16 fdb_aging_time;
14358 + u16 max_fdb_mc_groups;
14359 + enum dpsw_component_type component_type;
14360 + } adv;
14361 +};
14362 +
14363 +int dpsw_enable(struct fsl_mc_io *mc_io,
14364 + u32 cmd_flags,
14365 + u16 token);
14366 +
14367 +int dpsw_disable(struct fsl_mc_io *mc_io,
14368 + u32 cmd_flags,
14369 + u16 token);
14370 +
14371 +int dpsw_reset(struct fsl_mc_io *mc_io,
14372 + u32 cmd_flags,
14373 + u16 token);
14374 +
14375 +/**
14376 + * DPSW IRQ Index and Events
14377 + */
14378 +
14379 +#define DPSW_IRQ_INDEX_IF 0x0000
14380 +#define DPSW_IRQ_INDEX_L2SW 0x0001
14381 +
14382 +/**
14383 + * IRQ event - Indicates that the link state changed
14384 + */
14385 +#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001
14386 +
14387 +/**
14388 + * struct dpsw_irq_cfg - IRQ configuration
14389 + * @addr: Address that must be written to signal a message-based interrupt
14390 + * @val: Value to write into irq_addr address
14391 + * @irq_num: A user defined number associated with this IRQ
14392 + */
14393 +struct dpsw_irq_cfg {
14394 + u64 addr;
14395 + u32 val;
14396 + int irq_num;
14397 +};
14398 +
14399 +int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
14400 + u32 cmd_flags,
14401 + u16 token,
14402 + u8 irq_index,
14403 + u8 en);
14404 +
14405 +int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
14406 + u32 cmd_flags,
14407 + u16 token,
14408 + u8 irq_index,
14409 + u32 mask);
14410 +
14411 +int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
14412 + u32 cmd_flags,
14413 + u16 token,
14414 + u8 irq_index,
14415 + u32 *status);
14416 +
14417 +int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
14418 + u32 cmd_flags,
14419 + u16 token,
14420 + u8 irq_index,
14421 + u32 status);
14422 +
14423 +/**
14424 + * struct dpsw_attr - Structure representing DPSW attributes
14425 + * @id: DPSW object ID
14426 + * @options: Enable/Disable DPSW features
14427 + * @max_vlans: Maximum Number of VLANs
14428 + * @max_meters_per_if: Number of meters per interface
14429 + * @max_fdbs: Maximum Number of FDBs
14430 + * @max_fdb_entries: Number of FDB entries for default FDB table;
14431 + * 0 - indicates default 1024 entries.
14432 + * @fdb_aging_time: Default FDB aging time for default FDB table;
14433 + * 0 - indicates default 300 seconds
14434 + * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
14435 + * 0 - indicates default 32
14436 + * @mem_size: DPSW frame storage memory size
14437 + * @num_ifs: Number of interfaces
14438 + * @num_vlans: Current number of VLANs
14439 + * @num_fdbs: Current number of FDBs
14440 + * @component_type: Component type of this bridge
14441 + */
14442 +struct dpsw_attr {
14443 + int id;
14444 + u64 options;
14445 + u16 max_vlans;
14446 + u8 max_meters_per_if;
14447 + u8 max_fdbs;
14448 + u16 max_fdb_entries;
14449 + u16 fdb_aging_time;
14450 + u16 max_fdb_mc_groups;
14451 + u16 num_ifs;
14452 + u16 mem_size;
14453 + u16 num_vlans;
14454 + u8 num_fdbs;
14455 + enum dpsw_component_type component_type;
14456 +};
14457 +
14458 +int dpsw_get_attributes(struct fsl_mc_io *mc_io,
14459 + u32 cmd_flags,
14460 + u16 token,
14461 + struct dpsw_attr *attr);
14462 +
14463 +/**
14464 + * enum dpsw_action - Action selection for special/control frames
14465 + * @DPSW_ACTION_DROP: Drop frame
14466 + * @DPSW_ACTION_REDIRECT: Redirect frame to control port
14467 + */
14468 +enum dpsw_action {
14469 + DPSW_ACTION_DROP = 0,
14470 + DPSW_ACTION_REDIRECT = 1
14471 +};
14472 +
14473 +/**
14474 + * Enable auto-negotiation
14475 + */
14476 +#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL
14477 +/**
14478 + * Enable half-duplex mode
14479 + */
14480 +#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
14481 +/**
14482 + * Enable pause frames
14483 + */
14484 +#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL
14485 +/**
14486 + * Enable a-symmetric pause frames
14487 + */
14488 +#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
14489 +
14490 +/**
14491 + * struct dpsw_link_cfg - Structure representing DPSW link configuration
14492 + * @rate: Rate
14493 + * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
14494 + */
14495 +struct dpsw_link_cfg {
14496 + u32 rate;
14497 + u64 options;
14498 +};
14499 +
14500 +int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
14501 + u32 cmd_flags,
14502 + u16 token,
14503 + u16 if_id,
14504 + struct dpsw_link_cfg *cfg);
14505 +/**
14506 + * struct dpsw_link_state - Structure representing DPSW link state
14507 + * @rate: Rate
14508 + * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
14509 + * @up: 0 - covers two cases: down and disconnected, 1 - up
14510 + */
14511 +struct dpsw_link_state {
14512 + u32 rate;
14513 + u64 options;
14514 + u8 up;
14515 +};
14516 +
14517 +int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
14518 + u32 cmd_flags,
14519 + u16 token,
14520 + u16 if_id,
14521 + struct dpsw_link_state *state);
14522 +
14523 +int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
14524 + u32 cmd_flags,
14525 + u16 token,
14526 + u16 if_id,
14527 + u8 en);
14528 +
14529 +int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
14530 + u32 cmd_flags,
14531 + u16 token,
14532 + u16 if_id,
14533 + u8 en);
14534 +
14535 +/**
14536 + * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration
14537 + * @pcp: Priority Code Point (PCP): a 3-bit field which refers
14538 + * to the IEEE 802.1p priority
14539 + * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used
14540 + * separately or in conjunction with PCP to indicate frames
14541 + * eligible to be dropped in the presence of congestion
14542 + * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN
14543 + * to which the frame belongs. The hexadecimal values
14544 + * of 0x000 and 0xFFF are reserved;
14545 + * all other values may be used as VLAN identifiers,
14546 + * allowing up to 4,094 VLANs
14547 + */
14548 +struct dpsw_tci_cfg {
14549 + u8 pcp;
14550 + u8 dei;
14551 + u16 vlan_id;
14552 +};
14553 +
14554 +int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
14555 + u32 cmd_flags,
14556 + u16 token,
14557 + u16 if_id,
14558 + const struct dpsw_tci_cfg *cfg);
14559 +
14560 +int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
14561 + u32 cmd_flags,
14562 + u16 token,
14563 + u16 if_id,
14564 + struct dpsw_tci_cfg *cfg);
14565 +
14566 +/**
14567 + * enum dpsw_stp_state - Spanning Tree Protocol (STP) states
14568 + * @DPSW_STP_STATE_DISABLED: Disabled state (DPSW_STP_STATE_BLOCKING is an alias)
14569 + * @DPSW_STP_STATE_LISTENING: Listening state
14570 + * @DPSW_STP_STATE_LEARNING: Learning state
14571 + * @DPSW_STP_STATE_FORWARDING: Forwarding state
14572 + *
14573 + */
14574 +enum dpsw_stp_state {
14575 + DPSW_STP_STATE_DISABLED = 0,
14576 + DPSW_STP_STATE_LISTENING = 1,
14577 + DPSW_STP_STATE_LEARNING = 2,
14578 + DPSW_STP_STATE_FORWARDING = 3,
14579 + DPSW_STP_STATE_BLOCKING = 0
14580 +};
14581 +
14582 +/**
14583 + * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration
14584 + * @vlan_id: VLAN ID STP state
14585 + * @state: STP state
14586 + */
14587 +struct dpsw_stp_cfg {
14588 + u16 vlan_id;
14589 + enum dpsw_stp_state state;
14590 +};
14591 +
14592 +int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
14593 + u32 cmd_flags,
14594 + u16 token,
14595 + u16 if_id,
14596 + const struct dpsw_stp_cfg *cfg);
14597 +
14598 +/**
14599 + * enum dpsw_accepted_frames - Types of frames to accept
14600 + * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and
14601 + * priority tagged frames
14602 + * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
14603 + * Priority-Tagged frames received on this interface.
14604 + *
14605 + */
14606 +enum dpsw_accepted_frames {
14607 + DPSW_ADMIT_ALL = 1,
14608 + DPSW_ADMIT_ONLY_VLAN_TAGGED = 3
14609 +};
14610 +
14611 +/**
14612 + * enum dpsw_counter - Counters types
14613 + * @DPSW_CNT_ING_FRAME: Counts ingress frames
14614 + * @DPSW_CNT_ING_BYTE: Counts ingress bytes
14615 + * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
14616 + * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
14617 + * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
14618 + * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
14619 + * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
14620 + * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
14621 + * @DPSW_CNT_EGR_FRAME: Counts egress frames
14622 + * @DPSW_CNT_EGR_BYTE: Counts egress bytes
14623 + * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
14624 + * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames
14625 + */
14626 +enum dpsw_counter {
14627 + DPSW_CNT_ING_FRAME = 0x0,
14628 + DPSW_CNT_ING_BYTE = 0x1,
14629 + DPSW_CNT_ING_FLTR_FRAME = 0x2,
14630 + DPSW_CNT_ING_FRAME_DISCARD = 0x3,
14631 + DPSW_CNT_ING_MCAST_FRAME = 0x4,
14632 + DPSW_CNT_ING_MCAST_BYTE = 0x5,
14633 + DPSW_CNT_ING_BCAST_FRAME = 0x6,
14634 + DPSW_CNT_ING_BCAST_BYTES = 0x7,
14635 + DPSW_CNT_EGR_FRAME = 0x8,
14636 + DPSW_CNT_EGR_BYTE = 0x9,
14637 + DPSW_CNT_EGR_FRAME_DISCARD = 0xa,
14638 + DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb
14639 +};
14640 +
14641 +int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
14642 + u32 cmd_flags,
14643 + u16 token,
14644 + u16 if_id,
14645 + enum dpsw_counter type,
14646 + u64 *counter);
14647 +
14648 +int dpsw_if_enable(struct fsl_mc_io *mc_io,
14649 + u32 cmd_flags,
14650 + u16 token,
14651 + u16 if_id);
14652 +
14653 +int dpsw_if_disable(struct fsl_mc_io *mc_io,
14654 + u32 cmd_flags,
14655 + u16 token,
14656 + u16 if_id);
14657 +
14658 +int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
14659 + u32 cmd_flags,
14660 + u16 token,
14661 + u16 if_id,
14662 + u16 frame_length);
14663 +
14664 +/**
14665 + * struct dpsw_vlan_cfg - VLAN Configuration
14666 + * @fdb_id: Forwarding Data Base
14667 + */
14668 +struct dpsw_vlan_cfg {
14669 + u16 fdb_id;
14670 +};
14671 +
14672 +int dpsw_vlan_add(struct fsl_mc_io *mc_io,
14673 + u32 cmd_flags,
14674 + u16 token,
14675 + u16 vlan_id,
14676 + const struct dpsw_vlan_cfg *cfg);
14677 +
14678 +/**
14679 + * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces
14680 + * @num_ifs: The number of interfaces that are assigned to the egress
14681 + * list for this VLAN
14682 + * @if_id: The set of interfaces that are
14683 + * assigned to the egress list for this VLAN
14684 + */
14685 +struct dpsw_vlan_if_cfg {
14686 + u16 num_ifs;
14687 + u16 if_id[DPSW_MAX_IF];
14688 +};
14689 +
14690 +int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
14691 + u32 cmd_flags,
14692 + u16 token,
14693 + u16 vlan_id,
14694 + const struct dpsw_vlan_if_cfg *cfg);
14695 +
14696 +int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
14697 + u32 cmd_flags,
14698 + u16 token,
14699 + u16 vlan_id,
14700 + const struct dpsw_vlan_if_cfg *cfg);
14701 +
14702 +int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
14703 + u32 cmd_flags,
14704 + u16 token,
14705 + u16 vlan_id,
14706 + const struct dpsw_vlan_if_cfg *cfg);
14707 +
14708 +int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
14709 + u32 cmd_flags,
14710 + u16 token,
14711 + u16 vlan_id,
14712 + const struct dpsw_vlan_if_cfg *cfg);
14713 +
14714 +int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
14715 + u32 cmd_flags,
14716 + u16 token,
14717 + u16 vlan_id);
14718 +
14719 +/**
14720 + * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic
14721 + * @DPSW_FDB_ENTRY_STATIC: Static entry
14722 + * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry
14723 + */
14724 +enum dpsw_fdb_entry_type {
14725 + DPSW_FDB_ENTRY_STATIC = 0,
14726 + DPSW_FDB_ENTRY_DINAMIC = 1
14727 +};
14728 +
14729 +/**
14730 + * struct dpsw_fdb_unicast_cfg - Unicast entry configuration
14731 + * @type: Select static or dynamic entry
14732 + * @mac_addr: MAC address
14733 + * @if_egress: Egress interface ID
14734 + */
14735 +struct dpsw_fdb_unicast_cfg {
14736 + enum dpsw_fdb_entry_type type;
14737 + u8 mac_addr[6];
14738 + u16 if_egress;
14739 +};
14740 +
14741 +int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
14742 + u32 cmd_flags,
14743 + u16 token,
14744 + u16 fdb_id,
14745 + const struct dpsw_fdb_unicast_cfg *cfg);
14746 +
14747 +int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
14748 + u32 cmd_flags,
14749 + u16 token,
14750 + u16 fdb_id,
14751 + const struct dpsw_fdb_unicast_cfg *cfg);
14752 +
14753 +/**
14754 + * struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration
14755 + * @type: Select static or dynamic entry
14756 + * @mac_addr: MAC address
14757 + * @num_ifs: Number of external and internal interfaces
14758 + * @if_id: Egress interface IDs
14759 + */
14760 +struct dpsw_fdb_multicast_cfg {
14761 + enum dpsw_fdb_entry_type type;
14762 + u8 mac_addr[6];
14763 + u16 num_ifs;
14764 + u16 if_id[DPSW_MAX_IF];
14765 +};
14766 +
14767 +int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
14768 + u32 cmd_flags,
14769 + u16 token,
14770 + u16 fdb_id,
14771 + const struct dpsw_fdb_multicast_cfg *cfg);
14772 +
14773 +int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
14774 + u32 cmd_flags,
14775 + u16 token,
14776 + u16 fdb_id,
14777 + const struct dpsw_fdb_multicast_cfg *cfg);
14778 +
14779 +/**
14780 + * enum dpsw_fdb_learning_mode - Auto-learning modes
14781 + * @DPSW_FDB_LEARNING_MODE_DIS: Disable Auto-learning
14782 + * @DPSW_FDB_LEARNING_MODE_HW: Enable HW auto-Learning
14783 + * @DPSW_FDB_LEARNING_MODE_NON_SECURE: Enable non-secure learning by CPU
14784 + * @DPSW_FDB_LEARNING_MODE_SECURE: Enable secure learning by CPU
14785 + *
14786 + * NON-SECURE LEARNING
14787 + * SMAC found DMAC found CTLU Action
14788 + * v v Forward frame to
14789 + * 1. DMAC destination
14790 + * - v Forward frame to
14791 + * 1. DMAC destination
14792 + * 2. Control interface
14793 + * v - Forward frame to
14794 + * 1. Flooding list of interfaces
14795 + * - - Forward frame to
14796 + * 1. Flooding list of interfaces
14797 + * 2. Control interface
14798 + * SECURE LEARNING
14799 + * SMAC found DMAC found CTLU Action
14800 + * v v Forward frame to
14801 + * 1. DMAC destination
14802 + * - v Forward frame to
14803 + * 1. Control interface
14804 + * v - Forward frame to
14805 + * 1. Flooding list of interfaces
14806 + * - - Forward frame to
14807 + * 1. Control interface
14808 + */
14809 +enum dpsw_fdb_learning_mode {
14810 + DPSW_FDB_LEARNING_MODE_DIS = 0,
14811 + DPSW_FDB_LEARNING_MODE_HW = 1,
14812 + DPSW_FDB_LEARNING_MODE_NON_SECURE = 2,
14813 + DPSW_FDB_LEARNING_MODE_SECURE = 3
14814 +};
14815 +
14816 +int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
14817 + u32 cmd_flags,
14818 + u16 token,
14819 + u16 fdb_id,
14820 + enum dpsw_fdb_learning_mode mode);
14821 +
14822 +/**
14823 + * struct dpsw_fdb_attr - FDB Attributes
14824 + * @max_fdb_entries: Number of FDB entries
14825 + * @fdb_aging_time: Aging time in seconds
14826 + * @learning_mode: Learning mode
14827 + * @num_fdb_mc_groups: Current number of multicast groups
14828 + * @max_fdb_mc_groups: Maximum number of multicast groups
14829 + */
14830 +struct dpsw_fdb_attr {
14831 + u16 max_fdb_entries;
14832 + u16 fdb_aging_time;
14833 + enum dpsw_fdb_learning_mode learning_mode;
14834 + u16 num_fdb_mc_groups;
14835 + u16 max_fdb_mc_groups;
14836 +};
14837 +
14838 +int dpsw_get_api_version(struct fsl_mc_io *mc_io,
14839 + u32 cmd_flags,
14840 + u16 *major_ver,
14841 + u16 *minor_ver);
14842 +
14843 +#endif /* __FSL_DPSW_H */
14844 --- /dev/null
14845 +++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
14846 @@ -0,0 +1,206 @@
14847 +/* Copyright 2014-2016 Freescale Semiconductor Inc.
14848 + * Copyright 2017 NXP
14849 + *
14850 + * Redistribution and use in source and binary forms, with or without
14851 + * modification, are permitted provided that the following conditions are met:
14852 + * * Redistributions of source code must retain the above copyright
14853 + * notice, this list of conditions and the following disclaimer.
14854 + * * Redistributions in binary form must reproduce the above copyright
14855 + * notice, this list of conditions and the following disclaimer in the
14856 + * documentation and/or other materials provided with the distribution.
14857 + * * Neither the name of the above-listed copyright holders nor the
14858 + * names of any contributors may be used to endorse or promote products
14859 + * derived from this software without specific prior written permission.
14860 + *
14861 + *
14862 + * ALTERNATIVELY, this software may be distributed under the terms of the
14863 + * GNU General Public License ("GPL") as published by the Free Software
14864 + * Foundation, either version 2 of that License or (at your option) any
14865 + * later version.
14866 + *
14867 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
14868 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
14869 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
14870 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
14871 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
14872 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
14873 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
14874 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
14875 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
14876 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
14877 + * POSSIBILITY OF SUCH DAMAGE.
14878 + */
14879 +
14880 +#include "ethsw.h"
14881 +
14882 +static struct {
14883 + enum dpsw_counter id;
14884 + char name[ETH_GSTRING_LEN];
14885 +} ethsw_ethtool_counters[] = {
14886 + {DPSW_CNT_ING_FRAME, "rx frames"},
14887 + {DPSW_CNT_ING_BYTE, "rx bytes"},
14888 + {DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"},
14889 + {DPSW_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
14890 + {DPSW_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
14891 + {DPSW_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
14892 + {DPSW_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
14893 + {DPSW_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
14894 + {DPSW_CNT_EGR_FRAME, "tx frames"},
14895 + {DPSW_CNT_EGR_BYTE, "tx bytes"},
14896 + {DPSW_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
14897 +
14898 +};
14899 +
14900 +#define ETHSW_NUM_COUNTERS ARRAY_SIZE(ethsw_ethtool_counters)
14901 +
14902 +static void ethsw_get_drvinfo(struct net_device *netdev,
14903 + struct ethtool_drvinfo *drvinfo)
14904 +{
14905 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
14906 + u16 version_major, version_minor;
14907 + int err;
14908 +
14909 + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
14910 +
14911 + err = dpsw_get_api_version(port_priv->ethsw_data->mc_io, 0,
14912 + &version_major,
14913 + &version_minor);
14914 + if (err)
14915 + strlcpy(drvinfo->fw_version, "N/A",
14916 + sizeof(drvinfo->fw_version));
14917 + else
14918 + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
14919 + "%u.%u", version_major, version_minor);
14920 +
14921 + strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
14922 + sizeof(drvinfo->bus_info));
14923 +}
14924 +
14925 +static int
14926 +ethsw_get_link_ksettings(struct net_device *netdev,
14927 + struct ethtool_link_ksettings *link_ksettings)
14928 +{
14929 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
14930 + struct dpsw_link_state state = {0};
14931 + int err = 0;
14932 +
14933 + err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
14934 + port_priv->ethsw_data->dpsw_handle,
14935 + port_priv->idx,
14936 + &state);
14937 + if (err) {
14938 + netdev_err(netdev, "ERROR %d getting link state", err);
14939 + goto out;
14940 + }
14941 +
14942 + /* At the moment, we have no way of interrogating the DPMAC
14943 + * from the DPSW side or there may not exist a DPMAC at all.
14944 + * Report only autoneg state, duplexity and speed.
14945 + */
14946 + if (state.options & DPSW_LINK_OPT_AUTONEG)
14947 + link_ksettings->base.autoneg = AUTONEG_ENABLE;
14948 + if (!(state.options & DPSW_LINK_OPT_HALF_DUPLEX))
14949 + link_ksettings->base.duplex = DUPLEX_FULL;
14950 + link_ksettings->base.speed = state.rate;
14951 +
14952 +out:
14953 + return err;
14954 +}
14955 +
14956 +static int
14957 +ethsw_set_link_ksettings(struct net_device *netdev,
14958 + const struct ethtool_link_ksettings *link_ksettings)
14959 +{
14960 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
14961 + struct dpsw_link_cfg cfg = {0};
14962 + int err = 0;
14963 +
14964 + netdev_dbg(netdev, "Setting link parameters...");
14965 +
14966 + /* Due to a temporary MC limitation, the DPSW port must be down
14967 + * in order to be able to change link settings. Taking steps to let
14968 + * the user know that.
14969 + */
14970 + if (netif_running(netdev)) {
14971 + netdev_info(netdev, "Sorry, interface must be brought down first.\n");
14972 + return -EACCES;
14973 + }
14974 +
14975 + cfg.rate = link_ksettings->base.speed;
14976 + if (link_ksettings->base.autoneg == AUTONEG_ENABLE)
14977 + cfg.options |= DPSW_LINK_OPT_AUTONEG;
14978 + else
14979 + cfg.options &= ~DPSW_LINK_OPT_AUTONEG;
14980 + if (link_ksettings->base.duplex == DUPLEX_HALF)
14981 + cfg.options |= DPSW_LINK_OPT_HALF_DUPLEX;
14982 + else
14983 + cfg.options &= ~DPSW_LINK_OPT_HALF_DUPLEX;
14984 +
14985 + err = dpsw_if_set_link_cfg(port_priv->ethsw_data->mc_io, 0,
14986 + port_priv->ethsw_data->dpsw_handle,
14987 + port_priv->idx,
14988 + &cfg);
14989 + if (err)
14990 + /* ethtool will be loud enough if we return an error; no point
14991 + * in putting our own error message on the console by default
14992 + */
14993 + netdev_dbg(netdev, "ERROR %d setting link cfg", err);
14994 +
14995 + return err;
14996 +}
14997 +
14998 +static int ethsw_ethtool_get_sset_count(struct net_device *dev, int sset)
14999 +{
15000 + switch (sset) {
15001 + case ETH_SS_STATS:
15002 + return ETHSW_NUM_COUNTERS;
15003 + default:
15004 + return -EOPNOTSUPP;
15005 + }
15006 +}
15007 +
15008 +static void ethsw_ethtool_get_strings(struct net_device *netdev,
15009 + u32 stringset, u8 *data)
15010 +{
15011 + int i;
15012 +
15013 + switch (stringset) {
15014 + case ETH_SS_STATS:
15015 + for (i = 0; i < ETHSW_NUM_COUNTERS; i++)
15016 + memcpy(data + i * ETH_GSTRING_LEN,
15017 + ethsw_ethtool_counters[i].name, ETH_GSTRING_LEN);
15018 + break;
15019 + }
15020 +}
15021 +
15022 +static void ethsw_ethtool_get_stats(struct net_device *netdev,
15023 + struct ethtool_stats *stats,
15024 + u64 *data)
15025 +{
15026 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15027 + int i, err;
15028 +
15029 + memset(data, 0,
15030 + sizeof(u64) * ETHSW_NUM_COUNTERS);
15031 +
15032 + for (i = 0; i < ETHSW_NUM_COUNTERS; i++) {
15033 + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
15034 + port_priv->ethsw_data->dpsw_handle,
15035 + port_priv->idx,
15036 + ethsw_ethtool_counters[i].id,
15037 + &data[i]);
15038 + if (err)
15039 + netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n",
15040 + ethsw_ethtool_counters[i].name, err);
15041 + }
15042 +}
15043 +
15044 +const struct ethtool_ops ethsw_port_ethtool_ops = {
15045 + .get_drvinfo = ethsw_get_drvinfo,
15046 + .get_link = ethtool_op_get_link,
15047 + .get_link_ksettings = ethsw_get_link_ksettings,
15048 + .set_link_ksettings = ethsw_set_link_ksettings,
15049 + .get_strings = ethsw_ethtool_get_strings,
15050 + .get_ethtool_stats = ethsw_ethtool_get_stats,
15051 + .get_sset_count = ethsw_ethtool_get_sset_count,
15052 +};
15053 --- /dev/null
15054 +++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
15055 @@ -0,0 +1,1438 @@
15056 +/* Copyright 2014-2016 Freescale Semiconductor Inc.
15057 + * Copyright 2017 NXP
15058 + *
15059 + * Redistribution and use in source and binary forms, with or without
15060 + * modification, are permitted provided that the following conditions are met:
15061 + * * Redistributions of source code must retain the above copyright
15062 + * notice, this list of conditions and the following disclaimer.
15063 + * * Redistributions in binary form must reproduce the above copyright
15064 + * notice, this list of conditions and the following disclaimer in the
15065 + * documentation and/or other materials provided with the distribution.
15066 + * * Neither the name of the above-listed copyright holders nor the
15067 + * names of any contributors may be used to endorse or promote products
15068 + * derived from this software without specific prior written permission.
15069 + *
15070 + *
15071 + * ALTERNATIVELY, this software may be distributed under the terms of the
15072 + * GNU General Public License ("GPL") as published by the Free Software
15073 + * Foundation, either version 2 of that License or (at your option) any
15074 + * later version.
15075 + *
15076 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15077 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15078 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
15079 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
15080 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
15081 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
15082 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
15083 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
15084 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
15085 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
15086 + * POSSIBILITY OF SUCH DAMAGE.
15087 + */
15088 +
15089 +#include <linux/module.h>
15090 +
15091 +#include <linux/interrupt.h>
15092 +#include <linux/msi.h>
15093 +#include <linux/kthread.h>
15094 +#include <linux/workqueue.h>
15095 +
15096 +#include <linux/fsl/mc.h>
15097 +
15098 +#include "ethsw.h"
15099 +
15100 +static struct workqueue_struct *ethsw_owq;
15101 +
15102 +/* Minimal supported DPSW version */
15103 +#define DPSW_MIN_VER_MAJOR 8
15104 +#define DPSW_MIN_VER_MINOR 0
15105 +
15106 +#define DEFAULT_VLAN_ID 1
15107 +
15108 +static int ethsw_add_vlan(struct ethsw_core *ethsw, u16 vid)
15109 +{
15110 + int err;
15111 +
15112 + struct dpsw_vlan_cfg vcfg = {
15113 + .fdb_id = 0,
15114 + };
15115 +
15116 + if (ethsw->vlans[vid]) {
15117 + dev_err(ethsw->dev, "VLAN already configured\n");
15118 + return -EEXIST;
15119 + }
15120 +
15121 + err = dpsw_vlan_add(ethsw->mc_io, 0,
15122 + ethsw->dpsw_handle, vid, &vcfg);
15123 + if (err) {
15124 + dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
15125 + return err;
15126 + }
15127 + ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;
15128 +
15129 + return 0;
15130 +}
15131 +
15132 +static int ethsw_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
15133 +{
15134 + struct ethsw_core *ethsw = port_priv->ethsw_data;
15135 + struct net_device *netdev = port_priv->netdev;
15136 + struct dpsw_tci_cfg tci_cfg = { 0 };
15137 + bool is_oper;
15138 + int err, ret;
15139 +
15140 + err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
15141 + port_priv->idx, &tci_cfg);
15142 + if (err) {
15143 + netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
15144 + return err;
15145 + }
15146 +
15147 + tci_cfg.vlan_id = pvid;
15148 +
15149 + /* Interface needs to be down to change PVID */
15150 + is_oper = netif_oper_up(netdev);
15151 + if (is_oper) {
15152 + err = dpsw_if_disable(ethsw->mc_io, 0,
15153 + ethsw->dpsw_handle,
15154 + port_priv->idx);
15155 + if (err) {
15156 + netdev_err(netdev, "dpsw_if_disable err %d\n", err);
15157 + return err;
15158 + }
15159 + }
15160 +
15161 + err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
15162 + port_priv->idx, &tci_cfg);
15163 + if (err) {
15164 + netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
15165 + goto set_tci_error;
15166 + }
15167 +
15168 + /* Delete previous PVID info and mark the new one */
15169 + port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
15170 + port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
15171 + port_priv->pvid = pvid;
15172 +
15173 +set_tci_error:
15174 + if (is_oper) {
15175 + ret = dpsw_if_enable(ethsw->mc_io, 0,
15176 + ethsw->dpsw_handle,
15177 + port_priv->idx);
15178 + if (ret) {
15179 + netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
15180 + return ret;
15181 + }
15182 + }
15183 +
15184 + return err;
15185 +}
15186 +
15187 +static int ethsw_port_add_vlan(struct ethsw_port_priv *port_priv,
15188 + u16 vid, u16 flags)
15189 +{
15190 + struct ethsw_core *ethsw = port_priv->ethsw_data;
15191 + struct net_device *netdev = port_priv->netdev;
15192 + struct dpsw_vlan_if_cfg vcfg;
15193 + int err;
15194 +
15195 + if (port_priv->vlans[vid]) {
15196 + netdev_warn(netdev, "VLAN %d already configured\n", vid);
15197 + return -EEXIST;
15198 + }
15199 +
15200 + vcfg.num_ifs = 1;
15201 + vcfg.if_id[0] = port_priv->idx;
15202 + err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
15203 + if (err) {
15204 + netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
15205 + return err;
15206 + }
15207 +
15208 + port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
15209 +
15210 + if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
15211 + err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
15212 + ethsw->dpsw_handle,
15213 + vid, &vcfg);
15214 + if (err) {
15215 + netdev_err(netdev,
15216 + "dpsw_vlan_add_if_untagged err %d\n", err);
15217 + return err;
15218 + }
15219 + port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
15220 + }
15221 +
15222 + if (flags & BRIDGE_VLAN_INFO_PVID) {
15223 + err = ethsw_port_set_pvid(port_priv, vid);
15224 + if (err)
15225 + return err;
15226 + }
15227 +
15228 + return 0;
15229 +}
15230 +
15231 +static int ethsw_set_learning(struct ethsw_core *ethsw, u8 flag)
15232 +{
15233 + enum dpsw_fdb_learning_mode learn_mode;
15234 + int err;
15235 +
15236 + if (flag)
15237 + learn_mode = DPSW_FDB_LEARNING_MODE_HW;
15238 + else
15239 + learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
15240 +
15241 + err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
15242 + learn_mode);
15243 + if (err) {
15244 + dev_err(ethsw->dev, "dpsw_fdb_set_learning_mode err %d\n", err);
15245 + return err;
15246 + }
15247 + ethsw->learning = !!flag;
15248 +
15249 + return 0;
15250 +}
15251 +
15252 +static int ethsw_port_set_flood(struct ethsw_port_priv *port_priv, u8 flag)
15253 +{
15254 +	int err;
15255 +
15256 +	err = dpsw_if_set_flooding(port_priv->ethsw_data->mc_io, 0,
15257 +				   port_priv->ethsw_data->dpsw_handle,
15258 +				   port_priv->idx, flag);
15259 +	if (err) {
15260 +		netdev_err(port_priv->netdev,
15261 +			   "dpsw_if_set_flooding err %d\n", err);
15262 +		return err;
15263 +	}
15264 +	port_priv->flood = !!flag;
15265 +
15266 +	return 0;
15267 +}
15268 +
15269 +static int ethsw_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
15270 +{
15271 + struct dpsw_stp_cfg stp_cfg = {
15272 + .vlan_id = DEFAULT_VLAN_ID,
15273 + .state = state,
15274 + };
15275 + int err;
15276 +
15277 + if (!netif_oper_up(port_priv->netdev) || state == port_priv->stp_state)
15278 + return 0; /* Nothing to do */
15279 +
15280 + err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
15281 + port_priv->ethsw_data->dpsw_handle,
15282 + port_priv->idx, &stp_cfg);
15283 + if (err) {
15284 + netdev_err(port_priv->netdev,
15285 + "dpsw_if_set_stp err %d\n", err);
15286 + return err;
15287 + }
15288 +
15289 + port_priv->stp_state = state;
15290 +
15291 + return 0;
15292 +}
15293 +
15294 +static int ethsw_dellink_switch(struct ethsw_core *ethsw, u16 vid)
15295 +{
15296 + struct ethsw_port_priv *ppriv_local = NULL;
15297 + int i, err;
15298 +
15299 + if (!ethsw->vlans[vid])
15300 + return -ENOENT;
15301 +
15302 + err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
15303 + if (err) {
15304 + dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
15305 + return err;
15306 + }
15307 + ethsw->vlans[vid] = 0;
15308 +
15309 + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
15310 + ppriv_local = ethsw->ports[i];
15311 + ppriv_local->vlans[vid] = 0;
15312 + }
15313 +
15314 + return 0;
15315 +}
15316 +
15317 +static int ethsw_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
15318 + const unsigned char *addr)
15319 +{
15320 + struct dpsw_fdb_unicast_cfg entry = {0};
15321 + int err;
15322 +
15323 + entry.if_egress = port_priv->idx;
15324 + entry.type = DPSW_FDB_ENTRY_STATIC;
15325 + ether_addr_copy(entry.mac_addr, addr);
15326 +
15327 + err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
15328 + port_priv->ethsw_data->dpsw_handle,
15329 + 0, &entry);
15330 + if (err)
15331 + netdev_err(port_priv->netdev,
15332 + "dpsw_fdb_add_unicast err %d\n", err);
15333 + return err;
15334 +}
15335 +
15336 +static int ethsw_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
15337 + const unsigned char *addr)
15338 +{
15339 + struct dpsw_fdb_unicast_cfg entry = {0};
15340 + int err;
15341 +
15342 + entry.if_egress = port_priv->idx;
15343 + entry.type = DPSW_FDB_ENTRY_STATIC;
15344 + ether_addr_copy(entry.mac_addr, addr);
15345 +
15346 + err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
15347 + port_priv->ethsw_data->dpsw_handle,
15348 + 0, &entry);
15349 + /* Silently discard calling multiple times the del command */
15350 + if (err && err != -ENXIO)
15351 + netdev_err(port_priv->netdev,
15352 + "dpsw_fdb_remove_unicast err %d\n", err);
15353 + return err;
15354 +}
15355 +
15356 +static int ethsw_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
15357 + const unsigned char *addr)
15358 +{
15359 + struct dpsw_fdb_multicast_cfg entry = {0};
15360 + int err;
15361 +
15362 + ether_addr_copy(entry.mac_addr, addr);
15363 + entry.type = DPSW_FDB_ENTRY_STATIC;
15364 + entry.num_ifs = 1;
15365 + entry.if_id[0] = port_priv->idx;
15366 +
15367 + err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
15368 + port_priv->ethsw_data->dpsw_handle,
15369 + 0, &entry);
15370 + /* Silently discard calling multiple times the add command */
15371 + if (err && err != -ENXIO)
15372 + netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
15373 + err);
15374 + return err;
15375 +}
15376 +
15377 +static int ethsw_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
15378 + const unsigned char *addr)
15379 +{
15380 + struct dpsw_fdb_multicast_cfg entry = {0};
15381 + int err;
15382 +
15383 + ether_addr_copy(entry.mac_addr, addr);
15384 + entry.type = DPSW_FDB_ENTRY_STATIC;
15385 + entry.num_ifs = 1;
15386 + entry.if_id[0] = port_priv->idx;
15387 +
15388 + err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
15389 + port_priv->ethsw_data->dpsw_handle,
15390 + 0, &entry);
15391 + /* Silently discard calling multiple times the del command */
15392 + if (err && err != -ENAVAIL)
15393 + netdev_err(port_priv->netdev,
15394 + "dpsw_fdb_remove_multicast err %d\n", err);
15395 + return err;
15396 +}
15397 +
15398 +static void port_get_stats(struct net_device *netdev,
15399 + struct rtnl_link_stats64 *stats)
15400 +{
15401 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15402 + u64 tmp;
15403 + int err;
15404 +
15405 + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
15406 + port_priv->ethsw_data->dpsw_handle,
15407 + port_priv->idx,
15408 + DPSW_CNT_ING_FRAME, &stats->rx_packets);
15409 + if (err)
15410 + goto error;
15411 +
15412 + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
15413 + port_priv->ethsw_data->dpsw_handle,
15414 + port_priv->idx,
15415 + DPSW_CNT_EGR_FRAME, &stats->tx_packets);
15416 + if (err)
15417 + goto error;
15418 +
15419 + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
15420 + port_priv->ethsw_data->dpsw_handle,
15421 + port_priv->idx,
15422 + DPSW_CNT_ING_BYTE, &stats->rx_bytes);
15423 + if (err)
15424 + goto error;
15425 +
15426 + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
15427 + port_priv->ethsw_data->dpsw_handle,
15428 + port_priv->idx,
15429 + DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
15430 + if (err)
15431 + goto error;
15432 +
15433 + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
15434 + port_priv->ethsw_data->dpsw_handle,
15435 + port_priv->idx,
15436 + DPSW_CNT_ING_FRAME_DISCARD,
15437 + &stats->rx_dropped);
15438 + if (err)
15439 + goto error;
15440 +
15441 + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
15442 + port_priv->ethsw_data->dpsw_handle,
15443 + port_priv->idx,
15444 + DPSW_CNT_ING_FLTR_FRAME,
15445 + &tmp);
15446 + if (err)
15447 + goto error;
15448 + stats->rx_dropped += tmp;
15449 +
15450 + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
15451 + port_priv->ethsw_data->dpsw_handle,
15452 + port_priv->idx,
15453 + DPSW_CNT_EGR_FRAME_DISCARD,
15454 + &stats->tx_dropped);
15455 + if (err)
15456 + goto error;
15457 +
15458 + return;
15459 +
15460 +error:
15461 + netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
15462 +}
15463 +
15464 +static bool port_has_offload_stats(const struct net_device *netdev,
15465 + int attr_id)
15466 +{
15467 + return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
15468 +}
15469 +
15470 +static int port_get_offload_stats(int attr_id,
15471 + const struct net_device *netdev,
15472 + void *sp)
15473 +{
15474 + switch (attr_id) {
15475 + case IFLA_OFFLOAD_XSTATS_CPU_HIT:
15476 + port_get_stats((struct net_device *)netdev, sp);
15477 + return 0;
15478 + }
15479 +
15480 + return -EINVAL;
15481 +}
15482 +
15483 +static int port_change_mtu(struct net_device *netdev, int mtu)
15484 +{
15485 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15486 + int err;
15487 +
15488 + err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
15489 + 0,
15490 + port_priv->ethsw_data->dpsw_handle,
15491 + port_priv->idx,
15492 + (u16)ETHSW_L2_MAX_FRM(mtu));
15493 + if (err) {
15494 + netdev_err(netdev,
15495 + "dpsw_if_set_max_frame_length() err %d\n", err);
15496 + return err;
15497 + }
15498 +
15499 + netdev->mtu = mtu;
15500 + return 0;
15501 +}
15502 +
15503 +static int port_carrier_state_sync(struct net_device *netdev)
15504 +{
15505 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15506 + struct dpsw_link_state state;
15507 + int err;
15508 +
15509 + err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
15510 + port_priv->ethsw_data->dpsw_handle,
15511 + port_priv->idx, &state);
15512 + if (err) {
15513 + netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
15514 + return err;
15515 + }
15516 +
15517 + WARN_ONCE(state.up > 1, "Garbage read into link_state");
15518 +
15519 + if (state.up != port_priv->link_state) {
15520 + if (state.up)
15521 + netif_carrier_on(netdev);
15522 + else
15523 + netif_carrier_off(netdev);
15524 + port_priv->link_state = state.up;
15525 + }
15526 + return 0;
15527 +}
15528 +
15529 +static int port_open(struct net_device *netdev)
15530 +{
15531 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15532 + int err;
15533 +
15534 + /* No need to allow Tx as control interface is disabled */
15535 + netif_tx_stop_all_queues(netdev);
15536 +
15537 + err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
15538 + port_priv->ethsw_data->dpsw_handle,
15539 + port_priv->idx);
15540 + if (err) {
15541 + netdev_err(netdev, "dpsw_if_enable err %d\n", err);
15542 + return err;
15543 + }
15544 +
15545 + /* sync carrier state */
15546 + err = port_carrier_state_sync(netdev);
15547 + if (err) {
15548 + netdev_err(netdev,
15549 + "port_carrier_state_sync err %d\n", err);
15550 + goto err_carrier_sync;
15551 + }
15552 +
15553 + return 0;
15554 +
15555 +err_carrier_sync:
15556 + dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
15557 + port_priv->ethsw_data->dpsw_handle,
15558 + port_priv->idx);
15559 + return err;
15560 +}
15561 +
15562 +static int port_stop(struct net_device *netdev)
15563 +{
15564 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15565 + int err;
15566 +
15567 + err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
15568 + port_priv->ethsw_data->dpsw_handle,
15569 + port_priv->idx);
15570 + if (err) {
15571 + netdev_err(netdev, "dpsw_if_disable err %d\n", err);
15572 + return err;
15573 + }
15574 +
15575 + return 0;
15576 +}
15577 +
15578 +static netdev_tx_t port_dropframe(struct sk_buff *skb,
15579 + struct net_device *netdev)
15580 +{
15581 + /* we don't support I/O for now, drop the frame */
15582 + dev_kfree_skb_any(skb);
15583 +
15584 + return NETDEV_TX_OK;
15585 +}
15586 +
15587 +static const struct net_device_ops ethsw_port_ops = {
15588 + .ndo_open = port_open,
15589 + .ndo_stop = port_stop,
15590 +
15591 + .ndo_set_mac_address = eth_mac_addr,
15592 + .ndo_change_mtu = port_change_mtu,
15593 + .ndo_has_offload_stats = port_has_offload_stats,
15594 + .ndo_get_offload_stats = port_get_offload_stats,
15595 +
15596 + .ndo_start_xmit = port_dropframe,
15597 +};
15598 +
15599 +static void ethsw_links_state_update(struct ethsw_core *ethsw)
15600 +{
15601 + int i;
15602 +
15603 + for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
15604 + port_carrier_state_sync(ethsw->ports[i]->netdev);
15605 +}
15606 +
15607 +static irqreturn_t ethsw_irq0_handler(int irq_num, void *arg)
15608 +{
15609 + return IRQ_WAKE_THREAD;
15610 +}
15611 +
15612 +static irqreturn_t ethsw_irq0_handler_thread(int irq_num, void *arg)
15613 +{
15614 + struct device *dev = (struct device *)arg;
15615 + struct ethsw_core *ethsw = dev_get_drvdata(dev);
15616 +
15617 + /* Mask the events and the if_id reserved bits to be cleared on read */
15618 + u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
15619 + int err;
15620 +
15621 + err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
15622 + DPSW_IRQ_INDEX_IF, &status);
15623 + if (err) {
15624 + dev_err(dev, "Can't get irq status (err %d)", err);
15625 +
15626 + err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
15627 + DPSW_IRQ_INDEX_IF, 0xFFFFFFFF);
15628 + if (err)
15629 + dev_err(dev, "Can't clear irq status (err %d)", err);
15630 + goto out;
15631 + }
15632 +
15633 + if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
15634 + ethsw_links_state_update(ethsw);
15635 +
15636 +out:
15637 + return IRQ_HANDLED;
15638 +}
15639 +
15640 +static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
15641 +{
15642 + struct device *dev = &sw_dev->dev;
15643 + struct ethsw_core *ethsw = dev_get_drvdata(dev);
15644 + u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
15645 + struct fsl_mc_device_irq *irq;
15646 + int err;
15647 +
15648 + err = fsl_mc_allocate_irqs(sw_dev);
15649 + if (err) {
15650 + dev_err(dev, "MC irqs allocation failed\n");
15651 + return err;
15652 + }
15653 +
15654 + if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
15655 + err = -EINVAL;
15656 + goto free_irq;
15657 + }
15658 +
15659 + err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
15660 + DPSW_IRQ_INDEX_IF, 0);
15661 + if (err) {
15662 + dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
15663 + goto free_irq;
15664 + }
15665 +
15666 + irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
15667 +
15668 + err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
15669 + ethsw_irq0_handler,
15670 + ethsw_irq0_handler_thread,
15671 + IRQF_NO_SUSPEND | IRQF_ONESHOT,
15672 + dev_name(dev), dev);
15673 + if (err) {
15674 + dev_err(dev, "devm_request_threaded_irq(): %d", err);
15675 + goto free_irq;
15676 + }
15677 +
15678 + err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
15679 + DPSW_IRQ_INDEX_IF, mask);
15680 + if (err) {
15681 + dev_err(dev, "dpsw_set_irq_mask(): %d", err);
15682 + goto free_devm_irq;
15683 + }
15684 +
15685 + err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
15686 + DPSW_IRQ_INDEX_IF, 1);
15687 + if (err) {
15688 + dev_err(dev, "dpsw_set_irq_enable(): %d", err);
15689 + goto free_devm_irq;
15690 + }
15691 +
15692 + return 0;
15693 +
15694 +free_devm_irq:
15695 + devm_free_irq(dev, irq->msi_desc->irq, dev);
15696 +free_irq:
15697 + fsl_mc_free_irqs(sw_dev);
15698 + return err;
15699 +}
15700 +
15701 +static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
15702 +{
15703 + struct device *dev = &sw_dev->dev;
15704 + struct ethsw_core *ethsw = dev_get_drvdata(dev);
15705 + struct fsl_mc_device_irq *irq;
15706 + int err;
15707 +
15708 + irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
15709 + err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
15710 + DPSW_IRQ_INDEX_IF, 0);
15711 + if (err)
15712 + dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
15713 +
15714 + fsl_mc_free_irqs(sw_dev);
15715 +}
15716 +
15717 +static int swdev_port_attr_get(struct net_device *netdev,
15718 + struct switchdev_attr *attr)
15719 +{
15720 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15721 +
15722 + switch (attr->id) {
15723 + case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
15724 + attr->u.ppid.id_len = 1;
15725 + attr->u.ppid.id[0] = port_priv->ethsw_data->dev_id;
15726 + break;
15727 + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
15728 + attr->u.brport_flags =
15729 + (port_priv->ethsw_data->learning ? BR_LEARNING : 0) |
15730 + (port_priv->flood ? BR_FLOOD : 0);
15731 + break;
15732 + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
15733 + attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD;
15734 + break;
15735 + default:
15736 + return -EOPNOTSUPP;
15737 + }
15738 +
15739 + return 0;
15740 +}
15741 +
15742 +static int port_attr_stp_state_set(struct net_device *netdev,
15743 + struct switchdev_trans *trans,
15744 + u8 state)
15745 +{
15746 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15747 +
15748 + if (switchdev_trans_ph_prepare(trans))
15749 + return 0;
15750 +
15751 + return ethsw_port_set_stp_state(port_priv, state);
15752 +}
15753 +
15754 +static int port_attr_br_flags_set(struct net_device *netdev,
15755 + struct switchdev_trans *trans,
15756 + unsigned long flags)
15757 +{
15758 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15759 + int err = 0;
15760 +
15761 + if (switchdev_trans_ph_prepare(trans))
15762 + return 0;
15763 +
15764 + /* Learning is enabled per switch */
15765 + err = ethsw_set_learning(port_priv->ethsw_data, !!(flags & BR_LEARNING));
15766 + if (err)
15767 + goto exit;
15768 +
15769 + err = ethsw_port_set_flood(port_priv, !!(flags & BR_FLOOD));
15770 +
15771 +exit:
15772 + return err;
15773 +}
15774 +
15775 +static int swdev_port_attr_set(struct net_device *netdev,
15776 + const struct switchdev_attr *attr,
15777 + struct switchdev_trans *trans)
15778 +{
15779 + int err = 0;
15780 +
15781 + switch (attr->id) {
15782 + case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
15783 + err = port_attr_stp_state_set(netdev, trans,
15784 + attr->u.stp_state);
15785 + break;
15786 + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
15787 + err = port_attr_br_flags_set(netdev, trans,
15788 + attr->u.brport_flags);
15789 + break;
15790 + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
15791 + /* VLANs are supported by default */
15792 + break;
15793 + default:
15794 + err = -EOPNOTSUPP;
15795 + break;
15796 + }
15797 +
15798 + return err;
15799 +}
15800 +
15801 +static int port_vlans_add(struct net_device *netdev,
15802 +			  const struct switchdev_obj_port_vlan *vlan,
15803 +			  struct switchdev_trans *trans)
15804 +{
15805 +	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15806 +	int vid, err = 0;
15807 +
15808 +	if (switchdev_trans_ph_prepare(trans))
15809 +		return 0;
15810 +
15811 +	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
15812 +		if (!port_priv->ethsw_data->vlans[vid]) {
15813 +			/* this is a new VLAN */
15814 +			err = ethsw_add_vlan(port_priv->ethsw_data, vid);
15815 +			if (err)
15816 +				return err;
15817 +
15818 +			port_priv->ethsw_data->vlans[vid] |= ETHSW_VLAN_GLOBAL;
15819 +		}
15820 +		err = ethsw_port_add_vlan(port_priv, vid, vlan->flags);
15821 +		if (err)
15822 +			break;
15823 +	}
15824 +
15825 +	return err;
15826 +}
15827 +
15828 +static int swdev_port_obj_add(struct net_device *netdev,
15829 + const struct switchdev_obj *obj,
15830 + struct switchdev_trans *trans)
15831 +{
15832 + int err;
15833 +
15834 + switch (obj->id) {
15835 + case SWITCHDEV_OBJ_ID_PORT_VLAN:
15836 + err = port_vlans_add(netdev,
15837 + SWITCHDEV_OBJ_PORT_VLAN(obj),
15838 + trans);
15839 + break;
15840 + default:
15841 + err = -EOPNOTSUPP;
15842 + break;
15843 + }
15844 +
15845 + return err;
15846 +}
15847 +
15848 +static int ethsw_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
15849 +{
15850 + struct ethsw_core *ethsw = port_priv->ethsw_data;
15851 + struct net_device *netdev = port_priv->netdev;
15852 + struct dpsw_vlan_if_cfg vcfg;
15853 + int i, err;
15854 +
15855 + if (!port_priv->vlans[vid])
15856 + return -ENOENT;
15857 +
15858 + if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
15859 + err = ethsw_port_set_pvid(port_priv, 0);
15860 + if (err)
15861 + return err;
15862 + }
15863 +
15864 + vcfg.num_ifs = 1;
15865 + vcfg.if_id[0] = port_priv->idx;
15866 + if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
15867 + err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
15868 + ethsw->dpsw_handle,
15869 + vid, &vcfg);
15870 + if (err) {
15871 + netdev_err(netdev,
15872 + "dpsw_vlan_remove_if_untagged err %d\n",
15873 + err);
15874 + }
15875 + port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
15876 + }
15877 +
15878 + if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
15879 + err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
15880 + vid, &vcfg);
15881 + if (err) {
15882 + netdev_err(netdev,
15883 + "dpsw_vlan_remove_if err %d\n", err);
15884 + return err;
15885 + }
15886 + port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;
15887 +
15888 + /* Delete VLAN from switch if it is no longer configured on
15889 + * any port
15890 + */
15891 + for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
15892 + if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
15893 + return 0; /* Found a port member in VID */
15894 +
15895 + ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
15896 +
15897 + err = ethsw_dellink_switch(ethsw, vid);
15898 + if (err)
15899 + return err;
15900 + }
15901 +
15902 + return 0;
15903 +}
15904 +
15905 +static int port_vlans_del(struct net_device *netdev,
15906 +			  const struct switchdev_obj_port_vlan *vlan)
15907 +{
15908 +	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15909 +	int vid, err = 0;
15910 +
15911 +	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
15912 +		err = ethsw_port_del_vlan(port_priv, vid);
15913 +		if (err)
15914 +			break;
15915 +	}
15916 +
15917 +	return err;
15918 +}
15919 +
15920 +static int swdev_port_obj_del(struct net_device *netdev,
15921 + const struct switchdev_obj *obj)
15922 +{
15923 + int err;
15924 +
15925 + switch (obj->id) {
15926 + case SWITCHDEV_OBJ_ID_PORT_VLAN:
15927 + err = port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
15928 + break;
15929 + default:
15930 + err = -EOPNOTSUPP;
15931 + break;
15932 + }
15933 + return err;
15934 +}
15935 +
15936 +static const struct switchdev_ops ethsw_port_switchdev_ops = {
15937 + .switchdev_port_attr_get = swdev_port_attr_get,
15938 + .switchdev_port_attr_set = swdev_port_attr_set,
15939 + .switchdev_port_obj_add = swdev_port_obj_add,
15940 + .switchdev_port_obj_del = swdev_port_obj_del,
15941 +};
15942 +
15943 +/* For the moment, only flood setting needs to be updated */
15944 +static int port_bridge_join(struct net_device *netdev)
15945 +{
15946 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15947 +
15948 + /* Enable flooding */
15949 + return ethsw_port_set_flood(port_priv, 1);
15950 +}
15951 +
15952 +static int port_bridge_leave(struct net_device *netdev)
15953 +{
15954 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15955 +
15956 + /* Disable flooding */
15957 + return ethsw_port_set_flood(port_priv, 0);
15958 +}
15959 +
15960 +static int port_netdevice_event(struct notifier_block *unused,
15961 + unsigned long event, void *ptr)
15962 +{
15963 + struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
15964 + struct netdev_notifier_changeupper_info *info = ptr;
15965 + struct net_device *upper_dev;
15966 + int err = 0;
15967 +
15968 + if (netdev->netdev_ops != &ethsw_port_ops)
15969 + return NOTIFY_DONE;
15970 +
15971 + /* Handle just upper dev link/unlink for the moment */
15972 + if (event == NETDEV_CHANGEUPPER) {
15973 + upper_dev = info->upper_dev;
15974 + if (netif_is_bridge_master(upper_dev)) {
15975 + if (info->linking)
15976 + err = port_bridge_join(netdev);
15977 + else
15978 + err = port_bridge_leave(netdev);
15979 + }
15980 + }
15981 +
15982 + return notifier_from_errno(err);
15983 +}
15984 +
15985 +static struct notifier_block port_nb __read_mostly = {
15986 + .notifier_call = port_netdevice_event,
15987 +};
15988 +
15989 +struct ethsw_switchdev_event_work {
15990 + struct work_struct work;
15991 + struct switchdev_notifier_fdb_info fdb_info;
15992 + struct net_device *dev;
15993 + unsigned long event;
15994 +};
15995 +
15996 +static void ethsw_switchdev_event_work(struct work_struct *work)
15997 +{
15998 + struct ethsw_switchdev_event_work *switchdev_work =
15999 + container_of(work, struct ethsw_switchdev_event_work, work);
16000 + struct net_device *dev = switchdev_work->dev;
16001 + struct switchdev_notifier_fdb_info *fdb_info;
16002 + struct ethsw_port_priv *port_priv;
16003 +
16004 + rtnl_lock();
16005 + port_priv = netdev_priv(dev);
16006 + fdb_info = &switchdev_work->fdb_info;
16007 +
16008 + switch (switchdev_work->event) {
16009 + case SWITCHDEV_FDB_ADD_TO_DEVICE:
16010 + if (is_unicast_ether_addr(fdb_info->addr))
16011 + ethsw_port_fdb_add_uc(netdev_priv(dev), fdb_info->addr);
16012 + else
16013 + ethsw_port_fdb_add_mc(netdev_priv(dev), fdb_info->addr);
16014 + break;
16015 + case SWITCHDEV_FDB_DEL_TO_DEVICE:
16016 + if (is_unicast_ether_addr(fdb_info->addr))
16017 + ethsw_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
16018 + else
16019 + ethsw_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
16020 + break;
16021 + }
16022 +
16023 + rtnl_unlock();
16024 + kfree(switchdev_work->fdb_info.addr);
16025 + kfree(switchdev_work);
16026 + dev_put(dev);
16027 +}
16028 +
16029 +/* Called under rcu_read_lock() */
16030 +static int port_switchdev_event(struct notifier_block *unused,
16031 +				unsigned long event, void *ptr)
16032 +{
16033 +	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
16034 +	struct ethsw_switchdev_event_work *switchdev_work;
16035 +	struct switchdev_notifier_fdb_info *fdb_info = ptr;
16036 +
16037 +	/* Only FDB add/del events are handled; check before allocating so
16038 +	 * unhandled events do not leak the work struct.
16039 +	 */
16040 +	if (event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
16041 +	    event != SWITCHDEV_FDB_DEL_TO_DEVICE)
16042 +		return NOTIFY_DONE;
16043 +
16044 +	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
16045 +	if (!switchdev_work)
16046 +		return NOTIFY_BAD;
16047 +
16048 +	INIT_WORK(&switchdev_work->work, ethsw_switchdev_event_work);
16049 +	switchdev_work->dev = dev;
16050 +	switchdev_work->event = event;
16051 +
16052 +	memcpy(&switchdev_work->fdb_info, ptr,
16053 +	       sizeof(switchdev_work->fdb_info));
16054 +	switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
16055 +	if (!switchdev_work->fdb_info.addr)
16056 +		goto err_addr_alloc;
16057 +
16058 +	ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
16059 +			fdb_info->addr);
16060 +
16061 +	/* Take a reference on the device to avoid being freed. */
16062 +	dev_hold(dev);
16063 +
16064 +	queue_work(ethsw_owq, &switchdev_work->work);
16065 +
16066 +	return NOTIFY_DONE;
16067 +
16068 +err_addr_alloc:
16069 +	kfree(switchdev_work);
16070 +	return NOTIFY_BAD;
16071 +}
16072 +
16073 +static struct notifier_block port_switchdev_nb = {
16074 + .notifier_call = port_switchdev_event,
16075 +};
16076 +
16077 +static int ethsw_register_notifier(struct device *dev)
16078 +{
16079 + int err;
16080 +
16081 + err = register_netdevice_notifier(&port_nb);
16082 + if (err) {
16083 + dev_err(dev, "Failed to register netdev notifier\n");
16084 + return err;
16085 + }
16086 +
16087 + err = register_switchdev_notifier(&port_switchdev_nb);
16088 + if (err) {
16089 + dev_err(dev, "Failed to register switchdev notifier\n");
16090 + goto err_switchdev_nb;
16091 + }
16092 +
16093 + return 0;
16094 +
16095 +err_switchdev_nb:
16096 + unregister_netdevice_notifier(&port_nb);
16097 + return err;
16098 +}
16099 +
16100 +static int ethsw_open(struct ethsw_core *ethsw)
16101 +{
16102 + struct ethsw_port_priv *port_priv = NULL;
16103 + int i, err;
16104 +
16105 + err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
16106 + if (err) {
16107 + dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
16108 + return err;
16109 + }
16110 +
16111 + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
16112 + port_priv = ethsw->ports[i];
16113 + err = dev_open(port_priv->netdev);
16114 + if (err) {
16115 + netdev_err(port_priv->netdev, "dev_open err %d\n", err);
16116 + return err;
16117 + }
16118 + }
16119 +
16120 + return 0;
16121 +}
16122 +
16123 +static int ethsw_stop(struct ethsw_core *ethsw)
16124 +{
16125 + struct ethsw_port_priv *port_priv = NULL;
16126 + int i, err;
16127 +
16128 + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
16129 + port_priv = ethsw->ports[i];
16130 + dev_close(port_priv->netdev);
16131 + }
16132 +
16133 + err = dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
16134 + if (err) {
16135 + dev_err(ethsw->dev, "dpsw_disable err %d\n", err);
16136 + return err;
16137 + }
16138 +
16139 + return 0;
16140 +}
16141 +
16142 +static int ethsw_init(struct fsl_mc_device *sw_dev)
16143 +{
16144 + struct device *dev = &sw_dev->dev;
16145 + struct ethsw_core *ethsw = dev_get_drvdata(dev);
16146 + u16 version_major, version_minor, i;
16147 + struct dpsw_stp_cfg stp_cfg;
16148 + int err;
16149 +
16150 + ethsw->dev_id = sw_dev->obj_desc.id;
16151 +
16152 + err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
16153 + if (err) {
16154 + dev_err(dev, "dpsw_open err %d\n", err);
16155 + return err;
16156 + }
16157 +
16158 + err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
16159 + &ethsw->sw_attr);
16160 + if (err) {
16161 + dev_err(dev, "dpsw_get_attributes err %d\n", err);
16162 + goto err_close;
16163 + }
16164 +
16165 + err = dpsw_get_api_version(ethsw->mc_io, 0,
16166 + &version_major,
16167 + &version_minor);
16168 + if (err) {
16169 + dev_err(dev, "dpsw_get_api_version err %d\n", err);
16170 + goto err_close;
16171 + }
16172 +
16173 + /* Minimum supported DPSW version check */
16174 + if (version_major < DPSW_MIN_VER_MAJOR ||
16175 + (version_major == DPSW_MIN_VER_MAJOR &&
16176 + version_minor < DPSW_MIN_VER_MINOR)) {
16177 + dev_err(dev, "DPSW version %d:%d not supported. Use %d.%d or greater.\n",
16178 + version_major,
16179 + version_minor,
16180 + DPSW_MIN_VER_MAJOR, DPSW_MIN_VER_MINOR);
16181 + err = -ENOTSUPP;
16182 + goto err_close;
16183 + }
16184 +
16185 + err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
16186 + if (err) {
16187 + dev_err(dev, "dpsw_reset err %d\n", err);
16188 + goto err_close;
16189 + }
16190 +
16191 + err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
16192 + DPSW_FDB_LEARNING_MODE_HW);
16193 + if (err) {
16194 + dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err);
16195 + goto err_close;
16196 + }
16197 +
16198 + stp_cfg.vlan_id = DEFAULT_VLAN_ID;
16199 + stp_cfg.state = DPSW_STP_STATE_FORWARDING;
16200 +
16201 + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
16202 + err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
16203 + &stp_cfg);
16204 + if (err) {
16205 + dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
16206 + err, i);
16207 + goto err_close;
16208 + }
16209 +
16210 + err = dpsw_if_set_broadcast(ethsw->mc_io, 0,
16211 + ethsw->dpsw_handle, i, 1);
16212 + if (err) {
16213 + dev_err(dev,
16214 + "dpsw_if_set_broadcast err %d for port %d\n",
16215 + err, i);
16216 + goto err_close;
16217 + }
16218 + }
16219 +
16220 + ethsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
16221 + "ethsw");
16222 + if (!ethsw_owq) {
16223 + err = -ENOMEM;
16224 + goto err_close;
16225 + }
16226 +
16227 + err = ethsw_register_notifier(dev);
16228 + if (err)
16229 + goto err_destroy_ordered_workqueue;
16230 +
16231 + return 0;
16232 +
16233 +err_destroy_ordered_workqueue:
16234 + destroy_workqueue(ethsw_owq);
16235 +
16236 +err_close:
16237 + dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
16238 + return err;
16239 +}
16240 +
16241 +static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
16242 +{
16243 + const char def_mcast[ETH_ALEN] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};
16244 + struct net_device *netdev = port_priv->netdev;
16245 + struct ethsw_core *ethsw = port_priv->ethsw_data;
16246 + struct dpsw_vlan_if_cfg vcfg;
16247 + int err;
16248 +
16249 + /* Switch starts with all ports configured to VLAN 1. Need to
16250 + * remove this setting to allow configuration at bridge join
16251 + */
16252 + vcfg.num_ifs = 1;
16253 + vcfg.if_id[0] = port_priv->idx;
16254 +
16255 + err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
16256 + DEFAULT_VLAN_ID, &vcfg);
16257 + if (err) {
16258 + netdev_err(netdev, "dpsw_vlan_remove_if_untagged err %d\n",
16259 + err);
16260 + return err;
16261 + }
16262 +
16263 + err = ethsw_port_set_pvid(port_priv, 0);
16264 + if (err)
16265 + return err;
16266 +
16267 + err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
16268 + DEFAULT_VLAN_ID, &vcfg);
16269 + if (err) {
16270 + netdev_err(netdev, "dpsw_vlan_remove_if err %d\n", err);
16271 + return err;
16272 + }
16273 +
16274 + err = ethsw_port_fdb_add_mc(port_priv, def_mcast);
16275 +
16276 + return err;
16277 +}
16278 +
16279 +static void ethsw_unregister_notifier(struct device *dev)
16280 +{
16281 + int err;
16282 +
16283 + err = unregister_switchdev_notifier(&port_switchdev_nb);
16284 + if (err)
16285 + dev_err(dev,
16286 + "Failed to unregister switchdev notifier (%d)\n", err);
16287 +
16288 + err = unregister_netdevice_notifier(&port_nb);
16289 + if (err)
16290 + dev_err(dev,
16291 + "Failed to unregister netdev notifier (%d)\n", err);
16292 +}
16293 +
16294 +static void ethsw_takedown(struct fsl_mc_device *sw_dev)
16295 +{
16296 + struct device *dev = &sw_dev->dev;
16297 + struct ethsw_core *ethsw = dev_get_drvdata(dev);
16298 + int err;
16299 +
16300 + ethsw_unregister_notifier(dev);
16301 +
16302 + err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
16303 + if (err)
16304 + dev_warn(dev, "dpsw_close err %d\n", err);
16305 +}
16306 +
16307 +static int ethsw_remove(struct fsl_mc_device *sw_dev)
16308 +{
16309 + struct ethsw_port_priv *port_priv;
16310 + struct ethsw_core *ethsw;
16311 + struct device *dev;
16312 + int i;
16313 +
16314 + dev = &sw_dev->dev;
16315 + ethsw = dev_get_drvdata(dev);
16316 +
16317 + ethsw_teardown_irqs(sw_dev);
16318 +
16319 + destroy_workqueue(ethsw_owq);
16320 +
16321 + rtnl_lock();
16322 + ethsw_stop(ethsw);
16323 + rtnl_unlock();
16324 +
16325 + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
16326 + port_priv = ethsw->ports[i];
16327 + unregister_netdev(port_priv->netdev);
16328 + free_netdev(port_priv->netdev);
16329 + }
16330 + kfree(ethsw->ports);
16331 +
16332 + ethsw_takedown(sw_dev);
16333 + fsl_mc_portal_free(ethsw->mc_io);
16334 +
16335 + kfree(ethsw);
16336 +
16337 + dev_set_drvdata(dev, NULL);
16338 +
16339 + return 0;
16340 +}
16341 +
16342 +static int ethsw_probe_port(struct ethsw_core *ethsw, u16 port_idx)
16343 +{
16344 + struct ethsw_port_priv *port_priv;
16345 + struct device *dev = ethsw->dev;
16346 + struct net_device *port_netdev;
16347 + int err;
16348 +
16349 + port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
16350 + if (!port_netdev) {
16351 + dev_err(dev, "alloc_etherdev error\n");
16352 + return -ENOMEM;
16353 + }
16354 +
16355 + port_priv = netdev_priv(port_netdev);
16356 + port_priv->netdev = port_netdev;
16357 + port_priv->ethsw_data = ethsw;
16358 +
16359 + port_priv->idx = port_idx;
16360 + port_priv->stp_state = BR_STATE_FORWARDING;
16361 +
16362 + /* Flooding is implicitly enabled */
16363 + port_priv->flood = true;
16364 +
16365 + SET_NETDEV_DEV(port_netdev, dev);
16366 + port_netdev->netdev_ops = &ethsw_port_ops;
16367 + port_netdev->ethtool_ops = &ethsw_port_ethtool_ops;
16368 + port_netdev->switchdev_ops = &ethsw_port_switchdev_ops;
16369 +
16370 + /* Set MTU limits */
16371 + port_netdev->min_mtu = ETH_MIN_MTU;
16372 + port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
16373 +
16374 + err = register_netdev(port_netdev);
16375 + if (err < 0) {
16376 + dev_err(dev, "register_netdev error %d\n", err);
16377 + free_netdev(port_netdev);
16378 + return err;
16379 + }
16380 +
16381 + ethsw->ports[port_idx] = port_priv;
16382 +
16383 + return ethsw_port_init(port_priv, port_idx);
16384 +}
16385 +
16386 +static int ethsw_probe(struct fsl_mc_device *sw_dev)
16387 +{
16388 + struct device *dev = &sw_dev->dev;
16389 + struct ethsw_core *ethsw;
16390 + int i, err;
16391 +
16392 + /* Allocate switch core*/
16393 + ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
16394 +
16395 + if (!ethsw)
16396 + return -ENOMEM;
16397 +
16398 + ethsw->dev = dev;
16399 + dev_set_drvdata(dev, ethsw);
16400 +
16401 + err = fsl_mc_portal_allocate(sw_dev, 0, &ethsw->mc_io);
16402 + if (err) {
16403 + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
16404 + goto err_free_drvdata;
16405 + }
16406 +
16407 + err = ethsw_init(sw_dev);
16408 + if (err)
16409 + goto err_free_cmdport;
16410 +
16411 + /* DEFAULT_VLAN_ID is implicitly configured on the switch */
16412 + ethsw->vlans[DEFAULT_VLAN_ID] = ETHSW_VLAN_MEMBER;
16413 +
16414 + /* Learning is implicitly enabled */
16415 + ethsw->learning = true;
16416 +
16417 + ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
16418 + GFP_KERNEL);
16419 + if (!(ethsw->ports)) {
16420 + err = -ENOMEM;
16421 + goto err_takedown;
16422 + }
16423 +
16424 + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
16425 + err = ethsw_probe_port(ethsw, i);
16426 + if (err)
16427 + goto err_free_ports;
16428 + }
16429 +
16430 + /* Switch starts up enabled */
16431 + rtnl_lock();
16432 + err = ethsw_open(ethsw);
16433 + rtnl_unlock();
16434 + if (err)
16435 + goto err_free_ports;
16436 +
16437 + /* Setup IRQs */
16438 + err = ethsw_setup_irqs(sw_dev);
16439 + if (err)
16440 + goto err_stop;
16441 +
16442 + dev_info(dev, "probed %d port switch\n", ethsw->sw_attr.num_ifs);
16443 + return 0;
16444 +
16445 +err_stop:
16446 + rtnl_lock();
16447 + ethsw_stop(ethsw);
16448 + rtnl_unlock();
16449 +
16450 +err_free_ports:
16451 + /* Cleanup registered ports only */
16452 + for (i--; i >= 0; i--) {
16453 + unregister_netdev(ethsw->ports[i]->netdev);
16454 + free_netdev(ethsw->ports[i]->netdev);
16455 + }
16456 + kfree(ethsw->ports);
16457 +
16458 +err_takedown:
16459 + ethsw_takedown(sw_dev);
16460 +
16461 +err_free_cmdport:
16462 + fsl_mc_portal_free(ethsw->mc_io);
16463 +
16464 +err_free_drvdata:
16465 + kfree(ethsw);
16466 + dev_set_drvdata(dev, NULL);
16467 +
16468 + return err;
16469 +}
16470 +
16471 +static const struct fsl_mc_device_id ethsw_match_id_table[] = {
16472 + {
16473 + .vendor = FSL_MC_VENDOR_FREESCALE,
16474 + .obj_type = "dpsw",
16475 + },
16476 + { .vendor = 0x0 }
16477 +};
16478 +MODULE_DEVICE_TABLE(fslmc, ethsw_match_id_table);
16479 +
16480 +static struct fsl_mc_driver eth_sw_drv = {
16481 + .driver = {
16482 + .name = KBUILD_MODNAME,
16483 + .owner = THIS_MODULE,
16484 + },
16485 + .probe = ethsw_probe,
16486 + .remove = ethsw_remove,
16487 + .match_id_table = ethsw_match_id_table
16488 +};
16489 +
16490 +module_fsl_mc_driver(eth_sw_drv);
16491 +
16492 +MODULE_LICENSE("Dual BSD/GPL");
16493 +MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");
16494 --- /dev/null
16495 +++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
16496 @@ -0,0 +1,90 @@
16497 +/* Copyright 2014-2017 Freescale Semiconductor Inc.
16498 + * Copyright 2017 NXP
16499 + *
16500 + * Redistribution and use in source and binary forms, with or without
16501 + * modification, are permitted provided that the following conditions are met:
16502 + * * Redistributions of source code must retain the above copyright
16503 + * notice, this list of conditions and the following disclaimer.
16504 + * * Redistributions in binary form must reproduce the above copyright
16505 + * notice, this list of conditions and the following disclaimer in the
16506 + * documentation and/or other materials provided with the distribution.
16507 + * * Neither the name of the above-listed copyright holders nor the
16508 + * names of any contributors may be used to endorse or promote products
16509 + * derived from this software without specific prior written permission.
16510 + *
16511 + *
16512 + * ALTERNATIVELY, this software may be distributed under the terms of the
16513 + * GNU General Public License ("GPL") as published by the Free Software
16514 + * Foundation, either version 2 of that License or (at your option) any
16515 + * later version.
16516 + *
16517 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16518 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16519 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16520 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
16521 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
16522 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
16523 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
16524 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
16525 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
16526 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
16527 + * POSSIBILITY OF SUCH DAMAGE.
16528 + */
16529 +
16530 +#ifndef __ETHSW_H
16531 +#define __ETHSW_H
16532 +
16533 +#include <linux/netdevice.h>
16534 +#include <linux/etherdevice.h>
16535 +#include <linux/rtnetlink.h>
16536 +#include <linux/if_vlan.h>
16537 +#include <uapi/linux/if_bridge.h>
16538 +#include <net/switchdev.h>
16539 +#include <linux/if_bridge.h>
16540 +
16541 +#include "dpsw.h"
16542 +
16543 +/* Number of IRQs supported */
16544 +#define DPSW_IRQ_NUM 2
16545 +
16546 +#define ETHSW_VLAN_MEMBER 1
16547 +#define ETHSW_VLAN_UNTAGGED 2
16548 +#define ETHSW_VLAN_PVID 4
16549 +#define ETHSW_VLAN_GLOBAL 8
16550 +
16551 +/* Maximum Frame Length supported by HW (currently 10k) */
16552 +#define DPAA2_MFL (10 * 1024)
16553 +#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
16554 +#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
16555 +
16556 +extern const struct ethtool_ops ethsw_port_ethtool_ops;
16557 +
16558 +struct ethsw_core;
16559 +
16560 +/* Per port private data */
16561 +struct ethsw_port_priv {
16562 + struct net_device *netdev;
16563 + u16 idx;
16564 + struct ethsw_core *ethsw_data;
16565 + u8 link_state;
16566 + u8 stp_state;
16567 + bool flood;
16568 +
16569 + u8 vlans[VLAN_VID_MASK + 1];
16570 + u16 pvid;
16571 +};
16572 +
16573 +/* Switch data */
16574 +struct ethsw_core {
16575 + struct device *dev;
16576 + struct fsl_mc_io *mc_io;
16577 + u16 dpsw_handle;
16578 + struct dpsw_attr sw_attr;
16579 + int dev_id;
16580 + struct ethsw_port_priv **ports;
16581 +
16582 + u8 vlans[VLAN_VID_MASK + 1];
16583 + bool learning;
16584 +};
16585 +
16586 +#endif /* __ETHSW_H */
16587 --- /dev/null
16588 +++ b/drivers/staging/fsl-dpaa2/evb/Kconfig
16589 @@ -0,0 +1,7 @@
16590 +config FSL_DPAA2_EVB
16591 + tristate "DPAA2 Edge Virtual Bridge"
16592 + depends on FSL_MC_BUS && FSL_DPAA2
16593 + select VLAN_8021Q
16594 + default y
16595 + ---help---
16596 + Prototype driver for DPAA2 Edge Virtual Bridge.
16597 --- /dev/null
16598 +++ b/drivers/staging/fsl-dpaa2/evb/Makefile
16599 @@ -0,0 +1,10 @@
16600 +
16601 +obj-$(CONFIG_FSL_DPAA2_EVB) += dpaa2-evb.o
16602 +
16603 +dpaa2-evb-objs := evb.o dpdmux.o
16604 +
16605 +all:
16606 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
16607 +
16608 +clean:
16609 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
16610 --- /dev/null
16611 +++ b/drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
16612 @@ -0,0 +1,279 @@
16613 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
16614 + *
16615 + * Redistribution and use in source and binary forms, with or without
16616 + * modification, are permitted provided that the following conditions are met:
16617 + * * Redistributions of source code must retain the above copyright
16618 + * notice, this list of conditions and the following disclaimer.
16619 + * * Redistributions in binary form must reproduce the above copyright
16620 + * notice, this list of conditions and the following disclaimer in the
16621 + * documentation and/or other materials provided with the distribution.
16622 + * * Neither the name of the above-listed copyright holders nor the
16623 + * names of any contributors may be used to endorse or promote products
16624 + * derived from this software without specific prior written permission.
16625 + *
16626 + *
16627 + * ALTERNATIVELY, this software may be distributed under the terms of the
16628 + * GNU General Public License ("GPL") as published by the Free Software
16629 + * Foundation, either version 2 of that License or (at your option) any
16630 + * later version.
16631 + *
16632 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16633 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16634 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16635 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
16636 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
16637 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
16638 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
16639 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
16640 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
16641 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
16642 + * POSSIBILITY OF SUCH DAMAGE.
16643 + */
16644 +#ifndef _FSL_DPDMUX_CMD_H
16645 +#define _FSL_DPDMUX_CMD_H
16646 +
16647 +/* DPDMUX Version */
16648 +#define DPDMUX_VER_MAJOR 6
16649 +#define DPDMUX_VER_MINOR 1
16650 +
16651 +#define DPDMUX_CMD_BASE_VER 1
16652 +#define DPDMUX_CMD_ID_OFFSET 4
16653 +
16654 +#define DPDMUX_CMD(id) (((id) << DPDMUX_CMD_ID_OFFSET) | DPDMUX_CMD_BASE_VER)
16655 +
16656 +/* Command IDs */
16657 +#define DPDMUX_CMDID_CLOSE DPDMUX_CMD(0x800)
16658 +#define DPDMUX_CMDID_OPEN DPDMUX_CMD(0x806)
16659 +#define DPDMUX_CMDID_CREATE DPDMUX_CMD(0x906)
16660 +#define DPDMUX_CMDID_DESTROY DPDMUX_CMD(0x986)
16661 +#define DPDMUX_CMDID_GET_API_VERSION DPDMUX_CMD(0xa06)
16662 +
16663 +#define DPDMUX_CMDID_ENABLE DPDMUX_CMD(0x002)
16664 +#define DPDMUX_CMDID_DISABLE DPDMUX_CMD(0x003)
16665 +#define DPDMUX_CMDID_GET_ATTR DPDMUX_CMD(0x004)
16666 +#define DPDMUX_CMDID_RESET DPDMUX_CMD(0x005)
16667 +#define DPDMUX_CMDID_IS_ENABLED DPDMUX_CMD(0x006)
16668 +
16669 +#define DPDMUX_CMDID_SET_IRQ_ENABLE DPDMUX_CMD(0x012)
16670 +#define DPDMUX_CMDID_GET_IRQ_ENABLE DPDMUX_CMD(0x013)
16671 +#define DPDMUX_CMDID_SET_IRQ_MASK DPDMUX_CMD(0x014)
16672 +#define DPDMUX_CMDID_GET_IRQ_MASK DPDMUX_CMD(0x015)
16673 +#define DPDMUX_CMDID_GET_IRQ_STATUS DPDMUX_CMD(0x016)
16674 +#define DPDMUX_CMDID_CLEAR_IRQ_STATUS DPDMUX_CMD(0x017)
16675 +
16676 +#define DPDMUX_CMDID_SET_MAX_FRAME_LENGTH DPDMUX_CMD(0x0a1)
16677 +
16678 +#define DPDMUX_CMDID_UL_RESET_COUNTERS DPDMUX_CMD(0x0a3)
16679 +
16680 +#define DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES DPDMUX_CMD(0x0a7)
16681 +#define DPDMUX_CMDID_IF_GET_ATTR DPDMUX_CMD(0x0a8)
16682 +#define DPDMUX_CMDID_IF_ENABLE DPDMUX_CMD(0x0a9)
16683 +#define DPDMUX_CMDID_IF_DISABLE DPDMUX_CMD(0x0aa)
16684 +
16685 +#define DPDMUX_CMDID_IF_ADD_L2_RULE DPDMUX_CMD(0x0b0)
16686 +#define DPDMUX_CMDID_IF_REMOVE_L2_RULE DPDMUX_CMD(0x0b1)
16687 +#define DPDMUX_CMDID_IF_GET_COUNTER DPDMUX_CMD(0x0b2)
16688 +#define DPDMUX_CMDID_IF_SET_LINK_CFG DPDMUX_CMD(0x0b3)
16689 +#define DPDMUX_CMDID_IF_GET_LINK_STATE DPDMUX_CMD(0x0b4)
16690 +
16691 +#define DPDMUX_CMDID_SET_CUSTOM_KEY DPDMUX_CMD(0x0b5)
16692 +#define DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b6)
16693 +#define DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b7)
16694 +
16695 +#define DPDMUX_MASK(field) \
16696 + GENMASK(DPDMUX_##field##_SHIFT + DPDMUX_##field##_SIZE - 1, \
16697 + DPDMUX_##field##_SHIFT)
16698 +#define dpdmux_set_field(var, field, val) \
16699 + ((var) |= (((val) << DPDMUX_##field##_SHIFT) & DPDMUX_MASK(field)))
16700 +#define dpdmux_get_field(var, field) \
16701 + (((var) & DPDMUX_MASK(field)) >> DPDMUX_##field##_SHIFT)
16702 +
16703 +struct dpdmux_cmd_open {
16704 + u32 dpdmux_id;
16705 +};
16706 +
16707 +struct dpdmux_cmd_create {
16708 + u8 method;
16709 + u8 manip;
16710 + u16 num_ifs;
16711 + u32 pad;
16712 +
16713 + u16 adv_max_dmat_entries;
16714 + u16 adv_max_mc_groups;
16715 + u16 adv_max_vlan_ids;
16716 + u16 pad1;
16717 +
16718 + u64 options;
16719 +};
16720 +
16721 +struct dpdmux_cmd_destroy {
16722 + u32 dpdmux_id;
16723 +};
16724 +
16725 +#define DPDMUX_ENABLE_SHIFT 0
16726 +#define DPDMUX_ENABLE_SIZE 1
16727 +
16728 +struct dpdmux_rsp_is_enabled {
16729 + u8 en;
16730 +};
16731 +
16732 +struct dpdmux_cmd_set_irq_enable {
16733 + u8 enable;
16734 + u8 pad[3];
16735 + u8 irq_index;
16736 +};
16737 +
16738 +struct dpdmux_cmd_get_irq_enable {
16739 + u32 pad;
16740 + u8 irq_index;
16741 +};
16742 +
16743 +struct dpdmux_rsp_get_irq_enable {
16744 + u8 enable;
16745 +};
16746 +
16747 +struct dpdmux_cmd_set_irq_mask {
16748 + u32 mask;
16749 + u8 irq_index;
16750 +};
16751 +
16752 +struct dpdmux_cmd_get_irq_mask {
16753 + u32 pad;
16754 + u8 irq_index;
16755 +};
16756 +
16757 +struct dpdmux_rsp_get_irq_mask {
16758 + u32 mask;
16759 +};
16760 +
16761 +struct dpdmux_cmd_get_irq_status {
16762 + u32 status;
16763 + u8 irq_index;
16764 +};
16765 +
16766 +struct dpdmux_rsp_get_irq_status {
16767 + u32 status;
16768 +};
16769 +
16770 +struct dpdmux_cmd_clear_irq_status {
16771 + u32 status;
16772 + u8 irq_index;
16773 +};
16774 +
16775 +struct dpdmux_rsp_get_attr {
16776 + u8 method;
16777 + u8 manip;
16778 + u16 num_ifs;
16779 + u16 mem_size;
16780 + u16 pad;
16781 +
16782 + u64 pad1;
16783 +
16784 + u32 id;
16785 + u32 pad2;
16786 +
16787 + u64 options;
16788 +};
16789 +
16790 +struct dpdmux_cmd_set_max_frame_length {
16791 + u16 max_frame_length;
16792 +};
16793 +
16794 +#define DPDMUX_ACCEPTED_FRAMES_TYPE_SHIFT 0
16795 +#define DPDMUX_ACCEPTED_FRAMES_TYPE_SIZE 4
16796 +#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SHIFT 4
16797 +#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SIZE 4
16798 +
16799 +struct dpdmux_cmd_if_set_accepted_frames {
16800 + u16 if_id;
16801 + u8 frames_options;
16802 +};
16803 +
16804 +struct dpdmux_cmd_if {
16805 + u16 if_id;
16806 +};
16807 +
16808 +struct dpdmux_rsp_if_get_attr {
16809 + u8 pad[3];
16810 + u8 enabled;
16811 + u8 pad1[3];
16812 + u8 accepted_frames_type;
16813 + u32 rate;
16814 +};
16815 +
16816 +struct dpdmux_cmd_if_l2_rule {
16817 + u16 if_id;
16818 + u8 mac_addr5;
16819 + u8 mac_addr4;
16820 + u8 mac_addr3;
16821 + u8 mac_addr2;
16822 + u8 mac_addr1;
16823 + u8 mac_addr0;
16824 +
16825 + u32 pad;
16826 + u16 vlan_id;
16827 +};
16828 +
16829 +struct dpdmux_cmd_if_get_counter {
16830 + u16 if_id;
16831 + u8 counter_type;
16832 +};
16833 +
16834 +struct dpdmux_rsp_if_get_counter {
16835 + u64 pad;
16836 + u64 counter;
16837 +};
16838 +
16839 +struct dpdmux_cmd_if_set_link_cfg {
16840 + u16 if_id;
16841 + u16 pad[3];
16842 +
16843 + u32 rate;
16844 + u32 pad1;
16845 +
16846 + u64 options;
16847 +};
16848 +
16849 +struct dpdmux_cmd_if_get_link_state {
16850 + u16 if_id;
16851 +};
16852 +
16853 +struct dpdmux_rsp_if_get_link_state {
16854 + u32 pad;
16855 + u8 up;
16856 + u8 pad1[3];
16857 +
16858 + u32 rate;
16859 + u32 pad2;
16860 +
16861 + u64 options;
16862 +};
16863 +
16864 +struct dpdmux_rsp_get_api_version {
16865 + u16 major;
16866 + u16 minor;
16867 +};
16868 +
16869 +struct dpdmux_set_custom_key {
16870 + u64 pad[6];
16871 + u64 key_cfg_iova;
16872 +};
16873 +
16874 +struct dpdmux_cmd_add_custom_cls_entry {
16875 + u8 pad[3];
16876 + u8 key_size;
16877 + u16 pad1;
16878 + u16 dest_if;
16879 + u64 key_iova;
16880 + u64 mask_iova;
16881 +};
16882 +
16883 +struct dpdmux_cmd_remove_custom_cls_entry {
16884 + u8 pad[3];
16885 + u8 key_size;
16886 + u32 pad1;
16887 + u64 key_iova;
16888 + u64 mask_iova;
16889 +};
16890 +
16891 +#endif /* _FSL_DPDMUX_CMD_H */
16892 --- /dev/null
16893 +++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.c
16894 @@ -0,0 +1,1111 @@
16895 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
16896 + *
16897 + * Redistribution and use in source and binary forms, with or without
16898 + * modification, are permitted provided that the following conditions are met:
16899 + * * Redistributions of source code must retain the above copyright
16900 + * notice, this list of conditions and the following disclaimer.
16901 + * * Redistributions in binary form must reproduce the above copyright
16902 + * notice, this list of conditions and the following disclaimer in the
16903 + * documentation and/or other materials provided with the distribution.
16904 + * * Neither the name of the above-listed copyright holders nor the
16905 + * names of any contributors may be used to endorse or promote products
16906 + * derived from this software without specific prior written permission.
16907 + *
16908 + *
16909 + * ALTERNATIVELY, this software may be distributed under the terms of the
16910 + * GNU General Public License ("GPL") as published by the Free Software
16911 + * Foundation, either version 2 of that License or (at your option) any
16912 + * later version.
16913 + *
16914 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16915 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16916 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16917 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
16918 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
16919 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
16920 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
16921 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
16922 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
16923 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
16924 + * POSSIBILITY OF SUCH DAMAGE.
16925 + */
16926 +#include <linux/fsl/mc.h>
16927 +#include "dpdmux.h"
16928 +#include "dpdmux-cmd.h"
16929 +
16930 +/**
16931 + * dpdmux_open() - Open a control session for the specified object
16932 + * @mc_io: Pointer to MC portal's I/O object
16933 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
16934 + * @dpdmux_id: DPDMUX unique ID
16935 + * @token: Returned token; use in subsequent API calls
16936 + *
16937 + * This function can be used to open a control session for an
16938 + * already created object; an object may have been declared in
16939 + * the DPL or by calling the dpdmux_create() function.
16940 + * This function returns a unique authentication token,
16941 + * associated with the specific object ID and the specific MC
16942 + * portal; this token must be used in all subsequent commands for
16943 + * this specific object.
16944 + *
16945 + * Return: '0' on Success; Error code otherwise.
16946 + */
16947 +int dpdmux_open(struct fsl_mc_io *mc_io,
16948 + u32 cmd_flags,
16949 + int dpdmux_id,
16950 + u16 *token)
16951 +{
16952 + struct fsl_mc_command cmd = { 0 };
16953 + struct dpdmux_cmd_open *cmd_params;
16954 + int err;
16955 +
16956 + /* prepare command */
16957 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_OPEN,
16958 + cmd_flags,
16959 + 0);
16960 + cmd_params = (struct dpdmux_cmd_open *)cmd.params;
16961 + cmd_params->dpdmux_id = cpu_to_le32(dpdmux_id);
16962 +
16963 + /* send command to mc*/
16964 + err = mc_send_command(mc_io, &cmd);
16965 + if (err)
16966 + return err;
16967 +
16968 + /* retrieve response parameters */
16969 + *token = mc_cmd_hdr_read_token(&cmd);
16970 +
16971 + return 0;
16972 +}
16973 +
16974 +/**
16975 + * dpdmux_close() - Close the control session of the object
16976 + * @mc_io: Pointer to MC portal's I/O object
16977 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
16978 + * @token: Token of DPDMUX object
16979 + *
16980 + * After this function is called, no further operations are
16981 + * allowed on the object without opening a new control session.
16982 + *
16983 + * Return: '0' on Success; Error code otherwise.
16984 + */
16985 +int dpdmux_close(struct fsl_mc_io *mc_io,
16986 + u32 cmd_flags,
16987 + u16 token)
16988 +{
16989 + struct fsl_mc_command cmd = { 0 };
16990 +
16991 + /* prepare command */
16992 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE,
16993 + cmd_flags,
16994 + token);
16995 +
16996 + /* send command to mc*/
16997 + return mc_send_command(mc_io, &cmd);
16998 +}
16999 +
17000 +/**
17001 + * dpdmux_create() - Create the DPDMUX object
17002 + * @mc_io: Pointer to MC portal's I/O object
17003 + * @dprc_token: Parent container token; '0' for default container
17004 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17005 + * @cfg: Configuration structure
17006 + * @obj_id: returned object id
17007 + *
17008 + * Create the DPDMUX object, allocate required resources and
17009 + * perform required initialization.
17010 + *
17011 + * The object can be created either by declaring it in the
17012 + * DPL file, or by calling this function.
17013 + *
17014 + * The function accepts an authentication token of a parent
17015 + * container that this object should be assigned to. The token
17016 + * can be '0' so the object will be assigned to the default container.
17017 + * The newly created object can be opened with the returned
17018 + * object id and using the container's associated tokens and MC portals.
17019 + *
17020 + * Return: '0' on Success; Error code otherwise.
17021 + */
17022 +int dpdmux_create(struct fsl_mc_io *mc_io,
17023 + u16 dprc_token,
17024 + u32 cmd_flags,
17025 + const struct dpdmux_cfg *cfg,
17026 + u32 *obj_id)
17027 +{
17028 + struct fsl_mc_command cmd = { 0 };
17029 + struct dpdmux_cmd_create *cmd_params;
17030 + int err;
17031 +
17032 + /* prepare command */
17033 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CREATE,
17034 + cmd_flags,
17035 + dprc_token);
17036 + cmd_params = (struct dpdmux_cmd_create *)cmd.params;
17037 + cmd_params->method = cfg->method;
17038 + cmd_params->manip = cfg->manip;
17039 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
17040 + cmd_params->adv_max_dmat_entries =
17041 + cpu_to_le16(cfg->adv.max_dmat_entries);
17042 + cmd_params->adv_max_mc_groups = cpu_to_le16(cfg->adv.max_mc_groups);
17043 + cmd_params->adv_max_vlan_ids = cpu_to_le16(cfg->adv.max_vlan_ids);
17044 + cmd_params->options = cpu_to_le64(cfg->adv.options);
17045 +
17046 + /* send command to mc*/
17047 + err = mc_send_command(mc_io, &cmd);
17048 + if (err)
17049 + return err;
17050 +
17051 + /* retrieve response parameters */
17052 + *obj_id = mc_cmd_hdr_read_token(&cmd);
17053 +
17054 + return 0;
17055 +}
17056 +
17057 +/**
17058 + * dpdmux_destroy() - Destroy the DPDMUX object and release all its resources.
17059 + * @mc_io: Pointer to MC portal's I/O object
17060 + * @dprc_token: Parent container token; '0' for default container
17061 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17062 + * @object_id: The object id; it must be a valid id within the container that
17063 + * created this object;
17064 + *
17065 + * The function accepts the authentication token of the parent container that
17066 + * created the object (not the one that currently owns the object). The object
17067 + * is searched within parent using the provided 'object_id'.
17068 + * All tokens to the object must be closed before calling destroy.
17069 + *
17070 + * Return: '0' on Success; error code otherwise.
17071 + */
17072 +int dpdmux_destroy(struct fsl_mc_io *mc_io,
17073 + u16 dprc_token,
17074 + u32 cmd_flags,
17075 + u32 object_id)
17076 +{
17077 + struct fsl_mc_command cmd = { 0 };
17078 + struct dpdmux_cmd_destroy *cmd_params;
17079 +
17080 + /* prepare command */
17081 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DESTROY,
17082 + cmd_flags,
17083 + dprc_token);
17084 + cmd_params = (struct dpdmux_cmd_destroy *)cmd.params;
17085 + cmd_params->dpdmux_id = cpu_to_le32(object_id);
17086 +
17087 + /* send command to mc*/
17088 + return mc_send_command(mc_io, &cmd);
17089 +}
17090 +
17091 +/**
17092 + * dpdmux_enable() - Enable DPDMUX functionality
17093 + * @mc_io: Pointer to MC portal's I/O object
17094 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17095 + * @token: Token of DPDMUX object
17096 + *
17097 + * Return: '0' on Success; Error code otherwise.
17098 + */
17099 +int dpdmux_enable(struct fsl_mc_io *mc_io,
17100 + u32 cmd_flags,
17101 + u16 token)
17102 +{
17103 + struct fsl_mc_command cmd = { 0 };
17104 +
17105 + /* prepare command */
17106 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE,
17107 + cmd_flags,
17108 + token);
17109 +
17110 + /* send command to mc*/
17111 + return mc_send_command(mc_io, &cmd);
17112 +}
17113 +
17114 +/**
17115 + * dpdmux_disable() - Disable DPDMUX functionality
17116 + * @mc_io: Pointer to MC portal's I/O object
17117 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17118 + * @token: Token of DPDMUX object
17119 + *
17120 + * Return: '0' on Success; Error code otherwise.
17121 + */
17122 +int dpdmux_disable(struct fsl_mc_io *mc_io,
17123 + u32 cmd_flags,
17124 + u16 token)
17125 +{
17126 + struct fsl_mc_command cmd = { 0 };
17127 +
17128 + /* prepare command */
17129 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE,
17130 + cmd_flags,
17131 + token);
17132 +
17133 + /* send command to mc*/
17134 + return mc_send_command(mc_io, &cmd);
17135 +}
17136 +
17137 +/**
17138 + * dpdmux_is_enabled() - Check if the DPDMUX is enabled.
17139 + * @mc_io: Pointer to MC portal's I/O object
17140 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17141 + * @token: Token of DPDMUX object
17142 + * @en: Returns '1' if object is enabled; '0' otherwise
17143 + *
17144 + * Return: '0' on Success; Error code otherwise.
17145 + */
17146 +int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
17147 + u32 cmd_flags,
17148 + u16 token,
17149 + int *en)
17150 +{
17151 + struct fsl_mc_command cmd = { 0 };
17152 + struct dpdmux_rsp_is_enabled *rsp_params;
17153 + int err;
17154 +
17155 + /* prepare command */
17156 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IS_ENABLED,
17157 + cmd_flags,
17158 + token);
17159 +
17160 + /* send command to mc*/
17161 + err = mc_send_command(mc_io, &cmd);
17162 + if (err)
17163 + return err;
17164 +
17165 + /* retrieve response parameters */
17166 + rsp_params = (struct dpdmux_rsp_is_enabled *)cmd.params;
17167 + *en = dpdmux_get_field(rsp_params->en, ENABLE);
17168 +
17169 + return 0;
17170 +}
17171 +
17172 +/**
17173 + * dpdmux_reset() - Reset the DPDMUX, returns the object to initial state.
17174 + * @mc_io: Pointer to MC portal's I/O object
17175 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17176 + * @token: Token of DPDMUX object
17177 + *
17178 + * Return: '0' on Success; Error code otherwise.
17179 + */
17180 +int dpdmux_reset(struct fsl_mc_io *mc_io,
17181 + u32 cmd_flags,
17182 + u16 token)
17183 +{
17184 + struct fsl_mc_command cmd = { 0 };
17185 +
17186 + /* prepare command */
17187 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET,
17188 + cmd_flags,
17189 + token);
17190 +
17191 + /* send command to mc*/
17192 + return mc_send_command(mc_io, &cmd);
17193 +}
17194 +
17195 +/**
17196 + * dpdmux_set_irq_enable() - Set overall interrupt state.
17197 + * @mc_io: Pointer to MC portal's I/O object
17198 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17199 + * @token: Token of DPDMUX object
17200 + * @irq_index: The interrupt index to configure
17201 + * @en: Interrupt state - enable = 1, disable = 0
17202 + *
17203 + * Allows GPP software to control when interrupts are generated.
17204 + * Each interrupt can have up to 32 causes. The enable/disable setting controls
17205 + * the overall interrupt state. If the interrupt is disabled, no cause will
17206 + * trigger an interrupt.
17207 + *
17208 + * Return: '0' on Success; Error code otherwise.
17209 + */
17210 +int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
17211 + u32 cmd_flags,
17212 + u16 token,
17213 + u8 irq_index,
17214 + u8 en)
17215 +{
17216 + struct fsl_mc_command cmd = { 0 };
17217 + struct dpdmux_cmd_set_irq_enable *cmd_params;
17218 +
17219 + /* prepare command */
17220 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_ENABLE,
17221 + cmd_flags,
17222 + token);
17223 + cmd_params = (struct dpdmux_cmd_set_irq_enable *)cmd.params;
17224 + cmd_params->enable = en;
17225 + cmd_params->irq_index = irq_index;
17226 +
17227 + /* send command to mc*/
17228 + return mc_send_command(mc_io, &cmd);
17229 +}
17230 +
17231 +/**
17232 + * dpdmux_get_irq_enable() - Get overall interrupt state.
17233 + * @mc_io: Pointer to MC portal's I/O object
17234 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17235 + * @token: Token of DPDMUX object
17236 + * @irq_index: The interrupt index to configure
17237 + * @en: Returned interrupt state - enable = 1, disable = 0
17238 + *
17239 + * Return: '0' on Success; Error code otherwise.
17240 + */
17241 +int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
17242 + u32 cmd_flags,
17243 + u16 token,
17244 + u8 irq_index,
17245 + u8 *en)
17246 +{
17247 + struct fsl_mc_command cmd = { 0 };
17248 + struct dpdmux_cmd_get_irq_enable *cmd_params;
17249 + struct dpdmux_rsp_get_irq_enable *rsp_params;
17250 + int err;
17251 +
17252 + /* prepare command */
17253 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_ENABLE,
17254 + cmd_flags,
17255 + token);
17256 + cmd_params = (struct dpdmux_cmd_get_irq_enable *)cmd.params;
17257 + cmd_params->irq_index = irq_index;
17258 +
17259 + /* send command to mc*/
17260 + err = mc_send_command(mc_io, &cmd);
17261 + if (err)
17262 + return err;
17263 +
17264 + /* retrieve response parameters */
17265 + rsp_params = (struct dpdmux_rsp_get_irq_enable *)cmd.params;
17266 + *en = rsp_params->enable;
17267 +
17268 + return 0;
17269 +}
17270 +
17271 +/**
17272 + * dpdmux_set_irq_mask() - Set interrupt mask.
17273 + * @mc_io: Pointer to MC portal's I/O object
17274 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17275 + * @token: Token of DPDMUX object
17276 + * @irq_index: The interrupt index to configure
17277 + * @mask: event mask to trigger interrupt;
17278 + * each bit:
17279 + * 0 = ignore event
17280 + * 1 = consider event for asserting IRQ
17281 + *
17282 + * Every interrupt can have up to 32 causes and the interrupt model supports
17283 + * masking/unmasking each cause independently
17284 + *
17285 + * Return: '0' on Success; Error code otherwise.
17286 + */
17287 +int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
17288 + u32 cmd_flags,
17289 + u16 token,
17290 + u8 irq_index,
17291 + u32 mask)
17292 +{
17293 + struct fsl_mc_command cmd = { 0 };
17294 + struct dpdmux_cmd_set_irq_mask *cmd_params;
17295 +
17296 + /* prepare command */
17297 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_MASK,
17298 + cmd_flags,
17299 + token);
17300 + cmd_params = (struct dpdmux_cmd_set_irq_mask *)cmd.params;
17301 + cmd_params->mask = cpu_to_le32(mask);
17302 + cmd_params->irq_index = irq_index;
17303 +
17304 + /* send command to mc*/
17305 + return mc_send_command(mc_io, &cmd);
17306 +}
17307 +
17308 +/**
17309 + * dpdmux_get_irq_mask() - Get interrupt mask.
17310 + * @mc_io: Pointer to MC portal's I/O object
17311 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17312 + * @token: Token of DPDMUX object
17313 + * @irq_index: The interrupt index to configure
17314 + * @mask: Returned event mask to trigger interrupt
17315 + *
17316 + * Every interrupt can have up to 32 causes and the interrupt model supports
17317 + * masking/unmasking each cause independently
17318 + *
17319 + * Return: '0' on Success; Error code otherwise.
17320 + */
17321 +int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
17322 + u32 cmd_flags,
17323 + u16 token,
17324 + u8 irq_index,
17325 + u32 *mask)
17326 +{
17327 + struct fsl_mc_command cmd = { 0 };
17328 + struct dpdmux_cmd_get_irq_mask *cmd_params;
17329 + struct dpdmux_rsp_get_irq_mask *rsp_params;
17330 + int err;
17331 +
17332 + /* prepare command */
17333 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_MASK,
17334 + cmd_flags,
17335 + token);
17336 + cmd_params = (struct dpdmux_cmd_get_irq_mask *)cmd.params;
17337 + cmd_params->irq_index = irq_index;
17338 +
17339 + /* send command to mc*/
17340 + err = mc_send_command(mc_io, &cmd);
17341 + if (err)
17342 + return err;
17343 +
17344 + /* retrieve response parameters */
17345 + rsp_params = (struct dpdmux_rsp_get_irq_mask *)cmd.params;
17346 + *mask = le32_to_cpu(rsp_params->mask);
17347 +
17348 + return 0;
17349 +}
17350 +
17351 +/**
17352 + * dpdmux_get_irq_status() - Get the current status of any pending interrupts.
17353 + * @mc_io: Pointer to MC portal's I/O object
17354 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17355 + * @token: Token of DPDMUX object
17356 + * @irq_index: The interrupt index to configure
17357 + * @status: Returned interrupts status - one bit per cause:
17358 + * 0 = no interrupt pending
17359 + * 1 = interrupt pending
17360 + *
17361 + * Return: '0' on Success; Error code otherwise.
17362 + */
17363 +int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
17364 + u32 cmd_flags,
17365 + u16 token,
17366 + u8 irq_index,
17367 + u32 *status)
17368 +{
17369 + struct fsl_mc_command cmd = { 0 };
17370 + struct dpdmux_cmd_get_irq_status *cmd_params;
17371 + struct dpdmux_rsp_get_irq_status *rsp_params;
17372 + int err;
17373 +
17374 + /* prepare command */
17375 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_STATUS,
17376 + cmd_flags,
17377 + token);
17378 + cmd_params = (struct dpdmux_cmd_get_irq_status *)cmd.params;
17379 + cmd_params->status = cpu_to_le32(*status);
17380 + cmd_params->irq_index = irq_index;
17381 +
17382 + /* send command to mc*/
17383 + err = mc_send_command(mc_io, &cmd);
17384 + if (err)
17385 + return err;
17386 +
17387 + /* retrieve response parameters */
17388 + rsp_params = (struct dpdmux_rsp_get_irq_status *)cmd.params;
17389 + *status = le32_to_cpu(rsp_params->status);
17390 +
17391 + return 0;
17392 +}
17393 +
17394 +/**
17395 + * dpdmux_clear_irq_status() - Clear a pending interrupt's status
17396 + * @mc_io: Pointer to MC portal's I/O object
17397 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17398 + * @token: Token of DPDMUX object
17399 + * @irq_index: The interrupt index to configure
17400 + * @status: bits to clear (W1C) - one bit per cause:
17401 + * 0 = don't change
17402 + * 1 = clear status bit
17403 + *
17404 + * Return: '0' on Success; Error code otherwise.
17405 + */
17406 +int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
17407 + u32 cmd_flags,
17408 + u16 token,
17409 + u8 irq_index,
17410 + u32 status)
17411 +{
17412 + struct fsl_mc_command cmd = { 0 };
17413 + struct dpdmux_cmd_clear_irq_status *cmd_params;
17414 +
17415 + /* prepare command */
17416 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLEAR_IRQ_STATUS,
17417 + cmd_flags,
17418 + token);
17419 + cmd_params = (struct dpdmux_cmd_clear_irq_status *)cmd.params;
17420 + cmd_params->status = cpu_to_le32(status);
17421 + cmd_params->irq_index = irq_index;
17422 +
17423 + /* send command to mc*/
17424 + return mc_send_command(mc_io, &cmd);
17425 +}
17426 +
17427 +/**
17428 + * dpdmux_get_attributes() - Retrieve DPDMUX attributes
17429 + * @mc_io: Pointer to MC portal's I/O object
17430 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17431 + * @token: Token of DPDMUX object
17432 + * @attr: Returned object's attributes
17433 + *
17434 + * Return: '0' on Success; Error code otherwise.
17435 + */
17436 +int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
17437 + u32 cmd_flags,
17438 + u16 token,
17439 + struct dpdmux_attr *attr)
17440 +{
17441 + struct fsl_mc_command cmd = { 0 };
17442 + struct dpdmux_rsp_get_attr *rsp_params;
17443 + int err;
17444 +
17445 + /* prepare command */
17446 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_ATTR,
17447 + cmd_flags,
17448 + token);
17449 +
17450 + /* send command to mc*/
17451 + err = mc_send_command(mc_io, &cmd);
17452 + if (err)
17453 + return err;
17454 +
17455 + /* retrieve response parameters */
17456 + rsp_params = (struct dpdmux_rsp_get_attr *)cmd.params;
17457 + attr->id = le32_to_cpu(rsp_params->id);
17458 + attr->options = le64_to_cpu(rsp_params->options);
17459 + attr->method = rsp_params->method;
17460 + attr->manip = rsp_params->manip;
17461 + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
17462 + attr->mem_size = le16_to_cpu(rsp_params->mem_size);
17463 +
17464 + return 0;
17465 +}
17466 +
17467 +/**
17468 + * dpdmux_if_enable() - Enable Interface
17469 + * @mc_io: Pointer to MC portal's I/O object
17470 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17471 + * @token: Token of DPDMUX object
17472 + * @if_id: Interface Identifier
17473 + *
17474 + * Return: Completion status. '0' on Success; Error code otherwise.
17475 + */
17476 +int dpdmux_if_enable(struct fsl_mc_io *mc_io,
17477 + u32 cmd_flags,
17478 + u16 token,
17479 + u16 if_id)
17480 +{
17481 + struct dpdmux_cmd_if *cmd_params;
17482 + struct fsl_mc_command cmd = { 0 };
17483 +
17484 + /* prepare command */
17485 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ENABLE,
17486 + cmd_flags,
17487 + token);
17488 + cmd_params = (struct dpdmux_cmd_if *)cmd.params;
17489 + cmd_params->if_id = cpu_to_le16(if_id);
17490 +
17491 + /* send command to mc*/
17492 + return mc_send_command(mc_io, &cmd);
17493 +}
17494 +
17495 +/**
17496 + * dpdmux_if_disable() - Disable Interface
17497 + * @mc_io: Pointer to MC portal's I/O object
17498 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17499 + * @token: Token of DPDMUX object
17500 + * @if_id: Interface Identifier
17501 + *
17502 + * Return: Completion status. '0' on Success; Error code otherwise.
17503 + */
17504 +int dpdmux_if_disable(struct fsl_mc_io *mc_io,
17505 + u32 cmd_flags,
17506 + u16 token,
17507 + u16 if_id)
17508 +{
17509 + struct dpdmux_cmd_if *cmd_params;
17510 + struct fsl_mc_command cmd = { 0 };
17511 +
17512 + /* prepare command */
17513 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_DISABLE,
17514 + cmd_flags,
17515 + token);
17516 + cmd_params = (struct dpdmux_cmd_if *)cmd.params;
17517 + cmd_params->if_id = cpu_to_le16(if_id);
17518 +
17519 + /* send command to mc*/
17520 + return mc_send_command(mc_io, &cmd);
17521 +}
17522 +
17523 +/**
17524 + * dpdmux_set_max_frame_length() - Set the maximum frame length in DPDMUX
17525 + * @mc_io: Pointer to MC portal's I/O object
17526 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17527 + * @token: Token of DPDMUX object
17528 + * @max_frame_length: The required maximum frame length
17529 + *
17530 + * Update the maximum frame length on all DMUX interfaces.
17531 + * In case of VEPA, the maximum frame length on all dmux interfaces
17532 + * will be updated with the minimum value of the mfls of the connected
17533 + * dpnis and the actual value of dmux mfl.
17534 + *
17535 + * Return: '0' on Success; Error code otherwise.
17536 + */
17537 +int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
17538 + u32 cmd_flags,
17539 + u16 token,
17540 + u16 max_frame_length)
17541 +{
17542 + struct fsl_mc_command cmd = { 0 };
17543 + struct dpdmux_cmd_set_max_frame_length *cmd_params;
17544 +
17545 + /* prepare command */
17546 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_MAX_FRAME_LENGTH,
17547 + cmd_flags,
17548 + token);
17549 + cmd_params = (struct dpdmux_cmd_set_max_frame_length *)cmd.params;
17550 + cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
17551 +
17552 + /* send command to mc*/
17553 + return mc_send_command(mc_io, &cmd);
17554 +}
17555 +
17556 +/**
17557 + * dpdmux_ul_reset_counters() - Function resets the uplink counter
17558 + * @mc_io: Pointer to MC portal's I/O object
17559 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17560 + * @token: Token of DPDMUX object
17561 + *
17562 + * Return: '0' on Success; Error code otherwise.
17563 + */
17564 +int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
17565 + u32 cmd_flags,
17566 + u16 token)
17567 +{
17568 + struct fsl_mc_command cmd = { 0 };
17569 +
17570 + /* prepare command */
17571 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS,
17572 + cmd_flags,
17573 + token);
17574 +
17575 + /* send command to mc*/
17576 + return mc_send_command(mc_io, &cmd);
17577 +}
17578 +
17579 +/**
17580 + * dpdmux_if_set_accepted_frames() - Set the accepted frame types
17581 + * @mc_io: Pointer to MC portal's I/O object
17582 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17583 + * @token: Token of DPDMUX object
17584 + * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
17585 + * @cfg: Frame types configuration
17586 + *
17587 + * if 'DPDMUX_ADMIT_ONLY_VLAN_TAGGED' is set - untagged frames or
17588 + * priority-tagged frames are discarded.
17589 + * if 'DPDMUX_ADMIT_ONLY_UNTAGGED' is set - untagged frames or
17590 + * priority-tagged frames are accepted.
17591 + * if 'DPDMUX_ADMIT_ALL' is set (default mode) - all VLAN tagged,
17592 + * untagged and priority-tagged frame are accepted;
17593 + *
17594 + * Return: '0' on Success; Error code otherwise.
17595 + */
17596 +int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
17597 + u32 cmd_flags,
17598 + u16 token,
17599 + u16 if_id,
17600 + const struct dpdmux_accepted_frames *cfg)
17601 +{
17602 + struct fsl_mc_command cmd = { 0 };
17603 + struct dpdmux_cmd_if_set_accepted_frames *cmd_params;
17604 +
17605 + /* prepare command */
17606 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES,
17607 + cmd_flags,
17608 + token);
17609 + cmd_params = (struct dpdmux_cmd_if_set_accepted_frames *)cmd.params;
17610 + cmd_params->if_id = cpu_to_le16(if_id);
17611 + dpdmux_set_field(cmd_params->frames_options, ACCEPTED_FRAMES_TYPE,
17612 + cfg->type);
17613 + dpdmux_set_field(cmd_params->frames_options, UNACCEPTED_FRAMES_ACTION,
17614 + cfg->unaccept_act);
17615 +
17616 + /* send command to mc*/
17617 + return mc_send_command(mc_io, &cmd);
17618 +}
17619 +
17620 +/**
17621 + * dpdmux_if_get_attributes() - Obtain DPDMUX interface attributes
17622 + * @mc_io: Pointer to MC portal's I/O object
17623 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17624 + * @token: Token of DPDMUX object
17625 + * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
17626 + * @attr: Interface attributes
17627 + *
17628 + * Return: '0' on Success; Error code otherwise.
17629 + */
17630 +int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
17631 + u32 cmd_flags,
17632 + u16 token,
17633 + u16 if_id,
17634 + struct dpdmux_if_attr *attr)
17635 +{
17636 + struct fsl_mc_command cmd = { 0 };
17637 + struct dpdmux_cmd_if *cmd_params;
17638 + struct dpdmux_rsp_if_get_attr *rsp_params;
17639 + int err;
17640 +
17641 + /* prepare command */
17642 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_ATTR,
17643 + cmd_flags,
17644 + token);
17645 + cmd_params = (struct dpdmux_cmd_if *)cmd.params;
17646 + cmd_params->if_id = cpu_to_le16(if_id);
17647 +
17648 + /* send command to mc*/
17649 + err = mc_send_command(mc_io, &cmd);
17650 + if (err)
17651 + return err;
17652 +
17653 + /* retrieve response parameters */
17654 + rsp_params = (struct dpdmux_rsp_if_get_attr *)cmd.params;
17655 + attr->rate = le32_to_cpu(rsp_params->rate);
17656 + attr->enabled = dpdmux_get_field(rsp_params->enabled, ENABLE);
17657 + attr->accept_frame_type =
17658 + dpdmux_get_field(rsp_params->accepted_frames_type,
17659 + ACCEPTED_FRAMES_TYPE);
17660 +
17661 + return 0;
17662 +}
17663 +
17664 +/**
17665 + * dpdmux_if_remove_l2_rule() - Remove L2 rule from DPDMUX table
17666 + * @mc_io: Pointer to MC portal's I/O object
17667 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17668 + * @token: Token of DPDMUX object
17669 + * @if_id: Destination interface ID
17670 + * @rule: L2 rule
17671 + *
17672 + * Function removes an L2 rule from the DPDMUX table,
17673 + * or removes an interface from an existing multicast address
17674 + *
17675 + * Return: '0' on Success; Error code otherwise.
17676 + */
17677 +int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
17678 + u32 cmd_flags,
17679 + u16 token,
17680 + u16 if_id,
17681 + const struct dpdmux_l2_rule *rule)
17682 +{
17683 + struct fsl_mc_command cmd = { 0 };
17684 + struct dpdmux_cmd_if_l2_rule *cmd_params;
17685 +
17686 + /* prepare command */
17687 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_REMOVE_L2_RULE,
17688 + cmd_flags,
17689 + token);
17690 + cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
17691 + cmd_params->if_id = cpu_to_le16(if_id);
17692 + cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
17693 + cmd_params->mac_addr5 = rule->mac_addr[5];
17694 + cmd_params->mac_addr4 = rule->mac_addr[4];
17695 + cmd_params->mac_addr3 = rule->mac_addr[3];
17696 + cmd_params->mac_addr2 = rule->mac_addr[2];
17697 + cmd_params->mac_addr1 = rule->mac_addr[1];
17698 + cmd_params->mac_addr0 = rule->mac_addr[0];
17699 +
17700 + /* send command to mc*/
17701 + return mc_send_command(mc_io, &cmd);
17702 +}
17703 +
17704 +/**
17705 + * dpdmux_if_add_l2_rule() - Add L2 rule into DPDMUX table
17706 + * @mc_io: Pointer to MC portal's I/O object
17707 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17708 + * @token: Token of DPDMUX object
17709 + * @if_id: Destination interface ID
17710 + * @rule: L2 rule
17711 + *
17712 + * Function adds an L2 rule into the DPDMUX table,
17713 + * or adds an interface to an existing multicast address
17714 + *
17715 + * Return: '0' on Success; Error code otherwise.
17716 + */
17717 +int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
17718 + u32 cmd_flags,
17719 + u16 token,
17720 + u16 if_id,
17721 + const struct dpdmux_l2_rule *rule)
17722 +{
17723 + struct fsl_mc_command cmd = { 0 };
17724 + struct dpdmux_cmd_if_l2_rule *cmd_params;
17725 +
17726 + /* prepare command */
17727 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ADD_L2_RULE,
17728 + cmd_flags,
17729 + token);
17730 + cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
17731 + cmd_params->if_id = cpu_to_le16(if_id);
17732 + cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
17733 + cmd_params->mac_addr5 = rule->mac_addr[5];
17734 + cmd_params->mac_addr4 = rule->mac_addr[4];
17735 + cmd_params->mac_addr3 = rule->mac_addr[3];
17736 + cmd_params->mac_addr2 = rule->mac_addr[2];
17737 + cmd_params->mac_addr1 = rule->mac_addr[1];
17738 + cmd_params->mac_addr0 = rule->mac_addr[0];
17739 +
17740 + /* send command to mc*/
17741 + return mc_send_command(mc_io, &cmd);
17742 +}
17743 +
17744 +/**
17745 + * dpdmux_if_get_counter() - Function obtains a specific counter of an interface
17746 + * @mc_io: Pointer to MC portal's I/O object
17747 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17748 + * @token: Token of DPDMUX object
17749 + * @if_id: Interface Id
17750 + * @counter_type: counter type
17751 + * @counter: Returned specific counter information
17752 + *
17753 + * Return: '0' on Success; Error code otherwise.
17754 + */
17755 +int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
17756 + u32 cmd_flags,
17757 + u16 token,
17758 + u16 if_id,
17759 + enum dpdmux_counter_type counter_type,
17760 + u64 *counter)
17761 +{
17762 + struct fsl_mc_command cmd = { 0 };
17763 + struct dpdmux_cmd_if_get_counter *cmd_params;
17764 + struct dpdmux_rsp_if_get_counter *rsp_params;
17765 + int err;
17766 +
17767 + /* prepare command */
17768 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_COUNTER,
17769 + cmd_flags,
17770 + token);
17771 + cmd_params = (struct dpdmux_cmd_if_get_counter *)cmd.params;
17772 + cmd_params->if_id = cpu_to_le16(if_id);
17773 + cmd_params->counter_type = counter_type;
17774 +
17775 + /* send command to mc*/
17776 + err = mc_send_command(mc_io, &cmd);
17777 + if (err)
17778 + return err;
17779 +
17780 + /* retrieve response parameters */
17781 + rsp_params = (struct dpdmux_rsp_if_get_counter *)cmd.params;
17782 + *counter = le64_to_cpu(rsp_params->counter);
17783 +
17784 + return 0;
17785 +}
17786 +
17787 +/**
17788 + * dpdmux_if_set_link_cfg() - set the link configuration.
17789 + * @mc_io: Pointer to MC portal's I/O object
17790 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17791 + * @token: Token of DPDMUX object
17792 + * @if_id: interface id
17793 + * @cfg: Link configuration
17794 + *
17795 + * Return: '0' on Success; Error code otherwise.
17796 + */
17797 +int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
17798 + u32 cmd_flags,
17799 + u16 token,
17800 + u16 if_id,
17801 + struct dpdmux_link_cfg *cfg)
17802 +{
17803 + struct fsl_mc_command cmd = { 0 };
17804 + struct dpdmux_cmd_if_set_link_cfg *cmd_params;
17805 +
17806 + /* prepare command */
17807 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_LINK_CFG,
17808 + cmd_flags,
17809 + token);
17810 + cmd_params = (struct dpdmux_cmd_if_set_link_cfg *)cmd.params;
17811 + cmd_params->if_id = cpu_to_le16(if_id);
17812 + cmd_params->rate = cpu_to_le32(cfg->rate);
17813 + cmd_params->options = cpu_to_le64(cfg->options);
17814 +
17815 + /* send command to mc*/
17816 + return mc_send_command(mc_io, &cmd);
17817 +}
17818 +
17819 +/**
17820 + * dpdmux_if_get_link_state - Return the link state
17821 + * @mc_io: Pointer to MC portal's I/O object
17822 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17823 + * @token: Token of DPDMUX object
17824 + * @if_id: interface id
17825 + * @state: link state
17826 + *
17827 + * @returns '0' on Success; Error code otherwise.
17828 + */
17829 +int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
17830 + u32 cmd_flags,
17831 + u16 token,
17832 + u16 if_id,
17833 + struct dpdmux_link_state *state)
17834 +{
17835 + struct fsl_mc_command cmd = { 0 };
17836 + struct dpdmux_cmd_if_get_link_state *cmd_params;
17837 + struct dpdmux_rsp_if_get_link_state *rsp_params;
17838 + int err;
17839 +
17840 + /* prepare command */
17841 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_LINK_STATE,
17842 + cmd_flags,
17843 + token);
17844 + cmd_params = (struct dpdmux_cmd_if_get_link_state *)cmd.params;
17845 + cmd_params->if_id = cpu_to_le16(if_id);
17846 +
17847 + /* send command to mc*/
17848 + err = mc_send_command(mc_io, &cmd);
17849 + if (err)
17850 + return err;
17851 +
17852 + /* retrieve response parameters */
17853 + rsp_params = (struct dpdmux_rsp_if_get_link_state *)cmd.params;
17854 + state->rate = le32_to_cpu(rsp_params->rate);
17855 + state->options = le64_to_cpu(rsp_params->options);
17856 + state->up = dpdmux_get_field(rsp_params->up, ENABLE);
17857 +
17858 + return 0;
17859 +}
17860 +
17861 +/**
17862 + * dpdmux_set_custom_key - Set a custom classification key.
17863 + *
17864 + * This API is only available for DPDMUX instance created with
17865 + * DPDMUX_METHOD_CUSTOM. This API must be called before populating the
17866 + * classification table using dpdmux_add_custom_cls_entry.
17867 + *
17868 + * Calls to dpdmux_set_custom_key remove all existing classification entries
17869 + * that may have been added previously using dpdmux_add_custom_cls_entry.
17870 + *
17871 + * @mc_io: Pointer to MC portal's I/O object
17872 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17873 + * @token: Token of DPDMUX object
17874 + *
17875 + * @key_cfg_iova: DMA address of a configuration structure set up using
17876 + * dpkg_prepare_key_cfg. Maximum key size is 24 bytes.
17877 + *
17878 + * @returns '0' on Success; Error code otherwise.
17879 + */
17880 +int dpdmux_set_custom_key(struct fsl_mc_io *mc_io,
17881 + u32 cmd_flags,
17882 + u16 token,
17883 + u64 key_cfg_iova)
17884 +{
17885 + struct dpdmux_set_custom_key *cmd_params;
17886 + struct fsl_mc_command cmd = { 0 };
17887 +
17888 + /* prepare command */
17889 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_CUSTOM_KEY,
17890 + cmd_flags,
17891 + token);
17892 + cmd_params = (struct dpdmux_set_custom_key *)cmd.params;
17893 + cmd_params->key_cfg_iova = cpu_to_le64(key_cfg_iova);
17894 +
17895 + /* send command to mc*/
17896 + return mc_send_command(mc_io, &cmd);
17897 +}
17898 +
17899 +/**
17900 + * dpdmux_add_custom_cls_entry - Adds a custom classification entry.
17901 + *
17902 + * This API is only available for DPDMUX instances created with
17903 + * DPDMUX_METHOD_CUSTOM. Before calling this function a classification key
17904 + * composition rule must be set up using dpdmux_set_custom_key.
17905 + *
17906 + * @mc_io: Pointer to MC portal's I/O object
17907 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17908 + * @token: Token of DPDMUX object
17909 + * @rule: Classification rule to insert. Rules cannot be duplicated, if a
17910 + * matching rule already exists, the action will be replaced.
17911 + * @action: Action to perform for matching traffic.
17912 + *
17913 + * @returns '0' on Success; Error code otherwise.
17914 + */
17915 +int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io,
17916 + u32 cmd_flags,
17917 + u16 token,
17918 + struct dpdmux_rule_cfg *rule,
17919 + struct dpdmux_cls_action *action)
17920 +{
17921 + struct dpdmux_cmd_add_custom_cls_entry *cmd_params;
17922 + struct fsl_mc_command cmd = { 0 };
17923 +
17924 + /* prepare command */
17925 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY,
17926 + cmd_flags,
17927 + token);
17928 +
17929 + cmd_params = (struct dpdmux_cmd_add_custom_cls_entry *)cmd.params;
17930 + cmd_params->key_size = rule->key_size;
17931 + cmd_params->dest_if = cpu_to_le16(action->dest_if);
17932 + cmd_params->key_iova = cpu_to_le64(rule->key_iova);
17933 + cmd_params->mask_iova = cpu_to_le64(rule->mask_iova);
17934 +
17935 + /* send command to mc*/
17936 + return mc_send_command(mc_io, &cmd);
17937 +}
17938 +
17939 +/**
17940 + * dpdmux_remove_custom_cls_entry - Removes a custom classification entry.
17941 + *
17942 + * This API is only available for DPDMUX instances created with
17943 + * DPDMUX_METHOD_CUSTOM. The API can be used to remove classification
17944 + * entries previously inserted using dpdmux_add_custom_cls_entry.
17945 + *
17946 + * @mc_io: Pointer to MC portal's I/O object
17947 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17948 + * @token: Token of DPDMUX object
17949 + * @rule: Classification rule to remove
17950 + *
17951 + * @returns '0' on Success; Error code otherwise.
17952 + */
17953 +int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io,
17954 + u32 cmd_flags,
17955 + u16 token,
17956 + struct dpdmux_rule_cfg *rule)
17957 +{
17958 + struct dpdmux_cmd_remove_custom_cls_entry *cmd_params;
17959 + struct fsl_mc_command cmd = { 0 };
17960 +
17961 + /* prepare command */
17962 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY,
17963 + cmd_flags,
17964 + token);
17965 + cmd_params = (struct dpdmux_cmd_remove_custom_cls_entry *)cmd.params;
17966 + cmd_params->key_size = rule->key_size;
17967 + cmd_params->key_iova = cpu_to_le64(rule->key_iova);
17968 + cmd_params->mask_iova = cpu_to_le64(rule->mask_iova);
17969 +
17970 + /* send command to mc*/
17971 + return mc_send_command(mc_io, &cmd);
17972 +}
17973 +
17974 +/**
17975 + * dpdmux_get_api_version() - Get Data Path Demux API version
17976 + * @mc_io: Pointer to MC portal's I/O object
17977 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17978 + * @major_ver: Major version of data path demux API
17979 + * @minor_ver: Minor version of data path demux API
17980 + *
17981 + * Return: '0' on Success; Error code otherwise.
17982 + */
17983 +int dpdmux_get_api_version(struct fsl_mc_io *mc_io,
17984 + u32 cmd_flags,
17985 + u16 *major_ver,
17986 + u16 *minor_ver)
17987 +{
17988 + struct fsl_mc_command cmd = { 0 };
17989 + struct dpdmux_rsp_get_api_version *rsp_params;
17990 + int err;
17991 +
17992 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_API_VERSION,
17993 + cmd_flags,
17994 + 0);
17995 +
17996 + err = mc_send_command(mc_io, &cmd);
17997 + if (err)
17998 + return err;
17999 +
18000 + rsp_params = (struct dpdmux_rsp_get_api_version *)cmd.params;
18001 + *major_ver = le16_to_cpu(rsp_params->major);
18002 + *minor_ver = le16_to_cpu(rsp_params->minor);
18003 +
18004 + return 0;
18005 +}
18006 --- /dev/null
18007 +++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.h
18008 @@ -0,0 +1,453 @@
18009 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
18010 + *
18011 + * Redistribution and use in source and binary forms, with or without
18012 + * modification, are permitted provided that the following conditions are met:
18013 + * * Redistributions of source code must retain the above copyright
18014 + * notice, this list of conditions and the following disclaimer.
18015 + * * Redistributions in binary form must reproduce the above copyright
18016 + * notice, this list of conditions and the following disclaimer in the
18017 + * documentation and/or other materials provided with the distribution.
18018 + * * Neither the name of the above-listed copyright holders nor the
18019 + * names of any contributors may be used to endorse or promote products
18020 + * derived from this software without specific prior written permission.
18021 + *
18022 + *
18023 + * ALTERNATIVELY, this software may be distributed under the terms of the
18024 + * GNU General Public License ("GPL") as published by the Free Software
18025 + * Foundation, either version 2 of that License or (at your option) any
18026 + * later version.
18027 + *
18028 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18029 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18030 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18031 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
18032 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
18033 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
18034 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
18035 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
18036 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
18037 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
18038 + * POSSIBILITY OF SUCH DAMAGE.
18039 + */
18040 +#ifndef __FSL_DPDMUX_H
18041 +#define __FSL_DPDMUX_H
18042 +
18043 +struct fsl_mc_io;
18044 +
18045 +/* Data Path Demux API
18046 + * Contains API for handling DPDMUX topology and functionality
18047 + */
18048 +
18049 +int dpdmux_open(struct fsl_mc_io *mc_io,
18050 + u32 cmd_flags,
18051 + int dpdmux_id,
18052 + u16 *token);
18053 +
18054 +int dpdmux_close(struct fsl_mc_io *mc_io,
18055 + u32 cmd_flags,
18056 + u16 token);
18057 +
18058 +/**
18059 + * DPDMUX general options
18060 + */
18061 +
18062 +/**
18063 + * Enable bridging between internal interfaces
18064 + */
18065 +#define DPDMUX_OPT_BRIDGE_EN 0x0000000000000002ULL
18066 +
18067 +/**
18068 + * Mask support for classification
18069 + */
18070 +#define DPDMUX_OPT_CLS_MASK_SUPPORT 0x0000000000000020ULL
18071 +
18072 +#define DPDMUX_IRQ_INDEX_IF 0x0000
18073 +#define DPDMUX_IRQ_INDEX 0x0001
18074 +
18075 +/**
18076 + * IRQ event - Indicates that the link state changed
18077 + */
18078 +#define DPDMUX_IRQ_EVENT_LINK_CHANGED 0x0001
18079 +
18080 +/**
18081 + * enum dpdmux_manip - DPDMUX manipulation operations
18082 + * @DPDMUX_MANIP_NONE: No manipulation on frames
18083 + * @DPDMUX_MANIP_ADD_REMOVE_S_VLAN: Add S-VLAN on egress, remove it on ingress
18084 + */
18085 +enum dpdmux_manip {
18086 + DPDMUX_MANIP_NONE = 0x0,
18087 + DPDMUX_MANIP_ADD_REMOVE_S_VLAN = 0x1
18088 +};
18089 +
18090 +/**
18091 + * enum dpdmux_method - DPDMUX method options
18092 + * @DPDMUX_METHOD_NONE: no DPDMUX method
18093 + * @DPDMUX_METHOD_C_VLAN_MAC: DPDMUX based on C-VLAN and MAC address
18094 + * @DPDMUX_METHOD_MAC: DPDMUX based on MAC address
18095 + * @DPDMUX_METHOD_C_VLAN: DPDMUX based on C-VLAN
18096 + * @DPDMUX_METHOD_S_VLAN: DPDMUX based on S-VLAN
18097 + * @DPDMUX_METHOD_CUSTOM: DPDMUX based on a custom classification key */
18098 +enum dpdmux_method {
18099 + DPDMUX_METHOD_NONE = 0x0,
18100 + DPDMUX_METHOD_C_VLAN_MAC = 0x1,
18101 + DPDMUX_METHOD_MAC = 0x2,
18102 + DPDMUX_METHOD_C_VLAN = 0x3,
18103 + DPDMUX_METHOD_S_VLAN = 0x4,
18104 + DPDMUX_METHOD_CUSTOM = 0x5
18105 +};
18106 +
18107 +/**
18108 + * struct dpdmux_cfg - DPDMUX configuration parameters
18109 + * @method: Defines the operation method for the DPDMUX address table
18110 + * @manip: Required manipulation operation
18111 + * @num_ifs: Number of interfaces (excluding the uplink interface)
18112 + * @adv: Advanced parameters; default is all zeros;
18113 + * use this structure to change default settings
18114 + */
18115 +struct dpdmux_cfg {
18116 + enum dpdmux_method method;
18117 + enum dpdmux_manip manip;
18118 + u16 num_ifs;
18119 + /**
18120 + * struct adv - Advanced parameters
18121 + * @options: DPDMUX options - combination of 'DPDMUX_OPT_<X>' flags
18122 + * @max_dmat_entries: Maximum entries in DPDMUX address table
18123 + * 0 - indicates default: 64 entries per interface.
18124 + * @max_mc_groups: Number of multicast groups in DPDMUX table
18125 + * 0 - indicates default: 32 multicast groups
18126 + * @max_vlan_ids: max vlan ids allowed in the system -
18127 + * relevant only case of working in mac+vlan method.
18128 + * 0 - indicates default 16 vlan ids.
18129 + */
18130 + struct {
18131 + u64 options;
18132 + u16 max_dmat_entries;
18133 + u16 max_mc_groups;
18134 + u16 max_vlan_ids;
18135 + } adv;
18136 +};
18137 +
18138 +int dpdmux_create(struct fsl_mc_io *mc_io,
18139 + u16 dprc_token,
18140 + u32 cmd_flags,
18141 + const struct dpdmux_cfg *cfg,
18142 + u32 *obj_id);
18143 +
18144 +int dpdmux_destroy(struct fsl_mc_io *mc_io,
18145 + u16 dprc_token,
18146 + u32 cmd_flags,
18147 + u32 object_id);
18148 +
18149 +int dpdmux_enable(struct fsl_mc_io *mc_io,
18150 + u32 cmd_flags,
18151 + u16 token);
18152 +
18153 +int dpdmux_disable(struct fsl_mc_io *mc_io,
18154 + u32 cmd_flags,
18155 + u16 token);
18156 +
18157 +int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
18158 + u32 cmd_flags,
18159 + u16 token,
18160 + int *en);
18161 +
18162 +int dpdmux_reset(struct fsl_mc_io *mc_io,
18163 + u32 cmd_flags,
18164 + u16 token);
18165 +
18166 +int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
18167 + u32 cmd_flags,
18168 + u16 token,
18169 + u8 irq_index,
18170 + u8 en);
18171 +
18172 +int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
18173 + u32 cmd_flags,
18174 + u16 token,
18175 + u8 irq_index,
18176 + u8 *en);
18177 +
18178 +int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
18179 + u32 cmd_flags,
18180 + u16 token,
18181 + u8 irq_index,
18182 + u32 mask);
18183 +
18184 +int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
18185 + u32 cmd_flags,
18186 + u16 token,
18187 + u8 irq_index,
18188 + u32 *mask);
18189 +
18190 +int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
18191 + u32 cmd_flags,
18192 + u16 token,
18193 + u8 irq_index,
18194 + u32 *status);
18195 +
18196 +int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
18197 + u32 cmd_flags,
18198 + u16 token,
18199 + u8 irq_index,
18200 + u32 status);
18201 +
18202 +/**
18203 + * struct dpdmux_attr - Structure representing DPDMUX attributes
18204 + * @id: DPDMUX object ID
18205 + * @options: Configuration options (bitmap)
18206 + * @method: DPDMUX address table method
18207 + * @manip: DPDMUX manipulation type
18208 + * @num_ifs: Number of interfaces (excluding the uplink interface)
18209 + * @mem_size: DPDMUX frame storage memory size
18210 + */
18211 +struct dpdmux_attr {
18212 + int id;
18213 + u64 options;
18214 + enum dpdmux_method method;
18215 + enum dpdmux_manip manip;
18216 + u16 num_ifs;
18217 + u16 mem_size;
18218 +};
18219 +
18220 +int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
18221 + u32 cmd_flags,
18222 + u16 token,
18223 + struct dpdmux_attr *attr);
18224 +
18225 +int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
18226 + u32 cmd_flags,
18227 + u16 token,
18228 + u16 max_frame_length);
18229 +
18230 +/**
18231 + * enum dpdmux_counter_type - Counter types
18232 + * @DPDMUX_CNT_ING_FRAME: Counts ingress frames
18233 + * @DPDMUX_CNT_ING_BYTE: Counts ingress bytes
18234 + * @DPDMUX_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
18235 + * @DPDMUX_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
18236 + * @DPDMUX_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
18237 + * @DPDMUX_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
18238 + * @DPDMUX_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
18239 + * @DPDMUX_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
18240 + * @DPDMUX_CNT_EGR_FRAME: Counts egress frames
18241 + * @DPDMUX_CNT_EGR_BYTE: Counts egress bytes
18242 + * @DPDMUX_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
18243 + */
18244 +enum dpdmux_counter_type {
18245 + DPDMUX_CNT_ING_FRAME = 0x0,
18246 + DPDMUX_CNT_ING_BYTE = 0x1,
18247 + DPDMUX_CNT_ING_FLTR_FRAME = 0x2,
18248 + DPDMUX_CNT_ING_FRAME_DISCARD = 0x3,
18249 + DPDMUX_CNT_ING_MCAST_FRAME = 0x4,
18250 + DPDMUX_CNT_ING_MCAST_BYTE = 0x5,
18251 + DPDMUX_CNT_ING_BCAST_FRAME = 0x6,
18252 + DPDMUX_CNT_ING_BCAST_BYTES = 0x7,
18253 + DPDMUX_CNT_EGR_FRAME = 0x8,
18254 + DPDMUX_CNT_EGR_BYTE = 0x9,
18255 + DPDMUX_CNT_EGR_FRAME_DISCARD = 0xa
18256 +};
18257 +
18258 +/**
18259 + * enum dpdmux_accepted_frames_type - DPDMUX frame types
18260 + * @DPDMUX_ADMIT_ALL: The device accepts VLAN tagged, untagged and
18261 + * priority-tagged frames
18262 + * @DPDMUX_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
18263 + * priority-tagged frames that are received on this
18264 + * interface
18265 + * @DPDMUX_ADMIT_ONLY_UNTAGGED: Untagged frames or priority-tagged frames
18266 + * received on this interface are accepted
18267 + */
18268 +enum dpdmux_accepted_frames_type {
18269 + DPDMUX_ADMIT_ALL = 0,
18270 + DPDMUX_ADMIT_ONLY_VLAN_TAGGED = 1,
18271 + DPDMUX_ADMIT_ONLY_UNTAGGED = 2
18272 +};
18273 +
18274 +/**
18275 + * enum dpdmux_action - DPDMUX action for un-accepted frames
18276 + * @DPDMUX_ACTION_DROP: Drop un-accepted frames
18277 + * @DPDMUX_ACTION_REDIRECT_TO_CTRL: Redirect un-accepted frames to the
18278 + * control interface
18279 + */
18280 +enum dpdmux_action {
18281 + DPDMUX_ACTION_DROP = 0,
18282 + DPDMUX_ACTION_REDIRECT_TO_CTRL = 1
18283 +};
18284 +
18285 +/**
18286 + * struct dpdmux_accepted_frames - Frame types configuration
18287 + * @type: Defines ingress accepted frames
18288 + * @unaccept_act: Defines action on frames not accepted
18289 + */
18290 +struct dpdmux_accepted_frames {
18291 + enum dpdmux_accepted_frames_type type;
18292 + enum dpdmux_action unaccept_act;
18293 +};
18294 +
18295 +int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
18296 + u32 cmd_flags,
18297 + u16 token,
18298 + u16 if_id,
18299 + const struct dpdmux_accepted_frames *cfg);
18300 +
18301 +/**
18302 + * struct dpdmux_if_attr - Structure representing frame types configuration
18303 + * @rate: Configured interface rate (in bits per second)
18304 + * @enabled: Indicates if interface is enabled
18305 + * @accept_frame_type: Indicates type of accepted frames for the interface
18306 + */
18307 +struct dpdmux_if_attr {
18308 + u32 rate;
18309 + int enabled;
18310 + enum dpdmux_accepted_frames_type accept_frame_type;
18311 +};
18312 +
18313 +int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
18314 + u32 cmd_flags,
18315 + u16 token,
18316 + u16 if_id,
18317 + struct dpdmux_if_attr *attr);
18318 +
18319 +int dpdmux_if_enable(struct fsl_mc_io *mc_io,
18320 + u32 cmd_flags,
18321 + u16 token,
18322 + u16 if_id);
18323 +
18324 +int dpdmux_if_disable(struct fsl_mc_io *mc_io,
18325 + u32 cmd_flags,
18326 + u16 token,
18327 + u16 if_id);
18328 +
18329 +/**
18330 + * struct dpdmux_l2_rule - Structure representing L2 rule
18331 + * @mac_addr: MAC address
18332 + * @vlan_id: VLAN ID
18333 + */
18334 +struct dpdmux_l2_rule {
18335 + u8 mac_addr[6];
18336 + u16 vlan_id;
18337 +};
18338 +
18339 +int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
18340 + u32 cmd_flags,
18341 + u16 token,
18342 + u16 if_id,
18343 + const struct dpdmux_l2_rule *rule);
18344 +
18345 +int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
18346 + u32 cmd_flags,
18347 + u16 token,
18348 + u16 if_id,
18349 + const struct dpdmux_l2_rule *rule);
18350 +
18351 +int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
18352 + u32 cmd_flags,
18353 + u16 token,
18354 + u16 if_id,
18355 + enum dpdmux_counter_type counter_type,
18356 + u64 *counter);
18357 +
18358 +int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
18359 + u32 cmd_flags,
18360 + u16 token);
18361 +
18362 +/**
18363 + * Enable auto-negotiation
18364 + */
18365 +#define DPDMUX_LINK_OPT_AUTONEG 0x0000000000000001ULL
18366 +/**
18367 + * Enable half-duplex mode
18368 + */
18369 +#define DPDMUX_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
18370 +/**
18371 + * Enable pause frames
18372 + */
18373 +#define DPDMUX_LINK_OPT_PAUSE 0x0000000000000004ULL
18374 +/**
18375 + * Enable a-symmetric pause frames
18376 + */
18377 +#define DPDMUX_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
18378 +
18379 +/**
18380 + * struct dpdmux_link_cfg - Structure representing DPDMUX link configuration
18381 + * @rate: Rate
18382 + * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
18383 + */
18384 +struct dpdmux_link_cfg {
18385 + u32 rate;
18386 + u64 options;
18387 +};
18388 +
18389 +int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
18390 + u32 cmd_flags,
18391 + u16 token,
18392 + u16 if_id,
18393 + struct dpdmux_link_cfg *cfg);
18394 +/**
18395 + * struct dpdmux_link_state - Structure representing DPDMUX link state
18396 + * @rate: Rate
18397 + * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
18398 + * @up: 0 - down, 1 - up
18399 + */
18400 +struct dpdmux_link_state {
18401 + u32 rate;
18402 + u64 options;
18403 + int up;
18404 +};
18405 +
18406 +int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
18407 + u32 cmd_flags,
18408 + u16 token,
18409 + u16 if_id,
18410 + struct dpdmux_link_state *state);
18411 +
18412 +int dpdmux_set_custom_key(struct fsl_mc_io *mc_io,
18413 + u32 cmd_flags,
18414 + u16 token,
18415 + u64 key_cfg_iova);
18416 +
18417 +/**
18418 + * struct dpdmux_rule_cfg - Custom classification rule.
18419 + *
18420 + * @key_iova: DMA address of buffer storing the look-up value
18421 + * @mask_iova: DMA address of the mask used for TCAM classification
18422 + * @key_size: size, in bytes, of the look-up value. This must match the size
18423 + * of the look-up key defined using dpdmux_set_custom_key, otherwise the
18424 + * entry will never be hit
18425 + */
18426 +struct dpdmux_rule_cfg {
18427 + u64 key_iova;
18428 + u64 mask_iova;
18429 + u8 key_size;
18430 +};
18431 +
18432 +/**
18433 + * struct dpdmux_cls_action - Action to execute for frames matching the
18434 + * classification entry
18435 + *
18436 + * @dest_if: Interface to forward the frames to. Port numbering is similar to
18437 + * the one used to connect interfaces:
18438 + * - 0 is the uplink port,
18439 + * - all others are downlink ports.
18440 + */
18441 +struct dpdmux_cls_action {
18442 + u16 dest_if;
18443 +};
18444 +
18445 +int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io,
18446 + u32 cmd_flags,
18447 + u16 token,
18448 + struct dpdmux_rule_cfg *rule,
18449 + struct dpdmux_cls_action *action);
18450 +
18451 +int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io,
18452 + u32 cmd_flags,
18453 + u16 token,
18454 + struct dpdmux_rule_cfg *rule);
18455 +
18456 +int dpdmux_get_api_version(struct fsl_mc_io *mc_io,
18457 + u32 cmd_flags,
18458 + u16 *major_ver,
18459 + u16 *minor_ver);
18460 +
18461 +#endif /* __FSL_DPDMUX_H */
18462 --- /dev/null
18463 +++ b/drivers/staging/fsl-dpaa2/evb/evb.c
18464 @@ -0,0 +1,1354 @@
18465 +/* Copyright 2015 Freescale Semiconductor Inc.
18466 + *
18467 + * Redistribution and use in source and binary forms, with or without
18468 + * modification, are permitted provided that the following conditions are met:
18469 + * * Redistributions of source code must retain the above copyright
18470 + * notice, this list of conditions and the following disclaimer.
18471 + * * Redistributions in binary form must reproduce the above copyright
18472 + * notice, this list of conditions and the following disclaimer in the
18473 + * documentation and/or other materials provided with the distribution.
18474 + * * Neither the name of Freescale Semiconductor nor the
18475 + * names of its contributors may be used to endorse or promote products
18476 + * derived from this software without specific prior written permission.
18477 + *
18478 + *
18479 + * ALTERNATIVELY, this software may be distributed under the terms of the
18480 + * GNU General Public License ("GPL") as published by the Free Software
18481 + * Foundation, either version 2 of that License or (at your option) any
18482 + * later version.
18483 + *
18484 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
18485 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18486 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18487 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
18488 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
18489 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
18490 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
18491 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
18492 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
18493 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
18494 + */
18495 +#include <linux/module.h>
18496 +#include <linux/msi.h>
18497 +#include <linux/netdevice.h>
18498 +#include <linux/etherdevice.h>
18499 +#include <linux/rtnetlink.h>
18500 +#include <linux/if_vlan.h>
18501 +
18502 +#include <uapi/linux/if_bridge.h>
18503 +#include <net/netlink.h>
18504 +
18505 +#include <linux/fsl/mc.h>
18506 +
18507 +#include "dpdmux.h"
18508 +#include "dpdmux-cmd.h"
18509 +
18510 +static const char evb_drv_version[] = "0.1";
18511 +
18512 +/* Minimal supported DPDMUX version */
18513 +#define DPDMUX_MIN_VER_MAJOR 6
18514 +#define DPDMUX_MIN_VER_MINOR 0
18515 +
18516 +/* IRQ index */
18517 +#define DPDMUX_MAX_IRQ_NUM 2
18518 +
18519 +/* MAX FRAME LENGTH (currently 10k) */
18520 +#define EVB_MAX_FRAME_LENGTH (10 * 1024)
18521 +#define EVB_MAX_MTU (EVB_MAX_FRAME_LENGTH - VLAN_ETH_HLEN)
18522 +#define EVB_MIN_MTU 68
18523 +
18524 +struct evb_port_priv {
18525 + struct net_device *netdev;
18526 + struct list_head list;
18527 + u16 port_index;
18528 + struct evb_priv *evb_priv;
18529 + u8 vlans[VLAN_VID_MASK + 1];
18530 +};
18531 +
18532 +struct evb_priv {
18533 + /* keep first */
18534 + struct evb_port_priv uplink;
18535 +
18536 + struct fsl_mc_io *mc_io;
18537 + struct list_head port_list;
18538 + struct dpdmux_attr attr;
18539 + u16 mux_handle;
18540 + int dev_id;
18541 +};
18542 +
18543 +static int _evb_port_carrier_state_sync(struct net_device *netdev)
18544 +{
18545 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18546 + struct dpdmux_link_state state;
18547 + int err;
18548 +
18549 + err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
18550 + port_priv->evb_priv->mux_handle,
18551 + port_priv->port_index, &state);
18552 + if (unlikely(err)) {
18553 + netdev_err(netdev, "dpdmux_if_get_link_state() err %d\n", err);
18554 + return err;
18555 + }
18556 +
18557 + WARN_ONCE(state.up > 1, "Garbage read into link_state");
18558 +
18559 + if (state.up)
18560 + netif_carrier_on(port_priv->netdev);
18561 + else
18562 + netif_carrier_off(port_priv->netdev);
18563 +
18564 + return 0;
18565 +}
18566 +
18567 +/* ndo_open: sync carrier state with the DPDMUX link on open */
18568 +{
18569 +	int err;
18570 +
18571 +	/* FIXME: enable port when support added */
18572 +
18573 +	err = _evb_port_carrier_state_sync(netdev);
18574 +	if (err) {
18575 +		netdev_err(netdev, "_evb_port_carrier_state_sync err %d\n",
18576 +			   err);
18577 +		return err;
18578 +	}
18579 +
18580 +	return 0;
18581 +}
18582 +
18583 +static netdev_tx_t evb_dropframe(struct sk_buff *skb, struct net_device *dev)
18584 +{
18585 + /* we don't support I/O for now, drop the frame */
18586 + dev_kfree_skb_any(skb);
18587 + return NETDEV_TX_OK;
18588 +}
18589 +
18590 +static int evb_links_state_update(struct evb_priv *priv)
18591 +{
18592 + struct evb_port_priv *port_priv;
18593 + struct list_head *pos;
18594 + int err;
18595 +
18596 + list_for_each(pos, &priv->port_list) {
18597 + port_priv = list_entry(pos, struct evb_port_priv, list);
18598 +
18599 + err = _evb_port_carrier_state_sync(port_priv->netdev);
18600 + if (err)
18601 + netdev_err(port_priv->netdev,
18602 + "_evb_port_carrier_state_sync err %d\n",
18603 + err);
18604 + }
18605 +
18606 + return 0;
18607 +}
18608 +
18609 +static irqreturn_t evb_irq0_handler(int irq_num, void *arg)
18610 +{
18611 + return IRQ_WAKE_THREAD;
18612 +}
18613 +
18614 +static irqreturn_t _evb_irq0_handler_thread(int irq_num, void *arg)
18615 +{
18616 + struct device *dev = (struct device *)arg;
18617 + struct fsl_mc_device *evb_dev = to_fsl_mc_device(dev);
18618 + struct net_device *netdev = dev_get_drvdata(dev);
18619 + struct evb_priv *priv = netdev_priv(netdev);
18620 + struct fsl_mc_io *io = priv->mc_io;
18621 + u16 token = priv->mux_handle;
18622 + int irq_index = DPDMUX_IRQ_INDEX_IF;
18623 +
18624 + /* Mask the events and the if_id reserved bits to be cleared on read */
18625 + u32 status = DPDMUX_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
18626 + int err;
18627 +
18628 + /* Sanity check */
18629 + if (WARN_ON(!evb_dev || !evb_dev->irqs || !evb_dev->irqs[irq_index]))
18630 + goto out;
18631 + if (WARN_ON(evb_dev->irqs[irq_index]->msi_desc->irq != (u32)irq_num))
18632 + goto out;
18633 +
18634 + err = dpdmux_get_irq_status(io, 0, token, irq_index, &status);
18635 + if (unlikely(err)) {
18636 + netdev_err(netdev, "Can't get irq status (err %d)", err);
18637 + err = dpdmux_clear_irq_status(io, 0, token, irq_index,
18638 + 0xFFFFFFFF);
18639 + if (unlikely(err))
18640 + netdev_err(netdev, "Can't clear irq status (err %d)",
18641 + err);
18642 + goto out;
18643 + }
18644 +
18645 + if (status & DPDMUX_IRQ_EVENT_LINK_CHANGED) {
18646 + err = evb_links_state_update(priv);
18647 + if (unlikely(err))
18648 + goto out;
18649 + }
18650 +
18651 +out:
18652 + return IRQ_HANDLED;
18653 +}
18654 +
18655 +static int evb_setup_irqs(struct fsl_mc_device *evb_dev)
18656 +{
18657 + struct device *dev = &evb_dev->dev;
18658 + struct net_device *netdev = dev_get_drvdata(dev);
18659 + struct evb_priv *priv = netdev_priv(netdev);
18660 + int err = 0;
18661 + struct fsl_mc_device_irq *irq;
18662 + const int irq_index = DPDMUX_IRQ_INDEX_IF;
18663 + u32 mask = DPDMUX_IRQ_EVENT_LINK_CHANGED;
18664 +
18665 + err = fsl_mc_allocate_irqs(evb_dev);
18666 + if (unlikely(err)) {
18667 + dev_err(dev, "MC irqs allocation failed\n");
18668 + return err;
18669 + }
18670 +
18671 + if (WARN_ON(evb_dev->obj_desc.irq_count != DPDMUX_MAX_IRQ_NUM)) {
18672 + err = -EINVAL;
18673 + goto free_irq;
18674 + }
18675 +
18676 + err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
18677 + irq_index, 0);
18678 + if (unlikely(err)) {
18679 + dev_err(dev, "dpdmux_set_irq_enable err %d\n", err);
18680 + goto free_irq;
18681 + }
18682 +
18683 + irq = evb_dev->irqs[irq_index];
18684 +
18685 + err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
18686 + evb_irq0_handler,
18687 + _evb_irq0_handler_thread,
18688 + IRQF_NO_SUSPEND | IRQF_ONESHOT,
18689 + dev_name(dev), dev);
18690 + if (unlikely(err)) {
18691 + dev_err(dev, "devm_request_threaded_irq(): %d", err);
18692 + goto free_irq;
18693 + }
18694 +
18695 + err = dpdmux_set_irq_mask(priv->mc_io, 0, priv->mux_handle,
18696 + irq_index, mask);
18697 + if (unlikely(err)) {
18698 + dev_err(dev, "dpdmux_set_irq_mask(): %d", err);
18699 + goto free_devm_irq;
18700 + }
18701 +
18702 + err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
18703 + irq_index, 1);
18704 + if (unlikely(err)) {
18705 + dev_err(dev, "dpdmux_set_irq_enable(): %d", err);
18706 + goto free_devm_irq;
18707 + }
18708 +
18709 + return 0;
18710 +
18711 +free_devm_irq:
18712 + devm_free_irq(dev, irq->msi_desc->irq, dev);
18713 +free_irq:
18714 + fsl_mc_free_irqs(evb_dev);
18715 + return err;
18716 +}
18717 +
18718 +static void evb_teardown_irqs(struct fsl_mc_device *evb_dev)
18719 +{
18720 + struct device *dev = &evb_dev->dev;
18721 + struct net_device *netdev = dev_get_drvdata(dev);
18722 + struct evb_priv *priv = netdev_priv(netdev);
18723 +
18724 + dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
18725 + DPDMUX_IRQ_INDEX_IF, 0);
18726 +
18727 + devm_free_irq(dev,
18728 + evb_dev->irqs[DPDMUX_IRQ_INDEX_IF]->msi_desc->irq,
18729 + dev);
18730 + fsl_mc_free_irqs(evb_dev);
18731 +}
18732 +
18733 +static int evb_port_add_rule(struct net_device *netdev,
18734 + const unsigned char *addr, u16 vid)
18735 +{
18736 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18737 + struct dpdmux_l2_rule rule = { .vlan_id = vid };
18738 + int err;
18739 +
18740 + if (addr)
18741 + ether_addr_copy(rule.mac_addr, addr);
18742 +
18743 + err = dpdmux_if_add_l2_rule(port_priv->evb_priv->mc_io,
18744 + 0,
18745 + port_priv->evb_priv->mux_handle,
18746 + port_priv->port_index, &rule);
18747 + if (unlikely(err))
18748 + netdev_err(netdev, "dpdmux_if_add_l2_rule err %d\n", err);
18749 + return err;
18750 +}
18751 +
18752 +static int evb_port_del_rule(struct net_device *netdev,
18753 + const unsigned char *addr, u16 vid)
18754 +{
18755 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18756 + struct dpdmux_l2_rule rule = { .vlan_id = vid };
18757 + int err;
18758 +
18759 + if (addr)
18760 + ether_addr_copy(rule.mac_addr, addr);
18761 +
18762 + err = dpdmux_if_remove_l2_rule(port_priv->evb_priv->mc_io,
18763 + 0,
18764 + port_priv->evb_priv->mux_handle,
18765 + port_priv->port_index, &rule);
18766 + if (unlikely(err))
18767 + netdev_err(netdev, "dpdmux_if_remove_l2_rule err %d\n", err);
18768 + return err;
18769 +}
18770 +
18771 +static bool _lookup_address(struct net_device *netdev,
18772 + const unsigned char *addr)
18773 +{
18774 + struct netdev_hw_addr *ha;
18775 + struct netdev_hw_addr_list *list = (is_unicast_ether_addr(addr)) ?
18776 + &netdev->uc : &netdev->mc;
18777 +
18778 + netif_addr_lock_bh(netdev);
18779 + list_for_each_entry(ha, &list->list, list) {
18780 + if (ether_addr_equal(ha->addr, addr)) {
18781 + netif_addr_unlock_bh(netdev);
18782 + return true;
18783 + }
18784 + }
18785 + netif_addr_unlock_bh(netdev);
18786 + return false;
18787 +}
18788 +
18789 +static inline int evb_port_fdb_prep(struct nlattr *tb[],
18790 + struct net_device *netdev,
18791 + const unsigned char *addr, u16 *vid,
18792 + bool del)
18793 +{
18794 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18795 + struct evb_priv *evb_priv = port_priv->evb_priv;
18796 +
18797 + *vid = 0;
18798 +
18799 + if (evb_priv->attr.method != DPDMUX_METHOD_MAC &&
18800 + evb_priv->attr.method != DPDMUX_METHOD_C_VLAN_MAC) {
18801 + netdev_err(netdev,
18802 + "EVB mode does not support MAC classification\n");
18803 + return -EOPNOTSUPP;
18804 + }
18805 +
18806 + /* check if the address is configured on this port */
18807 + if (_lookup_address(netdev, addr)) {
18808 + if (!del)
18809 + return -EEXIST;
18810 + } else {
18811 + if (del)
18812 + return -ENOENT;
18813 + }
18814 +
18815 + if (tb[NDA_VLAN] && evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) {
18816 + if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) {
18817 + netdev_err(netdev, "invalid vlan size %d\n",
18818 + nla_len(tb[NDA_VLAN]));
18819 + return -EINVAL;
18820 + }
18821 +
18822 + *vid = nla_get_u16(tb[NDA_VLAN]);
18823 +
18824 + if (!*vid || *vid >= VLAN_VID_MASK) {
18825 + netdev_err(netdev, "invalid vid value 0x%04x\n", *vid);
18826 + return -EINVAL;
18827 + }
18828 + } else if (evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) {
18829 + netdev_err(netdev,
18830 + "EVB mode requires explicit VLAN configuration\n");
18831 + return -EINVAL;
18832 + } else if (tb[NDA_VLAN]) {
18833 + netdev_warn(netdev, "VLAN not supported, argument ignored\n");
18834 + }
18835 +
18836 + return 0;
18837 +}
18838 +
18839 +static int evb_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
18840 + struct net_device *netdev,
18841 + const unsigned char *addr, u16 vid, u16 flags)
18842 +{
18843 + u16 _vid;
18844 + int err;
18845 +
18846 + /* TODO: add replace support when added to iproute bridge */
18847 + if (!(flags & NLM_F_REQUEST)) {
18848 + netdev_err(netdev,
18849 + "evb_port_fdb_add unexpected flags value %08x\n",
18850 + flags);
18851 + return -EINVAL;
18852 + }
18853 +
18854 + err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 0);
18855 + if (unlikely(err))
18856 + return err;
18857 +
18858 + err = evb_port_add_rule(netdev, addr, _vid);
18859 + if (unlikely(err))
18860 + return err;
18861 +
18862 + if (is_unicast_ether_addr(addr)) {
18863 + err = dev_uc_add(netdev, addr);
18864 + if (unlikely(err)) {
18865 + netdev_err(netdev, "dev_uc_add err %d\n", err);
18866 + return err;
18867 + }
18868 + } else {
18869 + err = dev_mc_add(netdev, addr);
18870 + if (unlikely(err)) {
18871 + netdev_err(netdev, "dev_mc_add err %d\n", err);
18872 + return err;
18873 + }
18874 + }
18875 +
18876 + return 0;
18877 +}
18878 +
18879 +static int evb_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
18880 + struct net_device *netdev,
18881 + const unsigned char *addr, u16 vid)
18882 +{
18883 + u16 _vid;
18884 + int err;
18885 +
18886 + err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 1);
18887 + if (unlikely(err))
18888 + return err;
18889 +
18890 + err = evb_port_del_rule(netdev, addr, _vid);
18891 + if (unlikely(err))
18892 + return err;
18893 +
18894 + if (is_unicast_ether_addr(addr)) {
18895 + err = dev_uc_del(netdev, addr);
18896 + if (unlikely(err)) {
18897 + netdev_err(netdev, "dev_uc_del err %d\n", err);
18898 + return err;
18899 + }
18900 + } else {
18901 + err = dev_mc_del(netdev, addr);
18902 + if (unlikely(err)) {
18903 + netdev_err(netdev, "dev_mc_del err %d\n", err);
18904 + return err;
18905 + }
18906 + }
18907 +
18908 + return 0;
18909 +}
18910 +
18911 +static int evb_change_mtu(struct net_device *netdev,
18912 +			  int mtu)
18913 +{
18914 +	struct evb_port_priv *port_priv = netdev_priv(netdev);
18915 +	struct evb_priv *evb_priv = port_priv->evb_priv;
18916 +	struct list_head *pos;
18917 +	int err = 0;
18918 +
18919 +	/* This operation is not permitted on downlinks */
18920 +	if (port_priv->port_index > 0)
18921 +		return -EPERM;
18922 +
18923 +	err = dpdmux_set_max_frame_length(evb_priv->mc_io,
18924 +					  0,
18925 +					  evb_priv->mux_handle,
18926 +					  (uint16_t)(mtu + VLAN_ETH_HLEN));
18927 +
18928 +	if (unlikely(err)) {
18929 +		netdev_err(netdev, "dpdmux_set_max_frame_length err %d\n",
18930 +			   err);
18931 +		return err;
18932 +	}
18933 +
18934 +	/* Update the max frame length for downlinks */
18935 +	list_for_each(pos, &evb_priv->port_list) {
18936 +		port_priv = list_entry(pos, struct evb_port_priv, list);
18937 +		port_priv->netdev->mtu = mtu;
18938 +	}
18939 +
18940 +	netdev->mtu = mtu;
18941 +	return 0;
18942 +}
18943 +
18944 +static const struct nla_policy ifla_br_policy[IFLA_MAX + 1] = {
18945 + [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
18946 + [IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
18947 + [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
18948 + .len = sizeof(struct bridge_vlan_info), },
18949 +};
18950 +
18951 +static int evb_setlink_af_spec(struct net_device *netdev,
18952 +			       struct nlattr **tb)
18953 +{
18954 +	struct bridge_vlan_info *vinfo;
18955 +	struct evb_port_priv *port_priv = netdev_priv(netdev);
18956 +	int err = 0;
18957 +
18958 +	if (!tb[IFLA_BRIDGE_VLAN_INFO]) {
18959 +		netdev_err(netdev, "no VLAN INFO in nlmsg\n");
18960 +		return -EOPNOTSUPP;
18961 +	}
18962 +
18963 +	vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
18964 +
18965 +	if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK) /* 0 and 4095 reserved */
18966 +		return -EINVAL;
18967 +
18968 +	err = evb_port_add_rule(netdev, NULL, vinfo->vid);
18969 +	if (unlikely(err))
18970 +		return err;
18971 +
18972 +	port_priv->vlans[vinfo->vid] = 1;
18973 +
18974 +	return 0;
18975 +}
18976 +
18977 +static int evb_setlink(struct net_device *netdev,
18978 +		       struct nlmsghdr *nlh,
18979 +		       u16 flags)
18980 +{
18981 +	struct evb_port_priv *port_priv = netdev_priv(netdev);
18982 +	struct evb_priv *evb_priv = port_priv->evb_priv;
18983 +	struct nlattr *attr;
18984 +	/* nla_parse_nested() fills indices 0..maxtype, hence the (...) + 1 */
18985 +	struct nlattr *tb[((IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX) ?
18986 +			   IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX) + 1];
18987 +	int err = 0;
18988 +
18989 +	if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN &&
18990 +	    evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) {
18991 +		netdev_err(netdev,
18992 +			   "EVB mode does not support VLAN only classification\n");
18993 +		return -EOPNOTSUPP;
18994 +	}
18995 +
18996 +	attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
18997 +	if (attr) {
18998 +		err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr,
18999 +				       ifla_br_policy);
19000 +		if (unlikely(err)) {
19001 +			netdev_err(netdev,
19002 +				   "nla_parse_nested for br_policy err %d\n",
19003 +				   err);
19004 +			return err;
19005 +		}
19006 +
19007 +		err = evb_setlink_af_spec(netdev, tb);
19008 +		return err;
19009 +	}
19010 +
19011 +	netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC\n");
19012 +	return -EOPNOTSUPP;
19013 +}
19013 +
19014 +static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev)
19015 +{
19016 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19017 + struct evb_priv *evb_priv = port_priv->evb_priv;
19018 + u8 operstate = netif_running(netdev) ?
19019 + netdev->operstate : IF_OPER_DOWN;
19020 + int iflink;
19021 + int err;
19022 +
19023 + err = nla_put_string(skb, IFLA_IFNAME, netdev->name);
19024 + if (unlikely(err))
19025 + goto nla_put_err;
19026 + err = nla_put_u32(skb, IFLA_MASTER, evb_priv->uplink.netdev->ifindex);
19027 + if (unlikely(err))
19028 + goto nla_put_err;
19029 + err = nla_put_u32(skb, IFLA_MTU, netdev->mtu);
19030 + if (unlikely(err))
19031 + goto nla_put_err;
19032 + err = nla_put_u8(skb, IFLA_OPERSTATE, operstate);
19033 + if (unlikely(err))
19034 + goto nla_put_err;
19035 + if (netdev->addr_len) {
19036 + err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len,
19037 + netdev->dev_addr);
19038 + if (unlikely(err))
19039 + goto nla_put_err;
19040 + }
19041 +
19042 + iflink = dev_get_iflink(netdev);
19043 + if (netdev->ifindex != iflink) {
19044 + err = nla_put_u32(skb, IFLA_LINK, iflink);
19045 + if (unlikely(err))
19046 + goto nla_put_err;
19047 + }
19048 +
19049 + return 0;
19050 +
19051 +nla_put_err:
19052 + netdev_err(netdev, "nla_put_ err %d\n", err);
19053 + return err;
19054 +}
19055 +
19056 +static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev)
19057 +{
19058 + struct nlattr *nest;
19059 + int err;
19060 +
19061 + nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
19062 + if (!nest) {
19063 + netdev_err(netdev, "nla_nest_start failed\n");
19064 + return -ENOMEM;
19065 + }
19066 +
19067 + err = nla_put_u8(skb, IFLA_BRPORT_STATE, BR_STATE_FORWARDING);
19068 + if (unlikely(err))
19069 + goto nla_put_err;
19070 + err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0);
19071 + if (unlikely(err))
19072 + goto nla_put_err;
19073 + err = nla_put_u32(skb, IFLA_BRPORT_COST, 0);
19074 + if (unlikely(err))
19075 + goto nla_put_err;
19076 + err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0);
19077 + if (unlikely(err))
19078 + goto nla_put_err;
19079 + err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0);
19080 + if (unlikely(err))
19081 + goto nla_put_err;
19082 + err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0);
19083 + if (unlikely(err))
19084 + goto nla_put_err;
19085 + err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0);
19086 + if (unlikely(err))
19087 + goto nla_put_err;
19088 + err = nla_put_u8(skb, IFLA_BRPORT_LEARNING, 0);
19089 + if (unlikely(err))
19090 + goto nla_put_err;
19091 + err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, 1);
19092 + if (unlikely(err))
19093 + goto nla_put_err;
19094 + nla_nest_end(skb, nest);
19095 +
19096 + return 0;
19097 +
19098 +nla_put_err:
19099 + netdev_err(netdev, "nla_put_ err %d\n", err);
19100 + nla_nest_cancel(skb, nest);
19101 + return err;
19102 +}
19103 +
19104 +static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev)
19105 +{
19106 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19107 + struct nlattr *nest;
19108 + struct bridge_vlan_info vinfo;
19109 + const u8 *vlans = port_priv->vlans;
19110 + u16 i;
19111 + int err;
19112 +
19113 + nest = nla_nest_start(skb, IFLA_AF_SPEC);
19114 + if (!nest) {
19115 + netdev_err(netdev, "nla_nest_start failed\n");
19116 + return -ENOMEM;
19117 + }
19118 +
19119 + for (i = 0; i < VLAN_VID_MASK + 1; i++) {
19120 + if (!vlans[i])
19121 + continue;
19122 +
19123 + vinfo.flags = 0;
19124 + vinfo.vid = i;
19125 +
19126 + err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
19127 + sizeof(vinfo), &vinfo);
19128 + if (unlikely(err))
19129 + goto nla_put_err;
19130 + }
19131 +
19132 + nla_nest_end(skb, nest);
19133 +
19134 + return 0;
19135 +
19136 +nla_put_err:
19137 + netdev_err(netdev, "nla_put_ err %d\n", err);
19138 + nla_nest_cancel(skb, nest);
19139 + return err;
19140 +}
19141 +
19142 +static int evb_getlink(struct sk_buff *skb, u32 pid, u32 seq,
19143 + struct net_device *netdev, u32 filter_mask, int nlflags)
19144 +{
19145 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19146 + struct evb_priv *evb_priv = port_priv->evb_priv;
19147 + struct ifinfomsg *hdr;
19148 + struct nlmsghdr *nlh;
19149 + int err;
19150 +
19151 + if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN &&
19152 + evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) {
19153 + return 0;
19154 + }
19155 +
19156 + nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI);
19157 + if (!nlh)
19158 + return -EMSGSIZE;
19159 +
19160 + hdr = nlmsg_data(nlh);
19161 + memset(hdr, 0, sizeof(*hdr));
19162 + hdr->ifi_family = AF_BRIDGE;
19163 + hdr->ifi_type = netdev->type;
19164 + hdr->ifi_index = netdev->ifindex;
19165 + hdr->ifi_flags = dev_get_flags(netdev);
19166 +
19167 + err = __nla_put_netdev(skb, netdev);
19168 + if (unlikely(err))
19169 + goto nla_put_err;
19170 +
19171 + err = __nla_put_port(skb, netdev);
19172 + if (unlikely(err))
19173 + goto nla_put_err;
19174 +
19175 + /* Check if the VID information is requested */
19176 + if (filter_mask & RTEXT_FILTER_BRVLAN) {
19177 + err = __nla_put_vlan(skb, netdev);
19178 + if (unlikely(err))
19179 + goto nla_put_err;
19180 + }
19181 +
19182 + nlmsg_end(skb, nlh);
19183 + return skb->len;
19184 +
19185 +nla_put_err:
19186 + nlmsg_cancel(skb, nlh);
19187 + return -EMSGSIZE;
19188 +}
19189 +
19190 +static int evb_dellink(struct net_device *netdev,
19191 + struct nlmsghdr *nlh,
19192 + u16 flags)
19193 +{
19194 + struct nlattr *tb[IFLA_BRIDGE_MAX + 1];
19195 + struct nlattr *spec;
19196 + struct bridge_vlan_info *vinfo;
19197 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19198 + int err = 0;
19199 +
19200 + spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
19201 + if (!spec)
19202 + return 0;
19203 +
19204 + err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy);
19205 + if (unlikely(err))
19206 + return err;
19207 +
19208 + if (!tb[IFLA_BRIDGE_VLAN_INFO])
19209 + return -EOPNOTSUPP;
19210 +
19211 + vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
19212 +
19213 + if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
19214 + return -EINVAL;
19215 +
19216 + err = evb_port_del_rule(netdev, NULL, vinfo->vid);
19217 + if (unlikely(err)) {
19218 + netdev_err(netdev, "evb_port_del_rule err %d\n", err);
19219 + return err;
19220 + }
19221 + port_priv->vlans[vinfo->vid] = 0;
19222 +
19223 + return 0;
19224 +}
19225 +
19226 +struct rtnl_link_stats64 *evb_port_get_stats(struct net_device *netdev,
19227 + struct rtnl_link_stats64 *storage)
19228 +{
19229 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19230 + u64 tmp;
19231 + int err;
19232 +
19233 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19234 + 0,
19235 + port_priv->evb_priv->mux_handle,
19236 + port_priv->port_index,
19237 + DPDMUX_CNT_ING_FRAME, &storage->rx_packets);
19238 + if (unlikely(err))
19239 + goto error;
19240 +
19241 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19242 + 0,
19243 + port_priv->evb_priv->mux_handle,
19244 + port_priv->port_index,
19245 + DPDMUX_CNT_ING_BYTE, &storage->rx_bytes);
19246 + if (unlikely(err))
19247 + goto error;
19248 +
19249 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19250 + 0,
19251 + port_priv->evb_priv->mux_handle,
19252 + port_priv->port_index,
19253 + DPDMUX_CNT_ING_FLTR_FRAME, &tmp);
19254 + if (unlikely(err))
19255 + goto error;
19256 +
19257 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19258 + 0,
19259 + port_priv->evb_priv->mux_handle,
19260 + port_priv->port_index,
19261 + DPDMUX_CNT_ING_FRAME_DISCARD,
19262 + &storage->rx_dropped);
19263 + if (unlikely(err)) {
19264 + storage->rx_dropped = tmp;
19265 + goto error;
19266 + }
19267 + storage->rx_dropped += tmp;
19268 +
19269 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19270 + 0,
19271 + port_priv->evb_priv->mux_handle,
19272 + port_priv->port_index,
19273 + DPDMUX_CNT_ING_MCAST_FRAME,
19274 + &storage->multicast);
19275 + if (unlikely(err))
19276 + goto error;
19277 +
19278 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19279 + 0,
19280 + port_priv->evb_priv->mux_handle,
19281 + port_priv->port_index,
19282 + DPDMUX_CNT_EGR_FRAME, &storage->tx_packets);
19283 + if (unlikely(err))
19284 + goto error;
19285 +
19286 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19287 + 0,
19288 + port_priv->evb_priv->mux_handle,
19289 + port_priv->port_index,
19290 + DPDMUX_CNT_EGR_BYTE, &storage->tx_bytes);
19291 + if (unlikely(err))
19292 + goto error;
19293 +
19294 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19295 + 0,
19296 + port_priv->evb_priv->mux_handle,
19297 + port_priv->port_index,
19298 + DPDMUX_CNT_EGR_FRAME_DISCARD,
19299 + &storage->tx_dropped);
19300 + if (unlikely(err))
19301 + goto error;
19302 +
19303 + return storage;
19304 +
19305 +error:
19306 + netdev_err(netdev, "dpdmux_if_get_counter err %d\n", err);
19307 + return storage;
19308 +}
19309 +
19310 +static const struct net_device_ops evb_port_ops = {
19311 + .ndo_open = &evb_port_open,
19312 +
19313 + .ndo_start_xmit = &evb_dropframe,
19314 +
19315 + .ndo_fdb_add = &evb_port_fdb_add,
19316 + .ndo_fdb_del = &evb_port_fdb_del,
19317 +
19318 + .ndo_get_stats64 = &evb_port_get_stats,
19319 + .ndo_change_mtu = &evb_change_mtu,
19320 +};
19321 +
19322 +static void evb_get_drvinfo(struct net_device *netdev,
19323 + struct ethtool_drvinfo *drvinfo)
19324 +{
19325 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19326 + u16 version_major, version_minor;
19327 + int err;
19328 +
19329 + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
19330 + strlcpy(drvinfo->version, evb_drv_version, sizeof(drvinfo->version));
19331 +
19332 + err = dpdmux_get_api_version(port_priv->evb_priv->mc_io, 0,
19333 + &version_major,
19334 + &version_minor);
19335 + if (err)
19336 + strlcpy(drvinfo->fw_version, "N/A",
19337 + sizeof(drvinfo->fw_version));
19338 + else
19339 + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
19340 + "%u.%u", version_major, version_minor);
19341 +
19342 + strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
19343 + sizeof(drvinfo->bus_info));
19344 +}
19345 +
19346 +static int evb_get_settings(struct net_device *netdev,
19347 + struct ethtool_cmd *cmd)
19348 +{
19349 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19350 + struct dpdmux_link_state state = {0};
19351 + int err = 0;
19352 +
19353 + err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
19354 + port_priv->evb_priv->mux_handle,
19355 + port_priv->port_index,
19356 + &state);
19357 + if (err) {
19358 + netdev_err(netdev, "ERROR %d getting link state\n", err);
19359 + goto out;
19360 + }
19361 +
19362 + /* At the moment, we have no way of interrogating the DPMAC
19363 + * from the DPDMUX side or there may not exist a DPMAC at all.
19364 + * Report only autoneg state, duplexity and speed.
19365 + */
19366 + if (state.options & DPDMUX_LINK_OPT_AUTONEG)
19367 + cmd->autoneg = AUTONEG_ENABLE;
19368 + if (!(state.options & DPDMUX_LINK_OPT_HALF_DUPLEX))
19369 + cmd->duplex = DUPLEX_FULL;
19370 + ethtool_cmd_speed_set(cmd, state.rate);
19371 +
19372 +out:
19373 + return err;
19374 +}
19375 +
19376 +static int evb_set_settings(struct net_device *netdev,
19377 + struct ethtool_cmd *cmd)
19378 +{
19379 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19380 + struct dpdmux_link_state state = {0};
19381 + struct dpdmux_link_cfg cfg = {0};
19382 + int err = 0;
19383 +
19384 + netdev_dbg(netdev, "Setting link parameters...");
19385 +
19386 + err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
19387 + port_priv->evb_priv->mux_handle,
19388 + port_priv->port_index,
19389 + &state);
19390 + if (err) {
19391 + netdev_err(netdev, "ERROR %d getting link state\n", err);
19392 + goto out;
19393 + }
19394 +
19395 + /* Due to a temporary MC limitation, the DPDMUX port must be down
19396 + * in order to be able to change link settings. Taking steps to let
19397 + * the user know that.
19398 + */
19399 + if (netif_running(netdev)) {
19400 + netdev_info(netdev,
19401 + "Sorry, interface must be brought down first.\n");
19402 + return -EACCES;
19403 + }
19404 +
19405 + cfg.options = state.options;
19406 + cfg.rate = ethtool_cmd_speed(cmd);
19407 + if (cmd->autoneg == AUTONEG_ENABLE)
19408 + cfg.options |= DPDMUX_LINK_OPT_AUTONEG;
19409 + else
19410 + cfg.options &= ~DPDMUX_LINK_OPT_AUTONEG;
19411 + if (cmd->duplex == DUPLEX_HALF)
19412 + cfg.options |= DPDMUX_LINK_OPT_HALF_DUPLEX;
19413 + else
19414 + cfg.options &= ~DPDMUX_LINK_OPT_HALF_DUPLEX;
19415 +
19416 + err = dpdmux_if_set_link_cfg(port_priv->evb_priv->mc_io, 0,
19417 + port_priv->evb_priv->mux_handle,
19418 + port_priv->port_index,
19419 + &cfg);
19420 + if (err)
19421 + /* ethtool will be loud enough if we return an error; no point
19422 + * in putting our own error message on the console by default
19423 + */
19424 + netdev_dbg(netdev, "ERROR %d setting link cfg", err);
19425 +
19426 +out:
19427 + return err;
19428 +}
19429 +
19430 +static struct {
19431 + enum dpdmux_counter_type id;
19432 + char name[ETH_GSTRING_LEN];
19433 +} evb_ethtool_counters[] = {
19434 + {DPDMUX_CNT_ING_FRAME, "rx frames"},
19435 + {DPDMUX_CNT_ING_BYTE, "rx bytes"},
19436 + {DPDMUX_CNT_ING_FLTR_FRAME, "rx filtered frames"},
19437 + {DPDMUX_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
19438 + {DPDMUX_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
19439 + {DPDMUX_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
19440 + {DPDMUX_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
19441 + {DPDMUX_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
19442 + {DPDMUX_CNT_EGR_FRAME, "tx frames"},
19443 + {DPDMUX_CNT_EGR_BYTE, "tx bytes"},
19444 + {DPDMUX_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
19445 +};
19446 +
19447 +static int evb_ethtool_get_sset_count(struct net_device *dev, int sset)
19448 +{
19449 + switch (sset) {
19450 + case ETH_SS_STATS:
19451 + return ARRAY_SIZE(evb_ethtool_counters);
19452 + default:
19453 + return -EOPNOTSUPP;
19454 + }
19455 +}
19456 +
19457 +static void evb_ethtool_get_strings(struct net_device *netdev,
19458 + u32 stringset, u8 *data)
19459 +{
19460 + u32 i;
19461 +
19462 + switch (stringset) {
19463 + case ETH_SS_STATS:
19464 + for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++)
19465 + memcpy(data + i * ETH_GSTRING_LEN,
19466 + evb_ethtool_counters[i].name, ETH_GSTRING_LEN);
19467 + break;
19468 + }
19469 +}
19470 +
19471 +static void evb_ethtool_get_stats(struct net_device *netdev,
19472 + struct ethtool_stats *stats,
19473 + u64 *data)
19474 +{
19475 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19476 + u32 i;
19477 + int err;
19478 +
19479 + for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++) {
19480 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19481 + 0,
19482 + port_priv->evb_priv->mux_handle,
19483 + port_priv->port_index,
19484 + evb_ethtool_counters[i].id,
19485 + &data[i]);
19486 + if (err)
19487 + netdev_err(netdev, "dpdmux_if_get_counter[%s] err %d\n",
19488 + evb_ethtool_counters[i].name, err);
19489 + }
19490 +}
19491 +
19492 +static const struct ethtool_ops evb_port_ethtool_ops = {
19493 + .get_drvinfo = &evb_get_drvinfo,
19494 + .get_link = &ethtool_op_get_link,
19495 + .get_settings = &evb_get_settings,
19496 + .set_settings = &evb_set_settings,
19497 + .get_strings = &evb_ethtool_get_strings,
19498 + .get_ethtool_stats = &evb_ethtool_get_stats,
19499 + .get_sset_count = &evb_ethtool_get_sset_count,
19500 +};
19501 +
19502 +static int evb_open(struct net_device *netdev)
19503 +{
19504 + struct evb_priv *priv = netdev_priv(netdev);
19505 + int err = 0;
19506 +
19507 + err = dpdmux_enable(priv->mc_io, 0, priv->mux_handle);
19508 + if (unlikely(err))
19509 + netdev_err(netdev, "dpdmux_enable err %d\n", err);
19510 +
19511 + return err;
19512 +}
19513 +
19514 +static int evb_close(struct net_device *netdev)
19515 +{
19516 + struct evb_priv *priv = netdev_priv(netdev);
19517 + int err = 0;
19518 +
19519 + err = dpdmux_disable(priv->mc_io, 0, priv->mux_handle);
19520 + if (unlikely(err))
19521 + netdev_err(netdev, "dpdmux_disable err %d\n", err);
19522 +
19523 + return err;
19524 +}
19525 +
19526 +static const struct net_device_ops evb_ops = {
19527 + .ndo_start_xmit = &evb_dropframe,
19528 + .ndo_open = &evb_open,
19529 + .ndo_stop = &evb_close,
19530 +
19531 + .ndo_bridge_setlink = &evb_setlink,
19532 + .ndo_bridge_getlink = &evb_getlink,
19533 + .ndo_bridge_dellink = &evb_dellink,
19534 +
19535 + .ndo_get_stats64 = &evb_port_get_stats,
19536 + .ndo_change_mtu = &evb_change_mtu,
19537 +};
19538 +
19539 +static int evb_takedown(struct fsl_mc_device *evb_dev)
19540 +{
19541 + struct device *dev = &evb_dev->dev;
19542 + struct net_device *netdev = dev_get_drvdata(dev);
19543 + struct evb_priv *priv = netdev_priv(netdev);
19544 + int err;
19545 +
19546 + err = dpdmux_close(priv->mc_io, 0, priv->mux_handle);
19547 + if (unlikely(err))
19548 + dev_warn(dev, "dpdmux_close err %d\n", err);
19549 +
19550 + return 0;
19551 +}
19552 +
19553 +static int evb_init(struct fsl_mc_device *evb_dev)
19554 +{
19555 + struct device *dev = &evb_dev->dev;
19556 + struct net_device *netdev = dev_get_drvdata(dev);
19557 + struct evb_priv *priv = netdev_priv(netdev);
19558 + u16 version_major;
19559 + u16 version_minor;
19560 + int err = 0;
19561 +
19562 + priv->dev_id = evb_dev->obj_desc.id;
19563 +
19564 + err = dpdmux_open(priv->mc_io, 0, priv->dev_id, &priv->mux_handle);
19565 + if (unlikely(err)) {
19566 + dev_err(dev, "dpdmux_open err %d\n", err);
19567 + goto err_exit;
19568 + }
19569 + if (!priv->mux_handle) {
19570 + dev_err(dev, "dpdmux_open returned null handle but no error\n");
19571 + err = -EFAULT;
19572 + goto err_exit;
19573 + }
19574 +
19575 + err = dpdmux_get_attributes(priv->mc_io, 0, priv->mux_handle,
19576 + &priv->attr);
19577 + if (unlikely(err)) {
19578 + dev_err(dev, "dpdmux_get_attributes err %d\n", err);
19579 + goto err_close;
19580 + }
19581 +
19582 + err = dpdmux_get_api_version(priv->mc_io, 0,
19583 + &version_major,
19584 + &version_minor);
19585 + if (unlikely(err)) {
19586 + dev_err(dev, "dpdmux_get_api_version err %d\n", err);
19587 + goto err_close;
19588 + }
19589 +
19590 + /* Minimum supported DPDMUX version check */
19591 + if (version_major < DPDMUX_MIN_VER_MAJOR ||
19592 + (version_major == DPDMUX_MIN_VER_MAJOR &&
19593 + version_minor < DPDMUX_MIN_VER_MINOR)) {
19594 + dev_err(dev, "DPDMUX version %d.%d not supported. Use %d.%d or greater.\n",
19595 + version_major, version_minor,
19596 + DPDMUX_MIN_VER_MAJOR, DPDMUX_MIN_VER_MINOR);
19597 + err = -ENOTSUPP;
19598 + goto err_close;
19599 + }
19600 +
19601 + err = dpdmux_reset(priv->mc_io, 0, priv->mux_handle);
19602 + if (unlikely(err)) {
19603 + dev_err(dev, "dpdmux_reset err %d\n", err);
19604 + goto err_close;
19605 + }
19606 +
19607 + return 0;
19608 +
19609 +err_close:
19610 + dpdmux_close(priv->mc_io, 0, priv->mux_handle);
19611 +err_exit:
19612 + return err;
19613 +}
19614 +
19615 +static int evb_remove(struct fsl_mc_device *evb_dev)
19616 +{
19617 + struct device *dev = &evb_dev->dev;
19618 + struct net_device *netdev = dev_get_drvdata(dev);
19619 + struct evb_priv *priv = netdev_priv(netdev);
19620 + struct evb_port_priv *port_priv;
19621 + struct list_head *pos;
19622 +
19623 + list_for_each(pos, &priv->port_list) {
19624 + port_priv = list_entry(pos, struct evb_port_priv, list);
19625 +
19626 + rtnl_lock();
19627 + netdev_upper_dev_unlink(port_priv->netdev, netdev);
19628 + rtnl_unlock();
19629 +
19630 + unregister_netdev(port_priv->netdev);
19631 + free_netdev(port_priv->netdev);
19632 + }
19633 +
19634 + evb_teardown_irqs(evb_dev);
19635 +
19636 + unregister_netdev(netdev);
19637 +
19638 + evb_takedown(evb_dev);
19639 + fsl_mc_portal_free(priv->mc_io);
19640 +
19641 + dev_set_drvdata(dev, NULL);
19642 + free_netdev(netdev);
19643 +
19644 + return 0;
19645 +}
19646 +
19647 +static int evb_probe(struct fsl_mc_device *evb_dev)
19648 +{
19649 + struct device *dev;
19650 + struct evb_priv *priv = NULL;
19651 + struct net_device *netdev = NULL;
19652 + char port_name[IFNAMSIZ];
19653 + int i;
19654 + int err = 0;
19655 +
19656 + dev = &evb_dev->dev;
19657 +
19658 + /* register switch device, it's for management only - no I/O */
19659 + netdev = alloc_etherdev(sizeof(*priv));
19660 + if (!netdev) {
19661 + dev_err(dev, "alloc_etherdev error\n");
19662 + return -ENOMEM;
19663 + }
19664 + netdev->netdev_ops = &evb_ops;
19665 +
19666 + dev_set_drvdata(dev, netdev);
19667 +
19668 + priv = netdev_priv(netdev);
19669 +
19670 + err = fsl_mc_portal_allocate(evb_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
19671 + &priv->mc_io);
19672 + if (err) {
19673 + if (err == -ENXIO)
19674 + err = -EPROBE_DEFER;
19675 + else
19676 + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
19677 + goto err_free_netdev;
19678 + }
19679 +
19680 + if (!priv->mc_io) {
19681 + dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
19682 + err = -EFAULT;
19683 + goto err_free_netdev;
19684 + }
19685 +
19686 + err = evb_init(evb_dev);
19687 + if (unlikely(err)) {
19688 + dev_err(dev, "evb init err %d\n", err);
19689 + goto err_free_cmdport;
19690 + }
19691 +
19692 + INIT_LIST_HEAD(&priv->port_list);
19693 + netdev->flags |= IFF_PROMISC | IFF_MASTER;
19694 +
19695 + dev_alloc_name(netdev, "evb%d");
19696 +
19697 + /* register switch ports */
19698 + snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name);
19699 +
19700 + /* only register downlinks? */
19701 + for (i = 0; i < priv->attr.num_ifs + 1; i++) {
19702 + struct net_device *port_netdev;
19703 + struct evb_port_priv *port_priv;
19704 +
19705 + if (i) {
19706 + port_netdev =
19707 + alloc_etherdev(sizeof(struct evb_port_priv));
19708 + if (!port_netdev) {
19709 + dev_err(dev, "alloc_etherdev error\n");
19710 + goto err_takedown;
19711 + }
19712 +
19713 + port_priv = netdev_priv(port_netdev);
19714 +
19715 + port_netdev->flags |= IFF_PROMISC | IFF_SLAVE;
19716 +
19717 + dev_alloc_name(port_netdev, port_name);
19718 + } else {
19719 + port_netdev = netdev;
19720 + port_priv = &priv->uplink;
19721 + }
19722 +
19723 + port_priv->netdev = port_netdev;
19724 + port_priv->evb_priv = priv;
19725 + port_priv->port_index = i;
19726 +
19727 + SET_NETDEV_DEV(port_netdev, dev);
19728 +
19729 + if (i) {
19730 + port_netdev->netdev_ops = &evb_port_ops;
19731 +
19732 + err = register_netdev(port_netdev);
19733 + if (err < 0) {
19734 + dev_err(dev, "register_netdev err %d\n", err);
19735 + free_netdev(port_netdev);
19736 + goto err_takedown;
19737 + }
19738 +
19739 + rtnl_lock();
19740 + err = netdev_master_upper_dev_link(port_netdev, netdev,
19741 + NULL, NULL);
19742 + if (unlikely(err)) {
19743 + dev_err(dev, "netdev_master_upper_dev_link err %d\n",
19744 + err);
19745 + unregister_netdev(port_netdev);
19746 + free_netdev(port_netdev);
19747 + rtnl_unlock();
19748 + goto err_takedown;
19749 + }
19750 + rtmsg_ifinfo(RTM_NEWLINK, port_netdev,
19751 + IFF_SLAVE, GFP_KERNEL);
19752 + rtnl_unlock();
19753 +
19754 + list_add(&port_priv->list, &priv->port_list);
19755 + } else {
19756 + /* Set MTU limits only on uplink */
19757 + port_netdev->min_mtu = EVB_MIN_MTU;
19758 + port_netdev->max_mtu = EVB_MAX_MTU;
19759 +
19760 + err = register_netdev(netdev);
19761 +
19762 + if (err < 0) {
19763 + dev_err(dev, "register_netdev error %d\n", err);
19764 + goto err_takedown;
19765 + }
19766 + }
19767 +
19768 + port_netdev->ethtool_ops = &evb_port_ethtool_ops;
19769 +
19770 + /* ports are up from init */
19771 + rtnl_lock();
19772 + err = dev_open(port_netdev);
19773 + rtnl_unlock();
19774 + if (unlikely(err))
19775 + dev_warn(dev, "dev_open err %d\n", err);
19776 + }
19777 +
19778 + /* setup irqs */
19779 + err = evb_setup_irqs(evb_dev);
19780 + if (unlikely(err)) {
19781 + dev_warn(dev, "evb_setup_irqs err %d\n", err);
19782 + goto err_takedown;
19783 + }
19784 +
19785 + dev_info(dev, "probed evb device with %d ports\n",
19786 + priv->attr.num_ifs);
19787 + return 0;
19788 +
19789 +err_takedown:
19790 + evb_remove(evb_dev);
19791 +err_free_cmdport:
19792 + fsl_mc_portal_free(priv->mc_io);
19793 +err_free_netdev:
19794 + return err;
19795 +}
19796 +
19797 +static const struct fsl_mc_device_id evb_match_id_table[] = {
19798 + {
19799 + .vendor = FSL_MC_VENDOR_FREESCALE,
19800 + .obj_type = "dpdmux",
19801 + },
19802 + {}
19803 +};
19804 +
19805 +static struct fsl_mc_driver evb_drv = {
19806 + .driver = {
19807 + .name = KBUILD_MODNAME,
19808 + .owner = THIS_MODULE,
19809 + },
19810 + .probe = evb_probe,
19811 + .remove = evb_remove,
19812 + .match_id_table = evb_match_id_table,
19813 +};
19814 +
19815 +module_fsl_mc_driver(evb_drv);
19816 +
19817 +MODULE_LICENSE("GPL");
19818 +MODULE_DESCRIPTION("Layerscape DPAA Edge Virtual Bridge driver (prototype)");
19819 --- /dev/null
19820 +++ b/drivers/staging/fsl-dpaa2/mac/Kconfig
19821 @@ -0,0 +1,23 @@
19822 +config FSL_DPAA2_MAC
19823 + tristate "DPAA2 MAC / PHY interface"
19824 + depends on FSL_MC_BUS && FSL_DPAA2
19825 + select MDIO_BUS_MUX_MMIOREG
19826 + select FSL_XGMAC_MDIO
19827 + select FIXED_PHY
19828 + ---help---
19829 + Prototype driver for DPAA2 MAC / PHY interface object.
19830 + This driver works as a proxy between phylib including phy drivers and
19831 + the MC firmware. It receives updates on link state changes from PHY
19832 + lib and forwards them to MC and receives interrupt from MC whenever
19833 + a request is made to change the link state.
19834 +
19835 +
19836 +config FSL_DPAA2_MAC_NETDEVS
19837 + bool "Expose net interfaces for PHYs"
19838 + default n
19839 + depends on FSL_DPAA2_MAC
19840 + ---help---
19841 + Exposes macX net interfaces which allow direct control over MACs and
19842 + PHYs.
19843 +
19844 + Leave disabled if unsure.
19845 --- /dev/null
19846 +++ b/drivers/staging/fsl-dpaa2/mac/Makefile
19847 @@ -0,0 +1,10 @@
19848 +
19849 +obj-$(CONFIG_FSL_DPAA2_MAC) += dpaa2-mac.o
19850 +
19851 +dpaa2-mac-objs := mac.o dpmac.o
19852 +
19853 +all:
19854 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
19855 +
19856 +clean:
19857 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
19858 --- /dev/null
19859 +++ b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
19860 @@ -0,0 +1,172 @@
19861 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
19862 + *
19863 + * Redistribution and use in source and binary forms, with or without
19864 + * modification, are permitted provided that the following conditions are met:
19865 + * * Redistributions of source code must retain the above copyright
19866 + * notice, this list of conditions and the following disclaimer.
19867 + * * Redistributions in binary form must reproduce the above copyright
19868 + * notice, this list of conditions and the following disclaimer in the
19869 + * documentation and/or other materials provided with the distribution.
19870 + * * Neither the name of the above-listed copyright holders nor the
19871 + * names of any contributors may be used to endorse or promote products
19872 + * derived from this software without specific prior written permission.
19873 + *
19874 + *
19875 + * ALTERNATIVELY, this software may be distributed under the terms of the
19876 + * GNU General Public License ("GPL") as published by the Free Software
19877 + * Foundation, either version 2 of that License or (at your option) any
19878 + * later version.
19879 + *
19880 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19881 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19882 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19883 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
19884 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19885 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19886 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19887 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
19888 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
19889 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
19890 + * POSSIBILITY OF SUCH DAMAGE.
19891 + */
19892 +#ifndef _FSL_DPMAC_CMD_H
19893 +#define _FSL_DPMAC_CMD_H
19894 +
19895 +/* DPMAC Version */
19896 +#define DPMAC_VER_MAJOR 4
19897 +#define DPMAC_VER_MINOR 2
19898 +#define DPMAC_CMD_BASE_VERSION 1
19899 +#define DPMAC_CMD_ID_OFFSET 4
19900 +
19901 +#define DPMAC_CMD(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_BASE_VERSION)
19902 +
19903 +/* Command IDs */
19904 +#define DPMAC_CMDID_CLOSE DPMAC_CMD(0x800)
19905 +#define DPMAC_CMDID_OPEN DPMAC_CMD(0x80c)
19906 +#define DPMAC_CMDID_CREATE DPMAC_CMD(0x90c)
19907 +#define DPMAC_CMDID_DESTROY DPMAC_CMD(0x98c)
19908 +#define DPMAC_CMDID_GET_API_VERSION DPMAC_CMD(0xa0c)
19909 +
19910 +#define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004)
19911 +#define DPMAC_CMDID_RESET DPMAC_CMD(0x005)
19912 +
19913 +#define DPMAC_CMDID_SET_IRQ_ENABLE DPMAC_CMD(0x012)
19914 +#define DPMAC_CMDID_GET_IRQ_ENABLE DPMAC_CMD(0x013)
19915 +#define DPMAC_CMDID_SET_IRQ_MASK DPMAC_CMD(0x014)
19916 +#define DPMAC_CMDID_GET_IRQ_MASK DPMAC_CMD(0x015)
19917 +#define DPMAC_CMDID_GET_IRQ_STATUS DPMAC_CMD(0x016)
19918 +#define DPMAC_CMDID_CLEAR_IRQ_STATUS DPMAC_CMD(0x017)
19919 +
19920 +#define DPMAC_CMDID_GET_LINK_CFG DPMAC_CMD(0x0c2)
19921 +#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD(0x0c3)
19922 +#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4)
19923 +
19924 +#define DPMAC_CMDID_SET_PORT_MAC_ADDR DPMAC_CMD(0x0c5)
19925 +
19926 +/* Macros for accessing command fields smaller than 1byte */
19927 +#define DPMAC_MASK(field) \
19928 + GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \
19929 + DPMAC_##field##_SHIFT)
19930 +#define dpmac_set_field(var, field, val) \
19931 + ((var) |= (((val) << DPMAC_##field##_SHIFT) & DPMAC_MASK(field)))
19932 +#define dpmac_get_field(var, field) \
19933 + (((var) & DPMAC_MASK(field)) >> DPMAC_##field##_SHIFT)
19934 +
19935 +struct dpmac_cmd_open {
19936 + u32 dpmac_id;
19937 +};
19938 +
19939 +struct dpmac_cmd_create {
19940 + u32 mac_id;
19941 +};
19942 +
19943 +struct dpmac_cmd_destroy {
19944 + u32 dpmac_id;
19945 +};
19946 +
19947 +struct dpmac_cmd_set_irq_enable {
19948 + u8 enable;
19949 + u8 pad[3];
19950 + u8 irq_index;
19951 +};
19952 +
19953 +struct dpmac_cmd_get_irq_enable {
19954 + u32 pad;
19955 + u8 irq_index;
19956 +};
19957 +
19958 +struct dpmac_rsp_get_irq_enable {
19959 + u8 enabled;
19960 +};
19961 +
19962 +struct dpmac_cmd_set_irq_mask {
19963 + u32 mask;
19964 + u8 irq_index;
19965 +};
19966 +
19967 +struct dpmac_cmd_get_irq_mask {
19968 + u32 pad;
19969 + u8 irq_index;
19970 +};
19971 +
19972 +struct dpmac_rsp_get_irq_mask {
19973 + u32 mask;
19974 +};
19975 +
19976 +struct dpmac_cmd_get_irq_status {
19977 + u32 status;
19978 + u8 irq_index;
19979 +};
19980 +
19981 +struct dpmac_rsp_get_irq_status {
19982 + u32 status;
19983 +};
19984 +
19985 +struct dpmac_cmd_clear_irq_status {
19986 + u32 status;
19987 + u8 irq_index;
19988 +};
19989 +
19990 +struct dpmac_rsp_get_attributes {
19991 + u8 eth_if;
19992 + u8 link_type;
19993 + u16 id;
19994 + u32 max_rate;
19995 +};
19996 +
19997 +struct dpmac_rsp_get_link_cfg {
19998 + u64 options;
19999 + u32 rate;
20000 +};
20001 +
20002 +#define DPMAC_STATE_SIZE 1
20003 +#define DPMAC_STATE_SHIFT 0
20004 +
20005 +struct dpmac_cmd_set_link_state {
20006 + u64 options;
20007 + u32 rate;
20008 + u32 pad;
20009 + /* only least significant bit is valid */
20010 + u8 up;
20011 +};
20012 +
20013 +struct dpmac_cmd_get_counter {
20014 + u8 type;
20015 +};
20016 +
20017 +struct dpmac_rsp_get_counter {
20018 + u64 pad;
20019 + u64 counter;
20020 +};
20021 +
20022 +struct dpmac_rsp_get_api_version {
20023 + u16 major;
20024 + u16 minor;
20025 +};
20026 +
20027 +struct dpmac_cmd_set_port_mac_addr {
20028 + u8 pad[2];
20029 + u8 addr[6];
20030 +};
20031 +
20032 +#endif /* _FSL_DPMAC_CMD_H */
20033 --- /dev/null
20034 +++ b/drivers/staging/fsl-dpaa2/mac/dpmac.c
20035 @@ -0,0 +1,619 @@
20036 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
20037 + *
20038 + * Redistribution and use in source and binary forms, with or without
20039 + * modification, are permitted provided that the following conditions are met:
20040 + * * Redistributions of source code must retain the above copyright
20041 + * notice, this list of conditions and the following disclaimer.
20042 + * * Redistributions in binary form must reproduce the above copyright
20043 + * notice, this list of conditions and the following disclaimer in the
20044 + * documentation and/or other materials provided with the distribution.
20045 + * * Neither the name of the above-listed copyright holders nor the
20046 + * names of any contributors may be used to endorse or promote products
20047 + * derived from this software without specific prior written permission.
20048 + *
20049 + *
20050 + * ALTERNATIVELY, this software may be distributed under the terms of the
20051 + * GNU General Public License ("GPL") as published by the Free Software
20052 + * Foundation, either version 2 of that License or (at your option) any
20053 + * later version.
20054 + *
20055 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20056 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20057 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20058 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
20059 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20060 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20061 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20062 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20063 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
20064 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
20065 + * POSSIBILITY OF SUCH DAMAGE.
20066 + */
20067 +#include <linux/fsl/mc.h>
20068 +#include "dpmac.h"
20069 +#include "dpmac-cmd.h"
20070 +
20071 +/**
20072 + * dpmac_open() - Open a control session for the specified object.
20073 + * @mc_io: Pointer to MC portal's I/O object
20074 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20075 + * @dpmac_id: DPMAC unique ID
20076 + * @token: Returned token; use in subsequent API calls
20077 + *
20078 + * This function can be used to open a control session for an
20079 + * already created object; an object may have been declared in
20080 + * the DPL or by calling the dpmac_create function.
20081 + * This function returns a unique authentication token,
20082 + * associated with the specific object ID and the specific MC
20083 + * portal; this token must be used in all subsequent commands for
20084 + * this specific object
20085 + *
20086 + * Return: '0' on Success; Error code otherwise.
20087 + */
20088 +int dpmac_open(struct fsl_mc_io *mc_io,
20089 + u32 cmd_flags,
20090 + int dpmac_id,
20091 + u16 *token)
20092 +{
20093 + struct dpmac_cmd_open *cmd_params;
20094 + struct fsl_mc_command cmd = { 0 };
20095 + int err;
20096 +
20097 + /* prepare command */
20098 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN,
20099 + cmd_flags,
20100 + 0);
20101 + cmd_params = (struct dpmac_cmd_open *)cmd.params;
20102 + cmd_params->dpmac_id = cpu_to_le32(dpmac_id);
20103 +
20104 + /* send command to mc*/
20105 + err = mc_send_command(mc_io, &cmd);
20106 + if (err)
20107 + return err;
20108 +
20109 + /* retrieve response parameters */
20110 + *token = mc_cmd_hdr_read_token(&cmd);
20111 +
20112 + return err;
20113 +}
20114 +
20115 +/**
20116 + * dpmac_close() - Close the control session of the object
20117 + * @mc_io: Pointer to MC portal's I/O object
20118 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20119 + * @token: Token of DPMAC object
20120 + *
20121 + * After this function is called, no further operations are
20122 + * allowed on the object without opening a new control session.
20123 + *
20124 + * Return: '0' on Success; Error code otherwise.
20125 + */
20126 +int dpmac_close(struct fsl_mc_io *mc_io,
20127 + u32 cmd_flags,
20128 + u16 token)
20129 +{
20130 + struct fsl_mc_command cmd = { 0 };
20131 +
20132 + /* prepare command */
20133 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags,
20134 + token);
20135 +
20136 + /* send command to mc*/
20137 + return mc_send_command(mc_io, &cmd);
20138 +}
20139 +
20140 +/**
20141 + * dpmac_create() - Create the DPMAC object.
20142 + * @mc_io: Pointer to MC portal's I/O object
20143 + * @dprc_token: Parent container token; '0' for default container
20144 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20145 + * @cfg: Configuration structure
20146 + * @obj_id: Returned object id
20147 + *
20148 + * Create the DPMAC object, allocate required resources and
20149 + * perform required initialization.
20150 + *
20151 + * The function accepts an authentication token of a parent
20152 + * container that this object should be assigned to. The token
20153 + * can be '0' so the object will be assigned to the default container.
20154 + * The newly created object can be opened with the returned
20155 + * object id and using the container's associated tokens and MC portals.
20156 + *
20157 + * Return: '0' on Success; Error code otherwise.
20158 + */
20159 +int dpmac_create(struct fsl_mc_io *mc_io,
20160 + u16 dprc_token,
20161 + u32 cmd_flags,
20162 + const struct dpmac_cfg *cfg,
20163 + u32 *obj_id)
20164 +{
20165 + struct dpmac_cmd_create *cmd_params;
20166 + struct fsl_mc_command cmd = { 0 };
20167 + int err;
20168 +
20169 + /* prepare command */
20170 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CREATE,
20171 + cmd_flags,
20172 + dprc_token);
20173 + cmd_params = (struct dpmac_cmd_create *)cmd.params;
20174 + cmd_params->mac_id = cpu_to_le32(cfg->mac_id);
20175 +
20176 + /* send command to mc*/
20177 + err = mc_send_command(mc_io, &cmd);
20178 + if (err)
20179 + return err;
20180 +
20181 + /* retrieve response parameters */
20182 + *obj_id = mc_cmd_read_object_id(&cmd);
20183 +
20184 + return 0;
20185 +}
20186 +
20187 +/**
20188 + * dpmac_destroy() - Destroy the DPMAC object and release all its resources.
20189 + * @mc_io: Pointer to MC portal's I/O object
20190 + * @dprc_token: Parent container token; '0' for default container
20191 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20192 + * @object_id: The object id; it must be a valid id within the container that
20193 + * created this object;
20194 + *
20195 + * The function accepts the authentication token of the parent container that
20196 + * created the object (not the one that currently owns the object). The object
20197 + * is searched within parent using the provided 'object_id'.
20198 + * All tokens to the object must be closed before calling destroy.
20199 + *
20200 + * Return: '0' on Success; error code otherwise.
20201 + */
20202 +int dpmac_destroy(struct fsl_mc_io *mc_io,
20203 + u16 dprc_token,
20204 + u32 cmd_flags,
20205 + u32 object_id)
20206 +{
20207 + struct dpmac_cmd_destroy *cmd_params;
20208 + struct fsl_mc_command cmd = { 0 };
20209 +
20210 + /* prepare command */
20211 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY,
20212 + cmd_flags,
20213 + dprc_token);
20214 + cmd_params = (struct dpmac_cmd_destroy *)cmd.params;
20215 + cmd_params->dpmac_id = cpu_to_le32(object_id);
20216 +
20217 + /* send command to mc*/
20218 + return mc_send_command(mc_io, &cmd);
20219 +}
20220 +
20221 +/**
20222 + * dpmac_set_irq_enable() - Set overall interrupt state.
20223 + * @mc_io: Pointer to MC portal's I/O object
20224 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20225 + * @token: Token of DPMAC object
20226 + * @irq_index: The interrupt index to configure
20227 + * @en: Interrupt state - enable = 1, disable = 0
20228 + *
20229 + * Allows GPP software to control when interrupts are generated.
20230 + * Each interrupt can have up to 32 causes. The enable/disable controls the
20231 + * overall interrupt state. If the interrupt is disabled no causes will cause
20232 + * an interrupt.
20233 + *
20234 + * Return: '0' on Success; Error code otherwise.
20235 + */
20236 +int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
20237 + u32 cmd_flags,
20238 + u16 token,
20239 + u8 irq_index,
20240 + u8 en)
20241 +{
20242 + struct dpmac_cmd_set_irq_enable *cmd_params;
20243 + struct fsl_mc_command cmd = { 0 };
20244 +
20245 + /* prepare command */
20246 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE,
20247 + cmd_flags,
20248 + token);
20249 + cmd_params = (struct dpmac_cmd_set_irq_enable *)cmd.params;
20250 + cmd_params->irq_index = irq_index;
20251 + cmd_params->enable = en;
20252 +
20253 + /* send command to mc*/
20254 + return mc_send_command(mc_io, &cmd);
20255 +}
20256 +
20257 +/**
20258 + * dpmac_get_irq_enable() - Get overall interrupt state
20259 + * @mc_io: Pointer to MC portal's I/O object
20260 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20261 + * @token: Token of DPMAC object
20262 + * @irq_index: The interrupt index to configure
20263 + * @en: Returned interrupt state - enable = 1, disable = 0
20264 + *
20265 + * Return: '0' on Success; Error code otherwise.
20266 + */
20267 +int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
20268 + u32 cmd_flags,
20269 + u16 token,
20270 + u8 irq_index,
20271 + u8 *en)
20272 +{
20273 + struct dpmac_cmd_get_irq_enable *cmd_params;
20274 + struct dpmac_rsp_get_irq_enable *rsp_params;
20275 + struct fsl_mc_command cmd = { 0 };
20276 + int err;
20277 +
20278 + /* prepare command */
20279 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_ENABLE,
20280 + cmd_flags,
20281 + token);
20282 + cmd_params = (struct dpmac_cmd_get_irq_enable *)cmd.params;
20283 + cmd_params->irq_index = irq_index;
20284 +
20285 + /* send command to mc*/
20286 + err = mc_send_command(mc_io, &cmd);
20287 + if (err)
20288 + return err;
20289 +
20290 + /* retrieve response parameters */
20291 + rsp_params = (struct dpmac_rsp_get_irq_enable *)cmd.params;
20292 + *en = rsp_params->enabled;
20293 +
20294 + return 0;
20295 +}
20296 +
20297 +/**
20298 + * dpmac_set_irq_mask() - Set interrupt mask.
20299 + * @mc_io: Pointer to MC portal's I/O object
20300 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20301 + * @token: Token of DPMAC object
20302 + * @irq_index: The interrupt index to configure
20303 + * @mask: Event mask to trigger interrupt;
20304 + * each bit:
20305 + * 0 = ignore event
20306 + * 1 = consider event for asserting IRQ
20307 + *
20308 + * Every interrupt can have up to 32 causes and the interrupt model supports
20309 + * masking/unmasking each cause independently
20310 + *
20311 + * Return: '0' on Success; Error code otherwise.
20312 + */
20313 +int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
20314 + u32 cmd_flags,
20315 + u16 token,
20316 + u8 irq_index,
20317 + u32 mask)
20318 +{
20319 + struct dpmac_cmd_set_irq_mask *cmd_params;
20320 + struct fsl_mc_command cmd = { 0 };
20321 +
20322 + /* prepare command */
20323 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK,
20324 + cmd_flags,
20325 + token);
20326 + cmd_params = (struct dpmac_cmd_set_irq_mask *)cmd.params;
20327 + cmd_params->mask = cpu_to_le32(mask);
20328 + cmd_params->irq_index = irq_index;
20329 +
20330 + /* send command to mc*/
20331 + return mc_send_command(mc_io, &cmd);
20332 +}
20333 +
20334 +/**
20335 + * dpmac_get_irq_mask() - Get interrupt mask.
20336 + * @mc_io: Pointer to MC portal's I/O object
20337 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20338 + * @token: Token of DPMAC object
20339 + * @irq_index: The interrupt index to configure
20340 + * @mask: Returned event mask to trigger interrupt
20341 + *
20342 + * Every interrupt can have up to 32 causes and the interrupt model supports
20343 + * masking/unmasking each cause independently
20344 + *
20345 + * Return: '0' on Success; Error code otherwise.
20346 + */
20347 +int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
20348 + u32 cmd_flags,
20349 + u16 token,
20350 + u8 irq_index,
20351 + u32 *mask)
20352 +{
20353 + struct dpmac_cmd_get_irq_mask *cmd_params;
20354 + struct dpmac_rsp_get_irq_mask *rsp_params;
20355 + struct fsl_mc_command cmd = { 0 };
20356 + int err;
20357 +
20358 + /* prepare command */
20359 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_MASK,
20360 + cmd_flags,
20361 + token);
20362 + cmd_params = (struct dpmac_cmd_get_irq_mask *)cmd.params;
20363 + cmd_params->irq_index = irq_index;
20364 +
20365 + /* send command to mc*/
20366 + err = mc_send_command(mc_io, &cmd);
20367 + if (err)
20368 + return err;
20369 +
20370 + /* retrieve response parameters */
20371 + rsp_params = (struct dpmac_rsp_get_irq_mask *)cmd.params;
20372 + *mask = le32_to_cpu(rsp_params->mask);
20373 +
20374 + return 0;
20375 +}
20376 +
20377 +/**
20378 + * dpmac_get_irq_status() - Get the current status of any pending interrupts.
20379 + *
20380 + * @mc_io: Pointer to MC portal's I/O object
20381 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20382 + * @token: Token of DPMAC object
20383 + * @irq_index: The interrupt index to configure
20384 + * @status: Returned interrupts status - one bit per cause:
20385 + * 0 = no interrupt pending
20386 + * 1 = interrupt pending
20387 + *
20388 + * Return: '0' on Success; Error code otherwise.
20389 + */
20390 +int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
20391 + u32 cmd_flags,
20392 + u16 token,
20393 + u8 irq_index,
20394 + u32 *status)
20395 +{
20396 + struct dpmac_cmd_get_irq_status *cmd_params;
20397 + struct dpmac_rsp_get_irq_status *rsp_params;
20398 + struct fsl_mc_command cmd = { 0 };
20399 + int err;
20400 +
20401 + /* prepare command */
20402 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_STATUS,
20403 + cmd_flags,
20404 + token);
20405 + cmd_params = (struct dpmac_cmd_get_irq_status *)cmd.params;
20406 + cmd_params->status = cpu_to_le32(*status);
20407 + cmd_params->irq_index = irq_index;
20408 +
20409 + /* send command to mc*/
20410 + err = mc_send_command(mc_io, &cmd);
20411 + if (err)
20412 + return err;
20413 +
20414 + /* retrieve response parameters */
20415 + rsp_params = (struct dpmac_rsp_get_irq_status *)cmd.params;
20416 + *status = le32_to_cpu(rsp_params->status);
20417 +
20418 + return 0;
20419 +}
20420 +
20421 +/**
20422 + * dpmac_clear_irq_status() - Clear a pending interrupt's status
20423 + *
20424 + * @mc_io: Pointer to MC portal's I/O object
20425 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20426 + * @token: Token of DPMAC object
20427 + * @irq_index: The interrupt index to configure
20428 + * @status: Bits to clear (W1C) - one bit per cause:
20429 + * 0 = don't change
20430 + * 1 = clear status bit
20431 + *
20432 + * Return: '0' on Success; Error code otherwise.
20433 + */
20434 +int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
20435 + u32 cmd_flags,
20436 + u16 token,
20437 + u8 irq_index,
20438 + u32 status)
20439 +{
20440 + struct dpmac_cmd_clear_irq_status *cmd_params;
20441 + struct fsl_mc_command cmd = { 0 };
20442 +
20443 + /* prepare command */
20444 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS,
20445 + cmd_flags,
20446 + token);
20447 + cmd_params = (struct dpmac_cmd_clear_irq_status *)cmd.params;
20448 + cmd_params->status = cpu_to_le32(status);
20449 + cmd_params->irq_index = irq_index;
20450 +
20451 + /* send command to mc*/
20452 + return mc_send_command(mc_io, &cmd);
20453 +}
20454 +
20455 +/**
20456 + * dpmac_get_attributes - Retrieve DPMAC attributes.
20457 + *
20458 + * @mc_io: Pointer to MC portal's I/O object
20459 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20460 + * @token: Token of DPMAC object
20461 + * @attr: Returned object's attributes
20462 + *
20463 + * Return: '0' on Success; Error code otherwise.
20464 + */
20465 +int dpmac_get_attributes(struct fsl_mc_io *mc_io,
20466 + u32 cmd_flags,
20467 + u16 token,
20468 + struct dpmac_attr *attr)
20469 +{
20470 + struct dpmac_rsp_get_attributes *rsp_params;
20471 + struct fsl_mc_command cmd = { 0 };
20472 + int err;
20473 +
20474 + /* prepare command */
20475 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR,
20476 + cmd_flags,
20477 + token);
20478 +
20479 + /* send command to mc*/
20480 + err = mc_send_command(mc_io, &cmd);
20481 + if (err)
20482 + return err;
20483 +
20484 + /* retrieve response parameters */
20485 + rsp_params = (struct dpmac_rsp_get_attributes *)cmd.params;
20486 + attr->eth_if = rsp_params->eth_if;
20487 + attr->link_type = rsp_params->link_type;
20488 + attr->id = le16_to_cpu(rsp_params->id);
20489 + attr->max_rate = le32_to_cpu(rsp_params->max_rate);
20490 +
20491 + return 0;
20492 +}
20493 +
20494 +/**
20495 + * dpmac_get_link_cfg() - Get Ethernet link configuration
20496 + * @mc_io: Pointer to opaque I/O object
20497 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20498 + * @token: Token of DPMAC object
20499 + * @cfg: Returned structure with the link configuration
20500 + *
20501 + * Return: '0' on Success; Error code otherwise.
20502 + */
20503 +int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
20504 + u32 cmd_flags,
20505 + u16 token,
20506 + struct dpmac_link_cfg *cfg)
20507 +{
20508 + struct dpmac_rsp_get_link_cfg *rsp_params;
20509 + struct fsl_mc_command cmd = { 0 };
20510 + int err = 0;
20511 +
20512 + /* prepare command */
20513 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG,
20514 + cmd_flags,
20515 + token);
20516 +
20517 + /* send command to mc*/
20518 + err = mc_send_command(mc_io, &cmd);
20519 + if (err)
20520 + return err;
20521 +
20522 + rsp_params = (struct dpmac_rsp_get_link_cfg *)cmd.params;
20523 + cfg->options = le64_to_cpu(rsp_params->options);
20524 + cfg->rate = le32_to_cpu(rsp_params->rate);
20525 +
20526 + return 0;
20527 +}
20528 +
20529 +/**
20530 + * dpmac_set_link_state() - Set the Ethernet link status
20531 + * @mc_io: Pointer to opaque I/O object
20532 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20533 + * @token: Token of DPMAC object
20534 + * @link_state: Link state configuration
20535 + *
20536 + * Return: '0' on Success; Error code otherwise.
20537 + */
20538 +int dpmac_set_link_state(struct fsl_mc_io *mc_io,
20539 + u32 cmd_flags,
20540 + u16 token,
20541 + struct dpmac_link_state *link_state)
20542 +{
20543 + struct dpmac_cmd_set_link_state *cmd_params;
20544 + struct fsl_mc_command cmd = { 0 };
20545 +
20546 + /* prepare command */
20547 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
20548 + cmd_flags,
20549 + token);
20550 + cmd_params = (struct dpmac_cmd_set_link_state *)cmd.params;
20551 + cmd_params->options = cpu_to_le64(link_state->options);
20552 + cmd_params->rate = cpu_to_le32(link_state->rate);
20553 + cmd_params->up = dpmac_get_field(link_state->up, STATE);
20554 +
20555 + /* send command to mc*/
20556 + return mc_send_command(mc_io, &cmd);
20557 +}
20558 +
20559 +/**
20560 + * dpmac_get_counter() - Read a specific DPMAC counter
20561 + * @mc_io: Pointer to opaque I/O object
20562 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20563 + * @token: Token of DPMAC object
20564 + * @type: The requested counter
20565 + * @counter: Returned counter value
20566 + *
20567 + * Return: '0' on Success; Error code otherwise.
20568 + */
20569 +int dpmac_get_counter(struct fsl_mc_io *mc_io,
20570 + u32 cmd_flags,
20571 + u16 token,
20572 + enum dpmac_counter type,
20573 + u64 *counter)
20574 +{
20575 + struct dpmac_cmd_get_counter *dpmac_cmd;
20576 + struct dpmac_rsp_get_counter *dpmac_rsp;
20577 + struct fsl_mc_command cmd = { 0 };
20578 + int err = 0;
20579 +
20580 + /* prepare command */
20581 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER,
20582 + cmd_flags,
20583 + token);
20584 + dpmac_cmd = (struct dpmac_cmd_get_counter *)cmd.params;
20585 + dpmac_cmd->type = type;
20586 +
20587 + /* send command to mc*/
20588 + err = mc_send_command(mc_io, &cmd);
20589 + if (err)
20590 + return err;
20591 +
20592 + dpmac_rsp = (struct dpmac_rsp_get_counter *)cmd.params;
20593 + *counter = le64_to_cpu(dpmac_rsp->counter);
20594 +
20595 + return 0;
20596 +}
20597 +
20598 +/* untested */
20599 +int dpmac_set_port_mac_addr(struct fsl_mc_io *mc_io,
20600 + u32 cmd_flags,
20601 + u16 token,
20602 + const u8 addr[6])
20603 +{
20604 + struct dpmac_cmd_set_port_mac_addr *dpmac_cmd;
20605 + struct fsl_mc_command cmd = { 0 };
20606 +
20607 + /* prepare command */
20608 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_PORT_MAC_ADDR,
20609 + cmd_flags,
20610 + token);
20611 + dpmac_cmd = (struct dpmac_cmd_set_port_mac_addr *)cmd.params;
20612 + dpmac_cmd->addr[0] = addr[5];
20613 + dpmac_cmd->addr[1] = addr[4];
20614 + dpmac_cmd->addr[2] = addr[3];
20615 + dpmac_cmd->addr[3] = addr[2];
20616 + dpmac_cmd->addr[4] = addr[1];
20617 + dpmac_cmd->addr[5] = addr[0];
20618 +
20619 + /* send command to mc*/
20620 + return mc_send_command(mc_io, &cmd);
20621 +}
20622 +
20623 +/**
20624 + * dpmac_get_api_version() - Get Data Path MAC version
20625 + * @mc_io: Pointer to MC portal's I/O object
20626 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20627 + * @major_ver: Major version of data path mac API
20628 + * @minor_ver: Minor version of data path mac API
20629 + *
20630 + * Return: '0' on Success; Error code otherwise.
20631 + */
20632 +int dpmac_get_api_version(struct fsl_mc_io *mc_io,
20633 + u32 cmd_flags,
20634 + u16 *major_ver,
20635 + u16 *minor_ver)
20636 +{
20637 + struct dpmac_rsp_get_api_version *rsp_params;
20638 + struct fsl_mc_command cmd = { 0 };
20639 + int err;
20640 +
20641 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_API_VERSION,
20642 + cmd_flags,
20643 + 0);
20644 +
20645 + err = mc_send_command(mc_io, &cmd);
20646 + if (err)
20647 + return err;
20648 +
20649 + rsp_params = (struct dpmac_rsp_get_api_version *)cmd.params;
20650 + *major_ver = le16_to_cpu(rsp_params->major);
20651 + *minor_ver = le16_to_cpu(rsp_params->minor);
20652 +
20653 + return 0;
20654 +}
20655 --- /dev/null
20656 +++ b/drivers/staging/fsl-dpaa2/mac/dpmac.h
20657 @@ -0,0 +1,342 @@
20658 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
20659 + *
20660 + * Redistribution and use in source and binary forms, with or without
20661 + * modification, are permitted provided that the following conditions are met:
20662 + * * Redistributions of source code must retain the above copyright
20663 + * notice, this list of conditions and the following disclaimer.
20664 + * * Redistributions in binary form must reproduce the above copyright
20665 + * notice, this list of conditions and the following disclaimer in the
20666 + * documentation and/or other materials provided with the distribution.
20667 + * * Neither the name of the above-listed copyright holders nor the
20668 + * names of any contributors may be used to endorse or promote products
20669 + * derived from this software without specific prior written permission.
20670 + *
20671 + *
20672 + * ALTERNATIVELY, this software may be distributed under the terms of the
20673 + * GNU General Public License ("GPL") as published by the Free Software
20674 + * Foundation, either version 2 of that License or (at your option) any
20675 + * later version.
20676 + *
20677 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20678 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20679 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20680 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
20681 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20682 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20683 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20684 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20685 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
20686 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
20687 + * POSSIBILITY OF SUCH DAMAGE.
20688 + */
20689 +#ifndef __FSL_DPMAC_H
20690 +#define __FSL_DPMAC_H
20691 +
20692 +/* Data Path MAC API
20693 + * Contains initialization APIs and runtime control APIs for DPMAC
20694 + */
20695 +
20696 +struct fsl_mc_io;
20697 +
20698 +int dpmac_open(struct fsl_mc_io *mc_io,
20699 + u32 cmd_flags,
20700 + int dpmac_id,
20701 + u16 *token);
20702 +
20703 +int dpmac_close(struct fsl_mc_io *mc_io,
20704 + u32 cmd_flags,
20705 + u16 token);
20706 +
20707 +/**
20708 + * enum dpmac_link_type - DPMAC link type
20709 + * @DPMAC_LINK_TYPE_NONE: No link
20710 + * @DPMAC_LINK_TYPE_FIXED: Link is fixed type
20711 + * @DPMAC_LINK_TYPE_PHY: Link by PHY ID
20712 + * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type
20713 + */
20714 +enum dpmac_link_type {
20715 + DPMAC_LINK_TYPE_NONE,
20716 + DPMAC_LINK_TYPE_FIXED,
20717 + DPMAC_LINK_TYPE_PHY,
20718 + DPMAC_LINK_TYPE_BACKPLANE
20719 +};
20720 +
20721 +/**
20722 + * enum dpmac_eth_if - DPMAC Ethernet interface
20723 + * @DPMAC_ETH_IF_MII: MII interface
20724 + * @DPMAC_ETH_IF_RMII: RMII interface
20725 + * @DPMAC_ETH_IF_SMII: SMII interface
20726 + * @DPMAC_ETH_IF_GMII: GMII interface
20727 + * @DPMAC_ETH_IF_RGMII: RGMII interface
20728 + * @DPMAC_ETH_IF_SGMII: SGMII interface
20729 + * @DPMAC_ETH_IF_QSGMII: QSGMII interface
20730 + * @DPMAC_ETH_IF_XAUI: XAUI interface
20731 + * @DPMAC_ETH_IF_XFI: XFI interface
20732 + */
20733 +enum dpmac_eth_if {
20734 + DPMAC_ETH_IF_MII,
20735 + DPMAC_ETH_IF_RMII,
20736 + DPMAC_ETH_IF_SMII,
20737 + DPMAC_ETH_IF_GMII,
20738 + DPMAC_ETH_IF_RGMII,
20739 + DPMAC_ETH_IF_SGMII,
20740 + DPMAC_ETH_IF_QSGMII,
20741 + DPMAC_ETH_IF_XAUI,
20742 + DPMAC_ETH_IF_XFI
20743 +};
20744 +
20745 +/**
20746 + * struct dpmac_cfg - Structure representing DPMAC configuration
20747 + * @mac_id: Represents the Hardware MAC ID; in case of multiple WRIOP,
20748 + * the MAC IDs are continuous.
20749 + * For example: 2 WRIOPs, 16 MACs in each:
20750 + * MAC IDs for the 1st WRIOP: 1-16,
20751 + * MAC IDs for the 2nd WRIOP: 17-32.
20752 + */
20753 +struct dpmac_cfg {
20754 + u16 mac_id;
20755 +};
20756 +
20757 +int dpmac_create(struct fsl_mc_io *mc_io,
20758 + u16 dprc_token,
20759 + u32 cmd_flags,
20760 + const struct dpmac_cfg *cfg,
20761 + u32 *obj_id);
20762 +
20763 +int dpmac_destroy(struct fsl_mc_io *mc_io,
20764 + u16 dprc_token,
20765 + u32 cmd_flags,
20766 + u32 object_id);
20767 +
20768 +/**
20769 + * DPMAC IRQ Index and Events
20770 + */
20771 +
20772 +/**
20773 + * IRQ index
20774 + */
20775 +#define DPMAC_IRQ_INDEX 0
20776 +/**
20777 + * IRQ event - indicates a change in link state
20778 + */
20779 +#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001
20780 +/**
20781 + * IRQ event - Indicates that the link state changed
20782 + */
20783 +#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002
20784 +
20785 +int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
20786 + u32 cmd_flags,
20787 + u16 token,
20788 + u8 irq_index,
20789 + u8 en);
20790 +
20791 +int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
20792 + u32 cmd_flags,
20793 + u16 token,
20794 + u8 irq_index,
20795 + u8 *en);
20796 +
20797 +int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
20798 + u32 cmd_flags,
20799 + u16 token,
20800 + u8 irq_index,
20801 + u32 mask);
20802 +
20803 +int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
20804 + u32 cmd_flags,
20805 + u16 token,
20806 + u8 irq_index,
20807 + u32 *mask);
20808 +
20809 +int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
20810 + u32 cmd_flags,
20811 + u16 token,
20812 + u8 irq_index,
20813 + u32 *status);
20814 +
20815 +int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
20816 + u32 cmd_flags,
20817 + u16 token,
20818 + u8 irq_index,
20819 + u32 status);
20820 +
20821 +/**
20822 + * struct dpmac_attr - Structure representing DPMAC attributes
20823 + * @id: DPMAC object ID
20824 + * @max_rate: Maximum supported rate - in Mbps
20825 + * @eth_if: Ethernet interface
20826 + * @link_type: link type
20827 + */
20828 +struct dpmac_attr {
20829 + u16 id;
20830 + u32 max_rate;
20831 + enum dpmac_eth_if eth_if;
20832 + enum dpmac_link_type link_type;
20833 +};
20834 +
20835 +int dpmac_get_attributes(struct fsl_mc_io *mc_io,
20836 + u32 cmd_flags,
20837 + u16 token,
20838 + struct dpmac_attr *attr);
20839 +
20840 +/**
20841 + * DPMAC link configuration/state options
20842 + */
20843 +
20844 +/**
20845 + * Enable auto-negotiation
20846 + */
20847 +#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL
20848 +/**
20849 + * Enable half-duplex mode
20850 + */
20851 +#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
20852 +/**
20853 + * Enable pause frames
20854 + */
20855 +#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL
20856 +/**
20857 + * Enable a-symmetric pause frames
20858 + */
20859 +#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
20860 +
20861 +/**
20862 + * struct dpmac_link_cfg - Structure representing DPMAC link configuration
20863 + * @rate: Link's rate - in Mbps
20864 + * @options: Enable/Disable DPMAC link cfg features (bitmap)
20865 + */
20866 +struct dpmac_link_cfg {
20867 + u32 rate;
20868 + u64 options;
20869 +};
20870 +
20871 +int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
20872 + u32 cmd_flags,
20873 + u16 token,
20874 + struct dpmac_link_cfg *cfg);
20875 +
20876 +/**
20877 + * struct dpmac_link_state - DPMAC link configuration request
20878 + * @rate: Rate in Mbps
20879 + * @options: Enable/Disable DPMAC link cfg features (bitmap)
20880 + * @up: Link state
20881 + */
20882 +struct dpmac_link_state {
20883 + u32 rate;
20884 + u64 options;
20885 + int up;
20886 +};
20887 +
20888 +int dpmac_set_link_state(struct fsl_mc_io *mc_io,
20889 + u32 cmd_flags,
20890 + u16 token,
20891 + struct dpmac_link_state *link_state);
20892 +
20893 +/**
20894 + * enum dpmac_counter - DPMAC counter types
20895 + * @DPMAC_CNT_ING_FRAME_64: counts 64-bytes frames, good or bad.
20896 + * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-bytes frames, good or bad.
20897 + * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-bytes frames, good or bad.
20898 + * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-bytes frames, good or bad.
20899 + * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-bytes frames, good or bad.
20900 + * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-bytes frames, good or bad.
20901 + * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-bytes frames and larger
20902 + * (up to max frame length specified),
20903 + * good or bad.
20904 + * @DPMAC_CNT_ING_FRAG: counts frames which are shorter than 64 bytes received
20905 + * with a wrong CRC
20906 + * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length
20907 + * specified, with a bad frame check sequence.
20908 + * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors.
20909 + * Occurs when a receive FIFO overflows.
20910 + * Includes also frames truncated as a result of
20911 + * the receive FIFO overflow.
20912 + * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error
20913 + * (optional used for wrong SFD).
20914 + * @DPMAC_CNT_EGR_UNDERSIZED: counts frames transmitted that was less than 64
20915 + * bytes long with a good CRC.
20916 + * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length
20917 + * specified, with a good frame check sequence.
20918 + * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC)
20919 + * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted
20920 + * (regular and PFC).
20921 + * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid
20922 + * frames and valid pause frames.
20923 + * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames.
20924 + * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames.
20925 + * @DPMAC_CNT_ING_ALL_FRAME: counts each good or bad frames received.
20926 + * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames.
20927 + * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error
20928 + * (except for undersized/fragment frame).
20929 + * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid
20930 + * frames and valid pause frames transmitted.
20931 + * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames.
20932 + * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames.
20933 + * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames.
20934 + * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error.
20935 + * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including
20936 + * pause frames.
20937 + * @DPMAC_CNT_ENG_GOOD_FRAME: counts frames transmitted without error, including
20938 + * pause frames.
20939 + */
20940 +enum dpmac_counter {
20941 + DPMAC_CNT_ING_FRAME_64,
20942 + DPMAC_CNT_ING_FRAME_127,
20943 + DPMAC_CNT_ING_FRAME_255,
20944 + DPMAC_CNT_ING_FRAME_511,
20945 + DPMAC_CNT_ING_FRAME_1023,
20946 + DPMAC_CNT_ING_FRAME_1518,
20947 + DPMAC_CNT_ING_FRAME_1519_MAX,
20948 + DPMAC_CNT_ING_FRAG,
20949 + DPMAC_CNT_ING_JABBER,
20950 + DPMAC_CNT_ING_FRAME_DISCARD,
20951 + DPMAC_CNT_ING_ALIGN_ERR,
20952 + DPMAC_CNT_EGR_UNDERSIZED,
20953 + DPMAC_CNT_ING_OVERSIZED,
20954 + DPMAC_CNT_ING_VALID_PAUSE_FRAME,
20955 + DPMAC_CNT_EGR_VALID_PAUSE_FRAME,
20956 + DPMAC_CNT_ING_BYTE,
20957 + DPMAC_CNT_ING_MCAST_FRAME,
20958 + DPMAC_CNT_ING_BCAST_FRAME,
20959 + DPMAC_CNT_ING_ALL_FRAME,
20960 + DPMAC_CNT_ING_UCAST_FRAME,
20961 + DPMAC_CNT_ING_ERR_FRAME,
20962 + DPMAC_CNT_EGR_BYTE,
20963 + DPMAC_CNT_EGR_MCAST_FRAME,
20964 + DPMAC_CNT_EGR_BCAST_FRAME,
20965 + DPMAC_CNT_EGR_UCAST_FRAME,
20966 + DPMAC_CNT_EGR_ERR_FRAME,
20967 + DPMAC_CNT_ING_GOOD_FRAME,
20968 + DPMAC_CNT_ENG_GOOD_FRAME
20969 +};
20970 +
20971 +int dpmac_get_counter(struct fsl_mc_io *mc_io,
20972 + u32 cmd_flags,
20973 + u16 token,
20974 + enum dpmac_counter type,
20975 + u64 *counter);
20976 +
20977 +/**
20978 + * dpmac_set_port_mac_addr() - Set a MAC address associated with the physical
20979 + * port. This is not used for filtering, MAC is always in
20980 + * promiscuous mode, it is passed to DPNIs through DPNI API for
20981 + * application use.
20982 + * @mc_io: Pointer to opaque I/O object
20983 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20984 + * @token: Token of DPMAC object
20985 + * @addr: MAC address to set
20986 + *
20987 + * Return: '0' on success; error code otherwise.
20988 + */
20989 +int dpmac_set_port_mac_addr(struct fsl_mc_io *mc_io,
20990 + u32 cmd_flags,
20991 + u16 token,
20992 + const u8 addr[6]);
20993 +
20994 +int dpmac_get_api_version(struct fsl_mc_io *mc_io,
20995 + u32 cmd_flags,
20996 + u16 *major_ver,
20997 + u16 *minor_ver);
20998 +
20999 +#endif /* __FSL_DPMAC_H */
21000 --- /dev/null
21001 +++ b/drivers/staging/fsl-dpaa2/mac/mac.c
21002 @@ -0,0 +1,673 @@
21003 +/* Copyright 2015 Freescale Semiconductor Inc.
21004 + *
21005 + * Redistribution and use in source and binary forms, with or without
21006 + * modification, are permitted provided that the following conditions are met:
21007 + * * Redistributions of source code must retain the above copyright
21008 + * notice, this list of conditions and the following disclaimer.
21009 + * * Redistributions in binary form must reproduce the above copyright
21010 + * notice, this list of conditions and the following disclaimer in the
21011 + * documentation and/or other materials provided with the distribution.
21012 + * * Neither the name of Freescale Semiconductor nor the
21013 + * names of its contributors may be used to endorse or promote products
21014 + * derived from this software without specific prior written permission.
21015 + *
21016 + *
21017 + * ALTERNATIVELY, this software may be distributed under the terms of the
21018 + * GNU General Public License ("GPL") as published by the Free Software
21019 + * Foundation, either version 2 of that License or (at your option) any
21020 + * later version.
21021 + *
21022 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
21023 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21024 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21025 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
21026 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21027 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21028 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
21029 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21030 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
21031 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
21032 + */
21033 +
21034 +#include <linux/module.h>
21035 +
21036 +#include <linux/netdevice.h>
21037 +#include <linux/etherdevice.h>
21038 +#include <linux/msi.h>
21039 +#include <linux/rtnetlink.h>
21040 +#include <linux/if_vlan.h>
21041 +
21042 +#include <uapi/linux/if_bridge.h>
21043 +#include <net/netlink.h>
21044 +
21045 +#include <linux/of.h>
21046 +#include <linux/of_mdio.h>
21047 +#include <linux/of_net.h>
21048 +#include <linux/phy.h>
21049 +#include <linux/phy_fixed.h>
21050 +
21051 +#include <linux/fsl/mc.h>
21052 +
21053 +#include "dpmac.h"
21054 +#include "dpmac-cmd.h"
21055 +
21056 +struct dpaa2_mac_priv {
21057 + struct net_device *netdev;
21058 + struct fsl_mc_device *mc_dev;
21059 + struct dpmac_attr attr;
21060 + struct dpmac_link_state old_state;
21061 +};
21062 +
21063 +/* TODO: fix the 10G modes, mapping can't be right:
21064 + * XGMII is parallel
21065 + * XAUI is serial, using 8b/10b encoding
21066 + * XFI is also serial but using 64b/66b encoding
21067 + * they can't all map to XGMII...
21068 + *
21069 + * This must be kept in sync with enum dpmac_eth_if.
21070 + */
21071 +static phy_interface_t dpaa2_mac_iface_mode[] = {
21072 + PHY_INTERFACE_MODE_MII, /* DPMAC_ETH_IF_MII */
21073 + PHY_INTERFACE_MODE_RMII, /* DPMAC_ETH_IF_RMII */
21074 + PHY_INTERFACE_MODE_SMII, /* DPMAC_ETH_IF_SMII */
21075 + PHY_INTERFACE_MODE_GMII, /* DPMAC_ETH_IF_GMII */
21076 + PHY_INTERFACE_MODE_RGMII, /* DPMAC_ETH_IF_RGMII */
21077 + PHY_INTERFACE_MODE_SGMII, /* DPMAC_ETH_IF_SGMII */
21078 + PHY_INTERFACE_MODE_QSGMII, /* DPMAC_ETH_IF_QSGMII */
21079 + PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_XAUI */
21080 + PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_XFI */
21081 +};
21082 +
21083 +static void dpaa2_mac_link_changed(struct net_device *netdev)
21084 +{
21085 + struct phy_device *phydev;
21086 + struct dpmac_link_state state = { 0 };
21087 + struct dpaa2_mac_priv *priv = netdev_priv(netdev);
21088 + int err;
21089 +
21090 + /* the PHY just notified us of link state change */
21091 + phydev = netdev->phydev;
21092 +
21093 + state.up = !!phydev->link;
21094 + if (phydev->link) {
21095 + state.rate = phydev->speed;
21096 +
21097 + if (!phydev->duplex)
21098 + state.options |= DPMAC_LINK_OPT_HALF_DUPLEX;
21099 + if (phydev->autoneg)
21100 + state.options |= DPMAC_LINK_OPT_AUTONEG;
21101 +
21102 + netif_carrier_on(netdev);
21103 + } else {
21104 + netif_carrier_off(netdev);
21105 + }
21106 +
21107 + if (priv->old_state.up != state.up ||
21108 + priv->old_state.rate != state.rate ||
21109 + priv->old_state.options != state.options) {
21110 + priv->old_state = state;
21111 + phy_print_status(phydev);
21112 + }
21113 +
21114 + /* We must interrogate MC at all times, because we don't know
21115 + * when and whether a potential DPNI may have read the link state.
21116 + */
21117 + err = dpmac_set_link_state(priv->mc_dev->mc_io, 0,
21118 + priv->mc_dev->mc_handle, &state);
21119 + if (unlikely(err))
21120 + dev_err(&priv->mc_dev->dev, "dpmac_set_link_state: %d\n", err);
21121 +}
21122 +
21123 +static int dpaa2_mac_open(struct net_device *netdev)
21124 +{
21125 + /* start PHY state machine */
21126 + phy_start(netdev->phydev);
21127 +
21128 + return 0;
21129 +}
21130 +
21131 +static int dpaa2_mac_stop(struct net_device *netdev)
21132 +{
21133 + if (!netdev->phydev)
21134 + goto done;
21135 +
21136 + /* stop PHY state machine */
21137 + phy_stop(netdev->phydev);
21138 +
21139 + /* signal link down to firmware */
21140 + netdev->phydev->link = 0;
21141 + dpaa2_mac_link_changed(netdev);
21142 +
21143 +done:
21144 + return 0;
21145 +}
21146 +
21147 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21148 +static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb,
21149 + struct net_device *dev)
21150 +{
21151 + /* we don't support I/O for now, drop the frame */
21152 + dev_kfree_skb_any(skb);
21153 + return NETDEV_TX_OK;
21154 +}
21155 +
21156 +static int dpaa2_mac_get_link_ksettings(struct net_device *netdev,
21157 + struct ethtool_link_ksettings *ks)
21158 +{
21159 + phy_ethtool_ksettings_get(netdev->phydev, ks);
21160 +
21161 + return 0;
21162 +}
21163 +
21164 +static int dpaa2_mac_set_link_ksettings(struct net_device *netdev,
21165 + const struct ethtool_link_ksettings *ks)
21166 +{
21167 + return phy_ethtool_ksettings_set(netdev->phydev, ks);
21168 +}
21169 +
21170 +static struct rtnl_link_stats64 *dpaa2_mac_get_stats(struct net_device *netdev,
21171 + struct rtnl_link_stats64 *storage)
21172 +{
21173 + struct dpaa2_mac_priv *priv = netdev_priv(netdev);
21174 + u64 tmp;
21175 + int err;
21176 +
21177 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21178 + DPMAC_CNT_EGR_MCAST_FRAME,
21179 + &storage->tx_packets);
21180 + if (err)
21181 + goto error;
21182 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21183 + DPMAC_CNT_EGR_BCAST_FRAME, &tmp);
21184 + if (err)
21185 + goto error;
21186 + storage->tx_packets += tmp;
21187 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21188 + DPMAC_CNT_EGR_UCAST_FRAME, &tmp);
21189 + if (err)
21190 + goto error;
21191 + storage->tx_packets += tmp;
21192 +
21193 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21194 + DPMAC_CNT_EGR_UNDERSIZED, &storage->tx_dropped);
21195 + if (err)
21196 + goto error;
21197 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21198 + DPMAC_CNT_EGR_BYTE, &storage->tx_bytes);
21199 + if (err)
21200 + goto error;
21201 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21202 + DPMAC_CNT_EGR_ERR_FRAME, &storage->tx_errors);
21203 + if (err)
21204 + goto error;
21205 +
21206 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21207 + DPMAC_CNT_ING_ALL_FRAME, &storage->rx_packets);
21208 + if (err)
21209 + goto error;
21210 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21211 + DPMAC_CNT_ING_MCAST_FRAME, &storage->multicast);
21212 + if (err)
21213 + goto error;
21214 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21215 + DPMAC_CNT_ING_FRAME_DISCARD,
21216 + &storage->rx_dropped);
21217 + if (err)
21218 + goto error;
21219 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21220 + DPMAC_CNT_ING_ALIGN_ERR, &storage->rx_errors);
21221 + if (err)
21222 + goto error;
21223 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21224 + DPMAC_CNT_ING_OVERSIZED, &tmp);
21225 + if (err)
21226 + goto error;
21227 + storage->rx_errors += tmp;
21228 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21229 + DPMAC_CNT_ING_BYTE, &storage->rx_bytes);
21230 + if (err)
21231 + goto error;
21232 +
21233 + return storage;
21234 +error:
21235 + netdev_err(netdev, "dpmac_get_counter err %d\n", err);
21236 + return storage;
21237 +}
21238 +
21239 +static struct {
21240 + enum dpmac_counter id;
21241 + char name[ETH_GSTRING_LEN];
21242 +} dpaa2_mac_counters[] = {
21243 + {DPMAC_CNT_ING_ALL_FRAME, "rx all frames"},
21244 + {DPMAC_CNT_ING_GOOD_FRAME, "rx frames ok"},
21245 + {DPMAC_CNT_ING_ERR_FRAME, "rx frame errors"},
21246 + {DPMAC_CNT_ING_FRAME_DISCARD, "rx frame discards"},
21247 + {DPMAC_CNT_ING_UCAST_FRAME, "rx u-cast"},
21248 + {DPMAC_CNT_ING_BCAST_FRAME, "rx b-cast"},
21249 + {DPMAC_CNT_ING_MCAST_FRAME, "rx m-cast"},
21250 + {DPMAC_CNT_ING_FRAME_64, "rx 64 bytes"},
21251 + {DPMAC_CNT_ING_FRAME_127, "rx 65-127 bytes"},
21252 + {DPMAC_CNT_ING_FRAME_255, "rx 128-255 bytes"},
21253 + {DPMAC_CNT_ING_FRAME_511, "rx 256-511 bytes"},
21254 + {DPMAC_CNT_ING_FRAME_1023, "rx 512-1023 bytes"},
21255 + {DPMAC_CNT_ING_FRAME_1518, "rx 1024-1518 bytes"},
21256 + {DPMAC_CNT_ING_FRAME_1519_MAX, "rx 1519-max bytes"},
21257 + {DPMAC_CNT_ING_FRAG, "rx frags"},
21258 + {DPMAC_CNT_ING_JABBER, "rx jabber"},
21259 + {DPMAC_CNT_ING_ALIGN_ERR, "rx align errors"},
21260 + {DPMAC_CNT_ING_OVERSIZED, "rx oversized"},
21261 + {DPMAC_CNT_ING_VALID_PAUSE_FRAME, "rx pause"},
21262 + {DPMAC_CNT_ING_BYTE, "rx bytes"},
21263 + {DPMAC_CNT_ENG_GOOD_FRAME, "tx frames ok"},
21264 + {DPMAC_CNT_EGR_UCAST_FRAME, "tx u-cast"},
21265 + {DPMAC_CNT_EGR_MCAST_FRAME, "tx m-cast"},
21266 + {DPMAC_CNT_EGR_BCAST_FRAME, "tx b-cast"},
21267 + {DPMAC_CNT_EGR_ERR_FRAME, "tx frame errors"},
21268 + {DPMAC_CNT_EGR_UNDERSIZED, "tx undersized"},
21269 + {DPMAC_CNT_EGR_VALID_PAUSE_FRAME, "tx b-pause"},
21270 + {DPMAC_CNT_EGR_BYTE, "tx bytes"},
21271 +
21272 +};
21273 +
21274 +static void dpaa2_mac_get_strings(struct net_device *netdev,
21275 + u32 stringset, u8 *data)
21276 +{
21277 + int i;
21278 +
21279 + switch (stringset) {
21280 + case ETH_SS_STATS:
21281 + for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++)
21282 + memcpy(data + i * ETH_GSTRING_LEN,
21283 + dpaa2_mac_counters[i].name,
21284 + ETH_GSTRING_LEN);
21285 + break;
21286 + }
21287 +}
21288 +
21289 +static void dpaa2_mac_get_ethtool_stats(struct net_device *netdev,
21290 + struct ethtool_stats *stats,
21291 + u64 *data)
21292 +{
21293 + struct dpaa2_mac_priv *priv = netdev_priv(netdev);
21294 + int i;
21295 + int err;
21296 +
21297 + for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) {
21298 + err = dpmac_get_counter(priv->mc_dev->mc_io,
21299 + 0,
21300 + priv->mc_dev->mc_handle,
21301 + dpaa2_mac_counters[i].id, &data[i]);
21302 + if (err)
21303 + netdev_err(netdev, "dpmac_get_counter[%s] err %d\n",
21304 + dpaa2_mac_counters[i].name, err);
21305 + }
21306 +}
21307 +
21308 +static int dpaa2_mac_get_sset_count(struct net_device *dev, int sset)
21309 +{
21310 + switch (sset) {
21311 + case ETH_SS_STATS:
21312 + return ARRAY_SIZE(dpaa2_mac_counters);
21313 + default:
21314 + return -EOPNOTSUPP;
21315 + }
21316 +}
21317 +
21318 +static const struct net_device_ops dpaa2_mac_ndo_ops = {
21319 + .ndo_open = &dpaa2_mac_open,
21320 + .ndo_stop = &dpaa2_mac_stop,
21321 + .ndo_start_xmit = &dpaa2_mac_drop_frame,
21322 + .ndo_get_stats64 = &dpaa2_mac_get_stats,
21323 +};
21324 +
21325 +static const struct ethtool_ops dpaa2_mac_ethtool_ops = {
21326 + .get_link_ksettings = &dpaa2_mac_get_link_ksettings,
21327 + .set_link_ksettings = &dpaa2_mac_set_link_ksettings,
21328 + .get_strings = &dpaa2_mac_get_strings,
21329 + .get_ethtool_stats = &dpaa2_mac_get_ethtool_stats,
21330 + .get_sset_count = &dpaa2_mac_get_sset_count,
21331 +};
21332 +#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
21333 +
21334 +static void configure_link(struct dpaa2_mac_priv *priv,
21335 + struct dpmac_link_cfg *cfg)
21336 +{
21337 + struct phy_device *phydev = priv->netdev->phydev;
21338 +
21339 + if (unlikely(!phydev))
21340 + return;
21341 +
21342 + phydev->speed = cfg->rate;
21343 +	phydev->duplex = !(cfg->options & DPMAC_LINK_OPT_HALF_DUPLEX);
21344 +
21345 + if (cfg->options & DPMAC_LINK_OPT_AUTONEG) {
21346 + phydev->autoneg = 1;
21347 + phydev->advertising |= ADVERTISED_Autoneg;
21348 + } else {
21349 + phydev->autoneg = 0;
21350 + phydev->advertising &= ~ADVERTISED_Autoneg;
21351 + }
21352 +
21353 + phy_start_aneg(phydev);
21354 +}
21355 +
21356 +static irqreturn_t dpaa2_mac_irq_handler(int irq_num, void *arg)
21357 +{
21358 + struct device *dev = (struct device *)arg;
21359 + struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
21360 + struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
21361 + struct dpmac_link_cfg link_cfg;
21362 + u32 status;
21363 + int err;
21364 +
21365 + err = dpmac_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
21366 + DPMAC_IRQ_INDEX, &status);
21367 + if (unlikely(err || !status))
21368 + return IRQ_NONE;
21369 +
21370 + /* DPNI-initiated link configuration; 'ifconfig up' also calls this */
21371 + if (status & DPMAC_IRQ_EVENT_LINK_CFG_REQ) {
21372 + err = dpmac_get_link_cfg(mc_dev->mc_io, 0, mc_dev->mc_handle,
21373 + &link_cfg);
21374 + if (unlikely(err))
21375 + goto out;
21376 +
21377 + configure_link(priv, &link_cfg);
21378 + }
21379 +
21380 +out:
21381 + dpmac_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
21382 + DPMAC_IRQ_INDEX, status);
21383 +
21384 + return IRQ_HANDLED;
21385 +}
21386 +
21387 +static int setup_irqs(struct fsl_mc_device *mc_dev)
21388 +{
21389 + int err = 0;
21390 + struct fsl_mc_device_irq *irq;
21391 +
21392 + err = fsl_mc_allocate_irqs(mc_dev);
21393 + if (err) {
21394 + dev_err(&mc_dev->dev, "fsl_mc_allocate_irqs err %d\n", err);
21395 + return err;
21396 + }
21397 +
21398 + irq = mc_dev->irqs[0];
21399 + err = devm_request_threaded_irq(&mc_dev->dev, irq->msi_desc->irq,
21400 + NULL, &dpaa2_mac_irq_handler,
21401 + IRQF_NO_SUSPEND | IRQF_ONESHOT,
21402 + dev_name(&mc_dev->dev), &mc_dev->dev);
21403 + if (err) {
21404 + dev_err(&mc_dev->dev, "devm_request_threaded_irq err %d\n",
21405 + err);
21406 + goto free_irq;
21407 + }
21408 +
21409 + err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
21410 + DPMAC_IRQ_INDEX, DPMAC_IRQ_EVENT_LINK_CFG_REQ);
21411 + if (err) {
21412 + dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err);
21413 + goto free_irq;
21414 + }
21415 + err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
21416 + DPMAC_IRQ_INDEX, 1);
21417 + if (err) {
21418 + dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
21419 + goto free_irq;
21420 + }
21421 +
21422 + return 0;
21423 +
21424 +free_irq:
21425 + fsl_mc_free_irqs(mc_dev);
21426 +
21427 + return err;
21428 +}
21429 +
21430 +static void teardown_irqs(struct fsl_mc_device *mc_dev)
21431 +{
21432 + int err;
21433 +
21434 + err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
21435 + DPMAC_IRQ_INDEX, 0);
21436 + if (err)
21437 + dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
21438 +
21439 + fsl_mc_free_irqs(mc_dev);
21440 +}
21441 +
21442 +static struct device_node *find_dpmac_node(struct device *dev, u16 dpmac_id)
21443 +{
21444 + struct device_node *dpmacs, *dpmac = NULL;
21445 + struct device_node *mc_node = dev->of_node;
21446 + u32 id;
21447 + int err;
21448 +
21449 + dpmacs = of_find_node_by_name(mc_node, "dpmacs");
21450 + if (!dpmacs) {
21451 + dev_err(dev, "No dpmacs subnode in device-tree\n");
21452 + return NULL;
21453 + }
21454 +
21455 + while ((dpmac = of_get_next_child(dpmacs, dpmac))) {
21456 + err = of_property_read_u32(dpmac, "reg", &id);
21457 + if (err)
21458 + continue;
21459 + if (id == dpmac_id)
21460 + return dpmac;
21461 + }
21462 +
21463 + return NULL;
21464 +}
21465 +
21466 +static int dpaa2_mac_probe(struct fsl_mc_device *mc_dev)
21467 +{
21468 + struct device *dev;
21469 + struct dpaa2_mac_priv *priv = NULL;
21470 + struct device_node *phy_node, *dpmac_node;
21471 + struct net_device *netdev;
21472 + phy_interface_t if_mode;
21473 + int err = 0;
21474 +
21475 + dev = &mc_dev->dev;
21476 +
21477 + /* prepare a net_dev structure to make the phy lib API happy */
21478 + netdev = alloc_etherdev(sizeof(*priv));
21479 + if (!netdev) {
21480 + dev_err(dev, "alloc_etherdev error\n");
21481 + err = -ENOMEM;
21482 + goto err_exit;
21483 + }
21484 + priv = netdev_priv(netdev);
21485 + priv->mc_dev = mc_dev;
21486 + priv->netdev = netdev;
21487 +
21488 + SET_NETDEV_DEV(netdev, dev);
21489 +
21490 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21491 + snprintf(netdev->name, IFNAMSIZ, "mac%d", mc_dev->obj_desc.id);
21492 +#endif
21493 +
21494 + dev_set_drvdata(dev, priv);
21495 +
21496 + /* We may need to issue MC commands while in atomic context */
21497 + err = fsl_mc_portal_allocate(mc_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
21498 + &mc_dev->mc_io);
21499 + if (err || !mc_dev->mc_io) {
21500 + dev_dbg(dev, "fsl_mc_portal_allocate error: %d\n", err);
21501 + err = -EPROBE_DEFER;
21502 + goto err_free_netdev;
21503 + }
21504 +
21505 + err = dpmac_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
21506 + &mc_dev->mc_handle);
21507 + if (err || !mc_dev->mc_handle) {
21508 + dev_err(dev, "dpmac_open error: %d\n", err);
21509 + err = -ENODEV;
21510 + goto err_free_mcp;
21511 + }
21512 +
21513 + err = dpmac_get_attributes(mc_dev->mc_io, 0,
21514 + mc_dev->mc_handle, &priv->attr);
21515 + if (err) {
21516 + dev_err(dev, "dpmac_get_attributes err %d\n", err);
21517 + err = -EINVAL;
21518 + goto err_close;
21519 + }
21520 +
21521 + /* Look up the DPMAC node in the device-tree. */
21522 + dpmac_node = find_dpmac_node(dev, priv->attr.id);
21523 + if (!dpmac_node) {
21524 + dev_err(dev, "No dpmac@%d subnode found.\n", priv->attr.id);
21525 + err = -ENODEV;
21526 + goto err_close;
21527 + }
21528 +
21529 + err = setup_irqs(mc_dev);
21530 + if (err) {
21531 + err = -EFAULT;
21532 + goto err_close;
21533 + }
21534 +
21535 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21536 + /* OPTIONAL, register netdev just to make it visible to the user */
21537 + netdev->netdev_ops = &dpaa2_mac_ndo_ops;
21538 + netdev->ethtool_ops = &dpaa2_mac_ethtool_ops;
21539 +
21540 + /* phy starts up enabled so netdev should be up too */
21541 + netdev->flags |= IFF_UP;
21542 +
21543 + err = register_netdev(priv->netdev);
21544 + if (err < 0) {
21545 + dev_err(dev, "register_netdev error %d\n", err);
21546 + err = -ENODEV;
21547 + goto err_free_irq;
21548 + }
21549 +#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
21550 +
21551 + /* probe the PHY as a fixed-link if there's a phy-handle defined
21552 + * in the device tree
21553 + */
21554 + phy_node = of_parse_phandle(dpmac_node, "phy-handle", 0);
21555 + if (!phy_node) {
21556 + goto probe_fixed_link;
21557 + }
21558 +
21559 + if (priv->attr.eth_if < ARRAY_SIZE(dpaa2_mac_iface_mode)) {
21560 + if_mode = dpaa2_mac_iface_mode[priv->attr.eth_if];
21561 + dev_dbg(dev, "\tusing if mode %s for eth_if %d\n",
21562 + phy_modes(if_mode), priv->attr.eth_if);
21563 + } else {
21564 + dev_warn(dev, "Unexpected interface mode %d, will probe as fixed link\n",
21565 + priv->attr.eth_if);
21566 + goto probe_fixed_link;
21567 + }
21568 +
21569 + /* try to connect to the PHY */
21570 + netdev->phydev = of_phy_connect(netdev, phy_node,
21571 + &dpaa2_mac_link_changed, 0, if_mode);
21572 + if (!netdev->phydev) {
21573 + /* No need for dev_err(); the kernel's loud enough as it is. */
21574 + dev_dbg(dev, "Can't of_phy_connect() now.\n");
21575 + /* We might be waiting for the MDIO MUX to probe, so defer
21576 + * our own probing.
21577 + */
21578 + err = -EPROBE_DEFER;
21579 + goto err_defer;
21580 + }
21581 + dev_info(dev, "Connected to %s PHY.\n", phy_modes(if_mode));
21582 +
21583 +probe_fixed_link:
21584 + if (!netdev->phydev) {
21585 + struct fixed_phy_status status = {
21586 + .link = 1,
21587 + /* fixed-phys don't support 10Gbps speed for now */
21588 + .speed = 1000,
21589 + .duplex = 1,
21590 + };
21591 +
21592 + /* try to register a fixed link phy */
21593 + netdev->phydev = fixed_phy_register(PHY_POLL, &status, -1,
21594 + NULL);
21595 + if (!netdev->phydev || IS_ERR(netdev->phydev)) {
21596 + dev_err(dev, "error trying to register fixed PHY\n");
21597 + /* So we don't crash unregister_netdev() later on */
21598 + netdev->phydev = NULL;
21599 + err = -EFAULT;
21600 + goto err_no_phy;
21601 + }
21602 + dev_info(dev, "Registered fixed PHY.\n");
21603 + }
21604 +
21605 + dpaa2_mac_open(netdev);
21606 +
21607 + return 0;
21608 +
21609 +err_defer:
21610 +err_no_phy:
21611 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21612 + unregister_netdev(netdev);
21613 +err_free_irq:
21614 +#endif
21615 + teardown_irqs(mc_dev);
21616 +err_close:
21617 + dpmac_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
21618 +err_free_mcp:
21619 + fsl_mc_portal_free(mc_dev->mc_io);
21620 +err_free_netdev:
21621 + free_netdev(netdev);
21622 +err_exit:
21623 + return err;
21624 +}
21625 +
21626 +static int dpaa2_mac_remove(struct fsl_mc_device *mc_dev)
21627 +{
21628 + struct device *dev = &mc_dev->dev;
21629 + struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
21630 + struct net_device *netdev = priv->netdev;
21631 +
21632 + dpaa2_mac_stop(netdev);
21633 +
21634 + if (phy_is_pseudo_fixed_link(netdev->phydev))
21635 + fixed_phy_unregister(netdev->phydev);
21636 + else
21637 + phy_disconnect(netdev->phydev);
21638 + netdev->phydev = NULL;
21639 +
21640 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21641 + unregister_netdev(priv->netdev);
21642 +#endif
21643 + teardown_irqs(priv->mc_dev);
21644 + dpmac_close(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle);
21645 + fsl_mc_portal_free(priv->mc_dev->mc_io);
21646 + free_netdev(priv->netdev);
21647 +
21648 + dev_set_drvdata(dev, NULL);
21649 +
21650 + return 0;
21651 +}
21652 +
21653 +static const struct fsl_mc_device_id dpaa2_mac_match_id_table[] = {
21654 + {
21655 + .vendor = FSL_MC_VENDOR_FREESCALE,
21656 + .obj_type = "dpmac",
21657 + },
21658 + { .vendor = 0x0 }
21659 +};
21660 +MODULE_DEVICE_TABLE(fslmc, dpaa2_mac_match_id_table);
21661 +
21662 +static struct fsl_mc_driver dpaa2_mac_drv = {
21663 + .driver = {
21664 + .name = KBUILD_MODNAME,
21665 + .owner = THIS_MODULE,
21666 + },
21667 + .probe = dpaa2_mac_probe,
21668 + .remove = dpaa2_mac_remove,
21669 + .match_id_table = dpaa2_mac_match_id_table,
21670 +};
21671 +
21672 +module_fsl_mc_driver(dpaa2_mac_drv);
21673 +
21674 +MODULE_LICENSE("GPL");
21675 +MODULE_DESCRIPTION("DPAA2 PHY proxy interface driver");
21676 --- /dev/null
21677 +++ b/drivers/staging/fsl-dpaa2/rtc/Makefile
21678 @@ -0,0 +1,10 @@
21679 +
21680 +obj-$(CONFIG_PTP_1588_CLOCK_DPAA2) += dpaa2-rtc.o
21681 +
21682 +dpaa2-rtc-objs := rtc.o dprtc.o
21683 +
21684 +all:
21685 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
21686 +
21687 +clean:
21688 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
21689 --- /dev/null
21690 +++ b/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
21691 @@ -0,0 +1,160 @@
21692 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
21693 + *
21694 + * Redistribution and use in source and binary forms, with or without
21695 + * modification, are permitted provided that the following conditions are met:
21696 + * * Redistributions of source code must retain the above copyright
21697 + * notice, this list of conditions and the following disclaimer.
21698 + * * Redistributions in binary form must reproduce the above copyright
21699 + * notice, this list of conditions and the following disclaimer in the
21700 + * documentation and/or other materials provided with the distribution.
21701 + * * Neither the name of the above-listed copyright holders nor the
21702 + * names of any contributors may be used to endorse or promote products
21703 + * derived from this software without specific prior written permission.
21704 + *
21705 + *
21706 + * ALTERNATIVELY, this software may be distributed under the terms of the
21707 + * GNU General Public License ("GPL") as published by the Free Software
21708 + * Foundation, either version 2 of that License or (at your option) any
21709 + * later version.
21710 + *
21711 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21712 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21713 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21714 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
21715 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21716 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21717 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21718 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21719 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21720 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
21721 + * POSSIBILITY OF SUCH DAMAGE.
21722 + */
21723 +#ifndef _FSL_DPRTC_CMD_H
21724 +#define _FSL_DPRTC_CMD_H
21725 +
21726 +/* DPRTC Version */
21727 +#define DPRTC_VER_MAJOR 2
21728 +#define DPRTC_VER_MINOR 0
21729 +
21730 +/* Command versioning */
21731 +#define DPRTC_CMD_BASE_VERSION 1
21732 +#define DPRTC_CMD_ID_OFFSET 4
21733 +
21734 +#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION)
21735 +
21736 +/* Command IDs */
21737 +#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800)
21738 +#define DPRTC_CMDID_OPEN DPRTC_CMD(0x810)
21739 +#define DPRTC_CMDID_CREATE DPRTC_CMD(0x910)
21740 +#define DPRTC_CMDID_DESTROY DPRTC_CMD(0x990)
21741 +#define DPRTC_CMDID_GET_API_VERSION DPRTC_CMD(0xa10)
21742 +
21743 +#define DPRTC_CMDID_ENABLE DPRTC_CMD(0x002)
21744 +#define DPRTC_CMDID_DISABLE DPRTC_CMD(0x003)
21745 +#define DPRTC_CMDID_GET_ATTR DPRTC_CMD(0x004)
21746 +#define DPRTC_CMDID_RESET DPRTC_CMD(0x005)
21747 +#define DPRTC_CMDID_IS_ENABLED DPRTC_CMD(0x006)
21748 +
21749 +#define DPRTC_CMDID_SET_IRQ_ENABLE DPRTC_CMD(0x012)
21750 +#define DPRTC_CMDID_GET_IRQ_ENABLE DPRTC_CMD(0x013)
21751 +#define DPRTC_CMDID_SET_IRQ_MASK DPRTC_CMD(0x014)
21752 +#define DPRTC_CMDID_GET_IRQ_MASK DPRTC_CMD(0x015)
21753 +#define DPRTC_CMDID_GET_IRQ_STATUS DPRTC_CMD(0x016)
21754 +#define DPRTC_CMDID_CLEAR_IRQ_STATUS DPRTC_CMD(0x017)
21755 +
21756 +#define DPRTC_CMDID_SET_CLOCK_OFFSET DPRTC_CMD(0x1d0)
21757 +#define DPRTC_CMDID_SET_FREQ_COMPENSATION DPRTC_CMD(0x1d1)
21758 +#define DPRTC_CMDID_GET_FREQ_COMPENSATION DPRTC_CMD(0x1d2)
21759 +#define DPRTC_CMDID_GET_TIME DPRTC_CMD(0x1d3)
21760 +#define DPRTC_CMDID_SET_TIME DPRTC_CMD(0x1d4)
21761 +#define DPRTC_CMDID_SET_ALARM DPRTC_CMD(0x1d5)
21762 +#define DPRTC_CMDID_SET_PERIODIC_PULSE DPRTC_CMD(0x1d6)
21763 +#define DPRTC_CMDID_CLEAR_PERIODIC_PULSE DPRTC_CMD(0x1d7)
21764 +#define DPRTC_CMDID_SET_EXT_TRIGGER DPRTC_CMD(0x1d8)
21765 +#define DPRTC_CMDID_CLEAR_EXT_TRIGGER DPRTC_CMD(0x1d9)
21766 +#define DPRTC_CMDID_GET_EXT_TRIGGER_TIMESTAMP DPRTC_CMD(0x1dA)
21767 +
21768 +/* Macros for accessing command fields smaller than 1byte */
21769 +#define DPRTC_MASK(field) \
21770 + GENMASK(DPRTC_##field##_SHIFT + DPRTC_##field##_SIZE - 1, \
21771 + DPRTC_##field##_SHIFT)
21772 +#define dprtc_get_field(var, field) \
21773 + (((var) & DPRTC_MASK(field)) >> DPRTC_##field##_SHIFT)
21774 +
21775 +#pragma pack(push, 1)
21776 +struct dprtc_cmd_open {
21777 + uint32_t dprtc_id;
21778 +};
21779 +
21780 +struct dprtc_cmd_destroy {
21781 + uint32_t object_id;
21782 +};
21783 +
21784 +#define DPRTC_ENABLE_SHIFT 0
21785 +#define DPRTC_ENABLE_SIZE 1
21786 +
21787 +struct dprtc_rsp_is_enabled {
21788 + uint8_t en;
21789 +};
21790 +
21791 +struct dprtc_cmd_get_irq {
21792 + uint32_t pad;
21793 + uint8_t irq_index;
21794 +};
21795 +
21796 +struct dprtc_cmd_set_irq_enable {
21797 + uint8_t en;
21798 + uint8_t pad[3];
21799 + uint8_t irq_index;
21800 +};
21801 +
21802 +struct dprtc_rsp_get_irq_enable {
21803 + uint8_t en;
21804 +};
21805 +
21806 +struct dprtc_cmd_set_irq_mask {
21807 + uint32_t mask;
21808 + uint8_t irq_index;
21809 +};
21810 +
21811 +struct dprtc_rsp_get_irq_mask {
21812 + uint32_t mask;
21813 +};
21814 +
21815 +struct dprtc_cmd_get_irq_status {
21816 + uint32_t status;
21817 + uint8_t irq_index;
21818 +};
21819 +
21820 +struct dprtc_rsp_get_irq_status {
21821 + uint32_t status;
21822 +};
21823 +
21824 +struct dprtc_cmd_clear_irq_status {
21825 + uint32_t status;
21826 + uint8_t irq_index;
21827 +};
21828 +
21829 +struct dprtc_rsp_get_attributes {
21830 + uint32_t pad;
21831 + uint32_t id;
21832 +};
21833 +
21834 +struct dprtc_cmd_set_clock_offset {
21835 + uint64_t offset;
21836 +};
21837 +
21838 +struct dprtc_get_freq_compensation {
21839 + uint32_t freq_compensation;
21840 +};
21841 +
21842 +struct dprtc_time {
21843 + uint64_t time;
21844 +};
21845 +
21846 +struct dprtc_rsp_get_api_version {
21847 + uint16_t major;
21848 + uint16_t minor;
21849 +};
21850 +#pragma pack(pop)
21851 +#endif /* _FSL_DPRTC_CMD_H */
21852 --- /dev/null
21853 +++ b/drivers/staging/fsl-dpaa2/rtc/dprtc.c
21854 @@ -0,0 +1,746 @@
21855 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
21856 + *
21857 + * Redistribution and use in source and binary forms, with or without
21858 + * modification, are permitted provided that the following conditions are met:
21859 + * * Redistributions of source code must retain the above copyright
21860 + * notice, this list of conditions and the following disclaimer.
21861 + * * Redistributions in binary form must reproduce the above copyright
21862 + * notice, this list of conditions and the following disclaimer in the
21863 + * documentation and/or other materials provided with the distribution.
21864 + * * Neither the name of the above-listed copyright holders nor the
21865 + * names of any contributors may be used to endorse or promote products
21866 + * derived from this software without specific prior written permission.
21867 + *
21868 + *
21869 + * ALTERNATIVELY, this software may be distributed under the terms of the
21870 + * GNU General Public License ("GPL") as published by the Free Software
21871 + * Foundation, either version 2 of that License or (at your option) any
21872 + * later version.
21873 + *
21874 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21875 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21876 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21877 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
21878 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21879 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21880 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21881 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21882 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21883 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
21884 + * POSSIBILITY OF SUCH DAMAGE.
21885 + */
21886 +#include <linux/fsl/mc.h>
21887 +
21888 +#include "dprtc.h"
21889 +#include "dprtc-cmd.h"
21890 +
21891 +/**
21892 + * dprtc_open() - Open a control session for the specified object.
21893 + * @mc_io: Pointer to MC portal's I/O object
21894 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
21895 + * @dprtc_id: DPRTC unique ID
21896 + * @token: Returned token; use in subsequent API calls
21897 + *
21898 + * This function can be used to open a control session for an
21899 + * already created object; an object may have been declared in
21900 + * the DPL or by calling the dprtc_create function.
21901 + * This function returns a unique authentication token,
21902 + * associated with the specific object ID and the specific MC
21903 + * portal; this token must be used in all subsequent commands for
21904 + * this specific object
21905 + *
21906 + * Return: '0' on Success; Error code otherwise.
21907 + */
21908 +int dprtc_open(struct fsl_mc_io *mc_io,
21909 + uint32_t cmd_flags,
21910 + int dprtc_id,
21911 + uint16_t *token)
21912 +{
21913 + struct dprtc_cmd_open *cmd_params;
21914 + struct fsl_mc_command cmd = { 0 };
21915 + int err;
21916 +
21917 + /* prepare command */
21918 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN,
21919 + cmd_flags,
21920 + 0);
21921 + cmd_params = (struct dprtc_cmd_open *)cmd.params;
21922 + cmd_params->dprtc_id = cpu_to_le32(dprtc_id);
21923 +
21924 + /* send command to mc*/
21925 + err = mc_send_command(mc_io, &cmd);
21926 + if (err)
21927 + return err;
21928 +
21929 + /* retrieve response parameters */
21930 + *token = mc_cmd_hdr_read_token(&cmd);
21931 +
21932 + return err;
21933 +}
21934 +
21935 +/**
21936 + * dprtc_close() - Close the control session of the object
21937 + * @mc_io: Pointer to MC portal's I/O object
21938 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
21939 + * @token: Token of DPRTC object
21940 + *
21941 + * After this function is called, no further operations are
21942 + * allowed on the object without opening a new control session.
21943 + *
21944 + * Return: '0' on Success; Error code otherwise.
21945 + */
21946 +int dprtc_close(struct fsl_mc_io *mc_io,
21947 + uint32_t cmd_flags,
21948 + uint16_t token)
21949 +{
21950 + struct fsl_mc_command cmd = { 0 };
21951 +
21952 + /* prepare command */
21953 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
21954 + token);
21955 +
21956 + /* send command to mc*/
21957 + return mc_send_command(mc_io, &cmd);
21958 +}
21959 +
21960 +/**
21961 + * dprtc_create() - Create the DPRTC object.
21962 + * @mc_io: Pointer to MC portal's I/O object
21963 + * @dprc_token: Parent container token; '0' for default container
21964 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
21965 + * @cfg: Configuration structure
21966 + * @obj_id: Returned object id
21967 + *
21968 + * Create the DPRTC object, allocate required resources and
21969 + * perform required initialization.
21970 + *
21971 + * The function accepts an authentication token of a parent
21972 + * container that this object should be assigned to. The token
21973 + * can be '0' so the object will be assigned to the default container.
21974 + * The newly created object can be opened with the returned
21975 + * object id and using the container's associated tokens and MC portals.
21976 + *
21977 + * Return: '0' on Success; Error code otherwise.
21978 + */
21979 +int dprtc_create(struct fsl_mc_io *mc_io,
21980 + uint16_t dprc_token,
21981 + uint32_t cmd_flags,
21982 + const struct dprtc_cfg *cfg,
21983 + uint32_t *obj_id)
21984 +{
21985 + struct fsl_mc_command cmd = { 0 };
21986 + int err;
21987 +
21988 + (void)(cfg); /* unused */
21989 +
21990 + /* prepare command */
21991 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CREATE,
21992 + cmd_flags,
21993 + dprc_token);
21994 +
21995 + /* send command to mc*/
21996 + err = mc_send_command(mc_io, &cmd);
21997 + if (err)
21998 + return err;
21999 +
22000 + /* retrieve response parameters */
22001 + *obj_id = mc_cmd_read_object_id(&cmd);
22002 +
22003 + return 0;
22004 +}
22005 +
22006 +/**
22007 + * dprtc_destroy() - Destroy the DPRTC object and release all its resources.
22008 + * @mc_io: Pointer to MC portal's I/O object
22009 + * @dprc_token: Parent container token; '0' for default container
22010 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22011 + * @object_id: The object id; it must be a valid id within the container that
22012 + * created this object;
22013 + *
22014 + * The function accepts the authentication token of the parent container that
22015 + * created the object (not the one that currently owns the object). The object
22016 + * is searched within parent using the provided 'object_id'.
22017 + * All tokens to the object must be closed before calling destroy.
22018 + *
22019 + * Return: '0' on Success; error code otherwise.
22020 + */
22021 +int dprtc_destroy(struct fsl_mc_io *mc_io,
22022 + uint16_t dprc_token,
22023 + uint32_t cmd_flags,
22024 + uint32_t object_id)
22025 +{
22026 + struct dprtc_cmd_destroy *cmd_params;
22027 + struct fsl_mc_command cmd = { 0 };
22028 +
22029 + /* prepare command */
22030 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY,
22031 + cmd_flags,
22032 + dprc_token);
22033 + cmd_params = (struct dprtc_cmd_destroy *)cmd.params;
22034 + cmd_params->object_id = cpu_to_le32(object_id);
22035 +
22036 + /* send command to mc*/
22037 + return mc_send_command(mc_io, &cmd);
22038 +}
22039 +
22040 +int dprtc_enable(struct fsl_mc_io *mc_io,
22041 + uint32_t cmd_flags,
22042 + uint16_t token)
22043 +{
22044 + struct fsl_mc_command cmd = { 0 };
22045 +
22046 + /* prepare command */
22047 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags,
22048 + token);
22049 +
22050 + /* send command to mc*/
22051 + return mc_send_command(mc_io, &cmd);
22052 +}
22053 +
22054 +int dprtc_disable(struct fsl_mc_io *mc_io,
22055 + uint32_t cmd_flags,
22056 + uint16_t token)
22057 +{
22058 + struct fsl_mc_command cmd = { 0 };
22059 +
22060 + /* prepare command */
22061 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE,
22062 + cmd_flags,
22063 + token);
22064 +
22065 + /* send command to mc*/
22066 + return mc_send_command(mc_io, &cmd);
22067 +}
22068 +
22069 +int dprtc_is_enabled(struct fsl_mc_io *mc_io,
22070 + uint32_t cmd_flags,
22071 + uint16_t token,
22072 + int *en)
22073 +{
22074 + struct dprtc_rsp_is_enabled *rsp_params;
22075 + struct fsl_mc_command cmd = { 0 };
22076 + int err;
22077 +
22078 + /* prepare command */
22079 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_IS_ENABLED, cmd_flags,
22080 + token);
22081 +
22082 + /* send command to mc*/
22083 + err = mc_send_command(mc_io, &cmd);
22084 + if (err)
22085 + return err;
22086 +
22087 + /* retrieve response parameters */
22088 + rsp_params = (struct dprtc_rsp_is_enabled *)cmd.params;
22089 + *en = dprtc_get_field(rsp_params->en, ENABLE);
22090 +
22091 + return 0;
22092 +}
22093 +
22094 +int dprtc_reset(struct fsl_mc_io *mc_io,
22095 + uint32_t cmd_flags,
22096 + uint16_t token)
22097 +{
22098 + struct fsl_mc_command cmd = { 0 };
22099 +
22100 + /* prepare command */
22101 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET,
22102 + cmd_flags,
22103 + token);
22104 +
22105 + /* send command to mc*/
22106 + return mc_send_command(mc_io, &cmd);
22107 +}
22108 +
22109 +/**
22110 + * dprtc_set_irq_enable() - Set overall interrupt state.
22111 + * @mc_io: Pointer to MC portal's I/O object
22112 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22113 + * @token: Token of DPRTC object
22114 + * @irq_index: The interrupt index to configure
22115 + * @en: Interrupt state - enable = 1, disable = 0
22116 + *
22117 + * Allows GPP software to control when interrupts are generated.
22118 + * Each interrupt can have up to 32 causes. The enable/disable controls the
22119 + * overall interrupt state. If the interrupt is disabled no causes will cause
22120 + * an interrupt.
22121 + *
22122 + * Return: '0' on Success; Error code otherwise.
22123 + */
22124 +int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
22125 + uint32_t cmd_flags,
22126 + uint16_t token,
22127 + uint8_t irq_index,
22128 + uint8_t en)
22129 +{
22130 + struct dprtc_cmd_set_irq_enable *cmd_params;
22131 + struct fsl_mc_command cmd = { 0 };
22132 +
22133 + /* prepare command */
22134 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE,
22135 + cmd_flags,
22136 + token);
22137 + cmd_params = (struct dprtc_cmd_set_irq_enable *)cmd.params;
22138 + cmd_params->irq_index = irq_index;
22139 + cmd_params->en = en;
22140 +
22141 + /* send command to mc*/
22142 + return mc_send_command(mc_io, &cmd);
22143 +}
22144 +
22145 +/**
22146 + * dprtc_get_irq_enable() - Get overall interrupt state
22147 + * @mc_io: Pointer to MC portal's I/O object
22148 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22149 + * @token: Token of DPRTC object
22150 + * @irq_index: The interrupt index to configure
22151 + * @en: Returned interrupt state - enable = 1, disable = 0
22152 + *
22153 + * Return: '0' on Success; Error code otherwise.
22154 + */
22155 +int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
22156 + uint32_t cmd_flags,
22157 + uint16_t token,
22158 + uint8_t irq_index,
22159 + uint8_t *en)
22160 +{
22161 + struct dprtc_rsp_get_irq_enable *rsp_params;
22162 + struct dprtc_cmd_get_irq *cmd_params;
22163 + struct fsl_mc_command cmd = { 0 };
22164 + int err;
22165 +
22166 + /* prepare command */
22167 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_ENABLE,
22168 + cmd_flags,
22169 + token);
22170 + cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
22171 + cmd_params->irq_index = irq_index;
22172 +
22173 + /* send command to mc*/
22174 + err = mc_send_command(mc_io, &cmd);
22175 + if (err)
22176 + return err;
22177 +
22178 + /* retrieve response parameters */
22179 + rsp_params = (struct dprtc_rsp_get_irq_enable *)cmd.params;
22180 + *en = rsp_params->en;
22181 +
22182 + return 0;
22183 +}
22184 +
22185 +/**
22186 + * dprtc_set_irq_mask() - Set interrupt mask.
22187 + * @mc_io: Pointer to MC portal's I/O object
22188 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22189 + * @token: Token of DPRTC object
22190 + * @irq_index: The interrupt index to configure
22191 + * @mask: Event mask to trigger interrupt;
22192 + * each bit:
22193 + * 0 = ignore event
22194 + * 1 = consider event for asserting IRQ
22195 + *
22196 + * Every interrupt can have up to 32 causes and the interrupt model supports
22197 + * masking/unmasking each cause independently
22198 + *
22199 + * Return: '0' on Success; Error code otherwise.
22200 + */
22201 +int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
22202 + uint32_t cmd_flags,
22203 + uint16_t token,
22204 + uint8_t irq_index,
22205 + uint32_t mask)
22206 +{
22207 + struct dprtc_cmd_set_irq_mask *cmd_params;
22208 + struct fsl_mc_command cmd = { 0 };
22209 +
22210 + /* prepare command */
22211 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK,
22212 + cmd_flags,
22213 + token);
22214 + cmd_params = (struct dprtc_cmd_set_irq_mask *)cmd.params;
22215 + cmd_params->mask = cpu_to_le32(mask);
22216 + cmd_params->irq_index = irq_index;
22217 +
22218 + /* send command to mc*/
22219 + return mc_send_command(mc_io, &cmd);
22220 +}
22221 +
22222 +/**
22223 + * dprtc_get_irq_mask() - Get interrupt mask.
22224 + * @mc_io: Pointer to MC portal's I/O object
22225 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22226 + * @token: Token of DPRTC object
22227 + * @irq_index: The interrupt index to configure
22228 + * @mask: Returned event mask to trigger interrupt
22229 + *
22230 + * Every interrupt can have up to 32 causes and the interrupt model supports
22231 + * masking/unmasking each cause independently
22232 + *
22233 + * Return: '0' on Success; Error code otherwise.
22234 + */
22235 +int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
22236 + uint32_t cmd_flags,
22237 + uint16_t token,
22238 + uint8_t irq_index,
22239 + uint32_t *mask)
22240 +{
22241 + struct dprtc_rsp_get_irq_mask *rsp_params;
22242 + struct dprtc_cmd_get_irq *cmd_params;
22243 + struct fsl_mc_command cmd = { 0 };
22244 + int err;
22245 +
22246 + /* prepare command */
22247 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_MASK,
22248 + cmd_flags,
22249 + token);
22250 + cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
22251 + cmd_params->irq_index = irq_index;
22252 +
22253 + /* send command to mc*/
22254 + err = mc_send_command(mc_io, &cmd);
22255 + if (err)
22256 + return err;
22257 +
22258 + /* retrieve response parameters */
22259 + rsp_params = (struct dprtc_rsp_get_irq_mask *)cmd.params;
22260 + *mask = le32_to_cpu(rsp_params->mask);
22261 +
22262 + return 0;
22263 +}
22264 +
22265 +/**
22266 + * dprtc_get_irq_status() - Get the current status of any pending interrupts.
22267 + *
22268 + * @mc_io: Pointer to MC portal's I/O object
22269 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22270 + * @token: Token of DPRTC object
22271 + * @irq_index: The interrupt index to configure
22272 + * @status: Returned interrupts status - one bit per cause:
22273 + * 0 = no interrupt pending
22274 + * 1 = interrupt pending
22275 + *
22276 + * Return: '0' on Success; Error code otherwise.
22277 + */
22278 +int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
22279 + uint32_t cmd_flags,
22280 + uint16_t token,
22281 + uint8_t irq_index,
22282 + uint32_t *status)
22283 +{
22284 + struct dprtc_cmd_get_irq_status *cmd_params;
22285 + struct dprtc_rsp_get_irq_status *rsp_params;
22286 + struct fsl_mc_command cmd = { 0 };
22287 + int err;
22288 +
22289 + /* prepare command */
22290 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_STATUS,
22291 + cmd_flags,
22292 + token);
22293 + cmd_params = (struct dprtc_cmd_get_irq_status *)cmd.params;
22294 + cmd_params->status = cpu_to_le32(*status);
22295 + cmd_params->irq_index = irq_index;
22296 +
22297 + /* send command to mc*/
22298 + err = mc_send_command(mc_io, &cmd);
22299 + if (err)
22300 + return err;
22301 +
22302 + /* retrieve response parameters */
22303 + rsp_params = (struct dprtc_rsp_get_irq_status *)cmd.params;
22304 + *status = le32_to_cpu(rsp_params->status);
22305 +
22306 + return 0;
22307 +}
22308 +
22309 +/**
22310 + * dprtc_clear_irq_status() - Clear a pending interrupt's status
22311 + *
22312 + * @mc_io: Pointer to MC portal's I/O object
22313 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22314 + * @token: Token of DPRTC object
22315 + * @irq_index: The interrupt index to configure
22316 + * @status: Bits to clear (W1C) - one bit per cause:
22317 + * 0 = don't change
22318 + * 1 = clear status bit
22319 + *
22320 + * Return: '0' on Success; Error code otherwise.
22321 + */
22322 +int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
22323 + uint32_t cmd_flags,
22324 + uint16_t token,
22325 + uint8_t irq_index,
22326 + uint32_t status)
22327 +{
22328 + struct dprtc_cmd_clear_irq_status *cmd_params;
22329 + struct fsl_mc_command cmd = { 0 };
22330 +
22331 + /* prepare command */
22332 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS,
22333 + cmd_flags,
22334 + token);
22335 + cmd_params = (struct dprtc_cmd_clear_irq_status *)cmd.params;
22336 + cmd_params->irq_index = irq_index;
22337 + cmd_params->status = cpu_to_le32(status);
22338 +
22339 + /* send command to mc*/
22340 + return mc_send_command(mc_io, &cmd);
22341 +}
22342 +
22343 +/**
22344 + * dprtc_get_attributes() - Retrieve DPRTC attributes.
22345 + *
22346 + * @mc_io: Pointer to MC portal's I/O object
22347 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22348 + * @token: Token of DPRTC object
22349 + * @attr: Returned object's attributes
22350 + *
22351 + * Return: '0' on Success; Error code otherwise.
22352 + */
22353 +int dprtc_get_attributes(struct fsl_mc_io *mc_io,
22354 + uint32_t cmd_flags,
22355 + uint16_t token,
22356 + struct dprtc_attr *attr)
22357 +{
22358 + struct dprtc_rsp_get_attributes *rsp_params;
22359 + struct fsl_mc_command cmd = { 0 };
22360 + int err;
22361 +
22362 + /* prepare command */
22363 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_ATTR,
22364 + cmd_flags,
22365 + token);
22366 +
22367 + /* send command to mc*/
22368 + err = mc_send_command(mc_io, &cmd);
22369 + if (err)
22370 + return err;
22371 +
22372 + /* retrieve response parameters */
22373 + rsp_params = (struct dprtc_rsp_get_attributes *)cmd.params;
22374 + attr->id = le32_to_cpu(rsp_params->id);
22375 +
22376 + return 0;
22377 +}
22378 +
22379 +/**
22380 + * dprtc_set_clock_offset() - Sets the clock's offset
22381 + * (usually relative to another clock).
22382 + *
22383 + * @mc_io: Pointer to MC portal's I/O object
22384 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22385 + * @token: Token of DPRTC object
22386 + * @offset: New clock offset (in nanoseconds).
22387 + *
22388 + * Return: '0' on Success; Error code otherwise.
22389 + */
22390 +int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
22391 + uint32_t cmd_flags,
22392 + uint16_t token,
22393 + int64_t offset)
22394 +{
22395 + struct dprtc_cmd_set_clock_offset *cmd_params;
22396 + struct fsl_mc_command cmd = { 0 };
22397 +
22398 + /* prepare command */
22399 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET,
22400 + cmd_flags,
22401 + token);
22402 + cmd_params = (struct dprtc_cmd_set_clock_offset *)cmd.params;
22403 + cmd_params->offset = cpu_to_le64(offset);
22404 +
22405 + /* send command to mc*/
22406 + return mc_send_command(mc_io, &cmd);
22407 +}
22408 +
22409 +/**
22410 + * dprtc_set_freq_compensation() - Sets a new frequency compensation value.
22411 + *
22412 + * @mc_io: Pointer to MC portal's I/O object
22413 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22414 + * @token: Token of DPRTC object
22415 + * @freq_compensation: The new frequency compensation value to set.
22416 + *
22417 + * Return: '0' on Success; Error code otherwise.
22418 + */
22419 +int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
22420 + uint32_t cmd_flags,
22421 + uint16_t token,
22422 + uint32_t freq_compensation)
22423 +{
22424 + struct dprtc_get_freq_compensation *cmd_params;
22425 + struct fsl_mc_command cmd = { 0 };
22426 +
22427 + /* prepare command */
22428 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
22429 + cmd_flags,
22430 + token);
22431 + cmd_params = (struct dprtc_get_freq_compensation *)cmd.params;
22432 + cmd_params->freq_compensation = cpu_to_le32(freq_compensation);
22433 +
22434 + /* send command to mc*/
22435 + return mc_send_command(mc_io, &cmd);
22436 +}
22437 +
22438 +/**
22439 + * dprtc_get_freq_compensation() - Retrieves the frequency compensation value
22440 + *
22441 + * @mc_io: Pointer to MC portal's I/O object
22442 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22443 + * @token: Token of DPRTC object
22444 + * @freq_compensation: Frequency compensation value
22445 + *
22446 + * Return: '0' on Success; Error code otherwise.
22447 + */
22448 +int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
22449 + uint32_t cmd_flags,
22450 + uint16_t token,
22451 + uint32_t *freq_compensation)
22452 +{
22453 + struct dprtc_get_freq_compensation *rsp_params;
22454 + struct fsl_mc_command cmd = { 0 };
22455 + int err;
22456 +
22457 + /* prepare command */
22458 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION,
22459 + cmd_flags,
22460 + token);
22461 +
22462 + /* send command to mc*/
22463 + err = mc_send_command(mc_io, &cmd);
22464 + if (err)
22465 + return err;
22466 +
22467 + /* retrieve response parameters */
22468 + rsp_params = (struct dprtc_get_freq_compensation *)cmd.params;
22469 + *freq_compensation = le32_to_cpu(rsp_params->freq_compensation);
22470 +
22471 + return 0;
22472 +}
22473 +
22474 +/**
22475 + * dprtc_get_time() - Returns the current RTC time.
22476 + *
22477 + * @mc_io: Pointer to MC portal's I/O object
22478 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22479 + * @token: Token of DPRTC object
22480 + * @time: Current RTC time.
22481 + *
22482 + * Return: '0' on Success; Error code otherwise.
22483 + */
22484 +int dprtc_get_time(struct fsl_mc_io *mc_io,
22485 + uint32_t cmd_flags,
22486 + uint16_t token,
22487 + uint64_t *time)
22488 +{
22489 + struct dprtc_time *rsp_params;
22490 + struct fsl_mc_command cmd = { 0 };
22491 + int err;
22492 +
22493 + /* prepare command */
22494 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME,
22495 + cmd_flags,
22496 + token);
22497 +
22498 + /* send command to mc*/
22499 + err = mc_send_command(mc_io, &cmd);
22500 + if (err)
22501 + return err;
22502 +
22503 + /* retrieve response parameters */
22504 + rsp_params = (struct dprtc_time *)cmd.params;
22505 + *time = le64_to_cpu(rsp_params->time);
22506 +
22507 + return 0;
22508 +}
22509 +
22510 +/**
22511 + * dprtc_set_time() - Updates current RTC time.
22512 + *
22513 + * @mc_io: Pointer to MC portal's I/O object
22514 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22515 + * @token: Token of DPRTC object
22516 + * @time: New RTC time.
22517 + *
22518 + * Return: '0' on Success; Error code otherwise.
22519 + */
22520 +int dprtc_set_time(struct fsl_mc_io *mc_io,
22521 + uint32_t cmd_flags,
22522 + uint16_t token,
22523 + uint64_t time)
22524 +{
22525 + struct dprtc_time *cmd_params;
22526 + struct fsl_mc_command cmd = { 0 };
22527 +
22528 + /* prepare command */
22529 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME,
22530 + cmd_flags,
22531 + token);
22532 + cmd_params = (struct dprtc_time *)cmd.params;
22533 + cmd_params->time = cpu_to_le64(time);
22534 +
22535 + /* send command to mc*/
22536 + return mc_send_command(mc_io, &cmd);
22537 +}
22538 +
22539 +/**
22540 + * dprtc_set_alarm() - Defines and sets alarm.
22541 + *
22542 + * @mc_io: Pointer to MC portal's I/O object
22543 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22544 + * @token: Token of DPRTC object
22545 + * @time: In nanoseconds, the time when the alarm
22546 + * should go off - must be a multiple of
22547 + * 1 microsecond
22548 + *
22549 + * Return: '0' on Success; Error code otherwise.
22550 + */
22551 +int dprtc_set_alarm(struct fsl_mc_io *mc_io,
22552 + uint32_t cmd_flags,
22553 + uint16_t token, uint64_t time)
22554 +{
22555 + struct dprtc_time *cmd_params;
22556 + struct fsl_mc_command cmd = { 0 };
22557 +
22558 + /* prepare command */
22559 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM,
22560 + cmd_flags,
22561 + token);
22562 + cmd_params = (struct dprtc_time *)cmd.params;
22563 + cmd_params->time = cpu_to_le64(time);
22564 +
22565 + /* send command to mc*/
22566 + return mc_send_command(mc_io, &cmd);
22567 +}
22568 +
22569 +/**
22570 + * dprtc_get_api_version() - Get Data Path Real Time Counter API version
22571 + * @mc_io: Pointer to MC portal's I/O object
22572 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22573 + * @major_ver: Major version of data path real time counter API
22574 + * @minor_ver: Minor version of data path real time counter API
22575 + *
22576 + * Return: '0' on Success; Error code otherwise.
22577 + */
22578 +int dprtc_get_api_version(struct fsl_mc_io *mc_io,
22579 + uint32_t cmd_flags,
22580 + uint16_t *major_ver,
22581 + uint16_t *minor_ver)
22582 +{
22583 + struct dprtc_rsp_get_api_version *rsp_params;
22584 + struct fsl_mc_command cmd = { 0 };
22585 + int err;
22586 +
22587 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_API_VERSION,
22588 + cmd_flags,
22589 + 0);
22590 +
22591 + err = mc_send_command(mc_io, &cmd);
22592 + if (err)
22593 + return err;
22594 +
22595 + rsp_params = (struct dprtc_rsp_get_api_version *)cmd.params;
22596 + *major_ver = le16_to_cpu(rsp_params->major);
22597 + *minor_ver = le16_to_cpu(rsp_params->minor);
22598 +
22599 + return 0;
22600 +}
22601 --- /dev/null
22602 +++ b/drivers/staging/fsl-dpaa2/rtc/dprtc.h
22603 @@ -0,0 +1,172 @@
22604 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
22605 + *
22606 + * Redistribution and use in source and binary forms, with or without
22607 + * modification, are permitted provided that the following conditions are met:
22608 + * * Redistributions of source code must retain the above copyright
22609 + * notice, this list of conditions and the following disclaimer.
22610 + * * Redistributions in binary form must reproduce the above copyright
22611 + * notice, this list of conditions and the following disclaimer in the
22612 + * documentation and/or other materials provided with the distribution.
22613 + * * Neither the name of the above-listed copyright holders nor the
22614 + * names of any contributors may be used to endorse or promote products
22615 + * derived from this software without specific prior written permission.
22616 + *
22617 + *
22618 + * ALTERNATIVELY, this software may be distributed under the terms of the
22619 + * GNU General Public License ("GPL") as published by the Free Software
22620 + * Foundation, either version 2 of that License or (at your option) any
22621 + * later version.
22622 + *
22623 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22624 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22625 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22626 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22627 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22628 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22629 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22630 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22631 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22632 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22633 + * POSSIBILITY OF SUCH DAMAGE.
22634 + */
22635 +#ifndef __FSL_DPRTC_H
22636 +#define __FSL_DPRTC_H
22637 +
22638 +/* Data Path Real Time Counter API
22639 + * Contains initialization APIs and runtime control APIs for RTC
22640 + */
22641 +
22642 +struct fsl_mc_io;
22643 +
22644 +/**
22645 + * Number of irq's
22646 + */
22647 +#define DPRTC_MAX_IRQ_NUM 1
22648 +#define DPRTC_IRQ_INDEX 0
22649 +
22650 +/**
22651 + * Interrupt event masks:
22652 + */
22653 +
22654 +/**
22655 + * Interrupt event mask indicating alarm event had occurred
22656 + */
22657 +#define DPRTC_EVENT_ALARM 0x40000000
22658 +/**
22659 + * Interrupt event mask indicating periodic pulse event had occurred
22660 + */
22661 +#define DPRTC_EVENT_PPS 0x08000000
22662 +
22663 +int dprtc_open(struct fsl_mc_io *mc_io,
22664 + uint32_t cmd_flags,
22665 + int dprtc_id,
22666 + uint16_t *token);
22667 +
22668 +int dprtc_close(struct fsl_mc_io *mc_io,
22669 + uint32_t cmd_flags,
22670 + uint16_t token);
22671 +
22672 +/**
22673 + * struct dprtc_cfg - Structure representing DPRTC configuration
22674 + * @options: place holder
22675 + */
22676 +struct dprtc_cfg {
22677 + uint32_t options;
22678 +};
22679 +
22680 +int dprtc_create(struct fsl_mc_io *mc_io,
22681 + uint16_t dprc_token,
22682 + uint32_t cmd_flags,
22683 + const struct dprtc_cfg *cfg,
22684 + uint32_t *obj_id);
22685 +
22686 +int dprtc_destroy(struct fsl_mc_io *mc_io,
22687 + uint16_t dprc_token,
22688 + uint32_t cmd_flags,
22689 + uint32_t object_id);
22690 +
22691 +int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
22692 + uint32_t cmd_flags,
22693 + uint16_t token,
22694 + int64_t offset);
22695 +
22696 +int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
22697 + uint32_t cmd_flags,
22698 + uint16_t token,
22699 + uint32_t freq_compensation);
22700 +
22701 +int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
22702 + uint32_t cmd_flags,
22703 + uint16_t token,
22704 + uint32_t *freq_compensation);
22705 +
22706 +int dprtc_get_time(struct fsl_mc_io *mc_io,
22707 + uint32_t cmd_flags,
22708 + uint16_t token,
22709 + uint64_t *time);
22710 +
22711 +int dprtc_set_time(struct fsl_mc_io *mc_io,
22712 + uint32_t cmd_flags,
22713 + uint16_t token,
22714 + uint64_t time);
22715 +
22716 +int dprtc_set_alarm(struct fsl_mc_io *mc_io,
22717 + uint32_t cmd_flags,
22718 + uint16_t token,
22719 + uint64_t time);
22720 +
22721 +int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
22722 + uint32_t cmd_flags,
22723 + uint16_t token,
22724 + uint8_t irq_index,
22725 + uint8_t en);
22726 +
22727 +int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
22728 + uint32_t cmd_flags,
22729 + uint16_t token,
22730 + uint8_t irq_index,
22731 + uint8_t *en);
22732 +
22733 +int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
22734 + uint32_t cmd_flags,
22735 + uint16_t token,
22736 + uint8_t irq_index,
22737 + uint32_t mask);
22738 +
22739 +int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
22740 + uint32_t cmd_flags,
22741 + uint16_t token,
22742 + uint8_t irq_index,
22743 + uint32_t *mask);
22744 +
22745 +int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
22746 + uint32_t cmd_flags,
22747 + uint16_t token,
22748 + uint8_t irq_index,
22749 + uint32_t *status);
22750 +
22751 +int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
22752 + uint32_t cmd_flags,
22753 + uint16_t token,
22754 + uint8_t irq_index,
22755 + uint32_t status);
22756 +
22757 +/**
22758 + * struct dprtc_attr - Structure representing DPRTC attributes
22759 + * @id: DPRTC object ID
22760 + */
22761 +struct dprtc_attr {
22762 + int id;
22763 +};
22764 +
22765 +int dprtc_get_attributes(struct fsl_mc_io *mc_io,
22766 + uint32_t cmd_flags,
22767 + uint16_t token,
22768 + struct dprtc_attr *attr);
22769 +
22770 +int dprtc_get_api_version(struct fsl_mc_io *mc_io,
22771 + uint32_t cmd_flags,
22772 + uint16_t *major_ver,
22773 + uint16_t *minor_ver);
22774 +
22775 +#endif /* __FSL_DPRTC_H */
22776 --- /dev/null
22777 +++ b/drivers/staging/fsl-dpaa2/rtc/rtc.c
22778 @@ -0,0 +1,242 @@
22779 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
22780 + *
22781 + * Redistribution and use in source and binary forms, with or without
22782 + * modification, are permitted provided that the following conditions are met:
22783 + * * Redistributions of source code must retain the above copyright
22784 + * notice, this list of conditions and the following disclaimer.
22785 + * * Redistributions in binary form must reproduce the above copyright
22786 + * notice, this list of conditions and the following disclaimer in the
22787 + * documentation and/or other materials provided with the distribution.
22788 + * * Neither the name of the above-listed copyright holders nor the
22789 + * names of any contributors may be used to endorse or promote products
22790 + * derived from this software without specific prior written permission.
22791 + *
22792 + *
22793 + * ALTERNATIVELY, this software may be distributed under the terms of the
22794 + * GNU General Public License ("GPL") as published by the Free Software
22795 + * Foundation, either version 2 of that License or (at your option) any
22796 + * later version.
22797 + *
22798 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22799 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22800 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22801 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22802 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22803 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22804 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22805 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22806 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22807 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22808 + * POSSIBILITY OF SUCH DAMAGE.
22809 + */
22810 +
22811 +#include <linux/module.h>
22812 +#include <linux/ptp_clock_kernel.h>
22813 +
22814 +#include <linux/fsl/mc.h>
22815 +
22816 +#include "dprtc.h"
22817 +#include "dprtc-cmd.h"
22818 +
22819 +#define N_EXT_TS 2	/* external timestamp channels advertised via n_ext_ts */
22820 +
22821 +static struct ptp_clock *clock;	/* single registered PTP clock instance */
22822 +static struct fsl_mc_device *rtc_mc_dev;	/* DPRTC MC device bound in rtc_probe() */
22823 +static u32 freqCompensation;	/* TMR_ADD value cached from the DPRTC at probe */
22824 +
22825 +/* PTP clock operations */
22826 +static int ptp_dpaa2_adjfreq(struct ptp_clock_info *ptp, s32 ppb) /* scale DPRTC TMR_ADD by +/-ppb parts per billion */
22827 +{
22828 + u64 adj;
22829 + u32 diff, tmr_add;
22830 + int neg_adj = 0; /* set when ppb < 0, i.e. the clock must slow down */
22831 + int err = 0;
22832 + struct fsl_mc_device *mc_dev = rtc_mc_dev; /* module global, set in rtc_probe() */
22833 + struct device *dev = &mc_dev->dev;
22834 +
22835 + if (ppb < 0) { /* work with |ppb|, remember the sign */
22836 + neg_adj = 1;
22837 + ppb = -ppb;
22838 + }
22839 +
22840 + tmr_add = freqCompensation; /* nominal TMR_ADD cached at probe time */
22841 + adj = tmr_add; /* widen to 64 bits before the multiply to avoid overflow */
22842 + adj *= ppb;
22843 + diff = div_u64(adj, 1000000000ULL); /* diff = tmr_add * |ppb| / 1e9 */
22844 +
22845 + tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
22846 +
22847 + err = dprtc_set_freq_compensation(mc_dev->mc_io, 0,
22848 + mc_dev->mc_handle, tmr_add);
22849 + if (err) /* failure is logged only; PTP core always sees success */
22850 + dev_err(dev, "dprtc_set_freq_compensation err %d\n", err);
22851 + return 0;
22852 +}
22853 +
22854 +static int ptp_dpaa2_adjtime(struct ptp_clock_info *ptp, s64 delta) /* shift the hardware clock by delta ns */
22855 +{
22856 + s64 now;
22857 + int err = 0;
22858 + struct fsl_mc_device *mc_dev = rtc_mc_dev;
22859 + struct device *dev = &mc_dev->dev;
22860 +
22861 + err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &now);
22862 + if (err) { /* MC errors are logged and swallowed; caller always sees 0 */
22863 + dev_err(dev, "dprtc_get_time err %d\n", err);
22864 + return 0;
22865 + }
22866 +
22867 + now += delta; /* NOTE(review): get+set is not atomic; time advances between the two MC calls */
22868 +
22869 + err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, now);
22870 + if (err) {
22871 + dev_err(dev, "dprtc_set_time err %d\n", err);
22872 + return 0;
22873 + }
22874 + return 0;
22875 +}
22876 +
22877 +static int ptp_dpaa2_gettime(struct ptp_clock_info *ptp, struct timespec *ts) /* NOTE(review): wired to .gettime64, relies on timespec == timespec64 (64-bit arches) — confirm */
22878 +{
22879 + u64 ns;
22880 + u32 remainder;
22881 + int err = 0;
22882 + struct fsl_mc_device *mc_dev = rtc_mc_dev;
22883 + struct device *dev = &mc_dev->dev;
22884 +
22885 + err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &ns); /* read DPRTC nanosecond counter */
22886 + if (err) {
22887 + dev_err(dev, "dprtc_get_time err %d\n", err);
22888 + return 0; /* NOTE(review): ts is left uninitialized on error, yet success is returned */
22889 + }
22890 +
22891 + ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); /* split ns into sec + nsec */
22892 + ts->tv_nsec = remainder;
22893 + return 0;
22894 +}
22895 +
22896 +static int ptp_dpaa2_settime(struct ptp_clock_info *ptp, /* program an absolute time into the DPRTC */
22897 + const struct timespec *ts)
22898 +{
22899 + u64 ns;
22900 + int err = 0;
22901 + struct fsl_mc_device *mc_dev = rtc_mc_dev;
22902 + struct device *dev = &mc_dev->dev;
22903 +
22904 + ns = ts->tv_sec * 1000000000ULL; /* collapse timespec to a single ns value */
22905 + ns += ts->tv_nsec;
22906 +
22907 + err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, ns);
22908 + if (err) /* log only; PTP core is not told about the failure */
22909 + dev_err(dev, "dprtc_set_time err %d\n", err);
22910 + return 0;
22911 +}
22912 +
22913 +static struct ptp_clock_info ptp_dpaa2_caps = { /* capabilities and ops advertised to the PTP core */
22914 + .owner = THIS_MODULE,
22915 + .name = "dpaa2 clock",
22916 + .max_adj = 512000, /* max frequency adjustment accepted, in ppb */
22917 + .n_alarm = 0,
22918 + .n_ext_ts = N_EXT_TS,
22919 + .n_per_out = 0,
22920 + .n_pins = 0,
22921 + .pps = 1,
22922 + .adjfreq = ptp_dpaa2_adjfreq,
22923 + .adjtime = ptp_dpaa2_adjtime,
22924 + .gettime64 = ptp_dpaa2_gettime,
22925 + .settime64 = ptp_dpaa2_settime,
22926 +};
22927 +
22928 +static int rtc_probe(struct fsl_mc_device *mc_dev) /* bind a DPRTC object: allocate MC portal, open DPRTC, cache TMR_ADD, register the PTP clock */
22929 +{
22930 + struct device *dev;
22931 + int err = 0;
22932 + int dpaa2_phc_index; /* NOTE(review): set but never used or exported — candidate for removal */
22933 + u32 tmr_add = 0;
22934 +
22935 + if (!mc_dev)
22936 + return -EFAULT;
22937 +
22938 + dev = &mc_dev->dev;
22939 +
22940 + err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io); /* command portal for MC firmware calls */
22941 + if (unlikely(err)) {
22942 + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
22943 + goto err_exit;
22944 + }
22945 + if (!mc_dev->mc_io) { /* defensive: success but no handle */
22946 + dev_err(dev,
22947 + "fsl_mc_portal_allocate returned null handle but no error\n");
22948 + err = -EFAULT;
22949 + goto err_exit;
22950 + }
22951 +
22952 + err = dprtc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
22953 + &mc_dev->mc_handle);
22954 + if (err) {
22955 + dev_err(dev, "dprtc_open err %d\n", err);
22956 + goto err_free_mcp;
22957 + }
22958 + if (!mc_dev->mc_handle) { /* defensive: success but no handle */
22959 + dev_err(dev, "dprtc_open returned null handle but no error\n");
22960 + err = -EFAULT;
22961 + goto err_free_mcp;
22962 + }
22963 +
22964 + rtc_mc_dev = mc_dev; /* publish device for the ptp ops above */
22965 +
22966 + err = dprtc_get_freq_compensation(mc_dev->mc_io, 0,
22967 + mc_dev->mc_handle, &tmr_add);
22968 + if (err) {
22969 + dev_err(dev, "dprtc_get_freq_compensation err %d\n", err);
22970 + goto err_close;
22971 + }
22972 + freqCompensation = tmr_add; /* nominal value used as baseline by adjfreq */
22973 +
22974 + clock = ptp_clock_register(&ptp_dpaa2_caps, dev);
22975 + if (IS_ERR(clock)) {
22976 + err = PTR_ERR(clock);
22977 + goto err_close;
22978 + }
22979 + dpaa2_phc_index = ptp_clock_index(clock);
22980 +
22981 + return 0;
22982 +err_close: /* unwind in reverse order of acquisition */
22983 + dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
22984 +err_free_mcp:
22985 + fsl_mc_portal_free(mc_dev->mc_io);
22986 +err_exit:
22987 + return err;
22988 +}
22989 +
22990 +static int rtc_remove(struct fsl_mc_device *mc_dev) /* teardown: mirror of rtc_probe */
22991 +{
22992 + ptp_clock_unregister(clock); /* NOTE(review): clock/rtc_mc_dev globals are left dangling after remove */
22993 + dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
22994 + fsl_mc_portal_free(mc_dev->mc_io);
22995 +
22996 + return 0;
22997 +}
22998 +
22999 +static const struct fsl_mc_device_id rtc_match_id_table[] = { /* bind to any "dprtc" object on the fsl-mc bus */
23000 + {
23001 + .vendor = FSL_MC_VENDOR_FREESCALE,
23002 + .obj_type = "dprtc",
23003 + },
23004 + {} /* sentinel */
23005 +};
23006 +
23007 +static struct fsl_mc_driver rtc_drv = {
23008 + .driver = {
23009 + .name = KBUILD_MODNAME,
23010 + .owner = THIS_MODULE,
23011 + },
23012 + .probe = rtc_probe,
23013 + .remove = rtc_remove,
23014 + .match_id_table = rtc_match_id_table,
23015 +};
23016 +
23017 +module_fsl_mc_driver(rtc_drv); /* module init/exit boilerplate */
23018 +
23019 +MODULE_LICENSE("GPL");
23020 +MODULE_DESCRIPTION("DPAA2 RTC (PTP 1588 clock) driver (prototype)");
23021 --- a/include/linux/filter.h
23022 +++ b/include/linux/filter.h
23023 @@ -429,12 +429,15 @@ struct sk_filter {
23024
23025 struct bpf_skb_data_end {
23026 struct qdisc_skb_cb qdisc_cb;
23027 + void *data_meta; /* NOTE(review): backported XDP metadata pointer — presumably mirrors upstream bpf_skb_data_end layout; confirm */
23028 void *data_end;
23029 };
23030
23031 struct xdp_buff {
23032 void *data;
23033 void *data_end;
23034 + void *data_meta; /* assumed: start of the metadata area, data_meta <= data — TODO confirm */
23035 + void *data_hard_start; /* assumed: start of the buffer headroom — TODO confirm */
23036 };
23037
23038 /* compute the linear packet data range [data, data_end) which