1 From 3a302437605308079db398b67000a77a4fe92da8 Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Mon, 25 Sep 2017 12:07:58 +0800
4 Subject: [PATCH] dpaa2: support layerscape
6 This is an integrated patch for layerscape dpaa2 support.
8 Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
9 Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
10 Signed-off-by: Razvan Stefanescu <razvan.stefanescu@nxp.com>
11 Signed-off-by: costi <constantin.tudor@freescale.com>
12 Signed-off-by: Catalin Horghidan <catalin.horghidan@nxp.com>
13 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
15 drivers/soc/fsl/ls2-console/Kconfig | 4 +
16 drivers/soc/fsl/ls2-console/Makefile | 1 +
17 drivers/soc/fsl/ls2-console/ls2-console.c | 284 ++
18 drivers/staging/fsl-dpaa2/ethernet/Makefile | 11 +
19 drivers/staging/fsl-dpaa2/ethernet/README | 186 ++
20 .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 350 +++
21 .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 60 +
22 .../staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 184 ++
23 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 3155 ++++++++++++++++++++
24 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 460 +++
25 drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 856 ++++++
26 drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 176 ++
27 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 600 ++++
28 drivers/staging/fsl-dpaa2/ethernet/dpni.c | 1770 +++++++++++
29 drivers/staging/fsl-dpaa2/ethernet/dpni.h | 989 ++++++
30 drivers/staging/fsl-dpaa2/ethernet/net.h | 480 +++
31 drivers/staging/fsl-dpaa2/ethsw/Kconfig | 6 +
32 drivers/staging/fsl-dpaa2/ethsw/Makefile | 10 +
33 drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 851 ++++++
34 drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 2762 +++++++++++++++++
35 drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 1269 ++++++++
36 drivers/staging/fsl-dpaa2/ethsw/switch.c | 1857 ++++++++++++
37 drivers/staging/fsl-dpaa2/evb/Kconfig | 7 +
38 drivers/staging/fsl-dpaa2/evb/Makefile | 10 +
39 drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h | 279 ++
40 drivers/staging/fsl-dpaa2/evb/dpdmux.c | 1112 +++++++
41 drivers/staging/fsl-dpaa2/evb/dpdmux.h | 453 +++
42 drivers/staging/fsl-dpaa2/evb/evb.c | 1350 +++++++++
43 drivers/staging/fsl-dpaa2/mac/Kconfig | 23 +
44 drivers/staging/fsl-dpaa2/mac/Makefile | 10 +
45 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 172 ++
46 drivers/staging/fsl-dpaa2/mac/dpmac.c | 620 ++++
47 drivers/staging/fsl-dpaa2/mac/dpmac.h | 342 +++
48 drivers/staging/fsl-dpaa2/mac/mac.c | 666 +++++
49 drivers/staging/fsl-dpaa2/rtc/Makefile | 10 +
50 drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h | 160 +
51 drivers/staging/fsl-dpaa2/rtc/dprtc.c | 746 +++++
52 drivers/staging/fsl-dpaa2/rtc/dprtc.h | 172 ++
53 drivers/staging/fsl-dpaa2/rtc/rtc.c | 243 ++
54 39 files changed, 22696 insertions(+)
55 create mode 100644 drivers/soc/fsl/ls2-console/Kconfig
56 create mode 100644 drivers/soc/fsl/ls2-console/Makefile
57 create mode 100644 drivers/soc/fsl/ls2-console/ls2-console.c
58 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Makefile
59 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/README
60 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
61 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
62 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
63 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
64 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
65 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
66 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpkg.h
67 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
68 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.c
69 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.h
70 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/net.h
71 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Kconfig
72 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Makefile
73 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
74 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.c
75 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.h
76 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/switch.c
77 create mode 100644 drivers/staging/fsl-dpaa2/evb/Kconfig
78 create mode 100644 drivers/staging/fsl-dpaa2/evb/Makefile
79 create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
80 create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.c
81 create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.h
82 create mode 100644 drivers/staging/fsl-dpaa2/evb/evb.c
83 create mode 100644 drivers/staging/fsl-dpaa2/mac/Kconfig
84 create mode 100644 drivers/staging/fsl-dpaa2/mac/Makefile
85 create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
86 create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.c
87 create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.h
88 create mode 100644 drivers/staging/fsl-dpaa2/mac/mac.c
89 create mode 100644 drivers/staging/fsl-dpaa2/rtc/Makefile
90 create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
91 create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc.c
92 create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc.h
93 create mode 100644 drivers/staging/fsl-dpaa2/rtc/rtc.c
95 diff --git a/drivers/soc/fsl/ls2-console/Kconfig b/drivers/soc/fsl/ls2-console/Kconfig
97 index 00000000..47d0dc11
99 +++ b/drivers/soc/fsl/ls2-console/Kconfig
101 +config FSL_LS2_CONSOLE
102 + tristate "Layerscape MC and AIOP console support"
103 + depends on ARCH_LAYERSCAPE
105 diff --git a/drivers/soc/fsl/ls2-console/Makefile b/drivers/soc/fsl/ls2-console/Makefile
107 index 00000000..62b96346
109 +++ b/drivers/soc/fsl/ls2-console/Makefile
111 +obj-$(CONFIG_FSL_LS2_CONSOLE) += ls2-console.o
112 diff --git a/drivers/soc/fsl/ls2-console/ls2-console.c b/drivers/soc/fsl/ls2-console/ls2-console.c
114 index 00000000..68415ad0
116 +++ b/drivers/soc/fsl/ls2-console/ls2-console.c
118 +/* Copyright 2015-2016 Freescale Semiconductor Inc.
120 + * Redistribution and use in source and binary forms, with or without
121 + * modification, are permitted provided that the following conditions are met:
122 + * * Redistributions of source code must retain the above copyright
123 + * notice, this list of conditions and the following disclaimer.
124 + * * Redistributions in binary form must reproduce the above copyright
125 + * notice, this list of conditions and the following disclaimer in the
126 + * documentation and/or other materials provided with the distribution.
127 + * * Neither the name of the above-listed copyright holders nor the
128 + * names of any contributors may be used to endorse or promote products
129 + * derived from this software without specific prior written permission.
132 + * ALTERNATIVELY, this software may be distributed under the terms of the
133 + * GNU General Public License ("GPL") as published by the Free Software
134 + * Foundation, either version 2 of that License or (at your option) any
137 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
138 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
139 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
140 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
141 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
142 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
143 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
144 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
145 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
146 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
147 + * POSSIBILITY OF SUCH DAMAGE.
150 +#include <linux/miscdevice.h>
151 +#include <linux/uaccess.h>
152 +#include <linux/poll.h>
153 +#include <linux/compat.h>
154 +#include <linux/module.h>
155 +#include <linux/slab.h>
156 +#include <linux/io.h>
158 +/* SoC address for the MC firmware base low/high registers */
159 +#define SOC_CCSR_MC_FW_BASE_ADDR_REGS 0x8340020
160 +#define SOC_CCSR_MC_FW_BASE_ADDR_REGS_SIZE 2
161 +/* MC firmware base low/high registers indexes */
162 +#define MCFBALR_OFFSET 0
163 +#define MCFBAHR_OFFSET 1
165 +/* Bit mask used to obtain the most significant part of the MC base address */
166 +#define MC_FW_HIGH_ADDR_MASK 0x1FFFF
167 +/* Bit mask used to obtain the least significant part of the MC base address */
168 +#define MC_FW_LOW_ADDR_MASK 0xE0000000
170 +#define MC_BUFFER_OFFSET 0x01000000
171 +#define MC_BUFFER_SIZE (1024*1024*16)
172 +#define MC_OFFSET_DELTA (MC_BUFFER_OFFSET)
174 +#define AIOP_BUFFER_OFFSET 0x06000000
175 +#define AIOP_BUFFER_SIZE (1024*1024*16)
176 +#define AIOP_OFFSET_DELTA (0)
179 + char magic_word[8]; /* magic word */
180 + uint32_t buf_start; /* holds the 32-bit little-endian
181 + * offset of the start of the buffer
183 + uint32_t buf_length; /* holds the 32-bit little-endian
184 + * length of the buffer
186 + uint32_t last_byte; /* holds the 32-bit little-endian offset
187 + * of the byte after the last byte that
193 +#define LOG_HEADER_FLAG_BUFFER_WRAPAROUND 0x80000000
194 +#define LOG_VERSION_MAJOR 1
195 +#define LOG_VERSION_MINOR 0
198 +#define invalidate(p) { asm volatile("dc ivac, %0" : : "r" (p) : "memory"); }
200 +struct console_data {
202 + struct log_header *hdr;
203 + char *start_addr; /* Start of buffer */
204 + char *end_addr; /* End of buffer */
205 + char *end_of_data; /* Current end of data */
206 + char *cur_ptr; /* Last data sent to console */
209 +#define LAST_BYTE(a) ((a) & ~(LOG_HEADER_FLAG_BUFFER_WRAPAROUND))
211 +static inline void __adjust_end(struct console_data *cd)
213 + cd->end_of_data = cd->start_addr
214 + + LAST_BYTE(le32_to_cpu(cd->hdr->last_byte));
217 +static inline void adjust_end(struct console_data *cd)
219 + invalidate(cd->hdr);
223 +static inline uint64_t get_mc_fw_base_address(void)
225 + u32 *mcfbaregs = (u32 *) ioremap(SOC_CCSR_MC_FW_BASE_ADDR_REGS,
226 + SOC_CCSR_MC_FW_BASE_ADDR_REGS_SIZE);
227 + u64 mcfwbase = 0ULL;
229 + mcfwbase = readl(mcfbaregs + MCFBAHR_OFFSET) & MC_FW_HIGH_ADDR_MASK;
231 + mcfwbase |= readl(mcfbaregs + MCFBALR_OFFSET) & MC_FW_LOW_ADDR_MASK;
232 + iounmap(mcfbaregs);
233 + pr_info("fsl-ls2-console: MC base address at 0x%016llx\n", mcfwbase);
237 +static int fsl_ls2_generic_console_open(struct inode *node, struct file *fp,
238 + u64 offset, u64 size,
239 + uint8_t *emagic, uint8_t magic_len,
242 + struct console_data *cd;
246 + cd = kmalloc(sizeof(*cd), GFP_KERNEL);
249 + fp->private_data = cd;
250 + cd->map_addr = ioremap(get_mc_fw_base_address() + offset, size);
252 + cd->hdr = (struct log_header *) cd->map_addr;
253 + invalidate(cd->hdr);
255 + magic = cd->hdr->magic_word;
256 + if (memcmp(magic, emagic, magic_len)) {
257 + pr_info("magic didn't match!\n");
258 + pr_info("expected: %02x %02x %02x %02x %02x %02x %02x %02x\n",
259 + emagic[0], emagic[1], emagic[2], emagic[3],
260 + emagic[4], emagic[5], emagic[6], emagic[7]);
261 + pr_info(" seen: %02x %02x %02x %02x %02x %02x %02x %02x\n",
262 + magic[0], magic[1], magic[2], magic[3],
263 + magic[4], magic[5], magic[6], magic[7]);
265 + iounmap(cd->map_addr);
269 + cd->start_addr = cd->map_addr
270 + + le32_to_cpu(cd->hdr->buf_start) - offset_delta;
271 + cd->end_addr = cd->start_addr + le32_to_cpu(cd->hdr->buf_length);
273 + wrapped = le32_to_cpu(cd->hdr->last_byte)
274 + & LOG_HEADER_FLAG_BUFFER_WRAPAROUND;
277 + if (wrapped && (cd->end_of_data != cd->end_addr))
278 + cd->cur_ptr = cd->end_of_data+1;
280 + cd->cur_ptr = cd->start_addr;
285 +static int fsl_ls2_mc_console_open(struct inode *node, struct file *fp)
287 + uint8_t magic_word[] = { 0, 1, 'C', 'M' };
289 + return fsl_ls2_generic_console_open(node, fp,
290 + MC_BUFFER_OFFSET, MC_BUFFER_SIZE,
291 + magic_word, sizeof(magic_word),
295 +static int fsl_ls2_aiop_console_open(struct inode *node, struct file *fp)
297 + uint8_t magic_word[] = { 'P', 'O', 'I', 'A' };
299 + return fsl_ls2_generic_console_open(node, fp,
300 + AIOP_BUFFER_OFFSET, AIOP_BUFFER_SIZE,
301 + magic_word, sizeof(magic_word),
302 + AIOP_OFFSET_DELTA);
305 +static int fsl_ls2_console_close(struct inode *node, struct file *fp)
307 + struct console_data *cd = fp->private_data;
309 + iounmap(cd->map_addr);
314 +ssize_t fsl_ls2_console_read(struct file *fp, char __user *buf, size_t count,
317 + struct console_data *cd = fp->private_data;
321 + /* Check if we need to adjust the end of data addr */
324 + while ((count != bytes) && (cd->end_of_data != cd->cur_ptr)) {
325 + if (((u64)cd->cur_ptr) % 64 == 0)
326 + invalidate(cd->cur_ptr);
328 + data = *(cd->cur_ptr);
329 + if (copy_to_user(&buf[bytes], &data, 1))
332 + if (cd->cur_ptr >= cd->end_addr)
333 + cd->cur_ptr = cd->start_addr;
339 +static const struct file_operations fsl_ls2_mc_console_fops = {
340 + .owner = THIS_MODULE,
341 + .open = fsl_ls2_mc_console_open,
342 + .release = fsl_ls2_console_close,
343 + .read = fsl_ls2_console_read,
346 +static struct miscdevice fsl_ls2_mc_console_dev = {
347 + .minor = MISC_DYNAMIC_MINOR,
348 + .name = "fsl_mc_console",
349 + .fops = &fsl_ls2_mc_console_fops
352 +static const struct file_operations fsl_ls2_aiop_console_fops = {
353 + .owner = THIS_MODULE,
354 + .open = fsl_ls2_aiop_console_open,
355 + .release = fsl_ls2_console_close,
356 + .read = fsl_ls2_console_read,
359 +static struct miscdevice fsl_ls2_aiop_console_dev = {
360 + .minor = MISC_DYNAMIC_MINOR,
361 + .name = "fsl_aiop_console",
362 + .fops = &fsl_ls2_aiop_console_fops
365 +static int __init fsl_ls2_console_init(void)
369 + pr_info("Freescale LS2 console driver\n");
370 + err = misc_register(&fsl_ls2_mc_console_dev);
372 + pr_err("fsl_mc_console: cannot register device\n");
375 + pr_info("fsl-ls2-console: device %s registered\n",
376 + fsl_ls2_mc_console_dev.name);
378 + err = misc_register(&fsl_ls2_aiop_console_dev);
380 + pr_err("fsl_aiop_console: cannot register device\n");
383 + pr_info("fsl-ls2-console: device %s registered\n",
384 + fsl_ls2_aiop_console_dev.name);
389 +static void __exit fsl_ls2_console_exit(void)
391 + misc_deregister(&fsl_ls2_mc_console_dev);
393 + misc_deregister(&fsl_ls2_aiop_console_dev);
396 +module_init(fsl_ls2_console_init);
397 +module_exit(fsl_ls2_console_exit);
399 +MODULE_AUTHOR("Roy Pledge <roy.pledge@freescale.com>");
400 +MODULE_LICENSE("Dual BSD/GPL");
401 +MODULE_DESCRIPTION("Freescale LS2 console driver");
402 diff --git a/drivers/staging/fsl-dpaa2/ethernet/Makefile b/drivers/staging/fsl-dpaa2/ethernet/Makefile
404 index 00000000..e26911d5
406 +++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
409 +# Makefile for the Freescale DPAA2 Ethernet controller
412 +obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
414 +fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
415 +fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o
417 +# Needed by the tracing framework
418 +CFLAGS_dpaa2-eth.o := -I$(src)
419 diff --git a/drivers/staging/fsl-dpaa2/ethernet/README b/drivers/staging/fsl-dpaa2/ethernet/README
421 index 00000000..410952ec
423 +++ b/drivers/staging/fsl-dpaa2/ethernet/README
425 +Freescale DPAA2 Ethernet driver
426 +===============================
428 +This file provides documentation for the Freescale DPAA2 Ethernet driver.
433 + Supported Platforms
434 + Architecture Overview
435 + Creating a Network Interface
436 + Features & Offloads
441 +This driver provides networking support for Freescale DPAA2 SoCs, e.g.
442 +LS2080A, LS2088A, LS1088A.
445 +Architecture Overview
446 +=====================
447 +Unlike regular NICs, in the DPAA2 architecture there is no single hardware block
448 +representing network interfaces; instead, several separate hardware resources
449 +concur to provide the networking functionality:
450 + - network interfaces
455 +All hardware resources are allocated and configured through the Management
456 +Complex (MC) portals. MC abstracts most of these resources as DPAA2 objects
457 +and exposes ABIs through which they can be configured and controlled. A few
458 +hardware resources, like queues, do not have a corresponding MC object and
459 +are treated as internal resources of other objects.
461 +For a more detailed description of the DPAA2 architecture and its object
463 + drivers/staging/fsl-mc/README.txt
465 +Each Linux net device is built on top of a Datapath Network Interface (DPNI)
466 +object and uses Buffer Pools (DPBPs), I/O Portals (DPIOs) and Concentrators
469 +Configuration interface:
471 + -----------------------
472 + | DPAA2 Ethernet Driver |
473 + -----------------------
476 + . . . . . . . . . . . .
479 + ---------- ---------- -----------
480 + | DPBP API | | DPNI API | | DPCON API |
481 + ---------- ---------- -----------
483 +=========== . ========== . ============ . ===================
485 + ------------------------------------------
486 + | MC hardware portals |
487 + ------------------------------------------
490 + ------ ------ -------
491 + | DPBP | | DPNI | | DPCON |
492 + ------ ------ -------
494 +The DPNIs are network interfaces without a direct one-on-one mapping to PHYs.
495 +DPBPs represent hardware buffer pools. Packet I/O is performed in the context
496 +of DPCON objects, using DPIO portals for managing and communicating with the
499 +Datapath (I/O) interface:
501 + -----------------------------------------------
502 + | DPAA2 Ethernet Driver |
503 + -----------------------------------------------
506 + enqueue| dequeue| data | dequeue| seed |
507 + (Tx) | (Rx, TxC)| avail.| request| buffers|
511 + -----------------------------------------------
513 + -----------------------------------------------
515 + | | | | | ================
517 + -----------------------------------------------
518 + | I/O hardware portals |
519 + -----------------------------------------------
523 + V | ================ V
524 + ---------------------- | -------------
525 + queues ---------------------- | | Buffer pool |
526 + ---------------------- | -------------
527 + =======================
530 +Datapath I/O (DPIO) portals provide enqueue and dequeue services, data
531 +availability notifications and buffer pool management. DPIOs are shared between
532 +all DPAA2 objects (and implicitly all DPAA2 kernel drivers) that work with data
533 +frames, but must be affine to the CPUs for the purpose of traffic distribution.
535 +Frames are transmitted and received through hardware frame queues, which can be
536 +grouped in channels for the purpose of hardware scheduling. The Ethernet driver
537 +enqueues TX frames on egress queues and after transmission is complete a TX
538 +confirmation frame is sent back to the CPU.
540 +When frames are available on ingress queues, a data availability notification
541 +is sent to the CPU; notifications are raised per channel, so even if multiple
542 +queues in the same channel have available frames, only one notification is sent.
543 +After a channel fires a notification, it must be explicitly rearmed.
545 +Each network interface can have multiple Rx, Tx and confirmation queues affined
546 +to CPUs, and one channel (DPCON) for each CPU that services at least one queue.
547 +DPCONs are used to distribute ingress traffic to different CPUs via the cores'
550 +The role of hardware buffer pools is storage of ingress frame data. Each network
551 +interface has a privately owned buffer pool which it seeds with kernel allocated
555 +DPNIs are decoupled from PHYs; a DPNI can be connected to a PHY through a DPMAC
556 +object or to another DPNI through an internal link, but the connection is
557 +managed by MC and completely transparent to the Ethernet driver.
559 + --------- --------- ---------
560 + | eth if1 | | eth if2 | | eth ifn |
561 + --------- --------- ---------
565 + ---------------------------
566 + | DPAA2 Ethernet Driver |
567 + ---------------------------
571 + ------ ------ ------ -------
572 + | DPNI | | DPNI | | DPNI | | DPMAC |----+
573 + ------ ------ ------ ------- |
576 + =========== ================== | PHY |
579 +Creating a Network Interface
580 +============================
581 +A net device is created for each DPNI object probed on the MC bus. Each DPNI has
582 +a number of properties which determine the network interface configuration
583 +options and associated hardware resources.
585 +DPNI objects (and the other DPAA2 objects needed for a network interface) can be
586 +added to a container on the MC bus in one of two ways: statically, through a
587 +Datapath Layout Binary file (DPL) that is parsed by MC at boot time; or created
588 +dynamically at runtime, via the DPAA2 objects APIs.
593 +Hardware checksum offloading is supported for TCP and UDP over IPv4/6 frames.
594 +The checksum offloads can be independently configured on RX and TX through
597 +Hardware offload of unicast and multicast MAC filtering is supported on the
598 +ingress path and permanently enabled.
600 +Scatter-gather frames are supported on both RX and TX paths. On TX, SG support
601 +is configurable via ethtool; on RX it is always enabled.
603 +The DPAA2 hardware can process jumbo Ethernet frames of up to 10K bytes.
605 +The Ethernet driver defines a static flow hashing scheme that distributes
606 +traffic based on a 5-tuple key: src IP, dst IP, IP proto, L4 src port,
607 +L4 dst port. No user configuration is supported for now.
609 +Hardware specific statistics for the network interface as well as some
610 +non-standard driver stats can be consulted through ethtool -S option.
611 diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
613 index 00000000..445c5d17
615 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
618 +/* Copyright 2015 Freescale Semiconductor Inc.
620 + * Redistribution and use in source and binary forms, with or without
621 + * modification, are permitted provided that the following conditions are met:
622 + * * Redistributions of source code must retain the above copyright
623 + * notice, this list of conditions and the following disclaimer.
624 + * * Redistributions in binary form must reproduce the above copyright
625 + * notice, this list of conditions and the following disclaimer in the
626 + * documentation and/or other materials provided with the distribution.
627 + * * Neither the name of Freescale Semiconductor nor the
628 + * names of its contributors may be used to endorse or promote products
629 + * derived from this software without specific prior written permission.
632 + * ALTERNATIVELY, this software may be distributed under the terms of the
633 + * GNU General Public License ("GPL") as published by the Free Software
634 + * Foundation, either version 2 of that License or (at your option) any
637 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
638 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
639 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
640 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
641 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
642 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
643 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
644 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
645 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
646 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
649 +#include <linux/module.h>
650 +#include <linux/debugfs.h>
651 +#include "dpaa2-eth.h"
652 +#include "dpaa2-eth-debugfs.h"
654 +#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
656 +static struct dentry *dpaa2_dbg_root;
658 +static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
660 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
661 + struct rtnl_link_stats64 *stats;
662 + struct dpaa2_eth_drv_stats *extras;
665 + seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
666 + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s\n",
667 + "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
668 + "Tx SG", "Enq busy");
670 + for_each_online_cpu(i) {
671 + stats = per_cpu_ptr(priv->percpu_stats, i);
672 + extras = per_cpu_ptr(priv->percpu_extras, i);
673 + seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
677 + extras->rx_sg_frames,
680 + extras->tx_conf_frames,
681 + extras->tx_sg_frames,
682 + extras->tx_portal_busy);
688 +static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
691 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
693 + err = single_open(file, dpaa2_dbg_cpu_show, priv);
695 + netdev_err(priv->net_dev, "single_open() failed\n");
700 +static const struct file_operations dpaa2_dbg_cpu_ops = {
701 + .open = dpaa2_dbg_cpu_open,
703 + .llseek = seq_lseek,
704 + .release = single_release,
707 +static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
709 + switch (fq->type) {
712 + case DPAA2_TX_CONF_FQ:
714 + case DPAA2_RX_ERR_FQ:
721 +static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
723 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
724 + struct dpaa2_eth_fq *fq;
728 + seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
729 + seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
730 + "VFQID", "CPU", "Type", "Frames", "Pending frames",
733 + for (i = 0; i < priv->num_fqs; i++) {
735 + err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
739 + seq_printf(file, "%5d%16d%16s%16llu%16u%16llu\n",
742 + fq_type_to_str(fq),
745 + fq->stats.congestion_entry);
751 +static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
754 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
756 + err = single_open(file, dpaa2_dbg_fqs_show, priv);
758 + netdev_err(priv->net_dev, "single_open() failed\n");
763 +static const struct file_operations dpaa2_dbg_fq_ops = {
764 + .open = dpaa2_dbg_fqs_open,
766 + .llseek = seq_lseek,
767 + .release = single_release,
770 +static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
772 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
773 + struct dpaa2_eth_channel *ch;
776 + seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
777 + seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
778 + "CHID", "CPU", "Deq busy", "Frames", "CDANs",
781 + for (i = 0; i < priv->num_channels; i++) {
782 + ch = priv->channel[i];
783 + seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu\n",
785 + ch->nctx.desired_cpu,
786 + ch->stats.dequeue_portal_busy,
789 + ch->stats.frames / ch->stats.cdan);
795 +static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
798 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
800 + err = single_open(file, dpaa2_dbg_ch_show, priv);
802 + netdev_err(priv->net_dev, "single_open() failed\n");
807 +static const struct file_operations dpaa2_dbg_ch_ops = {
808 + .open = dpaa2_dbg_ch_open,
810 + .llseek = seq_lseek,
811 + .release = single_release,
814 +static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf,
815 + size_t count, loff_t *offset)
817 + struct dpaa2_eth_priv *priv = file->private_data;
818 + struct rtnl_link_stats64 *percpu_stats;
819 + struct dpaa2_eth_drv_stats *percpu_extras;
820 + struct dpaa2_eth_fq *fq;
821 + struct dpaa2_eth_channel *ch;
824 + for_each_online_cpu(i) {
825 + percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
826 + memset(percpu_stats, 0, sizeof(*percpu_stats));
828 + percpu_extras = per_cpu_ptr(priv->percpu_extras, i);
829 + memset(percpu_extras, 0, sizeof(*percpu_extras));
832 + for (i = 0; i < priv->num_fqs; i++) {
834 + memset(&fq->stats, 0, sizeof(fq->stats));
837 + for (i = 0; i < priv->num_channels; i++) {
838 + ch = priv->channel[i];
839 + memset(&ch->stats, 0, sizeof(ch->stats));
845 +static const struct file_operations dpaa2_dbg_reset_ops = {
846 + .open = simple_open,
847 + .write = dpaa2_dbg_reset_write,
850 +static ssize_t dpaa2_dbg_reset_mc_write(struct file *file,
851 + const char __user *buf,
852 + size_t count, loff_t *offset)
854 + struct dpaa2_eth_priv *priv = file->private_data;
857 + err = dpni_reset_statistics(priv->mc_io, 0, priv->mc_token);
859 + netdev_err(priv->net_dev,
860 + "dpni_reset_statistics() failed %d\n", err);
865 +static const struct file_operations dpaa2_dbg_reset_mc_ops = {
866 + .open = simple_open,
867 + .write = dpaa2_dbg_reset_mc_write,
870 +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
872 + if (!dpaa2_dbg_root)
875 + /* Create a directory for the interface */
876 + priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
878 + if (!priv->dbg.dir) {
879 + netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
883 + /* per-cpu stats file */
884 + priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
885 + priv->dbg.dir, priv,
886 + &dpaa2_dbg_cpu_ops);
887 + if (!priv->dbg.cpu_stats) {
888 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
889 + goto err_cpu_stats;
892 + /* per-fq stats file */
893 + priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
894 + priv->dbg.dir, priv,
895 + &dpaa2_dbg_fq_ops);
896 + if (!priv->dbg.fq_stats) {
897 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
901 + /* per-fq stats file */
902 + priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
903 + priv->dbg.dir, priv,
904 + &dpaa2_dbg_ch_ops);
905 + if (!priv->dbg.fq_stats) {
906 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
911 + priv->dbg.reset_stats = debugfs_create_file("reset_stats", 0200,
912 + priv->dbg.dir, priv,
913 + &dpaa2_dbg_reset_ops);
914 + if (!priv->dbg.reset_stats) {
915 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
916 + goto err_reset_stats;
919 + /* reset MC stats */
920 + priv->dbg.reset_mc_stats = debugfs_create_file("reset_mc_stats",
921 + 0222, priv->dbg.dir, priv,
922 + &dpaa2_dbg_reset_mc_ops);
923 + if (!priv->dbg.reset_mc_stats) {
924 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
925 + goto err_reset_mc_stats;
931 + debugfs_remove(priv->dbg.reset_stats);
933 + debugfs_remove(priv->dbg.ch_stats);
935 + debugfs_remove(priv->dbg.fq_stats);
937 + debugfs_remove(priv->dbg.cpu_stats);
939 + debugfs_remove(priv->dbg.dir);
942 +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
944 + debugfs_remove(priv->dbg.reset_mc_stats);
945 + debugfs_remove(priv->dbg.reset_stats);
946 + debugfs_remove(priv->dbg.fq_stats);
947 + debugfs_remove(priv->dbg.ch_stats);
948 + debugfs_remove(priv->dbg.cpu_stats);
949 + debugfs_remove(priv->dbg.dir);
952 +void dpaa2_eth_dbg_init(void)
954 + dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
955 + if (!dpaa2_dbg_root) {
956 + pr_err("DPAA2-ETH: debugfs create failed\n");
960 + pr_info("DPAA2-ETH: debugfs created\n");
963 +void __exit dpaa2_eth_dbg_exit(void)
965 + debugfs_remove(dpaa2_dbg_root);
967 diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
969 index 00000000..551e6c4c
971 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
973 +/* Copyright 2015 Freescale Semiconductor Inc.
975 + * Redistribution and use in source and binary forms, with or without
976 + * modification, are permitted provided that the following conditions are met:
977 + * * Redistributions of source code must retain the above copyright
978 + * notice, this list of conditions and the following disclaimer.
979 + * * Redistributions in binary form must reproduce the above copyright
980 + * notice, this list of conditions and the following disclaimer in the
981 + * documentation and/or other materials provided with the distribution.
982 + * * Neither the name of Freescale Semiconductor nor the
983 + * names of its contributors may be used to endorse or promote products
984 + * derived from this software without specific prior written permission.
987 + * ALTERNATIVELY, this software may be distributed under the terms of the
988 + * GNU General Public License ("GPL") as published by the Free Software
989 + * Foundation, either version 2 of that License or (at your option) any
992 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
993 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
994 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
995 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
996 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
997 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
998 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
999 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1000 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1001 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1004 +#ifndef DPAA2_ETH_DEBUGFS_H
1005 +#define DPAA2_ETH_DEBUGFS_H
1007 +#include <linux/dcache.h>
1009 +struct dpaa2_eth_priv;
1011 +struct dpaa2_debugfs {
1012 + struct dentry *dir;
1013 + struct dentry *fq_stats;
1014 + struct dentry *ch_stats;
1015 + struct dentry *cpu_stats;
1016 + struct dentry *reset_stats;
1017 + struct dentry *reset_mc_stats;
1020 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
1021 +void dpaa2_eth_dbg_init(void);
1022 +void dpaa2_eth_dbg_exit(void);
1023 +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
1024 +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
1026 +static inline void dpaa2_eth_dbg_init(void) {}
1027 +static inline void dpaa2_eth_dbg_exit(void) {}
1028 +static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
1029 +static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
1030 +#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */
1032 +#endif /* DPAA2_ETH_DEBUGFS_H */
1033 diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
1034 new file mode 100644
1035 index 00000000..e8e6522a
1037 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
1039 +/* Copyright 2014-2015 Freescale Semiconductor Inc.
1041 + * Redistribution and use in source and binary forms, with or without
1042 + * modification, are permitted provided that the following conditions are met:
1043 + * * Redistributions of source code must retain the above copyright
1044 + * notice, this list of conditions and the following disclaimer.
1045 + * * Redistributions in binary form must reproduce the above copyright
1046 + * notice, this list of conditions and the following disclaimer in the
1047 + * documentation and/or other materials provided with the distribution.
1048 + * * Neither the name of Freescale Semiconductor nor the
1049 + * names of its contributors may be used to endorse or promote products
1050 + * derived from this software without specific prior written permission.
1053 + * ALTERNATIVELY, this software may be distributed under the terms of the
1054 + * GNU General Public License ("GPL") as published by the Free Software
1055 + * Foundation, either version 2 of that License or (at your option) any
1058 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1059 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1060 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1061 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1062 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1063 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1064 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1065 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1066 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1067 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1070 +#undef TRACE_SYSTEM
1071 +#define TRACE_SYSTEM dpaa2_eth
1073 +#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
1074 +#define _DPAA2_ETH_TRACE_H
1076 +#include <linux/skbuff.h>
1077 +#include <linux/netdevice.h>
1078 +#include <linux/tracepoint.h>
1080 +#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u"
1081 +/* trace_printk format for raw buffer event class */
1082 +#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d"
1084 +/* This is used to declare a class of events.
1085 + * Individual events of this type will be defined below.
1088 +/* Store details about a frame descriptor */
1089 +DECLARE_EVENT_CLASS(dpaa2_eth_fd,
1090 + /* Trace function prototype */
1091 + TP_PROTO(struct net_device *netdev,
1092 + const struct dpaa2_fd *fd),
1094 + /* Repeat argument list here */
1095 + TP_ARGS(netdev, fd),
1097 + /* A structure containing the relevant information we want
1098 + * to record. Declare name and type for each normal element,
1099 + * name, type and size for arrays. Use __string for variable
1103 + __field(u64, fd_addr)
1104 + __field(u32, fd_len)
1105 + __field(u16, fd_offset)
1106 + __string(name, netdev->name)
1109 + /* The function that assigns values to the above declared
1113 + __entry->fd_addr = dpaa2_fd_get_addr(fd);
1114 + __entry->fd_len = dpaa2_fd_get_len(fd);
1115 + __entry->fd_offset = dpaa2_fd_get_offset(fd);
1116 + __assign_str(name, netdev->name);
1119 + /* This is what gets printed when the trace event is
1126 + __entry->fd_offset)
1129 +/* Now declare events of the above type. Format is:
1130 + * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
1133 +/* Tx (egress) fd */
1134 +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd,
1135 + TP_PROTO(struct net_device *netdev,
1136 + const struct dpaa2_fd *fd),
1138 + TP_ARGS(netdev, fd)
1142 +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
1143 + TP_PROTO(struct net_device *netdev,
1144 + const struct dpaa2_fd *fd),
1146 + TP_ARGS(netdev, fd)
1149 +/* Tx confirmation fd */
1150 +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
1151 + TP_PROTO(struct net_device *netdev,
1152 + const struct dpaa2_fd *fd),
1154 + TP_ARGS(netdev, fd)
1157 +/* Log data about raw buffers. Useful for tracing DPBP content. */
1158 +TRACE_EVENT(dpaa2_eth_buf_seed,
1159 + /* Trace function prototype */
1160 + TP_PROTO(struct net_device *netdev,
1161 + /* virtual address and size */
1164 + /* dma map address and size */
1165 + dma_addr_t dma_addr,
1167 + /* buffer pool id, if relevant */
1170 + /* Repeat argument list here */
1171 + TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
1173 + /* A structure containing the relevant information we want
1174 + * to record. Declare name and type for each normal element,
1175 + * name, type and size for arrays. Use __string for variable
1179 + __field(void *, vaddr)
1180 + __field(size_t, size)
1181 + __field(dma_addr_t, dma_addr)
1182 + __field(size_t, map_size)
1183 + __field(u16, bpid)
1184 + __string(name, netdev->name)
1187 + /* The function that assigns values to the above declared
1191 + __entry->vaddr = vaddr;
1192 + __entry->size = size;
1193 + __entry->dma_addr = dma_addr;
1194 + __entry->map_size = map_size;
1195 + __entry->bpid = bpid;
1196 + __assign_str(name, netdev->name);
1199 + /* This is what gets printed when the trace event is
1202 + TP_printk(TR_BUF_FMT,
1206 + &__entry->dma_addr,
1207 + __entry->map_size,
1211 +/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
1212 + * The syntax is the same as for DECLARE_EVENT_CLASS().
1215 +#endif /* _DPAA2_ETH_TRACE_H */
1217 +/* This must be outside ifdef _DPAA2_ETH_TRACE_H */
1218 +#undef TRACE_INCLUDE_PATH
1219 +#define TRACE_INCLUDE_PATH .
1220 +#undef TRACE_INCLUDE_FILE
1221 +#define TRACE_INCLUDE_FILE dpaa2-eth-trace
1222 +#include <trace/define_trace.h>
1223 diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
1224 new file mode 100644
1225 index 00000000..452eca52
1227 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
1229 +/* Copyright 2014-2015 Freescale Semiconductor Inc.
1231 + * Redistribution and use in source and binary forms, with or without
1232 + * modification, are permitted provided that the following conditions are met:
1233 + * * Redistributions of source code must retain the above copyright
1234 + * notice, this list of conditions and the following disclaimer.
1235 + * * Redistributions in binary form must reproduce the above copyright
1236 + * notice, this list of conditions and the following disclaimer in the
1237 + * documentation and/or other materials provided with the distribution.
1238 + * * Neither the name of Freescale Semiconductor nor the
1239 + * names of its contributors may be used to endorse or promote products
1240 + * derived from this software without specific prior written permission.
1243 + * ALTERNATIVELY, this software may be distributed under the terms of the
1244 + * GNU General Public License ("GPL") as published by the Free Software
1245 + * Foundation, either version 2 of that License or (at your option) any
1248 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1249 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1250 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1251 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1252 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1253 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1254 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1255 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1256 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1257 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1259 +#include <linux/init.h>
1260 +#include <linux/module.h>
1261 +#include <linux/platform_device.h>
1262 +#include <linux/etherdevice.h>
1263 +#include <linux/of_net.h>
1264 +#include <linux/interrupt.h>
1265 +#include <linux/debugfs.h>
1266 +#include <linux/kthread.h>
1267 +#include <linux/msi.h>
1268 +#include <linux/net_tstamp.h>
1269 +#include <linux/iommu.h>
1271 +#include "../../fsl-mc/include/dpbp.h"
1272 +#include "../../fsl-mc/include/dpcon.h"
1273 +#include "../../fsl-mc/include/mc.h"
1274 +#include "../../fsl-mc/include/mc-sys.h"
1275 +#include "dpaa2-eth.h"
1278 +/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
1279 + * using trace events only need to #include <trace/events/sched.h>
1281 +#define CREATE_TRACE_POINTS
1282 +#include "dpaa2-eth-trace.h"
1284 +MODULE_LICENSE("Dual BSD/GPL");
1285 +MODULE_AUTHOR("Freescale Semiconductor, Inc");
1286 +MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
1288 +const char dpaa2_eth_drv_version[] = "0.1";
1290 +void *dpaa2_eth_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr)
1292 + phys_addr_t phys_addr;
1294 + phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
1296 + return phys_to_virt(phys_addr);
1299 +static void validate_rx_csum(struct dpaa2_eth_priv *priv,
1301 + struct sk_buff *skb)
1303 + skb_checksum_none_assert(skb);
1305 + /* HW checksum validation is disabled, nothing to do here */
1306 + if (!(priv->net_dev->features & NETIF_F_RXCSUM))
1309 + /* Read checksum validation bits */
1310 + if (!((fd_status & DPAA2_FAS_L3CV) &&
1311 + (fd_status & DPAA2_FAS_L4CV)))
1314 + /* Inform the stack there's no need to compute L3/L4 csum anymore */
1315 + skb->ip_summed = CHECKSUM_UNNECESSARY;
1318 +/* Free a received FD.
1319 + * Not to be used for Tx conf FDs or on any other paths.
1321 +static void free_rx_fd(struct dpaa2_eth_priv *priv,
1322 + const struct dpaa2_fd *fd,
1325 + struct device *dev = priv->net_dev->dev.parent;
1326 + dma_addr_t addr = dpaa2_fd_get_addr(fd);
1327 + u8 fd_format = dpaa2_fd_get_format(fd);
1328 + struct dpaa2_sg_entry *sgt;
1332 + /* If single buffer frame, just free the data buffer */
1333 + if (fd_format == dpaa2_fd_single)
1335 + else if (fd_format != dpaa2_fd_sg)
1336 + /* we don't support any other format */
1339 + /* For S/G frames, we first need to free all SG entries */
1340 + sgt = vaddr + dpaa2_fd_get_offset(fd);
1341 + for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
1342 + addr = dpaa2_sg_get_addr(&sgt[i]);
1343 + sg_vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
1345 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
1348 + put_page(virt_to_head_page(sg_vaddr));
1350 + if (dpaa2_sg_is_final(&sgt[i]))
1355 + put_page(virt_to_head_page(vaddr));
1358 +/* Build a linear skb based on a single-buffer frame descriptor */
1359 +static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
1360 + struct dpaa2_eth_channel *ch,
1361 + const struct dpaa2_fd *fd,
1364 + struct sk_buff *skb = NULL;
1365 + u16 fd_offset = dpaa2_fd_get_offset(fd);
1366 + u32 fd_length = dpaa2_fd_get_len(fd);
1368 + skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
1369 + if (unlikely(!skb))
1372 + skb_reserve(skb, fd_offset);
1373 + skb_put(skb, fd_length);
1380 +/* Build a non linear (fragmented) skb based on a S/G table */
1381 +static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
1382 + struct dpaa2_eth_channel *ch,
1383 + struct dpaa2_sg_entry *sgt)
1385 + struct sk_buff *skb = NULL;
1386 + struct device *dev = priv->net_dev->dev.parent;
1388 + dma_addr_t sg_addr;
1391 + struct page *page, *head_page;
1395 + for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
1396 + struct dpaa2_sg_entry *sge = &sgt[i];
1398 + /* NOTE: We only support SG entries in dpaa2_sg_single format,
1399 + * but this is the only format we may receive from HW anyway
1402 + /* Get the address and length from the S/G entry */
1403 + sg_addr = dpaa2_sg_get_addr(sge);
1404 + sg_vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, sg_addr);
1405 + dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
1408 + sg_length = dpaa2_sg_get_len(sge);
1411 + /* We build the skb around the first data buffer */
1412 + skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
1413 + if (unlikely(!skb))
1416 + sg_offset = dpaa2_sg_get_offset(sge);
1417 + skb_reserve(skb, sg_offset);
1418 + skb_put(skb, sg_length);
1420 + /* Rest of the data buffers are stored as skb frags */
1421 + page = virt_to_page(sg_vaddr);
1422 + head_page = virt_to_head_page(sg_vaddr);
1424 + /* Offset in page (which may be compound).
1425 + * Data in subsequent SG entries is stored from the
1426 + * beginning of the buffer, so we don't need to add the
1429 + page_offset = ((unsigned long)sg_vaddr &
1430 + (PAGE_SIZE - 1)) +
1431 + (page_address(page) - page_address(head_page));
1433 + skb_add_rx_frag(skb, i - 1, head_page, page_offset,
1434 + sg_length, DPAA2_ETH_RX_BUF_SIZE);
1437 + if (dpaa2_sg_is_final(sge))
1441 + /* Count all data buffers + SG table buffer */
1442 + ch->buf_count -= i + 2;
1447 +/* Main Rx frame processing routine */
1448 +static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
1449 + struct dpaa2_eth_channel *ch,
1450 + const struct dpaa2_fd *fd,
1451 + struct napi_struct *napi,
1454 + dma_addr_t addr = dpaa2_fd_get_addr(fd);
1455 + u8 fd_format = dpaa2_fd_get_format(fd);
1457 + struct sk_buff *skb;
1458 + struct rtnl_link_stats64 *percpu_stats;
1459 + struct dpaa2_eth_drv_stats *percpu_extras;
1460 + struct device *dev = priv->net_dev->dev.parent;
1461 + struct dpaa2_fas *fas;
1465 + /* Tracing point */
1466 + trace_dpaa2_rx_fd(priv->net_dev, fd);
1468 + vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
1469 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
1471 + /* HWA - FAS, timestamp */
1472 + fas = dpaa2_eth_get_fas(vaddr);
1474 + /* data / SG table */
1475 + buf_data = vaddr + dpaa2_fd_get_offset(fd);
1476 + prefetch(buf_data);
1478 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
1479 + percpu_extras = this_cpu_ptr(priv->percpu_extras);
1481 + switch (fd_format) {
1482 + case dpaa2_fd_single:
1483 + skb = build_linear_skb(priv, ch, fd, vaddr);
1486 + skb = build_frag_skb(priv, ch, buf_data);
1487 + put_page(virt_to_head_page(vaddr));
1488 + percpu_extras->rx_sg_frames++;
1489 + percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
1492 + /* We don't support any other format */
1493 + goto err_frame_format;
1496 + if (unlikely(!skb))
1497 + goto err_build_skb;
1499 + prefetch(skb->data);
1501 + /* Get the timestamp value */
1502 + if (priv->ts_rx_en) {
1503 + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
1504 + u64 *ns = (u64 *)dpaa2_eth_get_ts(vaddr);
1506 + *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
1507 + memset(shhwtstamps, 0, sizeof(*shhwtstamps));
1508 + shhwtstamps->hwtstamp = ns_to_ktime(*ns);
1511 + /* Check if we need to validate the L4 csum */
1512 + if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
1513 + status = le32_to_cpu(fas->status);
1514 + validate_rx_csum(priv, status, skb);
1517 + skb->protocol = eth_type_trans(skb, priv->net_dev);
1519 + /* Record Rx queue - this will be used when picking a Tx queue to
1520 + * forward the frames. We're keeping flow affinity through the
1523 + skb_record_rx_queue(skb, queue_id);
1525 + percpu_stats->rx_packets++;
1526 + percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
1528 + napi_gro_receive(napi, skb);
1533 + free_rx_fd(priv, fd, vaddr);
1535 + percpu_stats->rx_dropped++;
1538 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
1539 +/* Processing of Rx frames received on the error FQ
1540 + * We check and print the error bits and then free the frame
1542 +static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
1543 + struct dpaa2_eth_channel *ch,
1544 + const struct dpaa2_fd *fd,
1545 + struct napi_struct *napi __always_unused,
1546 + u16 queue_id __always_unused)
1548 + struct device *dev = priv->net_dev->dev.parent;
1549 + dma_addr_t addr = dpaa2_fd_get_addr(fd);
1551 + struct rtnl_link_stats64 *percpu_stats;
1552 + struct dpaa2_fas *fas;
1554 + bool check_fas_errors = false;
1556 + vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
1557 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
1559 + /* check frame errors in the FD field */
1560 + if (fd->simple.ctrl & DPAA2_FD_RX_ERR_MASK) {
1561 + check_fas_errors = !!(fd->simple.ctrl & FD_CTRL_FAERR) &&
1562 + !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
1563 + if (net_ratelimit())
1564 + netdev_dbg(priv->net_dev, "Rx frame FD err: %x08\n",
1565 + fd->simple.ctrl & DPAA2_FD_RX_ERR_MASK);
1568 + /* check frame errors in the FAS field */
1569 + if (check_fas_errors) {
1570 + fas = dpaa2_eth_get_fas(vaddr);
1571 + status = le32_to_cpu(fas->status);
1572 + if (net_ratelimit())
1573 + netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n",
1574 + status & DPAA2_FAS_RX_ERR_MASK);
1576 + free_rx_fd(priv, fd, vaddr);
1578 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
1579 + percpu_stats->rx_errors++;
1583 +/* Consume all frames pull-dequeued into the store. This is the simplest way to
1584 + * make sure we don't accidentally issue another volatile dequeue which would
1585 + * overwrite (leak) frames already in the store.
1587 + * The number of frames is returned using the last 2 output arguments,
1588 + * separately for Rx and Tx confirmations.
1590 + * Observance of NAPI budget is not our concern, leaving that to the caller.
1592 +static bool consume_frames(struct dpaa2_eth_channel *ch, int *rx_cleaned,
1593 + int *tx_conf_cleaned)
1595 + struct dpaa2_eth_priv *priv = ch->priv;
1596 + struct dpaa2_eth_fq *fq = NULL;
1597 + struct dpaa2_dq *dq;
1598 + const struct dpaa2_fd *fd;
1603 + dq = dpaa2_io_store_next(ch->store, &is_last);
1604 + if (unlikely(!dq)) {
1605 + /* If we're here, we *must* have placed a
1606 + * volatile dequeue command, so keep reading through
1607 + * the store until we get some sort of valid response
1608 + * token (either a valid frame or an "empty dequeue")
1613 + fd = dpaa2_dq_fd(dq);
1615 + /* prefetch the frame descriptor */
1618 + fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
1619 + fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
1621 + } while (!is_last);
1626 + /* All frames brought in store by a volatile dequeue
1627 + * come from the same queue
1629 + if (fq->type == DPAA2_TX_CONF_FQ)
1630 + *tx_conf_cleaned += cleaned;
1632 + *rx_cleaned += cleaned;
1634 + fq->stats.frames += cleaned;
1635 + ch->stats.frames += cleaned;
1640 +/* Configure the egress frame annotation for timestamp update */
1641 +static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
1643 + struct dpaa2_faead *faead;
1647 + /* Mark the egress frame annotation area as valid */
1648 + frc = dpaa2_fd_get_frc(fd);
1649 + dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
1651 + /* enable UPD (update prepended data) bit in FAEAD field of
1652 + * hardware frame annotation area
1654 + ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
1655 + faead = dpaa2_eth_get_faead(buf_start);
1656 + faead->ctrl = cpu_to_le32(ctrl);
1659 +/* Create a frame descriptor based on a fragmented skb */
1660 +static int build_sg_fd(struct dpaa2_eth_priv *priv,
1661 + struct sk_buff *skb,
1662 + struct dpaa2_fd *fd)
1664 + struct device *dev = priv->net_dev->dev.parent;
1665 + void *sgt_buf = NULL;
1667 + int nr_frags = skb_shinfo(skb)->nr_frags;
1668 + struct dpaa2_sg_entry *sgt;
1671 + struct scatterlist *scl, *crt_scl;
1674 + struct dpaa2_fas *fas;
1675 + struct dpaa2_eth_swa *swa;
1677 + /* Create and map scatterlist.
1678 + * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
1679 + * to go beyond nr_frags+1.
1680 + * Note: We don't support chained scatterlists
1682 + if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
1685 + scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
1686 + if (unlikely(!scl))
1689 + sg_init_table(scl, nr_frags + 1);
1690 + num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
1691 + num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE);
1692 + if (unlikely(!num_dma_bufs)) {
1694 + goto dma_map_sg_failed;
1697 + /* Prepare the HW SGT structure */
1698 + sgt_buf_size = priv->tx_data_offset +
1699 + sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
1700 + sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
1701 + if (unlikely(!sgt_buf)) {
1703 + goto sgt_buf_alloc_failed;
1705 + sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
1707 + /* PTA from egress side is passed as is to the confirmation side so
1708 + * we need to clear some fields here in order to find consistent values
1709 + * on TX confirmation. We are clearing FAS (Frame Annotation Status)
1710 + * field from the hardware annotation area
1712 + fas = dpaa2_eth_get_fas(sgt_buf);
1713 + memset(fas, 0, DPAA2_FAS_SIZE);
1715 + sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
1717 + /* Fill in the HW SGT structure.
1719 + * sgt_buf is zeroed out, so the following fields are implicit
1720 + * in all sgt entries:
1722 + * - format is 'dpaa2_sg_single'
1724 + for_each_sg(scl, crt_scl, num_dma_bufs, i) {
1725 + dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
1726 + dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
1728 + dpaa2_sg_set_final(&sgt[i - 1], true);
1730 + /* Store the skb backpointer in the SGT buffer.
1731 + * Fit the scatterlist and the number of buffers alongside the
1732 + * skb backpointer in the software annotation area. We'll need
1733 + * all of them on Tx Conf.
1735 + swa = (struct dpaa2_eth_swa *)sgt_buf;
1738 + swa->num_sg = num_sg;
1739 + swa->num_dma_bufs = num_dma_bufs;
1741 + /* Separately map the SGT buffer */
1742 + addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
1743 + if (unlikely(dma_mapping_error(dev, addr))) {
1745 + goto dma_map_single_failed;
1747 + dpaa2_fd_set_offset(fd, priv->tx_data_offset);
1748 + dpaa2_fd_set_format(fd, dpaa2_fd_sg);
1749 + dpaa2_fd_set_addr(fd, addr);
1750 + dpaa2_fd_set_len(fd, skb->len);
1752 + fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA | FD_CTRL_PTV1;
1754 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
1755 + enable_tx_tstamp(fd, sgt_buf);
1759 +dma_map_single_failed:
1761 +sgt_buf_alloc_failed:
1762 + dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
1768 +/* Create a frame descriptor based on a linear skb */
1769 +static int build_single_fd(struct dpaa2_eth_priv *priv,
1770 + struct sk_buff *skb,
1771 + struct dpaa2_fd *fd)
1773 + struct device *dev = priv->net_dev->dev.parent;
1775 + struct sk_buff **skbh;
1777 + struct dpaa2_fas *fas;
1779 + buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
1780 + DPAA2_ETH_TX_BUF_ALIGN,
1781 + DPAA2_ETH_TX_BUF_ALIGN);
1783 + /* PTA from egress side is passed as is to the confirmation side so
1784 + * we need to clear some fields here in order to find consistent values
1785 + * on TX confirmation. We are clearing FAS (Frame Annotation Status)
1786 + * field from the hardware annotation area
1788 + fas = dpaa2_eth_get_fas(buffer_start);
1789 + memset(fas, 0, DPAA2_FAS_SIZE);
1791 + /* Store a backpointer to the skb at the beginning of the buffer
1792 + * (in the private data area) such that we can release it
1795 + skbh = (struct sk_buff **)buffer_start;
1798 + addr = dma_map_single(dev, buffer_start,
1799 + skb_tail_pointer(skb) - buffer_start,
1800 + DMA_BIDIRECTIONAL);
1801 + if (unlikely(dma_mapping_error(dev, addr)))
1804 + dpaa2_fd_set_addr(fd, addr);
1805 + dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
1806 + dpaa2_fd_set_len(fd, skb->len);
1807 + dpaa2_fd_set_format(fd, dpaa2_fd_single);
1809 + fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA | FD_CTRL_PTV1;
1811 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
1812 + enable_tx_tstamp(fd, buffer_start);
1817 +/* FD freeing routine on the Tx path
1819 + * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
1820 + * back-pointed to is also freed.
1821 + * This can be called either from dpaa2_eth_tx_conf() or on the error path of
1823 + * Optionally, return the frame annotation status word (FAS), which needs
1824 + * to be checked if we're on the confirmation path.
1826 +static void free_tx_fd(const struct dpaa2_eth_priv *priv,
1827 + const struct dpaa2_fd *fd,
1830 + struct device *dev = priv->net_dev->dev.parent;
1831 + dma_addr_t fd_addr;
1832 + struct sk_buff **skbh, *skb;
1833 + unsigned char *buffer_start;
1835 + struct scatterlist *scl;
1836 + int num_sg, num_dma_bufs;
1837 + struct dpaa2_eth_swa *swa;
1838 + u8 fd_format = dpaa2_fd_get_format(fd);
1839 + struct dpaa2_fas *fas;
1841 + fd_addr = dpaa2_fd_get_addr(fd);
1842 + skbh = dpaa2_eth_iova_to_virt(priv->iommu_domain, fd_addr);
1844 + /* HWA - FAS, timestamp (for Tx confirmation frames) */
1845 + fas = dpaa2_eth_get_fas(skbh);
1848 + switch (fd_format) {
1849 + case dpaa2_fd_single:
1851 + buffer_start = (unsigned char *)skbh;
1852 + /* Accessing the skb buffer is safe before dma unmap, because
1853 + * we didn't map the actual skb shell.
1855 + dma_unmap_single(dev, fd_addr,
1856 + skb_tail_pointer(skb) - buffer_start,
1857 + DMA_BIDIRECTIONAL);
1860 + swa = (struct dpaa2_eth_swa *)skbh;
1863 + num_sg = swa->num_sg;
1864 + num_dma_bufs = swa->num_dma_bufs;
1866 + /* Unmap the scatterlist */
1867 + dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
1870 + /* Unmap the SGT buffer */
1871 + unmap_size = priv->tx_data_offset +
1872 + sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
1873 + dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL);
1876 + /* Unsupported format, mark it as errored and give up */
1882 + /* Get the timestamp value */
1883 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1884 + struct skb_shared_hwtstamps shhwtstamps;
1887 + memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1889 + ns = (u64 *)dpaa2_eth_get_ts(skbh);
1890 + *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
1891 + shhwtstamps.hwtstamp = ns_to_ktime(*ns);
1892 + skb_tstamp_tx(skb, &shhwtstamps);
1895 + /* Read the status from the Frame Annotation after we unmap the first
1896 + * buffer but before we free it. The caller function is responsible
1897 + * for checking the status value.
1900 + *status = le32_to_cpu(fas->status);
1902 + /* Free SGT buffer kmalloc'ed on tx */
1903 + if (fd_format != dpaa2_fd_single)
1906 + /* Move on with skb release */
1907 + dev_kfree_skb(skb);
1910 +static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
1912 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1913 + struct device *dev = net_dev->dev.parent;
1914 + struct dpaa2_fd fd;
1915 + struct rtnl_link_stats64 *percpu_stats;
1916 + struct dpaa2_eth_drv_stats *percpu_extras;
1917 + struct dpaa2_eth_fq *fq;
1918 + u16 queue_mapping = skb_get_queue_mapping(skb);
1921 + /* If we're congested, stop this tx queue; transmission of the
1922 + * current skb happens regardless of congestion state
1924 + fq = &priv->fq[queue_mapping];
1926 + dma_sync_single_for_cpu(dev, priv->cscn_dma,
1927 + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
1928 + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
1929 + netif_stop_subqueue(net_dev, queue_mapping);
1930 + fq->stats.congestion_entry++;
1933 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
1934 + percpu_extras = this_cpu_ptr(priv->percpu_extras);
1936 + if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
1937 + struct sk_buff *ns;
1939 + ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
1940 + if (unlikely(!ns)) {
1941 + percpu_stats->tx_dropped++;
1942 + goto err_alloc_headroom;
1944 + dev_kfree_skb(skb);
1948 + /* We'll be holding a back-reference to the skb until Tx Confirmation;
1949 + * we don't want that overwritten by a concurrent Tx with a cloned skb.
1951 + skb = skb_unshare(skb, GFP_ATOMIC);
1952 + if (unlikely(!skb)) {
1953 + /* skb_unshare() has already freed the skb */
1954 + percpu_stats->tx_dropped++;
1955 + return NETDEV_TX_OK;
1958 + /* Setup the FD fields */
1959 + memset(&fd, 0, sizeof(fd));
1961 + if (skb_is_nonlinear(skb)) {
1962 + err = build_sg_fd(priv, skb, &fd);
1963 + percpu_extras->tx_sg_frames++;
1964 + percpu_extras->tx_sg_bytes += skb->len;
1966 + err = build_single_fd(priv, skb, &fd);
1969 + if (unlikely(err)) {
1970 + percpu_stats->tx_dropped++;
1971 + goto err_build_fd;
1974 + /* Tracing point */
1975 + trace_dpaa2_tx_fd(net_dev, &fd);
1977 + for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
1978 + err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
1979 + fq->tx_qdbin, &fd);
1980 + /* TODO: This doesn't work. Check on simulator.
1981 + * err = dpaa2_io_service_enqueue_fq(NULL,
1982 + * priv->fq[0].fqid_tx, &fd);
1984 + if (err != -EBUSY)
1987 + percpu_extras->tx_portal_busy += i;
1988 + if (unlikely(err < 0)) {
1989 + percpu_stats->tx_errors++;
1990 + /* Clean up everything, including freeing the skb */
1991 + free_tx_fd(priv, &fd, NULL);
1993 + percpu_stats->tx_packets++;
1994 + percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
1997 + return NETDEV_TX_OK;
2000 +err_alloc_headroom:
2001 + dev_kfree_skb(skb);
2003 + return NETDEV_TX_OK;
2006 +/* Tx confirmation frame processing routine */
2007 +static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
2008 + struct dpaa2_eth_channel *ch,
2009 + const struct dpaa2_fd *fd,
2010 + struct napi_struct *napi __always_unused,
2013 + struct device *dev = priv->net_dev->dev.parent;
2014 + struct rtnl_link_stats64 *percpu_stats;
2015 + struct dpaa2_eth_drv_stats *percpu_extras;
2017 + bool errors = !!(fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK);
2018 + bool check_fas_errors = false;
2020 + /* Tracing point */
2021 + trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
2023 + percpu_extras = this_cpu_ptr(priv->percpu_extras);
2024 + percpu_extras->tx_conf_frames++;
2025 + percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
2027 + /* Check congestion state and wake all queues if necessary */
2028 + if (unlikely(__netif_subqueue_stopped(priv->net_dev, queue_id))) {
2029 + dma_sync_single_for_cpu(dev, priv->cscn_dma,
2030 + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
2031 + if (!dpaa2_cscn_state_congested(priv->cscn_mem))
2032 + netif_tx_wake_all_queues(priv->net_dev);
2035 + /* check frame errors in the FD field */
2036 + if (unlikely(errors)) {
2037 + check_fas_errors = !!(fd->simple.ctrl & FD_CTRL_FAERR) &&
2038 + !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
2039 + if (net_ratelimit())
2040 + netdev_dbg(priv->net_dev, "Tx frame FD err: %x08\n",
2041 + fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK);
2044 + free_tx_fd(priv, fd, check_fas_errors ? &status : NULL);
2046 + /* if there are no errors, we're done */
2047 + if (likely(!errors))
2050 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
2051 + /* Tx-conf logically pertains to the egress path. */
2052 + percpu_stats->tx_errors++;
2054 + if (net_ratelimit())
2055 + netdev_dbg(priv->net_dev, "Tx frame FAS err: %x08\n",
2056 + status & DPAA2_FAS_TX_ERR_MASK);
2059 +static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
2063 + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
2064 + DPNI_OFF_RX_L3_CSUM, enable);
2066 + netdev_err(priv->net_dev,
2067 + "dpni_set_offload() DPNI_OFF_RX_L3_CSUM failed\n");
2071 + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
2072 + DPNI_OFF_RX_L4_CSUM, enable);
2074 + netdev_err(priv->net_dev,
2075 + "dpni_set_offload() DPNI_OFF_RX_L4_CSUM failed\n");
2082 +static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
2086 + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
2087 + DPNI_OFF_TX_L3_CSUM, enable);
2089 + netdev_err(priv->net_dev,
2090 + "dpni_set_offload() DPNI_OFF_RX_L3_CSUM failed\n");
2094 + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
2095 + DPNI_OFF_TX_L4_CSUM, enable);
2097 + netdev_err(priv->net_dev,
2098 + "dpni_set_offload() DPNI_OFF_RX_L4_CSUM failed\n");
2105 +/* Perform a single release command to add buffers
2106 + * to the specified buffer pool
2108 +static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
2110 + struct device *dev = priv->net_dev->dev.parent;
2111 + u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
2116 + for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
2117 + /* Allocate buffer visible to WRIOP + skb shared info +
2118 + * alignment padding.
2120 + buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE(priv));
2121 + if (unlikely(!buf))
2124 + buf = PTR_ALIGN(buf, priv->rx_buf_align);
2126 + addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
2128 + if (unlikely(dma_mapping_error(dev, addr)))
2131 + buf_array[i] = addr;
2133 + /* tracing point */
2134 + trace_dpaa2_eth_buf_seed(priv->net_dev,
2135 + buf, DPAA2_ETH_BUF_RAW_SIZE(priv),
2136 + addr, DPAA2_ETH_RX_BUF_SIZE,
2141 + /* In case the portal is busy, retry until successful.
2142 + * The buffer release function would only fail if the QBMan portal
2143 + * was busy, which implies portal contention (i.e. more CPUs than
2144 + * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
2145 + * there is little we can realistically do, short of giving up -
2146 + * in which case we'd risk depleting the buffer pool and never again
2147 + * receiving the Rx interrupt which would kick-start the refill logic.
2148 + * So just keep retrying, at the risk of being moved to ksoftirqd.
2150 + while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
2155 + put_page(virt_to_head_page(buf));
2158 + goto release_bufs;
2163 +static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
2168 + /* This is the lazy seeding of Rx buffer pools.
2169 + * add_bufs() is also used on the Rx hotpath and calls
2170 + * napi_alloc_frag(). The trouble with that is that it in turn ends up
2171 + * calling this_cpu_ptr(), which mandates execution in atomic context.
2172 + * Rather than splitting up the code, do a one-off preempt disable.
2174 + preempt_disable();
2175 + for (j = 0; j < priv->num_channels; j++) {
2176 + priv->channel[j]->buf_count = 0;
2177 + for (i = 0; i < priv->num_bufs;
2178 + i += DPAA2_ETH_BUFS_PER_CMD) {
2179 + new_count = add_bufs(priv, bpid);
2180 + priv->channel[j]->buf_count += new_count;
2182 + if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
2194 + * Drain the specified number of buffers from the DPNI's private buffer pool.
2195 + * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
2197 +static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
2199 + struct device *dev = priv->net_dev->dev.parent;
2200 + u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
2205 + ret = dpaa2_io_service_acquire(NULL, priv->bpid,
2206 + buf_array, count);
2208 + netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
2211 + for (i = 0; i < ret; i++) {
2212 + /* Same logic as on regular Rx path */
2213 + vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain,
2215 + dma_unmap_single(dev, buf_array[i],
2216 + DPAA2_ETH_RX_BUF_SIZE,
2218 + put_page(virt_to_head_page(vaddr));
2223 +static void drain_pool(struct dpaa2_eth_priv *priv)
2225 + preempt_disable();
2226 + drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
2227 + drain_bufs(priv, 1);
2231 +/* Function is called from softirq context only, so we don't need to guard
2232 + * the access to percpu count
2234 +static int refill_pool(struct dpaa2_eth_priv *priv,
2235 + struct dpaa2_eth_channel *ch,
2240 + if (likely(ch->buf_count >= priv->refill_thresh))
2244 + new_count = add_bufs(priv, bpid);
2245 + if (unlikely(!new_count)) {
2246 + /* Out of memory; abort for now, we'll try later on */
2249 + ch->buf_count += new_count;
2250 + } while (ch->buf_count < priv->num_bufs);
2252 + if (unlikely(ch->buf_count < priv->num_bufs))
2258 +static int pull_channel(struct dpaa2_eth_channel *ch)
2261 + int dequeues = -1;
2263 + /* Retry while portal is busy */
2265 + err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
2268 + } while (err == -EBUSY);
2270 + ch->stats.dequeue_portal_busy += dequeues;
2271 + if (unlikely(err))
2272 + ch->stats.pull_err++;
2277 +/* NAPI poll routine
2279 + * Frames are dequeued from the QMan channel associated with this NAPI context.
2280 + * Rx and (if configured) Rx error frames count towards the NAPI budget. Tx
2281 + * confirmation frames are limited by a threshold per NAPI poll cycle.
2283 +static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
2285 + struct dpaa2_eth_channel *ch;
2286 + int rx_cleaned = 0, tx_conf_cleaned = 0;
2287 + bool store_cleaned;
2288 + struct dpaa2_eth_priv *priv;
2291 + ch = container_of(napi, struct dpaa2_eth_channel, napi);
2295 + err = pull_channel(ch);
2296 + if (unlikely(err))
2299 + /* Refill pool if appropriate */
2300 + refill_pool(priv, ch, priv->bpid);
2302 + store_cleaned = consume_frames(ch, &rx_cleaned,
2303 + &tx_conf_cleaned);
2305 + /* If we've either consumed the budget with Rx frames,
2306 + * or reached the Tx conf threshold, we're done.
2308 + if (rx_cleaned >= budget ||
2309 + tx_conf_cleaned >= TX_CONF_PER_NAPI_POLL)
2311 + } while (store_cleaned);
2313 + /* We didn't consume the entire budget, finish napi and
2314 + * re-enable data availability notifications.
2316 + napi_complete(napi);
2318 + err = dpaa2_io_service_rearm(NULL, &ch->nctx);
2320 + } while (err == -EBUSY);
2322 + return max(rx_cleaned, 1);
2325 +static void enable_ch_napi(struct dpaa2_eth_priv *priv)
2327 + struct dpaa2_eth_channel *ch;
2330 + for (i = 0; i < priv->num_channels; i++) {
2331 + ch = priv->channel[i];
2332 + napi_enable(&ch->napi);
2336 +static void disable_ch_napi(struct dpaa2_eth_priv *priv)
2338 + struct dpaa2_eth_channel *ch;
2341 + for (i = 0; i < priv->num_channels; i++) {
2342 + ch = priv->channel[i];
2343 + napi_disable(&ch->napi);
2347 +static int link_state_update(struct dpaa2_eth_priv *priv)
2349 + struct dpni_link_state state;
2352 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
2353 + if (unlikely(err)) {
2354 + netdev_err(priv->net_dev,
2355 + "dpni_get_link_state() failed\n");
2359 + /* Check link state; speed / duplex changes are not treated yet */
2360 + if (priv->link_state.up == state.up)
2363 + priv->link_state = state;
2365 + netif_carrier_on(priv->net_dev);
2366 + netif_tx_start_all_queues(priv->net_dev);
2368 + netif_tx_stop_all_queues(priv->net_dev);
2369 + netif_carrier_off(priv->net_dev);
2372 + netdev_info(priv->net_dev, "Link Event: state %s",
2373 + state.up ? "up" : "down");
2378 +static int dpaa2_eth_open(struct net_device *net_dev)
2380 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2383 + /* We'll only start the txqs when the link is actually ready; make sure
2384 + * we don't race against the link up notification, which may come
2385 + * immediately after dpni_enable();
2387 + netif_tx_stop_all_queues(net_dev);
2389 + /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
2390 + * return true and cause 'ip link show' to report the LOWER_UP flag,
2391 + * even though the link notification wasn't even received.
2393 + netif_carrier_off(net_dev);
2395 + err = seed_pool(priv, priv->bpid);
2397 + /* Not much to do; the buffer pool, though not filled up,
2398 + * may still contain some buffers which would enable us
2401 + netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
2402 + priv->dpbp_dev->obj_desc.id, priv->bpid);
2405 + if (priv->tx_pause_frames)
2406 + priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD;
2408 + priv->refill_thresh = DPAA2_ETH_REFILL_THRESH_TD;
2410 + err = dpni_enable(priv->mc_io, 0, priv->mc_token);
2412 + netdev_err(net_dev, "dpni_enable() failed\n");
2416 + /* If the DPMAC object has already processed the link up interrupt,
2417 + * we have to learn the link state ourselves.
2419 + err = link_state_update(priv);
2421 + netdev_err(net_dev, "Can't update link state\n");
2422 + goto link_state_err;
2429 + priv->refill_thresh = 0;
2434 +static int dpaa2_eth_stop(struct net_device *net_dev)
2436 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2438 + int retries = 10, i;
2440 + netif_tx_stop_all_queues(net_dev);
2441 + netif_carrier_off(net_dev);
2443 + /* Loop while dpni_disable() attempts to drain the egress FQs
2444 + * and confirm them back to us.
2447 + dpni_disable(priv->mc_io, 0, priv->mc_token);
2448 + dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
2450 + /* Allow the MC some slack */
2452 + } while (dpni_enabled && --retries);
2454 + netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
2455 + /* Must go on and disable NAPI nonetheless, so we don't crash at
2456 + * the next "ifconfig up"
2460 + priv->refill_thresh = 0;
2462 + /* Wait for all running napi poll routines to finish, so that no
2463 + * new refill operations are started.
2465 + for (i = 0; i < priv->num_channels; i++)
2466 + napi_synchronize(&priv->channel[i]->napi);
2468 + /* Empty the buffer pool */
2474 +static int dpaa2_eth_init(struct net_device *net_dev)
2476 + u64 supported = 0;
2477 + u64 not_supported = 0;
2478 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2479 + u32 options = priv->dpni_attrs.options;
2481 + /* Capabilities listing */
2482 + supported |= IFF_LIVE_ADDR_CHANGE;
2484 + if (options & DPNI_OPT_NO_MAC_FILTER)
2485 + not_supported |= IFF_UNICAST_FLT;
2487 + supported |= IFF_UNICAST_FLT;
2489 + net_dev->priv_flags |= supported;
2490 + net_dev->priv_flags &= ~not_supported;
2493 + net_dev->features = NETIF_F_RXCSUM |
2494 + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2495 + NETIF_F_SG | NETIF_F_HIGHDMA |
2497 + net_dev->hw_features = net_dev->features;
2502 +static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
2504 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2505 + struct device *dev = net_dev->dev.parent;
2508 + err = eth_mac_addr(net_dev, addr);
2510 + dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
2514 + err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2515 + net_dev->dev_addr);
2517 + dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
2524 +/** Fill in counters maintained by the GPP driver. These may be different from
2525 + * the hardware counters obtained by ethtool.
2527 +static void dpaa2_eth_get_stats(struct net_device *net_dev,
2528 + struct rtnl_link_stats64 *stats)
2530 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2531 + struct rtnl_link_stats64 *percpu_stats;
2533 + u64 *netstats = (u64 *)stats;
2535 + int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
2537 + for_each_possible_cpu(i) {
2538 + percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
2539 + cpustats = (u64 *)percpu_stats;
2540 + for (j = 0; j < num; j++)
2541 + netstats[j] += cpustats[j];
2545 +static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
2547 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2550 + /* Set the maximum Rx frame length to match the transmit side;
2551 + * account for L2 headers when computing the MFL
2553 + err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
2554 + (u16)DPAA2_ETH_L2_MAX_FRM(mtu));
2556 + netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
2560 + net_dev->mtu = mtu;
2564 +/* Copy mac unicast addresses from @net_dev to @priv.
2565 + * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
2567 +static void add_uc_hw_addr(const struct net_device *net_dev,
2568 + struct dpaa2_eth_priv *priv)
2570 + struct netdev_hw_addr *ha;
2573 + netdev_for_each_uc_addr(ha, net_dev) {
2574 + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
2577 + netdev_warn(priv->net_dev,
2578 + "Could not add ucast MAC %pM to the filtering table (err %d)\n",
2583 +/* Copy mac multicast addresses from @net_dev to @priv
2584 + * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
2586 +static void add_mc_hw_addr(const struct net_device *net_dev,
2587 + struct dpaa2_eth_priv *priv)
2589 + struct netdev_hw_addr *ha;
2592 + netdev_for_each_mc_addr(ha, net_dev) {
2593 + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
2596 + netdev_warn(priv->net_dev,
2597 + "Could not add mcast MAC %pM to the filtering table (err %d)\n",
2602 +static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
2604 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2605 + int uc_count = netdev_uc_count(net_dev);
2606 + int mc_count = netdev_mc_count(net_dev);
2607 + u8 max_mac = priv->dpni_attrs.mac_filter_entries;
2608 + u32 options = priv->dpni_attrs.options;
2609 + u16 mc_token = priv->mc_token;
2610 + struct fsl_mc_io *mc_io = priv->mc_io;
2613 + /* Basic sanity checks; these probably indicate a misconfiguration */
2614 + if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
2615 + netdev_info(net_dev,
2616 + "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
2619 + /* Force promiscuous if the uc or mc counts exceed our capabilities. */
2620 + if (uc_count > max_mac) {
2621 + netdev_info(net_dev,
2622 + "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
2623 + uc_count, max_mac);
2624 + goto force_promisc;
2626 + if (mc_count + uc_count > max_mac) {
2627 + netdev_info(net_dev,
2628 + "Unicast + Multicast addr count reached %d, max allowed is %d; forcing promisc\n",
2629 + uc_count + mc_count, max_mac);
2630 + goto force_mc_promisc;
2633 + /* Adjust promisc settings due to flag combinations */
2634 + if (net_dev->flags & IFF_PROMISC)
2635 + goto force_promisc;
2636 + if (net_dev->flags & IFF_ALLMULTI) {
2637 + /* First, rebuild unicast filtering table. This should be done
2638 + * in promisc mode, in order to avoid frame loss while we
2639 + * progressively add entries to the table.
2640 + * We don't know whether we had been in promisc already, and
2641 + * making an MC call to find out is expensive; so set uc promisc
2644 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2646 + netdev_warn(net_dev, "Can't set uc promisc\n");
2648 + /* Actual uc table reconstruction. */
2649 + err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
2651 + netdev_warn(net_dev, "Can't clear uc filters\n");
2652 + add_uc_hw_addr(net_dev, priv);
2654 + /* Finally, clear uc promisc and set mc promisc as requested. */
2655 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2657 + netdev_warn(net_dev, "Can't clear uc promisc\n");
2658 + goto force_mc_promisc;
2661 + /* Neither unicast, nor multicast promisc will be on... eventually.
2662 + * For now, rebuild mac filtering tables while forcing both of them on.
2664 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2666 + netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
2667 + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2669 + netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
2671 + /* Actual mac filtering tables reconstruction */
2672 + err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
2674 + netdev_warn(net_dev, "Can't clear mac filters\n");
2675 + add_mc_hw_addr(net_dev, priv);
2676 + add_uc_hw_addr(net_dev, priv);
2678 + /* Now we can clear both ucast and mcast promisc, without risking
2679 + * to drop legitimate frames anymore.
2681 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2683 + netdev_warn(net_dev, "Can't clear ucast promisc\n");
2684 + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
2686 + netdev_warn(net_dev, "Can't clear mcast promisc\n");
2691 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2693 + netdev_warn(net_dev, "Can't set ucast promisc\n");
2695 + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2697 + netdev_warn(net_dev, "Can't set mcast promisc\n");
2700 +static int dpaa2_eth_set_features(struct net_device *net_dev,
2701 + netdev_features_t features)
2703 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2704 + netdev_features_t changed = features ^ net_dev->features;
2708 + if (changed & NETIF_F_RXCSUM) {
2709 + enable = !!(features & NETIF_F_RXCSUM);
2710 + err = set_rx_csum(priv, enable);
2715 + if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
2716 + enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
2717 + err = set_tx_csum(priv, enable);
2725 +static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2727 + struct dpaa2_eth_priv *priv = netdev_priv(dev);
2728 + struct hwtstamp_config config;
2730 + if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
2733 + switch (config.tx_type) {
2734 + case HWTSTAMP_TX_OFF:
2735 + priv->ts_tx_en = false;
2737 + case HWTSTAMP_TX_ON:
2738 + priv->ts_tx_en = true;
2744 + if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
2745 + priv->ts_rx_en = false;
2747 + priv->ts_rx_en = true;
2748 + /* TS is set for all frame types, not only those requested */
2749 + config.rx_filter = HWTSTAMP_FILTER_ALL;
2752 + return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
2756 +static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2758 + if (cmd == SIOCSHWTSTAMP)
2759 + return dpaa2_eth_ts_ioctl(dev, rq, cmd);
2764 +static const struct net_device_ops dpaa2_eth_ops = {
2765 + .ndo_open = dpaa2_eth_open,
2766 + .ndo_start_xmit = dpaa2_eth_tx,
2767 + .ndo_stop = dpaa2_eth_stop,
2768 + .ndo_init = dpaa2_eth_init,
2769 + .ndo_set_mac_address = dpaa2_eth_set_addr,
2770 + .ndo_get_stats64 = dpaa2_eth_get_stats,
2771 + .ndo_change_mtu = dpaa2_eth_change_mtu,
2772 + .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
2773 + .ndo_set_features = dpaa2_eth_set_features,
2774 + .ndo_do_ioctl = dpaa2_eth_ioctl,
2777 +static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
2779 + struct dpaa2_eth_channel *ch;
2781 + ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
2783 + /* Update NAPI statistics */
2786 + napi_schedule_irqoff(&ch->napi);
2789 +/* Allocate and configure a DPCON object */
2790 +static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
2792 + struct fsl_mc_device *dpcon;
2793 + struct device *dev = priv->net_dev->dev.parent;
2794 + struct dpcon_attr attrs;
2797 + err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
2798 + FSL_MC_POOL_DPCON, &dpcon);
2800 + dev_info(dev, "Not enough DPCONs, will go on as-is\n");
2804 + err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
2806 + dev_err(dev, "dpcon_open() failed\n");
2810 + err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
2812 + dev_err(dev, "dpcon_reset() failed\n");
2816 + err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
2818 + dev_err(dev, "dpcon_get_attributes() failed\n");
2819 + goto err_get_attr;
2822 + err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
2824 + dev_err(dev, "dpcon_enable() failed\n");
2833 + dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2835 + fsl_mc_object_free(dpcon);
2840 +static void free_dpcon(struct dpaa2_eth_priv *priv,
2841 + struct fsl_mc_device *dpcon)
2843 + dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
2844 + dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2845 + fsl_mc_object_free(dpcon);
2848 +static struct dpaa2_eth_channel *
2849 +alloc_channel(struct dpaa2_eth_priv *priv)
2851 + struct dpaa2_eth_channel *channel;
2852 + struct dpcon_attr attr;
2853 + struct device *dev = priv->net_dev->dev.parent;
2856 + channel = kzalloc(sizeof(*channel), GFP_KERNEL);
2860 + channel->dpcon = setup_dpcon(priv);
2861 + if (!channel->dpcon)
2864 + err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
2867 + dev_err(dev, "dpcon_get_attributes() failed\n");
2868 + goto err_get_attr;
2871 + channel->dpcon_id = attr.id;
2872 + channel->ch_id = attr.qbman_ch_id;
2873 + channel->priv = priv;
2878 + free_dpcon(priv, channel->dpcon);
2884 +static void free_channel(struct dpaa2_eth_priv *priv,
2885 + struct dpaa2_eth_channel *channel)
2887 + free_dpcon(priv, channel->dpcon);
2891 +/* DPIO setup: allocate and configure QBMan channels, setup core affinity
2892 + * and register data availability notifications
2894 +static int setup_dpio(struct dpaa2_eth_priv *priv)
2896 + struct dpaa2_io_notification_ctx *nctx;
2897 + struct dpaa2_eth_channel *channel;
2898 + struct dpcon_notification_cfg dpcon_notif_cfg;
2899 + struct device *dev = priv->net_dev->dev.parent;
2902 + /* We want the ability to spread ingress traffic (RX, TX conf) to as
2903 + * many cores as possible, so we need one channel for each core
2904 + * (unless there's fewer queues than cores, in which case the extra
2905 + * channels would be wasted).
2906 + * Allocate one channel per core and register it to the core's
2907 + * affine DPIO. If not enough channels are available for all cores
2908 + * or if some cores don't have an affine DPIO, there will be no
2909 + * ingress frame processing on those cores.
2911 + cpumask_clear(&priv->dpio_cpumask);
2912 + for_each_online_cpu(i) {
2913 + /* Try to allocate a channel */
2914 + channel = alloc_channel(priv);
2917 + "No affine channel for cpu %d and above\n", i);
2918 + goto err_alloc_ch;
2921 + priv->channel[priv->num_channels] = channel;
2923 + nctx = &channel->nctx;
2924 + nctx->is_cdan = 1;
2925 + nctx->cb = cdan_cb;
2926 + nctx->id = channel->ch_id;
2927 + nctx->desired_cpu = i;
2929 + /* Register the new context */
2930 + err = dpaa2_io_service_register(NULL, nctx);
2932 + dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
2933 + /* If no affine DPIO for this core, there's probably
2934 + * none available for next cores either.
2936 + goto err_service_reg;
2939 + /* Register DPCON notification with MC */
2940 + dpcon_notif_cfg.dpio_id = nctx->dpio_id;
2941 + dpcon_notif_cfg.priority = 0;
2942 + dpcon_notif_cfg.user_ctx = nctx->qman64;
2943 + err = dpcon_set_notification(priv->mc_io, 0,
2944 + channel->dpcon->mc_handle,
2945 + &dpcon_notif_cfg);
2947 + dev_err(dev, "dpcon_set_notification failed()\n");
2948 + goto err_set_cdan;
2951 + /* If we managed to allocate a channel and also found an affine
2952 + * DPIO for this core, add it to the final mask
2954 + cpumask_set_cpu(i, &priv->dpio_cpumask);
2955 + priv->num_channels++;
2957 + /* Stop if we already have enough channels to accommodate all
2958 + * RX and TX conf queues
2960 + if (priv->num_channels == dpaa2_eth_queue_count(priv))
2964 + /* Tx confirmation queues can only be serviced by cpus
2965 + * with an affine DPIO/channel
2967 + cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask);
2972 + dpaa2_io_service_deregister(NULL, nctx);
2974 + free_channel(priv, channel);
2976 + if (cpumask_empty(&priv->dpio_cpumask)) {
2977 + dev_dbg(dev, "No cpu with an affine DPIO/DPCON\n");
2980 + cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask);
2982 + dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
2983 + cpumask_pr_args(&priv->dpio_cpumask));
2988 +static void free_dpio(struct dpaa2_eth_priv *priv)
2991 + struct dpaa2_eth_channel *ch;
2993 + /* deregister CDAN notifications and free channels */
2994 + for (i = 0; i < priv->num_channels; i++) {
2995 + ch = priv->channel[i];
2996 + dpaa2_io_service_deregister(NULL, &ch->nctx);
2997 + free_channel(priv, ch);
3001 +static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
3004 + struct device *dev = priv->net_dev->dev.parent;
3007 + for (i = 0; i < priv->num_channels; i++)
3008 + if (priv->channel[i]->nctx.desired_cpu == cpu)
3009 + return priv->channel[i];
3011 + /* We should never get here. Issue a warning and return
3012 + * the first channel, because it's still better than nothing
3014 + dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
3016 + return priv->channel[0];
3019 +static void set_fq_affinity(struct dpaa2_eth_priv *priv)
3021 + struct device *dev = priv->net_dev->dev.parent;
3022 + struct cpumask xps_mask = CPU_MASK_NONE;
3023 + struct dpaa2_eth_fq *fq;
3024 + int rx_cpu, txc_cpu;
3027 + /* For each FQ, pick one channel/CPU to deliver frames to.
3028 + * This may well change at runtime, either through irqbalance or
3029 + * through direct user intervention.
3031 + rx_cpu = cpumask_first(&priv->dpio_cpumask);
3032 + txc_cpu = cpumask_first(&priv->txconf_cpumask);
3034 + for (i = 0; i < priv->num_fqs; i++) {
3035 + fq = &priv->fq[i];
3036 + switch (fq->type) {
3038 + case DPAA2_RX_ERR_FQ:
3039 + fq->target_cpu = rx_cpu;
3040 + rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
3041 + if (rx_cpu >= nr_cpu_ids)
3042 + rx_cpu = cpumask_first(&priv->dpio_cpumask);
3044 + case DPAA2_TX_CONF_FQ:
3045 + fq->target_cpu = txc_cpu;
3047 + /* register txc_cpu to XPS */
3048 + cpumask_set_cpu(txc_cpu, &xps_mask);
3049 + err = netif_set_xps_queue(priv->net_dev, &xps_mask,
3052 + dev_info_once(dev,
3053 + "Tx: error setting XPS queue\n");
3054 + cpumask_clear_cpu(txc_cpu, &xps_mask);
3056 + txc_cpu = cpumask_next(txc_cpu, &priv->txconf_cpumask);
3057 + if (txc_cpu >= nr_cpu_ids)
3058 + txc_cpu = cpumask_first(&priv->txconf_cpumask);
3061 + dev_err(dev, "Unknown FQ type: %d\n", fq->type);
3063 + fq->channel = get_affine_channel(priv, fq->target_cpu);
3067 +static void setup_fqs(struct dpaa2_eth_priv *priv)
3071 + /* We have one TxConf FQ per Tx flow. Tx queues MUST be at the
3072 + * beginning of the queue array.
3073 + * Number of Rx and Tx queues are the same.
3074 + * We only support one traffic class for now.
3076 + for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
3077 + priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
3078 + priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
3079 + priv->fq[priv->num_fqs++].flowid = (u16)i;
3082 + for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
3083 + priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
3084 + priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
3085 + priv->fq[priv->num_fqs++].flowid = (u16)i;
3088 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
3089 + /* We have exactly one Rx error queue per DPNI */
3090 + priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
3091 + priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
3094 + /* For each FQ, decide on which core to process incoming frames */
3095 + set_fq_affinity(priv);
3098 +/* Allocate and configure one buffer pool for each interface */
3099 +static int setup_dpbp(struct dpaa2_eth_priv *priv)
3102 + struct fsl_mc_device *dpbp_dev;
3103 + struct dpbp_attr dpbp_attrs;
3104 + struct device *dev = priv->net_dev->dev.parent;
3106 + err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
3109 + dev_err(dev, "DPBP device allocation failed\n");
3113 + priv->dpbp_dev = dpbp_dev;
3115 + err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
3116 + &dpbp_dev->mc_handle);
3118 + dev_err(dev, "dpbp_open() failed\n");
3122 + err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
3124 + dev_err(dev, "dpbp_reset() failed\n");
3128 + err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
3130 + dev_err(dev, "dpbp_enable() failed\n");
3134 + err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
3137 + dev_err(dev, "dpbp_get_attributes() failed\n");
3138 + goto err_get_attr;
3141 + priv->bpid = dpbp_attrs.bpid;
3142 + priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC / priv->num_channels;
3147 + dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
3150 + dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
3152 + fsl_mc_object_free(dpbp_dev);
3157 +static void free_dpbp(struct dpaa2_eth_priv *priv)
3160 + dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
3161 + dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
3162 + fsl_mc_object_free(priv->dpbp_dev);
3165 +static int setup_tx_congestion(struct dpaa2_eth_priv *priv)
3167 + struct dpni_congestion_notification_cfg cong_notif_cfg = { 0 };
3168 + struct device *dev = priv->net_dev->dev.parent;
3171 + priv->cscn_unaligned = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
3173 + if (!priv->cscn_unaligned)
3176 + priv->cscn_mem = PTR_ALIGN(priv->cscn_unaligned, DPAA2_CSCN_ALIGN);
3177 + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem, DPAA2_CSCN_SIZE,
3179 + if (dma_mapping_error(dev, priv->cscn_dma)) {
3180 + dev_err(dev, "Error mapping CSCN memory area\n");
3185 + cong_notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
3186 + cong_notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
3187 + cong_notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
3188 + cong_notif_cfg.message_ctx = (u64)priv;
3189 + cong_notif_cfg.message_iova = priv->cscn_dma;
3190 + cong_notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
3191 + DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
3192 + DPNI_CONG_OPT_COHERENT_WRITE;
3193 + err = dpni_set_congestion_notification(priv->mc_io, 0, priv->mc_token,
3197 + dev_err(dev, "dpni_set_congestion_notification failed\n");
3198 + goto err_set_cong;
3204 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
3206 + kfree(priv->cscn_unaligned);
3211 +/* Configure the DPNI object this interface is associated with */
3212 +static int setup_dpni(struct fsl_mc_device *ls_dev)
3214 + struct device *dev = &ls_dev->dev;
3215 + struct dpaa2_eth_priv *priv;
3216 + struct net_device *net_dev;
3217 + struct dpni_buffer_layout buf_layout;
3218 + struct dpni_link_cfg cfg = {0};
3221 + net_dev = dev_get_drvdata(dev);
3222 + priv = netdev_priv(net_dev);
3224 + priv->dpni_id = ls_dev->obj_desc.id;
3226 + /* get a handle for the DPNI object */
3227 + err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token);
3229 + dev_err(dev, "dpni_open() failed\n");
3233 + ls_dev->mc_io = priv->mc_io;
3234 + ls_dev->mc_handle = priv->mc_token;
3236 + err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3238 + dev_err(dev, "dpni_reset() failed\n");
3242 + err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
3243 + &priv->dpni_attrs);
3246 + dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
3247 + goto err_get_attr;
3250 + /* due to a limitation in WRIOP 1.0.0 (ERR009354), the Rx buf
3251 + * align value must be a multiple of 256.
3253 + priv->rx_buf_align =
3254 + priv->dpni_attrs.wriop_version & 0x3ff ?
3255 + DPAA2_ETH_RX_BUF_ALIGN : DPAA2_ETH_RX_BUF_ALIGN_V1;
3257 + /* Update number of logical FQs in netdev */
3258 + err = netif_set_real_num_tx_queues(net_dev,
3259 + dpaa2_eth_queue_count(priv));
3261 + dev_err(dev, "netif_set_real_num_tx_queues failed (%d)\n", err);
3262 + goto err_set_tx_queues;
3265 + err = netif_set_real_num_rx_queues(net_dev,
3266 + dpaa2_eth_queue_count(priv));
3268 + dev_err(dev, "netif_set_real_num_rx_queues failed (%d)\n", err);
3269 + goto err_set_rx_queues;
3272 + /* Configure buffer layouts */
3274 + buf_layout.pass_parser_result = true;
3275 + buf_layout.pass_frame_status = true;
3276 + buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
3277 + buf_layout.data_align = priv->rx_buf_align;
3278 + buf_layout.data_head_room = DPAA2_ETH_RX_HEAD_ROOM;
3279 + buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
3280 + DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3281 + DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
3282 + DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
3283 + DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
3284 + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3285 + DPNI_QUEUE_RX, &buf_layout);
3288 + "dpni_set_buffer_layout(RX) failed\n");
3289 + goto err_buf_layout;
3293 + buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3294 + DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
3295 + DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
3296 + buf_layout.pass_timestamp = true;
3297 + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3298 + DPNI_QUEUE_TX, &buf_layout);
3301 + "dpni_set_buffer_layout(TX) failed\n");
3302 + goto err_buf_layout;
3305 + /* tx-confirm buffer */
3306 + buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3307 + DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
3308 + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3309 + DPNI_QUEUE_TX_CONFIRM, &buf_layout);
3311 + dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
3312 + goto err_buf_layout;
3315 + /* Now that we've set our tx buffer layout, retrieve the minimum
3316 + * required tx data offset.
3318 + err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
3319 + &priv->tx_data_offset);
3321 + dev_err(dev, "dpni_get_tx_data_offset() failed (%d)\n", err);
3322 + goto err_data_offset;
3325 + if ((priv->tx_data_offset % 64) != 0)
3326 + dev_warn(dev, "Tx data offset (%d) not a multiple of 64B",
3327 + priv->tx_data_offset);
3329 + /* Accommodate software annotation space (SWA) */
3330 + priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;
3332 + /* Enable congestion notifications for Tx queues */
3333 + err = setup_tx_congestion(priv);
3337 + /* allocate classification rule space */
3338 + priv->cls_rule = kzalloc(sizeof(*priv->cls_rule) *
3339 + dpaa2_eth_fs_count(priv), GFP_KERNEL);
3340 + if (!priv->cls_rule)
3341 + goto err_cls_rule;
3343 + /* Enable flow control */
3344 + cfg.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE;
3345 + priv->tx_pause_frames = 1;
3347 + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
3349 + netdev_err(net_dev, "ERROR %d setting link cfg", err);
3350 + goto err_set_link_cfg;
3364 + dpni_close(priv->mc_io, 0, priv->mc_token);
3369 +static void free_dpni(struct dpaa2_eth_priv *priv)
3371 + struct device *dev = priv->net_dev->dev.parent;
3374 + err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3376 + netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
3379 + dpni_close(priv->mc_io, 0, priv->mc_token);
3381 + kfree(priv->cls_rule);
3383 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
3384 + kfree(priv->cscn_unaligned);
3387 +int setup_fqs_taildrop(struct dpaa2_eth_priv *priv,
3390 + struct device *dev = priv->net_dev->dev.parent;
3391 + struct dpni_taildrop td;
3394 + td.enable = enable;
3395 + td.threshold = DPAA2_ETH_TAILDROP_THRESH;
3398 + priv->num_bufs = DPAA2_ETH_NUM_BUFS_TD;
3399 + priv->refill_thresh = DPAA2_ETH_REFILL_THRESH_TD;
3401 + priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC /
3402 + priv->num_channels;
3403 + priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD;
3406 + for (i = 0; i < priv->num_fqs; i++) {
3407 + if (priv->fq[i].type != DPAA2_RX_FQ)
3410 + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
3411 + DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
3412 + priv->fq[i].flowid, &td);
3414 + dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
3422 +static int setup_rx_flow(struct dpaa2_eth_priv *priv,
3423 + struct dpaa2_eth_fq *fq)
3425 + struct device *dev = priv->net_dev->dev.parent;
3426 + struct dpni_queue q = { { 0 } };
3427 + struct dpni_queue_id qid;
3428 + u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3431 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3432 + DPNI_QUEUE_RX, 0, fq->flowid, &q, &qid);
3434 + dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3438 + fq->fqid = qid.fqid;
3440 + q.destination.id = fq->channel->dpcon_id;
3441 + q.destination.type = DPNI_DEST_DPCON;
3442 + q.destination.priority = 1;
3443 + q.user_context = (u64)fq;
3444 + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3445 + DPNI_QUEUE_RX, 0, fq->flowid, q_opt, &q);
3447 + dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
3454 +static int setup_tx_flow(struct dpaa2_eth_priv *priv,
3455 + struct dpaa2_eth_fq *fq)
3457 + struct device *dev = priv->net_dev->dev.parent;
3458 + struct dpni_queue q = { { 0 } };
3459 + struct dpni_queue_id qid;
3460 + u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3463 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3464 + DPNI_QUEUE_TX, 0, fq->flowid, &q, &qid);
3466 + dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3470 + fq->tx_qdbin = qid.qdbin;
3472 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3473 + DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, &q, &qid);
3475 + dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3479 + fq->fqid = qid.fqid;
3481 + q.destination.id = fq->channel->dpcon_id;
3482 + q.destination.type = DPNI_DEST_DPCON;
3483 + q.destination.priority = 0;
3484 + q.user_context = (u64)fq;
3485 + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3486 + DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, q_opt, &q);
3488 + dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3495 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
3496 +static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
3497 + struct dpaa2_eth_fq *fq)
3499 + struct device *dev = priv->net_dev->dev.parent;
3500 + struct dpni_queue q = { { 0 } };
3501 + struct dpni_queue_id qid;
3502 + u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3505 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3506 + DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
3508 + dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3512 + fq->fqid = qid.fqid;
3514 + q.destination.id = fq->channel->dpcon_id;
3515 + q.destination.type = DPNI_DEST_DPCON;
3516 + q.destination.priority = 1;
3517 + q.user_context = (u64)fq;
3518 + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3519 + DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
3521 + dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
3529 +/* default hash key fields */
3530 +static struct dpaa2_eth_hash_fields default_hash_fields[] = {
3533 + .rxnfc_field = RXH_L2DA,
3534 + .cls_prot = NET_PROT_ETH,
3535 + .cls_field = NH_FLD_ETH_DA,
3538 + .cls_prot = NET_PROT_ETH,
3539 + .cls_field = NH_FLD_ETH_SA,
3542 + /* This is the last ethertype field parsed:
3543 + * depending on frame format, it can be the MAC ethertype
3544 + * or the VLAN etype.
3546 + .cls_prot = NET_PROT_ETH,
3547 + .cls_field = NH_FLD_ETH_TYPE,
3551 + .rxnfc_field = RXH_VLAN,
3552 + .cls_prot = NET_PROT_VLAN,
3553 + .cls_field = NH_FLD_VLAN_TCI,
3557 + .rxnfc_field = RXH_IP_SRC,
3558 + .cls_prot = NET_PROT_IP,
3559 + .cls_field = NH_FLD_IP_SRC,
3562 + .rxnfc_field = RXH_IP_DST,
3563 + .cls_prot = NET_PROT_IP,
3564 + .cls_field = NH_FLD_IP_DST,
3567 + .rxnfc_field = RXH_L3_PROTO,
3568 + .cls_prot = NET_PROT_IP,
3569 + .cls_field = NH_FLD_IP_PROTO,
3572 + /* Using UDP ports, this is functionally equivalent to raw
3573 + * byte pairs from L4 header.
3575 + .rxnfc_field = RXH_L4_B_0_1,
3576 + .cls_prot = NET_PROT_UDP,
3577 + .cls_field = NH_FLD_UDP_PORT_SRC,
3580 + .rxnfc_field = RXH_L4_B_2_3,
3581 + .cls_prot = NET_PROT_UDP,
3582 + .cls_field = NH_FLD_UDP_PORT_DST,
3587 +/* Set RX hash options */
3588 +static int set_hash(struct dpaa2_eth_priv *priv)
3590 + struct device *dev = priv->net_dev->dev.parent;
3591 + struct dpkg_profile_cfg cls_cfg;
3592 + struct dpni_rx_tc_dist_cfg dist_cfg;
3597 + memset(&cls_cfg, 0, sizeof(cls_cfg));
3599 + for (i = 0; i < priv->num_hash_fields; i++) {
3600 + struct dpkg_extract *key =
3601 + &cls_cfg.extracts[cls_cfg.num_extracts];
3603 + key->type = DPKG_EXTRACT_FROM_HDR;
3604 + key->extract.from_hdr.prot = priv->hash_fields[i].cls_prot;
3605 + key->extract.from_hdr.type = DPKG_FULL_FIELD;
3606 + key->extract.from_hdr.field = priv->hash_fields[i].cls_field;
3607 + cls_cfg.num_extracts++;
3609 + priv->rx_flow_hash |= priv->hash_fields[i].rxnfc_field;
3612 + dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL);
3616 + err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
3618 + dev_err(dev, "dpni_prepare_key_cfg() failed (%d)", err);
3619 + goto err_prep_key;
3622 + memset(&dist_cfg, 0, sizeof(dist_cfg));
3624 + /* Prepare for setting the rx dist */
3625 + dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
3626 + DPAA2_CLASSIFIER_DMA_SIZE,
3628 + if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
3629 + dev_err(dev, "DMA mapping failed\n");
3634 + dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3635 + if (dpaa2_eth_fs_enabled(priv)) {
3636 + dist_cfg.dist_mode = DPNI_DIST_MODE_FS;
3637 + dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH;
3639 + dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
3642 + err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
3643 + dma_unmap_single(dev, dist_cfg.key_cfg_iova,
3644 + DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
3646 + dev_err(dev, "dpni_set_rx_tc_dist() failed (%d)\n", err);
3654 +/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
3655 + * frame queues and channels
3657 +static int bind_dpni(struct dpaa2_eth_priv *priv)
3659 + struct net_device *net_dev = priv->net_dev;
3660 + struct device *dev = net_dev->dev.parent;
3661 + struct dpni_pools_cfg pools_params;
3662 + struct dpni_error_cfg err_cfg;
3666 + pools_params.num_dpbp = 1;
3667 + pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
3668 + pools_params.pools[0].backup_pool = 0;
3669 + pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
3670 + err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
3672 + dev_err(dev, "dpni_set_pools() failed\n");
3676 + /* Verify classification options and disable hashing and/or
3677 + * flow steering support in case of invalid configuration values
3679 + priv->hash_fields = default_hash_fields;
3680 + priv->num_hash_fields = ARRAY_SIZE(default_hash_fields);
3681 + check_cls_support(priv);
3683 + /* have the interface implicitly distribute traffic based on
3684 + * a static hash key
3686 + if (dpaa2_eth_hash_enabled(priv)) {
3687 + err = set_hash(priv);
3689 + dev_err(dev, "Hashing configuration failed\n");
3694 + /* Configure handling of error frames */
3695 + err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
3696 + err_cfg.set_frame_annotation = 1;
3697 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
3698 + err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
3700 + err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
3702 + err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
3705 + dev_err(dev, "dpni_set_errors_behavior() failed (%d)\n", err);
3709 + /* Configure Rx and Tx conf queues to generate CDANs */
3710 + for (i = 0; i < priv->num_fqs; i++) {
3711 + switch (priv->fq[i].type) {
3713 + err = setup_rx_flow(priv, &priv->fq[i]);
3715 + case DPAA2_TX_CONF_FQ:
3716 + err = setup_tx_flow(priv, &priv->fq[i]);
3718 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
3719 + case DPAA2_RX_ERR_FQ:
3720 + err = setup_rx_err_flow(priv, &priv->fq[i]);
3724 + dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
3731 + err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, DPNI_QUEUE_TX,
3734 + dev_err(dev, "dpni_get_qdid() failed\n");
3741 +/* Allocate rings for storing incoming frame descriptors */
3742 +static int alloc_rings(struct dpaa2_eth_priv *priv)
3744 + struct net_device *net_dev = priv->net_dev;
3745 + struct device *dev = net_dev->dev.parent;
3748 + for (i = 0; i < priv->num_channels; i++) {
3749 + priv->channel[i]->store =
3750 + dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
3751 + if (!priv->channel[i]->store) {
3752 + netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
3760 + for (i = 0; i < priv->num_channels; i++) {
3761 + if (!priv->channel[i]->store)
3763 + dpaa2_io_store_destroy(priv->channel[i]->store);
3769 +static void free_rings(struct dpaa2_eth_priv *priv)
3773 + for (i = 0; i < priv->num_channels; i++)
3774 + dpaa2_io_store_destroy(priv->channel[i]->store);
3777 +static int netdev_init(struct net_device *net_dev)
3780 + struct device *dev = net_dev->dev.parent;
3781 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3782 + u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
3783 + u8 bcast_addr[ETH_ALEN];
3784 + u16 rx_headroom, rx_req_headroom;
3786 + net_dev->netdev_ops = &dpaa2_eth_ops;
3788 + /* Get firmware address, if any */
3789 + err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
3791 + dev_err(dev, "dpni_get_port_mac_addr() failed (%d)\n", err);
3795 +	/* Get DPNI attributes address, if any */
3796 + err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
3799 + dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)\n", err);
3803 + /* First check if firmware has any address configured by bootloader */
3804 + if (!is_zero_ether_addr(mac_addr)) {
3805 + /* If the DPMAC addr != the DPNI addr, update it */
3806 + if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
3807 + err = dpni_set_primary_mac_addr(priv->mc_io, 0,
3812 + "dpni_set_primary_mac_addr() failed (%d)\n",
3817 + memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
3818 + } else if (is_zero_ether_addr(dpni_mac_addr)) {
3819 + /* Fills in net_dev->dev_addr, as required by
3820 + * register_netdevice()
3822 + eth_hw_addr_random(net_dev);
3823 + /* Make the user aware, without cluttering the boot log */
3824 + dev_dbg_once(dev, " device(s) have all-zero hwaddr, replaced with random\n");
3825 + err = dpni_set_primary_mac_addr(priv->mc_io, 0,
3826 + priv->mc_token, net_dev->dev_addr);
3829 + "dpni_set_primary_mac_addr() failed (%d)\n", err);
3832 + /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
3833 + * practical purposes, this will be our "permanent" mac address,
3834 + * at least until the next reboot. This move will also permit
3835 + * register_netdevice() to properly fill up net_dev->perm_addr.
3837 + net_dev->addr_assign_type = NET_ADDR_PERM;
3838 + /* If DPMAC address is non-zero, use that one */
3840 + /* NET_ADDR_PERM is default, all we have to do is
3841 + * fill in the device addr.
3843 + memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
3846 + /* Explicitly add the broadcast address to the MAC filtering table;
3847 + * the MC won't do that for us.
3849 + eth_broadcast_addr(bcast_addr);
3850 + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
3852 + dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err);
3853 + /* Won't return an error; at least, we'd have egress traffic */
3856 + /* Reserve enough space to align buffer as per hardware requirement;
3857 + * NOTE: priv->tx_data_offset MUST be initialized at this point.
3859 + net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);
3861 + /* Set MTU limits */
3862 + net_dev->min_mtu = 68;
3863 + net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
3865 + /* Required headroom for Rx skbs, to avoid reallocation on
3866 + * forwarding path.
3868 + rx_req_headroom = LL_RESERVED_SPACE(net_dev) - ETH_HLEN;
3869 + rx_headroom = ALIGN(DPAA2_ETH_RX_HWA_SIZE + DPAA2_ETH_SWA_SIZE +
3870 + DPAA2_ETH_RX_HEAD_ROOM, priv->rx_buf_align);
3871 + if (rx_req_headroom > rx_headroom)
3872 + dev_info_once(dev,
3873 + "Required headroom (%d) greater than available (%d).\n"
3874 + "This will impact performance due to reallocations.\n",
3875 + rx_req_headroom, rx_headroom);
3877 + /* Our .ndo_init will be called herein */
3878 + err = register_netdev(net_dev);
3880 + dev_err(dev, "register_netdev() failed (%d)\n", err);
3887 +static int poll_link_state(void *arg)
3889 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
3892 + while (!kthread_should_stop()) {
3893 + err = link_state_update(priv);
3894 + if (unlikely(err))
3897 + msleep(DPAA2_ETH_LINK_STATE_REFRESH);
3903 +static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
3905 + return IRQ_WAKE_THREAD;
3908 +static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
3910 + u32 status = 0, clear = 0;
3911 + struct device *dev = (struct device *)arg;
3912 + struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
3913 + struct net_device *net_dev = dev_get_drvdata(dev);
3916 + err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
3917 + DPNI_IRQ_INDEX, &status);
3918 + if (unlikely(err)) {
3919 + netdev_err(net_dev, "Can't get irq status (err %d)", err);
3920 + clear = 0xffffffff;
3924 + if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
3925 + clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
3926 + link_state_update(netdev_priv(net_dev));
3930 + dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
3931 + DPNI_IRQ_INDEX, clear);
3932 + return IRQ_HANDLED;
3935 +static int setup_irqs(struct fsl_mc_device *ls_dev)
3938 + struct fsl_mc_device_irq *irq;
3940 + err = fsl_mc_allocate_irqs(ls_dev);
3942 + dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
3946 + irq = ls_dev->irqs[0];
3947 + err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
3948 + dpni_irq0_handler,
3949 + dpni_irq0_handler_thread,
3950 + IRQF_NO_SUSPEND | IRQF_ONESHOT,
3951 + dev_name(&ls_dev->dev), &ls_dev->dev);
3953 + dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d", err);
3957 + err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
3958 + DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
3960 + dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d", err);
3964 + err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
3965 + DPNI_IRQ_INDEX, 1);
3967 + dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d", err);
3974 + devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
3976 + fsl_mc_free_irqs(ls_dev);
3981 +static void add_ch_napi(struct dpaa2_eth_priv *priv)
3984 + struct dpaa2_eth_channel *ch;
3986 + for (i = 0; i < priv->num_channels; i++) {
3987 + ch = priv->channel[i];
3988 + /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
3989 + netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
3990 + NAPI_POLL_WEIGHT);
3994 +static void del_ch_napi(struct dpaa2_eth_priv *priv)
3997 + struct dpaa2_eth_channel *ch;
3999 + for (i = 0; i < priv->num_channels; i++) {
4000 + ch = priv->channel[i];
4001 + netif_napi_del(&ch->napi);
4005 +/* SysFS support */
4006 +static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev,
4007 + struct device_attribute *attr,
4010 + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
4011 + /* No MC API for getting the shaping config. We're stateful. */
4012 + struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg;
4014 + return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size);
4017 +static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev,
4018 + struct device_attribute *attr,
4023 + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
4024 + struct dpni_tx_shaping_cfg scfg;
4026 + items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size);
4028 + pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n");
4031 + /* Size restriction as per MC API documentation */
4032 + if (scfg.max_burst_size > DPAA2_ETH_MAX_BURST_SIZE) {
4033 + pr_err("max_burst_size must be <= %d\n",
4034 + DPAA2_ETH_MAX_BURST_SIZE);
4038 + err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg);
4040 + dev_err(dev, "dpni_set_tx_shaping() failed\n");
4043 + /* If successful, save the current configuration for future inquiries */
4044 + priv->shaping_cfg = scfg;
4049 +static ssize_t dpaa2_eth_show_txconf_cpumask(struct device *dev,
4050 + struct device_attribute *attr,
4053 + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
4055 + return cpumap_print_to_pagebuf(1, buf, &priv->txconf_cpumask);
4058 +static ssize_t dpaa2_eth_write_txconf_cpumask(struct device *dev,
4059 + struct device_attribute *attr,
4063 + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
4064 + struct dpaa2_eth_fq *fq;
4065 + bool running = netif_running(priv->net_dev);
4068 + err = cpulist_parse(buf, &priv->txconf_cpumask);
4072 + /* Only accept CPUs that have an affine DPIO */
4073 + if (!cpumask_subset(&priv->txconf_cpumask, &priv->dpio_cpumask)) {
4074 + netdev_info(priv->net_dev,
4075 + "cpumask must be a subset of 0x%lx\n",
4076 + *cpumask_bits(&priv->dpio_cpumask));
4077 + cpumask_and(&priv->txconf_cpumask, &priv->dpio_cpumask,
4078 + &priv->txconf_cpumask);
4081 + /* Rewiring the TxConf FQs requires interface shutdown.
4084 + err = dpaa2_eth_stop(priv->net_dev);
4089 + /* Set the new TxConf FQ affinities */
4090 + set_fq_affinity(priv);
4092 + /* dpaa2_eth_open() below will *stop* the Tx queues until an explicit
4093 + * link up notification is received. Give the polling thread enough time
4094 + * to detect the link state change, or else we'll end up with the
4095 + * transmission side forever shut down.
4097 + if (priv->do_link_poll)
4098 + msleep(2 * DPAA2_ETH_LINK_STATE_REFRESH);
4100 + for (i = 0; i < priv->num_fqs; i++) {
4101 + fq = &priv->fq[i];
4102 + if (fq->type != DPAA2_TX_CONF_FQ)
4104 + setup_tx_flow(priv, fq);
4108 + err = dpaa2_eth_open(priv->net_dev);
4116 +static struct device_attribute dpaa2_eth_attrs[] = {
4117 + __ATTR(txconf_cpumask,
4119 + dpaa2_eth_show_txconf_cpumask,
4120 + dpaa2_eth_write_txconf_cpumask),
4122 + __ATTR(tx_shaping,
4124 + dpaa2_eth_show_tx_shaping,
4125 + dpaa2_eth_write_tx_shaping),
4128 +static void dpaa2_eth_sysfs_init(struct device *dev)
4132 + for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) {
4133 + err = device_create_file(dev, &dpaa2_eth_attrs[i]);
4135 + dev_err(dev, "ERROR creating sysfs file\n");
4143 + device_remove_file(dev, &dpaa2_eth_attrs[--i]);
4146 +static void dpaa2_eth_sysfs_remove(struct device *dev)
4150 + for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++)
4151 + device_remove_file(dev, &dpaa2_eth_attrs[i]);
4154 +static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
4156 + struct device *dev;
4157 + struct net_device *net_dev = NULL;
4158 + struct dpaa2_eth_priv *priv = NULL;
4161 + dev = &dpni_dev->dev;
4164 + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
4166 + dev_err(dev, "alloc_etherdev_mq() failed\n");
4170 + SET_NETDEV_DEV(net_dev, dev);
4171 + dev_set_drvdata(dev, net_dev);
4173 + priv = netdev_priv(net_dev);
4174 + priv->net_dev = net_dev;
4176 + priv->iommu_domain = iommu_get_domain_for_dev(dev);
4178 + /* Obtain a MC portal */
4179 + err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
4182 + dev_err(dev, "MC portal allocation failed\n");
4183 + goto err_portal_alloc;
4186 + /* MC objects initialization and configuration */
4187 + err = setup_dpni(dpni_dev);
4189 + goto err_dpni_setup;
4191 + err = setup_dpio(priv);
4193 + dev_info(dev, "Defer probing as no DPIO available\n");
4194 + err = -EPROBE_DEFER;
4195 + goto err_dpio_setup;
4200 + err = setup_dpbp(priv);
4202 + goto err_dpbp_setup;
4204 + err = bind_dpni(priv);
4208 + /* Add a NAPI context for each channel */
4209 + add_ch_napi(priv);
4210 + enable_ch_napi(priv);
4212 + /* Percpu statistics */
4213 + priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
4214 + if (!priv->percpu_stats) {
4215 + dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
4217 + goto err_alloc_percpu_stats;
4219 + priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
4220 + if (!priv->percpu_extras) {
4221 + dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
4223 + goto err_alloc_percpu_extras;
4226 + snprintf(net_dev->name, IFNAMSIZ, "ni%d", dpni_dev->obj_desc.id);
4227 + if (!dev_valid_name(net_dev->name)) {
4228 + dev_warn(&net_dev->dev,
4229 + "netdevice name \"%s\" cannot be used, reverting to default..\n",
4231 + dev_alloc_name(net_dev, "eth%d");
4232 + dev_warn(&net_dev->dev, "using name \"%s\"\n", net_dev->name);
4235 + err = netdev_init(net_dev);
4237 + goto err_netdev_init;
4239 + /* Configure checksum offload based on current interface flags */
4240 + err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
4244 + err = set_tx_csum(priv, !!(net_dev->features &
4245 + (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
4249 + err = alloc_rings(priv);
4251 + goto err_alloc_rings;
4253 + net_dev->ethtool_ops = &dpaa2_ethtool_ops;
4255 + err = setup_irqs(dpni_dev);
4257 + netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
4258 + priv->poll_thread = kthread_run(poll_link_state, priv,
4259 + "%s_poll_link", net_dev->name);
4260 + if (IS_ERR(priv->poll_thread)) {
4261 + netdev_err(net_dev, "Error starting polling thread\n");
4262 + goto err_poll_thread;
4264 + priv->do_link_poll = true;
4267 + dpaa2_eth_sysfs_init(&net_dev->dev);
4268 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
4269 + dpaa2_dbg_add(priv);
4272 + dev_info(dev, "Probed interface %s\n", net_dev->name);
4279 + unregister_netdev(net_dev);
4281 + free_percpu(priv->percpu_extras);
4282 +err_alloc_percpu_extras:
4283 + free_percpu(priv->percpu_stats);
4284 +err_alloc_percpu_stats:
4285 + disable_ch_napi(priv);
4286 + del_ch_napi(priv);
4294 + fsl_mc_portal_free(priv->mc_io);
4296 + dev_set_drvdata(dev, NULL);
4297 + free_netdev(net_dev);
4302 +static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
4304 + struct device *dev;
4305 + struct net_device *net_dev;
4306 + struct dpaa2_eth_priv *priv;
4308 + dev = &ls_dev->dev;
4309 + net_dev = dev_get_drvdata(dev);
4310 + priv = netdev_priv(net_dev);
4312 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
4313 + dpaa2_dbg_remove(priv);
4315 + dpaa2_eth_sysfs_remove(&net_dev->dev);
4317 + unregister_netdev(net_dev);
4318 + dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
4320 + if (priv->do_link_poll)
4321 + kthread_stop(priv->poll_thread);
4323 + fsl_mc_free_irqs(ls_dev);
4326 + free_percpu(priv->percpu_stats);
4327 + free_percpu(priv->percpu_extras);
4329 + disable_ch_napi(priv);
4330 + del_ch_napi(priv);
4335 + fsl_mc_portal_free(priv->mc_io);
4337 + dev_set_drvdata(dev, NULL);
4338 + free_netdev(net_dev);
4343 +static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
4345 + .vendor = FSL_MC_VENDOR_FREESCALE,
4346 + .obj_type = "dpni",
4350 +MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
4352 +static struct fsl_mc_driver dpaa2_eth_driver = {
4354 + .name = KBUILD_MODNAME,
4355 + .owner = THIS_MODULE,
4357 + .probe = dpaa2_eth_probe,
4358 + .remove = dpaa2_eth_remove,
4359 + .match_id_table = dpaa2_eth_match_id_table
4362 +static int __init dpaa2_eth_driver_init(void)
4366 + dpaa2_eth_dbg_init();
4367 + err = fsl_mc_driver_register(&dpaa2_eth_driver);
4369 + dpaa2_eth_dbg_exit();
4376 +static void __exit dpaa2_eth_driver_exit(void)
4378 + dpaa2_eth_dbg_exit();
4379 + fsl_mc_driver_unregister(&dpaa2_eth_driver);
4382 +module_init(dpaa2_eth_driver_init);
4383 +module_exit(dpaa2_eth_driver_exit);
4384 diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
4385 new file mode 100644
4386 index 00000000..86cb12e9
4388 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
4390 +/* Copyright 2014-2015 Freescale Semiconductor Inc.
4392 + * Redistribution and use in source and binary forms, with or without
4393 + * modification, are permitted provided that the following conditions are met:
4394 + * * Redistributions of source code must retain the above copyright
4395 + * notice, this list of conditions and the following disclaimer.
4396 + * * Redistributions in binary form must reproduce the above copyright
4397 + * notice, this list of conditions and the following disclaimer in the
4398 + * documentation and/or other materials provided with the distribution.
4399 + * * Neither the name of Freescale Semiconductor nor the
4400 + * names of its contributors may be used to endorse or promote products
4401 + * derived from this software without specific prior written permission.
4404 + * ALTERNATIVELY, this software may be distributed under the terms of the
4405 + * GNU General Public License ("GPL") as published by the Free Software
4406 + * Foundation, either version 2 of that License or (at your option) any
4409 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
4410 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
4411 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
4412 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
4413 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
4414 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
4415 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
4416 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
4417 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
4418 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4421 +#ifndef __DPAA2_ETH_H
4422 +#define __DPAA2_ETH_H
4424 +#include <linux/atomic.h>
4425 +#include <linux/netdevice.h>
4426 +#include <linux/if_vlan.h>
4427 +#include "../../fsl-mc/include/dpaa2-io.h"
4431 +#include "dpaa2-eth-debugfs.h"
4433 +#define DPAA2_ETH_STORE_SIZE 16
4435 +/* We set a max threshold for how many Tx confirmations we should process
4436 + * on a NAPI poll call, since they take less processing time.
4438 +#define TX_CONF_PER_NAPI_POLL 256
4440 +/* Maximum number of scatter-gather entries in an ingress frame,
4441 + * considering the maximum receive frame size is 64K
4443 +#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE)
4445 +/* Maximum acceptable MTU value. It is in direct relation with the hardware
4446 + * enforced Max Frame Length (currently 10k).
4448 +#define DPAA2_ETH_MFL (10 * 1024)
4449 +#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN)
4450 +/* Convert L3 MTU to L2 MFL */
4451 +#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN)
4453 +/* Maximum burst size value for Tx shaping */
4454 +#define DPAA2_ETH_MAX_BURST_SIZE 0xF7FF
4456 +/* Maximum number of buffers that can be acquired/released through a single
4459 +#define DPAA2_ETH_BUFS_PER_CMD 7
4461 +/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
4462 + * frames in the Rx queues (length of the current frame is not
4463 + * taken into account when making the taildrop decision)
4465 +#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024)
4467 +/* Buffer quota per queue. Must be large enough such that for minimum sized
4468 + * frames taildrop kicks in before the bpool gets depleted, so we compute
4469 + * how many 64B frames fit inside the taildrop threshold and add a margin
4470 + * to accommodate the buffer refill delay.
4472 +#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
4473 +#define DPAA2_ETH_NUM_BUFS_TD (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
4474 +#define DPAA2_ETH_REFILL_THRESH_TD \
4475 + (DPAA2_ETH_NUM_BUFS_TD - DPAA2_ETH_BUFS_PER_CMD)
4477 +/* Buffer quota per queue to use when flow control is active. */
4478 +#define DPAA2_ETH_NUM_BUFS_FC 256
4480 +/* Hardware requires alignment for ingress/egress buffer addresses
4481 + * and ingress buffer lengths.
4483 +#define DPAA2_ETH_RX_BUF_SIZE 2048
4484 +#define DPAA2_ETH_TX_BUF_ALIGN 64
4485 +#define DPAA2_ETH_RX_BUF_ALIGN 64
4486 +#define DPAA2_ETH_RX_BUF_ALIGN_V1 256
4487 +#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
4488 + ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN)
4490 +/* rx_extra_head prevents reallocations in L3 processing. */
4491 +#define DPAA2_ETH_SKB_SIZE \
4492 + (DPAA2_ETH_RX_BUF_SIZE + \
4493 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
4495 +/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress
4496 + * buffers large enough to allow building an skb around them and also account
4497 + * for alignment restrictions.
4499 +#define DPAA2_ETH_BUF_RAW_SIZE(p_priv) \
4500 + (DPAA2_ETH_SKB_SIZE + \
4501 + (p_priv)->rx_buf_align)
4503 +/* PTP nominal frequency 1GHz */
4504 +#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1
4506 +/* Leave enough extra space in the headroom to make sure the skb is
4507 + * not realloc'd in forwarding scenarios.
4509 +#define DPAA2_ETH_RX_HEAD_ROOM 192
4511 +/* We are accommodating an skb backpointer and some S/G info
4512 + * in the frame's software annotation. The hardware
4513 + * options are either 0 or 64, so we choose the latter.
4515 +#define DPAA2_ETH_SWA_SIZE 64
4517 +/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
4518 +struct dpaa2_eth_swa {
4519 + struct sk_buff *skb;
4520 + struct scatterlist *scl;
4525 +/* Annotation valid bits in FD FRC */
4526 +#define DPAA2_FD_FRC_FASV 0x8000
4527 +#define DPAA2_FD_FRC_FAEADV 0x4000
4528 +#define DPAA2_FD_FRC_FAPRV 0x2000
4529 +#define DPAA2_FD_FRC_FAIADV 0x1000
4530 +#define DPAA2_FD_FRC_FASWOV 0x0800
4531 +#define DPAA2_FD_FRC_FAICFDV 0x0400
4533 +#define DPAA2_FD_RX_ERR_MASK (FD_CTRL_SBE | FD_CTRL_FAERR)
4534 +#define DPAA2_FD_TX_ERR_MASK (FD_CTRL_UFD | \
4539 +/* Annotation bits in FD CTRL */
4540 +#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
4542 +/* Size of hardware annotation area based on the current buffer layout
4545 +#define DPAA2_ETH_RX_HWA_SIZE 64
4546 +#define DPAA2_ETH_TX_HWA_SIZE 128
4548 +/* Frame annotation status */
4556 +/* Frame annotation status word is located in the first 8 bytes
4557 + * of the buffer's hardware annotation area
4559 +#define DPAA2_FAS_OFFSET 0
4560 +#define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas))
4562 +/* Timestamp is located in the next 8 bytes of the buffer's
4563 + * hardware annotation area
4565 +#define DPAA2_TS_OFFSET 0x8
4567 +/* Frame annotation egress action descriptor */
4568 +#define DPAA2_FAEAD_OFFSET 0x58
4570 +struct dpaa2_faead {
4575 +#define DPAA2_FAEAD_A2V 0x20000000
4576 +#define DPAA2_FAEAD_UPDV 0x00001000
4577 +#define DPAA2_FAEAD_UPD 0x00000010
4579 +/* accessors for the hardware annotation fields that we use */
4580 +#define dpaa2_eth_get_hwa(buf_addr) \
4581 + ((void *)(buf_addr) + DPAA2_ETH_SWA_SIZE)
4583 +#define dpaa2_eth_get_fas(buf_addr) \
4584 + (struct dpaa2_fas *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_FAS_OFFSET)
4586 +#define dpaa2_eth_get_ts(buf_addr) \
4587 + (u64 *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_TS_OFFSET)
4589 +#define dpaa2_eth_get_faead(buf_addr) \
4590 + (struct dpaa2_faead *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_FAEAD_OFFSET)
4592 +/* Error and status bits in the frame annotation status word */
4593 +/* Debug frame, otherwise supposed to be discarded */
4594 +#define DPAA2_FAS_DISC 0x80000000
4596 +#define DPAA2_FAS_MS 0x40000000
4597 +#define DPAA2_FAS_PTP 0x08000000
4598 +/* Ethernet multicast frame */
4599 +#define DPAA2_FAS_MC 0x04000000
4600 +/* Ethernet broadcast frame */
4601 +#define DPAA2_FAS_BC 0x02000000
4602 +#define DPAA2_FAS_KSE 0x00040000
4603 +#define DPAA2_FAS_EOFHE 0x00020000
4604 +#define DPAA2_FAS_MNLE 0x00010000
4605 +#define DPAA2_FAS_TIDE 0x00008000
4606 +#define DPAA2_FAS_PIEE 0x00004000
4607 +/* Frame length error */
4608 +#define DPAA2_FAS_FLE 0x00002000
4609 +/* Frame physical error */
4610 +#define DPAA2_FAS_FPE 0x00001000
4611 +#define DPAA2_FAS_PTE 0x00000080
4612 +#define DPAA2_FAS_ISP 0x00000040
4613 +#define DPAA2_FAS_PHE 0x00000020
4614 +#define DPAA2_FAS_BLE 0x00000010
4615 +/* L3 csum validation performed */
4616 +#define DPAA2_FAS_L3CV 0x00000008
4617 +/* L3 csum error */
4618 +#define DPAA2_FAS_L3CE 0x00000004
4619 +/* L4 csum validation performed */
4620 +#define DPAA2_FAS_L4CV 0x00000002
4621 +/* L4 csum error */
4622 +#define DPAA2_FAS_L4CE 0x00000001
4623 +/* Possible errors on the ingress path */
4624 +#define DPAA2_FAS_RX_ERR_MASK ((DPAA2_FAS_KSE) | \
4625 + (DPAA2_FAS_EOFHE) | \
4626 + (DPAA2_FAS_MNLE) | \
4627 + (DPAA2_FAS_TIDE) | \
4628 + (DPAA2_FAS_PIEE) | \
4629 + (DPAA2_FAS_FLE) | \
4630 + (DPAA2_FAS_FPE) | \
4631 + (DPAA2_FAS_PTE) | \
4632 + (DPAA2_FAS_ISP) | \
4633 + (DPAA2_FAS_PHE) | \
4634 + (DPAA2_FAS_BLE) | \
4635 + (DPAA2_FAS_L3CE) | \
4638 +#define DPAA2_FAS_TX_ERR_MASK ((DPAA2_FAS_KSE) | \
4639 + (DPAA2_FAS_EOFHE) | \
4640 + (DPAA2_FAS_MNLE) | \
4643 +/* Time in milliseconds between link state updates */
4644 +#define DPAA2_ETH_LINK_STATE_REFRESH 1000
4646 +/* Number of times to retry a frame enqueue before giving up.
4647 + * Value determined empirically, in order to minimize the number
4648 + * of frames dropped on Tx
4650 +#define DPAA2_ETH_ENQUEUE_RETRIES 10
4652 +/* Tx congestion entry & exit thresholds, in number of bytes.
4653 + * We allow a maximum of 512KB worth of frames pending processing on the Tx
4654 + * queues of an interface
4656 +#define DPAA2_ETH_TX_CONG_ENTRY_THRESH (512 * 1024)
4657 +#define DPAA2_ETH_TX_CONG_EXIT_THRESH (DPAA2_ETH_TX_CONG_ENTRY_THRESH * 9/10)
4659 +/* Driver statistics, other than those in struct rtnl_link_stats64.
4660 + * These are usually collected per-CPU and aggregated by ethtool.
4662 +struct dpaa2_eth_drv_stats {
4663 + __u64 tx_conf_frames;
4664 + __u64 tx_conf_bytes;
4665 + __u64 tx_sg_frames;
4666 + __u64 tx_sg_bytes;
4667 + __u64 rx_sg_frames;
4668 + __u64 rx_sg_bytes;
4669 + /* Enqueues retried due to portal busy */
4670 + __u64 tx_portal_busy;
4673 +/* Per-FQ statistics */
4674 +struct dpaa2_eth_fq_stats {
4675 + /* Number of frames received on this queue */
4677 + /* Number of times this queue entered congestion */
4678 + __u64 congestion_entry;
4681 +/* Per-channel statistics */
4682 +struct dpaa2_eth_ch_stats {
4683 + /* Volatile dequeues retried due to portal busy */
4684 + __u64 dequeue_portal_busy;
4685 + /* Number of CDANs; useful to estimate avg NAPI len */
4687 + /* Number of frames received on queues from this channel */
4693 +/* Maximum number of queues associated with a DPNI */
4694 +#define DPAA2_ETH_MAX_RX_QUEUES 16
4695 +#define DPAA2_ETH_MAX_TX_QUEUES NR_CPUS
4696 +#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1
4697 +#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
4698 + DPAA2_ETH_MAX_TX_QUEUES + \
4699 + DPAA2_ETH_MAX_RX_ERR_QUEUES)
4701 +#define DPAA2_ETH_MAX_DPCONS NR_CPUS
4703 +enum dpaa2_eth_fq_type {
4709 +struct dpaa2_eth_priv;
4711 +struct dpaa2_eth_fq {
4716 + struct dpaa2_eth_channel *channel;
4717 + enum dpaa2_eth_fq_type type;
4719 + void (*consume)(struct dpaa2_eth_priv *,
4720 + struct dpaa2_eth_channel *,
4721 + const struct dpaa2_fd *,
4722 + struct napi_struct *,
4724 + struct dpaa2_eth_fq_stats stats;
4727 +struct dpaa2_eth_channel {
4728 + struct dpaa2_io_notification_ctx nctx;
4729 + struct fsl_mc_device *dpcon;
4733 + struct napi_struct napi;
4734 + struct dpaa2_io_store *store;
4735 + struct dpaa2_eth_priv *priv;
4737 + struct dpaa2_eth_ch_stats stats;
4740 +struct dpaa2_eth_cls_rule {
4741 + struct ethtool_rx_flow_spec fs;
4745 +struct dpaa2_eth_hash_fields {
4747 + enum net_prot cls_prot;
4753 +/* Driver private data */
4754 +struct dpaa2_eth_priv {
4755 + struct net_device *net_dev;
4757 + /* Standard statistics */
4758 + struct rtnl_link_stats64 __percpu *percpu_stats;
4759 + /* Extra stats, in addition to the ones known by the kernel */
4760 + struct dpaa2_eth_drv_stats __percpu *percpu_extras;
4761 + struct iommu_domain *iommu_domain;
4763 + bool ts_tx_en; /* Tx timestamping enabled */
4764 + bool ts_rx_en; /* Rx timestamping enabled */
4766 + u16 tx_data_offset;
4772 + int tx_pause_frames;
4774 + int refill_thresh;
4776 + /* Tx congestion notifications are written here */
4778 + void *cscn_unaligned;
4779 + dma_addr_t cscn_dma;
4782 + /* Tx queues are at the beginning of the array */
4783 + struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
4786 + struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
4789 + struct dpni_attr dpni_attrs;
4790 + struct fsl_mc_device *dpbp_dev;
4792 + struct fsl_mc_io *mc_io;
4793 + /* SysFS-controlled affinity mask for TxConf FQs */
4794 + struct cpumask txconf_cpumask;
4795 + /* Cores which have an affine DPIO/DPCON.
4796 + * This is the cpu set on which Rx frames are processed;
4797 + * Tx confirmation frames are processed on a subset of this,
4798 + * depending on user settings.
4800 + struct cpumask dpio_cpumask;
4804 + struct dpni_link_state link_state;
4805 + bool do_link_poll;
4806 + struct task_struct *poll_thread;
4808 + struct dpaa2_eth_hash_fields *hash_fields;
4809 + u8 num_hash_fields;
4810 + /* enabled ethtool hashing bits */
4813 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
4814 + struct dpaa2_debugfs dbg;
4817 + /* array of classification rules */
4818 + struct dpaa2_eth_cls_rule *cls_rule;
4820 + struct dpni_tx_shaping_cfg shaping_cfg;
4823 +#define dpaa2_eth_hash_enabled(priv) \
4824 + ((priv)->dpni_attrs.num_queues > 1)
4826 +#define dpaa2_eth_fs_enabled(priv) \
4827 + (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
4829 +#define dpaa2_eth_fs_mask_enabled(priv) \
4830 + ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
4832 +#define dpaa2_eth_fs_count(priv) \
4833 + ((priv)->dpni_attrs.fs_entries)
4835 +/* size of DMA memory used to pass configuration to classifier, in bytes */
4836 +#define DPAA2_CLASSIFIER_DMA_SIZE 256
4838 +extern const struct ethtool_ops dpaa2_ethtool_ops;
4839 +extern const char dpaa2_eth_drv_version[];
4841 +static inline int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
4843 + return priv->dpni_attrs.num_queues;
4846 +void check_cls_support(struct dpaa2_eth_priv *priv);
4848 +int setup_fqs_taildrop(struct dpaa2_eth_priv *priv, bool enable);
4849 +#endif /* __DPAA2_H */
4850 diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
4851 new file mode 100644
4852 index 00000000..9859814e
4854 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
4856 +/* Copyright 2014-2015 Freescale Semiconductor Inc.
4858 + * Redistribution and use in source and binary forms, with or without
4859 + * modification, are permitted provided that the following conditions are met:
4860 + * * Redistributions of source code must retain the above copyright
4861 + * notice, this list of conditions and the following disclaimer.
4862 + * * Redistributions in binary form must reproduce the above copyright
4863 + * notice, this list of conditions and the following disclaimer in the
4864 + * documentation and/or other materials provided with the distribution.
4865 + * * Neither the name of Freescale Semiconductor nor the
4866 + * names of its contributors may be used to endorse or promote products
4867 + * derived from this software without specific prior written permission.
4870 + * ALTERNATIVELY, this software may be distributed under the terms of the
4871 + * GNU General Public License ("GPL") as published by the Free Software
4872 + * Foundation, either version 2 of that License or (at your option) any
4875 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
4876 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
4877 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
4878 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
4879 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
4880 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
4881 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
4882 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
4883 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
4884 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4887 +#include "dpni.h" /* DPNI_LINK_OPT_* */
4888 +#include "dpaa2-eth.h"
4890 +/* To be kept in sync with dpni_statistics */
4891 +static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
4894 + "rx mcast frames",
4896 + "rx bcast frames",
4900 + "tx mcast frames",
4902 + "tx bcast frames",
4904 + "rx filtered frames",
4905 + "rx discarded frames",
4906 + "rx nobuffer discards",
4907 + "tx discarded frames",
4908 + "tx confirmed frames",
4911 +#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
4913 +/* To be kept in sync with 'struct dpaa2_eth_drv_stats' */
4914 +static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
4915 + /* per-cpu stats */
4923 + /* how many times we had to retry the enqueue command */
4924 + "enqueue portal busy",
4926 + /* Channel stats */
4927 + /* How many times we had to retry the volatile dequeue command */
4928 + "dequeue portal busy",
4929 + "channel pull errors",
4930 + /* Number of notifications received */
4932 + "tx congestion state",
4933 +#ifdef CONFIG_FSL_QBMAN_DEBUG
4935 + "rx pending frames",
4936 + "rx pending bytes",
4937 + "tx conf pending frames",
4938 + "tx conf pending bytes",
4943 +#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
4945 +static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
4946 + struct ethtool_drvinfo *drvinfo)
4948 + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
4949 + strlcpy(drvinfo->version, dpaa2_eth_drv_version,
4950 + sizeof(drvinfo->version));
4951 + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
4952 + strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
4953 + sizeof(drvinfo->bus_info));
4956 +static int dpaa2_eth_get_settings(struct net_device *net_dev,
4957 + struct ethtool_cmd *cmd)
4959 + struct dpni_link_state state = {0};
4961 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4963 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
4965 + netdev_err(net_dev, "ERROR %d getting link state", err);
4969 + /* At the moment, we have no way of interrogating the DPMAC
4970 + * from the DPNI side - and for that matter there may exist
4971 + * no DPMAC at all. So for now we just don't report anything
4972 + * beyond the DPNI attributes.
4974 + if (state.options & DPNI_LINK_OPT_AUTONEG)
4975 + cmd->autoneg = AUTONEG_ENABLE;
4976 + if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
4977 + cmd->duplex = DUPLEX_FULL;
4978 + ethtool_cmd_speed_set(cmd, state.rate);
4984 +static int dpaa2_eth_set_settings(struct net_device *net_dev,
4985 + struct ethtool_cmd *cmd)
4987 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4988 + struct dpni_link_state state = {0};
4989 + struct dpni_link_cfg cfg = {0};
4992 + netdev_dbg(net_dev, "Setting link parameters...");
4994 + /* Need to interrogate on link state to get flow control params */
4995 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
4997 + netdev_err(net_dev, "ERROR %d getting link state", err);
5001 + cfg.options = state.options;
5002 + cfg.rate = ethtool_cmd_speed(cmd);
5003 + if (cmd->autoneg == AUTONEG_ENABLE)
5004 + cfg.options |= DPNI_LINK_OPT_AUTONEG;
5006 + cfg.options &= ~DPNI_LINK_OPT_AUTONEG;
5007 + if (cmd->duplex == DUPLEX_HALF)
5008 + cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX;
5010 + cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
5012 + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
5014 + /* ethtool will be loud enough if we return an error; no point
5015 + * in putting our own error message on the console by default
5017 + netdev_dbg(net_dev, "ERROR %d setting link cfg", err);
5023 +static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
5024 + struct ethtool_pauseparam *pause)
5026 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5027 + struct dpni_link_state state = {0};
5030 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
5032 + netdev_dbg(net_dev, "ERROR %d getting link state", err);
5034 + /* for now, pause frames autonegotiation is not separate */
5035 + pause->autoneg = !!(state.options & DPNI_LINK_OPT_AUTONEG);
5036 + pause->rx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE);
5037 + pause->tx_pause = pause->rx_pause ^
5038 + !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
5041 +static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
5042 + struct ethtool_pauseparam *pause)
5044 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5045 + struct dpni_link_state state = {0};
5046 + struct dpni_link_cfg cfg = {0};
5047 + u32 current_tx_pause;
5050 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
5052 + netdev_dbg(net_dev, "ERROR %d getting link state", err);
5056 + cfg.rate = state.rate;
5057 + cfg.options = state.options;
5058 + current_tx_pause = !!(cfg.options & DPNI_LINK_OPT_PAUSE) ^
5059 + !!(cfg.options & DPNI_LINK_OPT_ASYM_PAUSE);
5061 + if (pause->autoneg != !!(state.options & DPNI_LINK_OPT_AUTONEG))
5062 + netdev_warn(net_dev,
5063 + "WARN: Can't change pause frames autoneg separately\n");
5065 + if (pause->rx_pause)
5066 + cfg.options |= DPNI_LINK_OPT_PAUSE;
5068 + cfg.options &= ~DPNI_LINK_OPT_PAUSE;
5070 + if (pause->rx_pause ^ pause->tx_pause)
5071 + cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
5073 + cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
5075 + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
5077 + /* ethtool will be loud enough if we return an error; no point
5078 + * in putting our own error message on the console by default
5080 + netdev_dbg(net_dev, "ERROR %d setting link cfg", err);
5084 + /* Enable / disable taildrops if Tx pause frames have changed */
5085 + if (current_tx_pause == pause->tx_pause)
5088 + err = setup_fqs_taildrop(priv, !pause->tx_pause);
5090 + netdev_dbg(net_dev, "ERROR %d configuring taildrop", err);
5092 + priv->tx_pause_frames = pause->tx_pause;
5097 +static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
5103 + switch (stringset) {
5104 + case ETH_SS_STATS:
5105 + for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
5106 + strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
5107 + p += ETH_GSTRING_LEN;
5109 + for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
5110 + strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
5111 + p += ETH_GSTRING_LEN;
5117 +static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
5120 + case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
5121 + return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
5123 + return -EOPNOTSUPP;
5127 +/** Fill in hardware counters, as returned by MC.
5129 +static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
5130 + struct ethtool_stats *stats,
5133 + int i = 0; /* Current index in the data array */
5134 + int j = 0, k, err;
5135 + union dpni_statistics dpni_stats;
5137 +#ifdef CONFIG_FSL_QBMAN_DEBUG
5139 + u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
5140 + u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
5144 + u64 portal_busy = 0, pull_err = 0;
5145 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5146 + struct dpaa2_eth_drv_stats *extras;
5147 + struct dpaa2_eth_ch_stats *ch_stats;
5150 + sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
5152 + /* Print standard counters, from DPNI statistics */
5153 + for (j = 0; j <= 2; j++) {
5154 + err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
5157 + netdev_warn(net_dev, "Err %d getting DPNI stats page %d",
5162 + *(data + i++) = dpni_stats.page_0.ingress_all_frames;
5163 + *(data + i++) = dpni_stats.page_0.ingress_all_bytes;
5164 + *(data + i++) = dpni_stats.page_0.ingress_multicast_frames;
5165 + *(data + i++) = dpni_stats.page_0.ingress_multicast_bytes;
5166 + *(data + i++) = dpni_stats.page_0.ingress_broadcast_frames;
5167 + *(data + i++) = dpni_stats.page_0.ingress_broadcast_bytes;
5170 + *(data + i++) = dpni_stats.page_1.egress_all_frames;
5171 + *(data + i++) = dpni_stats.page_1.egress_all_bytes;
5172 + *(data + i++) = dpni_stats.page_1.egress_multicast_frames;
5173 + *(data + i++) = dpni_stats.page_1.egress_multicast_bytes;
5174 + *(data + i++) = dpni_stats.page_1.egress_broadcast_frames;
5175 + *(data + i++) = dpni_stats.page_1.egress_broadcast_bytes;
5178 + *(data + i++) = dpni_stats.page_2.ingress_filtered_frames;
5179 + *(data + i++) = dpni_stats.page_2.ingress_discarded_frames;
5180 + *(data + i++) = dpni_stats.page_2.ingress_nobuffer_discards;
5181 + *(data + i++) = dpni_stats.page_2.egress_discarded_frames;
5182 + *(data + i++) = dpni_stats.page_2.egress_confirmed_frames;
5189 + /* Print per-cpu extra stats */
5190 + for_each_online_cpu(k) {
5191 + extras = per_cpu_ptr(priv->percpu_extras, k);
5192 + for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
5193 + *((__u64 *)data + i + j) += *((__u64 *)extras + j);
5198 + /* We may be using fewer DPIOs than actual CPUs */
5199 + for (j = 0; j < priv->num_channels; j++) {
5200 + ch_stats = &priv->channel[j]->stats;
5201 + cdan += ch_stats->cdan;
5202 + portal_busy += ch_stats->dequeue_portal_busy;
5203 + pull_err += ch_stats->pull_err;
5206 + *(data + i++) = portal_busy;
5207 + *(data + i++) = pull_err;
5208 + *(data + i++) = cdan;
5210 + *(data + i++) = dpaa2_cscn_state_congested(priv->cscn_mem);
5212 +#ifdef CONFIG_FSL_QBMAN_DEBUG
5213 + for (j = 0; j < priv->num_fqs; j++) {
5214 + /* Print FQ instantaneous counts */
5215 + err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
5218 + netdev_warn(net_dev, "FQ query error %d", err);
5222 + if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
5223 + fcnt_tx_total += fcnt;
5224 + bcnt_tx_total += bcnt;
5226 + fcnt_rx_total += fcnt;
5227 + bcnt_rx_total += bcnt;
5231 + *(data + i++) = fcnt_rx_total;
5232 + *(data + i++) = bcnt_rx_total;
5233 + *(data + i++) = fcnt_tx_total;
5234 + *(data + i++) = bcnt_tx_total;
5236 + err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
5238 + netdev_warn(net_dev, "Buffer count query error %d\n", err);
5241 + *(data + i++) = buf_cnt;
5245 +static int cls_key_off(struct dpaa2_eth_priv *priv, int prot, int field)
5249 + for (i = 0; i < priv->num_hash_fields; i++) {
5250 + if (priv->hash_fields[i].cls_prot == prot &&
5251 + priv->hash_fields[i].cls_field == field)
5253 + off += priv->hash_fields[i].size;
5259 +static u8 cls_key_size(struct dpaa2_eth_priv *priv)
5263 + for (i = 0; i < priv->num_hash_fields; i++)
5264 + size += priv->hash_fields[i].size;
5269 +void check_cls_support(struct dpaa2_eth_priv *priv)
5271 + u8 key_size = cls_key_size(priv);
5272 + struct device *dev = priv->net_dev->dev.parent;
5274 + if (dpaa2_eth_hash_enabled(priv)) {
5275 + if (priv->dpni_attrs.fs_key_size < key_size) {
5276 + dev_info(dev, "max_dist_key_size = %d, expected %d. Hashing and steering are disabled\n",
5277 + priv->dpni_attrs.fs_key_size,
5281 + if (priv->num_hash_fields > DPKG_MAX_NUM_OF_EXTRACTS) {
5282 + dev_info(dev, "Too many key fields (max = %d). Hashing and steering are disabled\n",
5283 + DPKG_MAX_NUM_OF_EXTRACTS);
5288 + if (dpaa2_eth_fs_enabled(priv)) {
5289 + if (!dpaa2_eth_hash_enabled(priv)) {
5290 + dev_info(dev, "Insufficient queues. Steering is disabled\n");
5294 + if (!dpaa2_eth_fs_mask_enabled(priv)) {
5295 + dev_info(dev, "Key masks not supported. Steering is disabled\n");
5303 + priv->dpni_attrs.options |= DPNI_OPT_NO_FS;
5304 + priv->dpni_attrs.options &= ~DPNI_OPT_HAS_KEY_MASKING;
5307 +static int prep_l4_rule(struct dpaa2_eth_priv *priv,
5308 + struct ethtool_tcpip4_spec *l4_value,
5309 + struct ethtool_tcpip4_spec *l4_mask,
5310 + void *key, void *mask, u8 l4_proto)
5314 + if (l4_mask->tos) {
5315 + netdev_err(priv->net_dev, "ToS is not supported for IPv4 L4\n");
5316 + return -EOPNOTSUPP;
5319 + if (l4_mask->ip4src) {
5320 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
5321 + *(u32 *)(key + offset) = l4_value->ip4src;
5322 + *(u32 *)(mask + offset) = l4_mask->ip4src;
5325 + if (l4_mask->ip4dst) {
5326 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
5327 + *(u32 *)(key + offset) = l4_value->ip4dst;
5328 + *(u32 *)(mask + offset) = l4_mask->ip4dst;
5331 + if (l4_mask->psrc) {
5332 + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
5333 + *(u32 *)(key + offset) = l4_value->psrc;
5334 + *(u32 *)(mask + offset) = l4_mask->psrc;
5337 + if (l4_mask->pdst) {
5338 + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
5339 + *(u32 *)(key + offset) = l4_value->pdst;
5340 + *(u32 *)(mask + offset) = l4_mask->pdst;
5343 + /* Only apply the rule for the user-specified L4 protocol
5344 + * and if ethertype matches IPv4
5346 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
5347 + *(u16 *)(key + offset) = htons(ETH_P_IP);
5348 + *(u16 *)(mask + offset) = 0xFFFF;
5350 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
5351 + *(u8 *)(key + offset) = l4_proto;
5352 + *(u8 *)(mask + offset) = 0xFF;
5354 + /* TODO: check IP version */
5359 +static int prep_eth_rule(struct dpaa2_eth_priv *priv,
5360 + struct ethhdr *eth_value, struct ethhdr *eth_mask,
5361 + void *key, void *mask)
5365 + if (eth_mask->h_proto) {
5366 + netdev_err(priv->net_dev, "Ethertype is not supported!\n");
5367 + return -EOPNOTSUPP;
5370 + if (!is_zero_ether_addr(eth_mask->h_source)) {
5371 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_SA);
5372 + ether_addr_copy(key + offset, eth_value->h_source);
5373 + ether_addr_copy(mask + offset, eth_mask->h_source);
5376 + if (!is_zero_ether_addr(eth_mask->h_dest)) {
5377 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
5378 + ether_addr_copy(key + offset, eth_value->h_dest);
5379 + ether_addr_copy(mask + offset, eth_mask->h_dest);
5385 +static int prep_user_ip_rule(struct dpaa2_eth_priv *priv,
5386 + struct ethtool_usrip4_spec *uip_value,
5387 + struct ethtool_usrip4_spec *uip_mask,
5388 + void *key, void *mask)
5392 + if (uip_mask->tos)
5393 + return -EOPNOTSUPP;
5395 + if (uip_mask->ip4src) {
5396 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
5397 + *(u32 *)(key + offset) = uip_value->ip4src;
5398 + *(u32 *)(mask + offset) = uip_mask->ip4src;
5401 + if (uip_mask->ip4dst) {
5402 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
5403 + *(u32 *)(key + offset) = uip_value->ip4dst;
5404 + *(u32 *)(mask + offset) = uip_mask->ip4dst;
5407 + if (uip_mask->proto) {
5408 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
5409 + *(u32 *)(key + offset) = uip_value->proto;
5410 + *(u32 *)(mask + offset) = uip_mask->proto;
5412 + if (uip_mask->l4_4_bytes) {
5413 + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
5414 + *(u16 *)(key + offset) = uip_value->l4_4_bytes << 16;
5415 + *(u16 *)(mask + offset) = uip_mask->l4_4_bytes << 16;
5417 + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
5418 + *(u16 *)(key + offset) = uip_value->l4_4_bytes & 0xFFFF;
5419 + *(u16 *)(mask + offset) = uip_mask->l4_4_bytes & 0xFFFF;
5422 + /* Ethertype must be IP */
5423 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
5424 + *(u16 *)(key + offset) = htons(ETH_P_IP);
5425 + *(u16 *)(mask + offset) = 0xFFFF;
5430 +static int prep_ext_rule(struct dpaa2_eth_priv *priv,
5431 + struct ethtool_flow_ext *ext_value,
5432 + struct ethtool_flow_ext *ext_mask,
5433 + void *key, void *mask)
5437 + if (ext_mask->vlan_etype)
5438 + return -EOPNOTSUPP;
5440 + if (ext_mask->vlan_tci) {
5441 + offset = cls_key_off(priv, NET_PROT_VLAN, NH_FLD_VLAN_TCI);
5442 + *(u16 *)(key + offset) = ext_value->vlan_tci;
5443 + *(u16 *)(mask + offset) = ext_mask->vlan_tci;
5449 +static int prep_mac_ext_rule(struct dpaa2_eth_priv *priv,
5450 + struct ethtool_flow_ext *ext_value,
5451 + struct ethtool_flow_ext *ext_mask,
5452 + void *key, void *mask)
5456 + if (!is_zero_ether_addr(ext_mask->h_dest)) {
5457 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
5458 + ether_addr_copy(key + offset, ext_value->h_dest);
5459 + ether_addr_copy(mask + offset, ext_mask->h_dest);
5465 +static int prep_cls_rule(struct net_device *net_dev,
5466 + struct ethtool_rx_flow_spec *fs,
5469 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5470 + const u8 key_size = cls_key_size(priv);
5471 + void *msk = key + key_size;
5474 + memset(key, 0, key_size * 2);
5476 + switch (fs->flow_type & 0xff) {
5478 + err = prep_l4_rule(priv, &fs->h_u.tcp_ip4_spec,
5479 + &fs->m_u.tcp_ip4_spec, key, msk,
5483 + err = prep_l4_rule(priv, &fs->h_u.udp_ip4_spec,
5484 + &fs->m_u.udp_ip4_spec, key, msk,
5487 + case SCTP_V4_FLOW:
5488 + err = prep_l4_rule(priv, &fs->h_u.sctp_ip4_spec,
5489 + &fs->m_u.sctp_ip4_spec, key, msk,
5493 + err = prep_eth_rule(priv, &fs->h_u.ether_spec,
5494 + &fs->m_u.ether_spec, key, msk);
5496 + case IP_USER_FLOW:
5497 + err = prep_user_ip_rule(priv, &fs->h_u.usr_ip4_spec,
5498 + &fs->m_u.usr_ip4_spec, key, msk);
5501 + /* TODO: AH, ESP */
5502 + return -EOPNOTSUPP;
5507 + if (fs->flow_type & FLOW_EXT) {
5508 + err = prep_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
5513 + if (fs->flow_type & FLOW_MAC_EXT) {
5514 + err = prep_mac_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
5522 +static int del_cls(struct net_device *net_dev, int location);
5524 +static int do_cls(struct net_device *net_dev,
5525 + struct ethtool_rx_flow_spec *fs,
5528 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5529 + struct device *dev = net_dev->dev.parent;
5530 + const int rule_cnt = dpaa2_eth_fs_count(priv);
5531 + struct dpni_rule_cfg rule_cfg;
5532 + struct dpni_fs_action_cfg fs_act = { 0 };
5536 + if (!dpaa2_eth_fs_enabled(priv)) {
5537 + netdev_err(net_dev, "dev does not support steering!\n");
5538 + /* dev doesn't support steering */
5539 + return -EOPNOTSUPP;
5542 + if ((fs->ring_cookie != RX_CLS_FLOW_DISC &&
5543 + fs->ring_cookie >= dpaa2_eth_queue_count(priv)) ||
5544 + fs->location >= rule_cnt)
5547 +	/* When adding a new rule, check if location is available,
5548 + * and if not free the existing table entry before inserting
5551 + if (add && (priv->cls_rule[fs->location].in_use == true))
5552 + del_cls(net_dev, fs->location);
5554 + memset(&rule_cfg, 0, sizeof(rule_cfg));
5555 + rule_cfg.key_size = cls_key_size(priv);
5557 + /* allocate twice the key size, for the actual key and for mask */
5558 + dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL);
5562 + err = prep_cls_rule(net_dev, fs, dma_mem);
5564 + goto err_free_mem;
5566 + rule_cfg.key_iova = dma_map_single(dev, dma_mem,
5567 + rule_cfg.key_size * 2,
5570 + rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size;
5572 + if (fs->ring_cookie == RX_CLS_FLOW_DISC)
5573 + fs_act.options |= DPNI_FS_OPT_DISCARD;
5575 + fs_act.flow_id = fs->ring_cookie;
5578 + err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
5579 + 0, fs->location, &rule_cfg, &fs_act);
5581 + err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token,
5584 + dma_unmap_single(dev, rule_cfg.key_iova,
5585 + rule_cfg.key_size * 2, DMA_TO_DEVICE);
5588 + netdev_err(net_dev, "dpaa2_add/remove_cls() error %d\n", err);
5596 +static int add_cls(struct net_device *net_dev,
5597 + struct ethtool_rx_flow_spec *fs)
5599 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5602 + err = do_cls(net_dev, fs, true);
5606 + priv->cls_rule[fs->location].in_use = true;
5607 + priv->cls_rule[fs->location].fs = *fs;
5612 +static int del_cls(struct net_device *net_dev, int location)
5614 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5617 + err = do_cls(net_dev, &priv->cls_rule[location].fs, false);
5621 + priv->cls_rule[location].in_use = false;
5626 +static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
5627 + struct ethtool_rxnfc *rxnfc)
5631 + switch (rxnfc->cmd) {
5632 + case ETHTOOL_SRXCLSRLINS:
5633 + err = add_cls(net_dev, &rxnfc->fs);
5636 + case ETHTOOL_SRXCLSRLDEL:
5637 + err = del_cls(net_dev, rxnfc->fs.location);
5641 + err = -EOPNOTSUPP;
5647 +static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
5648 + struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
5650 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5651 + const int rule_cnt = dpaa2_eth_fs_count(priv);
5654 + switch (rxnfc->cmd) {
5655 + case ETHTOOL_GRXFH:
5656 + /* we purposely ignore cmd->flow_type, because the hashing key
5657 + * is the same (and fixed) for all protocols
5659 + rxnfc->data = priv->rx_flow_hash;
5662 + case ETHTOOL_GRXRINGS:
5663 + rxnfc->data = dpaa2_eth_queue_count(priv);
5666 + case ETHTOOL_GRXCLSRLCNT:
5667 + for (i = 0, rxnfc->rule_cnt = 0; i < rule_cnt; i++)
5668 + if (priv->cls_rule[i].in_use)
5669 + rxnfc->rule_cnt++;
5670 + rxnfc->data = rule_cnt;
5673 + case ETHTOOL_GRXCLSRULE:
5674 + if (!priv->cls_rule[rxnfc->fs.location].in_use)
5677 + rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs;
5680 + case ETHTOOL_GRXCLSRLALL:
5681 + for (i = 0, j = 0; i < rule_cnt; i++) {
5682 + if (!priv->cls_rule[i].in_use)
5684 + if (j == rxnfc->rule_cnt)
5686 + rule_locs[j++] = i;
5688 + rxnfc->rule_cnt = j;
5689 + rxnfc->data = rule_cnt;
5693 + return -EOPNOTSUPP;
5699 +const struct ethtool_ops dpaa2_ethtool_ops = {
5700 + .get_drvinfo = dpaa2_eth_get_drvinfo,
5701 + .get_link = ethtool_op_get_link,
5702 + .get_settings = dpaa2_eth_get_settings,
5703 + .set_settings = dpaa2_eth_set_settings,
5704 + .get_pauseparam = dpaa2_eth_get_pauseparam,
5705 + .set_pauseparam = dpaa2_eth_set_pauseparam,
5706 + .get_sset_count = dpaa2_eth_get_sset_count,
5707 + .get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
5708 + .get_strings = dpaa2_eth_get_strings,
5709 + .get_rxnfc = dpaa2_eth_get_rxnfc,
5710 + .set_rxnfc = dpaa2_eth_set_rxnfc,
5712 diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpkg.h b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
5713 new file mode 100644
5714 index 00000000..02290a08
5716 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
5718 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
5720 + * Redistribution and use in source and binary forms, with or without
5721 + * modification, are permitted provided that the following conditions are met:
5722 + * * Redistributions of source code must retain the above copyright
5723 + * notice, this list of conditions and the following disclaimer.
5724 + * * Redistributions in binary form must reproduce the above copyright
5725 + * notice, this list of conditions and the following disclaimer in the
5726 + * documentation and/or other materials provided with the distribution.
5727 + * * Neither the name of the above-listed copyright holders nor the
5728 + * names of any contributors may be used to endorse or promote products
5729 + * derived from this software without specific prior written permission.
5732 + * ALTERNATIVELY, this software may be distributed under the terms of the
5733 + * GNU General Public License ("GPL") as published by the Free Software
5734 + * Foundation, either version 2 of that License or (at your option) any
5737 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
5738 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
5739 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
5740 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
5741 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
5742 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
5743 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
5744 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
5745 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
5746 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
5747 + * POSSIBILITY OF SUCH DAMAGE.
5749 +#ifndef __FSL_DPKG_H_
5750 +#define __FSL_DPKG_H_
5752 +#include <linux/types.h>
5755 +/* Data Path Key Generator API
5756 + * Contains initialization APIs and runtime APIs for the Key Generator
5759 +/** Key Generator properties */
5762 + * Number of masks per key extraction
5764 +#define DPKG_NUM_OF_MASKS 4
5766 + * Number of extractions per key profile
5768 +#define DPKG_MAX_NUM_OF_EXTRACTS 10
5771 + * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
5772 + * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
5773 + * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
5774 + * @DPKG_FULL_FIELD: Extract a full field
5776 +enum dpkg_extract_from_hdr_type {
5777 + DPKG_FROM_HDR = 0,
5778 + DPKG_FROM_FIELD = 1,
5779 + DPKG_FULL_FIELD = 2
5783 + * enum dpkg_extract_type - Enumeration for selecting extraction type
5784 + * @DPKG_EXTRACT_FROM_HDR: Extract from the header
5785 + * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
5786 + * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result;
5787 + * e.g. can be used to extract header existence;
5788 + * please refer to 'Parse Result definition' section in the parser BG
5790 +enum dpkg_extract_type {
5791 + DPKG_EXTRACT_FROM_HDR = 0,
5792 + DPKG_EXTRACT_FROM_DATA = 1,
5793 + DPKG_EXTRACT_FROM_PARSE = 3
5797 + * struct dpkg_mask - A structure for defining a single extraction mask
5798 + * @mask: Byte mask for the extracted content
5799 + * @offset: Offset within the extracted content
5807 + * struct dpkg_extract - A structure for defining a single extraction
5808 + * @type: Determines how the union below is interpreted:
5809 + * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
5810 + * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
5811 + * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
5812 + * @extract: Selects extraction method
5813 + * @num_of_byte_masks: Defines the number of valid entries in the array below;
5814 + * This is also the number of bytes to be used as masks
5815 + * @masks: Masks parameters
5817 +struct dpkg_extract {
5818 + enum dpkg_extract_type type;
5820 + * union extract - Selects extraction method
5821 + * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
5822 + * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
5823 + * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
5827 + * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
5828 + * @prot: Any of the supported headers
5829 + * @type: Defines the type of header extraction:
5830 + * DPKG_FROM_HDR: use size & offset below;
5831 + * DPKG_FROM_FIELD: use field, size and offset below;
5832 + * DPKG_FULL_FIELD: use field below
5833 + * @field: One of the supported fields (NH_FLD_)
5835 + * @size: Size in bytes
5836 + * @offset: Byte offset
5837 + * @hdr_index: Clear for cases not listed below;
5838 + * Used for protocols that may have more than a single
5839 + * header, 0 indicates an outer header;
5840 + * Supported protocols (possible values):
5841 + * NET_PROT_VLAN (0, HDR_INDEX_LAST);
5842 + * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
5843 + * NET_PROT_IP(0, HDR_INDEX_LAST);
5844 + * NET_PROT_IPv4(0, HDR_INDEX_LAST);
5845 + * NET_PROT_IPv6(0, HDR_INDEX_LAST);
5849 + enum net_prot prot;
5850 + enum dpkg_extract_from_hdr_type type;
5857 + * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
5858 + * @size: Size in bytes
5859 + * @offset: Byte offset
5867 + * struct from_parse - Used when
5868 + * 'type = DPKG_EXTRACT_FROM_PARSE'
5869 + * @size: Size in bytes
5870 + * @offset: Byte offset
5878 + u8 num_of_byte_masks;
5879 + struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
5883 + * struct dpkg_profile_cfg - A structure for defining a full Key Generation
5885 + * @num_extracts: Defines the number of valid entries in the array below
5886 + * @extracts: Array of required extractions
5888 +struct dpkg_profile_cfg {
5890 + struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
5893 +#endif /* __FSL_DPKG_H_ */
5894 diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
5895 new file mode 100644
5896 index 00000000..fa353d75
5898 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
5900 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
5901 + * Copyright 2016 NXP
5903 + * Redistribution and use in source and binary forms, with or without
5904 + * modification, are permitted provided that the following conditions are met:
5905 + * * Redistributions of source code must retain the above copyright
5906 + * notice, this list of conditions and the following disclaimer.
5907 + * * Redistributions in binary form must reproduce the above copyright
5908 + * notice, this list of conditions and the following disclaimer in the
5909 + * documentation and/or other materials provided with the distribution.
5910 + * * Neither the name of the above-listed copyright holders nor the
5911 + * names of any contributors may be used to endorse or promote products
5912 + * derived from this software without specific prior written permission.
5915 + * ALTERNATIVELY, this software may be distributed under the terms of the
5916 + * GNU General Public License ("GPL") as published by the Free Software
5917 + * Foundation, either version 2 of that License or (at your option) any
5920 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
5921 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
5922 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
5923 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
5924 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
5925 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
5926 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
5927 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
5928 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
5929 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
5930 + * POSSIBILITY OF SUCH DAMAGE.
5932 +#ifndef _FSL_DPNI_CMD_H
5933 +#define _FSL_DPNI_CMD_H
5936 +#define DPNI_VER_MAJOR 7
5937 +#define DPNI_VER_MINOR 0
5938 +#define DPNI_CMD_BASE_VERSION 1
5939 +#define DPNI_CMD_ID_OFFSET 4
5941 +#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
5943 +#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
5944 +#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
5945 +#define DPNI_CMDID_CREATE DPNI_CMD(0x901)
5946 +#define DPNI_CMDID_DESTROY DPNI_CMD(0x900)
5947 +#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01)
5949 +#define DPNI_CMDID_ENABLE DPNI_CMD(0x002)
5950 +#define DPNI_CMDID_DISABLE DPNI_CMD(0x003)
5951 +#define DPNI_CMDID_GET_ATTR DPNI_CMD(0x004)
5952 +#define DPNI_CMDID_RESET DPNI_CMD(0x005)
5953 +#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006)
5955 +#define DPNI_CMDID_SET_IRQ DPNI_CMD(0x010)
5956 +#define DPNI_CMDID_GET_IRQ DPNI_CMD(0x011)
5957 +#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012)
5958 +#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013)
5959 +#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014)
5960 +#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015)
5961 +#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
5962 +#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
5964 +#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200)
5965 +#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
5967 +#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
5968 +#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212)
5969 +#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215)
5970 +#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
5971 +#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
5972 +#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A)
5973 +#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD(0x21B)
5975 +#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
5976 +#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
5977 +#define DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222)
5978 +#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223)
5979 +#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224)
5980 +#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225)
5981 +#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD(0x226)
5982 +#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227)
5983 +#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228)
5985 +#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
5987 +#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
5988 +#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
5989 +#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
5991 +#define DPNI_CMDID_GET_STATISTICS DPNI_CMD(0x25D)
5992 +#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E)
5993 +#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F)
5994 +#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260)
5995 +#define DPNI_CMDID_GET_TAILDROP DPNI_CMD(0x261)
5996 +#define DPNI_CMDID_SET_TAILDROP DPNI_CMD(0x262)
5998 +#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263)
6000 +#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD(0x264)
6001 +#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD(0x265)
6003 +#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
6004 +#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD(0x267)
6005 +#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD(0x268)
6006 +#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD(0x269)
6007 +#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD(0x26A)
6008 +#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
6009 +#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
6011 +/* Macros for accessing command fields smaller than 1byte */
6012 +#define DPNI_MASK(field) \
6013 + GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
6014 + DPNI_##field##_SHIFT)
6016 +#define dpni_set_field(var, field, val) \
6017 + ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field)))
6018 +#define dpni_get_field(var, field) \
6019 + (((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT)
6021 +struct dpni_cmd_open {
6025 +#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
6026 +struct dpni_cmd_set_pools {
6029 + u8 backup_pool_mask;
6031 + /* cmd word 0..4 */
6032 + __le32 dpbp_id[DPNI_MAX_DPBP];
6033 + /* cmd word 4..6 */
6034 + __le16 buffer_size[DPNI_MAX_DPBP];
6037 +/* The enable indication is always the least significant bit */
6038 +#define DPNI_ENABLE_SHIFT 0
6039 +#define DPNI_ENABLE_SIZE 1
6041 +struct dpni_rsp_is_enabled {
6045 +struct dpni_rsp_get_irq {
6046 + /* response word 0 */
6049 + /* response word 1 */
6051 + /* response word 2 */
6056 +struct dpni_cmd_set_irq_enable {
6062 +struct dpni_cmd_get_irq_enable {
6067 +struct dpni_rsp_get_irq_enable {
6071 +struct dpni_cmd_set_irq_mask {
6076 +struct dpni_cmd_get_irq_mask {
6081 +struct dpni_rsp_get_irq_mask {
6085 +struct dpni_cmd_get_irq_status {
6090 +struct dpni_rsp_get_irq_status {
6094 +struct dpni_cmd_clear_irq_status {
6099 +struct dpni_rsp_get_attr {
6100 + /* response word 0 */
6104 + u8 mac_filter_entries;
6106 + /* response word 1 */
6107 + u8 vlan_filter_entries;
6111 + __le16 fs_entries;
6113 + /* response word 2 */
6116 + __le16 wriop_version;
6119 +#define DPNI_ERROR_ACTION_SHIFT 0
6120 +#define DPNI_ERROR_ACTION_SIZE 4
6121 +#define DPNI_FRAME_ANN_SHIFT 4
6122 +#define DPNI_FRAME_ANN_SIZE 1
6124 +struct dpni_cmd_set_errors_behavior {
6126 + /* from least significant bit: error_action:4, set_frame_annotation:1 */
6130 +/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation
6131 + * buffer layouts, but they all share the same parameters.
6132 + * If one of the functions changes, below structure needs to be split.
6135 +#define DPNI_PASS_TS_SHIFT 0
6136 +#define DPNI_PASS_TS_SIZE 1
6137 +#define DPNI_PASS_PR_SHIFT 1
6138 +#define DPNI_PASS_PR_SIZE 1
6139 +#define DPNI_PASS_FS_SHIFT 2
6140 +#define DPNI_PASS_FS_SIZE 1
6142 +struct dpni_cmd_get_buffer_layout {
6146 +struct dpni_rsp_get_buffer_layout {
6147 + /* response word 0 */
6149 + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
6152 + /* response word 1 */
6153 + __le16 private_data_size;
6154 + __le16 data_align;
6159 +struct dpni_cmd_set_buffer_layout {
6164 + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
6168 + __le16 private_data_size;
6169 + __le16 data_align;
6174 +struct dpni_cmd_set_offload {
6180 +struct dpni_cmd_get_offload {
6185 +struct dpni_rsp_get_offload {
6190 +struct dpni_cmd_get_qdid {
6194 +struct dpni_rsp_get_qdid {
6198 +struct dpni_rsp_get_tx_data_offset {
6199 + __le16 data_offset;
6202 +struct dpni_cmd_get_statistics {
6206 +struct dpni_rsp_get_statistics {
6207 + __le64 counter[DPNI_STATISTICS_CNT];
6210 +struct dpni_cmd_set_link_cfg {
6220 +#define DPNI_LINK_STATE_SHIFT 0
6221 +#define DPNI_LINK_STATE_SIZE 1
6223 +struct dpni_rsp_get_link_state {
6224 + /* response word 0 */
6226 + /* from LSB: up:1 */
6229 + /* response word 1 */
6232 + /* response word 2 */
6236 +struct dpni_cmd_set_tx_shaping {
6238 + __le16 max_burst_size;
6241 + __le32 rate_limit;
6244 +struct dpni_cmd_set_max_frame_length {
6245 + __le16 max_frame_length;
6248 +struct dpni_rsp_get_max_frame_length {
6249 + __le16 max_frame_length;
6252 +struct dpni_cmd_set_multicast_promisc {
6256 +struct dpni_rsp_get_multicast_promisc {
6260 +struct dpni_cmd_set_unicast_promisc {
6264 +struct dpni_rsp_get_unicast_promisc {
6268 +struct dpni_cmd_set_primary_mac_addr {
6273 +struct dpni_rsp_get_primary_mac_addr {
6278 +struct dpni_rsp_get_port_mac_addr {
6283 +struct dpni_cmd_add_mac_addr {
6288 +struct dpni_cmd_remove_mac_addr {
6293 +#define DPNI_UNICAST_FILTERS_SHIFT 0
6294 +#define DPNI_UNICAST_FILTERS_SIZE 1
6295 +#define DPNI_MULTICAST_FILTERS_SHIFT 1
6296 +#define DPNI_MULTICAST_FILTERS_SIZE 1
6298 +struct dpni_cmd_clear_mac_filters {
6299 + /* from LSB: unicast:1, multicast:1 */
6303 +#define DPNI_DIST_MODE_SHIFT 0
6304 +#define DPNI_DIST_MODE_SIZE 4
6305 +#define DPNI_MISS_ACTION_SHIFT 4
6306 +#define DPNI_MISS_ACTION_SIZE 4
6308 +struct dpni_cmd_set_rx_tc_dist {
6312 + /* from LSB: dist_mode:4, miss_action:4 */
6315 + __le16 default_flow_id;
6316 + /* cmd word 1..5 */
6319 + __le64 key_cfg_iova;
6322 +/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at
6325 +struct dpni_mask_cfg {
6330 +#define DPNI_EFH_TYPE_SHIFT 0
6331 +#define DPNI_EFH_TYPE_SIZE 4
6332 +#define DPNI_EXTRACT_TYPE_SHIFT 0
6333 +#define DPNI_EXTRACT_TYPE_SIZE 4
6335 +struct dpni_dist_extract {
6338 + /* EFH type stored in the 4 least significant bits */
6346 + u8 num_of_repeats;
6347 + u8 num_of_byte_masks;
6348 + /* Extraction type is stored in the 4 LSBs */
6352 + struct dpni_mask_cfg masks[4];
6355 +struct dpni_ext_set_rx_tc_dist {
6356 + /* extension word 0 */
6360 + struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
6363 +struct dpni_cmd_get_queue {
6369 +#define DPNI_DEST_TYPE_SHIFT 0
6370 +#define DPNI_DEST_TYPE_SIZE 4
6371 +#define DPNI_STASH_CTRL_SHIFT 6
6372 +#define DPNI_STASH_CTRL_SIZE 1
6373 +#define DPNI_HOLD_ACTIVE_SHIFT 7
6374 +#define DPNI_HOLD_ACTIVE_SIZE 1
6376 +struct dpni_rsp_get_queue {
6377 + /* response word 0 */
6379 + /* response word 1 */
6383 + /* From LSB: dest_type:4, pad:2, flc_stash_ctrl:1, hold_active:1 */
6385 + /* response word 2 */
6387 + /* response word 3 */
6388 + __le64 user_context;
6389 + /* response word 4 */
6394 +struct dpni_cmd_set_queue {
6409 + __le64 user_context;
6412 +struct dpni_cmd_add_fs_entry {
6427 +struct dpni_cmd_remove_fs_entry {
6439 +struct dpni_cmd_set_taildrop {
6441 + u8 congestion_point;
6447 + /* Only least significant bit is relevant */
6455 +struct dpni_cmd_get_taildrop {
6456 + u8 congestion_point;
6462 +struct dpni_rsp_get_taildrop {
6466 + /* only least significant bit is relevant */
6474 +#define DPNI_DEST_TYPE_SHIFT 0
6475 +#define DPNI_DEST_TYPE_SIZE 4
6476 +#define DPNI_CONG_UNITS_SHIFT 4
6477 +#define DPNI_CONG_UNITS_SIZE 2
6479 +struct dpni_cmd_set_congestion_notification {
6486 + u16 notification_mode;
6488 + /* from LSB: dest_type: 4 units:2 */
6495 + u32 threshold_entry;
6496 + u32 threshold_exit;
6499 +#endif /* _FSL_DPNI_CMD_H */
6500 diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.c b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
6501 new file mode 100644
6502 index 00000000..3c23e4dc
6504 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
6506 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
6507 + * Copyright 2016 NXP
6509 + * Redistribution and use in source and binary forms, with or without
6510 + * modification, are permitted provided that the following conditions are met:
6511 + * * Redistributions of source code must retain the above copyright
6512 + * notice, this list of conditions and the following disclaimer.
6513 + * * Redistributions in binary form must reproduce the above copyright
6514 + * notice, this list of conditions and the following disclaimer in the
6515 + * documentation and/or other materials provided with the distribution.
6516 + * * Neither the name of the above-listed copyright holders nor the
6517 + * names of any contributors may be used to endorse or promote products
6518 + * derived from this software without specific prior written permission.
6521 + * ALTERNATIVELY, this software may be distributed under the terms of the
6522 + * GNU General Public License ("GPL") as published by the Free Software
6523 + * Foundation, either version 2 of that License or (at your option) any
6526 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
6527 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
6528 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
6529 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
6530 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
6531 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
6532 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
6533 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
6534 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
6535 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
6536 + * POSSIBILITY OF SUCH DAMAGE.
6538 +#include "../../fsl-mc/include/mc-sys.h"
6539 +#include "../../fsl-mc/include/mc-cmd.h"
6541 +#include "dpni-cmd.h"
6544 + * dpni_prepare_key_cfg() - function prepare extract parameters
6545 + * @cfg: defining a full Key Generation profile (rule)
6546 + * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
6548 + * This function has to be called before the following functions:
6549 + * - dpni_set_rx_tc_dist()
6550 + * - dpni_set_qos_table()
6552 +int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf)
6555 + struct dpni_ext_set_rx_tc_dist *dpni_ext;
6556 + struct dpni_dist_extract *extr;
6558 + if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
6561 + dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf;
6562 + dpni_ext->num_extracts = cfg->num_extracts;
6564 + for (i = 0; i < cfg->num_extracts; i++) {
6565 + extr = &dpni_ext->extracts[i];
6567 + switch (cfg->extracts[i].type) {
6568 + case DPKG_EXTRACT_FROM_HDR:
6569 + extr->prot = cfg->extracts[i].extract.from_hdr.prot;
6570 + dpni_set_field(extr->efh_type, EFH_TYPE,
6571 + cfg->extracts[i].extract.from_hdr.type);
6572 + extr->size = cfg->extracts[i].extract.from_hdr.size;
6573 + extr->offset = cfg->extracts[i].extract.from_hdr.offset;
6574 + extr->field = cpu_to_le32(
6575 + cfg->extracts[i].extract.from_hdr.field);
6577 + cfg->extracts[i].extract.from_hdr.hdr_index;
6579 + case DPKG_EXTRACT_FROM_DATA:
6580 + extr->size = cfg->extracts[i].extract.from_data.size;
6582 + cfg->extracts[i].extract.from_data.offset;
6584 + case DPKG_EXTRACT_FROM_PARSE:
6585 + extr->size = cfg->extracts[i].extract.from_parse.size;
6587 + cfg->extracts[i].extract.from_parse.offset;
6593 + extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
6594 + dpni_set_field(extr->extract_type, EXTRACT_TYPE,
6595 + cfg->extracts[i].type);
6597 + for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
6598 + extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
6599 + extr->masks[j].offset =
6600 + cfg->extracts[i].masks[j].offset;
6608 + * dpni_open() - Open a control session for the specified object
6609 + * @mc_io: Pointer to MC portal's I/O object
6610 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6611 + * @dpni_id: DPNI unique ID
6612 + * @token: Returned token; use in subsequent API calls
6614 + * This function can be used to open a control session for an
6615 + * already created object; an object may have been declared in
6616 + * the DPL or by calling the dpni_create() function.
6617 + * This function returns a unique authentication token,
6618 + * associated with the specific object ID and the specific MC
6619 + * portal; this token must be used in all subsequent commands for
6620 + * this specific object.
6622 + * Return: '0' on Success; Error code otherwise.
6624 +int dpni_open(struct fsl_mc_io *mc_io,
6629 + struct mc_command cmd = { 0 };
6630 + struct dpni_cmd_open *cmd_params;
6634 + /* prepare command */
6635 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
6638 + cmd_params = (struct dpni_cmd_open *)cmd.params;
6639 + cmd_params->dpni_id = cpu_to_le32(dpni_id);
6641 + /* send command to mc*/
6642 + err = mc_send_command(mc_io, &cmd);
6646 + /* retrieve response parameters */
6647 + *token = mc_cmd_hdr_read_token(&cmd);
6653 + * dpni_close() - Close the control session of the object
6654 + * @mc_io: Pointer to MC portal's I/O object
6655 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6656 + * @token: Token of DPNI object
6658 + * After this function is called, no further operations are
6659 + * allowed on the object without opening a new control session.
6661 + * Return: '0' on Success; Error code otherwise.
6663 +int dpni_close(struct fsl_mc_io *mc_io,
6667 + struct mc_command cmd = { 0 };
6669 + /* prepare command */
6670 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
6674 + /* send command to mc*/
6675 + return mc_send_command(mc_io, &cmd);
6679 + * dpni_set_pools() - Set buffer pools configuration
6680 + * @mc_io: Pointer to MC portal's I/O object
6681 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6682 + * @token: Token of DPNI object
6683 + * @cfg: Buffer pools configuration
6685 + * mandatory for DPNI operation
6686 + * warning:Allowed only when DPNI is disabled
6688 + * Return: '0' on Success; Error code otherwise.
6690 +int dpni_set_pools(struct fsl_mc_io *mc_io,
6693 + const struct dpni_pools_cfg *cfg)
6695 + struct mc_command cmd = { 0 };
6696 + struct dpni_cmd_set_pools *cmd_params;
6699 + /* prepare command */
6700 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
6703 + cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
6704 + cmd_params->num_dpbp = cfg->num_dpbp;
6705 + for (i = 0; i < DPNI_MAX_DPBP; i++) {
6706 + cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
6707 + cmd_params->buffer_size[i] =
6708 + cpu_to_le16(cfg->pools[i].buffer_size);
6709 + cmd_params->backup_pool_mask |=
6710 + DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);
6713 + /* send command to mc*/
6714 + return mc_send_command(mc_io, &cmd);
6718 + * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
6719 + * @mc_io: Pointer to MC portal's I/O object
6720 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6721 + * @token: Token of DPNI object
6723 + * Return: '0' on Success; Error code otherwise.
6725 +int dpni_enable(struct fsl_mc_io *mc_io,
6729 + struct mc_command cmd = { 0 };
6731 + /* prepare command */
6732 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
6736 + /* send command to mc*/
6737 + return mc_send_command(mc_io, &cmd);
6741 + * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
6742 + * @mc_io: Pointer to MC portal's I/O object
6743 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6744 + * @token: Token of DPNI object
6746 + * Return: '0' on Success; Error code otherwise.
6748 +int dpni_disable(struct fsl_mc_io *mc_io,
6752 + struct mc_command cmd = { 0 };
6754 + /* prepare command */
6755 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
6759 + /* send command to mc*/
6760 + return mc_send_command(mc_io, &cmd);
6764 + * dpni_is_enabled() - Check if the DPNI is enabled.
6765 + * @mc_io: Pointer to MC portal's I/O object
6766 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6767 + * @token: Token of DPNI object
6768 + * @en: Returns '1' if object is enabled; '0' otherwise
6770 + * Return: '0' on Success; Error code otherwise.
6772 +int dpni_is_enabled(struct fsl_mc_io *mc_io,
6777 + struct mc_command cmd = { 0 };
6778 + struct dpni_rsp_is_enabled *rsp_params;
6781 + /* prepare command */
6782 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED,
6786 + /* send command to mc*/
6787 + err = mc_send_command(mc_io, &cmd);
6791 + /* retrieve response parameters */
6792 + rsp_params = (struct dpni_rsp_is_enabled *)cmd.params;
6793 + *en = dpni_get_field(rsp_params->enabled, ENABLE);
6799 + * dpni_reset() - Reset the DPNI, returns the object to initial state.
6800 + * @mc_io: Pointer to MC portal's I/O object
6801 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6802 + * @token: Token of DPNI object
6804 + * Return: '0' on Success; Error code otherwise.
6806 +int dpni_reset(struct fsl_mc_io *mc_io,
6810 + struct mc_command cmd = { 0 };
6812 + /* prepare command */
6813 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
6817 + /* send command to mc*/
6818 + return mc_send_command(mc_io, &cmd);
6822 + * dpni_set_irq_enable() - Set overall interrupt state.
6823 + * @mc_io: Pointer to MC portal's I/O object
6824 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6825 + * @token: Token of DPNI object
6826 + * @irq_index: The interrupt index to configure
6827 + * @en: Interrupt state: - enable = 1, disable = 0
6829 + * Allows GPP software to control when interrupts are generated.
6830 + * Each interrupt can have up to 32 causes. The enable/disable control's the
6831 + * overall interrupt state. if the interrupt is disabled no causes will cause
6834 + * Return: '0' on Success; Error code otherwise.
6836 +int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
6842 + struct mc_command cmd = { 0 };
6843 + struct dpni_cmd_set_irq_enable *cmd_params;
6845 + /* prepare command */
6846 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE,
6849 + cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params;
6850 + dpni_set_field(cmd_params->enable, ENABLE, en);
6851 + cmd_params->irq_index = irq_index;
6853 + /* send command to mc*/
6854 + return mc_send_command(mc_io, &cmd);
6858 + * dpni_get_irq_enable() - Get overall interrupt state
6859 + * @mc_io: Pointer to MC portal's I/O object
6860 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6861 + * @token: Token of DPNI object
6862 + * @irq_index: The interrupt index to configure
6863 + * @en: Returned interrupt state - enable = 1, disable = 0
6865 + * Return: '0' on Success; Error code otherwise.
6867 +int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
6873 + struct mc_command cmd = { 0 };
6874 + struct dpni_cmd_get_irq_enable *cmd_params;
6875 + struct dpni_rsp_get_irq_enable *rsp_params;
6879 + /* prepare command */
6880 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE,
6883 + cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params;
6884 + cmd_params->irq_index = irq_index;
6886 + /* send command to mc*/
6887 + err = mc_send_command(mc_io, &cmd);
6891 + /* retrieve response parameters */
6892 + rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params;
6893 + *en = dpni_get_field(rsp_params->enabled, ENABLE);
6899 + * dpni_set_irq_mask() - Set interrupt mask.
6900 + * @mc_io: Pointer to MC portal's I/O object
6901 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6902 + * @token: Token of DPNI object
6903 + * @irq_index: The interrupt index to configure
6904 + * @mask: event mask to trigger interrupt;
6906 + * 0 = ignore event
6907 + * 1 = consider event for asserting IRQ
6909 + * Every interrupt can have up to 32 causes and the interrupt model supports
6910 + * masking/unmasking each cause independently
6912 + * Return: '0' on Success; Error code otherwise.
6914 +int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
6920 + struct mc_command cmd = { 0 };
6921 + struct dpni_cmd_set_irq_mask *cmd_params;
6923 + /* prepare command */
6924 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK,
6927 + cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params;
6928 + cmd_params->mask = cpu_to_le32(mask);
6929 + cmd_params->irq_index = irq_index;
6931 + /* send command to mc*/
6932 + return mc_send_command(mc_io, &cmd);
6936 + * dpni_get_irq_mask() - Get interrupt mask.
6937 + * @mc_io: Pointer to MC portal's I/O object
6938 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6939 + * @token: Token of DPNI object
6940 + * @irq_index:	The interrupt index to query
6941 + * @mask: Returned event mask to trigger interrupt
6943 + * Every interrupt can have up to 32 causes and the interrupt model supports
6944 + * masking/unmasking each cause independently
6946 + * Return: '0' on Success; Error code otherwise.
6948 +int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
6954 + struct mc_command cmd = { 0 };
6955 + struct dpni_cmd_get_irq_mask *cmd_params;
6956 + struct dpni_rsp_get_irq_mask *rsp_params;
6959 + /* prepare command */
6960 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK,
6963 + cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params;
6964 + cmd_params->irq_index = irq_index;
6966 + /* send command to mc*/
6967 + err = mc_send_command(mc_io, &cmd);
6971 + /* retrieve response parameters */
6972 + rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params;
6973 + *mask = le32_to_cpu(rsp_params->mask);
6979 + * dpni_get_irq_status() - Get the current status of any pending interrupts.
6980 + * @mc_io: Pointer to MC portal's I/O object
6981 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
6982 + * @token: Token of DPNI object
6983 + * @irq_index:	The interrupt index to query
6984 + * @status:	Returned interrupt status - one bit per cause:
6985 + * 0 = no interrupt pending
6986 + * 1 = interrupt pending
6988 + * Return: '0' on Success; Error code otherwise.
6990 +int dpni_get_irq_status(struct fsl_mc_io *mc_io,
6996 + struct mc_command cmd = { 0 };
6997 + struct dpni_cmd_get_irq_status *cmd_params;
6998 + struct dpni_rsp_get_irq_status *rsp_params;
7001 + /* prepare command */
7002 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS,
7005 + cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params;
7006 + cmd_params->status = cpu_to_le32(*status);
7007 + cmd_params->irq_index = irq_index;
7009 + /* send command to mc*/
7010 + err = mc_send_command(mc_io, &cmd);
7014 + /* retrieve response parameters */
7015 + rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params;
7016 + *status = le32_to_cpu(rsp_params->status);
7022 + * dpni_clear_irq_status() - Clear a pending interrupt's status
7023 + * @mc_io: Pointer to MC portal's I/O object
7024 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7025 + * @token: Token of DPNI object
7026 + * @irq_index:	The interrupt index whose status bits are to be cleared
7027 + * @status: bits to clear (W1C) - one bit per cause:
7028 + * 0 = don't change
7029 + * 1 = clear status bit
7031 + * Return: '0' on Success; Error code otherwise.
7033 +int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
7039 + struct mc_command cmd = { 0 };
7040 + struct dpni_cmd_clear_irq_status *cmd_params;
7042 + /* prepare command */
7043 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS,
7046 + cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params;
7047 + cmd_params->irq_index = irq_index;
7048 + cmd_params->status = cpu_to_le32(status);
7050 + /* send command to mc*/
7051 + return mc_send_command(mc_io, &cmd);
7055 + * dpni_get_attributes() - Retrieve DPNI attributes.
7056 + * @mc_io: Pointer to MC portal's I/O object
7057 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7058 + * @token: Token of DPNI object
7059 + * @attr: Object's attributes
7061 + * Return: '0' on Success; Error code otherwise.
7063 +int dpni_get_attributes(struct fsl_mc_io *mc_io,
7066 + struct dpni_attr *attr)
7068 + struct mc_command cmd = { 0 };
7069 + struct dpni_rsp_get_attr *rsp_params;
7073 + /* prepare command */
7074 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR,
7078 + /* send command to mc*/
7079 + err = mc_send_command(mc_io, &cmd);
7083 + /* retrieve response parameters */
7084 + rsp_params = (struct dpni_rsp_get_attr *)cmd.params;
7085 + attr->options = le32_to_cpu(rsp_params->options);
7086 + attr->num_queues = rsp_params->num_queues;
7087 + attr->num_tcs = rsp_params->num_tcs;
7088 + attr->mac_filter_entries = rsp_params->mac_filter_entries;
7089 + attr->vlan_filter_entries = rsp_params->vlan_filter_entries;
7090 + attr->qos_entries = rsp_params->qos_entries;
7091 + attr->fs_entries = le16_to_cpu(rsp_params->fs_entries);
7092 + attr->qos_key_size = rsp_params->qos_key_size;
7093 + attr->fs_key_size = rsp_params->fs_key_size;
7094 + attr->wriop_version = le16_to_cpu(rsp_params->wriop_version);
7100 + * dpni_set_errors_behavior() - Set errors behavior
7101 + * @mc_io: Pointer to MC portal's I/O object
7102 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7103 + * @token: Token of DPNI object
7104 + * @cfg: Errors configuration
7106 + * this function may be called numerous times with different
7109 + * Return: '0' on Success; Error code otherwise.
7111 +int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
7114 + struct dpni_error_cfg *cfg)
7116 + struct mc_command cmd = { 0 };
7117 + struct dpni_cmd_set_errors_behavior *cmd_params;
7119 + /* prepare command */
7120 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR,
7123 + cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params;
7124 + cmd_params->errors = cpu_to_le32(cfg->errors);
7125 + dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action);
7126 + dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation);
7128 + /* send command to mc*/
7129 + return mc_send_command(mc_io, &cmd);
7133 + * dpni_get_buffer_layout() - Retrieve buffer layout attributes.
7134 + * @mc_io: Pointer to MC portal's I/O object
7135 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7136 + * @token: Token of DPNI object
7137 + * @qtype: Type of queue to retrieve configuration for
7138 + * @layout: Returns buffer layout attributes
7140 + * Return: '0' on Success; Error code otherwise.
7142 +int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
7145 + enum dpni_queue_type qtype,
7146 + struct dpni_buffer_layout *layout)
7148 + struct mc_command cmd = { 0 };
7149 + struct dpni_cmd_get_buffer_layout *cmd_params;
7150 + struct dpni_rsp_get_buffer_layout *rsp_params;
7153 + /* prepare command */
7154 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT,
7157 + cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params;
7158 + cmd_params->qtype = qtype;
7160 + /* send command to mc*/
7161 + err = mc_send_command(mc_io, &cmd);
7165 + /* retrieve response parameters */
7166 + rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params;
7167 + layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS);
7168 + layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR);
7169 + layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS);
7170 + layout->private_data_size = le16_to_cpu(rsp_params->private_data_size);
7171 + layout->data_align = le16_to_cpu(rsp_params->data_align);
7172 + layout->data_head_room = le16_to_cpu(rsp_params->head_room);
7173 + layout->data_tail_room = le16_to_cpu(rsp_params->tail_room);
7179 + * dpni_set_buffer_layout() - Set buffer layout configuration.
7180 + * @mc_io: Pointer to MC portal's I/O object
7181 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7182 + * @token: Token of DPNI object
7183 + * @qtype: Type of queue this configuration applies to
7184 + * @layout: Buffer layout configuration
7186 + * Return: '0' on Success; Error code otherwise.
7188 + * @warning Allowed only when DPNI is disabled
7190 +int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
7193 + enum dpni_queue_type qtype,
7194 + const struct dpni_buffer_layout *layout)
7196 + struct mc_command cmd = { 0 };
7197 + struct dpni_cmd_set_buffer_layout *cmd_params;
7199 + /* prepare command */
7200 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT,
7203 + cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params;
7204 + cmd_params->qtype = qtype;
7205 + cmd_params->options = cpu_to_le16(layout->options);
7206 + dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp);
7207 + dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result);
7208 + dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status);
7209 + cmd_params->private_data_size = cpu_to_le16(layout->private_data_size);
7210 + cmd_params->data_align = cpu_to_le16(layout->data_align);
7211 + cmd_params->head_room = cpu_to_le16(layout->data_head_room);
7212 + cmd_params->tail_room = cpu_to_le16(layout->data_tail_room);
7214 + /* send command to mc*/
7215 + return mc_send_command(mc_io, &cmd);
7219 + * dpni_set_offload() - Set DPNI offload configuration.
7220 + * @mc_io: Pointer to MC portal's I/O object
7221 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7222 + * @token: Token of DPNI object
7223 + * @type: Type of DPNI offload
7224 + * @config: Offload configuration.
7225 + * For checksum offloads, non-zero value enables the offload
7227 + * Return: '0' on Success; Error code otherwise.
7229 + * @warning Allowed only when DPNI is disabled
7232 +int dpni_set_offload(struct fsl_mc_io *mc_io,
7235 + enum dpni_offload type,
7238 + struct mc_command cmd = { 0 };
7239 + struct dpni_cmd_set_offload *cmd_params;
7241 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
7244 + cmd_params = (struct dpni_cmd_set_offload *)cmd.params;
7245 + cmd_params->dpni_offload = type;
7246 + cmd_params->config = cpu_to_le32(config);
7248 + return mc_send_command(mc_io, &cmd);
7251 +int dpni_get_offload(struct fsl_mc_io *mc_io,
7254 + enum dpni_offload type,
7257 + struct mc_command cmd = { 0 };
7258 + struct dpni_cmd_get_offload *cmd_params;
7259 + struct dpni_rsp_get_offload *rsp_params;
7262 + /* prepare command */
7263 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD,
7266 + cmd_params = (struct dpni_cmd_get_offload *)cmd.params;
7267 + cmd_params->dpni_offload = type;
7269 + /* send command to mc*/
7270 + err = mc_send_command(mc_io, &cmd);
7274 + /* retrieve response parameters */
7275 + rsp_params = (struct dpni_rsp_get_offload *)cmd.params;
7276 + *config = le32_to_cpu(rsp_params->config);
7282 + * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
7283 + * for enqueue operations
7284 + * @mc_io: Pointer to MC portal's I/O object
7285 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7286 + * @token: Token of DPNI object
7287 + * @qtype: Type of queue to receive QDID for
7288 + * @qdid: Returned virtual QDID value that should be used as an argument
7289 + * in all enqueue operations
7291 + * Return: '0' on Success; Error code otherwise.
7293 +int dpni_get_qdid(struct fsl_mc_io *mc_io,
7296 + enum dpni_queue_type qtype,
7299 + struct mc_command cmd = { 0 };
7300 + struct dpni_cmd_get_qdid *cmd_params;
7301 + struct dpni_rsp_get_qdid *rsp_params;
7304 + /* prepare command */
7305 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
7308 + cmd_params = (struct dpni_cmd_get_qdid *)cmd.params;
7309 + cmd_params->qtype = qtype;
7311 + /* send command to mc*/
7312 + err = mc_send_command(mc_io, &cmd);
7316 + /* retrieve response parameters */
7317 + rsp_params = (struct dpni_rsp_get_qdid *)cmd.params;
7318 + *qdid = le16_to_cpu(rsp_params->qdid);
7324 + * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
7325 + * @mc_io: Pointer to MC portal's I/O object
7326 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7327 + * @token: Token of DPNI object
7328 + * @data_offset: Tx data offset (from start of buffer)
7330 + * Return: '0' on Success; Error code otherwise.
7332 +int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
7337 + struct mc_command cmd = { 0 };
7338 + struct dpni_rsp_get_tx_data_offset *rsp_params;
7341 + /* prepare command */
7342 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
7346 + /* send command to mc*/
7347 + err = mc_send_command(mc_io, &cmd);
7351 + /* retrieve response parameters */
7352 + rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params;
7353 + *data_offset = le16_to_cpu(rsp_params->data_offset);
7359 + * dpni_set_link_cfg() - set the link configuration.
7360 + * @mc_io: Pointer to MC portal's I/O object
7361 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7362 + * @token: Token of DPNI object
7363 + * @cfg: Link configuration
7365 + * Return: '0' on Success; Error code otherwise.
7367 +int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
7370 + const struct dpni_link_cfg *cfg)
7372 + struct mc_command cmd = { 0 };
7373 + struct dpni_cmd_set_link_cfg *cmd_params;
7375 + /* prepare command */
7376 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
7379 + cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params;
7380 + cmd_params->rate = cpu_to_le32(cfg->rate);
7381 + cmd_params->options = cpu_to_le64(cfg->options);
7383 + /* send command to mc*/
7384 + return mc_send_command(mc_io, &cmd);
7388 + * dpni_get_link_state() - Return the link state (either up or down)
7389 + * @mc_io: Pointer to MC portal's I/O object
7390 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7391 + * @token: Token of DPNI object
7392 + * @state: Returned link state;
7394 + * Return: '0' on Success; Error code otherwise.
7396 +int dpni_get_link_state(struct fsl_mc_io *mc_io,
7399 + struct dpni_link_state *state)
7401 + struct mc_command cmd = { 0 };
7402 + struct dpni_rsp_get_link_state *rsp_params;
7405 + /* prepare command */
7406 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
7410 + /* send command to mc*/
7411 + err = mc_send_command(mc_io, &cmd);
7415 + /* retrieve response parameters */
7416 + rsp_params = (struct dpni_rsp_get_link_state *)cmd.params;
7417 + state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
7418 + state->rate = le32_to_cpu(rsp_params->rate);
7419 + state->options = le64_to_cpu(rsp_params->options);
7425 + * dpni_set_tx_shaping() - Set the transmit shaping
7426 + * @mc_io: Pointer to MC portal's I/O object
7427 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7428 + * @token: Token of DPNI object
7429 + * @tx_shaper: tx shaping configuration
7431 + * Return: '0' on Success; Error code otherwise.
7433 +int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
7436 + const struct dpni_tx_shaping_cfg *tx_shaper)
7438 + struct mc_command cmd = { 0 };
7439 + struct dpni_cmd_set_tx_shaping *cmd_params;
7441 + /* prepare command */
7442 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
7445 + cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params;
7446 + cmd_params->max_burst_size = cpu_to_le16(tx_shaper->max_burst_size);
7447 + cmd_params->rate_limit = cpu_to_le32(tx_shaper->rate_limit);
7449 + /* send command to mc*/
7450 + return mc_send_command(mc_io, &cmd);
7454 + * dpni_set_max_frame_length() - Set the maximum received frame length.
7455 + * @mc_io: Pointer to MC portal's I/O object
7456 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7457 + * @token: Token of DPNI object
7458 + * @max_frame_length: Maximum received frame length (in
7459 + * bytes); frame is discarded if its
7460 + * length exceeds this value
7462 + * Return: '0' on Success; Error code otherwise.
7464 +int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
7467 + u16 max_frame_length)
7469 + struct mc_command cmd = { 0 };
7470 + struct dpni_cmd_set_max_frame_length *cmd_params;
7472 + /* prepare command */
7473 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH,
7476 + cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params;
7477 + cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
7479 + /* send command to mc*/
7480 + return mc_send_command(mc_io, &cmd);
7484 + * dpni_get_max_frame_length() - Get the maximum received frame length.
7485 + * @mc_io: Pointer to MC portal's I/O object
7486 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7487 + * @token: Token of DPNI object
7488 + * @max_frame_length: Maximum received frame length (in
7489 + * bytes); frame is discarded if its
7490 + * length exceeds this value
7492 + * Return: '0' on Success; Error code otherwise.
7494 +int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
7497 + u16 *max_frame_length)
7499 + struct mc_command cmd = { 0 };
7500 + struct dpni_rsp_get_max_frame_length *rsp_params;
7503 + /* prepare command */
7504 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
7508 + /* send command to mc*/
7509 + err = mc_send_command(mc_io, &cmd);
7513 + /* retrieve response parameters */
7514 + rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params;
7515 + *max_frame_length = le16_to_cpu(rsp_params->max_frame_length);
7521 + * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
7522 + * @mc_io: Pointer to MC portal's I/O object
7523 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7524 + * @token: Token of DPNI object
7525 + * @en: Set to '1' to enable; '0' to disable
7527 + * Return: '0' on Success; Error code otherwise.
7529 +int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
7534 + struct mc_command cmd = { 0 };
7535 + struct dpni_cmd_set_multicast_promisc *cmd_params;
7537 + /* prepare command */
7538 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC,
7541 + cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params;
7542 + dpni_set_field(cmd_params->enable, ENABLE, en);
7544 + /* send command to mc*/
7545 + return mc_send_command(mc_io, &cmd);
7549 + * dpni_get_multicast_promisc() - Get multicast promiscuous mode
7550 + * @mc_io: Pointer to MC portal's I/O object
7551 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7552 + * @token: Token of DPNI object
7553 + * @en: Returns '1' if enabled; '0' otherwise
7555 + * Return: '0' on Success; Error code otherwise.
7557 +int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
7562 + struct mc_command cmd = { 0 };
7563 + struct dpni_rsp_get_multicast_promisc *rsp_params;
7566 + /* prepare command */
7567 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
7571 + /* send command to mc*/
7572 + err = mc_send_command(mc_io, &cmd);
7576 + /* retrieve response parameters */
7577 + rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params;
7578 + *en = dpni_get_field(rsp_params->enabled, ENABLE);
7584 + * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
7585 + * @mc_io: Pointer to MC portal's I/O object
7586 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7587 + * @token: Token of DPNI object
7588 + * @en: Set to '1' to enable; '0' to disable
7590 + * Return: '0' on Success; Error code otherwise.
7592 +int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
7597 + struct mc_command cmd = { 0 };
7598 + struct dpni_cmd_set_unicast_promisc *cmd_params;
7600 + /* prepare command */
7601 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
7604 + cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params;
7605 + dpni_set_field(cmd_params->enable, ENABLE, en);
7607 + /* send command to mc*/
7608 + return mc_send_command(mc_io, &cmd);
7612 + * dpni_get_unicast_promisc() - Get unicast promiscuous mode
7613 + * @mc_io: Pointer to MC portal's I/O object
7614 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7615 + * @token: Token of DPNI object
7616 + * @en: Returns '1' if enabled; '0' otherwise
7618 + * Return: '0' on Success; Error code otherwise.
7620 +int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
7625 + struct mc_command cmd = { 0 };
7626 + struct dpni_rsp_get_unicast_promisc *rsp_params;
7629 + /* prepare command */
7630 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
7634 + /* send command to mc*/
7635 + err = mc_send_command(mc_io, &cmd);
7639 + /* retrieve response parameters */
7640 + rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params;
7641 + *en = dpni_get_field(rsp_params->enabled, ENABLE);
7647 + * dpni_set_primary_mac_addr() - Set the primary MAC address
7648 + * @mc_io: Pointer to MC portal's I/O object
7649 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7650 + * @token: Token of DPNI object
7651 + * @mac_addr: MAC address to set as primary address
7653 + * Return: '0' on Success; Error code otherwise.
7655 +int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
7658 + const u8 mac_addr[6])
7660 + struct mc_command cmd = { 0 };
7661 + struct dpni_cmd_set_primary_mac_addr *cmd_params;
7664 + /* prepare command */
7665 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
7668 + cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params;
7669 + for (i = 0; i < 6; i++)
7670 + cmd_params->mac_addr[i] = mac_addr[5 - i];
7672 + /* send command to mc*/
7673 + return mc_send_command(mc_io, &cmd);
7677 + * dpni_get_primary_mac_addr() - Get the primary MAC address
7678 + * @mc_io: Pointer to MC portal's I/O object
7679 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7680 + * @token: Token of DPNI object
7681 + * @mac_addr: Returned MAC address
7683 + * Return: '0' on Success; Error code otherwise.
7685 +int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
7690 + struct mc_command cmd = { 0 };
7691 + struct dpni_rsp_get_primary_mac_addr *rsp_params;
7694 + /* prepare command */
7695 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
7699 + /* send command to mc*/
7700 + err = mc_send_command(mc_io, &cmd);
7704 + /* retrieve response parameters */
7705 + rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params;
7706 + for (i = 0; i < 6; i++)
7707 + mac_addr[5 - i] = rsp_params->mac_addr[i];
7713 + * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical
7714 + * port the DPNI is attached to
7715 + * @mc_io: Pointer to MC portal's I/O object
7716 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7717 + * @token: Token of DPNI object
7718 + * @mac_addr: MAC address of the physical port, if any, otherwise 0
7720 + * The primary MAC address is not cleared by this operation.
7722 + * Return: '0' on Success; Error code otherwise.
7724 +int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
7729 + struct mc_command cmd = { 0 };
7730 + struct dpni_rsp_get_port_mac_addr *rsp_params;
7733 + /* prepare command */
7734 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR,
7738 + /* send command to mc*/
7739 + err = mc_send_command(mc_io, &cmd);
7743 + /* retrieve response parameters */
7744 + rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params;
7745 + for (i = 0; i < 6; i++)
7746 + mac_addr[5 - i] = rsp_params->mac_addr[i];
7752 + * dpni_add_mac_addr() - Add MAC address filter
7753 + * @mc_io: Pointer to MC portal's I/O object
7754 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7755 + * @token: Token of DPNI object
7756 + * @mac_addr: MAC address to add
7758 + * Return: '0' on Success; Error code otherwise.
7760 +int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
7763 + const u8 mac_addr[6])
7765 + struct mc_command cmd = { 0 };
7766 + struct dpni_cmd_add_mac_addr *cmd_params;
7769 + /* prepare command */
7770 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
7773 + cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params;
7774 + for (i = 0; i < 6; i++)
7775 + cmd_params->mac_addr[i] = mac_addr[5 - i];
7777 + /* send command to mc*/
7778 + return mc_send_command(mc_io, &cmd);
7782 + * dpni_remove_mac_addr() - Remove MAC address filter
7783 + * @mc_io: Pointer to MC portal's I/O object
7784 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7785 + * @token: Token of DPNI object
7786 + * @mac_addr: MAC address to remove
7788 + * Return: '0' on Success; Error code otherwise.
7790 +int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
7793 + const u8 mac_addr[6])
7795 + struct mc_command cmd = { 0 };
7796 + struct dpni_cmd_remove_mac_addr *cmd_params;
7799 + /* prepare command */
7800 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
7803 + cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params;
7804 + for (i = 0; i < 6; i++)
7805 + cmd_params->mac_addr[i] = mac_addr[5 - i];
7807 + /* send command to mc*/
7808 + return mc_send_command(mc_io, &cmd);
7812 + * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
7813 + * @mc_io: Pointer to MC portal's I/O object
7814 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7815 + * @token: Token of DPNI object
7816 + * @unicast: Set to '1' to clear unicast addresses
7817 + * @multicast: Set to '1' to clear multicast addresses
7819 + * The primary MAC address is not cleared by this operation.
7821 + * Return: '0' on Success; Error code otherwise.
7823 +int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
7829 + struct mc_command cmd = { 0 };
7830 + struct dpni_cmd_clear_mac_filters *cmd_params;
7832 + /* prepare command */
7833 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
7836 + cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params;
7837 + dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast);
7838 + dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast);
7840 + /* send command to mc*/
7841 + return mc_send_command(mc_io, &cmd);
7845 + * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
7846 + * @mc_io: Pointer to MC portal's I/O object
7847 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7848 + * @token: Token of DPNI object
7849 + * @tc_id: Traffic class selection (0-7)
7850 + * @cfg: Traffic class distribution configuration
7852 + * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg()
7853 + * first to prepare the key_cfg_iova parameter
7855 + * Return: '0' on Success; error code otherwise.
7857 +int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
7861 + const struct dpni_rx_tc_dist_cfg *cfg)
7863 + struct mc_command cmd = { 0 };
7864 + struct dpni_cmd_set_rx_tc_dist *cmd_params;
7866 + /* prepare command */
7867 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
7870 + cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params;
7871 + cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
7872 + cmd_params->tc_id = tc_id;
7873 + dpni_set_field(cmd_params->flags, DIST_MODE, cfg->dist_mode);
7874 + dpni_set_field(cmd_params->flags, MISS_ACTION, cfg->fs_cfg.miss_action);
7875 + cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id);
7876 + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
7878 + /* send command to mc*/
7879 + return mc_send_command(mc_io, &cmd);
7883 + * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
7884 + * (to select a flow ID)
7885 + * @mc_io: Pointer to MC portal's I/O object
7886 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7887 + * @token: Token of DPNI object
7888 + * @tc_id: Traffic class selection (0-7)
7889 + * @index: Location in the QoS table where to insert the entry.
7890 + * Only relevant if MASKING is enabled for QoS
7891 + * classification on this DPNI, it is ignored for exact match.
7892 + * @cfg: Flow steering rule to add
7893 + * @action: Action to be taken as result of a classification hit
7895 + * Return: '0' on Success; Error code otherwise.
7897 +int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
7902 + const struct dpni_rule_cfg *cfg,
7903 + const struct dpni_fs_action_cfg *action)
7905 + struct dpni_cmd_add_fs_entry *cmd_params;
7906 + struct mc_command cmd = { 0 };
7908 + /* prepare command */
7909 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
7912 + cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
7913 + cmd_params->tc_id = tc_id;
7914 + cmd_params->key_size = cfg->key_size;
7915 + cmd_params->index = cpu_to_le16(index);
7916 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
7917 + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
7918 + cmd_params->options = cpu_to_le16(action->options);
7919 + cmd_params->flow_id = cpu_to_le16(action->flow_id);
7920 + cmd_params->flc = cpu_to_le64(action->flc);
7922 + /* send command to mc*/
7923 + return mc_send_command(mc_io, &cmd);
7927 + * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
7929 + * @mc_io: Pointer to MC portal's I/O object
7930 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7931 + * @token: Token of DPNI object
7932 + * @tc_id: Traffic class selection (0-7)
7933 + * @cfg: Flow steering rule to remove
7935 + * Return: '0' on Success; Error code otherwise.
7937 +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
7941 + const struct dpni_rule_cfg *cfg)
7943 + struct dpni_cmd_remove_fs_entry *cmd_params;
7944 + struct mc_command cmd = { 0 };
7946 + /* prepare command */
7947 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
7950 + cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
7951 + cmd_params->tc_id = tc_id;
7952 + cmd_params->key_size = cfg->key_size;
7953 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
7954 + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
7956 + /* send command to mc*/
7957 + return mc_send_command(mc_io, &cmd);
7961 + * dpni_set_congestion_notification() - Set traffic class congestion
7962 + * notification configuration
7963 + * @mc_io: Pointer to MC portal's I/O object
7964 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7965 + * @token: Token of DPNI object
7966 + * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
7967 + * @tc_id: Traffic class selection (0-7)
7968 + * @cfg: congestion notification configuration
7970 + * Return: '0' on Success; error code otherwise.
7972 +int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
7975 + enum dpni_queue_type qtype,
7977 + const struct dpni_congestion_notification_cfg *cfg)
7979 + struct dpni_cmd_set_congestion_notification *cmd_params;
7980 + struct mc_command cmd = { 0 };
7982 + /* prepare command */
7983 + cmd.header = mc_encode_cmd_header(
7984 + DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
7987 + cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
7988 + cmd_params->qtype = qtype;
7989 + cmd_params->tc = tc_id;
7990 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
7991 + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
7992 + cmd_params->dest_priority = cfg->dest_cfg.priority;
7993 + dpni_set_field(cmd_params->type_units, DEST_TYPE,
7994 + cfg->dest_cfg.dest_type);
7995 + dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units);
7996 + cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
7997 + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
7998 + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
7999 + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
8001 + /* send command to mc*/
8002 + return mc_send_command(mc_io, &cmd);
8006 + * dpni_set_queue() - Set queue parameters
8007 + * @mc_io: Pointer to MC portal's I/O object
8008 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8009 + * @token: Token of DPNI object
8010 + * @qtype: Type of queue - all queue types are supported, although
8011 + * the command is ignored for Tx
8012 + * @tc: Traffic class, in range 0 to NUM_TCS - 1
8013 + * @index: Selects the specific queue out of the set allocated for the
8014 + * same TC. Value must be in range 0 to NUM_QUEUES - 1
8015 + * @options: A combination of DPNI_QUEUE_OPT_ values that control what
8016 + * configuration options are set on the queue
8017 + * @queue: Queue structure
8019 + * Return: '0' on Success; Error code otherwise.
8021 +int dpni_set_queue(struct fsl_mc_io *mc_io,
8024 + enum dpni_queue_type qtype,
8028 + const struct dpni_queue *queue)
8030 + struct mc_command cmd = { 0 };
8031 + struct dpni_cmd_set_queue *cmd_params;
8033 + /* prepare command */
8034 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE,
8037 + cmd_params = (struct dpni_cmd_set_queue *)cmd.params;
8038 + cmd_params->qtype = qtype;
8039 + cmd_params->tc = tc;
8040 + cmd_params->index = index;
8041 + cmd_params->options = options;
8042 + cmd_params->dest_id = cpu_to_le32(queue->destination.id);
8043 + cmd_params->dest_prio = queue->destination.priority;
8044 + dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type);
8045 + dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control);
8046 + dpni_set_field(cmd_params->flags, HOLD_ACTIVE,
8047 + queue->destination.hold_active);
8048 + cmd_params->flc = cpu_to_le64(queue->flc.value);
8049 + cmd_params->user_context = cpu_to_le64(queue->user_context);
8051 + /* send command to mc */
8052 + return mc_send_command(mc_io, &cmd);
8056 + * dpni_get_queue() - Get queue parameters
8057 + * @mc_io: Pointer to MC portal's I/O object
8058 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8059 + * @token: Token of DPNI object
8060 + * @qtype: Type of queue - all queue types are supported
8061 + * @tc: Traffic class, in range 0 to NUM_TCS - 1
8062 + * @index: Selects the specific queue out of the set allocated for the
8063 + * same TC. Value must be in range 0 to NUM_QUEUES - 1
8064 + * @queue: Queue configuration structure
8065 + * @qid: Queue identification
8067 + * Return: '0' on Success; Error code otherwise.
8069 +int dpni_get_queue(struct fsl_mc_io *mc_io,
8072 + enum dpni_queue_type qtype,
8075 + struct dpni_queue *queue,
8076 + struct dpni_queue_id *qid)
8078 + struct mc_command cmd = { 0 };
8079 + struct dpni_cmd_get_queue *cmd_params;
8080 + struct dpni_rsp_get_queue *rsp_params;
8083 + /* prepare command */
8084 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE,
8087 + cmd_params = (struct dpni_cmd_get_queue *)cmd.params;
8088 + cmd_params->qtype = qtype;
8089 + cmd_params->tc = tc;
8090 + cmd_params->index = index;
8092 + /* send command to mc */
8093 + err = mc_send_command(mc_io, &cmd);
8097 + /* retrieve response parameters */
8098 + rsp_params = (struct dpni_rsp_get_queue *)cmd.params;
8099 + queue->destination.id = le32_to_cpu(rsp_params->dest_id);
8100 + queue->destination.priority = rsp_params->dest_prio;
8101 + queue->destination.type = dpni_get_field(rsp_params->flags,
8103 + queue->flc.stash_control = dpni_get_field(rsp_params->flags,
8105 + queue->destination.hold_active = dpni_get_field(rsp_params->flags,
8107 + queue->flc.value = le64_to_cpu(rsp_params->flc);
8108 + queue->user_context = le64_to_cpu(rsp_params->user_context);
8109 + qid->fqid = le32_to_cpu(rsp_params->fqid);
8110 + qid->qdbin = le16_to_cpu(rsp_params->qdbin);
8116 + * dpni_get_statistics() - Get DPNI statistics
8117 + * @mc_io: Pointer to MC portal's I/O object
8118 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8119 + * @token: Token of DPNI object
8120 + * @page: Selects the statistics page to retrieve, see
8121 + * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2.
8122 + * @stat: Structure containing the statistics
8124 + * Return: '0' on Success; Error code otherwise.
8126 +int dpni_get_statistics(struct fsl_mc_io *mc_io,
8130 + union dpni_statistics *stat)
8132 + struct mc_command cmd = { 0 };
8133 + struct dpni_cmd_get_statistics *cmd_params;
8134 + struct dpni_rsp_get_statistics *rsp_params;
8137 + /* prepare command */
8138 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS,
8141 + cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
8142 + cmd_params->page_number = page;
8144 + /* send command to mc */
8145 + err = mc_send_command(mc_io, &cmd);
8149 + /* retrieve response parameters */
8150 + rsp_params = (struct dpni_rsp_get_statistics *)cmd.params;
8151 + for (i = 0; i < DPNI_STATISTICS_CNT; i++)
8152 + stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]);
8158 + * dpni_reset_statistics() - Clears DPNI statistics
8159 + * @mc_io: Pointer to MC portal's I/O object
8160 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8161 + * @token: Token of DPNI object
8163 + * Return: '0' on Success; Error code otherwise.
8165 +int dpni_reset_statistics(struct fsl_mc_io *mc_io,
8169 + struct mc_command cmd = { 0 };
8171 + /* prepare command */
8172 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS,
8176 + /* send command to mc*/
8177 + return mc_send_command(mc_io, &cmd);
8181 + * dpni_set_taildrop() - Set taildrop per queue or TC
8182 + * @mc_io: Pointer to MC portal's I/O object
8183 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8184 + * @token: Token of DPNI object
8185 + * @cg_point: Congestion point
8186 + * @qtype: Queue type on which the taildrop is configured.
8187 + * Only Rx queues are supported for now
8188 + * @tc: Traffic class to apply this taildrop to
8189 + * @index: Index of the queue if the DPNI supports multiple queues for
8190 + * traffic distribution. Ignored if CONGESTION_POINT is not 0.
8191 + * @taildrop: Taildrop structure
8193 + * Return: '0' on Success; Error code otherwise.
8195 +int dpni_set_taildrop(struct fsl_mc_io *mc_io,
8198 + enum dpni_congestion_point cg_point,
8199 + enum dpni_queue_type qtype,
8202 + struct dpni_taildrop *taildrop)
8204 + struct mc_command cmd = { 0 };
8205 + struct dpni_cmd_set_taildrop *cmd_params;
8207 + /* prepare command */
8208 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP,
8211 + cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params;
8212 + cmd_params->congestion_point = cg_point;
8213 + cmd_params->qtype = qtype;
8214 + cmd_params->tc = tc;
8215 + cmd_params->index = index;
8216 + dpni_set_field(cmd_params->enable, ENABLE, taildrop->enable);
8217 + cmd_params->units = taildrop->units;
8218 + cmd_params->threshold = cpu_to_le32(taildrop->threshold);
8220 + /* send command to mc */
8221 + return mc_send_command(mc_io, &cmd);
8225 + * dpni_get_taildrop() - Get taildrop information
8226 + * @mc_io: Pointer to MC portal's I/O object
8227 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8228 + * @token: Token of DPNI object
8229 + * @cg_point: Congestion point
8230 + * @qtype: Queue type on which the taildrop is configured.
8231 + * Only Rx queues are supported for now
8232 + * @tc: Traffic class to apply this taildrop to
8233 + * @index: Index of the queue if the DPNI supports multiple queues for
8234 + * traffic distribution. Ignored if CONGESTION_POINT is not 0.
8235 + * @taildrop: Taildrop structure
8237 + * Return: '0' on Success; Error code otherwise.
8239 +int dpni_get_taildrop(struct fsl_mc_io *mc_io,
8242 + enum dpni_congestion_point cg_point,
8243 + enum dpni_queue_type qtype,
8246 + struct dpni_taildrop *taildrop)
8248 + struct mc_command cmd = { 0 };
8249 + struct dpni_cmd_get_taildrop *cmd_params;
8250 + struct dpni_rsp_get_taildrop *rsp_params;
8253 + /* prepare command */
8254 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP,
8257 + cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params;
8258 + cmd_params->congestion_point = cg_point;
8259 + cmd_params->qtype = qtype;
8260 + cmd_params->tc = tc;
8261 + cmd_params->index = index;
8263 + /* send command to mc */
8264 + err = mc_send_command(mc_io, &cmd);
8268 + /* retrieve response parameters */
8269 + rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params;
8270 + taildrop->enable = dpni_get_field(rsp_params->enable, ENABLE);
8271 + taildrop->units = rsp_params->units;
8272 + taildrop->threshold = le32_to_cpu(rsp_params->threshold);
8276 diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.h b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
8277 new file mode 100644
8278 index 00000000..600c3574
8280 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
8282 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
8283 + * Copyright 2016 NXP
8285 + * Redistribution and use in source and binary forms, with or without
8286 + * modification, are permitted provided that the following conditions are met:
8287 + * * Redistributions of source code must retain the above copyright
8288 + * notice, this list of conditions and the following disclaimer.
8289 + * * Redistributions in binary form must reproduce the above copyright
8290 + * notice, this list of conditions and the following disclaimer in the
8291 + * documentation and/or other materials provided with the distribution.
8292 + * * Neither the name of the above-listed copyright holders nor the
8293 + * names of any contributors may be used to endorse or promote products
8294 + * derived from this software without specific prior written permission.
8297 + * ALTERNATIVELY, this software may be distributed under the terms of the
8298 + * GNU General Public License ("GPL") as published by the Free Software
8299 + * Foundation, either version 2 of that License or (at your option) any
8302 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
8303 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8304 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8305 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
8306 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
8307 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
8308 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
8309 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
8310 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
8311 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
8312 + * POSSIBILITY OF SUCH DAMAGE.
8314 +#ifndef __FSL_DPNI_H
8315 +#define __FSL_DPNI_H
8322 + * Data Path Network Interface API
8323 + * Contains initialization APIs and runtime control APIs for DPNI
8326 +/** General DPNI macros */
8329 + * Maximum number of traffic classes
8331 +#define DPNI_MAX_TC 8
8333 + * Maximum number of buffer pools per DPNI
8335 +#define DPNI_MAX_DPBP 8
8338 + * All traffic classes considered; see dpni_set_queue()
8340 +#define DPNI_ALL_TCS (u8)(-1)
8342 + * All flows within traffic class considered; see dpni_set_queue()
8344 +#define DPNI_ALL_TC_FLOWS (u16)(-1)
8346 + * Generate new flow ID; see dpni_set_queue()
8348 +#define DPNI_NEW_FLOW_ID (u16)(-1)
8351 + * Tx traffic is always released to a buffer pool on transmit, there are no
8352 + * resources allocated to have the frames confirmed back to the source after
8355 +#define DPNI_OPT_TX_FRM_RELEASE 0x000001
8357 + * Disables support for MAC address filtering for addresses other than primary
8358 + * MAC address. This affects both unicast and multicast. Promiscuous mode can
8359 + * still be enabled/disabled for both unicast and multicast. If promiscuous mode
8360 + * is disabled, only traffic matching the primary MAC address will be accepted.
8362 +#define DPNI_OPT_NO_MAC_FILTER 0x000002
8364 + * Allocate policers for this DPNI. They can be used to rate-limit traffic per
8365 + * traffic class (TC) basis.
8367 +#define DPNI_OPT_HAS_POLICING 0x000004
8369 + * Congestion can be managed in several ways, allowing the buffer pool to
8370 + * deplete on ingress, taildrop on each queue or use congestion groups for sets
8371 + * of queues. If set, it configures a single congestion groups across all TCs.
8372 + * If reset, a congestion group is allocated for each TC. Only relevant if the
8373 + * DPNI has multiple traffic classes.
8375 +#define DPNI_OPT_SHARED_CONGESTION 0x000008
8377 + * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all
8378 + * look-ups are exact match. Note that TCAM is not available on LS1088 and its
8379 + * variants. Setting this bit on these SoCs will trigger an error.
8381 +#define DPNI_OPT_HAS_KEY_MASKING 0x000010
8383 + * Disables the flow steering table.
8385 +#define DPNI_OPT_NO_FS 0x000020
8387 +int dpni_open(struct fsl_mc_io *mc_io,
8392 +int dpni_close(struct fsl_mc_io *mc_io,
8397 + * struct dpni_pools_cfg - Structure representing buffer pools configuration
8398 + * @num_dpbp: Number of DPBPs
8399 + * @pools: Array of buffer pools parameters; The number of valid entries
8400 + * must match 'num_dpbp' value
8402 +struct dpni_pools_cfg {
8405 + * struct pools - Buffer pools parameters
8406 + * @dpbp_id: DPBP object ID
8407 + * @buffer_size: Buffer size
8408 + * @backup_pool: Backup pool
8414 + } pools[DPNI_MAX_DPBP];
8417 +int dpni_set_pools(struct fsl_mc_io *mc_io,
8420 + const struct dpni_pools_cfg *cfg);
8422 +int dpni_enable(struct fsl_mc_io *mc_io,
8426 +int dpni_disable(struct fsl_mc_io *mc_io,
8430 +int dpni_is_enabled(struct fsl_mc_io *mc_io,
8435 +int dpni_reset(struct fsl_mc_io *mc_io,
8440 + * DPNI IRQ Index and Events
8446 +#define DPNI_IRQ_INDEX 0
8448 + * IRQ event - indicates a change in link state
8450 +#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
8452 +int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
8458 +int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
8464 +int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
8470 +int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
8476 +int dpni_get_irq_status(struct fsl_mc_io *mc_io,
8482 +int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
8489 + * struct dpni_attr - Structure representing DPNI attributes
8490 + * @options: Any combination of the following options:
8491 + * DPNI_OPT_TX_FRM_RELEASE
8492 + * DPNI_OPT_NO_MAC_FILTER
8493 + * DPNI_OPT_HAS_POLICING
8494 + * DPNI_OPT_SHARED_CONGESTION
8495 + * DPNI_OPT_HAS_KEY_MASKING
8497 + * @num_queues: Number of Tx and Rx queues used for traffic distribution.
8498 + * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI.
8499 + * @mac_filter_entries: Number of entries in the MAC address filtering table.
8500 + * @vlan_filter_entries: Number of entries in the VLAN address filtering table.
8501 + * @qos_entries: Number of entries in the QoS classification table.
8502 + * @fs_entries: Number of entries in the flow steering table.
8503 + * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger
8504 + * than this when adding QoS entries will result in an error.
8505 + * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a
8506 + * key larger than this when composing the hash + FS key will
8507 + * result in an error.
8508 + * @wriop_version: Version of WRIOP HW block. The 3 version values are stored
8509 + * on 6, 5, 5 bits respectively.
8515 + u8 mac_filter_entries;
8516 + u8 vlan_filter_entries;
8521 + u16 wriop_version;
8524 +int dpni_get_attributes(struct fsl_mc_io *mc_io,
8527 + struct dpni_attr *attr);
8534 + * Extract out of frame header error
8536 +#define DPNI_ERROR_EOFHE 0x00020000
8538 + * Frame length error
8540 +#define DPNI_ERROR_FLE 0x00002000
8542 + * Frame physical error
8544 +#define DPNI_ERROR_FPE 0x00001000
8546 + * Parsing header error
8548 +#define DPNI_ERROR_PHE 0x00000020
8550 + * Parser L3 checksum error
8552 +#define DPNI_ERROR_L3CE 0x00000004
8554 + * Parser L4 checksum error
8556 +#define DPNI_ERROR_L4CE 0x00000001
8559 + * enum dpni_error_action - Defines DPNI behavior for errors
8560 + * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
8561 + * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
8562 + * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
8564 +enum dpni_error_action {
8565 + DPNI_ERROR_ACTION_DISCARD = 0,
8566 + DPNI_ERROR_ACTION_CONTINUE = 1,
8567 + DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
8571 + * struct dpni_error_cfg - Structure representing DPNI errors treatment
8572 + * @errors: Errors mask; use 'DPNI_ERROR__<X>
8573 + * @error_action: The desired action for the errors mask
8574 + * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
8575 + * status (FAS); relevant only for the non-discard action
8577 +struct dpni_error_cfg {
8579 + enum dpni_error_action error_action;
8580 + int set_frame_annotation;
8583 +int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
8586 + struct dpni_error_cfg *cfg);
8589 + * DPNI buffer layout modification options
8593 + * Select to modify the time-stamp setting
8595 +#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
8597 + * Select to modify the parser-result setting; not applicable for Tx
8599 +#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
8601 + * Select to modify the frame-status setting
8603 +#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
8605 + * Select to modify the private-data-size setting
8607 +#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
8609 + * Select to modify the data-alignment setting
8611 +#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
8613 + * Select to modify the data-head-room setting
8615 +#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
8617 + * Select to modify the data-tail-room setting
8619 +#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
8622 + * struct dpni_buffer_layout - Structure representing DPNI buffer layout
8623 + * @options: Flags representing the suggested modifications to the buffer
8624 + * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
8625 + * @pass_timestamp: Pass timestamp value
8626 + * @pass_parser_result: Pass parser results
8627 + * @pass_frame_status: Pass frame status
8628 + * @private_data_size: Size kept for private data (in bytes)
8629 + * @data_align: Data alignment
8630 + * @data_head_room: Data head room
8631 + * @data_tail_room: Data tail room
8633 +struct dpni_buffer_layout {
8635 + int pass_timestamp;
8636 + int pass_parser_result;
8637 + int pass_frame_status;
8638 + u16 private_data_size;
8640 + u16 data_head_room;
8641 + u16 data_tail_room;
8645 + * enum dpni_queue_type - Identifies a type of queue targeted by the command
8646 + * @DPNI_QUEUE_RX: Rx queue
8647 + * @DPNI_QUEUE_TX: Tx queue
8648 + * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
8649 + * @DPNI_QUEUE_RX_ERR: Rx error queue
8650 + */
+enum dpni_queue_type {
8653 + DPNI_QUEUE_TX_CONFIRM,
8654 + DPNI_QUEUE_RX_ERR,
8657 +int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
8660 + enum dpni_queue_type qtype,
8661 + struct dpni_buffer_layout *layout);
8663 +int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
8666 + enum dpni_queue_type qtype,
8667 + const struct dpni_buffer_layout *layout);
8670 + * enum dpni_offload - Identifies a type of offload targeted by the command
8671 + * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation
8672 + * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation
8673 + * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation
8674 + * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation
8676 +enum dpni_offload {
8677 + DPNI_OFF_RX_L3_CSUM,
8678 + DPNI_OFF_RX_L4_CSUM,
8679 + DPNI_OFF_TX_L3_CSUM,
8680 + DPNI_OFF_TX_L4_CSUM,
8683 +int dpni_set_offload(struct fsl_mc_io *mc_io,
8686 + enum dpni_offload type,
8689 +int dpni_get_offload(struct fsl_mc_io *mc_io,
8692 + enum dpni_offload type,
8695 +int dpni_get_qdid(struct fsl_mc_io *mc_io,
8698 + enum dpni_queue_type qtype,
8701 +int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
8704 + u16 *data_offset);
8706 +#define DPNI_STATISTICS_CNT 7
8708 +union dpni_statistics {
8710 + * struct page_0 - Page_0 statistics structure
8711 + * @ingress_all_frames: Ingress frame count
8712 + * @ingress_all_bytes: Ingress byte count
8713 + * @ingress_multicast_frames: Ingress multicast frame count
8714 + * @ingress_multicast_bytes: Ingress multicast byte count
8715 + * @ingress_broadcast_frames: Ingress broadcast frame count
8716 + * @ingress_broadcast_bytes: Ingress broadcast byte count
8719 + u64 ingress_all_frames;
8720 + u64 ingress_all_bytes;
8721 + u64 ingress_multicast_frames;
8722 + u64 ingress_multicast_bytes;
8723 + u64 ingress_broadcast_frames;
8724 + u64 ingress_broadcast_bytes;
8727 + * struct page_1 - Page_1 statistics structure
8728 + * @egress_all_frames: Egress frame count
8729 + * @egress_all_bytes: Egress byte count
8730 + * @egress_multicast_frames: Egress multicast frame count
8731 + * @egress_multicast_bytes: Egress multicast byte count
8732 + * @egress_broadcast_frames: Egress broadcast frame count
8733 + * @egress_broadcast_bytes: Egress broadcast byte count
8736 + u64 egress_all_frames;
8737 + u64 egress_all_bytes;
8738 + u64 egress_multicast_frames;
8739 + u64 egress_multicast_bytes;
8740 + u64 egress_broadcast_frames;
8741 + u64 egress_broadcast_bytes;
8744 + * struct page_2 - Page_2 statistics structure
8745 + * @ingress_filtered_frames: Ingress filtered frame count
8746 + * @ingress_discarded_frames: Ingress discarded frame count
8747 + * @ingress_nobuffer_discards: Ingress discarded frame count
8748 + * due to lack of buffers
8749 + * @egress_discarded_frames: Egress discarded frame count
8750 + * @egress_confirmed_frames: Egress confirmed frame count
8753 + u64 ingress_filtered_frames;
8754 + u64 ingress_discarded_frames;
8755 + u64 ingress_nobuffer_discards;
8756 + u64 egress_discarded_frames;
8757 + u64 egress_confirmed_frames;
8760 + * struct raw - raw statistics structure
8763 + u64 counter[DPNI_STATISTICS_CNT];
8767 +int dpni_get_statistics(struct fsl_mc_io *mc_io,
8771 + union dpni_statistics *stat);
8773 +int dpni_reset_statistics(struct fsl_mc_io *mc_io,
8778 + * Enable auto-negotiation
8780 +#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL
8782 + * Enable half-duplex mode
8784 +#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
8786 + * Enable pause frames
8788 +#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL
8790 + * Enable a-symmetric pause frames
8792 +#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
8795 + * struct dpni_link_cfg - Structure representing DPNI link configuration
8797 + * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
8799 +struct dpni_link_cfg {
8804 +int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
8807 + const struct dpni_link_cfg *cfg);
8810 + * struct dpni_link_state - Structure representing DPNI link state
8812 + * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
8813 + * @up: Link state; '0' for down, '1' for up
8815 +struct dpni_link_state {
8821 +int dpni_get_link_state(struct fsl_mc_io *mc_io,
8824 + struct dpni_link_state *state);
8827 + * struct dpni_tx_shaping_cfg - Structure representing DPNI tx shaping configuration
8828 + * @rate_limit: rate in Mbps
8829 + * @max_burst_size: burst size in bytes (up to 64KB)
8831 +struct dpni_tx_shaping_cfg {
8833 + u16 max_burst_size;
8836 +int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
8839 + const struct dpni_tx_shaping_cfg *tx_shaper);
8841 +int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
8844 + u16 max_frame_length);
8846 +int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
8849 + u16 *max_frame_length);
8851 +int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
8856 +int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
8861 +int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
8866 +int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
8871 +int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
8874 + const u8 mac_addr[6]);
8876 +int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
8881 +int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
8886 +int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
8889 + const u8 mac_addr[6]);
8891 +int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
8894 + const u8 mac_addr[6]);
8896 +int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
8903 + * enum dpni_dist_mode - DPNI distribution mode
8904 + * @DPNI_DIST_MODE_NONE: No distribution
8905 + * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
8906 + * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
8907 + * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
8908 + * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
8910 +enum dpni_dist_mode {
8911 + DPNI_DIST_MODE_NONE = 0,
8912 + DPNI_DIST_MODE_HASH = 1,
8913 + DPNI_DIST_MODE_FS = 2
8917 + * enum dpni_fs_miss_action - DPNI Flow Steering miss action
8918 + * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
8919 + * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
8920 + * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
8922 +enum dpni_fs_miss_action {
8923 + DPNI_FS_MISS_DROP = 0,
8924 + DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
8925 + DPNI_FS_MISS_HASH = 2
8929 + * struct dpni_fs_tbl_cfg - Flow Steering table configuration
8930 + * @miss_action: Miss action selection
8931 + * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
8933 +struct dpni_fs_tbl_cfg {
8934 + enum dpni_fs_miss_action miss_action;
8935 + u16 default_flow_id;
8938 +int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
8942 + * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
8943 + * @dist_size: Set the distribution size;
8944 + * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
8945 + * 112,128,192,224,256,384,448,512,768,896,1024
8946 + * @dist_mode: Distribution mode
8947 + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
8948 + * the extractions to be used for the distribution key by calling
8949 + * dpni_prepare_key_cfg() relevant only when
8950 + * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
8951 + * @fs_cfg: Flow Steering table configuration; only relevant if
8952 + * 'dist_mode = DPNI_DIST_MODE_FS'
8954 +struct dpni_rx_tc_dist_cfg {
8956 + enum dpni_dist_mode dist_mode;
8958 + struct dpni_fs_tbl_cfg fs_cfg;
8961 +int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
8965 + const struct dpni_rx_tc_dist_cfg *cfg);
8968 + * enum dpni_dest - DPNI destination types
8969 + * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
8970 + * does not generate FQDAN notifications; user is expected to
8971 + * dequeue from the queue based on polling or other user-defined
8973 + * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
8974 + * notifications to the specified DPIO; user is expected to dequeue
8975 + * from the queue only after notification is received
8976 + * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
8977 + * FQDAN notifications, but is connected to the specified DPCON
8978 + * object; user is expected to dequeue from the DPCON channel
8981 + DPNI_DEST_NONE = 0,
8982 + DPNI_DEST_DPIO = 1,
8983 + DPNI_DEST_DPCON = 2
8987 + * struct dpni_queue - Queue structure
8988 + * @user_context: User data, presented to the user along with any frames from
8989 + * this queue. Not relevant for Tx queues.
8991 +struct dpni_queue {
8993 + * struct destination - Destination structure
8994 + * @id: ID of the destination, only relevant if DEST_TYPE is > 0.
8995 + * Identifies either a DPIO or a DPCON object. Not relevant for
8997 + * @type: May be one of the following:
8998 + * 0 - No destination, queue can be manually queried, but will not
8999 + * push traffic or notifications to a DPIO;
9000 + * 1 - The destination is a DPIO. When traffic becomes available in
9001 + * the queue a FQDAN (FQ data available notification) will be
9002 + * generated to selected DPIO;
9003 + * 2 - The destination is a DPCON. The queue is associated with a
9004 + * DPCON object for the purpose of scheduling between multiple
9005 + * queues. The DPCON may be independently configured to
9006 + * generate notifications. Not relevant for Tx queues.
9007 + * @hold_active: Hold active, maintains a queue scheduled for longer
9008 + * in a DPIO during dequeue to reduce spread of traffic.
9009 + * Only relevant if queues are not affined to a single DPIO.
9013 + enum dpni_dest type;
9020 + char stash_control;
9025 + * struct dpni_queue_id - Queue identification, used for enqueue commands
9026 + * or queue control
9027 + * @fqid: FQID used for enqueueing to and/or configuration of this specific FQ
9028 + * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI. Only relevant
9031 +struct dpni_queue_id {
9037 + * Set User Context
9039 +#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
9040 +#define DPNI_QUEUE_OPT_DEST 0x00000002
9041 +#define DPNI_QUEUE_OPT_FLC 0x00000004
9042 +#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008
9044 +int dpni_set_queue(struct fsl_mc_io *mc_io,
9047 + enum dpni_queue_type qtype,
9051 + const struct dpni_queue *queue);
9053 +int dpni_get_queue(struct fsl_mc_io *mc_io,
9056 + enum dpni_queue_type qtype,
9059 + struct dpni_queue *queue,
9060 + struct dpni_queue_id *qid);
9063 + * enum dpni_congestion_unit - DPNI congestion units
9064 + * @DPNI_CONGESTION_UNIT_BYTES: bytes units
9065 + * @DPNI_CONGESTION_UNIT_FRAMES: frames units
9067 +enum dpni_congestion_unit {
9068 + DPNI_CONGESTION_UNIT_BYTES = 0,
9069 + DPNI_CONGESTION_UNIT_FRAMES
9073 + * enum dpni_congestion_point - Structure representing congestion point
9074 + * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and
9076 + * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used to
9077 + * define the DPNI this can be either per TC (default) or per
9078 + * interface (DPNI_OPT_SHARED_CONGESTION set at DPNI create).
9079 + * QUEUE_INDEX is ignored if this type is used.
9081 +enum dpni_congestion_point {
9087 + * struct dpni_dest_cfg - Structure representing DPNI destination parameters
9088 + * @dest_type: Destination type
9089 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
9090 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
9091 + * are 0-1 or 0-7, depending on the number of priorities in that
9092 + * channel; not relevant for 'DPNI_DEST_NONE' option
9094 +struct dpni_dest_cfg {
9095 + enum dpni_dest dest_type;
9100 +/* DPNI congestion options */
9103 + * CSCN message is written to message_iova once entering a
9104 + * congestion state (see 'threshold_entry')
9106 +#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001
9108 + * CSCN message is written to message_iova once exiting a
9109 + * congestion state (see 'threshold_exit')
9111 +#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002
9113 + * CSCN write will attempt to allocate into a cache (coherent write);
9114 + * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
9116 +#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004
9118 + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
9119 + * DPIO/DPCON's WQ channel once entering a congestion state
9120 + * (see 'threshold_entry')
9122 +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008
9124 + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
9125 + * DPIO/DPCON's WQ channel once exiting a congestion state
9126 + * (see 'threshold_exit')
9128 +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010
9130 + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
9131 + * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
9133 +#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
9136 + * struct dpni_congestion_notification_cfg - congestion notification
9138 + * @units: units type
9139 + * @threshold_entry: above this threshold we enter a congestion state.
9140 + * set it to '0' to disable it
9141 + * @threshold_exit: below this threshold we exit the congestion state.
9142 + * @message_ctx: The context that will be part of the CSCN message
9143 + * @message_iova: I/O virtual address (must be in DMA-able memory),
9144 + * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is
9145 + * contained in 'options'
9146 + * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
9147 + * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
9150 +struct dpni_congestion_notification_cfg {
9151 + enum dpni_congestion_unit units;
9152 + u32 threshold_entry;
9153 + u32 threshold_exit;
9156 + struct dpni_dest_cfg dest_cfg;
9157 + u16 notification_mode;
9160 +int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
9163 + enum dpni_queue_type qtype,
9165 + const struct dpni_congestion_notification_cfg *cfg);
9168 + * struct dpni_taildrop - Structure representing the taildrop
9169 + * @enable: Indicates whether the taildrop is active or not.
9170 + * @units: Indicates the unit of THRESHOLD. Queue taildrop only supports
9171 + * byte units; this field is ignored and assumed to be 0 if
9172 + * CONGESTION_POINT is 0.
9173 + * @threshold: Threshold value, in units identified by UNITS field. Value 0
9174 + * cannot be used as a valid taildrop threshold, THRESHOLD must
9175 + * be > 0 if the taildrop is enabled.
9177 +struct dpni_taildrop {
9179 + enum dpni_congestion_unit units;
9183 +int dpni_set_taildrop(struct fsl_mc_io *mc_io,
9186 + enum dpni_congestion_point cg_point,
9187 + enum dpni_queue_type q_type,
9190 + struct dpni_taildrop *taildrop);
9192 +int dpni_get_taildrop(struct fsl_mc_io *mc_io,
9195 + enum dpni_congestion_point cg_point,
9196 + enum dpni_queue_type q_type,
9199 + struct dpni_taildrop *taildrop);
9202 + * struct dpni_rule_cfg - Rule configuration for table lookup
9203 + * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
9204 + * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
9205 + * @key_size: key and mask size (in bytes)
9207 +struct dpni_rule_cfg {
9214 + * Discard matching traffic. If set, this takes precedence over any other
9215 + * configuration and matching traffic is always discarded.
9217 + #define DPNI_FS_OPT_DISCARD 0x1
9220 + * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
9221 + * override the FLC value set per queue.
9222 + * For more details check the Frame Descriptor section in the hardware
9225 +#define DPNI_FS_OPT_SET_FLC 0x2
9228 + * Indicates whether the 6 least significant bits of FLC are used for stash
9229 + * control. If set, the 6 least significant bits in value are interpreted as
9231 + * - bits 0-1: indicates the number of 64 byte units of context that are
9232 + * stashed. FLC value is interpreted as a memory address in this case,
9233 + * excluding the 6 LS bits.
9234 + * - bits 2-3: indicates the number of 64 byte units of frame annotation
9235 + * to be stashed. Annotation is placed at FD[ADDR].
9236 + * - bits 4-5: indicates the number of 64 byte units of frame data to be
9237 + * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
9238 + * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
9240 +#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4
9243 + * struct dpni_fs_action_cfg - Action configuration for table look-up
9244 + * @flc: FLC value for traffic matching this rule. Please check the Frame
9245 + * Descriptor section in the hardware documentation for more information.
9246 + * @flow_id: Identifies the Rx queue used for matching traffic. Supported
9247 + * values are in range 0 to num_queue-1.
9248 + * @options: Any combination of DPNI_FS_OPT_ values.
9250 +struct dpni_fs_action_cfg {
9256 +int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
9261 + const struct dpni_rule_cfg *cfg,
9262 + const struct dpni_fs_action_cfg *action);
9264 +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
9268 + const struct dpni_rule_cfg *cfg);
9270 +#endif /* __FSL_DPNI_H */
9271 diff --git a/drivers/staging/fsl-dpaa2/ethernet/net.h b/drivers/staging/fsl-dpaa2/ethernet/net.h
9272 new file mode 100644
9273 index 00000000..5020dee1
9275 +++ b/drivers/staging/fsl-dpaa2/ethernet/net.h
9277 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
9279 + * Redistribution and use in source and binary forms, with or without
9280 + * modification, are permitted provided that the following conditions are met:
9281 + * * Redistributions of source code must retain the above copyright
9282 + * notice, this list of conditions and the following disclaimer.
9283 + * * Redistributions in binary form must reproduce the above copyright
9284 + * notice, this list of conditions and the following disclaimer in the
9285 + * documentation and/or other materials provided with the distribution.
9286 + * * Neither the name of the above-listed copyright holders nor the
9287 + * names of any contributors may be used to endorse or promote products
9288 + * derived from this software without specific prior written permission.
9291 + * ALTERNATIVELY, this software may be distributed under the terms of the
9292 + * GNU General Public License ("GPL") as published by the Free Software
9293 + * Foundation, either version 2 of that License or (at your option) any
9296 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
9297 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
9298 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
9299 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
9300 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
9301 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
9302 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
9303 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
9304 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
9305 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
9306 + * POSSIBILITY OF SUCH DAMAGE.
9308 +#ifndef __FSL_NET_H
9309 +#define __FSL_NET_H
9311 +#define LAST_HDR_INDEX 0xFFFFFFFF
9313 +/*****************************************************************************/
9314 +/* Protocol fields */
9315 +/*****************************************************************************/
9317 +/************************* Ethernet fields *********************************/
9318 +#define NH_FLD_ETH_DA (1)
9319 +#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1)
9320 +#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2)
9321 +#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3)
9322 +#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4)
9323 +#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5)
9324 +#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1)
9326 +#define NH_FLD_ETH_ADDR_SIZE 6
9328 +/*************************** VLAN fields ***********************************/
9329 +#define NH_FLD_VLAN_VPRI (1)
9330 +#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1)
9331 +#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2)
9332 +#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3)
9333 +#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4)
9334 +#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1)
9336 +#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
9337 + NH_FLD_VLAN_CFI | \
9340 +/************************ IP (generic) fields ******************************/
9341 +#define NH_FLD_IP_VER (1)
9342 +#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2) /* NOTE(review): bit (NH_FLD_IP_VER << 1) is unused here — presumably reserved; confirm against MC firmware docs */
9343 +#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3)
9344 +#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4)
9345 +#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5)
9346 +#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6)
9347 +#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7)
9348 +#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8)
9349 +#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1)
9351 +#define NH_FLD_IP_PROTO_SIZE 1
9353 +/***************************** IPV4 fields *********************************/
9354 +#define NH_FLD_IPV4_VER (1)
9355 +#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1)
9356 +#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2)
9357 +#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3)
9358 +#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4)
9359 +#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5)
9360 +#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6)
9361 +#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7)
9362 +#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8)
9363 +#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9)
9364 +#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10)
9365 +#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11)
9366 +#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12)
9367 +#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13)
9368 +#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14)
9369 +#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1)
9371 +#define NH_FLD_IPV4_ADDR_SIZE 4
9372 +#define NH_FLD_IPV4_PROTO_SIZE 1
9374 +/***************************** IPV6 fields *********************************/
9375 +#define NH_FLD_IPV6_VER (1)
9376 +#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1)
9377 +#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2)
9378 +#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3)
9379 +#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4)
9380 +#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5)
9381 +#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6)
9382 +#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7)
9383 +#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1)
9385 +#define NH_FLD_IPV6_ADDR_SIZE 16
9386 +#define NH_FLD_IPV6_NEXT_HDR_SIZE 1
9388 +/***************************** ICMP fields *********************************/
9389 +#define NH_FLD_ICMP_TYPE (1)
9390 +#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1)
9391 +#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2)
9392 +#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3)
9393 +#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4)
9394 +#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1)
9396 +#define NH_FLD_ICMP_CODE_SIZE 1
9397 +#define NH_FLD_ICMP_TYPE_SIZE 1
9399 +/***************************** IGMP fields *********************************/
9400 +#define NH_FLD_IGMP_VERSION (1)
9401 +#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1)
9402 +#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2)
9403 +#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3)
9404 +#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1)
9406 +/***************************** TCP fields **********************************/
9407 +#define NH_FLD_TCP_PORT_SRC (1)
9408 +#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1)
9409 +#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2)
9410 +#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3)
9411 +#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4)
9412 +#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5)
9413 +#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6)
9414 +#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7)
9415 +#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8)
9416 +#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9)
9417 +#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10)
9418 +#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1)
9420 +#define NH_FLD_TCP_PORT_SIZE 2
9422 +/***************************** UDP fields **********************************/
9423 +#define NH_FLD_UDP_PORT_SRC (1)
9424 +#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1)
9425 +#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2)
9426 +#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3)
9427 +#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1)
9429 +#define NH_FLD_UDP_PORT_SIZE 2
9431 +/*************************** UDP-lite fields *******************************/
9432 +#define NH_FLD_UDP_LITE_PORT_SRC (1)
9433 +#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1)
9434 +#define NH_FLD_UDP_LITE_ALL_FIELDS \
9435 + ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1)
9437 +#define NH_FLD_UDP_LITE_PORT_SIZE 2
9439 +/*************************** UDP-encap-ESP fields **************************/
9440 +#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1)
9441 +#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1)
9442 +#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2)
9443 +#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3)
9444 +#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4)
9445 +#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5)
9446 +#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \
9447 + ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1)
9449 +#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2
9450 +#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4
9452 +/***************************** SCTP fields *********************************/
9453 +#define NH_FLD_SCTP_PORT_SRC (1)
9454 +#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1)
9455 +#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2)
9456 +#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3)
9457 +#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1)
9459 +#define NH_FLD_SCTP_PORT_SIZE 2
9461 +/***************************** DCCP fields *********************************/
9462 +#define NH_FLD_DCCP_PORT_SRC (1)
9463 +#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1)
9464 +#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1)
9466 +#define NH_FLD_DCCP_PORT_SIZE 2
9468 +/***************************** IPHC fields *********************************/
9469 +#define NH_FLD_IPHC_CID (1)
9470 +#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1)
9471 +#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2)
9472 +#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3)
9473 +#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4)
9474 +#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1)
9476 +/************************ SCTP CHUNK DATA fields ***************************/
9477 +#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1)
9478 +#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1)
9479 +#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2)
9480 +#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3)
9481 +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4)
9482 +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
9483 +#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
9484 +#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
9485 +#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
9486 +#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
9487 +#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
9488 + ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
9490 +/*************************** L2TPV2 fields *********************************/
9491 +#define NH_FLD_L2TPV2_TYPE_BIT (1)
9492 +#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1)
9493 +#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2)
9494 +#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3)
9495 +#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4)
9496 +#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5)
9497 +#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6)
9498 +#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7)
9499 +#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8)
9500 +#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9)
9501 +#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10)
9502 +#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11)
9503 +#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12)
9504 +#define NH_FLD_L2TPV2_ALL_FIELDS \
9505 + ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1)
9507 +/*************************** L2TPV3 fields *********************************/
9508 +#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1)
9509 +#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1)
9510 +#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2)
9511 +#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3)
9512 +#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4)
9513 +#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5)
9514 +#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6)
9515 +#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7)
9516 +#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8)
9517 +#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \
9518 + ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1)
9520 +#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1)
9521 +#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1)
9522 +#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2)
9523 +#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3)
9524 +#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \
9525 + ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1)
9527 +/**************************** PPP fields ***********************************/
9528 +#define NH_FLD_PPP_PID (1)
9529 +#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1)
9530 +#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1)
9532 +/************************** PPPoE fields ***********************************/
9533 +#define NH_FLD_PPPOE_VER (1)
9534 +#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1)
9535 +#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2)
9536 +#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3)
9537 +#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4)
9538 +#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5)
9539 +#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6)
9540 +#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1)
9542 +/************************* PPP-Mux fields **********************************/
9543 +#define NH_FLD_PPPMUX_PID (1)
9544 +#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1)
9545 +#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2)
9546 +#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1)
9548 +/*********************** PPP-Mux sub-frame fields **************************/
9549 +#define NH_FLD_PPPMUX_SUBFRM_PFF (1)
9550 +#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1)
9551 +#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2)
9552 +#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3)
9553 +#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4)
9554 +#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \
9555 + ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1)
9557 +/*************************** LLC fields ************************************/
9558 +#define NH_FLD_LLC_DSAP (1)
9559 +#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1)
9560 +#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2)
9561 +#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1)
9563 +/*************************** NLPID fields **********************************/
9564 +#define NH_FLD_NLPID_NLPID (1)
9565 +#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1)
9567 +/*************************** SNAP fields ***********************************/
9568 +#define NH_FLD_SNAP_OUI (1)
9569 +#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1)
9570 +#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1)
9572 +/*************************** LLC SNAP fields *******************************/
9573 +#define NH_FLD_LLC_SNAP_TYPE (1)
9574 +#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1)
9576 +#define NH_FLD_ARP_HTYPE (1)
9577 +#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1)
9578 +#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2)
9579 +#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3)
9580 +#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4)
9581 +#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5)
9582 +#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6)
9583 +#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7)
9584 +#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8)
9585 +#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1)
9587 +/*************************** RFC2684 fields ********************************/
9588 +#define NH_FLD_RFC2684_LLC (1)
9589 +#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1)
9590 +#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2)
9591 +#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3)
9592 +#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4)
9593 +#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5)
9594 +#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1)
9596 +/*************************** User defined fields ***************************/
9597 +#define NH_FLD_USER_DEFINED_SRCPORT (1)
9598 +#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1)
9599 +#define NH_FLD_USER_DEFINED_ALL_FIELDS \
9600 + ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1)
9602 +/*************************** Payload fields ********************************/
9603 +#define NH_FLD_PAYLOAD_BUFFER (1)
9604 +#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1)
9605 +#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2)
9606 +#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3)
9607 +#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4)
9608 +#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5)
9609 +#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1)
9611 +/*************************** GRE fields ************************************/
9612 +#define NH_FLD_GRE_TYPE (1)
9613 +#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1)
9615 +/*************************** MINENCAP fields *******************************/
9616 +#define NH_FLD_MINENCAP_SRC_IP (1)
9617 +#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1)
9618 +#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2)
9619 +#define NH_FLD_MINENCAP_ALL_FIELDS \
9620 + ((NH_FLD_MINENCAP_SRC_IP << 3) - 1)
9622 +/*************************** IPSEC AH fields *******************************/
9623 +#define NH_FLD_IPSEC_AH_SPI (1)
9624 +#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1)
9625 +#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1)
9627 +/*************************** IPSEC ESP fields ******************************/
9628 +#define NH_FLD_IPSEC_ESP_SPI (1)
9629 +#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1)
9630 +#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1)
9632 +#define NH_FLD_IPSEC_ESP_SPI_SIZE 4
9634 +/*************************** MPLS fields ***********************************/
9635 +#define NH_FLD_MPLS_LABEL_STACK (1)
9636 +#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \
9637 + ((NH_FLD_MPLS_LABEL_STACK << 1) - 1)
9639 +/*************************** MACSEC fields *********************************/
9640 +#define NH_FLD_MACSEC_SECTAG (1)
9641 +#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1)
9643 +/*************************** GTP fields ************************************/
9644 +#define NH_FLD_GTP_TEID (1)
9646 +/* Protocol options */
9648 +/* Ethernet options */
9649 +#define NH_OPT_ETH_BROADCAST 1
9650 +#define NH_OPT_ETH_MULTICAST 2
9651 +#define NH_OPT_ETH_UNICAST 3
9652 +#define NH_OPT_ETH_BPDU 4
9654 +#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01)
9655 +/* also applicable for broadcast */
9658 +#define NH_OPT_VLAN_CFI 1
9661 +#define NH_OPT_IPV4_UNICAST 1
9662 +#define NH_OPT_IPV4_MULTICAST 2
9663 +#define NH_OPT_IPV4_BROADCAST 3
9664 +#define NH_OPT_IPV4_OPTION 4
9665 +#define NH_OPT_IPV4_FRAG 5
9666 +#define NH_OPT_IPV4_INITIAL_FRAG 6
9669 +#define NH_OPT_IPV6_UNICAST 1
9670 +#define NH_OPT_IPV6_MULTICAST 2
9671 +#define NH_OPT_IPV6_OPTION 3
9672 +#define NH_OPT_IPV6_FRAG 4
9673 +#define NH_OPT_IPV6_INITIAL_FRAG 5
9675 +/* General IP options (may be used for any version) */
9676 +#define NH_OPT_IP_FRAG 1
9677 +#define NH_OPT_IP_INITIAL_FRAG 2
9678 +#define NH_OPT_IP_OPTION 3
9680 +/* Minenc. options */
9681 +#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1
9684 +#define NH_OPT_GRE_ROUTING_PRESENT 1
9687 +#define NH_OPT_TCP_OPTIONS 1
9688 +#define NH_OPT_TCP_CONTROL_HIGH_BITS 2
9689 +#define NH_OPT_TCP_CONTROL_LOW_BITS 3
9691 +/* CAPWAP options */
9692 +#define NH_OPT_CAPWAP_DTLS 1
9695 + NET_PROT_NONE = 0,
9704 + NET_PROT_UDP_LITE,
9707 + NET_PROT_SCTP_CHUNK_DATA,
9711 + NET_PROT_PPPMUX_SUBFRM,
9713 + NET_PROT_L2TPV3_CTRL,
9714 + NET_PROT_L2TPV3_SESS,
9716 + NET_PROT_LLC_SNAP,
9720 + NET_PROT_IPSEC_AH,
9721 + NET_PROT_IPSEC_ESP,
9722 + NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
9725 + NET_PROT_MINENCAP,
9730 + NET_PROT_CAPWAP_DATA,
9731 + NET_PROT_CAPWAP_CTRL,
9738 + NET_PROT_USER_DEFINED_L2,
9739 + NET_PROT_USER_DEFINED_L3,
9740 + NET_PROT_USER_DEFINED_L4,
9741 + NET_PROT_USER_DEFINED_L5,
9742 + NET_PROT_USER_DEFINED_SHIM1,
9743 + NET_PROT_USER_DEFINED_SHIM2,
9745 + NET_PROT_DUMMY_LAST
9749 +#define NH_IEEE8021Q_ETYPE 0x8100
9750 +#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
9751 + ((((u32)((etype) & 0xFFFF)) << 16) | \
9752 + (((u32)((pcp) & 0x07)) << 13) | \
9753 + (((u32)((dei) & 0x01)) << 12) | \
9754 + (((u32)((vlan_id) & 0xFFF))))
9756 +#endif /* __FSL_NET_H */
9757 diff --git a/drivers/staging/fsl-dpaa2/ethsw/Kconfig b/drivers/staging/fsl-dpaa2/ethsw/Kconfig
9758 new file mode 100644
9759 index 00000000..06c70408
9761 +++ b/drivers/staging/fsl-dpaa2/ethsw/Kconfig
9763 +config FSL_DPAA2_ETHSW
9764 + tristate "DPAA2 Ethernet Switch"
9765 + depends on FSL_MC_BUS && FSL_DPAA2
9768 + Prototype driver for DPAA2 Ethernet Switch.
9769 diff --git a/drivers/staging/fsl-dpaa2/ethsw/Makefile b/drivers/staging/fsl-dpaa2/ethsw/Makefile
9770 new file mode 100644
9771 index 00000000..20eb3ac4
9773 +++ b/drivers/staging/fsl-dpaa2/ethsw/Makefile
9776 +obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o
9778 +dpaa2-ethsw-objs := switch.o dpsw.o
9781 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
9784 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
9785 diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
9786 new file mode 100644
9787 index 00000000..f7374d1c
9789 +++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
9791 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
9793 + * Redistribution and use in source and binary forms, with or without
9794 + * modification, are permitted provided that the following conditions are met:
9795 + * * Redistributions of source code must retain the above copyright
9796 + * notice, this list of conditions and the following disclaimer.
9797 + * * Redistributions in binary form must reproduce the above copyright
9798 + * notice, this list of conditions and the following disclaimer in the
9799 + * documentation and/or other materials provided with the distribution.
9800 + * * Neither the name of the above-listed copyright holders nor the
9801 + * names of any contributors may be used to endorse or promote products
9802 + * derived from this software without specific prior written permission.
9805 + * ALTERNATIVELY, this software may be distributed under the terms of the
9806 + * GNU General Public License ("GPL") as published by the Free Software
9807 + * Foundation, either version 2 of that License or (at your option) any
9810 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
9811 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
9812 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
9813 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
9814 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
9815 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
9816 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
9817 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
9818 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
9819 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
9820 + * POSSIBILITY OF SUCH DAMAGE.
9822 +#ifndef __FSL_DPSW_CMD_H
9823 +#define __FSL_DPSW_CMD_H
9826 +#define DPSW_VER_MAJOR 8
9827 +#define DPSW_VER_MINOR 0
9829 +#define DPSW_CMD_BASE_VERSION 1
9830 +#define DPSW_CMD_ID_OFFSET 4
9832 +#define DPSW_CMD_ID(id) (((id) << DPSW_CMD_ID_OFFSET) | DPSW_CMD_BASE_VERSION) /* command ID in the upper bits, command base version in the low 4 bits */
9835 +#define DPSW_CMDID_CLOSE DPSW_CMD_ID(0x800)
9836 +#define DPSW_CMDID_OPEN DPSW_CMD_ID(0x802)
9838 +#define DPSW_CMDID_GET_API_VERSION DPSW_CMD_ID(0xa02)
9840 +#define DPSW_CMDID_ENABLE DPSW_CMD_ID(0x002)
9841 +#define DPSW_CMDID_DISABLE DPSW_CMD_ID(0x003)
9842 +#define DPSW_CMDID_GET_ATTR DPSW_CMD_ID(0x004)
9843 +#define DPSW_CMDID_RESET DPSW_CMD_ID(0x005)
9844 +#define DPSW_CMDID_IS_ENABLED DPSW_CMD_ID(0x006)
9846 +#define DPSW_CMDID_SET_IRQ DPSW_CMD_ID(0x010)
9847 +#define DPSW_CMDID_GET_IRQ DPSW_CMD_ID(0x011)
9848 +#define DPSW_CMDID_SET_IRQ_ENABLE DPSW_CMD_ID(0x012)
9849 +#define DPSW_CMDID_GET_IRQ_ENABLE DPSW_CMD_ID(0x013)
9850 +#define DPSW_CMDID_SET_IRQ_MASK DPSW_CMD_ID(0x014)
9851 +#define DPSW_CMDID_GET_IRQ_MASK DPSW_CMD_ID(0x015)
9852 +#define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016)
9853 +#define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017)
9855 +#define DPSW_CMDID_SET_REFLECTION_IF DPSW_CMD_ID(0x022)
9857 +#define DPSW_CMDID_ADD_CUSTOM_TPID DPSW_CMD_ID(0x024)
9859 +#define DPSW_CMDID_REMOVE_CUSTOM_TPID DPSW_CMD_ID(0x026)
9861 +#define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030)
9862 +#define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031)
9863 +#define DPSW_CMDID_IF_SET_ACCEPTED_FRAMES DPSW_CMD_ID(0x032)
9864 +#define DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN DPSW_CMD_ID(0x033)
9865 +#define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_ID(0x034)
9866 +#define DPSW_CMDID_IF_SET_COUNTER DPSW_CMD_ID(0x035)
9867 +#define DPSW_CMDID_IF_SET_TX_SELECTION DPSW_CMD_ID(0x036)
9868 +#define DPSW_CMDID_IF_ADD_REFLECTION DPSW_CMD_ID(0x037)
9869 +#define DPSW_CMDID_IF_REMOVE_REFLECTION DPSW_CMD_ID(0x038)
9870 +#define DPSW_CMDID_IF_SET_FLOODING_METERING DPSW_CMD_ID(0x039)
9871 +#define DPSW_CMDID_IF_SET_METERING DPSW_CMD_ID(0x03A)
9872 +#define DPSW_CMDID_IF_SET_EARLY_DROP DPSW_CMD_ID(0x03B)
9874 +#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D)
9875 +#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E)
9877 +#define DPSW_CMDID_IF_GET_ATTR DPSW_CMD_ID(0x042)
9879 +#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x044)
9880 +#define DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x045)
9881 +#define DPSW_CMDID_IF_GET_LINK_STATE DPSW_CMD_ID(0x046)
9882 +#define DPSW_CMDID_IF_SET_FLOODING DPSW_CMD_ID(0x047)
9883 +#define DPSW_CMDID_IF_SET_BROADCAST DPSW_CMD_ID(0x048)
9884 +#define DPSW_CMDID_IF_SET_MULTICAST DPSW_CMD_ID(0x049)
9885 +#define DPSW_CMDID_IF_GET_TCI DPSW_CMD_ID(0x04A)
9887 +#define DPSW_CMDID_IF_SET_LINK_CFG DPSW_CMD_ID(0x04C)
9889 +#define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060)
9890 +#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_ID(0x061)
9891 +#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED DPSW_CMD_ID(0x062)
9892 +#define DPSW_CMDID_VLAN_ADD_IF_FLOODING DPSW_CMD_ID(0x063)
9893 +#define DPSW_CMDID_VLAN_REMOVE_IF DPSW_CMD_ID(0x064)
9894 +#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED DPSW_CMD_ID(0x065)
9895 +#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING DPSW_CMD_ID(0x066)
9896 +#define DPSW_CMDID_VLAN_REMOVE DPSW_CMD_ID(0x067)
9897 +#define DPSW_CMDID_VLAN_GET_IF DPSW_CMD_ID(0x068)
9898 +#define DPSW_CMDID_VLAN_GET_IF_FLOODING DPSW_CMD_ID(0x069)
9899 +#define DPSW_CMDID_VLAN_GET_IF_UNTAGGED DPSW_CMD_ID(0x06A)
9900 +#define DPSW_CMDID_VLAN_GET_ATTRIBUTES DPSW_CMD_ID(0x06B)
9902 +#define DPSW_CMDID_FDB_GET_MULTICAST DPSW_CMD_ID(0x080)
9903 +#define DPSW_CMDID_FDB_GET_UNICAST DPSW_CMD_ID(0x081)
9904 +#define DPSW_CMDID_FDB_ADD DPSW_CMD_ID(0x082)
9905 +#define DPSW_CMDID_FDB_REMOVE DPSW_CMD_ID(0x083)
9906 +#define DPSW_CMDID_FDB_ADD_UNICAST DPSW_CMD_ID(0x084)
9907 +#define DPSW_CMDID_FDB_REMOVE_UNICAST DPSW_CMD_ID(0x085)
9908 +#define DPSW_CMDID_FDB_ADD_MULTICAST DPSW_CMD_ID(0x086)
9909 +#define DPSW_CMDID_FDB_REMOVE_MULTICAST DPSW_CMD_ID(0x087)
9910 +#define DPSW_CMDID_FDB_SET_LEARNING_MODE DPSW_CMD_ID(0x088)
9911 +#define DPSW_CMDID_FDB_GET_ATTR DPSW_CMD_ID(0x089)
9913 +#define DPSW_CMDID_ACL_ADD DPSW_CMD_ID(0x090)
9914 +#define DPSW_CMDID_ACL_REMOVE DPSW_CMD_ID(0x091)
9915 +#define DPSW_CMDID_ACL_ADD_ENTRY DPSW_CMD_ID(0x092)
9916 +#define DPSW_CMDID_ACL_REMOVE_ENTRY DPSW_CMD_ID(0x093)
9917 +#define DPSW_CMDID_ACL_ADD_IF DPSW_CMD_ID(0x094)
9918 +#define DPSW_CMDID_ACL_REMOVE_IF DPSW_CMD_ID(0x095)
9919 +#define DPSW_CMDID_ACL_GET_ATTR DPSW_CMD_ID(0x096)
9921 +#define DPSW_CMDID_CTRL_IF_GET_ATTR DPSW_CMD_ID(0x0A0)
9922 +#define DPSW_CMDID_CTRL_IF_SET_POOLS DPSW_CMD_ID(0x0A1)
9923 +#define DPSW_CMDID_CTRL_IF_ENABLE DPSW_CMD_ID(0x0A2)
9924 +#define DPSW_CMDID_CTRL_IF_DISABLE DPSW_CMD_ID(0x0A3)
9926 +/* Macros for accessing command fields smaller than 1 byte */
9927 +#define DPSW_MASK(field) \
9928 + GENMASK(DPSW_##field##_SHIFT + DPSW_##field##_SIZE - 1, \
9929 + DPSW_##field##_SHIFT)
9930 +#define dpsw_set_field(var, field, val) \
9931 + ((var) |= (((val) << DPSW_##field##_SHIFT) & DPSW_MASK(field)))
9932 +#define dpsw_get_field(var, field) \
9933 + (((var) & DPSW_MASK(field)) >> DPSW_##field##_SHIFT)
9934 +#define dpsw_get_bit(var, bit) \
9935 + (((var) >> (bit)) & GENMASK(0, 0))
9937 +static inline u64 dpsw_set_bit(u64 var, unsigned int bit, u8 val)
9939 + var |= (u64)val << bit & GENMASK(bit, bit);
9943 +struct dpsw_cmd_open {
9947 +#define DPSW_COMPONENT_TYPE_SHIFT 0
9948 +#define DPSW_COMPONENT_TYPE_SIZE 4
9950 +struct dpsw_cmd_create {
9954 + u8 max_meters_per_if;
9955 + /* from LSB: only the first 4 bits */
9956 + u8 component_type;
9960 + __le16 max_fdb_entries;
9961 + __le16 fdb_aging_time;
9962 + __le16 max_fdb_mc_groups;
9967 +struct dpsw_cmd_destroy {
9971 +#define DPSW_ENABLE_SHIFT 0
9972 +#define DPSW_ENABLE_SIZE 1
9974 +struct dpsw_rsp_is_enabled {
9975 + /* from LSB: enable:1 */
9979 +struct dpsw_cmd_set_irq {
9990 +struct dpsw_cmd_get_irq {
9995 +struct dpsw_rsp_get_irq {
10006 +struct dpsw_cmd_set_irq_enable {
10012 +struct dpsw_cmd_get_irq_enable {
10017 +struct dpsw_rsp_get_irq_enable {
10021 +struct dpsw_cmd_set_irq_mask {
10026 +struct dpsw_cmd_get_irq_mask {
10031 +struct dpsw_rsp_get_irq_mask {
10035 +struct dpsw_cmd_get_irq_status {
10040 +struct dpsw_rsp_get_irq_status {
10044 +struct dpsw_cmd_clear_irq_status {
10049 +#define DPSW_COMPONENT_TYPE_SHIFT 0
10050 +#define DPSW_COMPONENT_TYPE_SIZE 4
10052 +struct dpsw_rsp_get_attr {
10057 + __le16 max_vlans;
10058 + __le16 num_vlans;
10060 + __le16 max_fdb_entries;
10061 + __le16 fdb_aging_time;
10065 + __le16 max_fdb_mc_groups;
10066 + u8 max_meters_per_if;
10067 +	/* from LSB: only the first 4 bits */
10068 + u8 component_type;
10074 +struct dpsw_cmd_set_reflection_if {
10078 +struct dpsw_cmd_if_set_flooding {
10080 + /* from LSB: enable:1 */
10084 +struct dpsw_cmd_if_set_broadcast {
10086 + /* from LSB: enable:1 */
10090 +struct dpsw_cmd_if_set_multicast {
10092 + /* from LSB: enable:1 */
10096 +#define DPSW_VLAN_ID_SHIFT 0
10097 +#define DPSW_VLAN_ID_SIZE 12
10098 +#define DPSW_DEI_SHIFT 12
10099 +#define DPSW_DEI_SIZE 1
10100 +#define DPSW_PCP_SHIFT 13
10101 +#define DPSW_PCP_SIZE 3
10103 +struct dpsw_cmd_if_set_tci {
10105 + /* from LSB: VLAN_ID:12 DEI:1 PCP:3 */
10109 +struct dpsw_cmd_if_get_tci {
10113 +struct dpsw_rsp_if_get_tci {
10120 +#define DPSW_STATE_SHIFT 0
10121 +#define DPSW_STATE_SIZE 4
10123 +struct dpsw_cmd_if_set_stp {
10126 + /* only the first LSB 4 bits */
10130 +#define DPSW_FRAME_TYPE_SHIFT 0
10131 +#define DPSW_FRAME_TYPE_SIZE 4
10132 +#define DPSW_UNACCEPTED_ACT_SHIFT 4
10133 +#define DPSW_UNACCEPTED_ACT_SIZE 4
10135 +struct dpsw_cmd_if_set_accepted_frames {
10137 + /* from LSB: type:4 unaccepted_act:4 */
10141 +#define DPSW_ACCEPT_ALL_SHIFT 0
10142 +#define DPSW_ACCEPT_ALL_SIZE 1
10144 +struct dpsw_cmd_if_set_accept_all_vlan {
10146 + /* only the least significant bit */
10150 +#define DPSW_COUNTER_TYPE_SHIFT 0
10151 +#define DPSW_COUNTER_TYPE_SIZE 5
10153 +struct dpsw_cmd_if_get_counter {
10155 + /* from LSB: type:5 */
10159 +struct dpsw_rsp_if_get_counter {
10164 +struct dpsw_cmd_if_set_counter {
10167 + /* from LSB: type:5 */
10173 +#define DPSW_PRIORITY_SELECTOR_SHIFT 0
10174 +#define DPSW_PRIORITY_SELECTOR_SIZE 3
10175 +#define DPSW_SCHED_MODE_SHIFT 0
10176 +#define DPSW_SCHED_MODE_SIZE 4
10178 +struct dpsw_cmd_if_set_tx_selection {
10180 + /* from LSB: priority_selector:3 */
10181 + u8 priority_selector;
10185 + struct dpsw_tc_sched {
10186 + __le16 delta_bandwidth;
10192 +#define DPSW_FILTER_SHIFT 0
10193 +#define DPSW_FILTER_SIZE 2
10195 +struct dpsw_cmd_if_reflection {
10198 + /* only 2 bits from the LSB */
10202 +#define DPSW_MODE_SHIFT 0
10203 +#define DPSW_MODE_SIZE 4
10204 +#define DPSW_UNITS_SHIFT 4
10205 +#define DPSW_UNITS_SIZE 4
10207 +struct dpsw_cmd_if_set_flooding_metering {
10211 + /* from LSB: mode:4 units:4 */
10221 +struct dpsw_cmd_if_set_metering {
10225 + /* from LSB: mode:4 units:4 */
10235 +#define DPSW_EARLY_DROP_MODE_SHIFT 0
10236 +#define DPSW_EARLY_DROP_MODE_SIZE 2
10237 +#define DPSW_EARLY_DROP_UNIT_SHIFT 2
10238 +#define DPSW_EARLY_DROP_UNIT_SIZE 2
10240 +struct dpsw_prep_early_drop {
10241 + /* from LSB: mode:2 units:2 */
10244 + __le32 tail_drop_threshold;
10245 + u8 green_drop_probability;
10247 + __le64 green_max_threshold;
10248 + __le64 green_min_threshold;
10250 + u8 yellow_drop_probability;
10252 + __le64 yellow_max_threshold;
10253 + __le64 yellow_min_threshold;
10256 +struct dpsw_cmd_if_set_early_drop {
10263 + __le64 early_drop_iova;
10266 +struct dpsw_cmd_custom_tpid {
10271 +struct dpsw_cmd_if {
10275 +#define DPSW_ADMIT_UNTAGGED_SHIFT 0
10276 +#define DPSW_ADMIT_UNTAGGED_SIZE 4
10277 +#define DPSW_ENABLED_SHIFT 5
10278 +#define DPSW_ENABLED_SIZE 1
10279 +#define DPSW_ACCEPT_ALL_VLAN_SHIFT 6
10280 +#define DPSW_ACCEPT_ALL_VLAN_SIZE 1
10282 +struct dpsw_rsp_if_get_attr {
10284 + /* from LSB: admit_untagged:4 enabled:1 accept_all_vlan:1 */
10297 +struct dpsw_cmd_if_set_max_frame_length {
10299 + __le16 frame_length;
10302 +struct dpsw_cmd_if_get_max_frame_length {
10306 +struct dpsw_rsp_if_get_max_frame_length {
10308 + __le16 frame_length;
10311 +struct dpsw_cmd_if_set_link_cfg {
10322 +struct dpsw_cmd_if_get_link_state {
10326 +#define DPSW_UP_SHIFT 0
10327 +#define DPSW_UP_SIZE 1
10329 +struct dpsw_rsp_if_get_link_state {
10341 +struct dpsw_vlan_add {
10346 +struct dpsw_cmd_vlan_manage_if {
10355 +struct dpsw_cmd_vlan_remove {
10360 +struct dpsw_cmd_vlan_get_attr {
10364 +struct dpsw_rsp_vlan_get_attr {
10370 + __le16 num_untagged_ifs;
10371 + __le16 num_flooding_ifs;
10374 +struct dpsw_cmd_vlan_get_if {
10378 +struct dpsw_rsp_vlan_get_if {
10387 +struct dpsw_cmd_vlan_get_if_untagged {
10391 +struct dpsw_rsp_vlan_get_if_untagged {
10400 +struct dpsw_cmd_vlan_get_if_flooding {
10404 +struct dpsw_rsp_vlan_get_if_flooding {
10413 +struct dpsw_cmd_fdb_add {
10415 + __le16 fdb_aging_time;
10416 + __le16 num_fdb_entries;
10419 +struct dpsw_rsp_fdb_add {
10423 +struct dpsw_cmd_fdb_remove {
10427 +#define DPSW_ENTRY_TYPE_SHIFT 0
10428 +#define DPSW_ENTRY_TYPE_SIZE 4
10430 +struct dpsw_cmd_fdb_add_unicast {
10437 + /* only the first 4 bits from LSB */
10441 +struct dpsw_cmd_fdb_get_unicast {
10446 +struct dpsw_rsp_fdb_get_unicast {
10448 + __le16 if_egress;
10449 + /* only first 4 bits from LSB */
10453 +struct dpsw_cmd_fdb_remove_unicast {
10458 + __le16 if_egress;
10459 + /* only the first 4 bits from LSB */
10463 +struct dpsw_cmd_fdb_add_multicast {
10467 + /* only the first 4 bits from LSB */
10477 +struct dpsw_cmd_fdb_get_multicast {
10482 +struct dpsw_rsp_fdb_get_multicast {
10487 + /* only the first 4 bits from LSB */
10494 +struct dpsw_cmd_fdb_remove_multicast {
10498 + /* only the first 4 bits from LSB */
10508 +#define DPSW_LEARNING_MODE_SHIFT 0
10509 +#define DPSW_LEARNING_MODE_SIZE 4
10511 +struct dpsw_cmd_fdb_set_learning_mode {
10513 + /* only the first 4 bits from LSB */
10517 +struct dpsw_cmd_fdb_get_attr {
10521 +struct dpsw_rsp_fdb_get_attr {
10524 + __le16 max_fdb_entries;
10525 + __le16 fdb_aging_time;
10526 + __le16 num_fdb_mc_groups;
10528 + __le16 max_fdb_mc_groups;
10529 + /* only the first 4 bits from LSB */
10530 + u8 learning_mode;
10533 +struct dpsw_cmd_acl_add {
10535 + __le16 max_entries;
10538 +struct dpsw_rsp_acl_add {
10542 +struct dpsw_cmd_acl_remove {
10546 +struct dpsw_prep_acl_entry {
10547 + u8 match_l2_dest_mac[6];
10548 + __le16 match_l2_tpid;
10550 + u8 match_l2_source_mac[6];
10551 + __le16 match_l2_vlan_id;
10553 + __le32 match_l3_dest_ip;
10554 + __le32 match_l3_source_ip;
10556 + __le16 match_l4_dest_port;
10557 + __le16 match_l4_source_port;
10558 + __le16 match_l2_ether_type;
10559 + u8 match_l2_pcp_dei;
10560 + u8 match_l3_dscp;
10562 + u8 mask_l2_dest_mac[6];
10563 + __le16 mask_l2_tpid;
10565 + u8 mask_l2_source_mac[6];
10566 + __le16 mask_l2_vlan_id;
10568 + __le32 mask_l3_dest_ip;
10569 + __le32 mask_l3_source_ip;
10571 + __le16 mask_l4_dest_port;
10572 + __le16 mask_l4_source_port;
10573 + __le16 mask_l2_ether_type;
10574 + u8 mask_l2_pcp_dei;
10577 + u8 match_l3_protocol;
10578 + u8 mask_l3_protocol;
10581 +#define DPSW_RESULT_ACTION_SHIFT 0
10582 +#define DPSW_RESULT_ACTION_SIZE 4
10584 +struct dpsw_cmd_acl_entry {
10586 + __le16 result_if_id;
10587 + __le32 precedence;
10588 + /* from LSB only the first 4 bits */
10589 + u8 result_action;
10595 +struct dpsw_cmd_acl_if {
10604 +struct dpsw_cmd_acl_get_attr {
10608 +struct dpsw_rsp_acl_get_attr {
10612 + __le16 max_entries;
10613 + __le16 num_entries;
10617 +struct dpsw_rsp_ctrl_if_get_attr {
10622 + __le32 rx_err_fqid;
10624 + __le32 tx_err_conf_fqid;
10627 +struct dpsw_cmd_ctrl_if_set_pools {
10629 + /* from LSB: POOL0_BACKUP_POOL:1 ... POOL7_BACKUP_POOL */
10632 + __le32 dpbp_id[8];
10633 + __le16 buffer_size[8];
10636 +struct dpsw_rsp_get_api_version {
10637 + __le16 version_major;
10638 + __le16 version_minor;
10641 +#endif /* __FSL_DPSW_CMD_H */
10642 diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.c b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
10643 new file mode 100644
10644 index 00000000..179e98c8
10646 +++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
10648 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
10650 + * Redistribution and use in source and binary forms, with or without
10651 + * modification, are permitted provided that the following conditions are met:
10652 + * * Redistributions of source code must retain the above copyright
10653 + * notice, this list of conditions and the following disclaimer.
10654 + * * Redistributions in binary form must reproduce the above copyright
10655 + * notice, this list of conditions and the following disclaimer in the
10656 + * documentation and/or other materials provided with the distribution.
10657 + * * Neither the name of the above-listed copyright holders nor the
10658 + * names of any contributors may be used to endorse or promote products
10659 + * derived from this software without specific prior written permission.
10662 + * ALTERNATIVELY, this software may be distributed under the terms of the
10663 + * GNU General Public License ("GPL") as published by the Free Software
10664 + * Foundation, either version 2 of that License or (at your option) any
10667 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
10668 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
10669 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
10670 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
10671 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
10672 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
10673 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
10674 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
10675 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
10676 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
10677 + * POSSIBILITY OF SUCH DAMAGE.
10679 +#include "../../fsl-mc/include/mc-sys.h"
10680 +#include "../../fsl-mc/include/mc-cmd.h"
10682 +#include "dpsw-cmd.h"
10684 +static void build_if_id_bitmap(__le64 *bmap,
10686 + const u16 num_ifs) {
10689 + for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++)
10690 + bmap[id[i] / 64] = dpsw_set_bit(bmap[id[i] / 64],
10695 +static void read_if_id_bitmap(u16 *if_id,
10698 + int bitmap[DPSW_MAX_IF] = { 0 };
10702 + for (i = 0; i < DPSW_MAX_IF; i++) {
10703 + bitmap[i] = dpsw_get_bit(le64_to_cpu(bmap[i / 64]),
10705 + count += bitmap[i];
10708 + *num_ifs = (u16)count;
10710 + for (i = 0; (i < DPSW_MAX_IF) && (j < count); i++) {
10712 + if_id[j] = (u16)i;
10719 + * dpsw_open() - Open a control session for the specified object
10720 + * @mc_io: Pointer to MC portal's I/O object
10721 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10722 + * @dpsw_id: DPSW unique ID
10723 + * @token: Returned token; use in subsequent API calls
10725 + * This function can be used to open a control session for an
10726 + * already created object; an object may have been declared in
10727 + * the DPL or by calling the dpsw_create() function.
10728 + * This function returns a unique authentication token,
10729 + * associated with the specific object ID and the specific MC
10730 + * portal; this token must be used in all subsequent commands for
10731 + * this specific object
10733 + * Return: '0' on Success; Error code otherwise.
10735 +int dpsw_open(struct fsl_mc_io *mc_io,
10740 + struct mc_command cmd = { 0 };
10741 + struct dpsw_cmd_open *cmd_params;
10744 + /* prepare command */
10745 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN,
10748 + cmd_params = (struct dpsw_cmd_open *)cmd.params;
10749 + cmd_params->dpsw_id = cpu_to_le32(dpsw_id);
10751 + /* send command to mc*/
10752 + err = mc_send_command(mc_io, &cmd);
10756 + /* retrieve response parameters */
10757 + *token = mc_cmd_hdr_read_token(&cmd);
10763 + * dpsw_close() - Close the control session of the object
10764 + * @mc_io: Pointer to MC portal's I/O object
10765 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10766 + * @token: Token of DPSW object
10768 + * After this function is called, no further operations are
10769 + * allowed on the object without opening a new control session.
10771 + * Return: '0' on Success; Error code otherwise.
10773 +int dpsw_close(struct fsl_mc_io *mc_io,
10777 + struct mc_command cmd = { 0 };
10779 + /* prepare command */
10780 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE,
10784 + /* send command to mc*/
10785 + return mc_send_command(mc_io, &cmd);
10789 + * dpsw_enable() - Enable DPSW functionality
10790 + * @mc_io: Pointer to MC portal's I/O object
10791 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10792 + * @token: Token of DPSW object
10794 + * Return: Completion status. '0' on Success; Error code otherwise.
10796 +int dpsw_enable(struct fsl_mc_io *mc_io,
10800 + struct mc_command cmd = { 0 };
10802 + /* prepare command */
10803 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE,
10807 + /* send command to mc*/
10808 + return mc_send_command(mc_io, &cmd);
10812 + * dpsw_disable() - Disable DPSW functionality
10813 + * @mc_io: Pointer to MC portal's I/O object
10814 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10815 + * @token: Token of DPSW object
10817 + * Return: Completion status. '0' on Success; Error code otherwise.
10819 +int dpsw_disable(struct fsl_mc_io *mc_io,
10823 + struct mc_command cmd = { 0 };
10825 + /* prepare command */
10826 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE,
10830 + /* send command to mc*/
10831 + return mc_send_command(mc_io, &cmd);
10835 + * dpsw_is_enabled() - Check if the DPSW is enabled
10837 + * @mc_io: Pointer to MC portal's I/O object
10838 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10839 + * @token: Token of DPSW object
10840 + * @en: Returns '1' if object is enabled; '0' otherwise
10842 + * Return: '0' on Success; Error code otherwise
10844 +int dpsw_is_enabled(struct fsl_mc_io *mc_io,
10849 + struct mc_command cmd = { 0 };
10850 + struct dpsw_rsp_is_enabled *cmd_rsp;
10853 + /* prepare command */
10854 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IS_ENABLED, cmd_flags,
10857 + /* send command to mc*/
10858 + err = mc_send_command(mc_io, &cmd);
10862 + /* retrieve response parameters */
10863 + cmd_rsp = (struct dpsw_rsp_is_enabled *)cmd.params;
10864 + *en = dpsw_get_field(cmd_rsp->enabled, ENABLE);
10870 + * dpsw_reset() - Reset the DPSW, returns the object to initial state.
10871 + * @mc_io: Pointer to MC portal's I/O object
10872 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10873 + * @token: Token of DPSW object
10875 + * Return: '0' on Success; Error code otherwise.
10877 +int dpsw_reset(struct fsl_mc_io *mc_io,
10881 + struct mc_command cmd = { 0 };
10883 + /* prepare command */
10884 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET,
10888 + /* send command to mc*/
10889 + return mc_send_command(mc_io, &cmd);
10893 + * dpsw_set_irq() - Set IRQ information for the DPSW to trigger an interrupt.
10894 + * @mc_io: Pointer to MC portal's I/O object
10895 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10896 + * @token: Token of DPSW object
10897 + * @irq_index: Identifies the interrupt index to configure
10898 + * @irq_cfg: IRQ configuration
10900 + * Return: '0' on Success; Error code otherwise.
10902 +int dpsw_set_irq(struct fsl_mc_io *mc_io,
10906 + struct dpsw_irq_cfg *irq_cfg)
10908 + struct mc_command cmd = { 0 };
10909 + struct dpsw_cmd_set_irq *cmd_params;
10911 + /* prepare command */
10912 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ,
10915 + cmd_params = (struct dpsw_cmd_set_irq *)cmd.params;
10916 + cmd_params->irq_index = irq_index;
10917 + cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
10918 + cmd_params->irq_addr = cpu_to_le64(irq_cfg->addr);
10919 + cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
10921 + /* send command to mc*/
10922 + return mc_send_command(mc_io, &cmd);
10926 + * dpsw_get_irq() - Get IRQ information from the DPSW
10928 + * @mc_io: Pointer to MC portal's I/O object
10929 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10930 + * @token: Token of DPSW object
10931 + * @irq_index: The interrupt index to configure
10932 + * @type: Interrupt type: 0 represents message interrupt
10933 + * type (both irq_addr and irq_val are valid)
10934 + * @irq_cfg: IRQ attributes
10936 + * Return: '0' on Success; Error code otherwise.
10938 +int dpsw_get_irq(struct fsl_mc_io *mc_io,
10943 + struct dpsw_irq_cfg *irq_cfg)
10945 + struct mc_command cmd = { 0 };
10946 + struct dpsw_cmd_get_irq *cmd_params;
10947 + struct dpsw_rsp_get_irq *rsp_params;
10950 + /* prepare command */
10951 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ,
10954 + cmd_params = (struct dpsw_cmd_get_irq *)cmd.params;
10955 + cmd_params->irq_index = irq_index;
10957 + /* send command to mc*/
10958 + err = mc_send_command(mc_io, &cmd);
10962 + /* retrieve response parameters */
10963 + rsp_params = (struct dpsw_rsp_get_irq *)cmd.params;
10964 + irq_cfg->addr = le64_to_cpu(rsp_params->irq_addr);
10965 + irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
10966 + irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
10967 + *type = le32_to_cpu(rsp_params->irq_type);
10973 + * dpsw_set_irq_enable() - Set overall interrupt state.
10974 + * @mc_io: Pointer to MC portal's I/O object
10975 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
10976 + * @token: Token of DPCI object
10977 + * @irq_index: The interrupt index to configure
10978 + * @en: Interrupt state - enable = 1, disable = 0
10980 + * Allows GPP software to control when interrupts are generated.
10981 + * Each interrupt can have up to 32 causes. The enable/disable controls the
10982 + * overall interrupt state. If the interrupt is disabled no causes will cause
10985 + * Return: '0' on Success; Error code otherwise.
10987 +int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
10993 + struct mc_command cmd = { 0 };
10994 + struct dpsw_cmd_set_irq_enable *cmd_params;
10996 + /* prepare command */
10997 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE,
11000 + cmd_params = (struct dpsw_cmd_set_irq_enable *)cmd.params;
11001 + dpsw_set_field(cmd_params->enable_state, ENABLE, en);
11002 + cmd_params->irq_index = irq_index;
11004 + /* send command to mc*/
11005 + return mc_send_command(mc_io, &cmd);
11009 + * dpsw_set_irq_mask() - Set interrupt mask.
11010 + * @mc_io: Pointer to MC portal's I/O object
11011 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11012 + * @token: Token of DPCI object
11013 + * @irq_index: The interrupt index to configure
11014 + * @mask: Event mask to trigger interrupt;
11016 + * 0 = ignore event
11017 + * 1 = consider event for asserting IRQ
11019 + * Every interrupt can have up to 32 causes and the interrupt model supports
11020 + * masking/unmasking each cause independently
11022 + * Return: '0' on Success; Error code otherwise.
11024 +int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
11030 + struct mc_command cmd = { 0 };
11031 + struct dpsw_cmd_set_irq_mask *cmd_params;
11033 + /* prepare command */
11034 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK,
11037 + cmd_params = (struct dpsw_cmd_set_irq_mask *)cmd.params;
11038 + cmd_params->mask = cpu_to_le32(mask);
11039 + cmd_params->irq_index = irq_index;
11041 + /* send command to mc*/
11042 + return mc_send_command(mc_io, &cmd);
11046 + * dpsw_get_irq_status() - Get the current status of any pending interrupts
11047 + * @mc_io: Pointer to MC portal's I/O object
11048 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11049 + * @token: Token of DPSW object
11050 + * @irq_index: The interrupt index to configure
11051 + * @status: Returned interrupts status - one bit per cause:
11052 + * 0 = no interrupt pending
11053 + * 1 = interrupt pending
11055 + * Return: '0' on Success; Error code otherwise.
11057 +int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
11063 + struct mc_command cmd = { 0 };
11064 + struct dpsw_cmd_get_irq_status *cmd_params;
11065 + struct dpsw_rsp_get_irq_status *rsp_params;
11068 + /* prepare command */
11069 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS,
11072 + cmd_params = (struct dpsw_cmd_get_irq_status *)cmd.params;
11073 + cmd_params->status = cpu_to_le32(*status);
11074 + cmd_params->irq_index = irq_index;
11076 + /* send command to mc*/
11077 + err = mc_send_command(mc_io, &cmd);
11081 + /* retrieve response parameters */
11082 + rsp_params = (struct dpsw_rsp_get_irq_status *)cmd.params;
11083 + *status = le32_to_cpu(rsp_params->status);
11089 + * dpsw_clear_irq_status() - Clear a pending interrupt's status
11090 + * @mc_io: Pointer to MC portal's I/O object
11091 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11092 + * @token: Token of DPCI object
11093 + * @irq_index: The interrupt index to configure
11094 + * @status: bits to clear (W1C) - one bit per cause:
11095 + * 0 = don't change
11096 + * 1 = clear status bit
11098 + * Return: '0' on Success; Error code otherwise.
11100 +int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
11106 + struct mc_command cmd = { 0 };
11107 + struct dpsw_cmd_clear_irq_status *cmd_params;
11109 + /* prepare command */
11110 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS,
11113 + cmd_params = (struct dpsw_cmd_clear_irq_status *)cmd.params;
11114 + cmd_params->status = cpu_to_le32(status);
11115 + cmd_params->irq_index = irq_index;
11117 + /* send command to mc*/
11118 + return mc_send_command(mc_io, &cmd);
11122 + * dpsw_get_attributes() - Retrieve DPSW attributes
11123 + * @mc_io: Pointer to MC portal's I/O object
11124 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11125 + * @token: Token of DPSW object
11126 + * @attr: Returned DPSW attributes
11128 + * Return: Completion status. '0' on Success; Error code otherwise.
11130 +int dpsw_get_attributes(struct fsl_mc_io *mc_io,
11133 + struct dpsw_attr *attr)
11135 + struct mc_command cmd = { 0 };
11136 + struct dpsw_rsp_get_attr *rsp_params;
11139 + /* prepare command */
11140 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR,
11144 + /* send command to mc*/
11145 + err = mc_send_command(mc_io, &cmd);
11149 + /* retrieve response parameters */
11150 + rsp_params = (struct dpsw_rsp_get_attr *)cmd.params;
11151 + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
11152 + attr->max_fdbs = rsp_params->max_fdbs;
11153 + attr->num_fdbs = rsp_params->num_fdbs;
11154 + attr->max_vlans = le16_to_cpu(rsp_params->max_vlans);
11155 + attr->num_vlans = le16_to_cpu(rsp_params->num_vlans);
11156 + attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
11157 + attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
11158 + attr->id = le32_to_cpu(rsp_params->dpsw_id);
11159 + attr->mem_size = le16_to_cpu(rsp_params->mem_size);
11160 + attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
11161 + attr->max_meters_per_if = rsp_params->max_meters_per_if;
11162 + attr->options = le64_to_cpu(rsp_params->options);
11163 + attr->component_type = dpsw_get_field(rsp_params->component_type,
11170 + * dpsw_set_reflection_if() - Set target interface for reflected interfaces.
11171 + * @mc_io: Pointer to MC portal's I/O object
11172 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11173 + * @token: Token of DPSW object
11174 + * @if_id: Interface Id
11176 + * Only one reflection receive interface is allowed per switch
11178 + * Return: Completion status. '0' on Success; Error code otherwise.
11180 +int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
11185 + struct mc_command cmd = { 0 };
11186 + struct dpsw_cmd_set_reflection_if *cmd_params;
11188 + /* prepare command */
11189 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF,
11192 + cmd_params = (struct dpsw_cmd_set_reflection_if *)cmd.params;
11193 + cmd_params->if_id = cpu_to_le16(if_id);
11195 + /* send command to mc*/
11196 + return mc_send_command(mc_io, &cmd);
11200 + * dpsw_if_set_link_cfg() - Set the link configuration.
11201 + * @mc_io: Pointer to MC portal's I/O object
11202 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11203 + * @token: Token of DPSW object
11204 + * @if_id: Interface id
11205 + * @cfg: Link configuration
11207 + * Return: '0' on Success; Error code otherwise.
11209 +int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
11213 + struct dpsw_link_cfg *cfg)
11215 + struct mc_command cmd = { 0 };
11216 + struct dpsw_cmd_if_set_link_cfg *cmd_params;
11218 + /* prepare command */
11219 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG,
11222 + cmd_params = (struct dpsw_cmd_if_set_link_cfg *)cmd.params;
11223 + cmd_params->if_id = cpu_to_le16(if_id);
11224 + cmd_params->rate = cpu_to_le32(cfg->rate);
11225 + cmd_params->options = cpu_to_le64(cfg->options);
11227 + /* send command to mc*/
11228 + return mc_send_command(mc_io, &cmd);
11232 + * dpsw_if_get_link_state() - Return the link state
11233 + * @mc_io: Pointer to MC portal's I/O object
11234 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11235 + * @token: Token of DPSW object
11236 + * @if_id: Interface id
11237 + * @state: Link state 1 - linkup, 0 - link down or disconnected
11239 + * Return: '0' on Success; Error code otherwise.
11241 +int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
11245 + struct dpsw_link_state *state)
11247 + struct mc_command cmd = { 0 };
11248 + struct dpsw_cmd_if_get_link_state *cmd_params;
11249 + struct dpsw_rsp_if_get_link_state *rsp_params;
11252 + /* prepare command */
11253 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE,
11256 + cmd_params = (struct dpsw_cmd_if_get_link_state *)cmd.params;
11257 + cmd_params->if_id = cpu_to_le16(if_id);
11259 + /* send command to mc*/
11260 + err = mc_send_command(mc_io, &cmd);
11264 + /* retrieve response parameters */
11265 + rsp_params = (struct dpsw_rsp_if_get_link_state *)cmd.params;
11266 + state->rate = le32_to_cpu(rsp_params->rate);
11267 + state->options = le64_to_cpu(rsp_params->options);
11268 + state->up = dpsw_get_field(rsp_params->up, UP);
11274 + * dpsw_if_set_flooding() - Enable Disable flooding for particular interface
11275 + * @mc_io: Pointer to MC portal's I/O object
11276 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11277 + * @token: Token of DPSW object
11278 + * @if_id: Interface Identifier
11279 + * @en: 1 - enable, 0 - disable
11281 + * Return: Completion status. '0' on Success; Error code otherwise.
11283 +int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
11289 + struct mc_command cmd = { 0 };
11290 + struct dpsw_cmd_if_set_flooding *cmd_params;
11292 + /* prepare command */
11293 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING,
11296 + cmd_params = (struct dpsw_cmd_if_set_flooding *)cmd.params;
11297 + cmd_params->if_id = cpu_to_le16(if_id);
11298 + dpsw_set_field(cmd_params->enable, ENABLE, en);
11300 + /* send command to mc*/
11301 + return mc_send_command(mc_io, &cmd);
11305 + * dpsw_if_set_broadcast() - Enable/disable broadcast for particular interface
11306 + * @mc_io: Pointer to MC portal's I/O object
11307 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11308 + * @token: Token of DPSW object
11309 + * @if_id: Interface Identifier
11310 + * @en: 1 - enable, 0 - disable
11312 + * Return: Completion status. '0' on Success; Error code otherwise.
11314 +int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
11320 + struct mc_command cmd = { 0 };
11321 + struct dpsw_cmd_if_set_broadcast *cmd_params;
11323 + /* prepare command */
11324 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST,
11327 + cmd_params = (struct dpsw_cmd_if_set_broadcast *)cmd.params;
11328 + cmd_params->if_id = cpu_to_le16(if_id);
11329 + dpsw_set_field(cmd_params->enable, ENABLE, en);
11331 + /* send command to mc*/
11332 + return mc_send_command(mc_io, &cmd);
11336 + * dpsw_if_set_multicast() - Enable/disable multicast for particular interface
11337 + * @mc_io: Pointer to MC portal's I/O object
11338 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11339 + * @token: Token of DPSW object
11340 + * @if_id: Interface Identifier
11341 + * @en: 1 - enable, 0 - disable
11343 + * Return: Completion status. '0' on Success; Error code otherwise.
11345 +int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
11351 + struct mc_command cmd = { 0 };
11352 + struct dpsw_cmd_if_set_multicast *cmd_params;
11354 + /* prepare command */
11355 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MULTICAST,
11358 + cmd_params = (struct dpsw_cmd_if_set_multicast *)cmd.params;
11359 + cmd_params->if_id = cpu_to_le16(if_id);
11360 + dpsw_set_field(cmd_params->enable, ENABLE, en);
11362 + /* send command to mc*/
11363 + return mc_send_command(mc_io, &cmd);
11367 + * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI)
11368 + * @mc_io: Pointer to MC portal's I/O object
11369 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11370 + * @token: Token of DPSW object
11371 + * @if_id: Interface Identifier
11372 + * @cfg: Tag Control Information Configuration
11374 + * Return: Completion status. '0' on Success; Error code otherwise.
11376 +int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
11380 + const struct dpsw_tci_cfg *cfg)
11382 + struct mc_command cmd = { 0 };
11383 + struct dpsw_cmd_if_set_tci *cmd_params;
11385 + /* prepare command */
11386 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI,
11389 + cmd_params = (struct dpsw_cmd_if_set_tci *)cmd.params;
11390 + cmd_params->if_id = cpu_to_le16(if_id);
11391 + dpsw_set_field(cmd_params->conf, VLAN_ID, cfg->vlan_id);
11392 + dpsw_set_field(cmd_params->conf, DEI, cfg->dei);
11393 + dpsw_set_field(cmd_params->conf, PCP, cfg->pcp);
11394 + cmd_params->conf = cpu_to_le16(cmd_params->conf);
11396 + /* send command to mc*/
11397 + return mc_send_command(mc_io, &cmd);
11401 + * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI)
11402 + * @mc_io: Pointer to MC portal's I/O object
11403 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11404 + * @token: Token of DPSW object
11405 + * @if_id: Interface Identifier
11406 + * @cfg: Tag Control Information Configuration
11408 + * Return: Completion status. '0' on Success; Error code otherwise.
11410 +int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
11414 + struct dpsw_tci_cfg *cfg)
11416 + struct mc_command cmd = { 0 };
11417 + struct dpsw_cmd_if_get_tci *cmd_params;
11418 + struct dpsw_rsp_if_get_tci *rsp_params;
11421 + /* prepare command */
11422 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI,
11425 + cmd_params = (struct dpsw_cmd_if_get_tci *)cmd.params;
11426 + cmd_params->if_id = cpu_to_le16(if_id);
11428 + /* send command to mc*/
11429 + err = mc_send_command(mc_io, &cmd);
11433 + /* retrieve response parameters */
11434 + rsp_params = (struct dpsw_rsp_if_get_tci *)cmd.params;
11435 + cfg->pcp = rsp_params->pcp;
11436 + cfg->dei = rsp_params->dei;
11437 + cfg->vlan_id = le16_to_cpu(rsp_params->vlan_id);
11443 + * dpsw_if_set_stp() - Function sets Spanning Tree Protocol (STP) state.
11444 + * @mc_io: Pointer to MC portal's I/O object
11445 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11446 + * @token: Token of DPSW object
11447 + * @if_id: Interface Identifier
11448 + * @cfg: STP State configuration parameters
11450 + * The following STP states are supported -
11451 + * blocking, listening, learning, forwarding and disabled.
11453 + * Return: Completion status. '0' on Success; Error code otherwise.
11455 +int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
11459 + const struct dpsw_stp_cfg *cfg)
11461 + struct mc_command cmd = { 0 };
11462 + struct dpsw_cmd_if_set_stp *cmd_params;
11464 + /* prepare command */
11465 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP,
11468 + cmd_params = (struct dpsw_cmd_if_set_stp *)cmd.params;
11469 + cmd_params->if_id = cpu_to_le16(if_id);
11470 + cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
11471 + dpsw_set_field(cmd_params->state, STATE, cfg->state);
11473 + /* send command to mc*/
11474 + return mc_send_command(mc_io, &cmd);
11478 + * dpsw_if_set_accepted_frames()
11479 + * @mc_io: Pointer to MC portal's I/O object
11480 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11481 + * @token: Token of DPSW object
11482 + * @if_id: Interface Identifier
11483 + * @cfg: Frame types configuration
11485 + * When admit_only_vlan_tagged is set - the device will discard untagged
11486 + * frames or Priority-Tagged frames received on this interface.
11487 + * When admit_only_untagged- untagged frames or Priority-Tagged
11488 + * frames received on this interface will be accepted and assigned
11489 + * to a VID based on the PVID and VID Set for this interface.
11490 + * When admit_all - the device will accept VLAN tagged, untagged
11491 + * and priority tagged frames.
11492 + * The default is admit_all
11494 + * Return: Completion status. '0' on Success; Error code otherwise.
11496 +int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
11500 + const struct dpsw_accepted_frames_cfg *cfg)
11502 + struct mc_command cmd = { 0 };
11503 + struct dpsw_cmd_if_set_accepted_frames *cmd_params;
11505 + /* prepare command */
11506 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_ACCEPTED_FRAMES,
11509 + cmd_params = (struct dpsw_cmd_if_set_accepted_frames *)cmd.params;
11510 + cmd_params->if_id = cpu_to_le16(if_id);
11511 + dpsw_set_field(cmd_params->unaccepted, FRAME_TYPE, cfg->type);
11512 + dpsw_set_field(cmd_params->unaccepted, UNACCEPTED_ACT,
11513 + cfg->unaccept_act);
11515 + /* send command to mc*/
11516 + return mc_send_command(mc_io, &cmd);
11520 + * dpsw_if_set_accept_all_vlan()
11521 + * @mc_io: Pointer to MC portal's I/O object
11522 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11523 + * @token: Token of DPSW object
11524 + * @if_id: Interface Identifier
11525 + * @accept_all: Accept or drop frames having different VLAN
11527 + * When this is accept (FALSE), the device will discard incoming
11528 + * frames for VLANs that do not include this interface in its
11529 + * Member set. When accept (TRUE), the interface will accept all incoming frames
11531 + * Return: Completion status. '0' on Success; Error code otherwise.
11533 +int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
11539 + struct mc_command cmd = { 0 };
11540 + struct dpsw_cmd_if_set_accept_all_vlan *cmd_params;
11542 + /* prepare command */
11543 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN,
11546 + cmd_params = (struct dpsw_cmd_if_set_accept_all_vlan *)cmd.params;
11547 + cmd_params->if_id = cpu_to_le16(if_id);
11548 + dpsw_set_field(cmd_params->accept_all, ACCEPT_ALL, accept_all);
11550 + /* send command to mc*/
11551 + return mc_send_command(mc_io, &cmd);
11555 + * dpsw_if_get_counter() - Get specific counter of particular interface
11556 + * @mc_io: Pointer to MC portal's I/O object
11557 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11558 + * @token: Token of DPSW object
11559 + * @if_id: Interface Identifier
11560 + * @type: Counter type
11561 + * @counter: return value
11563 + * Return: Completion status. '0' on Success; Error code otherwise.
11565 +int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
11569 + enum dpsw_counter type,
11572 + struct mc_command cmd = { 0 };
11573 + struct dpsw_cmd_if_get_counter *cmd_params;
11574 + struct dpsw_rsp_if_get_counter *rsp_params;
11577 + /* prepare command */
11578 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER,
11581 + cmd_params = (struct dpsw_cmd_if_get_counter *)cmd.params;
11582 + cmd_params->if_id = cpu_to_le16(if_id);
11583 + dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
11585 + /* send command to mc*/
11586 + err = mc_send_command(mc_io, &cmd);
11590 + /* retrieve response parameters */
11591 + rsp_params = (struct dpsw_rsp_if_get_counter *)cmd.params;
11592 + *counter = le64_to_cpu(rsp_params->counter);
11598 + * dpsw_if_set_counter() - Set specific counter of particular interface
11599 + * @mc_io: Pointer to MC portal's I/O object
11600 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11601 + * @token: Token of DPSW object
11602 + * @if_id: Interface Identifier
11603 + * @type: Counter type
11604 + * @counter: New counter value
11606 + * Return: Completion status. '0' on Success; Error code otherwise.
11608 +int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
11612 + enum dpsw_counter type,
11615 + struct mc_command cmd = { 0 };
11616 + struct dpsw_cmd_if_set_counter *cmd_params;
11618 + /* prepare command */
11619 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_COUNTER,
11622 + cmd_params = (struct dpsw_cmd_if_set_counter *)cmd.params;
11623 + cmd_params->if_id = cpu_to_le16(if_id);
11624 + cmd_params->counter = cpu_to_le64(counter);
11625 + dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
11627 + /* send command to mc*/
11628 + return mc_send_command(mc_io, &cmd);
11632 + * dpsw_if_set_tx_selection() - Function is used for mapping variety
11633 + * of frame fields
11634 + * @mc_io: Pointer to MC portal's I/O object
11635 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11636 + * @token: Token of DPSW object
11637 + * @if_id: Interface Identifier
11638 + * @cfg: Traffic class mapping configuration
11640 + * Function is used for mapping a variety of frame fields (DSCP, PCP)
11641 + * to Traffic Class. Traffic class is a number
11642 + * in the range from 0 to 7
11644 + * Return: Completion status. '0' on Success; Error code otherwise.
11646 +int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
11650 + const struct dpsw_tx_selection_cfg *cfg)
11652 + struct dpsw_cmd_if_set_tx_selection *cmd_params;
11653 + struct mc_command cmd = { 0 };
11656 + /* prepare command */
11657 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TX_SELECTION,
11660 + cmd_params = (struct dpsw_cmd_if_set_tx_selection *)cmd.params;
11661 + cmd_params->if_id = cpu_to_le16(if_id);
11662 + dpsw_set_field(cmd_params->priority_selector, PRIORITY_SELECTOR,
11663 + cfg->priority_selector);
11665 + for (i = 0; i < 8; i++) {
11666 + cmd_params->tc_sched[i].delta_bandwidth =
11667 + cpu_to_le16(cfg->tc_sched[i].delta_bandwidth);
11668 + dpsw_set_field(cmd_params->tc_sched[i].mode, SCHED_MODE,
11669 + cfg->tc_sched[i].mode);
11670 + cmd_params->tc_id[i] = cfg->tc_id[i];
11673 + /* send command to mc*/
11674 + return mc_send_command(mc_io, &cmd);
11678 + * dpsw_if_add_reflection() - Identify interface to be reflected or mirrored
11679 + * @mc_io: Pointer to MC portal's I/O object
11680 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11681 + * @token: Token of DPSW object
11682 + * @if_id: Interface Identifier
11683 + * @cfg: Reflection configuration
11685 + * Return: Completion status. '0' on Success; Error code otherwise.
11687 +int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
11691 + const struct dpsw_reflection_cfg *cfg)
11693 + struct mc_command cmd = { 0 };
11694 + struct dpsw_cmd_if_reflection *cmd_params;
11696 + /* prepare command */
11697 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION,
11700 + cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
11701 + cmd_params->if_id = cpu_to_le16(if_id);
11702 + cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
11703 + dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
11705 + /* send command to mc*/
11706 + return mc_send_command(mc_io, &cmd);
11710 + * dpsw_if_remove_reflection() - Remove interface to be reflected or mirrored
11711 + * @mc_io: Pointer to MC portal's I/O object
11712 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11713 + * @token: Token of DPSW object
11714 + * @if_id: Interface Identifier
11715 + * @cfg: Reflection configuration
11717 + * Return: Completion status. '0' on Success; Error code otherwise.
11719 +int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
11723 + const struct dpsw_reflection_cfg *cfg)
11725 + struct mc_command cmd = { 0 };
11726 + struct dpsw_cmd_if_reflection *cmd_params;
11728 + /* prepare command */
11729 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION,
11732 + cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
11733 + cmd_params->if_id = cpu_to_le16(if_id);
11734 + cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
11735 + dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
11737 + /* send command to mc*/
11738 + return mc_send_command(mc_io, &cmd);
11742 + * dpsw_if_set_flooding_metering() - Set flooding metering
11743 + * @mc_io: Pointer to MC portal's I/O object
11744 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11745 + * @token: Token of DPSW object
11746 + * @if_id: Interface Identifier
11747 + * @cfg: Metering parameters
11749 + * Return: Completion status. '0' on Success; Error code otherwise.
11751 +int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
11755 + const struct dpsw_metering_cfg *cfg)
11757 + struct mc_command cmd = { 0 };
11758 + struct dpsw_cmd_if_set_flooding_metering *cmd_params;
11760 + /* prepare command */
11761 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING_METERING,
11764 + cmd_params = (struct dpsw_cmd_if_set_flooding_metering *)cmd.params;
11765 + cmd_params->if_id = cpu_to_le16(if_id);
11766 + dpsw_set_field(cmd_params->mode_units, MODE, cfg->mode);
11767 + dpsw_set_field(cmd_params->mode_units, UNITS, cfg->units);
11768 + cmd_params->cir = cpu_to_le32(cfg->cir);
11769 + cmd_params->eir = cpu_to_le32(cfg->eir);
11770 + cmd_params->cbs = cpu_to_le32(cfg->cbs);
11771 + cmd_params->ebs = cpu_to_le32(cfg->ebs);
11773 + /* send command to mc*/
11774 + return mc_send_command(mc_io, &cmd);
11778 + * dpsw_if_set_metering() - Set interface metering for flooding
11779 + * @mc_io: Pointer to MC portal's I/O object
11780 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11781 + * @token: Token of DPSW object
11782 + * @if_id: Interface Identifier
11783 + * @tc_id: Traffic class ID
11784 + * @cfg: Metering parameters
11786 + * Return: Completion status. '0' on Success; Error code otherwise.
11788 +int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
11793 + const struct dpsw_metering_cfg *cfg)
11795 + struct mc_command cmd = { 0 };
11796 + struct dpsw_cmd_if_set_metering *cmd_params;
11798 + /* prepare command */
11799 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_METERING,
11802 + cmd_params = (struct dpsw_cmd_if_set_metering *)cmd.params;
11803 + cmd_params->if_id = cpu_to_le16(if_id);
11804 + cmd_params->tc_id = tc_id;
11805 + dpsw_set_field(cmd_params->mode_units, MODE, cfg->mode);
11806 + dpsw_set_field(cmd_params->mode_units, UNITS, cfg->units);
11807 + cmd_params->cir = cpu_to_le32(cfg->cir);
11808 + cmd_params->eir = cpu_to_le32(cfg->eir);
11809 + cmd_params->cbs = cpu_to_le32(cfg->cbs);
11810 + cmd_params->ebs = cpu_to_le32(cfg->ebs);
11812 + /* send command to mc*/
11813 + return mc_send_command(mc_io, &cmd);
11817 + * dpsw_prepare_early_drop() - Prepare an early drop for setting in to interface
11818 + * @cfg: Early-drop configuration
11819 + * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA
11821 + * This function has to be called before dpsw_if_tc_set_early_drop
11824 +void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
11825 + u8 *early_drop_buf)
11827 + struct dpsw_prep_early_drop *ext_params;
11829 + ext_params = (struct dpsw_prep_early_drop *)early_drop_buf;
11830 + dpsw_set_field(ext_params->conf, EARLY_DROP_MODE, cfg->drop_mode);
11831 + dpsw_set_field(ext_params->conf, EARLY_DROP_UNIT, cfg->units);
11832 + ext_params->tail_drop_threshold = cpu_to_le32(cfg->tail_drop_threshold);
11833 + ext_params->green_drop_probability = cfg->green.drop_probability;
11834 + ext_params->green_max_threshold = cpu_to_le64(cfg->green.max_threshold);
11835 + ext_params->green_min_threshold = cpu_to_le64(cfg->green.min_threshold);
11836 + ext_params->yellow_drop_probability = cfg->yellow.drop_probability;
11837 + ext_params->yellow_max_threshold =
11838 + cpu_to_le64(cfg->yellow.max_threshold);
11839 + ext_params->yellow_min_threshold =
11840 + cpu_to_le64(cfg->yellow.min_threshold);
11844 + * dpsw_if_set_early_drop() - Set interface traffic class early-drop
11846 + * @mc_io: Pointer to MC portal's I/O object
11847 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11848 + * @token: Token of DPSW object
11849 + * @if_id: Interface Identifier
11850 + * @tc_id: Traffic class selection (0-7)
11851 + * @early_drop_iova: I/O virtual address of 64 bytes;
11852 + * Must be cacheline-aligned and DMA-able memory
11854 + * warning: Before calling this function, call dpsw_prepare_early_drop()
11855 + * to prepare the early_drop_iova parameter
11857 + * Return: '0' on Success; error code otherwise.
11859 +int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
11864 + u64 early_drop_iova)
11866 + struct mc_command cmd = { 0 };
11867 + struct dpsw_cmd_if_set_early_drop *cmd_params;
11869 + /* prepare command */
11870 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_EARLY_DROP,
11873 + cmd_params = (struct dpsw_cmd_if_set_early_drop *)cmd.params;
11874 + cmd_params->tc_id = tc_id;
11875 + cmd_params->if_id = cpu_to_le16(if_id);
11876 + cmd_params->early_drop_iova = cpu_to_le64(early_drop_iova);
11878 + /* send command to mc*/
11879 + return mc_send_command(mc_io, &cmd);
11883 + * dpsw_add_custom_tpid() - API Configures a distinct Ethernet type value
11884 + * @mc_io: Pointer to MC portal's I/O object
11885 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11886 + * @token: Token of DPSW object
11887 + * @cfg: Tag Protocol identifier
11889 + * API Configures a distinct Ethernet type value (or TPID value)
11890 + * to indicate a VLAN tag in addition to the common
11891 + * TPID values 0x8100 and 0x88A8.
11892 + * Two additional TPID's are supported
11894 + * Return: Completion status. '0' on Success; Error code otherwise.
11896 +int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
11899 + const struct dpsw_custom_tpid_cfg *cfg)
11901 + struct mc_command cmd = { 0 };
11902 + struct dpsw_cmd_custom_tpid *cmd_params;
11904 + /* prepare command */
11905 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ADD_CUSTOM_TPID,
11908 + cmd_params = (struct dpsw_cmd_custom_tpid *)cmd.params;
11909 + cmd_params->tpid = cpu_to_le16(cfg->tpid);
11911 + /* send command to mc*/
11912 + return mc_send_command(mc_io, &cmd);
11916 + * dpsw_remove_custom_tpid() - API removes a distinct Ethernet type value
11917 + * @mc_io: Pointer to MC portal's I/O object
11918 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11919 + * @token: Token of DPSW object
11920 + * @cfg: Tag Protocol identifier
11922 + * Return: Completion status. '0' on Success; Error code otherwise.
11924 +int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
11927 + const struct dpsw_custom_tpid_cfg *cfg)
11929 + struct mc_command cmd = { 0 };
11930 + struct dpsw_cmd_custom_tpid *cmd_params;
11932 + /* prepare command */
11933 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_REMOVE_CUSTOM_TPID,
11936 + cmd_params = (struct dpsw_cmd_custom_tpid *)cmd.params;
11937 + cmd_params->tpid = cpu_to_le16(cfg->tpid);
11939 + /* send command to mc*/
11940 + return mc_send_command(mc_io, &cmd);
11944 + * dpsw_if_enable() - Enable Interface
11945 + * @mc_io: Pointer to MC portal's I/O object
11946 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11947 + * @token: Token of DPSW object
11948 + * @if_id: Interface Identifier
11950 + * Return: Completion status. '0' on Success; Error code otherwise.
11952 +int dpsw_if_enable(struct fsl_mc_io *mc_io,
11957 + struct mc_command cmd = { 0 };
11958 + struct dpsw_cmd_if *cmd_params;
11960 + /* prepare command */
11961 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE,
11964 + cmd_params = (struct dpsw_cmd_if *)cmd.params;
11965 + cmd_params->if_id = cpu_to_le16(if_id);
11967 + /* send command to mc*/
11968 + return mc_send_command(mc_io, &cmd);
11972 + * dpsw_if_disable() - Disable Interface
11973 + * @mc_io: Pointer to MC portal's I/O object
11974 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11975 + * @token: Token of DPSW object
11976 + * @if_id: Interface Identifier
11978 + * Return: Completion status. '0' on Success; Error code otherwise.
11980 +int dpsw_if_disable(struct fsl_mc_io *mc_io,
11985 + struct mc_command cmd = { 0 };
11986 + struct dpsw_cmd_if *cmd_params;
11988 + /* prepare command */
11989 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE,
11992 + cmd_params = (struct dpsw_cmd_if *)cmd.params;
11993 + cmd_params->if_id = cpu_to_le16(if_id);
11995 + /* send command to mc*/
11996 + return mc_send_command(mc_io, &cmd);
12000 + * dpsw_if_get_attributes() - Function obtains attributes of interface
12001 + * @mc_io: Pointer to MC portal's I/O object
12002 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12003 + * @token: Token of DPSW object
12004 + * @if_id: Interface Identifier
12005 + * @attr: Returned interface attributes
12007 + * Return: Completion status. '0' on Success; Error code otherwise.
12009 +int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
12013 + struct dpsw_if_attr *attr)
12015 + struct dpsw_rsp_if_get_attr *rsp_params;
12016 + struct dpsw_cmd_if *cmd_params;
12017 + struct mc_command cmd = { 0 };
12020 + /* prepare command */
12021 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR,
12024 + cmd_params = (struct dpsw_cmd_if *)cmd.params;
12025 + cmd_params->if_id = cpu_to_le16(if_id);
12027 + /* send command to mc*/
12028 + err = mc_send_command(mc_io, &cmd);
12032 + /* retrieve response parameters */
12033 + rsp_params = (struct dpsw_rsp_if_get_attr *)cmd.params;
12034 + attr->num_tcs = rsp_params->num_tcs;
12035 + attr->rate = le32_to_cpu(rsp_params->rate);
12036 + attr->options = le32_to_cpu(rsp_params->options);
12037 + attr->enabled = dpsw_get_field(rsp_params->conf, ENABLED);
12038 + attr->accept_all_vlan = dpsw_get_field(rsp_params->conf,
12039 + ACCEPT_ALL_VLAN);
12040 + attr->admit_untagged = dpsw_get_field(rsp_params->conf, ADMIT_UNTAGGED);
12041 + attr->qdid = le16_to_cpu(rsp_params->qdid);
12047 + * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length.
12048 + * @mc_io: Pointer to MC portal's I/O object
12049 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12050 + * @token: Token of DPSW object
12051 + * @if_id: Interface Identifier
12052 + * @frame_length: Maximum Frame Length
12054 + * Return: Completion status. '0' on Success; Error code otherwise.
12056 +int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
12060 + u16 frame_length)
12062 + struct mc_command cmd = { 0 };
12063 + struct dpsw_cmd_if_set_max_frame_length *cmd_params;
12065 + /* prepare command */
12066 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH,
12069 + cmd_params = (struct dpsw_cmd_if_set_max_frame_length *)cmd.params;
12070 + cmd_params->if_id = cpu_to_le16(if_id);
12071 + cmd_params->frame_length = cpu_to_le16(frame_length);
12073 + /* send command to mc*/
12074 + return mc_send_command(mc_io, &cmd);
12078 + * dpsw_if_get_max_frame_length() - Get Maximum Receive frame length.
12079 + * @mc_io: Pointer to MC portal's I/O object
12080 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12081 + * @token: Token of DPSW object
12082 + * @if_id: Interface Identifier
12083 + * @frame_length: Returned maximum Frame Length
12085 + * Return: Completion status. '0' on Success; Error code otherwise.
12087 +int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
12091 + u16 *frame_length)
12093 + struct mc_command cmd = { 0 };
12094 + struct dpsw_cmd_if_get_max_frame_length *cmd_params;
12095 + struct dpsw_rsp_if_get_max_frame_length *rsp_params;
12098 + /* prepare command */
12099 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH,
12102 + cmd_params = (struct dpsw_cmd_if_get_max_frame_length *)cmd.params;
12103 + cmd_params->if_id = cpu_to_le16(if_id);
12105 + /* send command to mc*/
12106 + err = mc_send_command(mc_io, &cmd);
12110 + rsp_params = (struct dpsw_rsp_if_get_max_frame_length *)cmd.params;
12111 + *frame_length = le16_to_cpu(rsp_params->frame_length);
12117 + * dpsw_vlan_add() - Adding new VLAN to DPSW.
12118 + * @mc_io: Pointer to MC portal's I/O object
12119 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12120 + * @token: Token of DPSW object
12121 + * @vlan_id: VLAN Identifier
12122 + * @cfg: VLAN configuration
12124 + * Only VLAN ID and FDB ID are required parameters here.
12125 + * 12 bit VLAN ID is defined in IEEE802.1Q.
12126 + * Adding a duplicate VLAN ID is not allowed.
12127 + * FDB ID can be shared across multiple VLANs. Shared learning
12128 + * is obtained by calling dpsw_vlan_add for multiple VLAN IDs
12129 + * with same fdb_id
12131 + * Return: Completion status. '0' on Success; Error code otherwise.
12133 +int dpsw_vlan_add(struct fsl_mc_io *mc_io,
12137 + const struct dpsw_vlan_cfg *cfg)
12139 + struct mc_command cmd = { 0 };
12140 + struct dpsw_vlan_add *cmd_params;
12142 + /* prepare command */
12143 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD,
12146 + cmd_params = (struct dpsw_vlan_add *)cmd.params;
12147 + cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
12148 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12150 + /* send command to mc*/
12151 + return mc_send_command(mc_io, &cmd);
12155 + * dpsw_vlan_add_if() - Adding a set of interfaces to an existing VLAN.
12156 + * @mc_io: Pointer to MC portal's I/O object
12157 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12158 + * @token: Token of DPSW object
12159 + * @vlan_id: VLAN Identifier
12160 + * @cfg: Set of interfaces to add
12162 + * It adds only interfaces not belonging to this VLAN yet,
12163 + * otherwise an error is generated and an entire command is
12164 + * ignored. This function can be called numerous times always
12165 + * providing required interfaces delta.
12167 + * Return: Completion status. '0' on Success; Error code otherwise.
12169 +int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
12173 + const struct dpsw_vlan_if_cfg *cfg)
12175 + struct mc_command cmd = { 0 };
12176 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12178 + /* prepare command */
12179 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF,
12182 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12183 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12184 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12186 + /* send command to mc*/
12187 + return mc_send_command(mc_io, &cmd);
12191 + * dpsw_vlan_add_if_untagged() - Defining a set of interfaces that should be
12192 + * transmitted as untagged.
12193 + * @mc_io: Pointer to MC portal's I/O object
12194 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12195 + * @token: Token of DPSW object
12196 + * @vlan_id: VLAN Identifier
12197 + * @cfg: Set of interfaces that should be transmitted as untagged
12199 + * These interfaces should already belong to this VLAN.
12200 + * By default all interfaces are transmitted as tagged.
12201 + * Providing a non-existent interface or an untagged interface that is
12202 + * configured untagged already generates an error and the entire
12203 + * command is ignored.
12205 + * Return: Completion status. '0' on Success; Error code otherwise.
12207 +int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
12211 + const struct dpsw_vlan_if_cfg *cfg)
12213 + struct mc_command cmd = { 0 };
12214 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12216 + /* prepare command */
12217 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED,
12220 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12221 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12222 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12224 + /* send command to mc*/
12225 + return mc_send_command(mc_io, &cmd);
12229 + * dpsw_vlan_add_if_flooding() - Define a set of interfaces that should be
12230 + * included in flooding when frame with unknown destination
12231 + * unicast MAC arrived.
12232 + * @mc_io: Pointer to MC portal's I/O object
12233 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12234 + * @token: Token of DPSW object
12235 + * @vlan_id: VLAN Identifier
12236 + * @cfg: Set of interfaces that should be used for flooding
12238 + * These interfaces should belong to this VLAN. By default all
12239 + * interfaces are included into flooding list. Providing
12240 + * non-existent interface or an interface that is already in the
12241 + * flooding list generates an error and the entire command is
12244 + * Return: Completion status. '0' on Success; Error code otherwise.
12246 +int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
12250 + const struct dpsw_vlan_if_cfg *cfg)
12252 + struct mc_command cmd = { 0 };
12253 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12255 + /* prepare command */
12256 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_FLOODING,
12259 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12260 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12261 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12263 + /* send command to mc*/
12264 + return mc_send_command(mc_io, &cmd);
12268 + * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
12269 + * @mc_io: Pointer to MC portal's I/O object
12270 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12271 + * @token: Token of DPSW object
12272 + * @vlan_id: VLAN Identifier
12273 + * @cfg: Set of interfaces that should be removed
12275 + * Interfaces must belong to this VLAN, otherwise an error
12276 + * is returned and the command is ignored
12278 + * Return: Completion status. '0' on Success; Error code otherwise.
12280 +int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
12284 + const struct dpsw_vlan_if_cfg *cfg)
12286 + struct mc_command cmd = { 0 };
12287 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12289 + /* prepare command */
12290 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF,
12293 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12294 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12295 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12297 + /* send command to mc*/
12298 + return mc_send_command(mc_io, &cmd);
12302 + * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should be
12303 + * converted from transmitted as untagged to transmit as tagged.
12304 + * @mc_io: Pointer to MC portal's I/O object
12305 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12306 + * @token: Token of DPSW object
12307 + * @vlan_id: VLAN Identifier
12308 + * @cfg: Set of interfaces that should be removed
12310 + * Interfaces provided by API have to belong to this VLAN and
12311 + * configured untagged, otherwise an error is returned and the
12312 + * command is ignored
12314 + * Return: Completion status. '0' on Success; Error code otherwise.
12316 +int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
12320 + const struct dpsw_vlan_if_cfg *cfg)
12322 + struct mc_command cmd = { 0 };
12323 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12325 + /* prepare command */
12326 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED,
12329 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12330 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12331 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12333 + /* send command to mc*/
12334 + return mc_send_command(mc_io, &cmd);
12338 + * dpsw_vlan_remove_if_flooding() - Define a set of interfaces that should be
12339 + * removed from the flooding list.
12340 + * @mc_io: Pointer to MC portal's I/O object
12341 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12342 + * @token: Token of DPSW object
12343 + * @vlan_id: VLAN Identifier
12344 + * @cfg: Set of interfaces used for flooding
12346 + * Return: Completion status. '0' on Success; Error code otherwise.
12348 +int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
12352 + const struct dpsw_vlan_if_cfg *cfg)
12354 + struct mc_command cmd = { 0 };
12355 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12357 + /* prepare command */
12358 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_FLOODING,
12361 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12362 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12363 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12365 + /* send command to mc*/
12366 + return mc_send_command(mc_io, &cmd);
12370 + * dpsw_vlan_remove() - Remove an entire VLAN
12371 + * @mc_io: Pointer to MC portal's I/O object
12372 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12373 + * @token: Token of DPSW object
12374 + * @vlan_id: VLAN Identifier
12376 + * Return: Completion status. '0' on Success; Error code otherwise.
12378 +int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
12383 + struct mc_command cmd = { 0 };
12384 + struct dpsw_cmd_vlan_remove *cmd_params;
12386 + /* prepare command */
12387 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE,
12390 + cmd_params = (struct dpsw_cmd_vlan_remove *)cmd.params;
12391 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12393 + /* send command to mc*/
12394 + return mc_send_command(mc_io, &cmd);
12398 + * dpsw_vlan_get_attributes() - Get VLAN attributes
12399 + * @mc_io: Pointer to MC portal's I/O object
12400 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12401 + * @token: Token of DPSW object
12402 + * @vlan_id: VLAN Identifier
12403 + * @attr: Returned DPSW attributes
12405 + * Return: Completion status. '0' on Success; Error code otherwise.
12407 +int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
12411 + struct dpsw_vlan_attr *attr)
12413 + struct mc_command cmd = { 0 };
12414 + struct dpsw_cmd_vlan_get_attr *cmd_params;
12415 + struct dpsw_rsp_vlan_get_attr *rsp_params;
12418 + /* prepare command */
12419 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_ATTRIBUTES,
12422 + cmd_params = (struct dpsw_cmd_vlan_get_attr *)cmd.params;
12423 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12425 + /* send command to mc*/
12426 + err = mc_send_command(mc_io, &cmd);
12430 + /* retrieve response parameters */
12431 + rsp_params = (struct dpsw_rsp_vlan_get_attr *)cmd.params;
12432 + attr->fdb_id = le16_to_cpu(rsp_params->fdb_id);
12433 + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
12434 + attr->num_untagged_ifs = le16_to_cpu(rsp_params->num_untagged_ifs);
12435 + attr->num_flooding_ifs = le16_to_cpu(rsp_params->num_flooding_ifs);
12441 + * dpsw_vlan_get_if() - Get interfaces belonging to this VLAN
12442 + * @mc_io: Pointer to MC portal's I/O object
12443 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12444 + * @token: Token of DPSW object
12445 + * @vlan_id: VLAN Identifier
12446 + * @cfg:		Returned set of interfaces belonging to this VLAN
12448 + * Return: Completion status. '0' on Success; Error code otherwise.
12450 +int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
12454 + struct dpsw_vlan_if_cfg *cfg)
12456 + struct mc_command cmd = { 0 };
12457 + struct dpsw_cmd_vlan_get_if *cmd_params;
12458 + struct dpsw_rsp_vlan_get_if *rsp_params;
12461 + /* prepare command */
12462 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF,
12465 + cmd_params = (struct dpsw_cmd_vlan_get_if *)cmd.params;
12466 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12468 + /* send command to mc*/
12469 + err = mc_send_command(mc_io, &cmd);
12473 + /* retrieve response parameters */
12474 + rsp_params = (struct dpsw_rsp_vlan_get_if *)cmd.params;
12475 + cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
12476 + read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
12482 + * dpsw_vlan_get_if_flooding() - Get interfaces used in flooding for this VLAN
12483 + * @mc_io: Pointer to MC portal's I/O object
12484 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12485 + * @token: Token of DPSW object
12486 + * @vlan_id: VLAN Identifier
12487 + * @cfg: Returned set of flooding interfaces
12489 + * Return: Completion status. '0' on Success; Error code otherwise.
12492 +int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
12496 + struct dpsw_vlan_if_cfg *cfg)
12498 + struct mc_command cmd = { 0 };
12499 + struct dpsw_cmd_vlan_get_if_flooding *cmd_params;
12500 + struct dpsw_rsp_vlan_get_if_flooding *rsp_params;
12503 + /* prepare command */
12504 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_FLOODING,
12507 + cmd_params = (struct dpsw_cmd_vlan_get_if_flooding *)cmd.params;
12508 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12510 + /* send command to mc*/
12511 + err = mc_send_command(mc_io, &cmd);
12515 + /* retrieve response parameters */
12516 + rsp_params = (struct dpsw_rsp_vlan_get_if_flooding *)cmd.params;
12517 + cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
12518 + read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
12524 + * dpsw_vlan_get_if_untagged() - Get interfaces that should be transmitted as
12526 + * @mc_io: Pointer to MC portal's I/O object
12527 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12528 + * @token: Token of DPSW object
12529 + * @vlan_id: VLAN Identifier
12530 + * @cfg: Returned set of untagged interfaces
12532 + * Return: Completion status. '0' on Success; Error code otherwise.
12534 +int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
12538 + struct dpsw_vlan_if_cfg *cfg)
12540 + struct mc_command cmd = { 0 };
12541 + struct dpsw_cmd_vlan_get_if_untagged *cmd_params;
12542 + struct dpsw_rsp_vlan_get_if_untagged *rsp_params;
12545 + /* prepare command */
12546 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_UNTAGGED,
12549 + cmd_params = (struct dpsw_cmd_vlan_get_if_untagged *)cmd.params;
12550 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12552 + /* send command to mc*/
12553 + err = mc_send_command(mc_io, &cmd);
12557 + /* retrieve response parameters */
12558 + rsp_params = (struct dpsw_rsp_vlan_get_if_untagged *)cmd.params;
12559 + cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
12560 + read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
12566 + * dpsw_fdb_add() - Add FDB to switch and returns a handle to FDB table for
12568 + * @mc_io: Pointer to MC portal's I/O object
12569 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12570 + * @token: Token of DPSW object
12571 + * @fdb_id: Returned Forwarding Database Identifier
12572 + * @cfg: FDB Configuration
12574 + * Return: Completion status. '0' on Success; Error code otherwise.
12576 +int dpsw_fdb_add(struct fsl_mc_io *mc_io,
12580 + const struct dpsw_fdb_cfg *cfg)
12582 + struct mc_command cmd = { 0 };
12583 + struct dpsw_cmd_fdb_add *cmd_params;
12584 + struct dpsw_rsp_fdb_add *rsp_params;
12587 + /* prepare command */
12588 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD,
12591 + cmd_params = (struct dpsw_cmd_fdb_add *)cmd.params;
12592 + cmd_params->fdb_aging_time = cpu_to_le16(cfg->fdb_aging_time);
12593 + cmd_params->num_fdb_entries = cpu_to_le16(cfg->num_fdb_entries);
12595 + /* send command to mc*/
12596 + err = mc_send_command(mc_io, &cmd);
12600 + /* retrieve response parameters */
12601 + rsp_params = (struct dpsw_rsp_fdb_add *)cmd.params;
12602 + *fdb_id = le16_to_cpu(rsp_params->fdb_id);
12608 + * dpsw_fdb_remove() - Remove FDB from switch
12609 + * @mc_io: Pointer to MC portal's I/O object
12610 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12611 + * @token: Token of DPSW object
12612 + * @fdb_id: Forwarding Database Identifier
12614 + * Return: Completion status. '0' on Success; Error code otherwise.
12616 +int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
12621 + struct mc_command cmd = { 0 };
12622 + struct dpsw_cmd_fdb_remove *cmd_params;
12624 + /* prepare command */
12625 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE,
12628 + cmd_params = (struct dpsw_cmd_fdb_remove *)cmd.params;
12629 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12631 + /* send command to mc*/
12632 + return mc_send_command(mc_io, &cmd);
12636 + * dpsw_fdb_add_unicast() - Function adds a unicast entry into MAC lookup table
12637 + * @mc_io: Pointer to MC portal's I/O object
12638 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12639 + * @token: Token of DPSW object
12640 + * @fdb_id: Forwarding Database Identifier
12641 + * @cfg: Unicast entry configuration
12643 + * Return: Completion status. '0' on Success; Error code otherwise.
12645 +int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
12649 + const struct dpsw_fdb_unicast_cfg *cfg)
12651 + struct mc_command cmd = { 0 };
12652 + struct dpsw_cmd_fdb_add_unicast *cmd_params;
12655 + /* prepare command */
12656 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST,
12659 + cmd_params = (struct dpsw_cmd_fdb_add_unicast *)cmd.params;
12660 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12661 + cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
12662 + for (i = 0; i < 6; i++)
12663 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
12664 + dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
12666 + /* send command to mc*/
12667 + return mc_send_command(mc_io, &cmd);
12671 + * dpsw_fdb_get_unicast() - Get unicast entry from MAC lookup table by
12672 + * unicast Ethernet address
12673 + * @mc_io: Pointer to MC portal's I/O object
12674 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12675 + * @token: Token of DPSW object
12676 + * @fdb_id: Forwarding Database Identifier
12677 + * @cfg: Returned unicast entry configuration
12679 + * Return: Completion status. '0' on Success; Error code otherwise.
12681 +int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
12685 + struct dpsw_fdb_unicast_cfg *cfg)
12687 + struct mc_command cmd = { 0 };
12688 + struct dpsw_cmd_fdb_get_unicast *cmd_params;
12689 + struct dpsw_rsp_fdb_get_unicast *rsp_params;
12692 + /* prepare command */
12693 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_UNICAST,
12696 + cmd_params = (struct dpsw_cmd_fdb_get_unicast *)cmd.params;
12697 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12698 + for (i = 0; i < 6; i++)
12699 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
12701 + /* send command to mc*/
12702 + err = mc_send_command(mc_io, &cmd);
12706 + /* retrieve response parameters */
12707 + rsp_params = (struct dpsw_rsp_fdb_get_unicast *)cmd.params;
12708 + cfg->if_egress = le16_to_cpu(rsp_params->if_egress);
12709 + cfg->type = dpsw_get_field(rsp_params->type, ENTRY_TYPE);
12715 + * dpsw_fdb_remove_unicast() - removes an entry from MAC lookup table
12716 + * @mc_io: Pointer to MC portal's I/O object
12717 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12718 + * @token: Token of DPSW object
12719 + * @fdb_id: Forwarding Database Identifier
12720 + * @cfg: Unicast entry configuration
12722 + * Return: Completion status. '0' on Success; Error code otherwise.
12724 +int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
12728 + const struct dpsw_fdb_unicast_cfg *cfg)
12730 + struct mc_command cmd = { 0 };
12731 + struct dpsw_cmd_fdb_remove_unicast *cmd_params;
12734 + /* prepare command */
12735 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST,
12738 + cmd_params = (struct dpsw_cmd_fdb_remove_unicast *)cmd.params;
12739 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12740 + for (i = 0; i < 6; i++)
12741 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
12742 + cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
12743 + dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
12745 + /* send command to mc*/
12746 + return mc_send_command(mc_io, &cmd);
12750 + * dpsw_fdb_add_multicast() - Add a set of egress interfaces to multi-cast group
12751 + * @mc_io: Pointer to MC portal's I/O object
12752 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12753 + * @token: Token of DPSW object
12754 + * @fdb_id: Forwarding Database Identifier
12755 + * @cfg: Multicast entry configuration
12757 + * If group doesn't exist, it will be created.
12758 + * It adds only interfaces not belonging to this multicast group
12759 + * yet, otherwise error will be generated and the command is
12761 + * This function may be called numerous times always providing
12762 + * required interfaces delta.
12764 + * Return: Completion status. '0' on Success; Error code otherwise.
12766 +int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
12770 + const struct dpsw_fdb_multicast_cfg *cfg)
12772 + struct mc_command cmd = { 0 };
12773 + struct dpsw_cmd_fdb_add_multicast *cmd_params;
12776 + /* prepare command */
12777 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST,
12780 + cmd_params = (struct dpsw_cmd_fdb_add_multicast *)cmd.params;
12781 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12782 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
12783 + dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
12784 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12785 + for (i = 0; i < 6; i++)
12786 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
12788 + /* send command to mc*/
12789 + return mc_send_command(mc_io, &cmd);
12793 + * dpsw_fdb_get_multicast() - Reading multi-cast group by multi-cast Ethernet
12795 + * @mc_io: Pointer to MC portal's I/O object
12796 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12797 + * @token: Token of DPSW object
12798 + * @fdb_id: Forwarding Database Identifier
12799 + * @cfg: Returned multicast entry configuration
12801 + * Return: Completion status. '0' on Success; Error code otherwise.
12803 +int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
12807 + struct dpsw_fdb_multicast_cfg *cfg)
12809 + struct mc_command cmd = { 0 };
12810 + struct dpsw_cmd_fdb_get_multicast *cmd_params;
12811 + struct dpsw_rsp_fdb_get_multicast *rsp_params;
12814 + /* prepare command */
12815 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_MULTICAST,
12818 + cmd_params = (struct dpsw_cmd_fdb_get_multicast *)cmd.params;
12819 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12820 + for (i = 0; i < 6; i++)
12821 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
12823 + /* send command to mc*/
12824 + err = mc_send_command(mc_io, &cmd);
12828 + /* retrieve response parameters */
12829 + rsp_params = (struct dpsw_rsp_fdb_get_multicast *)cmd.params;
12830 + cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
12831 + cfg->type = dpsw_get_field(rsp_params->type, ENTRY_TYPE);
12832 + read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
12838 + * dpsw_fdb_remove_multicast() - Removing interfaces from an existing multicast
12840 + * @mc_io: Pointer to MC portal's I/O object
12841 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12842 + * @token: Token of DPSW object
12843 + * @fdb_id: Forwarding Database Identifier
12844 + * @cfg: Multicast entry configuration
12846 + * Interfaces provided by this API have to exist in the group,
12847 + * otherwise an error will be returned and an entire command
12848 + * ignored. If there is no interface left in the group,
12849 + * an entire group is deleted
12851 + * Return: Completion status. '0' on Success; Error code otherwise.
12853 +int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
12857 + const struct dpsw_fdb_multicast_cfg *cfg)
12859 + struct mc_command cmd = { 0 };
12860 + struct dpsw_cmd_fdb_remove_multicast *cmd_params;
12863 + /* prepare command */
12864 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST,
12867 + cmd_params = (struct dpsw_cmd_fdb_remove_multicast *)cmd.params;
12868 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12869 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
12870 + dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
12871 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12872 + for (i = 0; i < 6; i++)
12873 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
12875 + /* send command to mc*/
12876 + return mc_send_command(mc_io, &cmd);
12880 + * dpsw_fdb_set_learning_mode() - Define FDB learning mode
12881 + * @mc_io: Pointer to MC portal's I/O object
12882 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12883 + * @token: Token of DPSW object
12884 + * @fdb_id: Forwarding Database Identifier
12885 + * @mode: Learning mode
12887 + * Return: Completion status. '0' on Success; Error code otherwise.
12889 +int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
12893 + enum dpsw_fdb_learning_mode mode)
12895 + struct mc_command cmd = { 0 };
12896 + struct dpsw_cmd_fdb_set_learning_mode *cmd_params;
12898 + /* prepare command */
12899 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE,
12902 + cmd_params = (struct dpsw_cmd_fdb_set_learning_mode *)cmd.params;
12903 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12904 + dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode);
12906 + /* send command to mc*/
12907 + return mc_send_command(mc_io, &cmd);
12911 + * dpsw_fdb_get_attributes() - Get FDB attributes
12912 + * @mc_io: Pointer to MC portal's I/O object
12913 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12914 + * @token: Token of DPSW object
12915 + * @fdb_id: Forwarding Database Identifier
12916 + * @attr: Returned FDB attributes
12918 + * Return: Completion status. '0' on Success; Error code otherwise.
12920 +int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
12924 + struct dpsw_fdb_attr *attr)
12926 + struct mc_command cmd = { 0 };
12927 + struct dpsw_cmd_fdb_get_attr *cmd_params;
12928 + struct dpsw_rsp_fdb_get_attr *rsp_params;
12931 + /* prepare command */
12932 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_ATTR,
12935 + cmd_params = (struct dpsw_cmd_fdb_get_attr *)cmd.params;
12936 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
12938 + /* send command to mc*/
12939 + err = mc_send_command(mc_io, &cmd);
12943 + /* retrieve response parameters */
12944 + rsp_params = (struct dpsw_rsp_fdb_get_attr *)cmd.params;
12945 + attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
12946 + attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
12947 + attr->learning_mode = dpsw_get_field(rsp_params->learning_mode,
12949 + attr->num_fdb_mc_groups = le16_to_cpu(rsp_params->num_fdb_mc_groups);
12950 + attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
12956 + * dpsw_acl_add() - Adds ACL to L2 switch.
12957 + * @mc_io: Pointer to MC portal's I/O object
12958 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12959 + * @token: Token of DPSW object
12960 + * @acl_id: Returned ACL ID, for the future reference
12961 + * @cfg: ACL configuration
12963 + * Create Access Control List. Multiple ACLs can be created and
12964 + * co-exist in L2 switch
12966 + * Return: '0' on Success; Error code otherwise.
12968 +int dpsw_acl_add(struct fsl_mc_io *mc_io,
12972 + const struct dpsw_acl_cfg *cfg)
12974 + struct mc_command cmd = { 0 };
12975 + struct dpsw_cmd_acl_add *cmd_params;
12976 + struct dpsw_rsp_acl_add *rsp_params;
12979 + /* prepare command */
12980 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD,
12983 + cmd_params = (struct dpsw_cmd_acl_add *)cmd.params;
12984 + cmd_params->max_entries = cpu_to_le16(cfg->max_entries);
12986 + /* send command to mc*/
12987 + err = mc_send_command(mc_io, &cmd);
12991 + /* retrieve response parameters */
12992 + rsp_params = (struct dpsw_rsp_acl_add *)cmd.params;
12993 + *acl_id = le16_to_cpu(rsp_params->acl_id);
12999 + * dpsw_acl_remove() - Removes ACL from L2 switch.
13000 + * @mc_io: Pointer to MC portal's I/O object
13001 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13002 + * @token: Token of DPSW object
13003 + * @acl_id: ACL ID
13005 + * Return: '0' on Success; Error code otherwise.
13007 +int dpsw_acl_remove(struct fsl_mc_io *mc_io,
13012 + struct mc_command cmd = { 0 };
13013 + struct dpsw_cmd_acl_remove *cmd_params;
13015 + /* prepare command */
13016 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE,
13019 + cmd_params = (struct dpsw_cmd_acl_remove *)cmd.params;
13020 + cmd_params->acl_id = cpu_to_le16(acl_id);
13022 + /* send command to mc*/
13023 + return mc_send_command(mc_io, &cmd);
13027 + * dpsw_acl_prepare_entry_cfg() - Prepare an ACL entry configuration buffer
13029 + * @entry_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
13031 + * This function has to be called before adding or removing acl_entry
13034 +void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
13035 + u8 *entry_cfg_buf)
13037 + struct dpsw_prep_acl_entry *ext_params;
13040 + ext_params = (struct dpsw_prep_acl_entry *)entry_cfg_buf;
13042 + for (i = 0; i < 6; i++) {
13043 + ext_params->match_l2_dest_mac[i] =
13044 + key->match.l2_dest_mac[5 - i];
13045 + ext_params->match_l2_source_mac[i] =
13046 + key->match.l2_source_mac[5 - i];
13047 + ext_params->mask_l2_dest_mac[i] =
13048 + key->mask.l2_dest_mac[5 - i];
13049 + ext_params->mask_l2_source_mac[i] =
13050 + key->mask.l2_source_mac[5 - i];
13053 + ext_params->match_l2_tpid = cpu_to_le16(key->match.l2_tpid);
13054 + ext_params->match_l2_vlan_id = cpu_to_le16(key->match.l2_vlan_id);
13055 + ext_params->match_l3_dest_ip = cpu_to_le32(key->match.l3_dest_ip);
13056 + ext_params->match_l3_source_ip = cpu_to_le32(key->match.l3_source_ip);
13057 + ext_params->match_l4_dest_port = cpu_to_le16(key->match.l4_dest_port);
13058 + ext_params->match_l2_ether_type = cpu_to_le16(key->match.l2_ether_type);
13059 + ext_params->match_l2_pcp_dei = key->match.l2_pcp_dei;
13060 + ext_params->match_l3_dscp = key->match.l3_dscp;
13061 + ext_params->match_l4_source_port =
13062 + cpu_to_le16(key->match.l4_source_port);
13064 + ext_params->mask_l2_tpid = cpu_to_le16(key->mask.l2_tpid);
13065 + ext_params->mask_l2_vlan_id = cpu_to_le16(key->mask.l2_vlan_id);
13066 + ext_params->mask_l3_dest_ip = cpu_to_le32(key->mask.l3_dest_ip);
13067 + ext_params->mask_l3_source_ip = cpu_to_le32(key->mask.l3_source_ip);
13068 + ext_params->mask_l4_dest_port = cpu_to_le16(key->mask.l4_dest_port);
13069 + ext_params->mask_l4_source_port = cpu_to_le16(key->mask.l4_source_port);
13070 + ext_params->mask_l2_ether_type = cpu_to_le16(key->mask.l2_ether_type);
13071 + ext_params->mask_l2_pcp_dei = key->mask.l2_pcp_dei;
13072 + ext_params->mask_l3_dscp = key->mask.l3_dscp;
13073 + ext_params->match_l3_protocol = key->match.l3_protocol;
13074 + ext_params->mask_l3_protocol = key->mask.l3_protocol;
13078 + * dpsw_acl_add_entry() - Adds an entry to ACL.
13079 + * @mc_io: Pointer to MC portal's I/O object
13080 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13081 + * @token: Token of DPSW object
13082 + * @acl_id: ACL ID
13083 + * @cfg: Entry configuration
13085 + * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
13087 + * Return: '0' on Success; Error code otherwise.
13089 +int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
13093 + const struct dpsw_acl_entry_cfg *cfg)
13095 + struct mc_command cmd = { 0 };
13096 + struct dpsw_cmd_acl_entry *cmd_params;
13098 + /* prepare command */
13099 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_ENTRY,
13102 + cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
13103 + cmd_params->acl_id = cpu_to_le16(acl_id);
13104 + cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
13105 + cmd_params->precedence = cpu_to_le32(cfg->precedence);
13106 + dpsw_set_field(cmd_params->result_action, RESULT_ACTION,
13107 + cfg->result.action);
13108 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
13110 + /* send command to mc*/
13111 + return mc_send_command(mc_io, &cmd);
13115 + * dpsw_acl_remove_entry() - Removes an entry from ACL.
13116 + * @mc_io: Pointer to MC portal's I/O object
13117 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13118 + * @token: Token of DPSW object
13119 + * @acl_id: ACL ID
13120 + * @cfg: Entry configuration
13122 + * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
13124 + * Return: '0' on Success; Error code otherwise.
13126 +int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
13130 + const struct dpsw_acl_entry_cfg *cfg)
13132 + struct mc_command cmd = { 0 };
13133 + struct dpsw_cmd_acl_entry *cmd_params;
13135 + /* prepare command */
13136 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY,
13139 + cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
13140 + cmd_params->acl_id = cpu_to_le16(acl_id);
13141 + cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
13142 + cmd_params->precedence = cpu_to_le32(cfg->precedence);
13143 + dpsw_set_field(cmd_params->result_action, RESULT_ACTION,
13144 + cfg->result.action);
13145 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
13147 + /* send command to mc*/
13148 + return mc_send_command(mc_io, &cmd);
13152 + * dpsw_acl_add_if() - Associate interface/interfaces with ACL.
13153 + * @mc_io: Pointer to MC portal's I/O object
13154 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13155 + * @token: Token of DPSW object
13156 + * @acl_id: ACL ID
13157 + * @cfg: Interfaces list
13159 + * Return: '0' on Success; Error code otherwise.
13161 +int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
13165 + const struct dpsw_acl_if_cfg *cfg)
13167 + struct mc_command cmd = { 0 };
13168 + struct dpsw_cmd_acl_if *cmd_params;
13170 + /* prepare command */
13171 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_IF,
13174 + cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
13175 + cmd_params->acl_id = cpu_to_le16(acl_id);
13176 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
13177 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
13179 + /* send command to mc*/
13180 + return mc_send_command(mc_io, &cmd);
13184 + * dpsw_acl_remove_if() - De-associate interface/interfaces from ACL.
13185 + * @mc_io: Pointer to MC portal's I/O object
13186 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13187 + * @token: Token of DPSW object
13188 + * @acl_id: ACL ID
13189 + * @cfg: Interfaces list
13191 + * Return: '0' on Success; Error code otherwise.
13193 +int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
13197 + const struct dpsw_acl_if_cfg *cfg)
13199 + struct mc_command cmd = { 0 };
13200 + struct dpsw_cmd_acl_if *cmd_params;
13202 + /* prepare command */
13203 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_IF,
13206 + cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
13207 + cmd_params->acl_id = cpu_to_le16(acl_id);
13208 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
13209 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
13211 + /* send command to mc*/
13212 + return mc_send_command(mc_io, &cmd);
13216 + * dpsw_acl_get_attributes() - Get ACL attributes
13217 + * @mc_io: Pointer to MC portal's I/O object
13218 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13219 + * @token: Token of DPSW object
13220 + * @acl_id: ACL Identifier
13221 + * @attr: Returned ACL attributes
13223 + * Return: '0' on Success; Error code otherwise.
13225 +int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
13229 + struct dpsw_acl_attr *attr)
13231 + struct mc_command cmd = { 0 };
13232 + struct dpsw_cmd_acl_get_attr *cmd_params;
13233 + struct dpsw_rsp_acl_get_attr *rsp_params;
13236 + /* prepare command */
13237 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_GET_ATTR,
13240 + cmd_params = (struct dpsw_cmd_acl_get_attr *)cmd.params;
13241 + cmd_params->acl_id = cpu_to_le16(acl_id);
13243 + /* send command to mc*/
13244 + err = mc_send_command(mc_io, &cmd);
13248 + /* retrieve response parameters */
13249 + rsp_params = (struct dpsw_rsp_acl_get_attr *)cmd.params;
13250 + attr->max_entries = le16_to_cpu(rsp_params->max_entries);
13251 + attr->num_entries = le16_to_cpu(rsp_params->num_entries);
13252 + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
13258 + * dpsw_ctrl_if_get_attributes() - Obtain control interface attributes
13259 + * @mc_io: Pointer to MC portal's I/O object
13260 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13261 + * @token: Token of DPSW object
13262 + * @attr: Returned control interface attributes
13264 + * Return: '0' on Success; Error code otherwise.
13266 +int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
13269 + struct dpsw_ctrl_if_attr *attr)
13271 + struct mc_command cmd = { 0 };
13272 + struct dpsw_rsp_ctrl_if_get_attr *rsp_params;
13275 + /* prepare command */
13276 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR,
13280 + /* send command to mc*/
13281 + err = mc_send_command(mc_io, &cmd);
13285 + /* retrieve response parameters */
13286 + rsp_params = (struct dpsw_rsp_ctrl_if_get_attr *)cmd.params;
13287 + attr->rx_fqid = le32_to_cpu(rsp_params->rx_fqid);
13288 + attr->rx_err_fqid = le32_to_cpu(rsp_params->rx_err_fqid);
13289 + attr->tx_err_conf_fqid = le32_to_cpu(rsp_params->tx_err_conf_fqid);
13295 + * dpsw_ctrl_if_set_pools() - Set control interface buffer pools
13296 + * @mc_io: Pointer to MC portal's I/O object
13297 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13298 + * @token: Token of DPSW object
13299 + * @pools:	Buffer pools configuration
13301 + * Return: '0' on Success; Error code otherwise.
13303 +int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
13306 + const struct dpsw_ctrl_if_pools_cfg *pools)
13308 + struct mc_command cmd = { 0 };
13309 + struct dpsw_cmd_ctrl_if_set_pools *cmd_params;
13312 + /* prepare command */
13313 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS,
13316 + cmd_params = (struct dpsw_cmd_ctrl_if_set_pools *)cmd.params;
13317 + cmd_params->num_dpbp = pools->num_dpbp;
13318 + for (i = 0; i < 8; i++) {
13319 + cmd_params->backup_pool = dpsw_set_bit(cmd_params->backup_pool,
13321 + pools->pools[i].backup_pool);
13322 + cmd_params->buffer_size[i] =
13323 + cpu_to_le16(pools->pools[i].buffer_size);
13324 + cmd_params->dpbp_id[i] =
13325 + cpu_to_le32(pools->pools[i].dpbp_id);
13328 + /* send command to mc*/
13329 + return mc_send_command(mc_io, &cmd);
13333 + * dpsw_ctrl_if_enable() - Enable control interface
13334 + * @mc_io: Pointer to MC portal's I/O object
13335 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13336 + * @token: Token of DPSW object
13338 + * Return: '0' on Success; Error code otherwise.
13340 +int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
13344 + struct mc_command cmd = { 0 };
13346 + /* prepare command */
13347 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE,
13351 + /* send command to mc*/
13352 + return mc_send_command(mc_io, &cmd);
13356 + * dpsw_ctrl_if_disable() - Function disables control interface
13357 + * @mc_io: Pointer to MC portal's I/O object
13358 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13359 + * @token: Token of DPSW object
13361 + * Return: '0' on Success; Error code otherwise.
13363 +int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
13367 + struct mc_command cmd = { 0 };
13369 + /* prepare command */
13370 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE,
13374 + /* send command to mc*/
13375 + return mc_send_command(mc_io, &cmd);
13379 + * dpsw_get_api_version() - Get Data Path Switch API version
13380 + * @mc_io: Pointer to MC portal's I/O object
13381 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13382 + * @major_ver: Major version of data path switch API
13383 + * @minor_ver: Minor version of data path switch API
13385 + * Return: '0' on Success; Error code otherwise.
13387 +int dpsw_get_api_version(struct fsl_mc_io *mc_io,
13392 + struct mc_command cmd = { 0 };
13393 + struct dpsw_rsp_get_api_version *rsp_params;
13396 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_API_VERSION,
13400 + err = mc_send_command(mc_io, &cmd);
13404 + rsp_params = (struct dpsw_rsp_get_api_version *)cmd.params;
13405 + *major_ver = le16_to_cpu(rsp_params->version_major);
13406 + *minor_ver = le16_to_cpu(rsp_params->version_minor);
13410 diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.h b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
13411 new file mode 100644
13412 index 00000000..c91abeb4
13414 +++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
13416 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
13418 + * Redistribution and use in source and binary forms, with or without
13419 + * modification, are permitted provided that the following conditions are met:
13420 + * * Redistributions of source code must retain the above copyright
13421 + * notice, this list of conditions and the following disclaimer.
13422 + * * Redistributions in binary form must reproduce the above copyright
13423 + * notice, this list of conditions and the following disclaimer in the
13424 + * documentation and/or other materials provided with the distribution.
13425 + * * Neither the name of the above-listed copyright holders nor the
13426 + * names of any contributors may be used to endorse or promote products
13427 + * derived from this software without specific prior written permission.
13430 + * ALTERNATIVELY, this software may be distributed under the terms of the
13431 + * GNU General Public License ("GPL") as published by the Free Software
13432 + * Foundation, either version 2 of that License or (at your option) any
13435 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
13436 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
13437 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
13438 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
13439 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
13440 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
13441 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
13442 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
13443 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
13444 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
13445 + * POSSIBILITY OF SUCH DAMAGE.
13447 +#ifndef __FSL_DPSW_H
13448 +#define __FSL_DPSW_H
13450 +/* Data Path L2-Switch API
13451 + * Contains API for handling DPSW topology and functionality
13457 + * DPSW general definitions
13461 + * Maximum number of traffic class priorities
13463 +#define DPSW_MAX_PRIORITIES 8
13465 + * Maximum number of interfaces
13467 +#define DPSW_MAX_IF 64
13469 +int dpsw_open(struct fsl_mc_io *mc_io,
13474 +int dpsw_close(struct fsl_mc_io *mc_io,
13483 + * Disable flooding
13485 +#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL
13487 + * Disable Multicast
13489 +#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL
13491 + * Support control interface
13493 +#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL
13495 + * Disable flooding metering
13497 +#define DPSW_OPT_FLOODING_METERING_DIS 0x0000000000000020ULL
13499 + * Enable metering
13501 +#define DPSW_OPT_METERING_EN 0x0000000000000040ULL
13504 + * enum dpsw_component_type - component type of a bridge
13505 + * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an
13506 + * enterprise VLAN bridge or of a Provider Bridge used
13507 + * to process C-tagged frames
13508 + * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a
13509 + * Provider Bridge
13512 +enum dpsw_component_type {
13513 + DPSW_COMPONENT_TYPE_C_VLAN = 0,
13514 + DPSW_COMPONENT_TYPE_S_VLAN
13518 + * struct dpsw_cfg - DPSW configuration
13519 + * @num_ifs: Number of external and internal interfaces
13520 + * @adv: Advanced parameters; default is all zeros;
13521 + * use this structure to change default settings
13526 + * struct adv - Advanced parameters
13527 + * @options: Enable/Disable DPSW features (bitmap)
13528 + * @max_vlans: Maximum Number of VLAN's; 0 - indicates default 16
13529 + * @max_meters_per_if: Number of meters per interface
13530 + * @max_fdbs: Maximum Number of FDB's; 0 - indicates default 16
13531 + * @max_fdb_entries: Number of FDB entries for default FDB table;
13532 + * 0 - indicates default 1024 entries.
13533 + * @fdb_aging_time: Default FDB aging time for default FDB table;
13534 + * 0 - indicates default 300 seconds
13535 + * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
13536 + * 0 - indicates default 32
13537 + * @component_type: Indicates the component type of this bridge
13542 + u8 max_meters_per_if;
13544 + u16 max_fdb_entries;
13545 + u16 fdb_aging_time;
13546 + u16 max_fdb_mc_groups;
13547 + enum dpsw_component_type component_type;
13551 +int dpsw_create(struct fsl_mc_io *mc_io,
13554 + const struct dpsw_cfg *cfg,
13557 +int dpsw_destroy(struct fsl_mc_io *mc_io,
13562 +int dpsw_enable(struct fsl_mc_io *mc_io,
13566 +int dpsw_disable(struct fsl_mc_io *mc_io,
13570 +int dpsw_is_enabled(struct fsl_mc_io *mc_io,
13575 +int dpsw_reset(struct fsl_mc_io *mc_io,
13580 + * DPSW IRQ Index and Events
13583 +#define DPSW_IRQ_INDEX_IF 0x0000
13584 +#define DPSW_IRQ_INDEX_L2SW 0x0001
13587 + * IRQ event - Indicates that the link state changed
13589 +#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001
13592 + * struct dpsw_irq_cfg - IRQ configuration
13593 + * @addr: Address that must be written to signal a message-based interrupt
13594 + * @val: Value to write into irq_addr address
13595 + * @irq_num: A user defined number associated with this IRQ
13597 +struct dpsw_irq_cfg {
13603 +int dpsw_set_irq(struct fsl_mc_io *mc_io,
13607 + struct dpsw_irq_cfg *irq_cfg);
13609 +int dpsw_get_irq(struct fsl_mc_io *mc_io,
13614 + struct dpsw_irq_cfg *irq_cfg);
13616 +int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
13622 +int dpsw_get_irq_enable(struct fsl_mc_io *mc_io,
13628 +int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
13634 +int dpsw_get_irq_mask(struct fsl_mc_io *mc_io,
13640 +int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
13646 +int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
13653 + * struct dpsw_attr - Structure representing DPSW attributes
13654 + * @id: DPSW object ID
13655 + * @options: Enable/Disable DPSW features
13656 + * @max_vlans: Maximum Number of VLANs
13657 + * @max_meters_per_if: Number of meters per interface
13658 + * @max_fdbs: Maximum Number of FDBs
13659 + * @max_fdb_entries: Number of FDB entries for default FDB table;
13660 + * 0 - indicates default 1024 entries.
13661 + * @fdb_aging_time: Default FDB aging time for default FDB table;
13662 + * 0 - indicates default 300 seconds
13663 + * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
13664 + * 0 - indicates default 32
13665 + * @mem_size: DPSW frame storage memory size
13666 + * @num_ifs: Number of interfaces
13667 + * @num_vlans: Current number of VLANs
13668 + * @num_fdbs: Current number of FDBs
13669 + * @component_type: Component type of this bridge
13671 +struct dpsw_attr {
13675 + u8 max_meters_per_if;
13677 + u16 max_fdb_entries;
13678 + u16 fdb_aging_time;
13679 + u16 max_fdb_mc_groups;
13684 + enum dpsw_component_type component_type;
13687 +int dpsw_get_attributes(struct fsl_mc_io *mc_io,
13690 + struct dpsw_attr *attr);
13692 +int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
13698 + * enum dpsw_action - Action selection for special/control frames
13699 + * @DPSW_ACTION_DROP: Drop frame
13700 + * @DPSW_ACTION_REDIRECT: Redirect frame to control port
13702 +enum dpsw_action {
13703 + DPSW_ACTION_DROP = 0,
13704 + DPSW_ACTION_REDIRECT = 1
13708 + * Enable auto-negotiation
13710 +#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL
13712 + * Enable half-duplex mode
13714 +#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
13716 + * Enable pause frames
13718 +#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL
13720 + * Enable asymmetric pause frames
13722 +#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
13725 + * struct dpsw_link_cfg - Structure representing DPSW link configuration
13727 + * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
13729 +struct dpsw_link_cfg {
13734 +int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
13738 + struct dpsw_link_cfg *cfg);
13740 + * struct dpsw_link_state - Structure representing DPSW link state
13742 + * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
13743 + * @up: 0 - covers two cases: down and disconnected, 1 - up
13745 +struct dpsw_link_state {
13751 +int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
13755 + struct dpsw_link_state *state);
13757 +int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
13763 +int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
13769 +int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
13776 + * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration
13777 + * @pcp: Priority Code Point (PCP): a 3-bit field which refers
13778 + * to the IEEE 802.1p priority
13779 + * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used
13780 + * separately or in conjunction with PCP to indicate frames
13781 + * eligible to be dropped in the presence of congestion
13782 + * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN
13783 + * to which the frame belongs. The hexadecimal values
13784 + * of 0x000 and 0xFFF are reserved;
13785 + * all other values may be used as VLAN identifiers,
13786 + * allowing up to 4,094 VLANs
13788 +struct dpsw_tci_cfg {
13794 +int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
13798 + const struct dpsw_tci_cfg *cfg);
13800 +int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
13804 + struct dpsw_tci_cfg *cfg);
13807 + * enum dpsw_stp_state - Spanning Tree Protocol (STP) states
13808 + * @DPSW_STP_STATE_BLOCKING: Blocking state
13809 + * @DPSW_STP_STATE_LISTENING: Listening state
13810 + * @DPSW_STP_STATE_LEARNING: Learning state
13811 + * @DPSW_STP_STATE_FORWARDING: Forwarding state
13814 +enum dpsw_stp_state {
13815 + DPSW_STP_STATE_BLOCKING = 0,
13816 + DPSW_STP_STATE_LISTENING = 1,
13817 + DPSW_STP_STATE_LEARNING = 2,
13818 + DPSW_STP_STATE_FORWARDING = 3
13822 + * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration
13823 + * @vlan_id: VLAN ID to apply the STP state to
13824 + * @state: STP state
13826 +struct dpsw_stp_cfg {
13828 + enum dpsw_stp_state state;
13831 +int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
13835 + const struct dpsw_stp_cfg *cfg);
13838 + * enum dpsw_accepted_frames - Types of frames to accept
13839 + * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and
13840 + * priority tagged frames
13841 + * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
13842 + * Priority-Tagged frames received on this interface.
13845 +enum dpsw_accepted_frames {
13846 + DPSW_ADMIT_ALL = 1,
13847 + DPSW_ADMIT_ONLY_VLAN_TAGGED = 3
13851 + * struct dpsw_accepted_frames_cfg - Types of frames to accept configuration
13852 + * @type: Defines ingress accepted frames
13853 + * @unaccept_act: When a frame is not accepted, it may be discarded or
13854 + * redirected to control interface depending on this mode
13856 +struct dpsw_accepted_frames_cfg {
13857 + enum dpsw_accepted_frames type;
13858 + enum dpsw_action unaccept_act;
13861 +int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
13865 + const struct dpsw_accepted_frames_cfg *cfg);
13867 +int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
13874 + * enum dpsw_counter - Counters types
13875 + * @DPSW_CNT_ING_FRAME: Counts ingress frames
13876 + * @DPSW_CNT_ING_BYTE: Counts ingress bytes
13877 + * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
13878 + * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frame
13879 + * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
13880 + * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
13881 + * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
13882 + * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
13883 + * @DPSW_CNT_EGR_FRAME: Counts egress frames
13884 + * @DPSW_CNT_EGR_BYTE: Counts egress bytes
13885 + * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
13886 + * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames
13888 +enum dpsw_counter {
13889 + DPSW_CNT_ING_FRAME = 0x0,
13890 + DPSW_CNT_ING_BYTE = 0x1,
13891 + DPSW_CNT_ING_FLTR_FRAME = 0x2,
13892 + DPSW_CNT_ING_FRAME_DISCARD = 0x3,
13893 + DPSW_CNT_ING_MCAST_FRAME = 0x4,
13894 + DPSW_CNT_ING_MCAST_BYTE = 0x5,
13895 + DPSW_CNT_ING_BCAST_FRAME = 0x6,
13896 + DPSW_CNT_ING_BCAST_BYTES = 0x7,
13897 + DPSW_CNT_EGR_FRAME = 0x8,
13898 + DPSW_CNT_EGR_BYTE = 0x9,
13899 + DPSW_CNT_EGR_FRAME_DISCARD = 0xa,
13900 + DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb
13903 +int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
13907 + enum dpsw_counter type,
13910 +int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
13914 + enum dpsw_counter type,
13918 + * Maximum number of TC
13920 +#define DPSW_MAX_TC 8
13923 + * enum dpsw_priority_selector - User priority
13924 + * @DPSW_UP_PCP: Priority Code Point (PCP): a 3-bit field which
13925 + * refers to the IEEE 802.1p priority.
13926 + * @DPSW_UP_DSCP: Differentiated services Code Point (DSCP): 6 bit
13927 + * field from IP header
13930 +enum dpsw_priority_selector {
13936 + * enum dpsw_schedule_mode - Traffic classes scheduling
13937 + * @DPSW_SCHED_STRICT_PRIORITY: schedule strict priority
13938 + * @DPSW_SCHED_WEIGHTED: schedule based on token bucket created algorithm
13940 +enum dpsw_schedule_mode {
13941 + DPSW_SCHED_STRICT_PRIORITY,
13942 + DPSW_SCHED_WEIGHTED
13946 + * struct dpsw_tx_schedule_cfg - traffic class configuration
13947 + * @mode: Strict or weight-based scheduling
13948 + * @delta_bandwidth: weighted Bandwidth in range from 100 to 10000
13950 +struct dpsw_tx_schedule_cfg {
13951 + enum dpsw_schedule_mode mode;
13952 + u16 delta_bandwidth;
13956 + * struct dpsw_tx_selection_cfg - Mapping user priority into traffic
13957 + * class configuration
13958 + * @priority_selector: Source for user priority regeneration
13959 + * @tc_id: The Regenerated User priority that the incoming
13960 + * User Priority is mapped to for this interface
13961 + * @tc_sched: Traffic classes configuration
13963 +struct dpsw_tx_selection_cfg {
13964 + enum dpsw_priority_selector priority_selector;
13965 + u8 tc_id[DPSW_MAX_PRIORITIES];
13966 + struct dpsw_tx_schedule_cfg tc_sched[DPSW_MAX_TC];
13969 +int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
13973 + const struct dpsw_tx_selection_cfg *cfg);
13976 + * enum dpsw_reflection_filter - Filter type for frames to reflect
13977 + * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames
13978 + * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames belong to
13979 + * particular VLAN defined by vid parameter
13982 +enum dpsw_reflection_filter {
13983 + DPSW_REFLECTION_FILTER_INGRESS_ALL = 0,
13984 + DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1
13988 + * struct dpsw_reflection_cfg - Structure representing reflection information
13989 + * @filter: Filter type for frames to reflect
13990 + * @vlan_id: Vlan Id to reflect; valid only when filter type is
13991 + * DPSW_INGRESS_VLAN
13993 +struct dpsw_reflection_cfg {
13994 + enum dpsw_reflection_filter filter;
13998 +int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
14002 + const struct dpsw_reflection_cfg *cfg);
14004 +int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
14008 + const struct dpsw_reflection_cfg *cfg);
14011 + * enum dpsw_metering_mode - Metering modes
14012 + * @DPSW_METERING_MODE_NONE: metering disabled
14013 + * @DPSW_METERING_MODE_RFC2698: RFC 2698
14014 + * @DPSW_METERING_MODE_RFC4115: RFC 4115
14016 +enum dpsw_metering_mode {
14017 + DPSW_METERING_MODE_NONE = 0,
14018 + DPSW_METERING_MODE_RFC2698,
14019 + DPSW_METERING_MODE_RFC4115
14023 + * enum dpsw_metering_unit - Metering count
14024 + * @DPSW_METERING_UNIT_BYTES: count bytes
14025 + * @DPSW_METERING_UNIT_FRAMES: count frames
14027 +enum dpsw_metering_unit {
14028 + DPSW_METERING_UNIT_BYTES = 0,
14029 + DPSW_METERING_UNIT_FRAMES
14033 + * struct dpsw_metering_cfg - Metering configuration
14034 + * @mode: metering modes
14035 + * @units: Bytes or frame units
14036 + * @cir: Committed information rate (CIR) in Kbits/s
14037 + * @eir: Peak information rate (PIR) Kbit/s rfc2698
14038 + * Excess information rate (EIR) Kbit/s rfc4115
14039 + * @cbs: Committed burst size (CBS) in bytes
14040 + * @ebs: Peak burst size (PBS) in bytes for rfc2698
14041 + * Excess burst size (EBS) in bytes rfc4115
14044 +struct dpsw_metering_cfg {
14045 + enum dpsw_metering_mode mode;
14046 + enum dpsw_metering_unit units;
14053 +int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
14057 + const struct dpsw_metering_cfg *cfg);
14059 +int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
14064 + const struct dpsw_metering_cfg *cfg);
14067 + * enum dpsw_early_drop_unit - DPSW early drop unit
14068 + * @DPSW_EARLY_DROP_UNIT_BYTE: count bytes
14069 + * @DPSW_EARLY_DROP_UNIT_FRAMES: count frames
14071 +enum dpsw_early_drop_unit {
14072 + DPSW_EARLY_DROP_UNIT_BYTE = 0,
14073 + DPSW_EARLY_DROP_UNIT_FRAMES
14077 + * enum dpsw_early_drop_mode - DPSW early drop mode
14078 + * @DPSW_EARLY_DROP_MODE_NONE: early drop is disabled
14079 + * @DPSW_EARLY_DROP_MODE_TAIL: early drop in taildrop mode
14080 + * @DPSW_EARLY_DROP_MODE_WRED: early drop in WRED mode
14082 +enum dpsw_early_drop_mode {
14083 + DPSW_EARLY_DROP_MODE_NONE = 0,
14084 + DPSW_EARLY_DROP_MODE_TAIL,
14085 + DPSW_EARLY_DROP_MODE_WRED
14089 + * struct dpsw_wred_cfg - WRED configuration
14090 + * @max_threshold: maximum threshold that packets may be discarded. Above this
14091 + * threshold all packets are discarded; must be less than 2^39;
14092 + * approximated to be expressed as (x+256)*2^(y-1) due to HW
14093 + * implementation.
14094 + * @min_threshold: minimum threshold that packets may be discarded at
14095 + * @drop_probability: probability that a packet will be discarded (1-100,
14096 + * associated with the maximum threshold)
14098 +struct dpsw_wred_cfg {
14099 + u64 min_threshold;
14100 + u64 max_threshold;
14101 + u8 drop_probability;
14105 + * struct dpsw_early_drop_cfg - early-drop configuration
14106 + * @drop_mode: drop mode
14107 + * @units: count units
14108 + * @yellow: WRED - 'yellow' configuration
14109 + * @green: WRED - 'green' configuration
14110 + * @tail_drop_threshold: tail drop threshold
14112 +struct dpsw_early_drop_cfg {
14113 + enum dpsw_early_drop_mode drop_mode;
14114 + enum dpsw_early_drop_unit units;
14115 + struct dpsw_wred_cfg yellow;
14116 + struct dpsw_wred_cfg green;
14117 + u32 tail_drop_threshold;
14120 +void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
14121 + u8 *early_drop_buf);
14123 +int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
14128 + u64 early_drop_iova);
14131 + * struct dpsw_custom_tpid_cfg - Structure representing tag Protocol identifier
14132 + * @tpid: An additional tag protocol identifier
14134 +struct dpsw_custom_tpid_cfg {
14138 +int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
14141 + const struct dpsw_custom_tpid_cfg *cfg);
14143 +int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
14146 + const struct dpsw_custom_tpid_cfg *cfg);
14148 +int dpsw_if_enable(struct fsl_mc_io *mc_io,
14153 +int dpsw_if_disable(struct fsl_mc_io *mc_io,
14159 + * struct dpsw_if_attr - Structure representing DPSW interface attributes
14160 + * @num_tcs: Number of traffic classes
14161 + * @rate: Transmit rate in bits per second
14162 + * @options: Interface configuration options (bitmap)
14163 + * @enabled: Indicates if interface is enabled
14164 + * @accept_all_vlan: The device discards/accepts incoming frames
14165 + * for VLANs that do not include this interface
14166 + * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device
14167 + * discards untagged frames or priority-tagged frames received on
14168 + * this interface;
14169 + * When set to 'DPSW_ADMIT_ALL', untagged frames or priority-
14170 + * tagged frames received on this interface are accepted
14171 + * @qdid: control frames transmit qdid
14173 +struct dpsw_if_attr {
14178 + int accept_all_vlan;
14179 + enum dpsw_accepted_frames admit_untagged;
14183 +int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
14187 + struct dpsw_if_attr *attr);
14189 +int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
14193 + u16 frame_length);
14195 +int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
14199 + u16 *frame_length);
14202 + * struct dpsw_vlan_cfg - VLAN Configuration
14203 + * @fdb_id: Forwarding Data Base
14205 +struct dpsw_vlan_cfg {
14209 +int dpsw_vlan_add(struct fsl_mc_io *mc_io,
14213 + const struct dpsw_vlan_cfg *cfg);
14216 + * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces
14217 + * @num_ifs: The number of interfaces that are assigned to the egress
14218 + * list for this VLAN
14219 + * @if_id: The set of interfaces that are
14220 + * assigned to the egress list for this VLAN
14222 +struct dpsw_vlan_if_cfg {
14224 + u16 if_id[DPSW_MAX_IF];
14227 +int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
14231 + const struct dpsw_vlan_if_cfg *cfg);
14233 +int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
14237 + const struct dpsw_vlan_if_cfg *cfg);
14239 +int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
14243 + const struct dpsw_vlan_if_cfg *cfg);
14245 +int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
14249 + const struct dpsw_vlan_if_cfg *cfg);
14251 +int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
14255 + const struct dpsw_vlan_if_cfg *cfg);
14257 +int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
14261 + const struct dpsw_vlan_if_cfg *cfg);
14263 +int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
14269 + * struct dpsw_vlan_attr - VLAN attributes
14270 + * @fdb_id: Associated FDB ID
14271 + * @num_ifs: Number of interfaces
14272 + * @num_untagged_ifs: Number of untagged interfaces
14273 + * @num_flooding_ifs: Number of flooding interfaces
14275 +struct dpsw_vlan_attr {
14278 + u16 num_untagged_ifs;
14279 + u16 num_flooding_ifs;
14282 +int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
14286 + struct dpsw_vlan_attr *attr);
14288 +int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
14292 + struct dpsw_vlan_if_cfg *cfg);
14294 +int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
14298 + struct dpsw_vlan_if_cfg *cfg);
14300 +int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
14304 + struct dpsw_vlan_if_cfg *cfg);
14307 + * struct dpsw_fdb_cfg - FDB Configuration
14308 + * @num_fdb_entries: Number of FDB entries
14309 + * @fdb_aging_time: Aging time in seconds
14311 +struct dpsw_fdb_cfg {
14312 + u16 num_fdb_entries;
14313 + u16 fdb_aging_time;
14316 +int dpsw_fdb_add(struct fsl_mc_io *mc_io,
14320 + const struct dpsw_fdb_cfg *cfg);
14322 +int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
14328 + * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic
14329 + * @DPSW_FDB_ENTRY_STATIC: Static entry
14330 + * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry
14332 +enum dpsw_fdb_entry_type {
14333 + DPSW_FDB_ENTRY_STATIC = 0,
14334 + DPSW_FDB_ENTRY_DINAMIC = 1
14338 + * struct dpsw_fdb_unicast_cfg - Unicast entry configuration
14339 + * @type: Select static or dynamic entry
14340 + * @mac_addr: MAC address
14341 + * @if_egress: Egress interface ID
14343 +struct dpsw_fdb_unicast_cfg {
14344 + enum dpsw_fdb_entry_type type;
14349 +int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
14353 + const struct dpsw_fdb_unicast_cfg *cfg);
14355 +int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
14359 + struct dpsw_fdb_unicast_cfg *cfg);
14361 +int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
14365 + const struct dpsw_fdb_unicast_cfg *cfg);
14368 + * struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration
14369 + * @type: Select static or dynamic entry
14370 + * @mac_addr: MAC address
14371 + * @num_ifs: Number of external and internal interfaces
14372 + * @if_id: Egress interface IDs
14374 +struct dpsw_fdb_multicast_cfg {
14375 + enum dpsw_fdb_entry_type type;
14378 + u16 if_id[DPSW_MAX_IF];
14381 +int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
14385 + const struct dpsw_fdb_multicast_cfg *cfg);
14387 +int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
14391 + struct dpsw_fdb_multicast_cfg *cfg);
14393 +int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
14397 + const struct dpsw_fdb_multicast_cfg *cfg);
14400 + * enum dpsw_fdb_learning_mode - Auto-learning modes
14401 + * @DPSW_FDB_LEARNING_MODE_DIS: Disable Auto-learning
14402 + * @DPSW_FDB_LEARNING_MODE_HW: Enable HW auto-Learning
14403 + * @DPSW_FDB_LEARNING_MODE_NON_SECURE: Enable non-secure learning by CPU
14404 + * @DPSW_FDB_LEARNING_MODE_SECURE: Enable secure learning by CPU
14406 + * NONE - SECURE LEARNING
14407 + * SMAC found DMAC found CTLU Action
14408 + * v v Forward frame to
14409 + * 1. DMAC destination
14410 + * - v Forward frame to
14411 + * 1. DMAC destination
14412 + * 2. Control interface
14413 + * v - Forward frame to
14414 + * 1. Flooding list of interfaces
14415 + * - - Forward frame to
14416 + * 1. Flooding list of interfaces
14417 + * 2. Control interface
14419 + * SMAC found DMAC found CTLU Action
14420 + * v v Forward frame to
14421 + * 1. DMAC destination
14422 + * - v Forward frame to
14423 + * 1. Control interface
14424 + * v - Forward frame to
14425 + * 1. Flooding list of interfaces
14426 + * - - Forward frame to
14427 + * 1. Control interface
14429 +enum dpsw_fdb_learning_mode {
14430 + DPSW_FDB_LEARNING_MODE_DIS = 0,
14431 + DPSW_FDB_LEARNING_MODE_HW = 1,
14432 + DPSW_FDB_LEARNING_MODE_NON_SECURE = 2,
14433 + DPSW_FDB_LEARNING_MODE_SECURE = 3
14436 +int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
14440 + enum dpsw_fdb_learning_mode mode);
14443 + * struct dpsw_fdb_attr - FDB Attributes
14444 + * @max_fdb_entries: Number of FDB entries
14445 + * @fdb_aging_time: Aging time in seconds
14446 + * @learning_mode: Learning mode
14447 + * @num_fdb_mc_groups: Current number of multicast groups
14448 + * @max_fdb_mc_groups: Maximum number of multicast groups
14450 +struct dpsw_fdb_attr {
14451 + u16 max_fdb_entries;
14452 + u16 fdb_aging_time;
14453 + enum dpsw_fdb_learning_mode learning_mode;
14454 + u16 num_fdb_mc_groups;
14455 + u16 max_fdb_mc_groups;
14458 +int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
14462 + struct dpsw_fdb_attr *attr);
14465 + * struct dpsw_acl_cfg - ACL Configuration
14466 + * @max_entries: Maximum number of ACL entries
14468 +struct dpsw_acl_cfg {
14473 + * struct dpsw_acl_fields - ACL fields.
14474 + * @l2_dest_mac: Destination MAC address: BPDU, Multicast, Broadcast, Unicast,
14475 + * slow protocols, MVRP, STP
14476 + * @l2_source_mac: Source MAC address
14477 + * @l2_tpid: Layer 2 (Ethernet) protocol type, used to identify the following
14478 + * protocols: MPLS, PTP, PFC, ARP, Jumbo frames, LLDP, IEEE802.1ae,
14479 + * Q-in-Q, IPv4, IPv6, PPPoE
14480 + * @l2_pcp_dei: Priority Code Point (PCP) and Drop Eligible Indicator (DEI) of the VLAN tag
14481 + * @l2_vlan_id: layer 2 VLAN ID
14482 + * @l2_ether_type: layer 2 Ethernet type
14483 + * @l3_dscp: Layer 3 differentiated services code point
14484 + * @l3_protocol: Tells the Network layer at the destination host, to which
14485 + * Protocol this packet belongs to. The following protocol are
14486 + * supported: ICMP, IGMP, IPv4 (encapsulation), TCP, IPv6
14487 + * (encapsulation), GRE, PTP
14488 + * @l3_source_ip: Source IPv4 IP
14489 + * @l3_dest_ip: Destination IPv4 IP
14490 + * @l4_source_port: Source TCP/UDP Port
14491 + * @l4_dest_port: Destination TCP/UDP Port
14493 +struct dpsw_acl_fields {
14494 + u8 l2_dest_mac[6];
14495 + u8 l2_source_mac[6];
14499 + u16 l2_ether_type;
14502 + u32 l3_source_ip;
14504 + u16 l4_source_port;
14505 + u16 l4_dest_port;
14509 + * struct dpsw_acl_key - ACL key
14510 + * @match: Match fields
14511 + * @mask: Mask: b'1 - valid, b'0 don't care
14513 +struct dpsw_acl_key {
14514 + struct dpsw_acl_fields match;
14515 + struct dpsw_acl_fields mask;
14519 + * enum dpsw_acl_action
14520 + * @DPSW_ACL_ACTION_DROP: Drop frame
14521 + * @DPSW_ACL_ACTION_REDIRECT: Redirect to certain port
14522 + * @DPSW_ACL_ACTION_ACCEPT: Accept frame
14523 + * @DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF: Redirect to control interface
14525 +enum dpsw_acl_action {
14526 + DPSW_ACL_ACTION_DROP,
14527 + DPSW_ACL_ACTION_REDIRECT,
14528 + DPSW_ACL_ACTION_ACCEPT,
14529 + DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF
14533 + * struct dpsw_acl_result - ACL action
14534 + * @action: Action should be taken when ACL entry hit
14535 + * @if_id: Interface IDs to redirect frame. Valid only if redirect selected for
14538 +struct dpsw_acl_result {
14539 + enum dpsw_acl_action action;
14544 + * struct dpsw_acl_entry_cfg - ACL entry
14545 + * @key_iova: I/O virtual address of DMA-able memory filled with key after call
14546 + * to dpsw_acl_prepare_entry_cfg()
14547 + * @result: Required action when entry hit occurs
14548 + * @precedence: Precedence inside ACL 0 is lowest; This priority can not change
14549 + * during the lifetime of a Policy. It is user responsibility to
14550 + * space the priorities according to consequent rule additions.
14552 +struct dpsw_acl_entry_cfg {
14554 + struct dpsw_acl_result result;
14558 +int dpsw_acl_add(struct fsl_mc_io *mc_io,
14562 + const struct dpsw_acl_cfg *cfg);
14564 +int dpsw_acl_remove(struct fsl_mc_io *mc_io,
14569 +void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
14570 + uint8_t *entry_cfg_buf);
14572 +int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
14576 + const struct dpsw_acl_entry_cfg *cfg);
14578 +int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
14582 + const struct dpsw_acl_entry_cfg *cfg);
14585 + * struct dpsw_acl_if_cfg - List of interfaces to Associate with ACL
14586 + * @num_ifs: Number of interfaces
14587 + * @if_id: List of interfaces
14589 +struct dpsw_acl_if_cfg {
14591 + u16 if_id[DPSW_MAX_IF];
14594 +int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
14598 + const struct dpsw_acl_if_cfg *cfg);
14600 +int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
14604 + const struct dpsw_acl_if_cfg *cfg);
14607 + * struct dpsw_acl_attr - ACL Attributes
14608 + * @max_entries: Max number of ACL entries
14609 + * @num_entries: Number of used ACL entries
14610 + * @num_ifs: Number of interfaces associated with ACL
14612 +struct dpsw_acl_attr {
14618 +int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
14622 + struct dpsw_acl_attr *attr);
14624 + * struct dpsw_ctrl_if_attr - Control interface attributes
14625 + * @rx_fqid: Receive FQID
14626 + * @rx_err_fqid: Receive error FQID
14627 + * @tx_err_conf_fqid: Transmit error and confirmation FQID
14629 +struct dpsw_ctrl_if_attr {
14632 + u32 tx_err_conf_fqid;
14635 +int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
14638 + struct dpsw_ctrl_if_attr *attr);
14641 + * Maximum number of DPBP
14643 +#define DPSW_MAX_DPBP 8
14646 + * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration
14647 + * @num_dpbp: Number of DPBPs
14648 + * @pools: Array of buffer pools parameters; The number of valid entries
14649 + * must match 'num_dpbp' value
14651 +struct dpsw_ctrl_if_pools_cfg {
14654 + * struct pools - Buffer pools parameters
14655 + * @dpbp_id: DPBP object ID
14656 + * @buffer_size: Buffer size
14657 + * @backup_pool: Backup pool
14663 + } pools[DPSW_MAX_DPBP];
14666 +int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
14669 + const struct dpsw_ctrl_if_pools_cfg *cfg);
14671 +int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
14675 +int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
14679 +int dpsw_get_api_version(struct fsl_mc_io *mc_io,
14684 +#endif /* __FSL_DPSW_H */
14685 diff --git a/drivers/staging/fsl-dpaa2/ethsw/switch.c b/drivers/staging/fsl-dpaa2/ethsw/switch.c
14686 new file mode 100644
14687 index 00000000..3f2c9648
14689 +++ b/drivers/staging/fsl-dpaa2/ethsw/switch.c
14691 +/* Copyright 2014-2015 Freescale Semiconductor Inc.
14693 + * Redistribution and use in source and binary forms, with or without
14694 + * modification, are permitted provided that the following conditions are met:
14695 + * * Redistributions of source code must retain the above copyright
14696 + * notice, this list of conditions and the following disclaimer.
14697 + * * Redistributions in binary form must reproduce the above copyright
14698 + * notice, this list of conditions and the following disclaimer in the
14699 + * documentation and/or other materials provided with the distribution.
14700 + * * Neither the name of Freescale Semiconductor nor the
14701 + * names of its contributors may be used to endorse or promote products
14702 + * derived from this software without specific prior written permission.
14705 + * ALTERNATIVELY, this software may be distributed under the terms of the
14706 + * GNU General Public License ("GPL") as published by the Free Software
14707 + * Foundation, either version 2 of that License or (at your option) any
14710 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
14711 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
14712 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
14713 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
14714 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
14715 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
14716 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
14717 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
14718 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
14719 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
14722 +#include <linux/module.h>
14723 +#include <linux/msi.h>
14725 +#include <linux/netdevice.h>
14726 +#include <linux/etherdevice.h>
14727 +#include <linux/rtnetlink.h>
14728 +#include <linux/if_vlan.h>
14730 +#include <uapi/linux/if_bridge.h>
14731 +#include <net/netlink.h>
14733 +#include "../../fsl-mc/include/mc.h"
14735 +#include "dpsw-cmd.h"
14737 +static const char ethsw_drv_version[] = "0.1";
14739 +/* Minimal supported DPSW version */
14740 +#define DPSW_MIN_VER_MAJOR 8
14741 +#define DPSW_MIN_VER_MINOR 0
14744 +#define DPSW_MAX_IRQ_NUM 2
14746 +#define ETHSW_VLAN_MEMBER 1
14747 +#define ETHSW_VLAN_UNTAGGED 2
14748 +#define ETHSW_VLAN_PVID 4
14749 +#define ETHSW_VLAN_GLOBAL 8
14751 +/* Maximum Frame Length supported by HW (currently 10k) */
14752 +#define DPAA2_MFL (10 * 1024)
14753 +#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
14754 +#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
14756 +struct ethsw_port_priv {
14757 + struct net_device *netdev;
14758 + struct list_head list;
14760 + struct ethsw_dev_priv *ethsw_priv;
14763 + char vlans[VLAN_VID_MASK + 1];
14767 +struct ethsw_dev_priv {
14768 + struct net_device *netdev;
14769 + struct fsl_mc_io *mc_io;
14771 + struct dpsw_attr sw_attr;
14773 + /*TODO: redundant, we can use the slave dev list */
14774 + struct list_head port_list;
14779 + char vlans[VLAN_VID_MASK + 1];
14782 +static int ethsw_port_stop(struct net_device *netdev);
14783 +static int ethsw_port_open(struct net_device *netdev);
14785 +static inline void __get_priv(struct net_device *netdev,
14786 + struct ethsw_dev_priv **priv,
14787 + struct ethsw_port_priv **port_priv)
14789 + struct ethsw_dev_priv *_priv = NULL;
14790 + struct ethsw_port_priv *_port_priv = NULL;
14792 + if (netdev->flags & IFF_MASTER) {
14793 + _priv = netdev_priv(netdev);
14795 + _port_priv = netdev_priv(netdev);
14796 + _priv = _port_priv->ethsw_priv;
14802 + *port_priv = _port_priv;
14805 +/* -------------------------------------------------------------------------- */
14806 +/* ethsw netdevice ops */
14808 +static netdev_tx_t ethsw_dropframe(struct sk_buff *skb, struct net_device *dev)
14810 + /* we don't support I/O for now, drop the frame */
14811 + dev_kfree_skb_any(skb);
14812 + return NETDEV_TX_OK;
14815 +static int ethsw_open(struct net_device *netdev)
14817 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
14818 + struct list_head *pos;
14819 + struct ethsw_port_priv *port_priv = NULL;
14822 + err = dpsw_enable(priv->mc_io, 0, priv->dpsw_handle);
14824 + netdev_err(netdev, "dpsw_enable err %d\n", err);
14828 + list_for_each(pos, &priv->port_list) {
14829 + port_priv = list_entry(pos, struct ethsw_port_priv, list);
14830 + err = dev_open(port_priv->netdev);
14832 + netdev_err(port_priv->netdev, "dev_open err %d\n", err);
14838 +static int ethsw_stop(struct net_device *netdev)
14840 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
14841 + struct list_head *pos;
14842 + struct ethsw_port_priv *port_priv = NULL;
14845 + err = dpsw_disable(priv->mc_io, 0, priv->dpsw_handle);
14847 + netdev_err(netdev, "dpsw_disable err %d\n", err);
14851 + list_for_each(pos, &priv->port_list) {
14852 + port_priv = list_entry(pos, struct ethsw_port_priv, list);
14853 + err = dev_close(port_priv->netdev);
14855 + netdev_err(port_priv->netdev,
14856 + "dev_close err %d\n", err);
14862 +static int ethsw_add_vlan(struct net_device *netdev, u16 vid)
14864 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
14867 + struct dpsw_vlan_cfg vcfg = {
14868 + /* TODO: add support for VLAN private FDBs */
14871 + if (priv->vlans[vid]) {
14872 + netdev_err(netdev, "VLAN already configured\n");
14876 + err = dpsw_vlan_add(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg);
14878 + netdev_err(netdev, "dpsw_vlan_add err %d\n", err);
14881 + priv->vlans[vid] = ETHSW_VLAN_MEMBER;
14886 +static int ethsw_port_add_vlan(struct net_device *netdev, u16 vid, u16 flags)
14888 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
14889 + struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
14892 + struct dpsw_vlan_if_cfg vcfg = {
14894 + .if_id[0] = port_priv->port_index,
14897 + if (port_priv->vlans[vid]) {
14898 + netdev_err(netdev, "VLAN already configured\n");
14902 + if (flags & BRIDGE_VLAN_INFO_PVID && netif_oper_up(netdev)) {
14903 + netdev_err(netdev, "interface must be down to change PVID!\n");
14907 + err = dpsw_vlan_add_if(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg);
14909 + netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
14912 + port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
14914 + if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
14915 + err = dpsw_vlan_add_if_untagged(priv->mc_io, 0,
14916 + priv->dpsw_handle, vid, &vcfg);
14918 + netdev_err(netdev, "dpsw_vlan_add_if_untagged err %d\n",
14922 + port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
14925 + if (flags & BRIDGE_VLAN_INFO_PVID) {
14926 + struct dpsw_tci_cfg tci_cfg = {
14927 + /* TODO: at least add better defaults if these cannot
14935 + err = dpsw_if_set_tci(priv->mc_io, 0, priv->dpsw_handle,
14936 + port_priv->port_index, &tci_cfg);
14938 + netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
14941 + port_priv->vlans[vid] |= ETHSW_VLAN_PVID;
14947 +static const struct nla_policy ifla_br_policy[IFLA_MAX + 1] = {
14948 + [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
14949 + [IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
14950 + [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
14951 + .len = sizeof(struct bridge_vlan_info), },
14954 +static int ethsw_setlink_af_spec(struct net_device *netdev,
14955 + struct nlattr **tb)
14957 + struct bridge_vlan_info *vinfo;
14958 + struct ethsw_dev_priv *priv = NULL;
14959 + struct ethsw_port_priv *port_priv = NULL;
14962 + if (!tb[IFLA_BRIDGE_VLAN_INFO]) {
14963 + netdev_err(netdev, "no VLAN INFO in nlmsg\n");
14964 + return -EOPNOTSUPP;
14967 + vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
14969 + if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
14972 + __get_priv(netdev, &priv, &port_priv);
14974 + if (!port_priv || !priv->vlans[vinfo->vid]) {
14975 + /* command targets switch device or this is a new VLAN */
14976 + err = ethsw_add_vlan(priv->netdev, vinfo->vid);
14980 + /* command targets switch device; mark it */
14982 + priv->vlans[vinfo->vid] |= ETHSW_VLAN_GLOBAL;
14986 + /* command targets switch port */
14987 + err = ethsw_port_add_vlan(netdev, vinfo->vid, vinfo->flags);
14995 +static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = {
14996 + [IFLA_BRPORT_STATE] = { .type = NLA_U8 },
14997 + [IFLA_BRPORT_COST] = { .type = NLA_U32 },
14998 + [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
14999 + [IFLA_BRPORT_MODE] = { .type = NLA_U8 },
15000 + [IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
15001 + [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
15002 + [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
15003 + [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
15006 +static int ethsw_set_learning(struct net_device *netdev, u8 flag)
15008 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
15009 + enum dpsw_fdb_learning_mode learn_mode;
15013 + learn_mode = DPSW_FDB_LEARNING_MODE_HW;
15015 + learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
15017 + err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle,
15020 + netdev_err(netdev, "dpsw_fdb_set_learning_mode err %d\n", err);
15023 + priv->learning = !!flag;
15028 +static int ethsw_port_set_flood(struct net_device *netdev, u8 flag)
15030 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15031 + struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
15034 + err = dpsw_if_set_flooding(priv->mc_io, 0, priv->dpsw_handle,
15035 + port_priv->port_index, (int)flag);
15037 + netdev_err(netdev, "dpsw_fdb_set_learning_mode err %d\n", err);
15040 + priv->flood = !!flag;
15045 +static int ethsw_port_set_state(struct net_device *netdev, u8 state)
15047 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15048 + struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
15049 + u8 old_state = port_priv->stp_state;
15052 + struct dpsw_stp_cfg stp_cfg = {
15056 + /* TODO: check port state, interface may be down */
15058 + if (state > BR_STATE_BLOCKING)
15061 + if (state == port_priv->stp_state)
15064 + if (state == BR_STATE_DISABLED) {
15065 + port_priv->stp_state = state;
15067 + err = ethsw_port_stop(netdev);
15071 + err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle,
15072 + port_priv->port_index, &stp_cfg);
15074 + netdev_err(netdev, "dpsw_if_set_stp err %d\n", err);
15078 + port_priv->stp_state = state;
15080 + if (old_state == BR_STATE_DISABLED) {
15081 + err = ethsw_port_open(netdev);
15089 + port_priv->stp_state = old_state;
15093 +static int ethsw_setlink_protinfo(struct net_device *netdev,
15094 + struct nlattr **tb)
15096 + struct ethsw_dev_priv *priv;
15097 + struct ethsw_port_priv *port_priv = NULL;
15100 + __get_priv(netdev, &priv, &port_priv);
15102 + if (tb[IFLA_BRPORT_LEARNING]) {
15103 + u8 flag = nla_get_u8(tb[IFLA_BRPORT_LEARNING]);
15106 + netdev_warn(netdev,
15107 + "learning set on whole switch dev\n");
15109 + err = ethsw_set_learning(priv->netdev, flag);
15113 + } else if (tb[IFLA_BRPORT_UNICAST_FLOOD] && port_priv) {
15114 + u8 flag = nla_get_u8(tb[IFLA_BRPORT_UNICAST_FLOOD]);
15116 + err = ethsw_port_set_flood(port_priv->netdev, flag);
15120 + } else if (tb[IFLA_BRPORT_STATE] && port_priv) {
15121 + u8 state = nla_get_u8(tb[IFLA_BRPORT_STATE]);
15123 + err = ethsw_port_set_state(port_priv->netdev, state);
15128 + return -EOPNOTSUPP;
15134 +static int ethsw_setlink(struct net_device *netdev,
15135 + struct nlmsghdr *nlh,
15138 + struct nlattr *attr;
15139 + struct nlattr *tb[(IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX) ?
15140 + IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX + 1];
15143 + attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
15145 + err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr,
15148 + netdev_err(netdev,
15149 + "nla_parse_nested for br_policy err %d\n",
15154 + err = ethsw_setlink_af_spec(netdev, tb);
15158 + attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
15160 + err = nla_parse_nested(tb, IFLA_BRPORT_MAX, attr,
15161 + ifla_brport_policy);
15163 + netdev_err(netdev,
15164 + "nla_parse_nested for brport_policy err %d\n",
15169 + err = ethsw_setlink_protinfo(netdev, tb);
15173 + netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC/PROTINFO\n");
15174 + return -EOPNOTSUPP;
15177 +static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev,
15178 + struct ethsw_dev_priv *priv)
15180 + u8 operstate = netif_running(netdev) ? netdev->operstate : IF_OPER_DOWN;
15184 + err = nla_put_string(skb, IFLA_IFNAME, netdev->name);
15186 + goto nla_put_err;
15187 + err = nla_put_u32(skb, IFLA_MASTER, priv->netdev->ifindex);
15189 + goto nla_put_err;
15190 + err = nla_put_u32(skb, IFLA_MTU, netdev->mtu);
15192 + goto nla_put_err;
15193 + err = nla_put_u8(skb, IFLA_OPERSTATE, operstate);
15195 + goto nla_put_err;
15196 + if (netdev->addr_len) {
15197 + err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len,
15198 + netdev->dev_addr);
15200 + goto nla_put_err;
15203 + iflink = dev_get_iflink(netdev);
15204 + if (netdev->ifindex != iflink) {
15205 + err = nla_put_u32(skb, IFLA_LINK, iflink);
15207 + goto nla_put_err;
15213 + netdev_err(netdev, "nla_put_ err %d\n", err);
15217 +static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev,
15218 + struct ethsw_port_priv *port_priv)
15220 + struct nlattr *nest;
15223 + u8 stp_state = port_priv->stp_state;
15225 + if (port_priv->stp_state == DPSW_STP_STATE_BLOCKING)
15226 + stp_state = BR_STATE_BLOCKING;
15228 + nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
15230 + netdev_err(netdev, "nla_nest_start failed\n");
15234 + err = nla_put_u8(skb, IFLA_BRPORT_STATE, stp_state);
15236 + goto nla_put_err;
15237 + err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0);
15239 + goto nla_put_err;
15240 + err = nla_put_u32(skb, IFLA_BRPORT_COST, 0);
15242 + goto nla_put_err;
15243 + err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0);
15245 + goto nla_put_err;
15246 + err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0);
15248 + goto nla_put_err;
15249 + err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0);
15251 + goto nla_put_err;
15252 + err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0);
15254 + goto nla_put_err;
15255 + err = nla_put_u8(skb, IFLA_BRPORT_LEARNING,
15256 + port_priv->ethsw_priv->learning);
15258 + goto nla_put_err;
15259 + err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
15260 + port_priv->ethsw_priv->flood);
15262 + goto nla_put_err;
15263 + nla_nest_end(skb, nest);
15268 + netdev_err(netdev, "nla_put_ err %d\n", err);
15269 + nla_nest_cancel(skb, nest);
15273 +static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev,
15274 + struct ethsw_dev_priv *priv,
15275 + struct ethsw_port_priv *port_priv)
15277 + struct nlattr *nest;
15278 + struct bridge_vlan_info vinfo;
15279 + const char *vlans;
15283 + nest = nla_nest_start(skb, IFLA_AF_SPEC);
15285 + netdev_err(netdev, "nla_nest_start failed");
15290 + vlans = port_priv->vlans;
15292 + vlans = priv->vlans;
15294 + for (i = 0; i < VLAN_VID_MASK + 1; i++) {
15298 + if (vlans[i] & ETHSW_VLAN_UNTAGGED)
15299 + vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
15301 + if (vlans[i] & ETHSW_VLAN_PVID)
15302 + vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
15304 + if (vlans[i] & ETHSW_VLAN_MEMBER) {
15305 + err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
15306 + sizeof(vinfo), &vinfo);
15308 + goto nla_put_err;
15312 + nla_nest_end(skb, nest);
15316 + netdev_err(netdev, "nla_put_ err %d\n", err);
15317 + nla_nest_cancel(skb, nest);
15321 +static int ethsw_getlink(struct sk_buff *skb, u32 pid, u32 seq,
15322 + struct net_device *netdev, u32 filter_mask,
15325 + struct ethsw_dev_priv *priv;
15326 + struct ethsw_port_priv *port_priv = NULL;
15327 + struct ifinfomsg *hdr;
15328 + struct nlmsghdr *nlh;
15331 + __get_priv(netdev, &priv, &port_priv);
15333 + nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI);
15335 + return -EMSGSIZE;
15337 + hdr = nlmsg_data(nlh);
15338 + memset(hdr, 0, sizeof(*hdr));
15339 + hdr->ifi_family = AF_BRIDGE;
15340 + hdr->ifi_type = netdev->type;
15341 + hdr->ifi_index = netdev->ifindex;
15342 + hdr->ifi_flags = dev_get_flags(netdev);
15344 + err = __nla_put_netdev(skb, netdev, priv);
15346 + goto nla_put_err;
15349 + err = __nla_put_port(skb, netdev, port_priv);
15351 + goto nla_put_err;
15354 + /* Check if the VID information is requested */
15355 + if (filter_mask & RTEXT_FILTER_BRVLAN) {
15356 + err = __nla_put_vlan(skb, netdev, priv, port_priv);
15358 + goto nla_put_err;
15361 + nlmsg_end(skb, nlh);
15365 + nlmsg_cancel(skb, nlh);
15366 + return -EMSGSIZE;
15369 +static int ethsw_dellink_switch(struct ethsw_dev_priv *priv, u16 vid)
15371 + struct list_head *pos;
15372 + struct ethsw_port_priv *ppriv_local = NULL;
15375 + if (!priv->vlans[vid])
15378 + err = dpsw_vlan_remove(priv->mc_io, 0, priv->dpsw_handle, vid);
15380 + netdev_err(priv->netdev, "dpsw_vlan_remove err %d\n", err);
15383 + priv->vlans[vid] = 0;
15385 + list_for_each(pos, &priv->port_list) {
15386 + ppriv_local = list_entry(pos, struct ethsw_port_priv,
15388 + ppriv_local->vlans[vid] = 0;
15394 +static int ethsw_dellink_port(struct ethsw_dev_priv *priv,
15395 + struct ethsw_port_priv *port_priv,
15398 + struct list_head *pos;
15399 + struct ethsw_port_priv *ppriv_local = NULL;
15400 + struct dpsw_vlan_if_cfg vcfg = {
15402 + .if_id[0] = port_priv->port_index,
15404 + unsigned int count = 0;
15407 + if (!port_priv->vlans[vid])
15410 + /* VLAN will be deleted from switch if global flag is not set
15411 + * and is configured on only one port
15413 + if (!(priv->vlans[vid] & ETHSW_VLAN_GLOBAL)) {
15414 + list_for_each(pos, &priv->port_list) {
15415 + ppriv_local = list_entry(pos, struct ethsw_port_priv,
15417 + if (ppriv_local->vlans[vid] & ETHSW_VLAN_MEMBER)
15422 + return ethsw_dellink_switch(priv, vid);
15425 + err = dpsw_vlan_remove_if(priv->mc_io, 0, priv->dpsw_handle,
15428 + netdev_err(priv->netdev, "dpsw_vlan_remove_if err %d\n", err);
15431 + port_priv->vlans[vid] = 0;
15435 +static int ethsw_dellink(struct net_device *netdev,
15436 + struct nlmsghdr *nlh,
15439 + struct nlattr *tb[IFLA_BRIDGE_MAX + 1];
15440 + struct nlattr *spec;
15441 + struct bridge_vlan_info *vinfo;
15442 + struct ethsw_dev_priv *priv;
15443 + struct ethsw_port_priv *port_priv = NULL;
15446 + spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
15450 + err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy);
15454 + if (!tb[IFLA_BRIDGE_VLAN_INFO])
15455 + return -EOPNOTSUPP;
15457 + vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
15459 + if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
15462 + __get_priv(netdev, &priv, &port_priv);
15464 + /* decide if command targets switch device or port */
15466 + err = ethsw_dellink_switch(priv, vinfo->vid);
15468 + err = ethsw_dellink_port(priv, port_priv, vinfo->vid);
15473 +static const struct net_device_ops ethsw_ops = {
15474 + .ndo_open = ðsw_open,
15475 + .ndo_stop = ðsw_stop,
15477 + .ndo_bridge_setlink = ðsw_setlink,
15478 + .ndo_bridge_getlink = ðsw_getlink,
15479 + .ndo_bridge_dellink = ðsw_dellink,
15481 + .ndo_start_xmit = ðsw_dropframe,
15484 +/* -------------------------------------------------------------------------- */
15485 +/* switch port netdevice ops */
15487 +static int _ethsw_port_carrier_state_sync(struct net_device *netdev)
15489 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15490 + struct dpsw_link_state state;
15493 + err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
15494 + port_priv->ethsw_priv->dpsw_handle,
15495 + port_priv->port_index, &state);
15496 + if (unlikely(err)) {
15497 + netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
15501 + WARN_ONCE(state.up > 1, "Garbage read into link_state");
15504 + netif_carrier_on(port_priv->netdev);
15506 + netif_carrier_off(port_priv->netdev);
15511 +static int ethsw_port_open(struct net_device *netdev)
15513 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15516 + err = dpsw_if_enable(port_priv->ethsw_priv->mc_io, 0,
15517 + port_priv->ethsw_priv->dpsw_handle,
15518 + port_priv->port_index);
15520 + netdev_err(netdev, "dpsw_if_enable err %d\n", err);
15524 + /* sync carrier state */
15525 + err = _ethsw_port_carrier_state_sync(netdev);
15527 + netdev_err(netdev, "_ethsw_port_carrier_state_sync err %d\n",
15529 + goto err_carrier_sync;
15535 + dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0,
15536 + port_priv->ethsw_priv->dpsw_handle,
15537 + port_priv->port_index);
15541 +static int ethsw_port_stop(struct net_device *netdev)
15543 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15546 + err = dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0,
15547 + port_priv->ethsw_priv->dpsw_handle,
15548 + port_priv->port_index);
15550 + netdev_err(netdev, "dpsw_if_disable err %d\n", err);
15557 +static int ethsw_port_fdb_add_uc(struct net_device *netdev,
15558 + const unsigned char *addr)
15560 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15561 + struct dpsw_fdb_unicast_cfg entry = {0};
15564 + entry.if_egress = port_priv->port_index;
15565 + entry.type = DPSW_FDB_ENTRY_STATIC;
15566 + ether_addr_copy(entry.mac_addr, addr);
15568 + err = dpsw_fdb_add_unicast(port_priv->ethsw_priv->mc_io, 0,
15569 + port_priv->ethsw_priv->dpsw_handle,
15572 + netdev_err(netdev, "dpsw_fdb_add_unicast err %d\n", err);
15576 +static int ethsw_port_fdb_del_uc(struct net_device *netdev,
15577 + const unsigned char *addr)
15579 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15580 + struct dpsw_fdb_unicast_cfg entry = {0};
15583 + entry.if_egress = port_priv->port_index;
15584 + entry.type = DPSW_FDB_ENTRY_STATIC;
15585 + ether_addr_copy(entry.mac_addr, addr);
15587 + err = dpsw_fdb_remove_unicast(port_priv->ethsw_priv->mc_io, 0,
15588 + port_priv->ethsw_priv->dpsw_handle,
15591 + netdev_err(netdev, "dpsw_fdb_remove_unicast err %d\n", err);
15595 +static int ethsw_port_fdb_add_mc(struct net_device *netdev,
15596 + const unsigned char *addr)
15598 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15599 + struct dpsw_fdb_multicast_cfg entry = {0};
15602 + ether_addr_copy(entry.mac_addr, addr);
15603 + entry.type = DPSW_FDB_ENTRY_STATIC;
15604 + entry.num_ifs = 1;
15605 + entry.if_id[0] = port_priv->port_index;
15607 + err = dpsw_fdb_add_multicast(port_priv->ethsw_priv->mc_io, 0,
15608 + port_priv->ethsw_priv->dpsw_handle,
15611 + netdev_err(netdev, "dpsw_fdb_add_multicast err %d\n", err);
15615 +static int ethsw_port_fdb_del_mc(struct net_device *netdev,
15616 + const unsigned char *addr)
15618 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15619 + struct dpsw_fdb_multicast_cfg entry = {0};
15622 + ether_addr_copy(entry.mac_addr, addr);
15623 + entry.type = DPSW_FDB_ENTRY_STATIC;
15624 + entry.num_ifs = 1;
15625 + entry.if_id[0] = port_priv->port_index;
15627 + err = dpsw_fdb_remove_multicast(port_priv->ethsw_priv->mc_io, 0,
15628 + port_priv->ethsw_priv->dpsw_handle,
15631 + netdev_err(netdev, "dpsw_fdb_remove_multicast err %d\n", err);
15635 +static int _lookup_address(struct net_device *netdev, int is_uc,
15636 + const unsigned char *addr)
15638 + struct netdev_hw_addr *ha;
15639 + struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
15641 + netif_addr_lock_bh(netdev);
15642 + list_for_each_entry(ha, &list->list, list) {
15643 + if (ether_addr_equal(ha->addr, addr)) {
15644 + netif_addr_unlock_bh(netdev);
15648 + netif_addr_unlock_bh(netdev);
15652 +static int ethsw_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
15653 + struct net_device *netdev,
15654 + const unsigned char *addr, u16 vid,
15657 + struct list_head *pos;
15658 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15659 + struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
15662 + /* TODO: add replace support when added to iproute bridge */
15663 + if (!(flags & NLM_F_REQUEST)) {
15664 + netdev_err(netdev,
15665 + "ethsw_port_fdb_add unexpected flags value %08x\n",
15670 + if (is_unicast_ether_addr(addr)) {
15671 + /* if entry cannot be replaced, return error if exists */
15672 + if (flags & NLM_F_EXCL || flags & NLM_F_APPEND) {
15673 + list_for_each(pos, &priv->port_list) {
15674 + port_priv = list_entry(pos,
15675 + struct ethsw_port_priv,
15677 + if (_lookup_address(port_priv->netdev,
15683 + err = ethsw_port_fdb_add_uc(netdev, addr);
15685 + netdev_err(netdev, "ethsw_port_fdb_add_uc err %d\n",
15690 + /* we might have replaced an existing entry for a different
15691 + * switch port, make sure the address doesn't linger in any
15692 + * port address list
15694 + list_for_each(pos, &priv->port_list) {
15695 + port_priv = list_entry(pos, struct ethsw_port_priv,
15697 + dev_uc_del(port_priv->netdev, addr);
15700 + err = dev_uc_add(netdev, addr);
15702 + netdev_err(netdev, "dev_uc_add err %d\n", err);
15706 + struct dpsw_fdb_multicast_cfg entry = {
15707 + .type = DPSW_FDB_ENTRY_STATIC,
15711 + /* check if address is already set on this port */
15712 + if (_lookup_address(netdev, 0, addr))
15715 + /* check if the address exists on other port */
15716 + ether_addr_copy(entry.mac_addr, addr);
15717 + err = dpsw_fdb_get_multicast(priv->mc_io, 0, priv->dpsw_handle,
15720 + /* entry exists, can we replace it? */
15721 + if (flags & NLM_F_EXCL)
15723 + } else if (err != -ENAVAIL) {
15724 + netdev_err(netdev, "dpsw_fdb_get_unicast err %d\n",
15729 + err = ethsw_port_fdb_add_mc(netdev, addr);
15731 + netdev_err(netdev, "ethsw_port_fdb_add_mc err %d\n",
15736 + err = dev_mc_add(netdev, addr);
15738 + netdev_err(netdev, "dev_mc_add err %d\n", err);
15746 +static int ethsw_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
15747 + struct net_device *netdev,
15748 + const unsigned char *addr, u16 vid)
15752 + if (is_unicast_ether_addr(addr)) {
15753 + err = ethsw_port_fdb_del_uc(netdev, addr);
15755 + netdev_err(netdev, "ethsw_port_fdb_del_uc err %d\n",
15760 + /* also delete if configured on port */
15761 + err = dev_uc_del(netdev, addr);
15762 + if (err && err != -ENOENT) {
15763 + netdev_err(netdev, "dev_uc_del err %d\n", err);
15767 + if (!_lookup_address(netdev, 0, addr))
15770 + err = dev_mc_del(netdev, addr);
15772 + netdev_err(netdev, "dev_mc_del err %d\n", err);
15776 + err = ethsw_port_fdb_del_mc(netdev, addr);
15778 + netdev_err(netdev, "ethsw_port_fdb_del_mc err %d\n",
15787 +void ethsw_port_get_stats(struct net_device *netdev,
15788 + struct rtnl_link_stats64 *storage)
15790 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15794 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15795 + port_priv->ethsw_priv->dpsw_handle,
15796 + port_priv->port_index,
15797 + DPSW_CNT_ING_FRAME, &storage->rx_packets);
15801 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15802 + port_priv->ethsw_priv->dpsw_handle,
15803 + port_priv->port_index,
15804 + DPSW_CNT_EGR_FRAME, &storage->tx_packets);
15808 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15809 + port_priv->ethsw_priv->dpsw_handle,
15810 + port_priv->port_index,
15811 + DPSW_CNT_ING_BYTE, &storage->rx_bytes);
15815 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15816 + port_priv->ethsw_priv->dpsw_handle,
15817 + port_priv->port_index,
15818 + DPSW_CNT_EGR_BYTE, &storage->tx_bytes);
15822 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15823 + port_priv->ethsw_priv->dpsw_handle,
15824 + port_priv->port_index,
15825 + DPSW_CNT_ING_FRAME_DISCARD,
15826 + &storage->rx_dropped);
15830 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15831 + port_priv->ethsw_priv->dpsw_handle,
15832 + port_priv->port_index,
15833 + DPSW_CNT_ING_FLTR_FRAME,
15837 + storage->rx_dropped += tmp;
15839 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
15840 + port_priv->ethsw_priv->dpsw_handle,
15841 + port_priv->port_index,
15842 + DPSW_CNT_EGR_FRAME_DISCARD,
15843 + &storage->tx_dropped);
15850 + netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
15853 +static int ethsw_port_change_mtu(struct net_device *netdev, int mtu)
15855 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15858 + if (mtu < ETH_ZLEN || mtu > ETHSW_MAX_FRAME_LENGTH) {
15859 + netdev_err(netdev, "Invalid MTU %d. Valid range is: %d..%d\n",
15860 + mtu, ETH_ZLEN, ETHSW_MAX_FRAME_LENGTH);
15864 + err = dpsw_if_set_max_frame_length(port_priv->ethsw_priv->mc_io,
15866 + port_priv->ethsw_priv->dpsw_handle,
15867 + port_priv->port_index,
15868 + (u16)ETHSW_L2_MAX_FRM(mtu));
15870 + netdev_err(netdev,
15871 + "dpsw_if_set_max_frame_length() err %d\n", err);
15875 + netdev->mtu = mtu;
15879 +static const struct net_device_ops ethsw_port_ops = {
15880 + .ndo_open = ðsw_port_open,
15881 + .ndo_stop = ðsw_port_stop,
15883 + .ndo_fdb_add = ðsw_port_fdb_add,
15884 + .ndo_fdb_del = ðsw_port_fdb_del,
15885 + .ndo_fdb_dump = &ndo_dflt_fdb_dump,
15887 + .ndo_get_stats64 = ðsw_port_get_stats,
15888 + .ndo_change_mtu = ðsw_port_change_mtu,
15890 + .ndo_start_xmit = ðsw_dropframe,
15893 +static void ethsw_get_drvinfo(struct net_device *netdev,
15894 + struct ethtool_drvinfo *drvinfo)
15896 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15897 + u16 version_major, version_minor;
15900 + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
15901 + strlcpy(drvinfo->version, ethsw_drv_version, sizeof(drvinfo->version));
15903 + err = dpsw_get_api_version(port_priv->ethsw_priv->mc_io, 0,
15907 + strlcpy(drvinfo->fw_version, "N/A",
15908 + sizeof(drvinfo->fw_version));
15910 + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
15911 + "%u.%u", version_major, version_minor);
15913 + strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
15914 + sizeof(drvinfo->bus_info));
15917 +static int ethsw_get_settings(struct net_device *netdev,
15918 + struct ethtool_cmd *cmd)
15920 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15921 + struct dpsw_link_state state = {0};
15924 + err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
15925 + port_priv->ethsw_priv->dpsw_handle,
15926 + port_priv->port_index,
15929 + netdev_err(netdev, "ERROR %d getting link state", err);
15933 + /* At the moment, we have no way of interrogating the DPMAC
15934 + * from the DPSW side or there may not exist a DPMAC at all.
15935 + * Report only autoneg state, duplexity and speed.
15937 + if (state.options & DPSW_LINK_OPT_AUTONEG)
15938 + cmd->autoneg = AUTONEG_ENABLE;
15939 + if (!(state.options & DPSW_LINK_OPT_HALF_DUPLEX))
15940 + cmd->autoneg = DUPLEX_FULL;
15941 + ethtool_cmd_speed_set(cmd, state.rate);
15947 +static int ethsw_set_settings(struct net_device *netdev,
15948 + struct ethtool_cmd *cmd)
15950 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15951 + struct dpsw_link_state state = {0};
15952 + struct dpsw_link_cfg cfg = {0};
15955 + netdev_dbg(netdev, "Setting link parameters...");
15957 + err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
15958 + port_priv->ethsw_priv->dpsw_handle,
15959 + port_priv->port_index,
15962 + netdev_err(netdev, "ERROR %d getting link state", err);
15966 + /* Due to a temporary MC limitation, the DPSW port must be down
15967 + * in order to be able to change link settings. Taking steps to let
15968 + * the user know that.
15970 + if (netif_running(netdev)) {
15971 + netdev_info(netdev,
15972 + "Sorry, interface must be brought down first.\n");
15976 + cfg.options = state.options;
15977 + cfg.rate = ethtool_cmd_speed(cmd);
15978 + if (cmd->autoneg == AUTONEG_ENABLE)
15979 + cfg.options |= DPSW_LINK_OPT_AUTONEG;
15981 + cfg.options &= ~DPSW_LINK_OPT_AUTONEG;
15982 + if (cmd->duplex == DUPLEX_HALF)
15983 + cfg.options |= DPSW_LINK_OPT_HALF_DUPLEX;
15985 + cfg.options &= ~DPSW_LINK_OPT_HALF_DUPLEX;
15987 + err = dpsw_if_set_link_cfg(port_priv->ethsw_priv->mc_io, 0,
15988 + port_priv->ethsw_priv->dpsw_handle,
15989 + port_priv->port_index,
15992 + /* ethtool will be loud enough if we return an error; no point
15993 + * in putting our own error message on the console by default
15995 + netdev_dbg(netdev, "ERROR %d setting link cfg", err);
16002 + enum dpsw_counter id;
16003 + char name[ETH_GSTRING_LEN];
16004 +} ethsw_ethtool_counters[] = {
16005 + {DPSW_CNT_ING_FRAME, "rx frames"},
16006 + {DPSW_CNT_ING_BYTE, "rx bytes"},
16007 + {DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"},
16008 + {DPSW_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
16009 + {DPSW_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
16010 + {DPSW_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
16011 + {DPSW_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
16012 + {DPSW_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
16013 + {DPSW_CNT_EGR_FRAME, "tx frames"},
16014 + {DPSW_CNT_EGR_BYTE, "tx bytes"},
16015 + {DPSW_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
16019 +static int ethsw_ethtool_get_sset_count(struct net_device *dev, int sset)
16022 + case ETH_SS_STATS:
16023 + return ARRAY_SIZE(ethsw_ethtool_counters);
16025 + return -EOPNOTSUPP;
16029 +static void ethsw_ethtool_get_strings(struct net_device *netdev,
16030 + u32 stringset, u8 *data)
16034 + switch (stringset) {
16035 + case ETH_SS_STATS:
16036 + for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++)
16037 + memcpy(data + i * ETH_GSTRING_LEN,
16038 + ethsw_ethtool_counters[i].name, ETH_GSTRING_LEN);
16043 +static void ethsw_ethtool_get_stats(struct net_device *netdev,
16044 + struct ethtool_stats *stats,
16047 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
16051 + for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++) {
16052 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
16053 + port_priv->ethsw_priv->dpsw_handle,
16054 + port_priv->port_index,
16055 + ethsw_ethtool_counters[i].id,
16058 + netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n",
16059 + ethsw_ethtool_counters[i].name, err);
16063 +static const struct ethtool_ops ethsw_port_ethtool_ops = {
16064 + .get_drvinfo = ðsw_get_drvinfo,
16065 + .get_link = ðtool_op_get_link,
16066 + .get_settings = ðsw_get_settings,
16067 + .set_settings = ðsw_set_settings,
16068 + .get_strings = ðsw_ethtool_get_strings,
16069 + .get_ethtool_stats = ðsw_ethtool_get_stats,
16070 + .get_sset_count = ðsw_ethtool_get_sset_count,
16073 +/* -------------------------------------------------------------------------- */
16074 +/* ethsw driver functions */
16076 +static int ethsw_links_state_update(struct ethsw_dev_priv *priv)
16078 + struct list_head *pos;
16079 + struct ethsw_port_priv *port_priv;
16082 + list_for_each(pos, &priv->port_list) {
16083 + port_priv = list_entry(pos, struct ethsw_port_priv,
16086 + err = _ethsw_port_carrier_state_sync(port_priv->netdev);
16088 + netdev_err(port_priv->netdev,
16089 + "_ethsw_port_carrier_state_sync err %d\n",
16096 +static irqreturn_t ethsw_irq0_handler(int irq_num, void *arg)
16098 + return IRQ_WAKE_THREAD;
16101 +static irqreturn_t _ethsw_irq0_handler_thread(int irq_num, void *arg)
16103 + struct device *dev = (struct device *)arg;
16104 + struct net_device *netdev = dev_get_drvdata(dev);
16105 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
16107 + struct fsl_mc_io *io = priv->mc_io;
16108 + u16 token = priv->dpsw_handle;
16109 + int irq_index = DPSW_IRQ_INDEX_IF;
16111 + /* Mask the events and the if_id reserved bits to be cleared on read */
16112 + u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
16115 + err = dpsw_get_irq_status(io, 0, token, irq_index, &status);
16116 + if (unlikely(err)) {
16117 + netdev_err(netdev, "Can't get irq status (err %d)", err);
16119 + err = dpsw_clear_irq_status(io, 0, token, irq_index,
16121 + if (unlikely(err))
16122 + netdev_err(netdev, "Can't clear irq status (err %d)",
16127 + if (status & DPSW_IRQ_EVENT_LINK_CHANGED) {
16128 + err = ethsw_links_state_update(priv);
16129 + if (unlikely(err))
16134 + return IRQ_HANDLED;
16137 +static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
16139 + struct device *dev = &sw_dev->dev;
16140 + struct net_device *netdev = dev_get_drvdata(dev);
16141 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
16143 + struct fsl_mc_device_irq *irq;
16144 + const int irq_index = DPSW_IRQ_INDEX_IF;
16145 + u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
16147 + err = fsl_mc_allocate_irqs(sw_dev);
16148 + if (unlikely(err)) {
16149 + dev_err(dev, "MC irqs allocation failed\n");
16153 + if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_MAX_IRQ_NUM)) {
16158 + err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
16160 + if (unlikely(err)) {
16161 + dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
16165 + irq = sw_dev->irqs[irq_index];
16167 + err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
16168 + ethsw_irq0_handler,
16169 + _ethsw_irq0_handler_thread,
16170 + IRQF_NO_SUSPEND | IRQF_ONESHOT,
16171 + dev_name(dev), dev);
16172 + if (unlikely(err)) {
16173 + dev_err(dev, "devm_request_threaded_irq(): %d", err);
16177 + err = dpsw_set_irq_mask(priv->mc_io, 0, priv->dpsw_handle,
16178 + irq_index, mask);
16179 + if (unlikely(err)) {
16180 + dev_err(dev, "dpsw_set_irq_mask(): %d", err);
16181 + goto free_devm_irq;
16184 + err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
16186 + if (unlikely(err)) {
16187 + dev_err(dev, "dpsw_set_irq_enable(): %d", err);
16188 + goto free_devm_irq;
16194 + devm_free_irq(dev, irq->msi_desc->irq, dev);
16196 + fsl_mc_free_irqs(sw_dev);
16200 +static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
16202 + struct device *dev = &sw_dev->dev;
16203 + struct net_device *netdev = dev_get_drvdata(dev);
16204 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
16206 + dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
16207 + DPSW_IRQ_INDEX_IF, 0);
16208 + devm_free_irq(dev,
16209 + sw_dev->irqs[DPSW_IRQ_INDEX_IF]->msi_desc->irq,
16211 + fsl_mc_free_irqs(sw_dev);
16215 +ethsw_init(struct fsl_mc_device *sw_dev)
16217 + struct device *dev = &sw_dev->dev;
16218 + struct ethsw_dev_priv *priv;
16219 + struct net_device *netdev;
16222 + u16 version_major, version_minor;
16223 + const struct dpsw_stp_cfg stp_cfg = {
16225 + .state = DPSW_STP_STATE_FORWARDING,
16228 + netdev = dev_get_drvdata(dev);
16229 + priv = netdev_priv(netdev);
16231 + priv->dev_id = sw_dev->obj_desc.id;
16233 + err = dpsw_open(priv->mc_io, 0, priv->dev_id, &priv->dpsw_handle);
16235 + dev_err(dev, "dpsw_open err %d\n", err);
16238 + if (!priv->dpsw_handle) {
16239 + dev_err(dev, "dpsw_open returned null handle but no error\n");
16244 + err = dpsw_get_attributes(priv->mc_io, 0, priv->dpsw_handle,
16247 + dev_err(dev, "dpsw_get_attributes err %d\n", err);
16251 + err = dpsw_get_api_version(priv->mc_io, 0,
16255 + dev_err(dev, "dpsw_get_api_version err %d\n", err);
16259 + /* Minimum supported DPSW version check */
16260 + if (version_major < DPSW_MIN_VER_MAJOR ||
16261 + (version_major == DPSW_MIN_VER_MAJOR &&
16262 + version_minor < DPSW_MIN_VER_MINOR)) {
16263 + dev_err(dev, "DPSW version %d:%d not supported. Use %d.%d or greater.\n",
16266 + DPSW_MIN_VER_MAJOR, DPSW_MIN_VER_MINOR);
16271 + err = dpsw_reset(priv->mc_io, 0, priv->dpsw_handle);
16273 + dev_err(dev, "dpsw_reset err %d\n", err);
16277 + err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle, 0,
16278 + DPSW_FDB_LEARNING_MODE_HW);
16280 + dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err);
16284 + for (i = 0; i < priv->sw_attr.num_ifs; i++) {
16285 + err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle, i,
16288 + dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
16293 + err = dpsw_if_set_broadcast(priv->mc_io, 0,
16294 + priv->dpsw_handle, i, 1);
16297 + "dpsw_if_set_broadcast err %d for port %d\n",
16306 + dpsw_close(priv->mc_io, 0, priv->dpsw_handle);
16312 +ethsw_takedown(struct fsl_mc_device *sw_dev)
16314 + struct device *dev = &sw_dev->dev;
16315 + struct net_device *netdev;
16316 + struct ethsw_dev_priv *priv;
16319 + netdev = dev_get_drvdata(dev);
16320 + priv = netdev_priv(netdev);
16322 + err = dpsw_close(priv->mc_io, 0, priv->dpsw_handle);
16324 + dev_warn(dev, "dpsw_close err %d\n", err);
16330 +ethsw_remove(struct fsl_mc_device *sw_dev)
16332 + struct device *dev;
16333 + struct net_device *netdev;
16334 + struct ethsw_dev_priv *priv;
16335 + struct ethsw_port_priv *port_priv;
16336 + struct list_head *pos;
16338 + dev = &sw_dev->dev;
16339 + netdev = dev_get_drvdata(dev);
16340 + priv = netdev_priv(netdev);
16342 + list_for_each(pos, &priv->port_list) {
16343 + port_priv = list_entry(pos, struct ethsw_port_priv, list);
16346 + netdev_upper_dev_unlink(port_priv->netdev, netdev);
16349 + unregister_netdev(port_priv->netdev);
16350 + free_netdev(port_priv->netdev);
16353 + ethsw_teardown_irqs(sw_dev);
16355 + unregister_netdev(netdev);
16357 + ethsw_takedown(sw_dev);
16358 + fsl_mc_portal_free(priv->mc_io);
16360 + dev_set_drvdata(dev, NULL);
16361 + free_netdev(netdev);
16367 +ethsw_probe(struct fsl_mc_device *sw_dev)
16369 + struct device *dev;
16370 + struct net_device *netdev = NULL;
16371 + struct ethsw_dev_priv *priv = NULL;
16374 + const char def_mcast[ETH_ALEN] = {
16375 + 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01,
16377 + char port_name[IFNAMSIZ];
16379 + dev = &sw_dev->dev;
16381 + /* register switch device, it's for management only - no I/O */
16382 + netdev = alloc_etherdev(sizeof(*priv));
16384 + dev_err(dev, "alloc_etherdev error\n");
16387 + netdev->netdev_ops = ðsw_ops;
16389 + SET_NETDEV_DEV(netdev, dev);
16390 + dev_set_drvdata(dev, netdev);
16392 + priv = netdev_priv(netdev);
16393 + priv->netdev = netdev;
16395 + err = fsl_mc_portal_allocate(sw_dev, 0, &priv->mc_io);
16397 + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
16398 + goto err_free_netdev;
16400 + if (!priv->mc_io) {
16401 + dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
16403 + goto err_free_netdev;
16406 + err = ethsw_init(sw_dev);
16408 + dev_err(dev, "switch init err %d\n", err);
16409 + goto err_free_cmdport;
16412 + netdev->flags = netdev->flags | IFF_PROMISC | IFF_MASTER;
16414 + /* TODO: should we hold rtnl_lock here? We can't register_netdev under
16417 + dev_alloc_name(netdev, "sw%d");
16418 + err = register_netdev(netdev);
16420 + dev_err(dev, "register_netdev error %d\n", err);
16421 + goto err_takedown;
16424 + dev_info(dev, "register_netdev res %d\n", err);
16426 + /* VLAN 1 is implicitly configured on the switch */
16427 + priv->vlans[1] = ETHSW_VLAN_MEMBER;
16428 + /* Flooding, learning are implicitly enabled */
16429 + priv->learning = true;
16430 + priv->flood = true;
16432 + /* register switch ports */
16433 + snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name);
16435 + INIT_LIST_HEAD(&priv->port_list);
16436 + for (i = 0; i < priv->sw_attr.num_ifs; i++) {
16437 + struct net_device *port_netdev;
16438 + struct ethsw_port_priv *port_priv;
16440 + port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
16441 + if (!port_netdev) {
16442 + dev_err(dev, "alloc_etherdev error\n");
16443 + goto err_takedown;
16446 + port_priv = netdev_priv(port_netdev);
16447 + port_priv->netdev = port_netdev;
16448 + port_priv->ethsw_priv = priv;
16450 + port_priv->port_index = i;
16451 + port_priv->stp_state = BR_STATE_FORWARDING;
16452 + /* VLAN 1 is configured by default on all switch ports */
16453 + port_priv->vlans[1] = ETHSW_VLAN_MEMBER | ETHSW_VLAN_UNTAGGED |
16456 + SET_NETDEV_DEV(port_netdev, dev);
16457 + port_netdev->netdev_ops = ðsw_port_ops;
16458 + port_netdev->ethtool_ops = ðsw_port_ethtool_ops;
16460 + port_netdev->flags = port_netdev->flags |
16461 + IFF_PROMISC | IFF_SLAVE;
16463 + dev_alloc_name(port_netdev, port_name);
16464 + err = register_netdev(port_netdev);
16466 + dev_err(dev, "register_netdev error %d\n", err);
16467 + free_netdev(port_netdev);
16468 + goto err_takedown;
16473 + err = netdev_master_upper_dev_link(port_netdev, netdev,
16476 + dev_err(dev, "netdev_master_upper_dev_link error %d\n",
16478 + unregister_netdev(port_netdev);
16479 + free_netdev(port_netdev);
16481 + goto err_takedown;
16484 + rtmsg_ifinfo(RTM_NEWLINK, port_netdev, IFF_SLAVE, GFP_KERNEL);
16488 + list_add(&port_priv->list, &priv->port_list);
16490 + /* TODO: implement set_rm_mode instead of this */
16491 + err = ethsw_port_fdb_add_mc(port_netdev, def_mcast);
16493 + dev_warn(&netdev->dev,
16494 + "ethsw_port_fdb_add_mc err %d\n", err);
16497 + /* the switch starts up enabled */
16499 + err = dev_open(netdev);
16502 + dev_warn(dev, "dev_open err %d\n", err);
16505 + err = ethsw_setup_irqs(sw_dev);
16506 + if (unlikely(err)) {
16507 + dev_warn(dev, "ethsw_setup_irqs err %d\n", err);
16508 + goto err_takedown;
16511 + dev_info(&netdev->dev,
16512 + "probed %d port switch\n", priv->sw_attr.num_ifs);
16516 + ethsw_remove(sw_dev);
16518 + fsl_mc_portal_free(priv->mc_io);
16520 + dev_set_drvdata(dev, NULL);
16521 + free_netdev(netdev);
16526 +static const struct fsl_mc_device_id ethsw_match_id_table[] = {
16528 + .vendor = FSL_MC_VENDOR_FREESCALE,
16529 + .obj_type = "dpsw",
16534 +static struct fsl_mc_driver eth_sw_drv = {
16536 + .name = KBUILD_MODNAME,
16537 + .owner = THIS_MODULE,
16539 + .probe = ethsw_probe,
16540 + .remove = ethsw_remove,
16541 + .match_id_table = ethsw_match_id_table,
16544 +module_fsl_mc_driver(eth_sw_drv);
16546 +MODULE_LICENSE("GPL");
16547 +MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver (prototype)");
16548 diff --git a/drivers/staging/fsl-dpaa2/evb/Kconfig b/drivers/staging/fsl-dpaa2/evb/Kconfig
16549 new file mode 100644
16550 index 00000000..3534f697
16552 +++ b/drivers/staging/fsl-dpaa2/evb/Kconfig
16554 +config FSL_DPAA2_EVB
16555 + tristate "DPAA2 Edge Virtual Bridge"
16556 + depends on FSL_MC_BUS && FSL_DPAA2
16557 + select VLAN_8021Q
16560 + Prototype driver for DPAA2 Edge Virtual Bridge.
16561 diff --git a/drivers/staging/fsl-dpaa2/evb/Makefile b/drivers/staging/fsl-dpaa2/evb/Makefile
16562 new file mode 100644
16563 index 00000000..ecc529d7
16565 +++ b/drivers/staging/fsl-dpaa2/evb/Makefile
16568 +obj-$(CONFIG_FSL_DPAA2_EVB) += dpaa2-evb.o
16570 +dpaa2-evb-objs := evb.o dpdmux.o
16573 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
16576 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
16577 diff --git a/drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h b/drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
16578 new file mode 100644
16579 index 00000000..66306804
16581 +++ b/drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
16583 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
16585 + * Redistribution and use in source and binary forms, with or without
16586 + * modification, are permitted provided that the following conditions are met:
16587 + * * Redistributions of source code must retain the above copyright
16588 + * notice, this list of conditions and the following disclaimer.
16589 + * * Redistributions in binary form must reproduce the above copyright
16590 + * notice, this list of conditions and the following disclaimer in the
16591 + * documentation and/or other materials provided with the distribution.
16592 + * * Neither the name of the above-listed copyright holders nor the
16593 + * names of any contributors may be used to endorse or promote products
16594 + * derived from this software without specific prior written permission.
16597 + * ALTERNATIVELY, this software may be distributed under the terms of the
16598 + * GNU General Public License ("GPL") as published by the Free Software
16599 + * Foundation, either version 2 of that License or (at your option) any
16602 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16603 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16604 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16605 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
16606 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
16607 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
16608 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
16609 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
16610 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
16611 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
16612 + * POSSIBILITY OF SUCH DAMAGE.
16614 +#ifndef _FSL_DPDMUX_CMD_H
16615 +#define _FSL_DPDMUX_CMD_H
16617 +/* DPDMUX Version */
16618 +#define DPDMUX_VER_MAJOR 6
16619 +#define DPDMUX_VER_MINOR 1
16621 +#define DPDMUX_CMD_BASE_VER 1
16622 +#define DPDMUX_CMD_ID_OFFSET 4
16624 +#define DPDMUX_CMD(id) (((id) << DPDMUX_CMD_ID_OFFSET) | DPDMUX_CMD_BASE_VER)
16627 +#define DPDMUX_CMDID_CLOSE DPDMUX_CMD(0x800)
16628 +#define DPDMUX_CMDID_OPEN DPDMUX_CMD(0x806)
16629 +#define DPDMUX_CMDID_CREATE DPDMUX_CMD(0x906)
16630 +#define DPDMUX_CMDID_DESTROY DPDMUX_CMD(0x986)
16631 +#define DPDMUX_CMDID_GET_API_VERSION DPDMUX_CMD(0xa06)
16633 +#define DPDMUX_CMDID_ENABLE DPDMUX_CMD(0x002)
16634 +#define DPDMUX_CMDID_DISABLE DPDMUX_CMD(0x003)
16635 +#define DPDMUX_CMDID_GET_ATTR DPDMUX_CMD(0x004)
16636 +#define DPDMUX_CMDID_RESET DPDMUX_CMD(0x005)
16637 +#define DPDMUX_CMDID_IS_ENABLED DPDMUX_CMD(0x006)
16639 +#define DPDMUX_CMDID_SET_IRQ_ENABLE DPDMUX_CMD(0x012)
16640 +#define DPDMUX_CMDID_GET_IRQ_ENABLE DPDMUX_CMD(0x013)
16641 +#define DPDMUX_CMDID_SET_IRQ_MASK DPDMUX_CMD(0x014)
16642 +#define DPDMUX_CMDID_GET_IRQ_MASK DPDMUX_CMD(0x015)
16643 +#define DPDMUX_CMDID_GET_IRQ_STATUS DPDMUX_CMD(0x016)
16644 +#define DPDMUX_CMDID_CLEAR_IRQ_STATUS DPDMUX_CMD(0x017)
16646 +#define DPDMUX_CMDID_SET_MAX_FRAME_LENGTH DPDMUX_CMD(0x0a1)
16648 +#define DPDMUX_CMDID_UL_RESET_COUNTERS DPDMUX_CMD(0x0a3)
16650 +#define DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES DPDMUX_CMD(0x0a7)
16651 +#define DPDMUX_CMDID_IF_GET_ATTR DPDMUX_CMD(0x0a8)
16652 +#define DPDMUX_CMDID_IF_ENABLE DPDMUX_CMD(0x0a9)
16653 +#define DPDMUX_CMDID_IF_DISABLE DPDMUX_CMD(0x0aa)
16655 +#define DPDMUX_CMDID_IF_ADD_L2_RULE DPDMUX_CMD(0x0b0)
16656 +#define DPDMUX_CMDID_IF_REMOVE_L2_RULE DPDMUX_CMD(0x0b1)
16657 +#define DPDMUX_CMDID_IF_GET_COUNTER DPDMUX_CMD(0x0b2)
16658 +#define DPDMUX_CMDID_IF_SET_LINK_CFG DPDMUX_CMD(0x0b3)
16659 +#define DPDMUX_CMDID_IF_GET_LINK_STATE DPDMUX_CMD(0x0b4)
16661 +#define DPDMUX_CMDID_SET_CUSTOM_KEY DPDMUX_CMD(0x0b5)
16662 +#define DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b6)
16663 +#define DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b7)
16665 +#define DPDMUX_MASK(field) \
16666 + GENMASK(DPDMUX_##field##_SHIFT + DPDMUX_##field##_SIZE - 1, \
16667 + DPDMUX_##field##_SHIFT)
16668 +#define dpdmux_set_field(var, field, val) \
16669 + ((var) |= (((val) << DPDMUX_##field##_SHIFT) & DPDMUX_MASK(field)))
16670 +#define dpdmux_get_field(var, field) \
16671 + (((var) & DPDMUX_MASK(field)) >> DPDMUX_##field##_SHIFT)
16673 +struct dpdmux_cmd_open {
16677 +struct dpdmux_cmd_create {
16683 + u16 adv_max_dmat_entries;
16684 + u16 adv_max_mc_groups;
16685 + u16 adv_max_vlan_ids;
16691 +struct dpdmux_cmd_destroy {
16695 +#define DPDMUX_ENABLE_SHIFT 0
16696 +#define DPDMUX_ENABLE_SIZE 1
16698 +struct dpdmux_rsp_is_enabled {
16702 +struct dpdmux_cmd_set_irq_enable {
16708 +struct dpdmux_cmd_get_irq_enable {
16713 +struct dpdmux_rsp_get_irq_enable {
16717 +struct dpdmux_cmd_set_irq_mask {
16722 +struct dpdmux_cmd_get_irq_mask {
16727 +struct dpdmux_rsp_get_irq_mask {
16731 +struct dpdmux_cmd_get_irq_status {
16736 +struct dpdmux_rsp_get_irq_status {
16740 +struct dpdmux_cmd_clear_irq_status {
16745 +struct dpdmux_rsp_get_attr {
16760 +struct dpdmux_cmd_set_max_frame_length {
16761 + u16 max_frame_length;
16764 +#define DPDMUX_ACCEPTED_FRAMES_TYPE_SHIFT 0
16765 +#define DPDMUX_ACCEPTED_FRAMES_TYPE_SIZE 4
16766 +#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SHIFT 4
16767 +#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SIZE 4
16769 +struct dpdmux_cmd_if_set_accepted_frames {
16771 + u8 frames_options;
16774 +struct dpdmux_cmd_if {
16778 +struct dpdmux_rsp_if_get_attr {
16782 + u8 accepted_frames_type;
16786 +struct dpdmux_cmd_if_l2_rule {
16799 +struct dpdmux_cmd_if_get_counter {
16804 +struct dpdmux_rsp_if_get_counter {
16809 +struct dpdmux_cmd_if_set_link_cfg {
16819 +struct dpdmux_cmd_if_get_link_state {
16823 +struct dpdmux_rsp_if_get_link_state {
16834 +struct dpdmux_rsp_get_api_version {
16839 +struct dpdmux_set_custom_key {
16841 + u64 key_cfg_iova;
16844 +struct dpdmux_cmd_add_custom_cls_entry {
16853 +struct dpdmux_cmd_remove_custom_cls_entry {
16861 +#endif /* _FSL_DPDMUX_CMD_H */
16862 diff --git a/drivers/staging/fsl-dpaa2/evb/dpdmux.c b/drivers/staging/fsl-dpaa2/evb/dpdmux.c
16863 new file mode 100644
16864 index 00000000..f7a87633
16866 +++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.c
16868 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
16870 + * Redistribution and use in source and binary forms, with or without
16871 + * modification, are permitted provided that the following conditions are met:
16872 + * * Redistributions of source code must retain the above copyright
16873 + * notice, this list of conditions and the following disclaimer.
16874 + * * Redistributions in binary form must reproduce the above copyright
16875 + * notice, this list of conditions and the following disclaimer in the
16876 + * documentation and/or other materials provided with the distribution.
16877 + * * Neither the name of the above-listed copyright holders nor the
16878 + * names of any contributors may be used to endorse or promote products
16879 + * derived from this software without specific prior written permission.
16882 + * ALTERNATIVELY, this software may be distributed under the terms of the
16883 + * GNU General Public License ("GPL") as published by the Free Software
16884 + * Foundation, either version 2 of that License or (at your option) any
16887 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16888 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16889 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16890 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
16891 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
16892 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
16893 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
16894 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
16895 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
16896 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
16897 + * POSSIBILITY OF SUCH DAMAGE.
16899 +#include "../../fsl-mc/include/mc-sys.h"
16900 +#include "../../fsl-mc/include/mc-cmd.h"
16901 +#include "dpdmux.h"
16902 +#include "dpdmux-cmd.h"
16905 + * dpdmux_open() - Open a control session for the specified object
16906 + * @mc_io: Pointer to MC portal's I/O object
16907 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
16908 + * @dpdmux_id: DPDMUX unique ID
16909 + * @token: Returned token; use in subsequent API calls
16911 + * This function can be used to open a control session for an
16912 + * already created object; an object may have been declared in
16913 + * the DPL or by calling the dpdmux_create() function.
16914 + * This function returns a unique authentication token,
16915 + * associated with the specific object ID and the specific MC
16916 + * portal; this token must be used in all subsequent commands for
16917 + * this specific object.
16919 + * Return: '0' on Success; Error code otherwise.
16921 +int dpdmux_open(struct fsl_mc_io *mc_io,
16926 + struct mc_command cmd = { 0 };
16927 + struct dpdmux_cmd_open *cmd_params;
16930 + /* prepare command */
16931 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_OPEN,
16934 + cmd_params = (struct dpdmux_cmd_open *)cmd.params;
16935 + cmd_params->dpdmux_id = cpu_to_le32(dpdmux_id);
16937 + /* send command to mc*/
16938 + err = mc_send_command(mc_io, &cmd);
16942 + /* retrieve response parameters */
16943 + *token = mc_cmd_hdr_read_token(&cmd);
16949 + * dpdmux_close() - Close the control session of the object
16950 + * @mc_io: Pointer to MC portal's I/O object
16951 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
16952 + * @token: Token of DPDMUX object
16954 + * After this function is called, no further operations are
16955 + * allowed on the object without opening a new control session.
16957 + * Return: '0' on Success; Error code otherwise.
16959 +int dpdmux_close(struct fsl_mc_io *mc_io,
16963 + struct mc_command cmd = { 0 };
16965 + /* prepare command */
16966 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE,
16970 + /* send command to mc*/
16971 + return mc_send_command(mc_io, &cmd);
16975 + * dpdmux_create() - Create the DPDMUX object
16976 + * @mc_io: Pointer to MC portal's I/O object
16977 + * @dprc_token: Parent container token; '0' for default container
16978 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
16979 + * @cfg: Configuration structure
16980 + * @obj_id: returned object id
16982 + * Create the DPDMUX object, allocate required resources and
16983 + * perform required initialization.
16985 + * The object can be created either by declaring it in the
16986 + * DPL file, or by calling this function.
16988 + * The function accepts an authentication token of a parent
16989 + * container that this object should be assigned to. The token
16990 + * can be '0' so the object will be assigned to the default container.
16991 + * The newly created object can be opened with the returned
16992 + * object id and using the container's associated tokens and MC portals.
16994 + * Return: '0' on Success; Error code otherwise.
16996 +int dpdmux_create(struct fsl_mc_io *mc_io,
16999 + const struct dpdmux_cfg *cfg,
17002 + struct mc_command cmd = { 0 };
17003 + struct dpdmux_cmd_create *cmd_params;
17006 + /* prepare command */
17007 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CREATE,
17010 + cmd_params = (struct dpdmux_cmd_create *)cmd.params;
17011 + cmd_params->method = cfg->method;
17012 + cmd_params->manip = cfg->manip;
17013 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
17014 + cmd_params->adv_max_dmat_entries =
17015 + cpu_to_le16(cfg->adv.max_dmat_entries);
17016 + cmd_params->adv_max_mc_groups = cpu_to_le16(cfg->adv.max_mc_groups);
17017 + cmd_params->adv_max_vlan_ids = cpu_to_le16(cfg->adv.max_vlan_ids);
17018 + cmd_params->options = cpu_to_le64(cfg->adv.options);
17020 + /* send command to mc*/
17021 + err = mc_send_command(mc_io, &cmd);
17025 + /* retrieve response parameters */
17026 + *obj_id = mc_cmd_hdr_read_token(&cmd);
17032 + * dpdmux_destroy() - Destroy the DPDMUX object and release all its resources.
17033 + * @mc_io: Pointer to MC portal's I/O object
17034 + * @dprc_token: Parent container token; '0' for default container
17035 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17036 + * @object_id: The object id; it must be a valid id within the container that
17037 + * created this object;
17039 + * The function accepts the authentication token of the parent container that
17040 + * created the object (not the one that currently owns the object). The object
17041 + * is searched within parent using the provided 'object_id'.
17042 + * All tokens to the object must be closed before calling destroy.
17044 + * Return: '0' on Success; error code otherwise.
17046 +int dpdmux_destroy(struct fsl_mc_io *mc_io,
17051 + struct mc_command cmd = { 0 };
17052 + struct dpdmux_cmd_destroy *cmd_params;
17054 + /* prepare command */
17055 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DESTROY,
17058 + cmd_params = (struct dpdmux_cmd_destroy *)cmd.params;
17059 + cmd_params->dpdmux_id = cpu_to_le32(object_id);
17061 + /* send command to mc*/
17062 + return mc_send_command(mc_io, &cmd);
17066 + * dpdmux_enable() - Enable DPDMUX functionality
17067 + * @mc_io: Pointer to MC portal's I/O object
17068 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17069 + * @token: Token of DPDMUX object
17071 + * Return: '0' on Success; Error code otherwise.
17073 +int dpdmux_enable(struct fsl_mc_io *mc_io,
17077 + struct mc_command cmd = { 0 };
17079 + /* prepare command */
17080 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE,
17084 + /* send command to mc*/
17085 + return mc_send_command(mc_io, &cmd);
17089 + * dpdmux_disable() - Disable DPDMUX functionality
17090 + * @mc_io: Pointer to MC portal's I/O object
17091 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17092 + * @token: Token of DPDMUX object
17094 + * Return: '0' on Success; Error code otherwise.
17096 +int dpdmux_disable(struct fsl_mc_io *mc_io,
17100 + struct mc_command cmd = { 0 };
17102 + /* prepare command */
17103 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE,
17107 + /* send command to mc*/
17108 + return mc_send_command(mc_io, &cmd);
17112 + * dpdmux_is_enabled() - Check if the DPDMUX is enabled.
17113 + * @mc_io: Pointer to MC portal's I/O object
17114 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17115 + * @token: Token of DPDMUX object
17116 + * @en: Returns '1' if object is enabled; '0' otherwise
17118 + * Return: '0' on Success; Error code otherwise.
17120 +int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
17125 + struct mc_command cmd = { 0 };
17126 + struct dpdmux_rsp_is_enabled *rsp_params;
17129 + /* prepare command */
17130 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IS_ENABLED,
17134 + /* send command to mc*/
17135 + err = mc_send_command(mc_io, &cmd);
17139 + /* retrieve response parameters */
17140 + rsp_params = (struct dpdmux_rsp_is_enabled *)cmd.params;
17141 + *en = dpdmux_get_field(rsp_params->en, ENABLE);
17147 + * dpdmux_reset() - Reset the DPDMUX, returns the object to initial state.
17148 + * @mc_io: Pointer to MC portal's I/O object
17149 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17150 + * @token: Token of DPDMUX object
17152 + * Return: '0' on Success; Error code otherwise.
17154 +int dpdmux_reset(struct fsl_mc_io *mc_io,
17158 + struct mc_command cmd = { 0 };
17160 + /* prepare command */
17161 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET,
17165 + /* send command to mc*/
17166 + return mc_send_command(mc_io, &cmd);
17170 + * dpdmux_set_irq_enable() - Set overall interrupt state.
17171 + * @mc_io: Pointer to MC portal's I/O object
17172 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17173 + * @token: Token of DPDMUX object
17174 + * @irq_index: The interrupt index to configure
17175 + * @en: Interrupt state - enable = 1, disable = 0
17177 + * Allows GPP software to control when interrupts are generated.
17178 + * Each interrupt can have up to 32 causes. The enable/disable controls the
17179 + * overall interrupt state. If the interrupt is disabled, no causes will cause
17182 + * Return: '0' on Success; Error code otherwise.
17184 +int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
17190 + struct mc_command cmd = { 0 };
17191 + struct dpdmux_cmd_set_irq_enable *cmd_params;
17193 + /* prepare command */
17194 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_ENABLE,
17197 + cmd_params = (struct dpdmux_cmd_set_irq_enable *)cmd.params;
17198 + cmd_params->enable = en;
17199 + cmd_params->irq_index = irq_index;
17201 + /* send command to mc*/
17202 + return mc_send_command(mc_io, &cmd);
17206 + * dpdmux_get_irq_enable() - Get overall interrupt state.
17207 + * @mc_io: Pointer to MC portal's I/O object
17208 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17209 + * @token: Token of DPDMUX object
17210 + * @irq_index: The interrupt index to configure
17211 + * @en: Returned interrupt state - enable = 1, disable = 0
17213 + * Return: '0' on Success; Error code otherwise.
17215 +int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
17221 + struct mc_command cmd = { 0 };
17222 + struct dpdmux_cmd_get_irq_enable *cmd_params;
17223 + struct dpdmux_rsp_get_irq_enable *rsp_params;
17226 + /* prepare command */
17227 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_ENABLE,
17230 + cmd_params = (struct dpdmux_cmd_get_irq_enable *)cmd.params;
17231 + cmd_params->irq_index = irq_index;
17233 + /* send command to mc*/
17234 + err = mc_send_command(mc_io, &cmd);
17238 + /* retrieve response parameters */
17239 + rsp_params = (struct dpdmux_rsp_get_irq_enable *)cmd.params;
17240 + *en = rsp_params->enable;
17246 + * dpdmux_set_irq_mask() - Set interrupt mask.
17247 + * @mc_io: Pointer to MC portal's I/O object
17248 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17249 + * @token: Token of DPDMUX object
17250 + * @irq_index: The interrupt index to configure
17251 + * @mask: event mask to trigger interrupt;
17253 + * 0 = ignore event
17254 + * 1 = consider event for asserting IRQ
17256 + * Every interrupt can have up to 32 causes and the interrupt model supports
17257 + * masking/unmasking each cause independently
17259 + * Return: '0' on Success; Error code otherwise.
17261 +int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
17267 + struct mc_command cmd = { 0 };
17268 + struct dpdmux_cmd_set_irq_mask *cmd_params;
17270 + /* prepare command */
17271 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_MASK,
17274 + cmd_params = (struct dpdmux_cmd_set_irq_mask *)cmd.params;
17275 + cmd_params->mask = cpu_to_le32(mask);
17276 + cmd_params->irq_index = irq_index;
17278 + /* send command to mc*/
17279 + return mc_send_command(mc_io, &cmd);
17283 + * dpdmux_get_irq_mask() - Get interrupt mask.
17284 + * @mc_io: Pointer to MC portal's I/O object
17285 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17286 + * @token: Token of DPDMUX object
17287 + * @irq_index: The interrupt index to configure
17288 + * @mask: Returned event mask to trigger interrupt
17290 + * Every interrupt can have up to 32 causes and the interrupt model supports
17291 + * masking/unmasking each cause independently
17293 + * Return: '0' on Success; Error code otherwise.
17295 +int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
17301 + struct mc_command cmd = { 0 };
17302 + struct dpdmux_cmd_get_irq_mask *cmd_params;
17303 + struct dpdmux_rsp_get_irq_mask *rsp_params;
17306 + /* prepare command */
17307 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_MASK,
17310 + cmd_params = (struct dpdmux_cmd_get_irq_mask *)cmd.params;
17311 + cmd_params->irq_index = irq_index;
17313 + /* send command to mc*/
17314 + err = mc_send_command(mc_io, &cmd);
17318 + /* retrieve response parameters */
17319 + rsp_params = (struct dpdmux_rsp_get_irq_mask *)cmd.params;
17320 + *mask = le32_to_cpu(rsp_params->mask);
17326 + * dpdmux_get_irq_status() - Get the current status of any pending interrupts.
17327 + * @mc_io: Pointer to MC portal's I/O object
17328 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17329 + * @token: Token of DPDMUX object
17330 + * @irq_index: The interrupt index to configure
17331 + * @status: Returned interrupts status - one bit per cause:
17332 + * 0 = no interrupt pending
17333 + * 1 = interrupt pending
17335 + * Return: '0' on Success; Error code otherwise.
17337 +int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
17343 + struct mc_command cmd = { 0 };
17344 + struct dpdmux_cmd_get_irq_status *cmd_params;
17345 + struct dpdmux_rsp_get_irq_status *rsp_params;
17348 + /* prepare command */
17349 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_STATUS,
17352 + cmd_params = (struct dpdmux_cmd_get_irq_status *)cmd.params;
17353 + cmd_params->status = cpu_to_le32(*status);
17354 + cmd_params->irq_index = irq_index;
17356 + /* send command to mc*/
17357 + err = mc_send_command(mc_io, &cmd);
17361 + /* retrieve response parameters */
17362 + rsp_params = (struct dpdmux_rsp_get_irq_status *)cmd.params;
17363 + *status = le32_to_cpu(rsp_params->status);
17369 + * dpdmux_clear_irq_status() - Clear a pending interrupt's status
17370 + * @mc_io: Pointer to MC portal's I/O object
17371 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17372 + * @token: Token of DPDMUX object
17373 + * @irq_index: The interrupt index to configure
17374 + * @status: bits to clear (W1C) - one bit per cause:
17375 + * 0 = don't change
17376 + * 1 = clear status bit
17378 + * Return: '0' on Success; Error code otherwise.
17380 +int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
17386 + struct mc_command cmd = { 0 };
17387 + struct dpdmux_cmd_clear_irq_status *cmd_params;
17389 + /* prepare command */
17390 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLEAR_IRQ_STATUS,
17393 + cmd_params = (struct dpdmux_cmd_clear_irq_status *)cmd.params;
17394 + cmd_params->status = cpu_to_le32(status);
17395 + cmd_params->irq_index = irq_index;
17397 + /* send command to mc*/
17398 + return mc_send_command(mc_io, &cmd);
17402 + * dpdmux_get_attributes() - Retrieve DPDMUX attributes
17403 + * @mc_io: Pointer to MC portal's I/O object
17404 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17405 + * @token: Token of DPDMUX object
17406 + * @attr: Returned object's attributes
17408 + * Return: '0' on Success; Error code otherwise.
17410 +int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
17413 + struct dpdmux_attr *attr)
17415 + struct mc_command cmd = { 0 };
17416 + struct dpdmux_rsp_get_attr *rsp_params;
17419 + /* prepare command */
17420 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_ATTR,
17424 + /* send command to mc*/
17425 + err = mc_send_command(mc_io, &cmd);
17429 + /* retrieve response parameters */
17430 + rsp_params = (struct dpdmux_rsp_get_attr *)cmd.params;
17431 + attr->id = le32_to_cpu(rsp_params->id);
17432 + attr->options = le64_to_cpu(rsp_params->options);
17433 + attr->method = rsp_params->method;
17434 + attr->manip = rsp_params->manip;
17435 + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
17436 + attr->mem_size = le16_to_cpu(rsp_params->mem_size);
17442 + * dpdmux_if_enable() - Enable Interface
17443 + * @mc_io: Pointer to MC portal's I/O object
17444 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17445 + * @token: Token of DPDMUX object
17446 + * @if_id: Interface Identifier
17448 + * Return: Completion status. '0' on Success; Error code otherwise.
17450 +int dpdmux_if_enable(struct fsl_mc_io *mc_io,
17455 + struct dpdmux_cmd_if *cmd_params;
17456 + struct mc_command cmd = { 0 };
17458 + /* prepare command */
17459 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ENABLE,
17462 + cmd_params = (struct dpdmux_cmd_if *)cmd.params;
17463 + cmd_params->if_id = cpu_to_le16(if_id);
17465 + /* send command to mc*/
17466 + return mc_send_command(mc_io, &cmd);
17470 + * dpdmux_if_disable() - Disable Interface
17471 + * @mc_io: Pointer to MC portal's I/O object
17472 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17473 + * @token: Token of DPDMUX object
17474 + * @if_id: Interface Identifier
17476 + * Return: Completion status. '0' on Success; Error code otherwise.
17478 +int dpdmux_if_disable(struct fsl_mc_io *mc_io,
17483 + struct dpdmux_cmd_if *cmd_params;
17484 + struct mc_command cmd = { 0 };
17486 + /* prepare command */
17487 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_DISABLE,
17490 + cmd_params = (struct dpdmux_cmd_if *)cmd.params;
17491 + cmd_params->if_id = cpu_to_le16(if_id);
17493 + /* send command to mc*/
17494 + return mc_send_command(mc_io, &cmd);
17498 + * dpdmux_set_max_frame_length() - Set the maximum frame length in DPDMUX
17499 + * @mc_io: Pointer to MC portal's I/O object
17500 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17501 + * @token: Token of DPDMUX object
17502 + * @max_frame_length: The required maximum frame length
17504 + * Update the maximum frame length on all DMUX interfaces.
17505 + * In case of VEPA, the maximum frame length on all dmux interfaces
17506 + * will be updated with the minimum value of the mfls of the connected
17507 + * dpnis and the actual value of dmux mfl.
17509 + * Return: '0' on Success; Error code otherwise.
17511 +int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
17514 + u16 max_frame_length)
17516 + struct mc_command cmd = { 0 };
17517 + struct dpdmux_cmd_set_max_frame_length *cmd_params;
17519 + /* prepare command */
17520 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_MAX_FRAME_LENGTH,
17523 + cmd_params = (struct dpdmux_cmd_set_max_frame_length *)cmd.params;
17524 + cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
17526 + /* send command to mc*/
17527 + return mc_send_command(mc_io, &cmd);
17531 + * dpdmux_ul_reset_counters() - Function resets the uplink counter
17532 + * @mc_io: Pointer to MC portal's I/O object
17533 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17534 + * @token: Token of DPDMUX object
17536 + * Return: '0' on Success; Error code otherwise.
17538 +int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
17542 + struct mc_command cmd = { 0 };
17544 + /* prepare command */
17545 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS,
17549 + /* send command to mc*/
17550 + return mc_send_command(mc_io, &cmd);
17554 + * dpdmux_if_set_accepted_frames() - Set the accepted frame types
17555 + * @mc_io: Pointer to MC portal's I/O object
17556 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17557 + * @token: Token of DPDMUX object
17558 + * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
17559 + * @cfg: Frame types configuration
17561 + * if 'DPDMUX_ADMIT_ONLY_VLAN_TAGGED' is set - untagged frames or
17562 + * priority-tagged frames are discarded.
17563 + * if 'DPDMUX_ADMIT_ONLY_UNTAGGED' is set - untagged frames or
17564 + * priority-tagged frames are accepted.
17565 + * if 'DPDMUX_ADMIT_ALL' is set (default mode) - all VLAN tagged,
17566 + * untagged and priority-tagged frame are accepted;
17568 + * Return: '0' on Success; Error code otherwise.
17570 +int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
17574 + const struct dpdmux_accepted_frames *cfg)
17576 + struct mc_command cmd = { 0 };
17577 + struct dpdmux_cmd_if_set_accepted_frames *cmd_params;
17579 + /* prepare command */
17580 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES,
17583 + cmd_params = (struct dpdmux_cmd_if_set_accepted_frames *)cmd.params;
17584 + cmd_params->if_id = cpu_to_le16(if_id);
17585 + dpdmux_set_field(cmd_params->frames_options, ACCEPTED_FRAMES_TYPE,
17587 + dpdmux_set_field(cmd_params->frames_options, UNACCEPTED_FRAMES_ACTION,
17588 + cfg->unaccept_act);
17590 + /* send command to mc*/
17591 + return mc_send_command(mc_io, &cmd);
17595 + * dpdmux_if_get_attributes() - Obtain DPDMUX interface attributes
17596 + * @mc_io: Pointer to MC portal's I/O object
17597 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17598 + * @token: Token of DPDMUX object
17599 + * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
17600 + * @attr: Interface attributes
17602 + * Return: '0' on Success; Error code otherwise.
17604 +int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
17608 + struct dpdmux_if_attr *attr)
17610 + struct mc_command cmd = { 0 };
17611 + struct dpdmux_cmd_if *cmd_params;
17612 + struct dpdmux_rsp_if_get_attr *rsp_params;
17615 + /* prepare command */
17616 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_ATTR,
17619 + cmd_params = (struct dpdmux_cmd_if *)cmd.params;
17620 + cmd_params->if_id = cpu_to_le16(if_id);
17622 + /* send command to mc*/
17623 + err = mc_send_command(mc_io, &cmd);
17627 + /* retrieve response parameters */
17628 + rsp_params = (struct dpdmux_rsp_if_get_attr *)cmd.params;
17629 + attr->rate = le32_to_cpu(rsp_params->rate);
17630 + attr->enabled = dpdmux_get_field(rsp_params->enabled, ENABLE);
17631 + attr->accept_frame_type =
17632 + dpdmux_get_field(rsp_params->accepted_frames_type,
17633 + ACCEPTED_FRAMES_TYPE);
17639 + * dpdmux_if_remove_l2_rule() - Remove L2 rule from DPDMUX table
17640 + * @mc_io: Pointer to MC portal's I/O object
17641 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17642 + * @token: Token of DPDMUX object
17643 + * @if_id: Destination interface ID
17646 + * Function removes an L2 rule from DPDMUX table
17647 + * or removes an interface from an existing multicast address
17649 + * Return: '0' on Success; Error code otherwise.
17651 +int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
17655 + const struct dpdmux_l2_rule *rule)
17657 + struct mc_command cmd = { 0 };
17658 + struct dpdmux_cmd_if_l2_rule *cmd_params;
17660 + /* prepare command */
17661 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_REMOVE_L2_RULE,
17664 + cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
17665 + cmd_params->if_id = cpu_to_le16(if_id);
17666 + cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
17667 + cmd_params->mac_addr5 = rule->mac_addr[5];
17668 + cmd_params->mac_addr4 = rule->mac_addr[4];
17669 + cmd_params->mac_addr3 = rule->mac_addr[3];
17670 + cmd_params->mac_addr2 = rule->mac_addr[2];
17671 + cmd_params->mac_addr1 = rule->mac_addr[1];
17672 + cmd_params->mac_addr0 = rule->mac_addr[0];
17674 + /* send command to mc*/
17675 + return mc_send_command(mc_io, &cmd);
17679 + * dpdmux_if_add_l2_rule() - Add L2 rule into DPDMUX table
17680 + * @mc_io: Pointer to MC portal's I/O object
17681 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17682 + * @token: Token of DPDMUX object
17683 + * @if_id: Destination interface ID
17686 + * Function adds an L2 rule into DPDMUX table
17687 + * or adds an interface to an existing multicast address
17689 + * Return: '0' on Success; Error code otherwise.
17691 +int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
17695 + const struct dpdmux_l2_rule *rule)
17697 + struct mc_command cmd = { 0 };
17698 + struct dpdmux_cmd_if_l2_rule *cmd_params;
17700 + /* prepare command */
17701 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ADD_L2_RULE,
17704 + cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
17705 + cmd_params->if_id = cpu_to_le16(if_id);
17706 + cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
17707 + cmd_params->mac_addr5 = rule->mac_addr[5];
17708 + cmd_params->mac_addr4 = rule->mac_addr[4];
17709 + cmd_params->mac_addr3 = rule->mac_addr[3];
17710 + cmd_params->mac_addr2 = rule->mac_addr[2];
17711 + cmd_params->mac_addr1 = rule->mac_addr[1];
17712 + cmd_params->mac_addr0 = rule->mac_addr[0];
17714 + /* send command to mc*/
17715 + return mc_send_command(mc_io, &cmd);
17719 + * dpdmux_if_get_counter() - Functions obtains specific counter of an interface
17720 + * @mc_io: Pointer to MC portal's I/O object
17721 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17722 + * @token: Token of DPDMUX object
17723 + * @if_id: Interface Id
17724 + * @counter_type: counter type
17725 + * @counter: Returned specific counter information
17727 + * Return: '0' on Success; Error code otherwise.
17729 +int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
17733 + enum dpdmux_counter_type counter_type,
17736 + struct mc_command cmd = { 0 };
17737 + struct dpdmux_cmd_if_get_counter *cmd_params;
17738 + struct dpdmux_rsp_if_get_counter *rsp_params;
17741 + /* prepare command */
17742 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_COUNTER,
17745 + cmd_params = (struct dpdmux_cmd_if_get_counter *)cmd.params;
17746 + cmd_params->if_id = cpu_to_le16(if_id);
17747 + cmd_params->counter_type = counter_type;
17749 + /* send command to mc*/
17750 + err = mc_send_command(mc_io, &cmd);
17754 + /* retrieve response parameters */
17755 + rsp_params = (struct dpdmux_rsp_if_get_counter *)cmd.params;
17756 + *counter = le64_to_cpu(rsp_params->counter);
17762 + * dpdmux_if_set_link_cfg() - Set the link configuration.
17763 + * @mc_io: Pointer to MC portal's I/O object
17764 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17765 + * @token:	Token of DPDMUX object
17766 + * @if_id: interface id
17767 + * @cfg: Link configuration
17769 + * Return: '0' on Success; Error code otherwise.
17771 +int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
17775 + struct dpdmux_link_cfg *cfg)
17777 + struct mc_command cmd = { 0 };
17778 + struct dpdmux_cmd_if_set_link_cfg *cmd_params;
17780 + /* prepare command */
17781 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_LINK_CFG,
17784 + cmd_params = (struct dpdmux_cmd_if_set_link_cfg *)cmd.params;
17785 + cmd_params->if_id = cpu_to_le16(if_id);
17786 + cmd_params->rate = cpu_to_le32(cfg->rate);
17787 + cmd_params->options = cpu_to_le64(cfg->options);
17789 + /* send command to mc*/
17790 + return mc_send_command(mc_io, &cmd);
17794 + * dpdmux_if_get_link_state() - Return the link state
17795 + * @mc_io: Pointer to MC portal's I/O object
17796 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17797 + * @token:	Token of DPDMUX object
17798 + * @if_id: interface id
17799 + * @state: link state
17801 + * @returns '0' on Success; Error code otherwise.
17803 +int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
17807 + struct dpdmux_link_state *state)
17809 + struct mc_command cmd = { 0 };
17810 + struct dpdmux_cmd_if_get_link_state *cmd_params;
17811 + struct dpdmux_rsp_if_get_link_state *rsp_params;
17814 + /* prepare command */
17815 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_LINK_STATE,
17818 + cmd_params = (struct dpdmux_cmd_if_get_link_state *)cmd.params;
17819 + cmd_params->if_id = cpu_to_le16(if_id);
17821 + /* send command to mc*/
17822 + err = mc_send_command(mc_io, &cmd);
17826 + /* retrieve response parameters */
17827 + rsp_params = (struct dpdmux_rsp_if_get_link_state *)cmd.params;
17828 + state->rate = le32_to_cpu(rsp_params->rate);
17829 + state->options = le64_to_cpu(rsp_params->options);
17830 + state->up = dpdmux_get_field(rsp_params->up, ENABLE);
17836 + * dpdmux_set_custom_key() - Set a custom classification key.
17838 + * This API is only available for DPDMUX instance created with
17839 + * DPDMUX_METHOD_CUSTOM. This API must be called before populating the
17840 + * classification table using dpdmux_add_custom_cls_entry.
17842 + * Calls to dpdmux_set_custom_key remove all existing classification entries
17843 + * that may have been added previously using dpdmux_add_custom_cls_entry.
17845 + * @mc_io: Pointer to MC portal's I/O object
17846 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17847 + * @token:	Token of DPDMUX object
17848 + * @if_id: interface id
17849 + * @key_cfg_iova: DMA address of a configuration structure set up using
17850 + * dpkg_prepare_key_cfg. Maximum key size is 24 bytes.
17852 + * @returns '0' on Success; Error code otherwise.
17854 +int dpdmux_set_custom_key(struct fsl_mc_io *mc_io,
17857 + u64 key_cfg_iova)
17859 + struct dpdmux_set_custom_key *cmd_params;
17860 + struct mc_command cmd = { 0 };
17862 + /* prepare command */
17863 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_CUSTOM_KEY,
17866 + cmd_params = (struct dpdmux_set_custom_key *)cmd.params;
17867 + cmd_params->key_cfg_iova = cpu_to_le64(key_cfg_iova);
17869 + /* send command to mc*/
17870 + return mc_send_command(mc_io, &cmd);
17874 + * dpdmux_add_custom_cls_entry() - Adds a custom classification entry.
17876 + * This API is only available for DPDMUX instances created with
17877 + * DPDMUX_METHOD_CUSTOM. Before calling this function a classification key
17878 + * composition rule must be set up using dpdmux_set_custom_key.
17880 + * @mc_io: Pointer to MC portal's I/O object
17881 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17882 + * @token:	Token of DPDMUX object
17883 + * @rule: Classification rule to insert. Rules cannot be duplicated, if a
17884 + * matching rule already exists, the action will be replaced.
17885 + * @action: Action to perform for matching traffic.
17887 + * @returns '0' on Success; Error code otherwise.
17889 +int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io,
17892 + struct dpdmux_rule_cfg *rule,
17893 + struct dpdmux_cls_action *action)
17895 + struct dpdmux_cmd_add_custom_cls_entry *cmd_params;
17896 + struct mc_command cmd = { 0 };
17898 + /* prepare command */
17899 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY,
17903 + cmd_params = (struct dpdmux_cmd_add_custom_cls_entry *)cmd.params;
17904 + cmd_params->key_size = rule->key_size;
17905 + cmd_params->dest_if = cpu_to_le16(action->dest_if);
17906 + cmd_params->key_iova = cpu_to_le64(rule->key_iova);
17907 + cmd_params->mask_iova = cpu_to_le64(rule->mask_iova);
17909 + /* send command to mc*/
17910 + return mc_send_command(mc_io, &cmd);
17914 + * dpdmux_remove_custom_cls_entry() - Removes a custom classification entry.
17916 + * This API is only available for DPDMUX instances created with
17917 + * DPDMUX_METHOD_CUSTOM. The API can be used to remove classification
17918 + * entries previously inserted using dpdmux_add_custom_cls_entry.
17920 + * @mc_io: Pointer to MC portal's I/O object
17921 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17922 + * @token:	Token of DPDMUX object
17923 + * @rule: Classification rule to remove
17925 + * @returns '0' on Success; Error code otherwise.
17927 +int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io,
17930 + struct dpdmux_rule_cfg *rule)
17932 + struct dpdmux_cmd_remove_custom_cls_entry *cmd_params;
17933 + struct mc_command cmd = { 0 };
17935 + /* prepare command */
17936 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY,
17939 + cmd_params = (struct dpdmux_cmd_remove_custom_cls_entry *)cmd.params;
17940 + cmd_params->key_size = rule->key_size;
17941 + cmd_params->key_iova = cpu_to_le64(rule->key_iova);
17942 + cmd_params->mask_iova = cpu_to_le64(rule->mask_iova);
17944 + /* send command to mc*/
17945 + return mc_send_command(mc_io, &cmd);
17949 + * dpdmux_get_api_version() - Get Data Path Demux API version
17950 + * @mc_io: Pointer to MC portal's I/O object
17951 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17952 + * @major_ver: Major version of data path demux API
17953 + * @minor_ver: Minor version of data path demux API
17955 + * Return: '0' on Success; Error code otherwise.
17957 +int dpdmux_get_api_version(struct fsl_mc_io *mc_io,
17962 + struct mc_command cmd = { 0 };
17963 + struct dpdmux_rsp_get_api_version *rsp_params;
17966 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_API_VERSION,
17970 + err = mc_send_command(mc_io, &cmd);
17974 + rsp_params = (struct dpdmux_rsp_get_api_version *)cmd.params;
17975 + *major_ver = le16_to_cpu(rsp_params->major);
17976 + *minor_ver = le16_to_cpu(rsp_params->minor);
17980 diff --git a/drivers/staging/fsl-dpaa2/evb/dpdmux.h b/drivers/staging/fsl-dpaa2/evb/dpdmux.h
17981 new file mode 100644
17982 index 00000000..a6ccc7ef
17984 +++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.h
17986 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
17988 + * Redistribution and use in source and binary forms, with or without
17989 + * modification, are permitted provided that the following conditions are met:
17990 + * * Redistributions of source code must retain the above copyright
17991 + * notice, this list of conditions and the following disclaimer.
17992 + * * Redistributions in binary form must reproduce the above copyright
17993 + * notice, this list of conditions and the following disclaimer in the
17994 + * documentation and/or other materials provided with the distribution.
17995 + * * Neither the name of the above-listed copyright holders nor the
17996 + * names of any contributors may be used to endorse or promote products
17997 + * derived from this software without specific prior written permission.
18000 + * ALTERNATIVELY, this software may be distributed under the terms of the
18001 + * GNU General Public License ("GPL") as published by the Free Software
18002 + * Foundation, either version 2 of that License or (at your option) any
18005 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18006 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18007 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18008 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
18009 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
18010 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
18011 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
18012 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
18013 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
18014 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
18015 + * POSSIBILITY OF SUCH DAMAGE.
18017 +#ifndef __FSL_DPDMUX_H
18018 +#define __FSL_DPDMUX_H
18022 +/* Data Path Demux API
18023 + * Contains API for handling DPDMUX topology and functionality
18026 +int dpdmux_open(struct fsl_mc_io *mc_io,
18031 +int dpdmux_close(struct fsl_mc_io *mc_io,
18036 + * DPDMUX general options
18040 + * Enable bridging between internal interfaces
18042 +#define DPDMUX_OPT_BRIDGE_EN 0x0000000000000002ULL
18045 + * Mask support for classification
18047 +#define DPDMUX_OPT_CLS_MASK_SUPPORT 0x0000000000000020ULL
18049 +#define DPDMUX_IRQ_INDEX_IF 0x0000
18050 +#define DPDMUX_IRQ_INDEX 0x0001
18053 + * IRQ event - Indicates that the link state changed
18055 +#define DPDMUX_IRQ_EVENT_LINK_CHANGED 0x0001
18058 + * enum dpdmux_manip - DPDMUX manipulation operations
18059 + * @DPDMUX_MANIP_NONE: No manipulation on frames
18060 + * @DPDMUX_MANIP_ADD_REMOVE_S_VLAN: Add S-VLAN on egress, remove it on ingress
18062 +enum dpdmux_manip {
18063 + DPDMUX_MANIP_NONE = 0x0,
18064 + DPDMUX_MANIP_ADD_REMOVE_S_VLAN = 0x1
18068 + * enum dpdmux_method - DPDMUX method options
18069 + * @DPDMUX_METHOD_NONE: no DPDMUX method
18070 + * @DPDMUX_METHOD_C_VLAN_MAC: DPDMUX based on C-VLAN and MAC address
18071 + * @DPDMUX_METHOD_MAC: DPDMUX based on MAC address
18072 + * @DPDMUX_METHOD_C_VLAN: DPDMUX based on C-VLAN
18073 + * @DPDMUX_METHOD_S_VLAN: DPDMUX based on S-VLAN
18075 +enum dpdmux_method {
18076 + DPDMUX_METHOD_NONE = 0x0,
18077 + DPDMUX_METHOD_C_VLAN_MAC = 0x1,
18078 + DPDMUX_METHOD_MAC = 0x2,
18079 + DPDMUX_METHOD_C_VLAN = 0x3,
18080 + DPDMUX_METHOD_S_VLAN = 0x4,
18081 + DPDMUX_METHOD_CUSTOM = 0x5
18085 + * struct dpdmux_cfg - DPDMUX configuration parameters
18086 + * @method: Defines the operation method for the DPDMUX address table
18087 + * @manip: Required manipulation operation
18088 + * @num_ifs: Number of interfaces (excluding the uplink interface)
18089 + * @adv: Advanced parameters; default is all zeros;
18090 + * use this structure to change default settings
18092 +struct dpdmux_cfg {
18093 + enum dpdmux_method method;
18094 + enum dpdmux_manip manip;
18097 + * struct adv - Advanced parameters
18098 + * @options: DPDMUX options - combination of 'DPDMUX_OPT_<X>' flags
18099 + * @max_dmat_entries: Maximum entries in DPDMUX address table
18100 + * 0 - indicates default: 64 entries per interface.
18101 + * @max_mc_groups: Number of multicast groups in DPDMUX table
18102 + * 0 - indicates default: 32 multicast groups
18103 + * @max_vlan_ids: max vlan ids allowed in the system -
18104 + * relevant only case of working in mac+vlan method.
18105 + * 0 - indicates default 16 vlan ids.
18109 + u16 max_dmat_entries;
18110 + u16 max_mc_groups;
18111 + u16 max_vlan_ids;
18115 +int dpdmux_create(struct fsl_mc_io *mc_io,
18118 + const struct dpdmux_cfg *cfg,
18121 +int dpdmux_destroy(struct fsl_mc_io *mc_io,
18126 +int dpdmux_enable(struct fsl_mc_io *mc_io,
18130 +int dpdmux_disable(struct fsl_mc_io *mc_io,
18134 +int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
18139 +int dpdmux_reset(struct fsl_mc_io *mc_io,
18143 +int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
18149 +int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
18155 +int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
18161 +int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
18167 +int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
18173 +int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
18180 + * struct dpdmux_attr - Structure representing DPDMUX attributes
18181 + * @id: DPDMUX object ID
18182 + * @options: Configuration options (bitmap)
18183 + * @method: DPDMUX address table method
18184 + * @manip: DPDMUX manipulation type
18185 + * @num_ifs: Number of interfaces (excluding the uplink interface)
18186 + * @mem_size: DPDMUX frame storage memory size
18188 +struct dpdmux_attr {
18191 + enum dpdmux_method method;
18192 + enum dpdmux_manip manip;
18197 +int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
18200 + struct dpdmux_attr *attr);
18202 +int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
18205 + u16 max_frame_length);
18208 + * enum dpdmux_counter_type - Counter types
18209 + * @DPDMUX_CNT_ING_FRAME: Counts ingress frames
18210 + * @DPDMUX_CNT_ING_BYTE: Counts ingress bytes
18211 + * @DPDMUX_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
18212 + * @DPDMUX_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
18213 + * @DPDMUX_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
18214 + * @DPDMUX_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
18215 + * @DPDMUX_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
18216 + * @DPDMUX_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
18217 + * @DPDMUX_CNT_EGR_FRAME: Counts egress frames
18218 + * @DPDMUX_CNT_EGR_BYTE: Counts egress bytes
18219 + * @DPDMUX_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
18221 +enum dpdmux_counter_type {
18222 + DPDMUX_CNT_ING_FRAME = 0x0,
18223 + DPDMUX_CNT_ING_BYTE = 0x1,
18224 + DPDMUX_CNT_ING_FLTR_FRAME = 0x2,
18225 + DPDMUX_CNT_ING_FRAME_DISCARD = 0x3,
18226 + DPDMUX_CNT_ING_MCAST_FRAME = 0x4,
18227 + DPDMUX_CNT_ING_MCAST_BYTE = 0x5,
18228 + DPDMUX_CNT_ING_BCAST_FRAME = 0x6,
18229 + DPDMUX_CNT_ING_BCAST_BYTES = 0x7,
18230 + DPDMUX_CNT_EGR_FRAME = 0x8,
18231 + DPDMUX_CNT_EGR_BYTE = 0x9,
18232 + DPDMUX_CNT_EGR_FRAME_DISCARD = 0xa
18236 + * enum dpdmux_accepted_frames_type - DPDMUX frame types
18237 + * @DPDMUX_ADMIT_ALL: The device accepts VLAN tagged, untagged and
18238 + * priority-tagged frames
18239 + * @DPDMUX_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
18240 + * priority-tagged frames that are received on this
18242 + * @DPDMUX_ADMIT_ONLY_UNTAGGED: Untagged frames or priority-tagged frames
18243 + * received on this interface are accepted
18245 +enum dpdmux_accepted_frames_type {
18246 + DPDMUX_ADMIT_ALL = 0,
18247 + DPDMUX_ADMIT_ONLY_VLAN_TAGGED = 1,
18248 + DPDMUX_ADMIT_ONLY_UNTAGGED = 2
18252 + * enum dpdmux_action - DPDMUX action for un-accepted frames
18253 + * @DPDMUX_ACTION_DROP: Drop un-accepted frames
18254 + * @DPDMUX_ACTION_REDIRECT_TO_CTRL: Redirect un-accepted frames to the
18255 + * control interface
18257 +enum dpdmux_action {
18258 + DPDMUX_ACTION_DROP = 0,
18259 + DPDMUX_ACTION_REDIRECT_TO_CTRL = 1
18263 + * struct dpdmux_accepted_frames - Frame types configuration
18264 + * @type: Defines ingress accepted frames
18265 + * @unaccept_act: Defines action on frames not accepted
18267 +struct dpdmux_accepted_frames {
18268 + enum dpdmux_accepted_frames_type type;
18269 + enum dpdmux_action unaccept_act;
18272 +int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
18276 + const struct dpdmux_accepted_frames *cfg);
18279 + * struct dpdmux_if_attr - Structure representing frame types configuration
18280 + * @rate: Configured interface rate (in bits per second)
18281 + * @enabled: Indicates if interface is enabled
18282 + * @accept_frame_type: Indicates type of accepted frames for the interface
18284 +struct dpdmux_if_attr {
18287 + enum dpdmux_accepted_frames_type accept_frame_type;
18290 +int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
18294 + struct dpdmux_if_attr *attr);
18296 +int dpdmux_if_enable(struct fsl_mc_io *mc_io,
18301 +int dpdmux_if_disable(struct fsl_mc_io *mc_io,
18307 + * struct dpdmux_l2_rule - Structure representing L2 rule
18308 + * @mac_addr: MAC address
18309 + * @vlan_id: VLAN ID
18311 +struct dpdmux_l2_rule {
18316 +int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
18320 + const struct dpdmux_l2_rule *rule);
18322 +int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
18326 + const struct dpdmux_l2_rule *rule);
18328 +int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
18332 + enum dpdmux_counter_type counter_type,
18335 +int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
18340 + * Enable auto-negotiation
18342 +#define DPDMUX_LINK_OPT_AUTONEG 0x0000000000000001ULL
18344 + * Enable half-duplex mode
18346 +#define DPDMUX_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
18348 + * Enable pause frames
18350 +#define DPDMUX_LINK_OPT_PAUSE 0x0000000000000004ULL
18352 + * Enable a-symmetric pause frames
18354 +#define DPDMUX_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
18357 + * struct dpdmux_link_cfg - Structure representing DPDMUX link configuration
18359 + * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
18361 +struct dpdmux_link_cfg {
18366 +int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
18370 + struct dpdmux_link_cfg *cfg);
18372 + * struct dpdmux_link_state - Structure representing DPDMUX link state
18374 + * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
18375 + * @up: 0 - down, 1 - up
18377 +struct dpdmux_link_state {
18383 +int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
18387 + struct dpdmux_link_state *state);
18389 +int dpdmux_set_custom_key(struct fsl_mc_io *mc_io,
18392 + u64 key_cfg_iova);
18395 + * struct dpdmux_rule_cfg - Custom classification rule.
18397 + * @key_iova: DMA address of buffer storing the look-up value
18398 + * @mask_iova: DMA address of the mask used for TCAM classification
18399 + * @key_size: size, in bytes, of the look-up value. This must match the size
18400 + * of the look-up key defined using dpdmux_set_custom_key, otherwise the
18401 + * entry will never be hit
18403 +struct dpdmux_rule_cfg {
18410 + * struct dpdmux_cls_action - Action to execute for frames matching the
18411 + * classification entry
18413 + * @dest_if: Interface to forward the frames to. Port numbering is similar to
18414 + * the one used to connect interfaces:
18415 + * - 0 is the uplink port,
18416 + * - all others are downlink ports.
18418 +struct dpdmux_cls_action {
18422 +int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io,
18425 + struct dpdmux_rule_cfg *rule,
18426 + struct dpdmux_cls_action *action);
18428 +int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io,
18431 + struct dpdmux_rule_cfg *rule);
18433 +int dpdmux_get_api_version(struct fsl_mc_io *mc_io,
18438 +#endif /* __FSL_DPDMUX_H */
18439 diff --git a/drivers/staging/fsl-dpaa2/evb/evb.c b/drivers/staging/fsl-dpaa2/evb/evb.c
18440 new file mode 100644
18441 index 00000000..9ee09b42
18443 +++ b/drivers/staging/fsl-dpaa2/evb/evb.c
18445 +/* Copyright 2015 Freescale Semiconductor Inc.
18447 + * Redistribution and use in source and binary forms, with or without
18448 + * modification, are permitted provided that the following conditions are met:
18449 + * * Redistributions of source code must retain the above copyright
18450 + * notice, this list of conditions and the following disclaimer.
18451 + * * Redistributions in binary form must reproduce the above copyright
18452 + * notice, this list of conditions and the following disclaimer in the
18453 + * documentation and/or other materials provided with the distribution.
18454 + * * Neither the name of Freescale Semiconductor nor the
18455 + * names of its contributors may be used to endorse or promote products
18456 + * derived from this software without specific prior written permission.
18459 + * ALTERNATIVELY, this software may be distributed under the terms of the
18460 + * GNU General Public License ("GPL") as published by the Free Software
18461 + * Foundation, either version 2 of that License or (at your option) any
18464 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
18465 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18466 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18467 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
18468 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
18469 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
18470 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
18471 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
18472 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
18473 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
18475 +#include <linux/module.h>
18476 +#include <linux/msi.h>
18477 +#include <linux/netdevice.h>
18478 +#include <linux/etherdevice.h>
18479 +#include <linux/rtnetlink.h>
18480 +#include <linux/if_vlan.h>
18482 +#include <uapi/linux/if_bridge.h>
18483 +#include <net/netlink.h>
18485 +#include "../../fsl-mc/include/mc.h"
18487 +#include "dpdmux.h"
18488 +#include "dpdmux-cmd.h"
18490 +static const char evb_drv_version[] = "0.1";
18492 +/* Minimal supported DPDMUX version */
18493 +#define DPDMUX_MIN_VER_MAJOR 6
18494 +#define DPDMUX_MIN_VER_MINOR 0
18497 +#define DPDMUX_MAX_IRQ_NUM 2
18499 +/* MAX FRAME LENGTH (currently 10k) */
18500 +#define EVB_MAX_FRAME_LENGTH (10 * 1024)
18501 +/* MIN FRAME LENGTH (64 bytes + 4 bytes CRC) */
18502 +#define EVB_MIN_FRAME_LENGTH 68
18504 +struct evb_port_priv {
18505 + struct net_device *netdev;
18506 + struct list_head list;
18508 + struct evb_priv *evb_priv;
18509 + u8 vlans[VLAN_VID_MASK + 1];
18514 + struct evb_port_priv uplink;
18516 + struct fsl_mc_io *mc_io;
18517 + struct list_head port_list;
18518 + struct dpdmux_attr attr;
18523 +static int _evb_port_carrier_state_sync(struct net_device *netdev)
18525 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18526 + struct dpdmux_link_state state;
18529 + err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
18530 + port_priv->evb_priv->mux_handle,
18531 + port_priv->port_index, &state);
18532 + if (unlikely(err)) {
18533 + netdev_err(netdev, "dpdmux_if_get_link_state() err %d\n", err);
18537 + WARN_ONCE(state.up > 1, "Garbage read into link_state");
18540 + netif_carrier_on(port_priv->netdev);
18542 + netif_carrier_off(port_priv->netdev);
18547 +static int evb_port_open(struct net_device *netdev)
18551 + /* FIXME: enable port when support added */
18553 + err = _evb_port_carrier_state_sync(netdev);
18555 + netdev_err(netdev, "ethsw_port_carrier_state_sync err %d\n",
18563 +static netdev_tx_t evb_dropframe(struct sk_buff *skb, struct net_device *dev)
18565 + /* we don't support I/O for now, drop the frame */
18566 + dev_kfree_skb_any(skb);
18567 + return NETDEV_TX_OK;
18570 +static int evb_links_state_update(struct evb_priv *priv)
18572 + struct evb_port_priv *port_priv;
18573 + struct list_head *pos;
18576 + list_for_each(pos, &priv->port_list) {
18577 + port_priv = list_entry(pos, struct evb_port_priv, list);
18579 + err = _evb_port_carrier_state_sync(port_priv->netdev);
18581 + netdev_err(port_priv->netdev,
18582 + "_evb_port_carrier_state_sync err %d\n",
18589 +static irqreturn_t evb_irq0_handler(int irq_num, void *arg)
18591 + return IRQ_WAKE_THREAD;
18594 +static irqreturn_t _evb_irq0_handler_thread(int irq_num, void *arg)
18596 + struct device *dev = (struct device *)arg;
18597 + struct fsl_mc_device *evb_dev = to_fsl_mc_device(dev);
18598 + struct net_device *netdev = dev_get_drvdata(dev);
18599 + struct evb_priv *priv = netdev_priv(netdev);
18600 + struct fsl_mc_io *io = priv->mc_io;
18601 + u16 token = priv->mux_handle;
18602 + int irq_index = DPDMUX_IRQ_INDEX_IF;
18604 + /* Mask the events and the if_id reserved bits to be cleared on read */
18605 + u32 status = DPDMUX_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
18608 + /* Sanity check */
18609 + if (WARN_ON(!evb_dev || !evb_dev->irqs || !evb_dev->irqs[irq_index]))
18611 + if (WARN_ON(evb_dev->irqs[irq_index]->msi_desc->irq != (u32)irq_num))
18614 + err = dpdmux_get_irq_status(io, 0, token, irq_index, &status);
18615 + if (unlikely(err)) {
18616 + netdev_err(netdev, "Can't get irq status (err %d)", err);
18617 + err = dpdmux_clear_irq_status(io, 0, token, irq_index,
18619 + if (unlikely(err))
18620 + netdev_err(netdev, "Can't clear irq status (err %d)",
18625 + if (status & DPDMUX_IRQ_EVENT_LINK_CHANGED) {
18626 + err = evb_links_state_update(priv);
18627 + if (unlikely(err))
18632 + return IRQ_HANDLED;
18635 +static int evb_setup_irqs(struct fsl_mc_device *evb_dev)
18637 + struct device *dev = &evb_dev->dev;
18638 + struct net_device *netdev = dev_get_drvdata(dev);
18639 + struct evb_priv *priv = netdev_priv(netdev);
18641 + struct fsl_mc_device_irq *irq;
18642 + const int irq_index = DPDMUX_IRQ_INDEX_IF;
18643 + u32 mask = DPDMUX_IRQ_EVENT_LINK_CHANGED;
18645 + err = fsl_mc_allocate_irqs(evb_dev);
18646 + if (unlikely(err)) {
18647 + dev_err(dev, "MC irqs allocation failed\n");
18651 + if (WARN_ON(evb_dev->obj_desc.irq_count != DPDMUX_MAX_IRQ_NUM)) {
18656 + err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
18658 + if (unlikely(err)) {
18659 + dev_err(dev, "dpdmux_set_irq_enable err %d\n", err);
18663 + irq = evb_dev->irqs[irq_index];
18665 + err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
18666 + evb_irq0_handler,
18667 + _evb_irq0_handler_thread,
18668 + IRQF_NO_SUSPEND | IRQF_ONESHOT,
18669 + dev_name(dev), dev);
18670 + if (unlikely(err)) {
18671 + dev_err(dev, "devm_request_threaded_irq(): %d", err);
18675 + err = dpdmux_set_irq_mask(priv->mc_io, 0, priv->mux_handle,
18676 + irq_index, mask);
18677 + if (unlikely(err)) {
18678 + dev_err(dev, "dpdmux_set_irq_mask(): %d", err);
18679 + goto free_devm_irq;
18682 + err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
18684 + if (unlikely(err)) {
18685 + dev_err(dev, "dpdmux_set_irq_enable(): %d", err);
18686 + goto free_devm_irq;
18692 + devm_free_irq(dev, irq->msi_desc->irq, dev);
18694 + fsl_mc_free_irqs(evb_dev);
18698 +static void evb_teardown_irqs(struct fsl_mc_device *evb_dev)
18700 + struct device *dev = &evb_dev->dev;
18701 + struct net_device *netdev = dev_get_drvdata(dev);
18702 + struct evb_priv *priv = netdev_priv(netdev);
18704 + dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
18705 + DPDMUX_IRQ_INDEX_IF, 0);
18707 + devm_free_irq(dev,
18708 + evb_dev->irqs[DPDMUX_IRQ_INDEX_IF]->msi_desc->irq,
18710 + fsl_mc_free_irqs(evb_dev);
18713 +static int evb_port_add_rule(struct net_device *netdev,
18714 + const unsigned char *addr, u16 vid)
18716 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18717 + struct dpdmux_l2_rule rule = { .vlan_id = vid };
18721 + ether_addr_copy(rule.mac_addr, addr);
18723 + err = dpdmux_if_add_l2_rule(port_priv->evb_priv->mc_io,
18725 + port_priv->evb_priv->mux_handle,
18726 + port_priv->port_index, &rule);
18727 + if (unlikely(err))
18728 + netdev_err(netdev, "dpdmux_if_add_l2_rule err %d\n", err);
18732 +static int evb_port_del_rule(struct net_device *netdev,
18733 + const unsigned char *addr, u16 vid)
18735 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18736 + struct dpdmux_l2_rule rule = { .vlan_id = vid };
18740 + ether_addr_copy(rule.mac_addr, addr);
18742 + err = dpdmux_if_remove_l2_rule(port_priv->evb_priv->mc_io,
18744 + port_priv->evb_priv->mux_handle,
18745 + port_priv->port_index, &rule);
18746 + if (unlikely(err))
18747 + netdev_err(netdev, "dpdmux_if_remove_l2_rule err %d\n", err);
18751 +static bool _lookup_address(struct net_device *netdev,
18752 + const unsigned char *addr)
18754 + struct netdev_hw_addr *ha;
18755 + struct netdev_hw_addr_list *list = (is_unicast_ether_addr(addr)) ?
18756 + &netdev->uc : &netdev->mc;
18758 + netif_addr_lock_bh(netdev);
18759 + list_for_each_entry(ha, &list->list, list) {
18760 + if (ether_addr_equal(ha->addr, addr)) {
18761 + netif_addr_unlock_bh(netdev);
18765 + netif_addr_unlock_bh(netdev);
18769 +static inline int evb_port_fdb_prep(struct nlattr *tb[],
18770 + struct net_device *netdev,
18771 + const unsigned char *addr, u16 *vid,
18774 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18775 + struct evb_priv *evb_priv = port_priv->evb_priv;
18779 + if (evb_priv->attr.method != DPDMUX_METHOD_MAC &&
18780 + evb_priv->attr.method != DPDMUX_METHOD_C_VLAN_MAC) {
18781 + netdev_err(netdev,
18782 + "EVB mode does not support MAC classification\n");
18783 + return -EOPNOTSUPP;
18786 + /* check if the address is configured on this port */
18787 + if (_lookup_address(netdev, addr)) {
18795 + if (tb[NDA_VLAN] && evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) {
18796 + if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) {
18797 + netdev_err(netdev, "invalid vlan size %d\n",
18798 + nla_len(tb[NDA_VLAN]));
18802 + *vid = nla_get_u16(tb[NDA_VLAN]);
18804 + if (!*vid || *vid >= VLAN_VID_MASK) {
18805 + netdev_err(netdev, "invalid vid value 0x%04x\n", *vid);
18808 + } else if (evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) {
18809 + netdev_err(netdev,
18810 + "EVB mode requires explicit VLAN configuration\n");
18812 + } else if (tb[NDA_VLAN]) {
18813 + netdev_warn(netdev, "VLAN not supported, argument ignored\n");
18819 +static int evb_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
18820 + struct net_device *netdev,
18821 + const unsigned char *addr, u16 vid, u16 flags)
18826 + /* TODO: add replace support when added to iproute bridge */
18827 + if (!(flags & NLM_F_REQUEST)) {
18828 + netdev_err(netdev,
18829 + "evb_port_fdb_add unexpected flags value %08x\n",
18834 + err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 0);
18835 + if (unlikely(err))
18838 + err = evb_port_add_rule(netdev, addr, _vid);
18839 + if (unlikely(err))
18842 + if (is_unicast_ether_addr(addr)) {
18843 + err = dev_uc_add(netdev, addr);
18844 + if (unlikely(err)) {
18845 + netdev_err(netdev, "dev_uc_add err %d\n", err);
18849 + err = dev_mc_add(netdev, addr);
18850 + if (unlikely(err)) {
18851 + netdev_err(netdev, "dev_mc_add err %d\n", err);
18859 +static int evb_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
18860 + struct net_device *netdev,
18861 + const unsigned char *addr, u16 vid)
18866 + err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 1);
18867 + if (unlikely(err))
18870 + err = evb_port_del_rule(netdev, addr, _vid);
18871 + if (unlikely(err))
18874 + if (is_unicast_ether_addr(addr)) {
18875 + err = dev_uc_del(netdev, addr);
18876 + if (unlikely(err)) {
18877 + netdev_err(netdev, "dev_uc_del err %d\n", err);
18881 + err = dev_mc_del(netdev, addr);
18882 + if (unlikely(err)) {
18883 + netdev_err(netdev, "dev_mc_del err %d\n", err);
18891 +static int evb_change_mtu(struct net_device *netdev,
18894 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18895 + struct evb_priv *evb_priv = port_priv->evb_priv;
18896 + struct list_head *pos;
18899 + /* This operation is not permitted on downlinks */
18900 + if (port_priv->port_index > 0)
18903 + if (mtu < EVB_MIN_FRAME_LENGTH || mtu > EVB_MAX_FRAME_LENGTH) {
18904 + netdev_err(netdev, "Invalid MTU %d. Valid range is: %d..%d\n",
18905 + mtu, EVB_MIN_FRAME_LENGTH, EVB_MAX_FRAME_LENGTH);
18909 + err = dpdmux_set_max_frame_length(evb_priv->mc_io,
18911 + evb_priv->mux_handle,
18914 + if (unlikely(err)) {
18915 + netdev_err(netdev, "dpdmux_ul_set_max_frame_length err %d\n",
18920 + /* Update the max frame length for downlinks */
18921 + list_for_each(pos, &evb_priv->port_list) {
18922 + port_priv = list_entry(pos, struct evb_port_priv, list);
18923 + port_priv->netdev->mtu = mtu;
18926 + netdev->mtu = mtu;
18930 +static const struct nla_policy ifla_br_policy[IFLA_MAX + 1] = {
18931 + [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
18932 + [IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
18933 + [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
18934 + .len = sizeof(struct bridge_vlan_info), },
18937 +static int evb_setlink_af_spec(struct net_device *netdev,
18938 + struct nlattr **tb)
18940 + struct bridge_vlan_info *vinfo;
18941 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18944 + if (!tb[IFLA_BRIDGE_VLAN_INFO]) {
18945 + netdev_err(netdev, "no VLAN INFO in nlmsg\n");
18946 + return -EOPNOTSUPP;
18949 + vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
18951 + if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
18954 + err = evb_port_add_rule(netdev, NULL, vinfo->vid);
18955 + if (unlikely(err))
18958 + port_priv->vlans[vinfo->vid] = 1;
18963 +static int evb_setlink(struct net_device *netdev,
18964 + struct nlmsghdr *nlh,
18967 + struct evb_port_priv *port_priv = netdev_priv(netdev);
18968 + struct evb_priv *evb_priv = port_priv->evb_priv;
18969 + struct nlattr *attr;
18970 + struct nlattr *tb[(IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX) ?
18971 + IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX + 1];
18974 + if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN &&
18975 + evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) {
18976 + netdev_err(netdev,
18977 + "EVB mode does not support VLAN only classification\n");
18978 + return -EOPNOTSUPP;
18981 + attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
18983 + err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr,
18985 + if (unlikely(err)) {
18986 + netdev_err(netdev,
18987 + "nla_parse_nested for br_policy err %d\n",
18992 + err = evb_setlink_af_spec(netdev, tb);
18996 + netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC\n");
18997 + return -EOPNOTSUPP;
19000 +static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev)
19002 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19003 + struct evb_priv *evb_priv = port_priv->evb_priv;
19004 + u8 operstate = netif_running(netdev) ?
19005 + netdev->operstate : IF_OPER_DOWN;
19009 + err = nla_put_string(skb, IFLA_IFNAME, netdev->name);
19010 + if (unlikely(err))
19011 + goto nla_put_err;
19012 + err = nla_put_u32(skb, IFLA_MASTER, evb_priv->uplink.netdev->ifindex);
19013 + if (unlikely(err))
19014 + goto nla_put_err;
19015 + err = nla_put_u32(skb, IFLA_MTU, netdev->mtu);
19016 + if (unlikely(err))
19017 + goto nla_put_err;
19018 + err = nla_put_u8(skb, IFLA_OPERSTATE, operstate);
19019 + if (unlikely(err))
19020 + goto nla_put_err;
19021 + if (netdev->addr_len) {
19022 + err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len,
19023 + netdev->dev_addr);
19024 + if (unlikely(err))
19025 + goto nla_put_err;
19028 + iflink = dev_get_iflink(netdev);
19029 + if (netdev->ifindex != iflink) {
19030 + err = nla_put_u32(skb, IFLA_LINK, iflink);
19031 + if (unlikely(err))
19032 + goto nla_put_err;
19038 + netdev_err(netdev, "nla_put_ err %d\n", err);
19042 +static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev)
19044 + struct nlattr *nest;
19047 + nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
19049 + netdev_err(netdev, "nla_nest_start failed\n");
19053 + err = nla_put_u8(skb, IFLA_BRPORT_STATE, BR_STATE_FORWARDING);
19054 + if (unlikely(err))
19055 + goto nla_put_err;
19056 + err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0);
19057 + if (unlikely(err))
19058 + goto nla_put_err;
19059 + err = nla_put_u32(skb, IFLA_BRPORT_COST, 0);
19060 + if (unlikely(err))
19061 + goto nla_put_err;
19062 + err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0);
19063 + if (unlikely(err))
19064 + goto nla_put_err;
19065 + err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0);
19066 + if (unlikely(err))
19067 + goto nla_put_err;
19068 + err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0);
19069 + if (unlikely(err))
19070 + goto nla_put_err;
19071 + err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0);
19072 + if (unlikely(err))
19073 + goto nla_put_err;
19074 + err = nla_put_u8(skb, IFLA_BRPORT_LEARNING, 0);
19075 + if (unlikely(err))
19076 + goto nla_put_err;
19077 + err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, 1);
19078 + if (unlikely(err))
19079 + goto nla_put_err;
19080 + nla_nest_end(skb, nest);
19085 + netdev_err(netdev, "nla_put_ err %d\n", err);
19086 + nla_nest_cancel(skb, nest);
19090 +static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev)
19092 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19093 + struct nlattr *nest;
19094 + struct bridge_vlan_info vinfo;
19095 + const u8 *vlans = port_priv->vlans;
19099 + nest = nla_nest_start(skb, IFLA_AF_SPEC);
19101 + netdev_err(netdev, "nla_nest_start failed");
19105 + for (i = 0; i < VLAN_VID_MASK + 1; i++) {
19112 + err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
19113 + sizeof(vinfo), &vinfo);
19114 + if (unlikely(err))
19115 + goto nla_put_err;
19118 + nla_nest_end(skb, nest);
19123 + netdev_err(netdev, "nla_put_ err %d\n", err);
19124 + nla_nest_cancel(skb, nest);
19128 +static int evb_getlink(struct sk_buff *skb, u32 pid, u32 seq,
19129 + struct net_device *netdev, u32 filter_mask, int nlflags)
19131 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19132 + struct evb_priv *evb_priv = port_priv->evb_priv;
19133 + struct ifinfomsg *hdr;
19134 + struct nlmsghdr *nlh;
19137 + if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN &&
19138 + evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) {
19142 + nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI);
19144 + return -EMSGSIZE;
19146 + hdr = nlmsg_data(nlh);
19147 + memset(hdr, 0, sizeof(*hdr));
19148 + hdr->ifi_family = AF_BRIDGE;
19149 + hdr->ifi_type = netdev->type;
19150 + hdr->ifi_index = netdev->ifindex;
19151 + hdr->ifi_flags = dev_get_flags(netdev);
19153 + err = __nla_put_netdev(skb, netdev);
19154 + if (unlikely(err))
19155 + goto nla_put_err;
19157 + err = __nla_put_port(skb, netdev);
19158 + if (unlikely(err))
19159 + goto nla_put_err;
19161 + /* Check if the VID information is requested */
19162 + if (filter_mask & RTEXT_FILTER_BRVLAN) {
19163 + err = __nla_put_vlan(skb, netdev);
19164 + if (unlikely(err))
19165 + goto nla_put_err;
19168 + nlmsg_end(skb, nlh);
19172 + nlmsg_cancel(skb, nlh);
19173 + return -EMSGSIZE;
19176 +static int evb_dellink(struct net_device *netdev,
19177 + struct nlmsghdr *nlh,
19180 + struct nlattr *tb[IFLA_BRIDGE_MAX + 1];
19181 + struct nlattr *spec;
19182 + struct bridge_vlan_info *vinfo;
19183 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19186 + spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
19190 + err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy);
19191 + if (unlikely(err))
19194 + if (!tb[IFLA_BRIDGE_VLAN_INFO])
19195 + return -EOPNOTSUPP;
19197 + vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
19199 + if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
19202 + err = evb_port_del_rule(netdev, NULL, vinfo->vid);
19203 + if (unlikely(err)) {
19204 + netdev_err(netdev, "evb_port_del_rule err %d\n", err);
19207 + port_priv->vlans[vinfo->vid] = 0;
19212 +void evb_port_get_stats(struct net_device *netdev,
19213 + struct rtnl_link_stats64 *storage)
19215 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19219 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19221 + port_priv->evb_priv->mux_handle,
19222 + port_priv->port_index,
19223 + DPDMUX_CNT_ING_FRAME, &storage->rx_packets);
19224 + if (unlikely(err))
19227 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19229 + port_priv->evb_priv->mux_handle,
19230 + port_priv->port_index,
19231 + DPDMUX_CNT_ING_BYTE, &storage->rx_bytes);
19232 + if (unlikely(err))
19235 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19237 + port_priv->evb_priv->mux_handle,
19238 + port_priv->port_index,
19239 + DPDMUX_CNT_ING_FLTR_FRAME, &tmp);
19240 + if (unlikely(err))
19243 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19245 + port_priv->evb_priv->mux_handle,
19246 + port_priv->port_index,
19247 + DPDMUX_CNT_ING_FRAME_DISCARD,
19248 + &storage->rx_dropped);
19249 + if (unlikely(err)) {
19250 + storage->rx_dropped = tmp;
19253 + storage->rx_dropped += tmp;
19255 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19257 + port_priv->evb_priv->mux_handle,
19258 + port_priv->port_index,
19259 + DPDMUX_CNT_ING_MCAST_FRAME,
19260 + &storage->multicast);
19261 + if (unlikely(err))
19264 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19266 + port_priv->evb_priv->mux_handle,
19267 + port_priv->port_index,
19268 + DPDMUX_CNT_EGR_FRAME, &storage->tx_packets);
19269 + if (unlikely(err))
19272 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19274 + port_priv->evb_priv->mux_handle,
19275 + port_priv->port_index,
19276 + DPDMUX_CNT_EGR_BYTE, &storage->tx_bytes);
19277 + if (unlikely(err))
19280 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19282 + port_priv->evb_priv->mux_handle,
19283 + port_priv->port_index,
19284 + DPDMUX_CNT_EGR_FRAME_DISCARD,
19285 + &storage->tx_dropped);
19286 + if (unlikely(err))
19292 + netdev_err(netdev, "dpdmux_if_get_counter err %d\n", err);
19295 +static const struct net_device_ops evb_port_ops = {
19296 + .ndo_open = &evb_port_open,
19298 + .ndo_start_xmit = &evb_dropframe,
19300 + .ndo_fdb_add = &evb_port_fdb_add,
19301 + .ndo_fdb_del = &evb_port_fdb_del,
19303 + .ndo_get_stats64 = &evb_port_get_stats,
19304 + .ndo_change_mtu = &evb_change_mtu,
19307 +static void evb_get_drvinfo(struct net_device *netdev,
19308 + struct ethtool_drvinfo *drvinfo)
19310 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19311 + u16 version_major, version_minor;
19314 + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
19315 + strlcpy(drvinfo->version, evb_drv_version, sizeof(drvinfo->version));
19317 + err = dpdmux_get_api_version(port_priv->evb_priv->mc_io, 0,
19321 + strlcpy(drvinfo->fw_version, "N/A",
19322 + sizeof(drvinfo->fw_version));
19324 + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
19325 + "%u.%u", version_major, version_minor);
19327 + strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
19328 + sizeof(drvinfo->bus_info));
19331 +static int evb_get_settings(struct net_device *netdev,
19332 + struct ethtool_cmd *cmd)
19334 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19335 + struct dpdmux_link_state state = {0};
19338 + err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
19339 + port_priv->evb_priv->mux_handle,
19340 + port_priv->port_index,
19343 + netdev_err(netdev, "ERROR %d getting link state", err);
19347 + /* At the moment, we have no way of interrogating the DPMAC
19348 + * from the DPDMUX side or there may not exist a DPMAC at all.
19349 + * Report only autoneg state, duplexity and speed.
19351 + if (state.options & DPDMUX_LINK_OPT_AUTONEG)
19352 + cmd->autoneg = AUTONEG_ENABLE;
19353 + if (!(state.options & DPDMUX_LINK_OPT_HALF_DUPLEX))
19354 + cmd->duplex = DUPLEX_FULL;
19355 + ethtool_cmd_speed_set(cmd, state.rate);
19361 +static int evb_set_settings(struct net_device *netdev,
19362 + struct ethtool_cmd *cmd)
19364 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19365 + struct dpdmux_link_state state = {0};
19366 + struct dpdmux_link_cfg cfg = {0};
19369 + netdev_dbg(netdev, "Setting link parameters...");
19371 + err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
19372 + port_priv->evb_priv->mux_handle,
19373 + port_priv->port_index,
19376 + netdev_err(netdev, "ERROR %d getting link state", err);
19380 + /* Due to a temporary MC limitation, the DPDMUX port must be down
19381 + * in order to be able to change link settings. Taking steps to let
19382 + * the user know that.
19384 + if (netif_running(netdev)) {
19385 + netdev_info(netdev,
19386 + "Sorry, interface must be brought down first.\n");
19390 + cfg.options = state.options;
19391 + cfg.rate = ethtool_cmd_speed(cmd);
19392 + if (cmd->autoneg == AUTONEG_ENABLE)
19393 + cfg.options |= DPDMUX_LINK_OPT_AUTONEG;
19395 + cfg.options &= ~DPDMUX_LINK_OPT_AUTONEG;
19396 + if (cmd->duplex == DUPLEX_HALF)
19397 + cfg.options |= DPDMUX_LINK_OPT_HALF_DUPLEX;
19399 + cfg.options &= ~DPDMUX_LINK_OPT_HALF_DUPLEX;
19401 + err = dpdmux_if_set_link_cfg(port_priv->evb_priv->mc_io, 0,
19402 + port_priv->evb_priv->mux_handle,
19403 + port_priv->port_index,
19406 + /* ethtool will be loud enough if we return an error; no point
19407 + * in putting our own error message on the console by default
19409 + netdev_dbg(netdev, "ERROR %d setting link cfg", err);
19416 + enum dpdmux_counter_type id;
19417 + char name[ETH_GSTRING_LEN];
19418 +} evb_ethtool_counters[] = {
19419 + {DPDMUX_CNT_ING_FRAME, "rx frames"},
19420 + {DPDMUX_CNT_ING_BYTE, "rx bytes"},
19421 + {DPDMUX_CNT_ING_FLTR_FRAME, "rx filtered frames"},
19422 + {DPDMUX_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
19423 + {DPDMUX_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
19424 + {DPDMUX_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
19425 + {DPDMUX_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
19426 + {DPDMUX_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
19427 + {DPDMUX_CNT_EGR_FRAME, "tx frames"},
19428 + {DPDMUX_CNT_EGR_BYTE, "tx bytes"},
19429 + {DPDMUX_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
19432 +static int evb_ethtool_get_sset_count(struct net_device *dev, int sset)
19435 + case ETH_SS_STATS:
19436 + return ARRAY_SIZE(evb_ethtool_counters);
19438 + return -EOPNOTSUPP;
19442 +static void evb_ethtool_get_strings(struct net_device *netdev,
19443 + u32 stringset, u8 *data)
19447 + switch (stringset) {
19448 + case ETH_SS_STATS:
19449 + for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++)
19450 + memcpy(data + i * ETH_GSTRING_LEN,
19451 + evb_ethtool_counters[i].name, ETH_GSTRING_LEN);
19456 +static void evb_ethtool_get_stats(struct net_device *netdev,
19457 + struct ethtool_stats *stats,
19460 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19464 + for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++) {
19465 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19467 + port_priv->evb_priv->mux_handle,
19468 + port_priv->port_index,
19469 + evb_ethtool_counters[i].id,
19472 + netdev_err(netdev, "dpdmux_if_get_counter[%s] err %d\n",
19473 + evb_ethtool_counters[i].name, err);
19477 +static const struct ethtool_ops evb_port_ethtool_ops = {
19478 + .get_drvinfo = &evb_get_drvinfo,
19479 + .get_link = ðtool_op_get_link,
19480 + .get_settings = &evb_get_settings,
19481 + .set_settings = &evb_set_settings,
19482 + .get_strings = &evb_ethtool_get_strings,
19483 + .get_ethtool_stats = &evb_ethtool_get_stats,
19484 + .get_sset_count = &evb_ethtool_get_sset_count,
19487 +static int evb_open(struct net_device *netdev)
19489 + struct evb_priv *priv = netdev_priv(netdev);
19492 + err = dpdmux_enable(priv->mc_io, 0, priv->mux_handle);
19493 + if (unlikely(err))
19494 + netdev_err(netdev, "dpdmux_enable err %d\n", err);
19499 +static int evb_close(struct net_device *netdev)
19501 + struct evb_priv *priv = netdev_priv(netdev);
19504 + err = dpdmux_disable(priv->mc_io, 0, priv->mux_handle);
19505 + if (unlikely(err))
19506 + netdev_err(netdev, "dpdmux_disable err %d\n", err);
19511 +static const struct net_device_ops evb_ops = {
19512 + .ndo_start_xmit = &evb_dropframe,
19513 + .ndo_open = &evb_open,
19514 + .ndo_stop = &evb_close,
19516 + .ndo_bridge_setlink = &evb_setlink,
19517 + .ndo_bridge_getlink = &evb_getlink,
19518 + .ndo_bridge_dellink = &evb_dellink,
19520 + .ndo_get_stats64 = &evb_port_get_stats,
19521 + .ndo_change_mtu = &evb_change_mtu,
19524 +static int evb_takedown(struct fsl_mc_device *evb_dev)
19526 + struct device *dev = &evb_dev->dev;
19527 + struct net_device *netdev = dev_get_drvdata(dev);
19528 + struct evb_priv *priv = netdev_priv(netdev);
19531 + err = dpdmux_close(priv->mc_io, 0, priv->mux_handle);
19532 + if (unlikely(err))
19533 + dev_warn(dev, "dpdmux_close err %d\n", err);
19538 +static int evb_init(struct fsl_mc_device *evb_dev)
19540 + struct device *dev = &evb_dev->dev;
19541 + struct net_device *netdev = dev_get_drvdata(dev);
19542 + struct evb_priv *priv = netdev_priv(netdev);
19543 + u16 version_major;
19544 + u16 version_minor;
19547 + priv->dev_id = evb_dev->obj_desc.id;
19549 + err = dpdmux_open(priv->mc_io, 0, priv->dev_id, &priv->mux_handle);
19550 + if (unlikely(err)) {
19551 + dev_err(dev, "dpdmux_open err %d\n", err);
19554 + if (!priv->mux_handle) {
19555 + dev_err(dev, "dpdmux_open returned null handle but no error\n");
19560 + err = dpdmux_get_attributes(priv->mc_io, 0, priv->mux_handle,
19562 + if (unlikely(err)) {
19563 + dev_err(dev, "dpdmux_get_attributes err %d\n", err);
19567 + err = dpdmux_get_api_version(priv->mc_io, 0,
19570 + if (unlikely(err)) {
19571 + dev_err(dev, "dpdmux_get_api_version err %d\n", err);
19575 + /* Minimum supported DPDMUX version check */
19576 + if (version_major < DPDMUX_MIN_VER_MAJOR ||
19577 + (version_major == DPDMUX_MIN_VER_MAJOR &&
19578 + version_minor < DPDMUX_MIN_VER_MINOR)) {
19579 + dev_err(dev, "DPDMUX version %d.%d not supported. Use %d.%d or greater.\n",
19580 + version_major, version_minor,
19581 + DPDMUX_MIN_VER_MAJOR, DPDMUX_MIN_VER_MAJOR);
19586 + err = dpdmux_reset(priv->mc_io, 0, priv->mux_handle);
19587 + if (unlikely(err)) {
19588 + dev_err(dev, "dpdmux_reset err %d\n", err);
19595 + dpdmux_close(priv->mc_io, 0, priv->mux_handle);
19600 +static int evb_remove(struct fsl_mc_device *evb_dev)
19602 + struct device *dev = &evb_dev->dev;
19603 + struct net_device *netdev = dev_get_drvdata(dev);
19604 + struct evb_priv *priv = netdev_priv(netdev);
19605 + struct evb_port_priv *port_priv;
19606 + struct list_head *pos;
19608 + list_for_each(pos, &priv->port_list) {
19609 + port_priv = list_entry(pos, struct evb_port_priv, list);
19612 + netdev_upper_dev_unlink(port_priv->netdev, netdev);
19615 + unregister_netdev(port_priv->netdev);
19616 + free_netdev(port_priv->netdev);
19619 + evb_teardown_irqs(evb_dev);
19621 + unregister_netdev(netdev);
19623 + evb_takedown(evb_dev);
19624 + fsl_mc_portal_free(priv->mc_io);
19626 + dev_set_drvdata(dev, NULL);
19627 + free_netdev(netdev);
19632 +static int evb_probe(struct fsl_mc_device *evb_dev)
19634 + struct device *dev;
19635 + struct evb_priv *priv = NULL;
19636 + struct net_device *netdev = NULL;
19637 + char port_name[IFNAMSIZ];
19641 + dev = &evb_dev->dev;
19643 + /* register switch device, it's for management only - no I/O */
19644 + netdev = alloc_etherdev(sizeof(*priv));
19646 + dev_err(dev, "alloc_etherdev error\n");
19649 + netdev->netdev_ops = &evb_ops;
19651 + dev_set_drvdata(dev, netdev);
19653 + priv = netdev_priv(netdev);
19655 + err = fsl_mc_portal_allocate(evb_dev, 0, &priv->mc_io);
19656 + if (unlikely(err)) {
19657 + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
19658 + goto err_free_netdev;
19660 + if (!priv->mc_io) {
19661 + dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
19663 + goto err_free_netdev;
19666 + err = evb_init(evb_dev);
19667 + if (unlikely(err)) {
19668 + dev_err(dev, "evb init err %d\n", err);
19669 + goto err_free_cmdport;
19672 + INIT_LIST_HEAD(&priv->port_list);
19673 + netdev->flags |= IFF_PROMISC | IFF_MASTER;
19675 + dev_alloc_name(netdev, "evb%d");
19677 + /* register switch ports */
19678 + snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name);
19680 + /* only register downlinks? */
19681 + for (i = 0; i < priv->attr.num_ifs + 1; i++) {
19682 + struct net_device *port_netdev;
19683 + struct evb_port_priv *port_priv;
19687 + alloc_etherdev(sizeof(struct evb_port_priv));
19688 + if (!port_netdev) {
19689 + dev_err(dev, "alloc_etherdev error\n");
19690 + goto err_takedown;
19693 + port_priv = netdev_priv(port_netdev);
19695 + port_netdev->flags |= IFF_PROMISC | IFF_SLAVE;
19697 + dev_alloc_name(port_netdev, port_name);
19699 + port_netdev = netdev;
19700 + port_priv = &priv->uplink;
19703 + port_priv->netdev = port_netdev;
19704 + port_priv->evb_priv = priv;
19705 + port_priv->port_index = i;
19707 + SET_NETDEV_DEV(port_netdev, dev);
19710 + port_netdev->netdev_ops = &evb_port_ops;
19712 + err = register_netdev(port_netdev);
19714 + dev_err(dev, "register_netdev err %d\n", err);
19715 + free_netdev(port_netdev);
19716 + goto err_takedown;
19720 + err = netdev_master_upper_dev_link(port_netdev, netdev,
19722 + if (unlikely(err)) {
19723 + dev_err(dev, "netdev_master_upper_dev_link err %d\n",
19725 + unregister_netdev(port_netdev);
19726 + free_netdev(port_netdev);
19728 + goto err_takedown;
19730 + rtmsg_ifinfo(RTM_NEWLINK, port_netdev,
19731 + IFF_SLAVE, GFP_KERNEL);
19734 + list_add(&port_priv->list, &priv->port_list);
19736 + err = register_netdev(netdev);
19739 + dev_err(dev, "register_netdev error %d\n", err);
19740 + goto err_takedown;
19744 + port_netdev->ethtool_ops = &evb_port_ethtool_ops;
19746 + /* ports are up from init */
19748 + err = dev_open(port_netdev);
19750 + if (unlikely(err))
19751 + dev_warn(dev, "dev_open err %d\n", err);
19755 + err = evb_setup_irqs(evb_dev);
19756 + if (unlikely(err)) {
19757 + dev_warn(dev, "evb_setup_irqs err %d\n", err);
19758 + goto err_takedown;
19761 + dev_info(dev, "probed evb device with %d ports\n",
19762 + priv->attr.num_ifs);
19766 + evb_remove(evb_dev);
19768 + fsl_mc_portal_free(priv->mc_io);
19773 +static const struct fsl_mc_device_id evb_match_id_table[] = {
19775 + .vendor = FSL_MC_VENDOR_FREESCALE,
19776 + .obj_type = "dpdmux",
19781 +static struct fsl_mc_driver evb_drv = {
19783 + .name = KBUILD_MODNAME,
19784 + .owner = THIS_MODULE,
19786 + .probe = evb_probe,
19787 + .remove = evb_remove,
19788 + .match_id_table = evb_match_id_table,
19791 +module_fsl_mc_driver(evb_drv);
19793 +MODULE_LICENSE("GPL");
19794 +MODULE_DESCRIPTION("Layerscape DPAA Edge Virtual Bridge driver (prototype)");
19795 diff --git a/drivers/staging/fsl-dpaa2/mac/Kconfig b/drivers/staging/fsl-dpaa2/mac/Kconfig
19796 new file mode 100644
19797 index 00000000..c94f7c1b
19799 +++ b/drivers/staging/fsl-dpaa2/mac/Kconfig
19801 +config FSL_DPAA2_MAC
19802 + tristate "DPAA2 MAC / PHY interface"
19803 + depends on FSL_MC_BUS && FSL_DPAA2
19804 + select MDIO_BUS_MUX_MMIOREG
19805 + select FSL_XGMAC_MDIO
19808 + Prototype driver for DPAA2 MAC / PHY interface object.
19809 + This driver works as a proxy between phylib including phy drivers and
19810 + the MC firmware. It receives updates on link state changes from PHY
19811 + lib and forwards them to MC and receives interrupt from MC whenever
19812 + a request is made to change the link state.
19815 +config FSL_DPAA2_MAC_NETDEVS
19816 + bool "Expose net interfaces for PHYs"
19818 + depends on FSL_DPAA2_MAC
19820 + Exposes macX net interfaces which allow direct control over MACs and
19823 + Leave disabled if unsure.
19824 diff --git a/drivers/staging/fsl-dpaa2/mac/Makefile b/drivers/staging/fsl-dpaa2/mac/Makefile
19825 new file mode 100644
19826 index 00000000..bda94101
19828 +++ b/drivers/staging/fsl-dpaa2/mac/Makefile
19831 +obj-$(CONFIG_FSL_DPAA2_MAC) += dpaa2-mac.o
19833 +dpaa2-mac-objs := mac.o dpmac.o
19836 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
19839 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
19840 diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
19841 new file mode 100644
19842 index 00000000..abdc3c0d
19844 +++ b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
19846 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
19848 + * Redistribution and use in source and binary forms, with or without
19849 + * modification, are permitted provided that the following conditions are met:
19850 + * * Redistributions of source code must retain the above copyright
19851 + * notice, this list of conditions and the following disclaimer.
19852 + * * Redistributions in binary form must reproduce the above copyright
19853 + * notice, this list of conditions and the following disclaimer in the
19854 + * documentation and/or other materials provided with the distribution.
19855 + * * Neither the name of the above-listed copyright holders nor the
19856 + * names of any contributors may be used to endorse or promote products
19857 + * derived from this software without specific prior written permission.
19860 + * ALTERNATIVELY, this software may be distributed under the terms of the
19861 + * GNU General Public License ("GPL") as published by the Free Software
19862 + * Foundation, either version 2 of that License or (at your option) any
19865 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19866 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19867 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19868 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
19869 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19870 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19871 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19872 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
19873 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
19874 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
19875 + * POSSIBILITY OF SUCH DAMAGE.
19877 +#ifndef _FSL_DPMAC_CMD_H
19878 +#define _FSL_DPMAC_CMD_H
19880 +/* DPMAC Version */
19881 +#define DPMAC_VER_MAJOR 4
19882 +#define DPMAC_VER_MINOR 2
19883 +#define DPMAC_CMD_BASE_VERSION 1
19884 +#define DPMAC_CMD_ID_OFFSET 4
19886 +#define DPMAC_CMD(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_BASE_VERSION)
19889 +#define DPMAC_CMDID_CLOSE DPMAC_CMD(0x800)
19890 +#define DPMAC_CMDID_OPEN DPMAC_CMD(0x80c)
19891 +#define DPMAC_CMDID_CREATE DPMAC_CMD(0x90c)
19892 +#define DPMAC_CMDID_DESTROY DPMAC_CMD(0x98c)
19893 +#define DPMAC_CMDID_GET_API_VERSION DPMAC_CMD(0xa0c)
19895 +#define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004)
19896 +#define DPMAC_CMDID_RESET DPMAC_CMD(0x005)
19898 +#define DPMAC_CMDID_SET_IRQ_ENABLE DPMAC_CMD(0x012)
19899 +#define DPMAC_CMDID_GET_IRQ_ENABLE DPMAC_CMD(0x013)
19900 +#define DPMAC_CMDID_SET_IRQ_MASK DPMAC_CMD(0x014)
19901 +#define DPMAC_CMDID_GET_IRQ_MASK DPMAC_CMD(0x015)
19902 +#define DPMAC_CMDID_GET_IRQ_STATUS DPMAC_CMD(0x016)
19903 +#define DPMAC_CMDID_CLEAR_IRQ_STATUS DPMAC_CMD(0x017)
19905 +#define DPMAC_CMDID_GET_LINK_CFG DPMAC_CMD(0x0c2)
19906 +#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD(0x0c3)
19907 +#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4)
19909 +#define DPMAC_CMDID_SET_PORT_MAC_ADDR DPMAC_CMD(0x0c5)
19911 +/* Macros for accessing command fields smaller than 1byte */
19912 +#define DPMAC_MASK(field) \
19913 + GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \
19914 + DPMAC_##field##_SHIFT)
19915 +#define dpmac_set_field(var, field, val) \
19916 + ((var) |= (((val) << DPMAC_##field##_SHIFT) & DPMAC_MASK(field)))
19917 +#define dpmac_get_field(var, field) \
19918 + (((var) & DPMAC_MASK(field)) >> DPMAC_##field##_SHIFT)
19920 +struct dpmac_cmd_open {
19924 +struct dpmac_cmd_create {
19928 +struct dpmac_cmd_destroy {
19932 +struct dpmac_cmd_set_irq_enable {
19938 +struct dpmac_cmd_get_irq_enable {
19943 +struct dpmac_rsp_get_irq_enable {
19947 +struct dpmac_cmd_set_irq_mask {
19952 +struct dpmac_cmd_get_irq_mask {
19957 +struct dpmac_rsp_get_irq_mask {
19961 +struct dpmac_cmd_get_irq_status {
19966 +struct dpmac_rsp_get_irq_status {
19970 +struct dpmac_cmd_clear_irq_status {
19975 +struct dpmac_rsp_get_attributes {
19982 +struct dpmac_rsp_get_link_cfg {
19987 +#define DPMAC_STATE_SIZE 1
19988 +#define DPMAC_STATE_SHIFT 0
19990 +struct dpmac_cmd_set_link_state {
19994 + /* only least significant bit is valid */
19998 +struct dpmac_cmd_get_counter {
20002 +struct dpmac_rsp_get_counter {
20007 +struct dpmac_rsp_get_api_version {
20012 +struct dpmac_cmd_set_port_mac_addr {
20017 +#endif /* _FSL_DPMAC_CMD_H */
20018 diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac.c b/drivers/staging/fsl-dpaa2/mac/dpmac.c
20019 new file mode 100644
20020 index 00000000..f7827423
20022 +++ b/drivers/staging/fsl-dpaa2/mac/dpmac.c
20024 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
20026 + * Redistribution and use in source and binary forms, with or without
20027 + * modification, are permitted provided that the following conditions are met:
20028 + * * Redistributions of source code must retain the above copyright
20029 + * notice, this list of conditions and the following disclaimer.
20030 + * * Redistributions in binary form must reproduce the above copyright
20031 + * notice, this list of conditions and the following disclaimer in the
20032 + * documentation and/or other materials provided with the distribution.
20033 + * * Neither the name of the above-listed copyright holders nor the
20034 + * names of any contributors may be used to endorse or promote products
20035 + * derived from this software without specific prior written permission.
20038 + * ALTERNATIVELY, this software may be distributed under the terms of the
20039 + * GNU General Public License ("GPL") as published by the Free Software
20040 + * Foundation, either version 2 of that License or (at your option) any
20043 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20044 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20045 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20046 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
20047 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20048 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20049 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20050 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20051 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
20052 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
20053 + * POSSIBILITY OF SUCH DAMAGE.
20055 +#include "../../fsl-mc/include/mc-sys.h"
20056 +#include "../../fsl-mc/include/mc-cmd.h"
20057 +#include "dpmac.h"
20058 +#include "dpmac-cmd.h"
20061 + * dpmac_open() - Open a control session for the specified object.
20062 + * @mc_io: Pointer to MC portal's I/O object
20063 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20064 + * @dpmac_id: DPMAC unique ID
20065 + * @token: Returned token; use in subsequent API calls
20067 + * This function can be used to open a control session for an
20068 + * already created object; an object may have been declared in
20069 + * the DPL or by calling the dpmac_create function.
20070 + * This function returns a unique authentication token,
20071 + * associated with the specific object ID and the specific MC
20072 + * portal; this token must be used in all subsequent commands for
20073 + * this specific object
20075 + * Return: '0' on Success; Error code otherwise.
20077 +int dpmac_open(struct fsl_mc_io *mc_io,
20082 + struct dpmac_cmd_open *cmd_params;
20083 + struct mc_command cmd = { 0 };
20086 + /* prepare command */
20087 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN,
20090 + cmd_params = (struct dpmac_cmd_open *)cmd.params;
20091 + cmd_params->dpmac_id = cpu_to_le32(dpmac_id);
20093 + /* send command to mc*/
20094 + err = mc_send_command(mc_io, &cmd);
20098 + /* retrieve response parameters */
20099 + *token = mc_cmd_hdr_read_token(&cmd);
20105 + * dpmac_close() - Close the control session of the object
20106 + * @mc_io: Pointer to MC portal's I/O object
20107 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20108 + * @token: Token of DPMAC object
20110 + * After this function is called, no further operations are
20111 + * allowed on the object without opening a new control session.
20113 + * Return: '0' on Success; Error code otherwise.
20115 +int dpmac_close(struct fsl_mc_io *mc_io,
20119 + struct mc_command cmd = { 0 };
20121 + /* prepare command */
20122 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags,
20125 + /* send command to mc*/
20126 + return mc_send_command(mc_io, &cmd);
20130 + * dpmac_create() - Create the DPMAC object.
20131 + * @mc_io: Pointer to MC portal's I/O object
20132 + * @dprc_token: Parent container token; '0' for default container
20133 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20134 + * @cfg: Configuration structure
20135 + * @obj_id: Returned object id
20137 + * Create the DPMAC object, allocate required resources and
20138 + * perform required initialization.
20140 + * The function accepts an authentication token of a parent
20141 + * container that this object should be assigned to. The token
20142 + * can be '0' so the object will be assigned to the default container.
20143 + * The newly created object can be opened with the returned
20144 + * object id and using the container's associated tokens and MC portals.
20146 + * Return: '0' on Success; Error code otherwise.
20148 +int dpmac_create(struct fsl_mc_io *mc_io,
20151 + const struct dpmac_cfg *cfg,
20154 + struct dpmac_cmd_create *cmd_params;
20155 + struct mc_command cmd = { 0 };
20158 + /* prepare command */
20159 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CREATE,
20162 + cmd_params = (struct dpmac_cmd_create *)cmd.params;
20163 + cmd_params->mac_id = cpu_to_le32(cfg->mac_id);
20165 + /* send command to mc*/
20166 + err = mc_send_command(mc_io, &cmd);
20170 + /* retrieve response parameters */
20171 + *obj_id = mc_cmd_read_object_id(&cmd);
20177 + * dpmac_destroy() - Destroy the DPMAC object and release all its resources.
20178 + * @mc_io: Pointer to MC portal's I/O object
20179 + * @dprc_token: Parent container token; '0' for default container
20180 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20181 + * @object_id: The object id; it must be a valid id within the container that
20182 + * created this object;
20184 + * The function accepts the authentication token of the parent container that
20185 + * created the object (not the one that currently owns the object). The object
20186 + * is searched within parent using the provided 'object_id'.
20187 + * All tokens to the object must be closed before calling destroy.
20189 + * Return: '0' on Success; error code otherwise.
20191 +int dpmac_destroy(struct fsl_mc_io *mc_io,
20196 + struct dpmac_cmd_destroy *cmd_params;
20197 + struct mc_command cmd = { 0 };
20199 + /* prepare command */
20200 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY,
20203 + cmd_params = (struct dpmac_cmd_destroy *)cmd.params;
20204 + cmd_params->dpmac_id = cpu_to_le32(object_id);
20206 + /* send command to mc*/
20207 + return mc_send_command(mc_io, &cmd);
20211 + * dpmac_set_irq_enable() - Set overall interrupt state.
20212 + * @mc_io: Pointer to MC portal's I/O object
20213 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20214 + * @token: Token of DPMAC object
20215 + * @irq_index: The interrupt index to configure
20216 + * @en: Interrupt state - enable = 1, disable = 0
20218 + * Allows GPP software to control when interrupts are generated.
20219 + * Each interrupt can have up to 32 causes. The enable/disable controls the
20220 + * overall interrupt state. If the interrupt is disabled no causes will cause
20223 + * Return: '0' on Success; Error code otherwise.
20225 +int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
20231 + struct dpmac_cmd_set_irq_enable *cmd_params;
20232 + struct mc_command cmd = { 0 };
20234 + /* prepare command */
20235 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE,
20238 + cmd_params = (struct dpmac_cmd_set_irq_enable *)cmd.params;
20239 + cmd_params->irq_index = irq_index;
20240 + cmd_params->enable = en;
20242 + /* send command to mc*/
20243 + return mc_send_command(mc_io, &cmd);
20247 + * dpmac_get_irq_enable() - Get overall interrupt state
20248 + * @mc_io: Pointer to MC portal's I/O object
20249 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20250 + * @token: Token of DPMAC object
20251 + * @irq_index: The interrupt index to configure
20252 + * @en: Returned interrupt state - enable = 1, disable = 0
20254 + * Return: '0' on Success; Error code otherwise.
20256 +int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
20262 + struct dpmac_cmd_get_irq_enable *cmd_params;
20263 + struct dpmac_rsp_get_irq_enable *rsp_params;
20264 + struct mc_command cmd = { 0 };
20267 + /* prepare command */
20268 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_ENABLE,
20271 + cmd_params = (struct dpmac_cmd_get_irq_enable *)cmd.params;
20272 + cmd_params->irq_index = irq_index;
20274 + /* send command to mc*/
20275 + err = mc_send_command(mc_io, &cmd);
20279 + /* retrieve response parameters */
20280 + rsp_params = (struct dpmac_rsp_get_irq_enable *)cmd.params;
20281 + *en = rsp_params->enabled;
20287 + * dpmac_set_irq_mask() - Set interrupt mask.
20288 + * @mc_io: Pointer to MC portal's I/O object
20289 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20290 + * @token: Token of DPMAC object
20291 + * @irq_index: The interrupt index to configure
20292 + * @mask: Event mask to trigger interrupt;
20294 + * 0 = ignore event
20295 + * 1 = consider event for asserting IRQ
20297 + * Every interrupt can have up to 32 causes and the interrupt model supports
20298 + * masking/unmasking each cause independently
20300 + * Return: '0' on Success; Error code otherwise.
20302 +int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
20308 + struct dpmac_cmd_set_irq_mask *cmd_params;
20309 + struct mc_command cmd = { 0 };
20311 + /* prepare command */
20312 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK,
20315 + cmd_params = (struct dpmac_cmd_set_irq_mask *)cmd.params;
20316 + cmd_params->mask = cpu_to_le32(mask);
20317 + cmd_params->irq_index = irq_index;
20319 + /* send command to mc*/
20320 + return mc_send_command(mc_io, &cmd);
20324 + * dpmac_get_irq_mask() - Get interrupt mask.
20325 + * @mc_io: Pointer to MC portal's I/O object
20326 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20327 + * @token: Token of DPMAC object
20328 + * @irq_index: The interrupt index to configure
20329 + * @mask: Returned event mask to trigger interrupt
20331 + * Every interrupt can have up to 32 causes and the interrupt model supports
20332 + * masking/unmasking each cause independently
20334 + * Return: '0' on Success; Error code otherwise.
20336 +int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
20342 + struct dpmac_cmd_get_irq_mask *cmd_params;
20343 + struct dpmac_rsp_get_irq_mask *rsp_params;
20344 + struct mc_command cmd = { 0 };
20347 + /* prepare command */
20348 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_MASK,
20351 + cmd_params = (struct dpmac_cmd_get_irq_mask *)cmd.params;
20352 + cmd_params->irq_index = irq_index;
20354 + /* send command to mc*/
20355 + err = mc_send_command(mc_io, &cmd);
20359 + /* retrieve response parameters */
20360 + rsp_params = (struct dpmac_rsp_get_irq_mask *)cmd.params;
20361 + *mask = le32_to_cpu(rsp_params->mask);
20367 + * dpmac_get_irq_status() - Get the current status of any pending interrupts.
20369 + * @mc_io: Pointer to MC portal's I/O object
20370 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20371 + * @token: Token of DPMAC object
20372 + * @irq_index: The interrupt index to configure
20373 + * @status: Returned interrupts status - one bit per cause:
20374 + * 0 = no interrupt pending
20375 + * 1 = interrupt pending
20377 + * Return: '0' on Success; Error code otherwise.
20379 +int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
20385 + struct dpmac_cmd_get_irq_status *cmd_params;
20386 + struct dpmac_rsp_get_irq_status *rsp_params;
20387 + struct mc_command cmd = { 0 };
20390 + /* prepare command */
20391 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_STATUS,
20394 + cmd_params = (struct dpmac_cmd_get_irq_status *)cmd.params;
20395 + cmd_params->status = cpu_to_le32(*status);
20396 + cmd_params->irq_index = irq_index;
20398 + /* send command to mc*/
20399 + err = mc_send_command(mc_io, &cmd);
20403 + /* retrieve response parameters */
20404 + rsp_params = (struct dpmac_rsp_get_irq_status *)cmd.params;
20405 + *status = le32_to_cpu(rsp_params->status);
20411 + * dpmac_clear_irq_status() - Clear a pending interrupt's status
20413 + * @mc_io: Pointer to MC portal's I/O object
20414 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20415 + * @token: Token of DPMAC object
20416 + * @irq_index: The interrupt index to configure
20417 + * @status: Bits to clear (W1C) - one bit per cause:
20418 + * 0 = don't change
20419 + * 1 = clear status bit
20421 + * Return: '0' on Success; Error code otherwise.
20423 +int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
20429 + struct dpmac_cmd_clear_irq_status *cmd_params;
20430 + struct mc_command cmd = { 0 };
20432 + /* prepare command */
20433 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS,
20436 + cmd_params = (struct dpmac_cmd_clear_irq_status *)cmd.params;
20437 + cmd_params->status = cpu_to_le32(status);
20438 + cmd_params->irq_index = irq_index;
20440 + /* send command to mc*/
20441 + return mc_send_command(mc_io, &cmd);
20445 + * dpmac_get_attributes - Retrieve DPMAC attributes.
20447 + * @mc_io: Pointer to MC portal's I/O object
20448 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20449 + * @token: Token of DPMAC object
20450 + * @attr: Returned object's attributes
20452 + * Return: '0' on Success; Error code otherwise.
20454 +int dpmac_get_attributes(struct fsl_mc_io *mc_io,
20457 + struct dpmac_attr *attr)
20459 + struct dpmac_rsp_get_attributes *rsp_params;
20460 + struct mc_command cmd = { 0 };
20463 + /* prepare command */
20464 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR,
20468 + /* send command to mc*/
20469 + err = mc_send_command(mc_io, &cmd);
20473 + /* retrieve response parameters */
20474 + rsp_params = (struct dpmac_rsp_get_attributes *)cmd.params;
20475 + attr->eth_if = rsp_params->eth_if;
20476 + attr->link_type = rsp_params->link_type;
20477 + attr->id = le16_to_cpu(rsp_params->id);
20478 + attr->max_rate = le32_to_cpu(rsp_params->max_rate);
20484 + * dpmac_get_link_cfg() - Get Ethernet link configuration
20485 + * @mc_io: Pointer to opaque I/O object
20486 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20487 + * @token: Token of DPMAC object
20488 + * @cfg: Returned structure with the link configuration
20490 + * Return: '0' on Success; Error code otherwise.
20492 +int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
20495 + struct dpmac_link_cfg *cfg)
20497 + struct dpmac_rsp_get_link_cfg *rsp_params;
20498 + struct mc_command cmd = { 0 };
20501 + /* prepare command */
20502 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG,
20506 + /* send command to mc*/
20507 + err = mc_send_command(mc_io, &cmd);
20511 + rsp_params = (struct dpmac_rsp_get_link_cfg *)cmd.params;
20512 + cfg->options = le64_to_cpu(rsp_params->options);
20513 + cfg->rate = le32_to_cpu(rsp_params->rate);
20519 + * dpmac_set_link_state() - Set the Ethernet link status
20520 + * @mc_io: Pointer to opaque I/O object
20521 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20522 + * @token: Token of DPMAC object
20523 + * @link_state: Link state configuration
20525 + * Return: '0' on Success; Error code otherwise.
20527 +int dpmac_set_link_state(struct fsl_mc_io *mc_io,
20530 + struct dpmac_link_state *link_state)
20532 + struct dpmac_cmd_set_link_state *cmd_params;
20533 + struct mc_command cmd = { 0 };
20535 + /* prepare command */
20536 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
20539 + cmd_params = (struct dpmac_cmd_set_link_state *)cmd.params;
20540 + cmd_params->options = cpu_to_le64(link_state->options);
20541 + cmd_params->rate = cpu_to_le32(link_state->rate);
20542 + cmd_params->up = dpmac_get_field(link_state->up, STATE);
20544 + /* send command to mc*/
20545 + return mc_send_command(mc_io, &cmd);
20549 + * dpmac_get_counter() - Read a specific DPMAC counter
20550 + * @mc_io: Pointer to opaque I/O object
20551 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20552 + * @token: Token of DPMAC object
20553 + * @type: The requested counter
20554 + * @counter: Returned counter value
20556 + * Return: '0' on Success; Error code otherwise.
20558 +int dpmac_get_counter(struct fsl_mc_io *mc_io,
20561 + enum dpmac_counter type,
20564 + struct dpmac_cmd_get_counter *dpmac_cmd;
20565 + struct dpmac_rsp_get_counter *dpmac_rsp;
20566 + struct mc_command cmd = { 0 };
20569 + /* prepare command */
20570 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER,
20573 + dpmac_cmd = (struct dpmac_cmd_get_counter *)cmd.params;
20574 + dpmac_cmd->type = type;
20576 + /* send command to mc*/
20577 + err = mc_send_command(mc_io, &cmd);
20581 + dpmac_rsp = (struct dpmac_rsp_get_counter *)cmd.params;
20582 + *counter = le64_to_cpu(dpmac_rsp->counter);
20588 +int dpmac_set_port_mac_addr(struct fsl_mc_io *mc_io,
20591 + const u8 addr[6])
20593 + struct dpmac_cmd_set_port_mac_addr *dpmac_cmd;
20594 + struct mc_command cmd = { 0 };
20596 + /* prepare command */
20597 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_PORT_MAC_ADDR,
20600 + dpmac_cmd = (struct dpmac_cmd_set_port_mac_addr *)cmd.params;
20601 + dpmac_cmd->addr[0] = addr[5];
20602 + dpmac_cmd->addr[1] = addr[4];
20603 + dpmac_cmd->addr[2] = addr[3];
20604 + dpmac_cmd->addr[3] = addr[2];
20605 + dpmac_cmd->addr[4] = addr[1];
20606 + dpmac_cmd->addr[5] = addr[0];
20608 + /* send command to mc*/
20609 + return mc_send_command(mc_io, &cmd);
20613 + * dpmac_get_api_version() - Get Data Path MAC version
20614 + * @mc_io: Pointer to MC portal's I/O object
20615 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20616 + * @major_ver: Major version of data path mac API
20617 + * @minor_ver: Minor version of data path mac API
20619 + * Return: '0' on Success; Error code otherwise.
20621 +int dpmac_get_api_version(struct fsl_mc_io *mc_io,
20626 + struct dpmac_rsp_get_api_version *rsp_params;
20627 + struct mc_command cmd = { 0 };
20630 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_API_VERSION,
20634 + err = mc_send_command(mc_io, &cmd);
20638 + rsp_params = (struct dpmac_rsp_get_api_version *)cmd.params;
20639 + *major_ver = le16_to_cpu(rsp_params->major);
20640 + *minor_ver = le16_to_cpu(rsp_params->minor);
20644 diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac.h b/drivers/staging/fsl-dpaa2/mac/dpmac.h
20645 new file mode 100644
20646 index 00000000..32d4ada2
20648 +++ b/drivers/staging/fsl-dpaa2/mac/dpmac.h
20650 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
20652 + * Redistribution and use in source and binary forms, with or without
20653 + * modification, are permitted provided that the following conditions are met:
20654 + * * Redistributions of source code must retain the above copyright
20655 + * notice, this list of conditions and the following disclaimer.
20656 + * * Redistributions in binary form must reproduce the above copyright
20657 + * notice, this list of conditions and the following disclaimer in the
20658 + * documentation and/or other materials provided with the distribution.
20659 + * * Neither the name of the above-listed copyright holders nor the
20660 + * names of any contributors may be used to endorse or promote products
20661 + * derived from this software without specific prior written permission.
20664 + * ALTERNATIVELY, this software may be distributed under the terms of the
20665 + * GNU General Public License ("GPL") as published by the Free Software
20666 + * Foundation, either version 2 of that License or (at your option) any
20669 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20670 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20671 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20672 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
20673 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20674 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20675 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20676 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20677 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
20678 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
20679 + * POSSIBILITY OF SUCH DAMAGE.
20681 +#ifndef __FSL_DPMAC_H
20682 +#define __FSL_DPMAC_H
20684 +/* Data Path MAC API
20685 + * Contains initialization APIs and runtime control APIs for DPMAC
20690 +int dpmac_open(struct fsl_mc_io *mc_io,
20695 +int dpmac_close(struct fsl_mc_io *mc_io,
20700 + * enum dpmac_link_type - DPMAC link type
20701 + * @DPMAC_LINK_TYPE_NONE: No link
20702 + * @DPMAC_LINK_TYPE_FIXED: Link is fixed type
20703 + * @DPMAC_LINK_TYPE_PHY: Link by PHY ID
20704 + * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type
20706 +enum dpmac_link_type {
20707 + DPMAC_LINK_TYPE_NONE,
20708 + DPMAC_LINK_TYPE_FIXED,
20709 + DPMAC_LINK_TYPE_PHY,
20710 + DPMAC_LINK_TYPE_BACKPLANE
20714 + * enum dpmac_eth_if - DPMAC Ethernet interface
20715 + * @DPMAC_ETH_IF_MII: MII interface
20716 + * @DPMAC_ETH_IF_RMII: RMII interface
20717 + * @DPMAC_ETH_IF_SMII: SMII interface
20718 + * @DPMAC_ETH_IF_GMII: GMII interface
20719 + * @DPMAC_ETH_IF_RGMII: RGMII interface
20720 + * @DPMAC_ETH_IF_SGMII: SGMII interface
20721 + * @DPMAC_ETH_IF_QSGMII: QSGMII interface
20722 + * @DPMAC_ETH_IF_XAUI: XAUI interface
20723 + * @DPMAC_ETH_IF_XFI: XFI interface
20725 +enum dpmac_eth_if {
20726 + DPMAC_ETH_IF_MII,
20727 + DPMAC_ETH_IF_RMII,
20728 + DPMAC_ETH_IF_SMII,
20729 + DPMAC_ETH_IF_GMII,
20730 + DPMAC_ETH_IF_RGMII,
20731 + DPMAC_ETH_IF_SGMII,
20732 + DPMAC_ETH_IF_QSGMII,
20733 + DPMAC_ETH_IF_XAUI,
20738 + * struct dpmac_cfg - Structure representing DPMAC configuration
20739 + * @mac_id: Represents the Hardware MAC ID; in case of multiple WRIOP,
20740 + * the MAC IDs are continuous.
20741 + * For example: 2 WRIOPs, 16 MACs in each:
20742 + * MAC IDs for the 1st WRIOP: 1-16,
20743 + * MAC IDs for the 2nd WRIOP: 17-32.
20745 +struct dpmac_cfg {
20749 +int dpmac_create(struct fsl_mc_io *mc_io,
20752 + const struct dpmac_cfg *cfg,
20755 +int dpmac_destroy(struct fsl_mc_io *mc_io,
20761 + * DPMAC IRQ Index and Events
20767 +#define DPMAC_IRQ_INDEX 0
20769 + * IRQ event - indicates a change in link state
20771 +#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001
20773 + * IRQ event - Indicates that the link state changed
20775 +#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002
20777 +int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
20783 +int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
20789 +int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
20795 +int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
20801 +int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
20807 +int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
20814 + * struct dpmac_attr - Structure representing DPMAC attributes
20815 + * @id: DPMAC object ID
20816 + * @max_rate: Maximum supported rate - in Mbps
20817 + * @eth_if: Ethernet interface
20818 + * @link_type: link type
20820 +struct dpmac_attr {
20823 + enum dpmac_eth_if eth_if;
20824 + enum dpmac_link_type link_type;
20827 +int dpmac_get_attributes(struct fsl_mc_io *mc_io,
20830 + struct dpmac_attr *attr);
20833 + * DPMAC link configuration/state options
20837 + * Enable auto-negotiation
20839 +#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL
20841 + * Enable half-duplex mode
20843 +#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
20845 + * Enable pause frames
20847 +#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL
20849 + * Enable asymmetric pause frames
20851 +#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
20854 + * struct dpmac_link_cfg - Structure representing DPMAC link configuration
20855 + * @rate: Link's rate - in Mbps
20856 + * @options: Enable/Disable DPMAC link cfg features (bitmap)
20858 +struct dpmac_link_cfg {
20863 +int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
20866 + struct dpmac_link_cfg *cfg);
20869 + * struct dpmac_link_state - DPMAC link configuration request
20870 + * @rate: Rate in Mbps
20871 + * @options: Enable/Disable DPMAC link cfg features (bitmap)
20872 + * @up: Link state
20874 +struct dpmac_link_state {
20880 +int dpmac_set_link_state(struct fsl_mc_io *mc_io,
20883 + struct dpmac_link_state *link_state);
20886 + * enum dpmac_counter - DPMAC counter types
20887 + * @DPMAC_CNT_ING_FRAME_64: counts 64-bytes frames, good or bad.
20888 + * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-bytes frames, good or bad.
20889 + * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-bytes frames, good or bad.
20890 + * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-bytes frames, good or bad.
20891 + * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-bytes frames, good or bad.
20892 + * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-bytes frames, good or bad.
20893 + * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-bytes frames and larger
20894 + * (up to max frame length specified),
20896 + * @DPMAC_CNT_ING_FRAG: counts frames which are shorter than 64 bytes received
20897 + * with a wrong CRC
20898 + * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length
20899 + * specified, with a bad frame check sequence.
20900 + * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors.
20901 + * Occurs when a receive FIFO overflows.
20902 + * Includes also frames truncated as a result of
20903 + * the receive FIFO overflow.
20904 + * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error
20905 + * (optionally used for wrong SFD).
20906 + * @DPMAC_CNT_EGR_UNDERSIZED: counts frames transmitted that were less than 64
20907 + * bytes long with a good CRC.
20908 + * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length
20909 + * specified, with a good frame check sequence.
20910 + * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC)
20911 + * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted
20912 + * (regular and PFC).
20913 + * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid
20914 + * frames and valid pause frames.
20915 + * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames.
20916 + * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames.
20917 + * @DPMAC_CNT_ING_ALL_FRAME: counts each good or bad frames received.
20918 + * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames.
20919 + * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error
20920 + * (except for undersized/fragment frame).
20921 + * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid
20922 + * frames and valid pause frames transmitted.
20923 + * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames.
20924 + * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames.
20925 + * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames.
20926 + * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error.
20927 + * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including
20929 + * @DPMAC_CNT_ENG_GOOD_FRAME: counts frames transmitted without error, including
20932 +enum dpmac_counter {
20933 + DPMAC_CNT_ING_FRAME_64,
20934 + DPMAC_CNT_ING_FRAME_127,
20935 + DPMAC_CNT_ING_FRAME_255,
20936 + DPMAC_CNT_ING_FRAME_511,
20937 + DPMAC_CNT_ING_FRAME_1023,
20938 + DPMAC_CNT_ING_FRAME_1518,
20939 + DPMAC_CNT_ING_FRAME_1519_MAX,
20940 + DPMAC_CNT_ING_FRAG,
20941 + DPMAC_CNT_ING_JABBER,
20942 + DPMAC_CNT_ING_FRAME_DISCARD,
20943 + DPMAC_CNT_ING_ALIGN_ERR,
20944 + DPMAC_CNT_EGR_UNDERSIZED,
20945 + DPMAC_CNT_ING_OVERSIZED,
20946 + DPMAC_CNT_ING_VALID_PAUSE_FRAME,
20947 + DPMAC_CNT_EGR_VALID_PAUSE_FRAME,
20948 + DPMAC_CNT_ING_BYTE,
20949 + DPMAC_CNT_ING_MCAST_FRAME,
20950 + DPMAC_CNT_ING_BCAST_FRAME,
20951 + DPMAC_CNT_ING_ALL_FRAME,
20952 + DPMAC_CNT_ING_UCAST_FRAME,
20953 + DPMAC_CNT_ING_ERR_FRAME,
20954 + DPMAC_CNT_EGR_BYTE,
20955 + DPMAC_CNT_EGR_MCAST_FRAME,
20956 + DPMAC_CNT_EGR_BCAST_FRAME,
20957 + DPMAC_CNT_EGR_UCAST_FRAME,
20958 + DPMAC_CNT_EGR_ERR_FRAME,
20959 + DPMAC_CNT_ING_GOOD_FRAME,
20960 + DPMAC_CNT_ENG_GOOD_FRAME
20963 +int dpmac_get_counter(struct fsl_mc_io *mc_io,
20966 + enum dpmac_counter type,
20970 + * dpmac_set_port_mac_addr() - Set a MAC address associated with the physical
20971 + * port. This is not used for filtering, MAC is always in
20972 + * promiscuous mode, it is passed to DPNIs through DPNI API for
20973 + * application use.
20974 + * @mc_io: Pointer to opaque I/O object
20975 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20976 + * @token: Token of DPMAC object
20977 + * @addr: MAC address to set
20979 + * Return: '0' on Success; Error code otherwise.
20981 +int dpmac_set_port_mac_addr(struct fsl_mc_io *mc_io,
20984 + const u8 addr[6]);
20986 +int dpmac_get_api_version(struct fsl_mc_io *mc_io,
20991 +#endif /* __FSL_DPMAC_H */
20992 diff --git a/drivers/staging/fsl-dpaa2/mac/mac.c b/drivers/staging/fsl-dpaa2/mac/mac.c
20993 new file mode 100644
20994 index 00000000..30169639
20996 +++ b/drivers/staging/fsl-dpaa2/mac/mac.c
20998 +/* Copyright 2015 Freescale Semiconductor Inc.
21000 + * Redistribution and use in source and binary forms, with or without
21001 + * modification, are permitted provided that the following conditions are met:
21002 + * * Redistributions of source code must retain the above copyright
21003 + * notice, this list of conditions and the following disclaimer.
21004 + * * Redistributions in binary form must reproduce the above copyright
21005 + * notice, this list of conditions and the following disclaimer in the
21006 + * documentation and/or other materials provided with the distribution.
21007 + * * Neither the name of Freescale Semiconductor nor the
21008 + * names of its contributors may be used to endorse or promote products
21009 + * derived from this software without specific prior written permission.
21012 + * ALTERNATIVELY, this software may be distributed under the terms of the
21013 + * GNU General Public License ("GPL") as published by the Free Software
21014 + * Foundation, either version 2 of that License or (at your option) any
21017 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
21018 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21019 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21020 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
21021 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21022 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21023 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
21024 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21025 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
21026 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
21029 +#include <linux/module.h>
21031 +#include <linux/netdevice.h>
21032 +#include <linux/etherdevice.h>
21033 +#include <linux/msi.h>
21034 +#include <linux/rtnetlink.h>
21035 +#include <linux/if_vlan.h>
21037 +#include <uapi/linux/if_bridge.h>
21038 +#include <net/netlink.h>
21040 +#include <linux/of.h>
21041 +#include <linux/of_mdio.h>
21042 +#include <linux/of_net.h>
21043 +#include <linux/phy.h>
21044 +#include <linux/phy_fixed.h>
21046 +#include "../../fsl-mc/include/mc.h"
21047 +#include "../../fsl-mc/include/mc-sys.h"
21049 +#include "dpmac.h"
21050 +#include "dpmac-cmd.h"
21052 +struct dpaa2_mac_priv {
21053 + struct net_device *netdev;
21054 + struct fsl_mc_device *mc_dev;
21055 + struct dpmac_attr attr;
21056 + struct dpmac_link_state old_state;
21059 +/* TODO: fix the 10G modes, mapping can't be right:
21060 + * XGMII is parallel
21061 + * XAUI is serial, using 8b/10b encoding
21062 + * XFI is also serial but using 64b/66b encoding
21063 + * they can't all map to XGMII...
21065 + * This must be kept in sync with enum dpmac_eth_if.
21067 +static phy_interface_t dpaa2_mac_iface_mode[] = {
21068 + PHY_INTERFACE_MODE_MII, /* DPMAC_ETH_IF_MII */
21069 + PHY_INTERFACE_MODE_RMII, /* DPMAC_ETH_IF_RMII */
21070 + PHY_INTERFACE_MODE_SMII, /* DPMAC_ETH_IF_SMII */
21071 + PHY_INTERFACE_MODE_GMII, /* DPMAC_ETH_IF_GMII */
21072 + PHY_INTERFACE_MODE_RGMII, /* DPMAC_ETH_IF_RGMII */
21073 + PHY_INTERFACE_MODE_SGMII, /* DPMAC_ETH_IF_SGMII */
21074 + PHY_INTERFACE_MODE_QSGMII, /* DPMAC_ETH_IF_QSGMII */
21075 + PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_XAUI */
21076 + PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_XFI */
21079 +static void dpaa2_mac_link_changed(struct net_device *netdev)
21081 + struct phy_device *phydev;
21082 + struct dpmac_link_state state = { 0 };
21083 + struct dpaa2_mac_priv *priv = netdev_priv(netdev);
21086 + /* the PHY just notified us of link state change */
21087 + phydev = netdev->phydev;
21089 + state.up = !!phydev->link;
21090 + if (phydev->link) {
21091 + state.rate = phydev->speed;
21093 + if (!phydev->duplex)
21094 + state.options |= DPMAC_LINK_OPT_HALF_DUPLEX;
21095 + if (phydev->autoneg)
21096 + state.options |= DPMAC_LINK_OPT_AUTONEG;
21098 + netif_carrier_on(netdev);
21100 + netif_carrier_off(netdev);
21103 + if (priv->old_state.up != state.up ||
21104 + priv->old_state.rate != state.rate ||
21105 + priv->old_state.options != state.options) {
21106 + priv->old_state = state;
21107 + phy_print_status(phydev);
21110 + /* We must interrogate MC at all times, because we don't know
21111 + * when and whether a potential DPNI may have read the link state.
21113 + err = dpmac_set_link_state(priv->mc_dev->mc_io, 0,
21114 + priv->mc_dev->mc_handle, &state);
21115 + if (unlikely(err))
21116 + dev_err(&priv->mc_dev->dev, "dpmac_set_link_state: %d\n", err);
21119 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21120 +static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb,
21121 + struct net_device *dev)
21123 + /* we don't support I/O for now, drop the frame */
21124 + dev_kfree_skb_any(skb);
21125 + return NETDEV_TX_OK;
21128 +static int dpaa2_mac_open(struct net_device *netdev)
21130 + /* start PHY state machine */
21131 + phy_start(netdev->phydev);
21136 +static int dpaa2_mac_stop(struct net_device *netdev)
21138 + if (!netdev->phydev)
21141 + /* stop PHY state machine */
21142 + phy_stop(netdev->phydev);
21144 + /* signal link down to firmware */
21145 + netdev->phydev->link = 0;
21146 + dpaa2_mac_link_changed(netdev);
21152 +static int dpaa2_mac_get_settings(struct net_device *netdev,
21153 + struct ethtool_cmd *cmd)
21155 + return phy_ethtool_gset(netdev->phydev, cmd);
21158 +static int dpaa2_mac_set_settings(struct net_device *netdev,
21159 + struct ethtool_cmd *cmd)
21161 + return phy_ethtool_sset(netdev->phydev, cmd);
21164 +static void dpaa2_mac_get_stats(struct net_device *netdev,
21165 + struct rtnl_link_stats64 *storage)
21167 + struct dpaa2_mac_priv *priv = netdev_priv(netdev);
21171 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21172 + DPMAC_CNT_EGR_MCAST_FRAME,
21173 + &storage->tx_packets);
21176 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21177 + DPMAC_CNT_EGR_BCAST_FRAME, &tmp);
21180 + storage->tx_packets += tmp;
21181 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21182 + DPMAC_CNT_EGR_UCAST_FRAME, &tmp);
21185 + storage->tx_packets += tmp;
21187 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21188 + DPMAC_CNT_EGR_UNDERSIZED, &storage->tx_dropped);
21191 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21192 + DPMAC_CNT_EGR_BYTE, &storage->tx_bytes);
21195 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21196 + DPMAC_CNT_EGR_ERR_FRAME, &storage->tx_errors);
21200 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21201 + DPMAC_CNT_ING_ALL_FRAME, &storage->rx_packets);
21204 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21205 + DPMAC_CNT_ING_MCAST_FRAME, &storage->multicast);
21208 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21209 + DPMAC_CNT_ING_FRAME_DISCARD,
21210 + &storage->rx_dropped);
21213 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21214 + DPMAC_CNT_ING_ALIGN_ERR, &storage->rx_errors);
21217 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21218 + DPMAC_CNT_ING_OVERSIZED, &tmp);
21221 + storage->rx_errors += tmp;
21222 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21223 + DPMAC_CNT_ING_BYTE, &storage->rx_bytes);
21229 + netdev_err(netdev, "dpmac_get_counter err %d\n", err);
21233 + enum dpmac_counter id;
21234 + char name[ETH_GSTRING_LEN];
21235 +} dpaa2_mac_counters[] = {
21236 + {DPMAC_CNT_ING_ALL_FRAME, "rx all frames"},
21237 + {DPMAC_CNT_ING_GOOD_FRAME, "rx frames ok"},
21238 + {DPMAC_CNT_ING_ERR_FRAME, "rx frame errors"},
21239 + {DPMAC_CNT_ING_FRAME_DISCARD, "rx frame discards"},
21240 + {DPMAC_CNT_ING_UCAST_FRAME, "rx u-cast"},
21241 + {DPMAC_CNT_ING_BCAST_FRAME, "rx b-cast"},
21242 + {DPMAC_CNT_ING_MCAST_FRAME, "rx m-cast"},
21243 + {DPMAC_CNT_ING_FRAME_64, "rx 64 bytes"},
21244 + {DPMAC_CNT_ING_FRAME_127, "rx 65-127 bytes"},
21245 + {DPMAC_CNT_ING_FRAME_255, "rx 128-255 bytes"},
21246 + {DPMAC_CNT_ING_FRAME_511, "rx 256-511 bytes"},
21247 + {DPMAC_CNT_ING_FRAME_1023, "rx 512-1023 bytes"},
21248 + {DPMAC_CNT_ING_FRAME_1518, "rx 1024-1518 bytes"},
21249 + {DPMAC_CNT_ING_FRAME_1519_MAX, "rx 1519-max bytes"},
21250 + {DPMAC_CNT_ING_FRAG, "rx frags"},
21251 + {DPMAC_CNT_ING_JABBER, "rx jabber"},
21252 + {DPMAC_CNT_ING_ALIGN_ERR, "rx align errors"},
21253 + {DPMAC_CNT_ING_OVERSIZED, "rx oversized"},
21254 + {DPMAC_CNT_ING_VALID_PAUSE_FRAME, "rx pause"},
21255 + {DPMAC_CNT_ING_BYTE, "rx bytes"},
21256 + {DPMAC_CNT_ENG_GOOD_FRAME, "tx frames ok"},
21257 + {DPMAC_CNT_EGR_UCAST_FRAME, "tx u-cast"},
21258 + {DPMAC_CNT_EGR_MCAST_FRAME, "tx m-cast"},
21259 + {DPMAC_CNT_EGR_BCAST_FRAME, "tx b-cast"},
21260 + {DPMAC_CNT_EGR_ERR_FRAME, "tx frame errors"},
21261 + {DPMAC_CNT_EGR_UNDERSIZED, "tx undersized"},
21262 + {DPMAC_CNT_EGR_VALID_PAUSE_FRAME, "tx b-pause"},
21263 + {DPMAC_CNT_EGR_BYTE, "tx bytes"},
21267 +static void dpaa2_mac_get_strings(struct net_device *netdev,
21268 + u32 stringset, u8 *data)
21272 + switch (stringset) {
21273 + case ETH_SS_STATS:
21274 + for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++)
21275 + memcpy(data + i * ETH_GSTRING_LEN,
21276 + dpaa2_mac_counters[i].name,
21277 + ETH_GSTRING_LEN);
21282 +static void dpaa2_mac_get_ethtool_stats(struct net_device *netdev,
21283 + struct ethtool_stats *stats,
21286 + struct dpaa2_mac_priv *priv = netdev_priv(netdev);
21290 + for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) {
21291 + err = dpmac_get_counter(priv->mc_dev->mc_io,
21293 + priv->mc_dev->mc_handle,
21294 + dpaa2_mac_counters[i].id, &data[i]);
21296 + netdev_err(netdev, "dpmac_get_counter[%s] err %d\n",
21297 + dpaa2_mac_counters[i].name, err);
21301 +static int dpaa2_mac_get_sset_count(struct net_device *dev, int sset)
21304 + case ETH_SS_STATS:
21305 + return ARRAY_SIZE(dpaa2_mac_counters);
21307 + return -EOPNOTSUPP;
21311 +static const struct net_device_ops dpaa2_mac_ndo_ops = {
21312 + .ndo_start_xmit = &dpaa2_mac_drop_frame,
21313 + .ndo_open = &dpaa2_mac_open,
21314 + .ndo_stop = &dpaa2_mac_stop,
21315 + .ndo_get_stats64 = &dpaa2_mac_get_stats,
21318 +static const struct ethtool_ops dpaa2_mac_ethtool_ops = {
21319 + .get_settings = &dpaa2_mac_get_settings,
21320 + .set_settings = &dpaa2_mac_set_settings,
21321 + .get_strings = &dpaa2_mac_get_strings,
21322 + .get_ethtool_stats = &dpaa2_mac_get_ethtool_stats,
21323 + .get_sset_count = &dpaa2_mac_get_sset_count,
21325 +#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
21327 +static void configure_link(struct dpaa2_mac_priv *priv,
21328 + struct dpmac_link_cfg *cfg)
21330 + struct phy_device *phydev = priv->netdev->phydev;
21332 + if (unlikely(!phydev))
21335 + phydev->speed = cfg->rate;
21336 + phydev->duplex = !!(cfg->options & DPMAC_LINK_OPT_HALF_DUPLEX);
21338 + if (cfg->options & DPMAC_LINK_OPT_AUTONEG) {
21339 + phydev->autoneg = 1;
21340 + phydev->advertising |= ADVERTISED_Autoneg;
21342 + phydev->autoneg = 0;
21343 + phydev->advertising &= ~ADVERTISED_Autoneg;
21346 + phy_start_aneg(phydev);
21349 +static irqreturn_t dpaa2_mac_irq_handler(int irq_num, void *arg)
21351 + struct device *dev = (struct device *)arg;
21352 + struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
21353 + struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
21354 + struct dpmac_link_cfg link_cfg;
21358 + err = dpmac_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
21359 + DPMAC_IRQ_INDEX, &status);
21360 + if (unlikely(err || !status))
21363 + /* DPNI-initiated link configuration; 'ifconfig up' also calls this */
21364 + if (status & DPMAC_IRQ_EVENT_LINK_CFG_REQ) {
21365 + err = dpmac_get_link_cfg(mc_dev->mc_io, 0, mc_dev->mc_handle,
21367 + if (unlikely(err))
21370 + configure_link(priv, &link_cfg);
21374 + dpmac_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
21375 + DPMAC_IRQ_INDEX, status);
21377 + return IRQ_HANDLED;
21380 +static int setup_irqs(struct fsl_mc_device *mc_dev)
21383 + struct fsl_mc_device_irq *irq;
21385 + err = fsl_mc_allocate_irqs(mc_dev);
21387 + dev_err(&mc_dev->dev, "fsl_mc_allocate_irqs err %d\n", err);
21391 + irq = mc_dev->irqs[0];
21392 + err = devm_request_threaded_irq(&mc_dev->dev, irq->msi_desc->irq,
21393 + NULL, &dpaa2_mac_irq_handler,
21394 + IRQF_NO_SUSPEND | IRQF_ONESHOT,
21395 + dev_name(&mc_dev->dev), &mc_dev->dev);
21397 + dev_err(&mc_dev->dev, "devm_request_threaded_irq err %d\n",
21402 + err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
21403 + DPMAC_IRQ_INDEX, DPMAC_IRQ_EVENT_LINK_CFG_REQ);
21405 + dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err);
21408 + err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
21409 + DPMAC_IRQ_INDEX, 1);
21411 + dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
21418 + fsl_mc_free_irqs(mc_dev);
21423 +static void teardown_irqs(struct fsl_mc_device *mc_dev)
21427 + err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
21428 + DPMAC_IRQ_INDEX, 0);
21430 + dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
21432 + fsl_mc_free_irqs(mc_dev);
21435 +static struct device_node *find_dpmac_node(struct device *dev, u16 dpmac_id)
21437 + struct device_node *dpmacs, *dpmac = NULL;
21438 + struct device_node *mc_node = dev->of_node;
21442 + dpmacs = of_find_node_by_name(mc_node, "dpmacs");
21444 + dev_err(dev, "No dpmacs subnode in device-tree\n");
21448 + while ((dpmac = of_get_next_child(dpmacs, dpmac))) {
21449 + err = of_property_read_u32(dpmac, "reg", &id);
21452 + if (id == dpmac_id)
21459 +static int dpaa2_mac_probe(struct fsl_mc_device *mc_dev)
21461 + struct device *dev;
21462 + struct dpaa2_mac_priv *priv = NULL;
21463 + struct device_node *phy_node, *dpmac_node;
21464 + struct net_device *netdev;
21465 + phy_interface_t if_mode;
21468 + dev = &mc_dev->dev;
21470 + /* prepare a net_dev structure to make the phy lib API happy */
21471 + netdev = alloc_etherdev(sizeof(*priv));
21473 + dev_err(dev, "alloc_etherdev error\n");
21477 + priv = netdev_priv(netdev);
21478 + priv->mc_dev = mc_dev;
21479 + priv->netdev = netdev;
21481 + SET_NETDEV_DEV(netdev, dev);
21483 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21484 + snprintf(netdev->name, IFNAMSIZ, "mac%d", mc_dev->obj_desc.id);
21487 + dev_set_drvdata(dev, priv);
21489 + err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
21490 + if (err || !mc_dev->mc_io) {
21491 + dev_err(dev, "fsl_mc_portal_allocate error: %d\n", err);
21493 + goto err_free_netdev;
21496 + err = dpmac_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
21497 + &mc_dev->mc_handle);
21498 + if (err || !mc_dev->mc_handle) {
21499 + dev_err(dev, "dpmac_open error: %d\n", err);
21501 + goto err_free_mcp;
21504 + err = dpmac_get_attributes(mc_dev->mc_io, 0,
21505 + mc_dev->mc_handle, &priv->attr);
21507 + dev_err(dev, "dpmac_get_attributes err %d\n", err);
21512 + /* Look up the DPMAC node in the device-tree. */
21513 + dpmac_node = find_dpmac_node(dev, priv->attr.id);
21514 + if (!dpmac_node) {
21515 + dev_err(dev, "No dpmac@%d subnode found.\n", priv->attr.id);
21520 + err = setup_irqs(mc_dev);
21526 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21527 + /* OPTIONAL, register netdev just to make it visible to the user */
21528 + netdev->netdev_ops = &dpaa2_mac_ndo_ops;
21529 + netdev->ethtool_ops = &dpaa2_mac_ethtool_ops;
21531 + /* phy starts up enabled so netdev should be up too */
21532 + netdev->flags |= IFF_UP;
21534 + err = register_netdev(priv->netdev);
21536 + dev_err(dev, "register_netdev error %d\n", err);
21538 + goto err_free_irq;
21540 +#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
21542 + /* probe the PHY as a fixed-link if the link type declared in DPC
21543 + * explicitly mandates this
21546 + phy_node = of_parse_phandle(dpmac_node, "phy-handle", 0);
21548 + goto probe_fixed_link;
21551 + if (priv->attr.eth_if < ARRAY_SIZE(dpaa2_mac_iface_mode)) {
21552 + if_mode = dpaa2_mac_iface_mode[priv->attr.eth_if];
21553 + dev_dbg(dev, "\tusing if mode %s for eth_if %d\n",
21554 + phy_modes(if_mode), priv->attr.eth_if);
21556 + dev_warn(dev, "Unexpected interface mode %d, will probe as fixed link\n",
21557 + priv->attr.eth_if);
21558 + goto probe_fixed_link;
21561 + /* try to connect to the PHY */
21562 + netdev->phydev = of_phy_connect(netdev, phy_node,
21563 + &dpaa2_mac_link_changed, 0, if_mode);
21564 + if (!netdev->phydev) {
21565 + /* No need for dev_err(); the kernel's loud enough as it is. */
21566 + dev_dbg(dev, "Can't of_phy_connect() now.\n");
21567 + /* We might be waiting for the MDIO MUX to probe, so defer
21568 + * our own probing.
21570 + err = -EPROBE_DEFER;
21573 + dev_info(dev, "Connected to %s PHY.\n", phy_modes(if_mode));
21576 + if (!netdev->phydev) {
21577 + struct fixed_phy_status status = {
21579 + /* fixed-phys don't support 10Gbps speed for now */
21584 + /* try to register a fixed link phy */
21585 + netdev->phydev = fixed_phy_register(PHY_POLL, &status, -1,
21587 + if (!netdev->phydev || IS_ERR(netdev->phydev)) {
21588 + dev_err(dev, "error trying to register fixed PHY\n");
21589 + /* So we don't crash unregister_netdev() later on */
21590 + netdev->phydev = NULL;
21594 + dev_info(dev, "Registered fixed PHY.\n");
21597 + /* start PHY state machine */
21598 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21599 + dpaa2_mac_open(netdev);
21600 +#else /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
21601 + phy_start(netdev->phydev);
21602 +#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
21607 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21608 + unregister_netdev(netdev);
21611 + teardown_irqs(mc_dev);
21613 + dpmac_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
21615 + fsl_mc_portal_free(mc_dev->mc_io);
21617 + free_netdev(netdev);
21622 +static int dpaa2_mac_remove(struct fsl_mc_device *mc_dev)
21624 + struct device *dev = &mc_dev->dev;
21625 + struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
21627 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21628 + unregister_netdev(priv->netdev);
21630 + teardown_irqs(priv->mc_dev);
21631 + dpmac_close(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle);
21632 + fsl_mc_portal_free(priv->mc_dev->mc_io);
21633 + free_netdev(priv->netdev);
21635 + dev_set_drvdata(dev, NULL);
21641 +static const struct fsl_mc_device_id dpaa2_mac_match_id_table[] = {
21643 + .vendor = FSL_MC_VENDOR_FREESCALE,
21644 + .obj_type = "dpmac",
21646 + { .vendor = 0x0 }
21648 +MODULE_DEVICE_TABLE(fslmc, dpaa2_mac_match_id_table);
21650 +static struct fsl_mc_driver dpaa2_mac_drv = {
21652 + .name = KBUILD_MODNAME,
21653 + .owner = THIS_MODULE,
21655 + .probe = dpaa2_mac_probe,
21656 + .remove = dpaa2_mac_remove,
21657 + .match_id_table = dpaa2_mac_match_id_table,
21660 +module_fsl_mc_driver(dpaa2_mac_drv);
21662 +MODULE_LICENSE("GPL");
21663 +MODULE_DESCRIPTION("DPAA2 PHY proxy interface driver");
21664 diff --git a/drivers/staging/fsl-dpaa2/rtc/Makefile b/drivers/staging/fsl-dpaa2/rtc/Makefile
21665 new file mode 100644
21666 index 00000000..541a7acd
21668 +++ b/drivers/staging/fsl-dpaa2/rtc/Makefile
21671 +obj-$(CONFIG_PTP_1588_CLOCK_DPAA2) += dpaa2-rtc.o
21673 +dpaa2-rtc-objs := rtc.o dprtc.o
21676 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
21679 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
21680 diff --git a/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h b/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
21681 new file mode 100644
21682 index 00000000..618c7e54
21684 +++ b/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
21686 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
21688 + * Redistribution and use in source and binary forms, with or without
21689 + * modification, are permitted provided that the following conditions are met:
21690 + * * Redistributions of source code must retain the above copyright
21691 + * notice, this list of conditions and the following disclaimer.
21692 + * * Redistributions in binary form must reproduce the above copyright
21693 + * notice, this list of conditions and the following disclaimer in the
21694 + * documentation and/or other materials provided with the distribution.
21695 + * * Neither the name of the above-listed copyright holders nor the
21696 + * names of any contributors may be used to endorse or promote products
21697 + * derived from this software without specific prior written permission.
21700 + * ALTERNATIVELY, this software may be distributed under the terms of the
21701 + * GNU General Public License ("GPL") as published by the Free Software
21702 + * Foundation, either version 2 of that License or (at your option) any
21705 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21706 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21707 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21708 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
21709 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21710 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21711 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21712 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21713 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21714 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
21715 + * POSSIBILITY OF SUCH DAMAGE.
21717 +#ifndef _FSL_DPRTC_CMD_H
21718 +#define _FSL_DPRTC_CMD_H
21720 +/* DPRTC Version */
21721 +#define DPRTC_VER_MAJOR 2
21722 +#define DPRTC_VER_MINOR 0
21724 +/* Command versioning */
21725 +#define DPRTC_CMD_BASE_VERSION 1
21726 +#define DPRTC_CMD_ID_OFFSET 4
21728 +#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION)
21731 +#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800)
21732 +#define DPRTC_CMDID_OPEN DPRTC_CMD(0x810)
21733 +#define DPRTC_CMDID_CREATE DPRTC_CMD(0x910)
21734 +#define DPRTC_CMDID_DESTROY DPRTC_CMD(0x990)
21735 +#define DPRTC_CMDID_GET_API_VERSION DPRTC_CMD(0xa10)
21737 +#define DPRTC_CMDID_ENABLE DPRTC_CMD(0x002)
21738 +#define DPRTC_CMDID_DISABLE DPRTC_CMD(0x003)
21739 +#define DPRTC_CMDID_GET_ATTR DPRTC_CMD(0x004)
21740 +#define DPRTC_CMDID_RESET DPRTC_CMD(0x005)
21741 +#define DPRTC_CMDID_IS_ENABLED DPRTC_CMD(0x006)
21743 +#define DPRTC_CMDID_SET_IRQ_ENABLE DPRTC_CMD(0x012)
21744 +#define DPRTC_CMDID_GET_IRQ_ENABLE DPRTC_CMD(0x013)
21745 +#define DPRTC_CMDID_SET_IRQ_MASK DPRTC_CMD(0x014)
21746 +#define DPRTC_CMDID_GET_IRQ_MASK DPRTC_CMD(0x015)
21747 +#define DPRTC_CMDID_GET_IRQ_STATUS DPRTC_CMD(0x016)
21748 +#define DPRTC_CMDID_CLEAR_IRQ_STATUS DPRTC_CMD(0x017)
21750 +#define DPRTC_CMDID_SET_CLOCK_OFFSET DPRTC_CMD(0x1d0)
21751 +#define DPRTC_CMDID_SET_FREQ_COMPENSATION DPRTC_CMD(0x1d1)
21752 +#define DPRTC_CMDID_GET_FREQ_COMPENSATION DPRTC_CMD(0x1d2)
21753 +#define DPRTC_CMDID_GET_TIME DPRTC_CMD(0x1d3)
21754 +#define DPRTC_CMDID_SET_TIME DPRTC_CMD(0x1d4)
21755 +#define DPRTC_CMDID_SET_ALARM DPRTC_CMD(0x1d5)
21756 +#define DPRTC_CMDID_SET_PERIODIC_PULSE DPRTC_CMD(0x1d6)
21757 +#define DPRTC_CMDID_CLEAR_PERIODIC_PULSE DPRTC_CMD(0x1d7)
21758 +#define DPRTC_CMDID_SET_EXT_TRIGGER DPRTC_CMD(0x1d8)
21759 +#define DPRTC_CMDID_CLEAR_EXT_TRIGGER DPRTC_CMD(0x1d9)
21760 +#define DPRTC_CMDID_GET_EXT_TRIGGER_TIMESTAMP DPRTC_CMD(0x1dA)
21762 +/* Macros for accessing command fields smaller than 1byte */
21763 +#define DPRTC_MASK(field) \
21764 + GENMASK(DPRTC_##field##_SHIFT + DPRTC_##field##_SIZE - 1, \
21765 + DPRTC_##field##_SHIFT)
21766 +#define dprtc_get_field(var, field) \
21767 + (((var) & DPRTC_MASK(field)) >> DPRTC_##field##_SHIFT)
21769 +#pragma pack(push, 1)
21770 +struct dprtc_cmd_open {
21771 + uint32_t dprtc_id;
21774 +struct dprtc_cmd_destroy {
21775 + uint32_t object_id;
21778 +#define DPRTC_ENABLE_SHIFT 0
21779 +#define DPRTC_ENABLE_SIZE 1
21781 +struct dprtc_rsp_is_enabled {
21785 +struct dprtc_cmd_get_irq {
21787 + uint8_t irq_index;
21790 +struct dprtc_cmd_set_irq_enable {
21793 + uint8_t irq_index;
21796 +struct dprtc_rsp_get_irq_enable {
21800 +struct dprtc_cmd_set_irq_mask {
21802 + uint8_t irq_index;
21805 +struct dprtc_rsp_get_irq_mask {
21809 +struct dprtc_cmd_get_irq_status {
21811 + uint8_t irq_index;
21814 +struct dprtc_rsp_get_irq_status {
21818 +struct dprtc_cmd_clear_irq_status {
21820 + uint8_t irq_index;
21823 +struct dprtc_rsp_get_attributes {
21828 +struct dprtc_cmd_set_clock_offset {
21832 +struct dprtc_get_freq_compensation {
21833 + uint32_t freq_compensation;
21836 +struct dprtc_time {
21840 +struct dprtc_rsp_get_api_version {
21845 +#endif /* _FSL_DPRTC_CMD_H */
21846 diff --git a/drivers/staging/fsl-dpaa2/rtc/dprtc.c b/drivers/staging/fsl-dpaa2/rtc/dprtc.c
21847 new file mode 100644
21848 index 00000000..399177e4
21850 +++ b/drivers/staging/fsl-dpaa2/rtc/dprtc.c
21852 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
21854 + * Redistribution and use in source and binary forms, with or without
21855 + * modification, are permitted provided that the following conditions are met:
21856 + * * Redistributions of source code must retain the above copyright
21857 + * notice, this list of conditions and the following disclaimer.
21858 + * * Redistributions in binary form must reproduce the above copyright
21859 + * notice, this list of conditions and the following disclaimer in the
21860 + * documentation and/or other materials provided with the distribution.
21861 + * * Neither the name of the above-listed copyright holders nor the
21862 + * names of any contributors may be used to endorse or promote products
21863 + * derived from this software without specific prior written permission.
21866 + * ALTERNATIVELY, this software may be distributed under the terms of the
21867 + * GNU General Public License ("GPL") as published by the Free Software
21868 + * Foundation, either version 2 of that License or (at your option) any
21871 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21872 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21873 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21874 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
21875 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21876 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21877 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21878 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21879 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21880 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
21881 + * POSSIBILITY OF SUCH DAMAGE.
21883 +#include "../../fsl-mc/include/mc-sys.h"
21884 +#include "../../fsl-mc/include/mc-cmd.h"
21885 +#include "dprtc.h"
21886 +#include "dprtc-cmd.h"
21889 + * dprtc_open() - Open a control session for the specified object.
21890 + * @mc_io: Pointer to MC portal's I/O object
21891 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
21892 + * @dprtc_id: DPRTC unique ID
21893 + * @token: Returned token; use in subsequent API calls
21895 + * This function can be used to open a control session for an
21896 + * already created object; an object may have been declared in
21897 + * the DPL or by calling the dprtc_create function.
21898 + * This function returns a unique authentication token,
21899 + * associated with the specific object ID and the specific MC
21900 + * portal; this token must be used in all subsequent commands for
21901 + * this specific object
21903 + * Return: '0' on Success; Error code otherwise.
21905 +int dprtc_open(struct fsl_mc_io *mc_io,
21906 + uint32_t cmd_flags,
21910 + struct dprtc_cmd_open *cmd_params;
21911 + struct mc_command cmd = { 0 };
21914 + /* prepare command */
21915 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN,
21918 + cmd_params = (struct dprtc_cmd_open *)cmd.params;
21919 + cmd_params->dprtc_id = cpu_to_le32(dprtc_id);
21921 + /* send command to mc*/
21922 + err = mc_send_command(mc_io, &cmd);
21926 + /* retrieve response parameters */
21927 + *token = mc_cmd_hdr_read_token(&cmd);
21933 + * dprtc_close() - Close the control session of the object
21934 + * @mc_io: Pointer to MC portal's I/O object
21935 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
21936 + * @token: Token of DPRTC object
21938 + * After this function is called, no further operations are
21939 + * allowed on the object without opening a new control session.
21941 + * Return: '0' on Success; Error code otherwise.
21943 +int dprtc_close(struct fsl_mc_io *mc_io,
21944 + uint32_t cmd_flags,
21947 + struct mc_command cmd = { 0 };
21949 + /* prepare command */
21950 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
21953 + /* send command to mc*/
21954 + return mc_send_command(mc_io, &cmd);
21958 + * dprtc_create() - Create the DPRTC object.
21959 + * @mc_io: Pointer to MC portal's I/O object
21960 + * @dprc_token: Parent container token; '0' for default container
21961 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
21962 + * @cfg: Configuration structure
21963 + * @obj_id: Returned object id
21965 + * Create the DPRTC object, allocate required resources and
21966 + * perform required initialization.
21968 + * The function accepts an authentication token of a parent
21969 + * container that this object should be assigned to. The token
21970 + * can be '0' so the object will be assigned to the default container.
21971 + * The newly created object can be opened with the returned
21972 + * object id and using the container's associated tokens and MC portals.
21974 + * Return: '0' on Success; Error code otherwise.
21976 +int dprtc_create(struct fsl_mc_io *mc_io,
21977 + uint16_t dprc_token,
21978 + uint32_t cmd_flags,
21979 + const struct dprtc_cfg *cfg,
21980 + uint32_t *obj_id)
21982 + struct mc_command cmd = { 0 };
21985 + (void)(cfg); /* unused */
21987 + /* prepare command */
21988 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CREATE,
21992 + /* send command to mc*/
21993 + err = mc_send_command(mc_io, &cmd);
21997 + /* retrieve response parameters */
21998 + *obj_id = mc_cmd_read_object_id(&cmd);
22004 + * dprtc_destroy() - Destroy the DPRTC object and release all its resources.
22005 + * @mc_io: Pointer to MC portal's I/O object
22006 + * @dprc_token: Parent container token; '0' for default container
22007 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22008 + * @object_id: The object id; it must be a valid id within the container that
22009 + * created this object;
22011 + * The function accepts the authentication token of the parent container that
22012 + * created the object (not the one that currently owns the object). The object
22013 + * is searched within parent using the provided 'object_id'.
22014 + * All tokens to the object must be closed before calling destroy.
22016 + * Return: '0' on Success; error code otherwise.
22018 +int dprtc_destroy(struct fsl_mc_io *mc_io,
22019 + uint16_t dprc_token,
22020 + uint32_t cmd_flags,
22021 + uint32_t object_id)
22023 + struct dprtc_cmd_destroy *cmd_params;
22024 + struct mc_command cmd = { 0 };
22026 + /* prepare command */
22027 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY,
22030 + cmd_params = (struct dprtc_cmd_destroy *)cmd.params;
22031 + cmd_params->object_id = cpu_to_le32(object_id);
22033 + /* send command to mc*/
22034 + return mc_send_command(mc_io, &cmd);
22037 +int dprtc_enable(struct fsl_mc_io *mc_io,
22038 + uint32_t cmd_flags,
22041 + struct mc_command cmd = { 0 };
22043 + /* prepare command */
22044 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags,
22047 + /* send command to mc*/
22048 + return mc_send_command(mc_io, &cmd);
22051 +int dprtc_disable(struct fsl_mc_io *mc_io,
22052 + uint32_t cmd_flags,
22055 + struct mc_command cmd = { 0 };
22057 + /* prepare command */
22058 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE,
22062 + /* send command to mc*/
22063 + return mc_send_command(mc_io, &cmd);
22066 +int dprtc_is_enabled(struct fsl_mc_io *mc_io,
22067 + uint32_t cmd_flags,
22071 + struct dprtc_rsp_is_enabled *rsp_params;
22072 + struct mc_command cmd = { 0 };
22075 + /* prepare command */
22076 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_IS_ENABLED, cmd_flags,
22079 + /* send command to mc*/
22080 + err = mc_send_command(mc_io, &cmd);
22084 + /* retrieve response parameters */
22085 + rsp_params = (struct dprtc_rsp_is_enabled *)cmd.params;
22086 + *en = dprtc_get_field(rsp_params->en, ENABLE);
22091 +int dprtc_reset(struct fsl_mc_io *mc_io,
22092 + uint32_t cmd_flags,
22095 + struct mc_command cmd = { 0 };
22097 + /* prepare command */
22098 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET,
22102 + /* send command to mc*/
22103 + return mc_send_command(mc_io, &cmd);
22107 + * dprtc_set_irq_enable() - Set overall interrupt state.
22108 + * @mc_io: Pointer to MC portal's I/O object
22109 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22110 + * @token: Token of DPRTC object
22111 + * @irq_index: The interrupt index to configure
22112 + * @en: Interrupt state - enable = 1, disable = 0
22114 + * Allows GPP software to control when interrupts are generated.
22115 + * Each interrupt can have up to 32 causes. The enable/disable control's the
22116 + * overall interrupt state. if the interrupt is disabled no causes will cause
22119 + * Return: '0' on Success; Error code otherwise.
22121 +int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
22122 + uint32_t cmd_flags,
22124 + uint8_t irq_index,
22127 + struct dprtc_cmd_set_irq_enable *cmd_params;
22128 + struct mc_command cmd = { 0 };
22130 + /* prepare command */
22131 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE,
22134 + cmd_params = (struct dprtc_cmd_set_irq_enable *)cmd.params;
22135 + cmd_params->irq_index = irq_index;
22136 + cmd_params->en = en;
22138 + /* send command to mc*/
22139 + return mc_send_command(mc_io, &cmd);
22143 + * dprtc_get_irq_enable() - Get overall interrupt state
22144 + * @mc_io: Pointer to MC portal's I/O object
22145 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22146 + * @token: Token of DPRTC object
22147 + * @irq_index: The interrupt index to configure
22148 + * @en: Returned interrupt state - enable = 1, disable = 0
22150 + * Return: '0' on Success; Error code otherwise.
22152 +int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
22153 + uint32_t cmd_flags,
22155 + uint8_t irq_index,
22158 + struct dprtc_rsp_get_irq_enable *rsp_params;
22159 + struct dprtc_cmd_get_irq *cmd_params;
22160 + struct mc_command cmd = { 0 };
22163 + /* prepare command */
22164 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_ENABLE,
22167 + cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
22168 + cmd_params->irq_index = irq_index;
22170 + /* send command to mc*/
22171 + err = mc_send_command(mc_io, &cmd);
22175 + /* retrieve response parameters */
22176 + rsp_params = (struct dprtc_rsp_get_irq_enable *)cmd.params;
22177 + *en = rsp_params->en;
22183 + * dprtc_set_irq_mask() - Set interrupt mask.
22184 + * @mc_io: Pointer to MC portal's I/O object
22185 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22186 + * @token: Token of DPRTC object
22187 + * @irq_index: The interrupt index to configure
22188 + * @mask: Event mask to trigger interrupt;
22190 + * 0 = ignore event
22191 + * 1 = consider event for asserting IRQ
22193 + * Every interrupt can have up to 32 causes and the interrupt model supports
22194 + * masking/unmasking each cause independently
22196 + * Return: '0' on Success; Error code otherwise.
22198 +int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
22199 + uint32_t cmd_flags,
22201 + uint8_t irq_index,
22204 + struct dprtc_cmd_set_irq_mask *cmd_params;
22205 + struct mc_command cmd = { 0 };
22207 + /* prepare command */
22208 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK,
22211 + cmd_params = (struct dprtc_cmd_set_irq_mask *)cmd.params;
22212 + cmd_params->mask = cpu_to_le32(mask);
22213 + cmd_params->irq_index = irq_index;
22215 + /* send command to mc*/
22216 + return mc_send_command(mc_io, &cmd);
22220 + * dprtc_get_irq_mask() - Get interrupt mask.
22221 + * @mc_io: Pointer to MC portal's I/O object
22222 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22223 + * @token: Token of DPRTC object
22224 + * @irq_index: The interrupt index to configure
22225 + * @mask: Returned event mask to trigger interrupt
22227 + * Every interrupt can have up to 32 causes and the interrupt model supports
22228 + * masking/unmasking each cause independently
22230 + * Return: '0' on Success; Error code otherwise.
22232 +int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
22233 + uint32_t cmd_flags,
22235 + uint8_t irq_index,
22238 + struct dprtc_rsp_get_irq_mask *rsp_params;
22239 + struct dprtc_cmd_get_irq *cmd_params;
22240 + struct mc_command cmd = { 0 };
22243 + /* prepare command */
22244 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_MASK,
22247 + cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
22248 + cmd_params->irq_index = irq_index;
22250 + /* send command to mc*/
22251 + err = mc_send_command(mc_io, &cmd);
22255 + /* retrieve response parameters */
22256 + rsp_params = (struct dprtc_rsp_get_irq_mask *)cmd.params;
22257 + *mask = le32_to_cpu(rsp_params->mask);
22263 + * dprtc_get_irq_status() - Get the current status of any pending interrupts.
22265 + * @mc_io: Pointer to MC portal's I/O object
22266 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22267 + * @token: Token of DPRTC object
22268 + * @irq_index: The interrupt index to configure
22269 + * @status: Returned interrupts status - one bit per cause:
22270 + * 0 = no interrupt pending
22271 + * 1 = interrupt pending
22273 + * Return: '0' on Success; Error code otherwise.
22275 +int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
22276 + uint32_t cmd_flags,
22278 + uint8_t irq_index,
22279 + uint32_t *status)
22281 + struct dprtc_cmd_get_irq_status *cmd_params;
22282 + struct dprtc_rsp_get_irq_status *rsp_params;
22283 + struct mc_command cmd = { 0 };
22286 + /* prepare command */
22287 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_STATUS,
22290 + cmd_params = (struct dprtc_cmd_get_irq_status *)cmd.params;
22291 + cmd_params->status = cpu_to_le32(*status);
22292 + cmd_params->irq_index = irq_index;
22294 + /* send command to mc*/
22295 + err = mc_send_command(mc_io, &cmd);
22299 + /* retrieve response parameters */
22300 + rsp_params = (struct dprtc_rsp_get_irq_status *)cmd.params;
22301 + *status = rsp_params->status;
22307 + * dprtc_clear_irq_status() - Clear a pending interrupt's status
22309 + * @mc_io: Pointer to MC portal's I/O object
22310 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22311 + * @token: Token of DPRTC object
22312 + * @irq_index: The interrupt index to configure
22313 + * @status: Bits to clear (W1C) - one bit per cause:
22314 + * 0 = don't change
22315 + * 1 = clear status bit
22317 + * Return: '0' on Success; Error code otherwise.
22319 +int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
22320 + uint32_t cmd_flags,
22322 + uint8_t irq_index,
22325 + struct dprtc_cmd_clear_irq_status *cmd_params;
22326 + struct mc_command cmd = { 0 };
22328 + /* prepare command */
22329 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS,
22332 + cmd_params = (struct dprtc_cmd_clear_irq_status *)cmd.params;
22333 + cmd_params->irq_index = irq_index;
22334 + cmd_params->status = cpu_to_le32(status);
22336 + /* send command to mc*/
22337 + return mc_send_command(mc_io, &cmd);
22341 + * dprtc_get_attributes - Retrieve DPRTC attributes.
22343 + * @mc_io: Pointer to MC portal's I/O object
22344 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22345 + * @token: Token of DPRTC object
22346 + * @attr: Returned object's attributes
22348 + * Return: '0' on Success; Error code otherwise.
22350 +int dprtc_get_attributes(struct fsl_mc_io *mc_io,
22351 + uint32_t cmd_flags,
22353 + struct dprtc_attr *attr)
22355 + struct dprtc_rsp_get_attributes *rsp_params;
22356 + struct mc_command cmd = { 0 };
22359 + /* prepare command */
22360 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_ATTR,
22364 + /* send command to mc*/
22365 + err = mc_send_command(mc_io, &cmd);
22369 + /* retrieve response parameters */
22370 + rsp_params = (struct dprtc_rsp_get_attributes *)cmd.params;
22371 + attr->id = le32_to_cpu(rsp_params->id);
22377 + * dprtc_set_clock_offset() - Sets the clock's offset
22378 + * (usually relative to another clock).
22380 + * @mc_io: Pointer to MC portal's I/O object
22381 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22382 + * @token: Token of DPRTC object
22383 + * @offset: New clock offset (in nanoseconds).
22385 + * Return: '0' on Success; Error code otherwise.
22387 +int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
22388 + uint32_t cmd_flags,
22392 + struct dprtc_cmd_set_clock_offset *cmd_params;
22393 + struct mc_command cmd = { 0 };
22395 + /* prepare command */
22396 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET,
22399 + cmd_params = (struct dprtc_cmd_set_clock_offset *)cmd.params;
22400 + cmd_params->offset = cpu_to_le64(offset);
22402 + /* send command to mc*/
22403 + return mc_send_command(mc_io, &cmd);
22407 + * dprtc_set_freq_compensation() - Sets a new frequency compensation value.
22409 + * @mc_io: Pointer to MC portal's I/O object
22410 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22411 + * @token: Token of DPRTC object
22412 + * @freq_compensation: The new frequency compensation value to set.
22414 + * Return: '0' on Success; Error code otherwise.
22416 +int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
22417 + uint32_t cmd_flags,
22419 + uint32_t freq_compensation)
22421 + struct dprtc_get_freq_compensation *cmd_params;
22422 + struct mc_command cmd = { 0 };
22424 + /* prepare command */
22425 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
22428 + cmd_params = (struct dprtc_get_freq_compensation *)cmd.params;
22429 + cmd_params->freq_compensation = cpu_to_le32(freq_compensation);
22431 + /* send command to mc*/
22432 + return mc_send_command(mc_io, &cmd);
22436 + * dprtc_get_freq_compensation() - Retrieves the frequency compensation value
22438 + * @mc_io: Pointer to MC portal's I/O object
22439 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22440 + * @token: Token of DPRTC object
22441 + * @freq_compensation: Frequency compensation value
22443 + * Return: '0' on Success; Error code otherwise.
22445 +int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
22446 + uint32_t cmd_flags,
22448 + uint32_t *freq_compensation)
22450 + struct dprtc_get_freq_compensation *rsp_params;
22451 + struct mc_command cmd = { 0 };
22454 + /* prepare command */
22455 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION,
22459 + /* send command to mc*/
22460 + err = mc_send_command(mc_io, &cmd);
22464 + /* retrieve response parameters */
22465 + rsp_params = (struct dprtc_get_freq_compensation *)cmd.params;
22466 + *freq_compensation = le32_to_cpu(rsp_params->freq_compensation);
22472 + * dprtc_get_time() - Returns the current RTC time.
22474 + * @mc_io: Pointer to MC portal's I/O object
22475 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22476 + * @token: Token of DPRTC object
22477 + * @time: Current RTC time.
22479 + * Return: '0' on Success; Error code otherwise.
22481 +int dprtc_get_time(struct fsl_mc_io *mc_io,
22482 + uint32_t cmd_flags,
22486 + struct dprtc_time *rsp_params;
22487 + struct mc_command cmd = { 0 };
22490 + /* prepare command */
22491 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME,
22495 + /* send command to mc*/
22496 + err = mc_send_command(mc_io, &cmd);
22500 + /* retrieve response parameters */
22501 + rsp_params = (struct dprtc_time *)cmd.params;
22502 + *time = le64_to_cpu(rsp_params->time);
22508 + * dprtc_set_time() - Updates current RTC time.
22510 + * @mc_io: Pointer to MC portal's I/O object
22511 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22512 + * @token: Token of DPRTC object
22513 + * @time: New RTC time.
22515 + * Return: '0' on Success; Error code otherwise.
22517 +int dprtc_set_time(struct fsl_mc_io *mc_io,
22518 + uint32_t cmd_flags,
22522 + struct dprtc_time *cmd_params;
22523 + struct mc_command cmd = { 0 };
22525 + /* prepare command */
22526 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME,
22529 + cmd_params = (struct dprtc_time *)cmd.params;
22530 + cmd_params->time = cpu_to_le64(time);
22532 + /* send command to mc*/
22533 + return mc_send_command(mc_io, &cmd);
22537 + * dprtc_set_alarm() - Defines and sets alarm.
22539 + * @mc_io: Pointer to MC portal's I/O object
22540 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22541 + * @token: Token of DPRTC object
22542 + * @time: In nanoseconds, the time when the alarm
22543 + * should go off - must be a multiple of
22546 + * Return: '0' on Success; Error code otherwise.
22548 +int dprtc_set_alarm(struct fsl_mc_io *mc_io,
22549 + uint32_t cmd_flags,
22550 + uint16_t token, uint64_t time)
22552 + struct dprtc_time *cmd_params;
22553 + struct mc_command cmd = { 0 };
22555 + /* prepare command */
22556 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM,
22559 + cmd_params = (struct dprtc_time *)cmd.params;
22560 + cmd_params->time = cpu_to_le64(time);
22562 + /* send command to mc*/
22563 + return mc_send_command(mc_io, &cmd);
22567 + * dprtc_get_api_version() - Get Data Path Real Time Counter API version
22568 + * @mc_io: Pointer to MC portal's I/O object
22569 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22570 + * @major_ver: Major version of data path real time counter API
22571 + * @minor_ver: Minor version of data path real time counter API
22573 + * Return: '0' on Success; Error code otherwise.
22575 +int dprtc_get_api_version(struct fsl_mc_io *mc_io,
22576 + uint32_t cmd_flags,
22577 + uint16_t *major_ver,
22578 + uint16_t *minor_ver)
22580 + struct dprtc_rsp_get_api_version *rsp_params;
22581 + struct mc_command cmd = { 0 };
22584 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_API_VERSION,
22588 + err = mc_send_command(mc_io, &cmd);
22592 + rsp_params = (struct dprtc_rsp_get_api_version *)cmd.params;
22593 + *major_ver = le16_to_cpu(rsp_params->major);
22594 + *minor_ver = le16_to_cpu(rsp_params->minor);
22598 diff --git a/drivers/staging/fsl-dpaa2/rtc/dprtc.h b/drivers/staging/fsl-dpaa2/rtc/dprtc.h
22599 new file mode 100644
22600 index 00000000..fc96cac6
22602 +++ b/drivers/staging/fsl-dpaa2/rtc/dprtc.h
22604 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
22606 + * Redistribution and use in source and binary forms, with or without
22607 + * modification, are permitted provided that the following conditions are met:
22608 + * * Redistributions of source code must retain the above copyright
22609 + * notice, this list of conditions and the following disclaimer.
22610 + * * Redistributions in binary form must reproduce the above copyright
22611 + * notice, this list of conditions and the following disclaimer in the
22612 + * documentation and/or other materials provided with the distribution.
22613 + * * Neither the name of the above-listed copyright holders nor the
22614 + * names of any contributors may be used to endorse or promote products
22615 + * derived from this software without specific prior written permission.
22618 + * ALTERNATIVELY, this software may be distributed under the terms of the
22619 + * GNU General Public License ("GPL") as published by the Free Software
22620 + * Foundation, either version 2 of that License or (at your option) any
22623 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22624 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22625 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22626 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22627 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22628 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22629 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22630 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22631 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22632 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22633 + * POSSIBILITY OF SUCH DAMAGE.
22635 +#ifndef __FSL_DPRTC_H
22636 +#define __FSL_DPRTC_H
22638 +/* Data Path Real Time Counter API
22639 + * Contains initialization APIs and runtime control APIs for RTC
22645 + * Number of irq's
22647 +#define DPRTC_MAX_IRQ_NUM 1
22648 +#define DPRTC_IRQ_INDEX 0
22651 + * Interrupt event masks:
22655 + * Interrupt event mask indicating alarm event had occurred
22657 +#define DPRTC_EVENT_ALARM 0x40000000
22659 + * Interrupt event mask indicating periodic pulse event had occurred
22661 +#define DPRTC_EVENT_PPS 0x08000000
22663 +int dprtc_open(struct fsl_mc_io *mc_io,
22664 + uint32_t cmd_flags,
22666 + uint16_t *token);
22668 +int dprtc_close(struct fsl_mc_io *mc_io,
22669 + uint32_t cmd_flags,
22673 + * struct dprtc_cfg - Structure representing DPRTC configuration
22674 + * @options: place holder
22676 +struct dprtc_cfg {
22677 + uint32_t options;
22680 +int dprtc_create(struct fsl_mc_io *mc_io,
22681 + uint16_t dprc_token,
22682 + uint32_t cmd_flags,
22683 + const struct dprtc_cfg *cfg,
22684 + uint32_t *obj_id);
22686 +int dprtc_destroy(struct fsl_mc_io *mc_io,
22687 + uint16_t dprc_token,
22688 + uint32_t cmd_flags,
22689 + uint32_t object_id);
22691 +int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
22692 + uint32_t cmd_flags,
22696 +int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
22697 + uint32_t cmd_flags,
22699 + uint32_t freq_compensation);
22701 +int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
22702 + uint32_t cmd_flags,
22704 + uint32_t *freq_compensation);
22706 +int dprtc_get_time(struct fsl_mc_io *mc_io,
22707 + uint32_t cmd_flags,
22711 +int dprtc_set_time(struct fsl_mc_io *mc_io,
22712 + uint32_t cmd_flags,
22716 +int dprtc_set_alarm(struct fsl_mc_io *mc_io,
22717 + uint32_t cmd_flags,
22721 +int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
22722 + uint32_t cmd_flags,
22724 + uint8_t irq_index,
22727 +int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
22728 + uint32_t cmd_flags,
22730 + uint8_t irq_index,
22733 +int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
22734 + uint32_t cmd_flags,
22736 + uint8_t irq_index,
22739 +int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
22740 + uint32_t cmd_flags,
22742 + uint8_t irq_index,
22745 +int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
22746 + uint32_t cmd_flags,
22748 + uint8_t irq_index,
22749 + uint32_t *status);
22751 +int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
22752 + uint32_t cmd_flags,
22754 + uint8_t irq_index,
22755 + uint32_t status);
22758 + * struct dprtc_attr - Structure representing DPRTC attributes
22759 + * @id: DPRTC object ID
22761 +struct dprtc_attr {
22765 +int dprtc_get_attributes(struct fsl_mc_io *mc_io,
22766 + uint32_t cmd_flags,
22768 + struct dprtc_attr *attr);
22770 +int dprtc_get_api_version(struct fsl_mc_io *mc_io,
22771 + uint32_t cmd_flags,
22772 + uint16_t *major_ver,
22773 + uint16_t *minor_ver);
22775 +#endif /* __FSL_DPRTC_H */
22776 diff --git a/drivers/staging/fsl-dpaa2/rtc/rtc.c b/drivers/staging/fsl-dpaa2/rtc/rtc.c
22777 new file mode 100644
22778 index 00000000..0afc6538
22780 +++ b/drivers/staging/fsl-dpaa2/rtc/rtc.c
22782 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
22784 + * Redistribution and use in source and binary forms, with or without
22785 + * modification, are permitted provided that the following conditions are met:
22786 + * * Redistributions of source code must retain the above copyright
22787 + * notice, this list of conditions and the following disclaimer.
22788 + * * Redistributions in binary form must reproduce the above copyright
22789 + * notice, this list of conditions and the following disclaimer in the
22790 + * documentation and/or other materials provided with the distribution.
22791 + * * Neither the name of the above-listed copyright holders nor the
22792 + * names of any contributors may be used to endorse or promote products
22793 + * derived from this software without specific prior written permission.
22796 + * ALTERNATIVELY, this software may be distributed under the terms of the
22797 + * GNU General Public License ("GPL") as published by the Free Software
22798 + * Foundation, either version 2 of that License or (at your option) any
22801 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22802 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22803 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22804 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22805 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22806 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22807 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22808 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22809 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22810 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22811 + * POSSIBILITY OF SUCH DAMAGE.
22814 +#include <linux/module.h>
22815 +#include <linux/ptp_clock_kernel.h>
22817 +#include "../../fsl-mc/include/mc.h"
22818 +#include "../../fsl-mc/include/mc-sys.h"
22820 +#include "dprtc.h"
22821 +#include "dprtc-cmd.h"
22823 +#define N_EXT_TS 2
22825 +struct ptp_clock *clock;
22826 +struct fsl_mc_device *rtc_mc_dev;
22827 +u32 freqCompensation;
22829 +/* PTP clock operations */
22830 +static int ptp_dpaa2_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
22833 + u32 diff, tmr_add;
22836 + struct fsl_mc_device *mc_dev = rtc_mc_dev;
22837 + struct device *dev = &mc_dev->dev;
22844 + tmr_add = freqCompensation;
22847 + diff = div_u64(adj, 1000000000ULL);
22849 + tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
22851 + err = dprtc_set_freq_compensation(mc_dev->mc_io, 0,
22852 + mc_dev->mc_handle, tmr_add);
22854 + dev_err(dev, "dprtc_set_freq_compensation err %d\n", err);
22858 +static int ptp_dpaa2_adjtime(struct ptp_clock_info *ptp, s64 delta)
22862 + struct fsl_mc_device *mc_dev = rtc_mc_dev;
22863 + struct device *dev = &mc_dev->dev;
22865 + err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &now);
22867 + dev_err(dev, "dprtc_get_time err %d\n", err);
22873 + err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, now);
22875 + dev_err(dev, "dprtc_set_time err %d\n", err);
22881 +static int ptp_dpaa2_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
22886 + struct fsl_mc_device *mc_dev = rtc_mc_dev;
22887 + struct device *dev = &mc_dev->dev;
22889 + err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &ns);
22891 + dev_err(dev, "dprtc_get_time err %d\n", err);
22895 + ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
22896 + ts->tv_nsec = remainder;
22900 +static int ptp_dpaa2_settime(struct ptp_clock_info *ptp,
22901 + const struct timespec *ts)
22905 + struct fsl_mc_device *mc_dev = rtc_mc_dev;
22906 + struct device *dev = &mc_dev->dev;
22908 + ns = ts->tv_sec * 1000000000ULL;
22909 + ns += ts->tv_nsec;
22911 + err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, ns);
22913 + dev_err(dev, "dprtc_set_time err %d\n", err);
22917 +static struct ptp_clock_info ptp_dpaa2_caps = {
22918 + .owner = THIS_MODULE,
22919 + .name = "dpaa2 clock",
22920 + .max_adj = 512000,
22922 + .n_ext_ts = N_EXT_TS,
22926 + .adjfreq = ptp_dpaa2_adjfreq,
22927 + .adjtime = ptp_dpaa2_adjtime,
22928 + .gettime64 = ptp_dpaa2_gettime,
22929 + .settime64 = ptp_dpaa2_settime,
22932 +static int rtc_probe(struct fsl_mc_device *mc_dev)
22934 + struct device *dev;
22936 + int dpaa2_phc_index;
22942 + dev = &mc_dev->dev;
22944 + err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
22945 + if (unlikely(err)) {
22946 + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
22949 + if (!mc_dev->mc_io) {
22951 + "fsl_mc_portal_allocate returned null handle but no error\n");
22956 + err = dprtc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
22957 + &mc_dev->mc_handle);
22959 + dev_err(dev, "dprtc_open err %d\n", err);
22960 + goto err_free_mcp;
22962 + if (!mc_dev->mc_handle) {
22963 + dev_err(dev, "dprtc_open returned null handle but no error\n");
22965 + goto err_free_mcp;
22968 + rtc_mc_dev = mc_dev;
22970 + err = dprtc_get_freq_compensation(mc_dev->mc_io, 0,
22971 + mc_dev->mc_handle, &tmr_add);
22973 + dev_err(dev, "dprtc_get_freq_compensation err %d\n", err);
22976 + freqCompensation = tmr_add;
22978 + clock = ptp_clock_register(&ptp_dpaa2_caps, dev);
22979 + if (IS_ERR(clock)) {
22980 + err = PTR_ERR(clock);
22983 + dpaa2_phc_index = ptp_clock_index(clock);
22987 + dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
22989 + fsl_mc_portal_free(mc_dev->mc_io);
22994 +static int rtc_remove(struct fsl_mc_device *mc_dev)
22996 + ptp_clock_unregister(clock);
22997 + dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
22998 + fsl_mc_portal_free(mc_dev->mc_io);
23003 +static const struct fsl_mc_device_id rtc_match_id_table[] = {
23005 + .vendor = FSL_MC_VENDOR_FREESCALE,
23006 + .obj_type = "dprtc",
23011 +static struct fsl_mc_driver rtc_drv = {
23013 + .name = KBUILD_MODNAME,
23014 + .owner = THIS_MODULE,
23016 + .probe = rtc_probe,
23017 + .remove = rtc_remove,
23018 + .match_id_table = rtc_match_id_table,
23021 +module_fsl_mc_driver(rtc_drv);
23023 +MODULE_LICENSE("GPL");
23024 +MODULE_DESCRIPTION("DPAA2 RTC (PTP 1588 clock) driver (prototype)");