c0f5819be9132180a0a4451c0623cf141c8013cd
[openwrt/openwrt.git] / target / linux / layerscape / patches-4.9 / 705-dpaa2-support-layerscape.patch
1 From 72b1e89ab8edb5e883e812d07d0751fe2b140548 Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Wed, 17 Jan 2018 15:12:58 +0800
4 Subject: [PATCH 11/30] dpaa2: support layerscape
5
6 This is an integrated patch for layerscape dpaa2 support.
7
8 Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
9 Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
10 Signed-off-by: Razvan Stefanescu <razvan.stefanescu@nxp.com>
11 Signed-off-by: costi <constantin.tudor@freescale.com>
12 Signed-off-by: Catalin Horghidan <catalin.horghidan@nxp.com>
13 Signed-off-by: Mathew McBride <matt@traverse.com.au>
14 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
15 ---
16 drivers/soc/fsl/ls2-console/Kconfig | 4 +
17 drivers/soc/fsl/ls2-console/Makefile | 1 +
18 drivers/soc/fsl/ls2-console/ls2-console.c | 284 ++
19 drivers/staging/fsl-dpaa2/ethernet/Makefile | 11 +
20 drivers/staging/fsl-dpaa2/ethernet/README | 186 ++
21 .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 352 ++
22 .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 60 +
23 .../staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 184 +
24 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 3516 ++++++++++++++++++++
25 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 499 +++
26 drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 864 +++++
27 drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 176 +
28 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 658 ++++
29 drivers/staging/fsl-dpaa2/ethernet/dpni.c | 1903 +++++++++++
30 drivers/staging/fsl-dpaa2/ethernet/dpni.h | 1053 ++++++
31 drivers/staging/fsl-dpaa2/ethernet/net.h | 480 +++
32 drivers/staging/fsl-dpaa2/ethsw/Kconfig | 6 +
33 drivers/staging/fsl-dpaa2/ethsw/Makefile | 10 +
34 drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 851 +++++
35 drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 2762 +++++++++++++++
36 drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 1269 +++++++
37 drivers/staging/fsl-dpaa2/ethsw/switch.c | 1857 +++++++++++
38 drivers/staging/fsl-dpaa2/evb/Kconfig | 7 +
39 drivers/staging/fsl-dpaa2/evb/Makefile | 10 +
40 drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h | 279 ++
41 drivers/staging/fsl-dpaa2/evb/dpdmux.c | 1112 +++++++
42 drivers/staging/fsl-dpaa2/evb/dpdmux.h | 453 +++
43 drivers/staging/fsl-dpaa2/evb/evb.c | 1350 ++++++++
44 drivers/staging/fsl-dpaa2/mac/Kconfig | 23 +
45 drivers/staging/fsl-dpaa2/mac/Makefile | 10 +
46 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 172 +
47 drivers/staging/fsl-dpaa2/mac/dpmac.c | 620 ++++
48 drivers/staging/fsl-dpaa2/mac/dpmac.h | 342 ++
49 drivers/staging/fsl-dpaa2/mac/mac.c | 669 ++++
50 drivers/staging/fsl-dpaa2/rtc/Makefile | 10 +
51 drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h | 160 +
52 drivers/staging/fsl-dpaa2/rtc/dprtc.c | 746 +++++
53 drivers/staging/fsl-dpaa2/rtc/dprtc.h | 172 +
54 drivers/staging/fsl-dpaa2/rtc/rtc.c | 243 ++
55 39 files changed, 23364 insertions(+)
56 create mode 100644 drivers/soc/fsl/ls2-console/Kconfig
57 create mode 100644 drivers/soc/fsl/ls2-console/Makefile
58 create mode 100644 drivers/soc/fsl/ls2-console/ls2-console.c
59 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Makefile
60 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/README
61 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
62 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
63 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
64 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
65 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
66 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
67 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpkg.h
68 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
69 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.c
70 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.h
71 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/net.h
72 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Kconfig
73 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Makefile
74 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
75 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.c
76 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.h
77 create mode 100644 drivers/staging/fsl-dpaa2/ethsw/switch.c
78 create mode 100644 drivers/staging/fsl-dpaa2/evb/Kconfig
79 create mode 100644 drivers/staging/fsl-dpaa2/evb/Makefile
80 create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
81 create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.c
82 create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux.h
83 create mode 100644 drivers/staging/fsl-dpaa2/evb/evb.c
84 create mode 100644 drivers/staging/fsl-dpaa2/mac/Kconfig
85 create mode 100644 drivers/staging/fsl-dpaa2/mac/Makefile
86 create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
87 create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.c
88 create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.h
89 create mode 100644 drivers/staging/fsl-dpaa2/mac/mac.c
90 create mode 100644 drivers/staging/fsl-dpaa2/rtc/Makefile
91 create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
92 create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc.c
93 create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc.h
94 create mode 100644 drivers/staging/fsl-dpaa2/rtc/rtc.c
95
96 --- /dev/null
97 +++ b/drivers/soc/fsl/ls2-console/Kconfig
98 @@ -0,0 +1,4 @@
99 +config FSL_LS2_CONSOLE
100 + tristate "Layerscape MC and AIOP console support"
101 + depends on ARCH_LAYERSCAPE
102 + default y
103 --- /dev/null
104 +++ b/drivers/soc/fsl/ls2-console/Makefile
105 @@ -0,0 +1 @@
106 +obj-$(CONFIG_FSL_LS2_CONSOLE) += ls2-console.o
107 --- /dev/null
108 +++ b/drivers/soc/fsl/ls2-console/ls2-console.c
109 @@ -0,0 +1,284 @@
110 +/* Copyright 2015-2016 Freescale Semiconductor Inc.
111 + *
112 + * Redistribution and use in source and binary forms, with or without
113 + * modification, are permitted provided that the following conditions are met:
114 + * * Redistributions of source code must retain the above copyright
115 + * notice, this list of conditions and the following disclaimer.
116 + * * Redistributions in binary form must reproduce the above copyright
117 + * notice, this list of conditions and the following disclaimer in the
118 + * documentation and/or other materials provided with the distribution.
119 + * * Neither the name of the above-listed copyright holders nor the
120 + * names of any contributors may be used to endorse or promote products
121 + * derived from this software without specific prior written permission.
122 + *
123 + *
124 + * ALTERNATIVELY, this software may be distributed under the terms of the
125 + * GNU General Public License ("GPL") as published by the Free Software
126 + * Foundation, either version 2 of that License or (at your option) any
127 + * later version.
128 + *
129 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
130 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
131 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
132 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
133 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
134 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
135 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
136 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
137 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
138 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
139 + * POSSIBILITY OF SUCH DAMAGE.
140 + */
141 +
142 +#include <linux/miscdevice.h>
143 +#include <linux/uaccess.h>
144 +#include <linux/poll.h>
145 +#include <linux/compat.h>
146 +#include <linux/module.h>
147 +#include <linux/slab.h>
148 +#include <linux/io.h>
149 +
150 +/* SoC address for the MC firmware base low/high registers */
151 +#define SOC_CCSR_MC_FW_BASE_ADDR_REGS 0x8340020
152 +#define SOC_CCSR_MC_FW_BASE_ADDR_REGS_SIZE 2
153 +/* MC firmware base low/high registers indexes */
154 +#define MCFBALR_OFFSET 0
155 +#define MCFBAHR_OFFSET 1
156 +
157 +/* Bit mask used to obtain the most significant part of the MC base address */
158 +#define MC_FW_HIGH_ADDR_MASK 0x1FFFF
159 +/* Bit mask used to obtain the least significant part of the MC base address */
160 +#define MC_FW_LOW_ADDR_MASK 0xE0000000
161 +
162 +#define MC_BUFFER_OFFSET 0x01000000
163 +#define MC_BUFFER_SIZE (1024*1024*16)
164 +#define MC_OFFSET_DELTA (MC_BUFFER_OFFSET)
165 +
166 +#define AIOP_BUFFER_OFFSET 0x06000000
167 +#define AIOP_BUFFER_SIZE (1024*1024*16)
168 +#define AIOP_OFFSET_DELTA (0)
169 +
170 +struct log_header {
171 + char magic_word[8]; /* magic word */
172 + uint32_t buf_start; /* holds the 32-bit little-endian
173 + * offset of the start of the buffer
174 + */
175 + uint32_t buf_length; /* holds the 32-bit little-endian
176 + * length of the buffer
177 + */
178 + uint32_t last_byte; /* holds the 32-bit little-endian offset
179 + * of the byte after the last byte that
180 + * was written
181 + */
182 + char reserved[44];
183 +};
184 +
185 +#define LOG_HEADER_FLAG_BUFFER_WRAPAROUND 0x80000000
186 +#define LOG_VERSION_MAJOR 1
187 +#define LOG_VERSION_MINOR 0
188 +
189 +
190 +#define invalidate(p) { asm volatile("dc ivac, %0" : : "r" (p) : "memory"); }
191 +
192 +struct console_data {
193 + char *map_addr;
194 + struct log_header *hdr;
195 + char *start_addr; /* Start of buffer */
196 + char *end_addr; /* End of buffer */
197 + char *end_of_data; /* Current end of data */
198 + char *cur_ptr; /* Last data sent to console */
199 +};
200 +
201 +#define LAST_BYTE(a) ((a) & ~(LOG_HEADER_FLAG_BUFFER_WRAPAROUND))
202 +
203 +static inline void __adjust_end(struct console_data *cd)
204 +{
205 + cd->end_of_data = cd->start_addr
206 + + LAST_BYTE(le32_to_cpu(cd->hdr->last_byte));
207 +}
208 +
209 +static inline void adjust_end(struct console_data *cd)
210 +{
211 + invalidate(cd->hdr);
212 + __adjust_end(cd);
213 +}
214 +
215 +static inline uint64_t get_mc_fw_base_address(void)
216 +{
217 + u32 *mcfbaregs = (u32 *) ioremap(SOC_CCSR_MC_FW_BASE_ADDR_REGS,
218 + SOC_CCSR_MC_FW_BASE_ADDR_REGS_SIZE);
219 + u64 mcfwbase = 0ULL;
220 +
221 + mcfwbase = readl(mcfbaregs + MCFBAHR_OFFSET) & MC_FW_HIGH_ADDR_MASK;
222 + mcfwbase <<= 32;
223 + mcfwbase |= readl(mcfbaregs + MCFBALR_OFFSET) & MC_FW_LOW_ADDR_MASK;
224 + iounmap(mcfbaregs);
225 + pr_info("fsl-ls2-console: MC base address at 0x%016llx\n", mcfwbase);
226 + return mcfwbase;
227 +}
228 +
229 +static int fsl_ls2_generic_console_open(struct inode *node, struct file *fp,
230 + u64 offset, u64 size,
231 + uint8_t *emagic, uint8_t magic_len,
232 + u32 offset_delta)
233 +{
234 + struct console_data *cd;
235 + uint8_t *magic;
236 + uint32_t wrapped;
237 +
238 + cd = kmalloc(sizeof(*cd), GFP_KERNEL);
239 + if (cd == NULL)
240 + return -ENOMEM;
241 + fp->private_data = cd;
242 + cd->map_addr = ioremap(get_mc_fw_base_address() + offset, size);
243 +
244 + cd->hdr = (struct log_header *) cd->map_addr;
245 + invalidate(cd->hdr);
246 +
247 + magic = cd->hdr->magic_word;
248 + if (memcmp(magic, emagic, magic_len)) {
249 + pr_info("magic didn't match!\n");
250 + pr_info("expected: %02x %02x %02x %02x %02x %02x %02x %02x\n",
251 + emagic[0], emagic[1], emagic[2], emagic[3],
252 + emagic[4], emagic[5], emagic[6], emagic[7]);
253 + pr_info(" seen: %02x %02x %02x %02x %02x %02x %02x %02x\n",
254 + magic[0], magic[1], magic[2], magic[3],
255 + magic[4], magic[5], magic[6], magic[7]);
256 +		iounmap(cd->map_addr);
257 +		kfree(cd);
258 + return -EIO;
259 + }
260 +
261 + cd->start_addr = cd->map_addr
262 + + le32_to_cpu(cd->hdr->buf_start) - offset_delta;
263 + cd->end_addr = cd->start_addr + le32_to_cpu(cd->hdr->buf_length);
264 +
265 + wrapped = le32_to_cpu(cd->hdr->last_byte)
266 + & LOG_HEADER_FLAG_BUFFER_WRAPAROUND;
267 +
268 + __adjust_end(cd);
269 + if (wrapped && (cd->end_of_data != cd->end_addr))
270 + cd->cur_ptr = cd->end_of_data+1;
271 + else
272 + cd->cur_ptr = cd->start_addr;
273 +
274 + return 0;
275 +}
276 +
277 +static int fsl_ls2_mc_console_open(struct inode *node, struct file *fp)
278 +{
279 + uint8_t magic_word[] = { 0, 1, 'C', 'M' };
280 +
281 + return fsl_ls2_generic_console_open(node, fp,
282 + MC_BUFFER_OFFSET, MC_BUFFER_SIZE,
283 + magic_word, sizeof(magic_word),
284 + MC_OFFSET_DELTA);
285 +}
286 +
287 +static int fsl_ls2_aiop_console_open(struct inode *node, struct file *fp)
288 +{
289 + uint8_t magic_word[] = { 'P', 'O', 'I', 'A' };
290 +
291 + return fsl_ls2_generic_console_open(node, fp,
292 + AIOP_BUFFER_OFFSET, AIOP_BUFFER_SIZE,
293 + magic_word, sizeof(magic_word),
294 + AIOP_OFFSET_DELTA);
295 +}
296 +
297 +static int fsl_ls2_console_close(struct inode *node, struct file *fp)
298 +{
299 + struct console_data *cd = fp->private_data;
300 +
301 + iounmap(cd->map_addr);
302 + kfree(cd);
303 + return 0;
304 +}
305 +
306 +ssize_t fsl_ls2_console_read(struct file *fp, char __user *buf, size_t count,
307 + loff_t *f_pos)
308 +{
309 + struct console_data *cd = fp->private_data;
310 + size_t bytes = 0;
311 + char data;
312 +
313 + /* Check if we need to adjust the end of data addr */
314 + adjust_end(cd);
315 +
316 + while ((count != bytes) && (cd->end_of_data != cd->cur_ptr)) {
317 + if (((u64)cd->cur_ptr) % 64 == 0)
318 + invalidate(cd->cur_ptr);
319 +
320 + data = *(cd->cur_ptr);
321 + if (copy_to_user(&buf[bytes], &data, 1))
322 + return -EFAULT;
323 + cd->cur_ptr++;
324 + if (cd->cur_ptr >= cd->end_addr)
325 + cd->cur_ptr = cd->start_addr;
326 + ++bytes;
327 + }
328 + return bytes;
329 +}
330 +
331 +static const struct file_operations fsl_ls2_mc_console_fops = {
332 + .owner = THIS_MODULE,
333 + .open = fsl_ls2_mc_console_open,
334 + .release = fsl_ls2_console_close,
335 + .read = fsl_ls2_console_read,
336 +};
337 +
338 +static struct miscdevice fsl_ls2_mc_console_dev = {
339 + .minor = MISC_DYNAMIC_MINOR,
340 + .name = "fsl_mc_console",
341 + .fops = &fsl_ls2_mc_console_fops
342 +};
343 +
344 +static const struct file_operations fsl_ls2_aiop_console_fops = {
345 + .owner = THIS_MODULE,
346 + .open = fsl_ls2_aiop_console_open,
347 + .release = fsl_ls2_console_close,
348 + .read = fsl_ls2_console_read,
349 +};
350 +
351 +static struct miscdevice fsl_ls2_aiop_console_dev = {
352 + .minor = MISC_DYNAMIC_MINOR,
353 + .name = "fsl_aiop_console",
354 + .fops = &fsl_ls2_aiop_console_fops
355 +};
356 +
357 +static int __init fsl_ls2_console_init(void)
358 +{
359 + int err = 0;
360 +
361 + pr_info("Freescale LS2 console driver\n");
362 + err = misc_register(&fsl_ls2_mc_console_dev);
363 + if (err) {
364 + pr_err("fsl_mc_console: cannot register device\n");
365 + return err;
366 + }
367 + pr_info("fsl-ls2-console: device %s registered\n",
368 + fsl_ls2_mc_console_dev.name);
369 +
370 + err = misc_register(&fsl_ls2_aiop_console_dev);
371 + if (err) {
372 + pr_err("fsl_aiop_console: cannot register device\n");
373 + return err;
374 + }
375 + pr_info("fsl-ls2-console: device %s registered\n",
376 + fsl_ls2_aiop_console_dev.name);
377 +
378 + return 0;
379 +}
380 +
381 +static void __exit fsl_ls2_console_exit(void)
382 +{
383 + misc_deregister(&fsl_ls2_mc_console_dev);
384 +
385 + misc_deregister(&fsl_ls2_aiop_console_dev);
386 +}
387 +
388 +module_init(fsl_ls2_console_init);
389 +module_exit(fsl_ls2_console_exit);
390 +
391 +MODULE_AUTHOR("Roy Pledge <roy.pledge@freescale.com>");
392 +MODULE_LICENSE("Dual BSD/GPL");
393 +MODULE_DESCRIPTION("Freescale LS2 console driver");
394 --- /dev/null
395 +++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
396 @@ -0,0 +1,11 @@
397 +#
398 +# Makefile for the Freescale DPAA2 Ethernet controller
399 +#
400 +
401 +obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
402 +
403 +fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
404 +fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o
405 +
406 +# Needed by the tracing framework
407 +CFLAGS_dpaa2-eth.o := -I$(src)
408 --- /dev/null
409 +++ b/drivers/staging/fsl-dpaa2/ethernet/README
410 @@ -0,0 +1,186 @@
411 +Freescale DPAA2 Ethernet driver
412 +===============================
413 +
414 +This file provides documentation for the Freescale DPAA2 Ethernet driver.
415 +
416 +
417 +Contents
418 +========
419 + Supported Platforms
420 + Architecture Overview
421 + Creating a Network Interface
422 + Features & Offloads
423 +
424 +
425 +Supported Platforms
426 +===================
427 +This driver provides networking support for Freescale DPAA2 SoCs, e.g.
428 +LS2080A, LS2088A, LS1088A.
429 +
430 +
431 +Architecture Overview
432 +=====================
433 +Unlike regular NICs, in the DPAA2 architecture there is no single hardware block
434 +representing network interfaces; instead, several separate hardware resources
435 +concur to provide the networking functionality:
436 + - network interfaces
437 + - queues, channels
438 + - buffer pools
439 + - MAC/PHY
440 +
441 +All hardware resources are allocated and configured through the Management
442 +Complex (MC) portals. MC abstracts most of these resources as DPAA2 objects
443 +and exposes ABIs through which they can be configured and controlled. A few
444 +hardware resources, like queues, do not have a corresponding MC object and
445 +are treated as internal resources of other objects.
446 +
447 +For a more detailed description of the DPAA2 architecture and its object
448 +abstractions see:
449 + drivers/staging/fsl-mc/README.txt
450 +
451 +Each Linux net device is built on top of a Datapath Network Interface (DPNI)
452 +object and uses Buffer Pools (DPBPs), I/O Portals (DPIOs) and Concentrators
453 +(DPCONs).
454 +
455 +Configuration interface:
456 +
457 + -----------------------
458 + | DPAA2 Ethernet Driver |
459 + -----------------------
460 + . . .
461 + . . .
462 + . . . . . . . . . . . .
463 + . . .
464 + . . .
465 + ---------- ---------- -----------
466 + | DPBP API | | DPNI API | | DPCON API |
467 + ---------- ---------- -----------
468 + . . . software
469 +=========== . ========== . ============ . ===================
470 + . . . hardware
471 + ------------------------------------------
472 + | MC hardware portals |
473 + ------------------------------------------
474 + . . .
475 + . . .
476 + ------ ------ -------
477 + | DPBP | | DPNI | | DPCON |
478 + ------ ------ -------
479 +
480 +The DPNIs are network interfaces without a direct one-on-one mapping to PHYs.
481 +DPBPs represent hardware buffer pools. Packet I/O is performed in the context
482 +of DPCON objects, using DPIO portals for managing and communicating with the
483 +hardware resources.
484 +
485 +Datapath (I/O) interface:
486 +
487 + -----------------------------------------------
488 + | DPAA2 Ethernet Driver |
489 + -----------------------------------------------
490 + | ^ ^ | |
491 + | | | | |
492 + enqueue| dequeue| data | dequeue| seed |
493 + (Tx) | (Rx, TxC)| avail.| request| buffers|
494 + | | notify| | |
495 + | | | | |
496 + V | | V V
497 + -----------------------------------------------
498 + | DPIO Driver |
499 + -----------------------------------------------
500 + | | | | | software
501 + | | | | | ================
502 + | | | | | hardware
503 + -----------------------------------------------
504 + | I/O hardware portals |
505 + -----------------------------------------------
506 + | ^ ^ | |
507 + | | | | |
508 + | | | V |
509 + V | ================ V
510 + ---------------------- | -------------
511 + queues ---------------------- | | Buffer pool |
512 + ---------------------- | -------------
513 + =======================
514 + Channel
515 +
516 +Datapath I/O (DPIO) portals provide enqueue and dequeue services, data
517 +availability notifications and buffer pool management. DPIOs are shared between
518 +all DPAA2 objects (and implicitly all DPAA2 kernel drivers) that work with data
519 +frames, but must be affine to the CPUs for the purpose of traffic distribution.
520 +
521 +Frames are transmitted and received through hardware frame queues, which can be
522 +grouped in channels for the purpose of hardware scheduling. The Ethernet driver
523 +enqueues TX frames on egress queues and after transmission is complete a TX
524 +confirmation frame is sent back to the CPU.
525 +
526 +When frames are available on ingress queues, a data availability notification
527 +is sent to the CPU; notifications are raised per channel, so even if multiple
528 +queues in the same channel have available frames, only one notification is sent.
529 +After a channel fires a notification, is must be explicitly rearmed.
530 +
531 +Each network interface can have multiple Rx, Tx and confirmation queues affined
532 +to CPUs, and one channel (DPCON) for each CPU that services at least one queue.
533 +DPCONs are used to distribute ingress traffic to different CPUs via the cores'
534 +affine DPIOs.
535 +
536 +The role of hardware buffer pools is storage of ingress frame data. Each network
537 +interface has a privately owned buffer pool which it seeds with kernel allocated
538 +buffers.
539 +
540 +
541 +DPNIs are decoupled from PHYs; a DPNI can be connected to a PHY through a DPMAC
542 +object or to another DPNI through an internal link, but the connection is
543 +managed by MC and completely transparent to the Ethernet driver.
544 +
545 + --------- --------- ---------
546 + | eth if1 | | eth if2 | | eth ifn |
547 + --------- --------- ---------
548 + . . .
549 + . . .
550 + . . .
551 + ---------------------------
552 + | DPAA2 Ethernet Driver |
553 + ---------------------------
554 + . . .
555 + . . .
556 + . . .
557 + ------ ------ ------ -------
558 + | DPNI | | DPNI | | DPNI | | DPMAC |----+
559 + ------ ------ ------ ------- |
560 + | | | | |
561 + | | | | -----
562 + =========== ================== | PHY |
563 + -----
564 +
565 +Creating a Network Interface
566 +============================
567 +A net device is created for each DPNI object probed on the MC bus. Each DPNI has
568 +a number of properties which determine the network interface configuration
569 +options and associated hardware resources.
570 +
571 +DPNI objects (and the other DPAA2 objects needed for a network interface) can be
572 +added to a container on the MC bus in one of two ways: statically, through a
573 +Datapath Layout Binary file (DPL) that is parsed by MC at boot time; or created
574 +dynamically at runtime, via the DPAA2 objects APIs.
575 +
576 +
577 +Features & Offloads
578 +===================
579 +Hardware checksum offloading is supported for TCP and UDP over IPv4/6 frames.
580 +The checksum offloads can be independently configured on RX and TX through
581 +ethtool.
582 +
583 +Hardware offload of unicast and multicast MAC filtering is supported on the
584 +ingress path and permanently enabled.
585 +
586 +Scatter-gather frames are supported on both RX and TX paths. On TX, SG support
587 +is configurable via ethtool; on RX it is always enabled.
588 +
589 +The DPAA2 hardware can process jumbo Ethernet frames of up to 10K bytes.
590 +
591 +The Ethernet driver defines a static flow hashing scheme that distributes
592 +traffic based on a 5-tuple key: src IP, dst IP, IP proto, L4 src port,
593 +L4 dst port. No user configuration is supported for now.
594 +
595 +Hardware specific statistics for the network interface as well as some
596 +non-standard driver stats can be consulted through ethtool -S option.
597 --- /dev/null
598 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
599 @@ -0,0 +1,352 @@
600 +
601 +/* Copyright 2015 Freescale Semiconductor Inc.
602 + *
603 + * Redistribution and use in source and binary forms, with or without
604 + * modification, are permitted provided that the following conditions are met:
605 + * * Redistributions of source code must retain the above copyright
606 + * notice, this list of conditions and the following disclaimer.
607 + * * Redistributions in binary form must reproduce the above copyright
608 + * notice, this list of conditions and the following disclaimer in the
609 + * documentation and/or other materials provided with the distribution.
610 + * * Neither the name of Freescale Semiconductor nor the
611 + * names of its contributors may be used to endorse or promote products
612 + * derived from this software without specific prior written permission.
613 + *
614 + *
615 + * ALTERNATIVELY, this software may be distributed under the terms of the
616 + * GNU General Public License ("GPL") as published by the Free Software
617 + * Foundation, either version 2 of that License or (at your option) any
618 + * later version.
619 + *
620 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
621 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
622 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
623 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
624 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
625 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
626 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
627 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
628 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
629 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
630 + */
631 +
632 +#include <linux/module.h>
633 +#include <linux/debugfs.h>
634 +#include "dpaa2-eth.h"
635 +#include "dpaa2-eth-debugfs.h"
636 +
637 +#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
638 +
639 +static struct dentry *dpaa2_dbg_root;
640 +
641 +static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
642 +{
643 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
644 + struct rtnl_link_stats64 *stats;
645 + struct dpaa2_eth_drv_stats *extras;
646 + int i;
647 +
648 + seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
649 + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s\n",
650 + "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
651 + "Tx SG", "Enq busy");
652 +
653 + for_each_online_cpu(i) {
654 + stats = per_cpu_ptr(priv->percpu_stats, i);
655 + extras = per_cpu_ptr(priv->percpu_extras, i);
656 + seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
657 + i,
658 + stats->rx_packets,
659 + stats->rx_errors,
660 + extras->rx_sg_frames,
661 + stats->tx_packets,
662 + stats->tx_errors,
663 + extras->tx_conf_frames,
664 + extras->tx_sg_frames,
665 + extras->tx_portal_busy);
666 + }
667 +
668 + return 0;
669 +}
670 +
671 +static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
672 +{
673 + int err;
674 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
675 +
676 + err = single_open(file, dpaa2_dbg_cpu_show, priv);
677 + if (err < 0)
678 + netdev_err(priv->net_dev, "single_open() failed\n");
679 +
680 + return err;
681 +}
682 +
683 +static const struct file_operations dpaa2_dbg_cpu_ops = {
684 + .open = dpaa2_dbg_cpu_open,
685 + .read = seq_read,
686 + .llseek = seq_lseek,
687 + .release = single_release,
688 +};
689 +
690 +static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
691 +{
692 + switch (fq->type) {
693 + case DPAA2_RX_FQ:
694 + return "Rx";
695 + case DPAA2_TX_CONF_FQ:
696 + return "Tx conf";
697 + case DPAA2_RX_ERR_FQ:
698 + return "Rx err";
699 + default:
700 + return "N/A";
701 + }
702 +}
703 +
704 +static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
705 +{
706 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
707 + struct dpaa2_eth_fq *fq;
708 + u32 fcnt, bcnt;
709 + int i, err;
710 +
711 + seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
712 + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
713 + "VFQID", "CPU", "Traffic Class", "Type", "Frames",
714 + "Pending frames", "Congestion");
715 +
716 + for (i = 0; i < priv->num_fqs; i++) {
717 + fq = &priv->fq[i];
718 + err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
719 + if (err)
720 + fcnt = 0;
721 +
722 + seq_printf(file, "%5d%16d%16d%16s%16llu%16u%16llu\n",
723 + fq->fqid,
724 + fq->target_cpu,
725 + fq->tc,
726 + fq_type_to_str(fq),
727 + fq->stats.frames,
728 + fcnt,
729 + fq->stats.congestion_entry);
730 + }
731 +
732 + return 0;
733 +}
734 +
735 +static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
736 +{
737 + int err;
738 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
739 +
740 + err = single_open(file, dpaa2_dbg_fqs_show, priv);
741 + if (err < 0)
742 + netdev_err(priv->net_dev, "single_open() failed\n");
743 +
744 + return err;
745 +}
746 +
747 +static const struct file_operations dpaa2_dbg_fq_ops = {
748 + .open = dpaa2_dbg_fqs_open,
749 + .read = seq_read,
750 + .llseek = seq_lseek,
751 + .release = single_release,
752 +};
753 +
754 +static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
755 +{
756 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
757 + struct dpaa2_eth_channel *ch;
758 + int i;
759 +
760 + seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
761 + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
762 + "CHID", "CPU", "Deq busy", "Frames", "CDANs",
763 + "Avg frm/CDAN", "Buf count");
764 +
765 + for (i = 0; i < priv->num_channels; i++) {
766 + ch = priv->channel[i];
767 + seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
768 + ch->ch_id,
769 + ch->nctx.desired_cpu,
770 + ch->stats.dequeue_portal_busy,
771 + ch->stats.frames,
772 + ch->stats.cdan,
773 +			   ch->stats.cdan ? ch->stats.frames / ch->stats.cdan : 0,
774 + ch->buf_count);
775 + }
776 +
777 + return 0;
778 +}
779 +
780 +static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
781 +{
782 + int err;
783 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
784 +
785 + err = single_open(file, dpaa2_dbg_ch_show, priv);
786 + if (err < 0)
787 + netdev_err(priv->net_dev, "single_open() failed\n");
788 +
789 + return err;
790 +}
791 +
792 +static const struct file_operations dpaa2_dbg_ch_ops = {
793 + .open = dpaa2_dbg_ch_open,
794 + .read = seq_read,
795 + .llseek = seq_lseek,
796 + .release = single_release,
797 +};
798 +
799 +static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf,
800 + size_t count, loff_t *offset)
801 +{
802 + struct dpaa2_eth_priv *priv = file->private_data;
803 + struct rtnl_link_stats64 *percpu_stats;
804 + struct dpaa2_eth_drv_stats *percpu_extras;
805 + struct dpaa2_eth_fq *fq;
806 + struct dpaa2_eth_channel *ch;
807 + int i;
808 +
809 + for_each_online_cpu(i) {
810 + percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
811 + memset(percpu_stats, 0, sizeof(*percpu_stats));
812 +
813 + percpu_extras = per_cpu_ptr(priv->percpu_extras, i);
814 + memset(percpu_extras, 0, sizeof(*percpu_extras));
815 + }
816 +
817 + for (i = 0; i < priv->num_fqs; i++) {
818 + fq = &priv->fq[i];
819 + memset(&fq->stats, 0, sizeof(fq->stats));
820 + }
821 +
822 + for (i = 0; i < priv->num_channels; i++) {
823 + ch = priv->channel[i];
824 + memset(&ch->stats, 0, sizeof(ch->stats));
825 + }
826 +
827 + return count;
828 +}
829 +
830 +static const struct file_operations dpaa2_dbg_reset_ops = {
831 + .open = simple_open,
832 + .write = dpaa2_dbg_reset_write,
833 +};
834 +
835 +static ssize_t dpaa2_dbg_reset_mc_write(struct file *file,
836 + const char __user *buf,
837 + size_t count, loff_t *offset)
838 +{
839 + struct dpaa2_eth_priv *priv = file->private_data;
840 + int err;
841 +
842 + err = dpni_reset_statistics(priv->mc_io, 0, priv->mc_token);
843 + if (err)
844 + netdev_err(priv->net_dev,
845 + "dpni_reset_statistics() failed %d\n", err);
846 +
847 + return count;
848 +}
849 +
850 +static const struct file_operations dpaa2_dbg_reset_mc_ops = {
851 + .open = simple_open,
852 + .write = dpaa2_dbg_reset_mc_write,
853 +};
854 +
855 +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
856 +{
857 + if (!dpaa2_dbg_root)
858 + return;
859 +
860 + /* Create a directory for the interface */
861 + priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
862 + dpaa2_dbg_root);
863 + if (!priv->dbg.dir) {
864 + netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
865 + return;
866 + }
867 +
868 + /* per-cpu stats file */
869 + priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
870 + priv->dbg.dir, priv,
871 + &dpaa2_dbg_cpu_ops);
872 + if (!priv->dbg.cpu_stats) {
873 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
874 + goto err_cpu_stats;
875 + }
876 +
877 + /* per-fq stats file */
878 + priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
879 + priv->dbg.dir, priv,
880 + &dpaa2_dbg_fq_ops);
881 + if (!priv->dbg.fq_stats) {
882 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
883 + goto err_fq_stats;
884 + }
885 +
886 +	/* per-channel stats file */
887 + priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
888 + priv->dbg.dir, priv,
889 + &dpaa2_dbg_ch_ops);
890 +	if (!priv->dbg.ch_stats) {
891 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
892 + goto err_ch_stats;
893 + }
894 +
895 + /* reset stats */
896 + priv->dbg.reset_stats = debugfs_create_file("reset_stats", 0200,
897 + priv->dbg.dir, priv,
898 + &dpaa2_dbg_reset_ops);
899 + if (!priv->dbg.reset_stats) {
900 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
901 + goto err_reset_stats;
902 + }
903 +
904 + /* reset MC stats */
905 + priv->dbg.reset_mc_stats = debugfs_create_file("reset_mc_stats",
906 + 0222, priv->dbg.dir, priv,
907 + &dpaa2_dbg_reset_mc_ops);
908 + if (!priv->dbg.reset_mc_stats) {
909 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
910 + goto err_reset_mc_stats;
911 + }
912 +
913 + return;
914 +
915 +err_reset_mc_stats:
916 + debugfs_remove(priv->dbg.reset_stats);
917 +err_reset_stats:
918 + debugfs_remove(priv->dbg.ch_stats);
919 +err_ch_stats:
920 + debugfs_remove(priv->dbg.fq_stats);
921 +err_fq_stats:
922 + debugfs_remove(priv->dbg.cpu_stats);
923 +err_cpu_stats:
924 + debugfs_remove(priv->dbg.dir);
925 +}
926 +
927 +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
928 +{
929 + debugfs_remove(priv->dbg.reset_mc_stats);
930 + debugfs_remove(priv->dbg.reset_stats);
931 + debugfs_remove(priv->dbg.fq_stats);
932 + debugfs_remove(priv->dbg.ch_stats);
933 + debugfs_remove(priv->dbg.cpu_stats);
934 + debugfs_remove(priv->dbg.dir);
935 +}
936 +
937 +void dpaa2_eth_dbg_init(void)
938 +{
939 + dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
940 + if (!dpaa2_dbg_root) {
941 + pr_err("DPAA2-ETH: debugfs create failed\n");
942 + return;
943 + }
944 +
945 + pr_info("DPAA2-ETH: debugfs created\n");
946 +}
947 +
948 +void __exit dpaa2_eth_dbg_exit(void)
949 +{
950 + debugfs_remove(dpaa2_dbg_root);
951 +}
952 --- /dev/null
953 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
954 @@ -0,0 +1,60 @@
955 +/* Copyright 2015 Freescale Semiconductor Inc.
956 + *
957 + * Redistribution and use in source and binary forms, with or without
958 + * modification, are permitted provided that the following conditions are met:
959 + * * Redistributions of source code must retain the above copyright
960 + * notice, this list of conditions and the following disclaimer.
961 + * * Redistributions in binary form must reproduce the above copyright
962 + * notice, this list of conditions and the following disclaimer in the
963 + * documentation and/or other materials provided with the distribution.
964 + * * Neither the name of Freescale Semiconductor nor the
965 + * names of its contributors may be used to endorse or promote products
966 + * derived from this software without specific prior written permission.
967 + *
968 + *
969 + * ALTERNATIVELY, this software may be distributed under the terms of the
970 + * GNU General Public License ("GPL") as published by the Free Software
971 + * Foundation, either version 2 of that License or (at your option) any
972 + * later version.
973 + *
974 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
975 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
976 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
977 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
978 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
979 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
980 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
981 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
982 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
983 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
984 + */
985 +
986 +#ifndef DPAA2_ETH_DEBUGFS_H
987 +#define DPAA2_ETH_DEBUGFS_H
988 +
989 +#include <linux/dcache.h>
990 +
991 +struct dpaa2_eth_priv;
992 +
993 +struct dpaa2_debugfs {
994 + struct dentry *dir;
995 + struct dentry *fq_stats;
996 + struct dentry *ch_stats;
997 + struct dentry *cpu_stats;
998 + struct dentry *reset_stats;
999 + struct dentry *reset_mc_stats;
1000 +};
1001 +
1002 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
1003 +void dpaa2_eth_dbg_init(void);
1004 +void dpaa2_eth_dbg_exit(void);
1005 +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
1006 +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
1007 +#else
1008 +static inline void dpaa2_eth_dbg_init(void) {}
1009 +static inline void dpaa2_eth_dbg_exit(void) {}
1010 +static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
1011 +static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
1012 +#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */
1013 +
1014 +#endif /* DPAA2_ETH_DEBUGFS_H */
1015 --- /dev/null
1016 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
1017 @@ -0,0 +1,184 @@
1018 +/* Copyright 2014-2015 Freescale Semiconductor Inc.
1019 + *
1020 + * Redistribution and use in source and binary forms, with or without
1021 + * modification, are permitted provided that the following conditions are met:
1022 + * * Redistributions of source code must retain the above copyright
1023 + * notice, this list of conditions and the following disclaimer.
1024 + * * Redistributions in binary form must reproduce the above copyright
1025 + * notice, this list of conditions and the following disclaimer in the
1026 + * documentation and/or other materials provided with the distribution.
1027 + * * Neither the name of Freescale Semiconductor nor the
1028 + * names of its contributors may be used to endorse or promote products
1029 + * derived from this software without specific prior written permission.
1030 + *
1031 + *
1032 + * ALTERNATIVELY, this software may be distributed under the terms of the
1033 + * GNU General Public License ("GPL") as published by the Free Software
1034 + * Foundation, either version 2 of that License or (at your option) any
1035 + * later version.
1036 + *
1037 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1038 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1039 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1040 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1041 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1042 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1043 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1044 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1045 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1046 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1047 + */
1048 +
1049 +#undef TRACE_SYSTEM
1050 +#define TRACE_SYSTEM dpaa2_eth
1051 +
1052 +#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
1053 +#define _DPAA2_ETH_TRACE_H
1054 +
1055 +#include <linux/skbuff.h>
1056 +#include <linux/netdevice.h>
1057 +#include <linux/tracepoint.h>
1058 +
1059 +#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u"
1060 +/* trace_printk format for raw buffer event class */
1061 +#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d"
1062 +
1063 +/* This is used to declare a class of events.
1064 + * individual events of this type will be defined below.
1065 + */
1066 +
1067 +/* Store details about a frame descriptor */
1068 +DECLARE_EVENT_CLASS(dpaa2_eth_fd,
1069 + /* Trace function prototype */
1070 + TP_PROTO(struct net_device *netdev,
1071 + const struct dpaa2_fd *fd),
1072 +
1073 + /* Repeat argument list here */
1074 + TP_ARGS(netdev, fd),
1075 +
1076 + /* A structure containing the relevant information we want
1077 + * to record. Declare name and type for each normal element,
1078 + * name, type and size for arrays. Use __string for variable
1079 + * length strings.
1080 + */
1081 + TP_STRUCT__entry(
1082 + __field(u64, fd_addr)
1083 + __field(u32, fd_len)
1084 + __field(u16, fd_offset)
1085 + __string(name, netdev->name)
1086 + ),
1087 +
1088 + /* The function that assigns values to the above declared
1089 + * fields
1090 + */
1091 + TP_fast_assign(
1092 + __entry->fd_addr = dpaa2_fd_get_addr(fd);
1093 + __entry->fd_len = dpaa2_fd_get_len(fd);
1094 + __entry->fd_offset = dpaa2_fd_get_offset(fd);
1095 + __assign_str(name, netdev->name);
1096 + ),
1097 +
1098 + /* This is what gets printed when the trace event is
1099 + * triggered.
1100 + */
1101 + TP_printk(TR_FMT,
1102 + __get_str(name),
1103 + __entry->fd_addr,
1104 + __entry->fd_len,
1105 + __entry->fd_offset)
1106 +);
1107 +
1108 +/* Now declare events of the above type. Format is:
1109 + * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
1110 + */
1111 +
1112 +/* Tx (egress) fd */
1113 +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd,
1114 + TP_PROTO(struct net_device *netdev,
1115 + const struct dpaa2_fd *fd),
1116 +
1117 + TP_ARGS(netdev, fd)
1118 +);
1119 +
1120 +/* Rx fd */
1121 +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
1122 + TP_PROTO(struct net_device *netdev,
1123 + const struct dpaa2_fd *fd),
1124 +
1125 + TP_ARGS(netdev, fd)
1126 +);
1127 +
1128 +/* Tx confirmation fd */
1129 +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
1130 + TP_PROTO(struct net_device *netdev,
1131 + const struct dpaa2_fd *fd),
1132 +
1133 + TP_ARGS(netdev, fd)
1134 +);
1135 +
1136 +/* Log data about raw buffers. Useful for tracing DPBP content. */
1137 +TRACE_EVENT(dpaa2_eth_buf_seed,
1138 + /* Trace function prototype */
1139 + TP_PROTO(struct net_device *netdev,
1140 + /* virtual address and size */
1141 + void *vaddr,
1142 + size_t size,
1143 + /* dma map address and size */
1144 + dma_addr_t dma_addr,
1145 + size_t map_size,
1146 + /* buffer pool id, if relevant */
1147 + u16 bpid),
1148 +
1149 + /* Repeat argument list here */
1150 + TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
1151 +
1152 + /* A structure containing the relevant information we want
1153 + * to record. Declare name and type for each normal element,
1154 + * name, type and size for arrays. Use __string for variable
1155 + * length strings.
1156 + */
1157 + TP_STRUCT__entry(
1158 + __field(void *, vaddr)
1159 + __field(size_t, size)
1160 + __field(dma_addr_t, dma_addr)
1161 + __field(size_t, map_size)
1162 + __field(u16, bpid)
1163 + __string(name, netdev->name)
1164 + ),
1165 +
1166 + /* The function that assigns values to the above declared
1167 + * fields
1168 + */
1169 + TP_fast_assign(
1170 + __entry->vaddr = vaddr;
1171 + __entry->size = size;
1172 + __entry->dma_addr = dma_addr;
1173 + __entry->map_size = map_size;
1174 + __entry->bpid = bpid;
1175 + __assign_str(name, netdev->name);
1176 + ),
1177 +
1178 + /* This is what gets printed when the trace event is
1179 + * triggered.
1180 + */
1181 + TP_printk(TR_BUF_FMT,
1182 + __get_str(name),
1183 + __entry->vaddr,
1184 + __entry->size,
1185 + &__entry->dma_addr,
1186 + __entry->map_size,
1187 + __entry->bpid)
1188 +);
1189 +
1190 +/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
1191 + * The syntax is the same as for DECLARE_EVENT_CLASS().
1192 + */
1193 +
1194 +#endif /* _DPAA2_ETH_TRACE_H */
1195 +
1196 +/* This must be outside ifdef _DPAA2_ETH_TRACE_H */
1197 +#undef TRACE_INCLUDE_PATH
1198 +#define TRACE_INCLUDE_PATH .
1199 +#undef TRACE_INCLUDE_FILE
1200 +#define TRACE_INCLUDE_FILE dpaa2-eth-trace
1201 +#include <trace/define_trace.h>
1202 --- /dev/null
1203 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
1204 @@ -0,0 +1,3516 @@
1205 +/* Copyright 2014-2015 Freescale Semiconductor Inc.
1206 + *
1207 + * Redistribution and use in source and binary forms, with or without
1208 + * modification, are permitted provided that the following conditions are met:
1209 + * * Redistributions of source code must retain the above copyright
1210 + * notice, this list of conditions and the following disclaimer.
1211 + * * Redistributions in binary form must reproduce the above copyright
1212 + * notice, this list of conditions and the following disclaimer in the
1213 + * documentation and/or other materials provided with the distribution.
1214 + * * Neither the name of Freescale Semiconductor nor the
1215 + * names of its contributors may be used to endorse or promote products
1216 + * derived from this software without specific prior written permission.
1217 + *
1218 + *
1219 + * ALTERNATIVELY, this software may be distributed under the terms of the
1220 + * GNU General Public License ("GPL") as published by the Free Software
1221 + * Foundation, either version 2 of that License or (at your option) any
1222 + * later version.
1223 + *
1224 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1225 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1226 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1227 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1228 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1229 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1230 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1231 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1232 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1233 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1234 + */
1235 +#include <linux/init.h>
1236 +#include <linux/module.h>
1237 +#include <linux/platform_device.h>
1238 +#include <linux/etherdevice.h>
1239 +#include <linux/of_net.h>
1240 +#include <linux/interrupt.h>
1241 +#include <linux/debugfs.h>
1242 +#include <linux/kthread.h>
1243 +#include <linux/msi.h>
1244 +#include <linux/net_tstamp.h>
1245 +#include <linux/iommu.h>
1246 +
1247 +#include "../../fsl-mc/include/dpbp.h"
1248 +#include "../../fsl-mc/include/dpcon.h"
1249 +#include "../../fsl-mc/include/mc.h"
1250 +#include "../../fsl-mc/include/mc-sys.h"
1251 +#include "dpaa2-eth.h"
1252 +#include "dpkg.h"
1253 +
1254 +/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
1255 + * using trace events only need to #include <trace/events/sched.h>
1256 + */
1257 +#define CREATE_TRACE_POINTS
1258 +#include "dpaa2-eth-trace.h"
1259 +
1260 +MODULE_LICENSE("Dual BSD/GPL");
1261 +MODULE_AUTHOR("Freescale Semiconductor, Inc");
1262 +MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
1263 +
1264 +const char dpaa2_eth_drv_version[] = "0.1";
1265 +
1266 +void *dpaa2_eth_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr)
1267 +{
1268 + phys_addr_t phys_addr;
1269 +
1270 + phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
1271 +
1272 + return phys_to_virt(phys_addr);
1273 +}
1274 +
1275 +static void validate_rx_csum(struct dpaa2_eth_priv *priv,
1276 + u32 fd_status,
1277 + struct sk_buff *skb)
1278 +{
1279 + skb_checksum_none_assert(skb);
1280 +
1281 + /* HW checksum validation is disabled, nothing to do here */
1282 + if (!(priv->net_dev->features & NETIF_F_RXCSUM))
1283 + return;
1284 +
1285 + /* Read checksum validation bits */
1286 + if (!((fd_status & DPAA2_FAS_L3CV) &&
1287 + (fd_status & DPAA2_FAS_L4CV)))
1288 + return;
1289 +
1290 + /* Inform the stack there's no need to compute L3/L4 csum anymore */
1291 + skb->ip_summed = CHECKSUM_UNNECESSARY;
1292 +}
1293 +
1294 +/* Free a received FD.
1295 + * Not to be used for Tx conf FDs or on any other paths.
1296 + */
1297 +static void free_rx_fd(struct dpaa2_eth_priv *priv,
1298 + const struct dpaa2_fd *fd,
1299 + void *vaddr)
1300 +{
1301 + struct device *dev = priv->net_dev->dev.parent;
1302 + dma_addr_t addr = dpaa2_fd_get_addr(fd);
1303 + u8 fd_format = dpaa2_fd_get_format(fd);
1304 + struct dpaa2_sg_entry *sgt;
1305 + void *sg_vaddr;
1306 + int i;
1307 +
1308 + /* If single buffer frame, just free the data buffer */
1309 + if (fd_format == dpaa2_fd_single)
1310 + goto free_buf;
1311 + else if (fd_format != dpaa2_fd_sg)
1312 + /* we don't support any other format */
1313 + return;
1314 +
1315 + /* For S/G frames, we first need to free all SG entries */
1316 + sgt = vaddr + dpaa2_fd_get_offset(fd);
1317 + for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
1318 + addr = dpaa2_sg_get_addr(&sgt[i]);
1319 + sg_vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
1320 +
1321 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
1322 + DMA_FROM_DEVICE);
1323 +
1324 + put_page(virt_to_head_page(sg_vaddr));
1325 +
1326 + if (dpaa2_sg_is_final(&sgt[i]))
1327 + break;
1328 + }
1329 +
1330 +free_buf:
1331 + put_page(virt_to_head_page(vaddr));
1332 +}
1333 +
1334 +/* Build a linear skb based on a single-buffer frame descriptor */
1335 +static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
1336 + struct dpaa2_eth_channel *ch,
1337 + const struct dpaa2_fd *fd,
1338 + void *fd_vaddr)
1339 +{
1340 + struct sk_buff *skb = NULL;
1341 + u16 fd_offset = dpaa2_fd_get_offset(fd);
1342 + u32 fd_length = dpaa2_fd_get_len(fd);
1343 +
1344 + ch->buf_count--;
1345 +
1346 + skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
1347 + if (unlikely(!skb))
1348 + return NULL;
1349 +
1350 + skb_reserve(skb, fd_offset);
1351 + skb_put(skb, fd_length);
1352 +
1353 + return skb;
1354 +}
1355 +
1356 +/* Build a non linear (fragmented) skb based on a S/G table */
1357 +static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
1358 + struct dpaa2_eth_channel *ch,
1359 + struct dpaa2_sg_entry *sgt)
1360 +{
1361 + struct sk_buff *skb = NULL;
1362 + struct device *dev = priv->net_dev->dev.parent;
1363 + void *sg_vaddr;
1364 + dma_addr_t sg_addr;
1365 + u16 sg_offset;
1366 + u32 sg_length;
1367 + struct page *page, *head_page;
1368 + int page_offset;
1369 + int i;
1370 +
1371 + for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
1372 + struct dpaa2_sg_entry *sge = &sgt[i];
1373 +
1374 + /* NOTE: We only support SG entries in dpaa2_sg_single format,
1375 + * but this is the only format we may receive from HW anyway
1376 + */
1377 +
1378 + /* Get the address and length from the S/G entry */
1379 + sg_addr = dpaa2_sg_get_addr(sge);
1380 + sg_vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, sg_addr);
1381 + dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
1382 + DMA_FROM_DEVICE);
1383 +
1384 + sg_length = dpaa2_sg_get_len(sge);
1385 +
1386 + if (i == 0) {
1387 + /* We build the skb around the first data buffer */
1388 + skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
1389 + if (unlikely(!skb))
1390 + goto err_build;
1391 +
1392 + sg_offset = dpaa2_sg_get_offset(sge);
1393 + skb_reserve(skb, sg_offset);
1394 + skb_put(skb, sg_length);
1395 + } else {
1396 + /* Rest of the data buffers are stored as skb frags */
1397 + page = virt_to_page(sg_vaddr);
1398 + head_page = virt_to_head_page(sg_vaddr);
1399 +
1400 + /* Offset in page (which may be compound).
1401 + * Data in subsequent SG entries is stored from the
1402 + * beginning of the buffer, so we don't need to add the
1403 + * sg_offset.
1404 + */
1405 + page_offset = ((unsigned long)sg_vaddr &
1406 + (PAGE_SIZE - 1)) +
1407 + (page_address(page) - page_address(head_page));
1408 +
1409 + skb_add_rx_frag(skb, i - 1, head_page, page_offset,
1410 + sg_length, DPAA2_ETH_RX_BUF_SIZE);
1411 + }
1412 +
1413 + if (dpaa2_sg_is_final(sge))
1414 + break;
1415 + }
1416 +
1417 + /* Count all data buffers + SG table buffer */
1418 + ch->buf_count -= i + 2;
1419 +
1420 + return skb;
1421 +
1422 +err_build:
1423 + /* We still need to subtract the buffers used by this FD from our
1424 + * software counter
1425 + */
1426 + for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++)
1427 + if (dpaa2_sg_is_final(&sgt[i]))
1428 + break;
1429 + ch->buf_count -= i + 2;
1430 +
1431 + return NULL;
1432 +}
1433 +
1434 +static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
1435 +{
1436 + struct device *dev = priv->net_dev->dev.parent;
1437 + void *vaddr;
1438 + int i;
1439 +
1440 + for (i = 0; i < count; i++) {
1441 + /* Same logic as on regular Rx path */
1442 + vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, buf_array[i]);
1443 + dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
1444 + DMA_FROM_DEVICE);
1445 + put_page(virt_to_head_page(vaddr));
1446 + }
1447 +}
1448 +
1449 +/* Main Rx frame processing routine */
1450 +static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
1451 + struct dpaa2_eth_channel *ch,
1452 + const struct dpaa2_fd *fd,
1453 + struct napi_struct *napi,
1454 + u16 queue_id)
1455 +{
1456 + dma_addr_t addr = dpaa2_fd_get_addr(fd);
1457 + u8 fd_format = dpaa2_fd_get_format(fd);
1458 + void *vaddr;
1459 + struct sk_buff *skb;
1460 + struct rtnl_link_stats64 *percpu_stats;
1461 + struct dpaa2_eth_drv_stats *percpu_extras;
1462 + struct device *dev = priv->net_dev->dev.parent;
1463 + struct dpaa2_fas *fas;
1464 + void *buf_data;
1465 + u32 status = 0;
1466 +
1467 + /* Tracing point */
1468 + trace_dpaa2_rx_fd(priv->net_dev, fd);
1469 +
1470 + vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
1471 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
1472 +
1473 + /* HWA - FAS, timestamp */
1474 + fas = dpaa2_eth_get_fas(vaddr);
1475 + prefetch(fas);
1476 + /* data / SG table */
1477 + buf_data = vaddr + dpaa2_fd_get_offset(fd);
1478 + prefetch(buf_data);
1479 +
1480 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
1481 + percpu_extras = this_cpu_ptr(priv->percpu_extras);
1482 +
1483 + switch (fd_format) {
1484 + case dpaa2_fd_single:
1485 + skb = build_linear_skb(priv, ch, fd, vaddr);
1486 + break;
1487 + case dpaa2_fd_sg:
1488 + skb = build_frag_skb(priv, ch, buf_data);
1489 + put_page(virt_to_head_page(vaddr));
1490 + percpu_extras->rx_sg_frames++;
1491 + percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
1492 + break;
1493 + default:
1494 + /* We don't support any other format */
1495 + goto err_frame_format;
1496 + }
1497 +
1498 + if (unlikely(!skb))
1499 + goto err_build_skb;
1500 +
1501 + prefetch(skb->data);
1502 +
1503 + /* Get the timestamp value */
1504 + if (priv->ts_rx_en) {
1505 + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
1506 + u64 *ns = (u64 *)dpaa2_eth_get_ts(vaddr);
1507 +
1508 + *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
1509 + memset(shhwtstamps, 0, sizeof(*shhwtstamps));
1510 + shhwtstamps->hwtstamp = ns_to_ktime(*ns);
1511 + }
1512 +
1513 + /* Check if we need to validate the L4 csum */
1514 + if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
1515 + status = le32_to_cpu(fas->status);
1516 + validate_rx_csum(priv, status, skb);
1517 + }
1518 +
1519 + skb->protocol = eth_type_trans(skb, priv->net_dev);
1520 +
1521 + /* Record Rx queue - this will be used when picking a Tx queue to
1522 + * forward the frames. We're keeping flow affinity through the
1523 + * network stack.
1524 + */
1525 + skb_record_rx_queue(skb, queue_id);
1526 +
1527 + percpu_stats->rx_packets++;
1528 + percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
1529 +
1530 + napi_gro_receive(napi, skb);
1531 +
1532 + return;
1533 +
1534 +err_build_skb:
1535 + free_rx_fd(priv, fd, vaddr);
1536 +err_frame_format:
1537 + percpu_stats->rx_dropped++;
1538 +}
1539 +
1540 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
1541 +/* Processing of Rx frames received on the error FQ
1542 + * We check and print the error bits and then free the frame
1543 + */
1544 +static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
1545 + struct dpaa2_eth_channel *ch,
1546 + const struct dpaa2_fd *fd,
1547 + struct napi_struct *napi __always_unused,
1548 + u16 queue_id __always_unused)
1549 +{
1550 + struct device *dev = priv->net_dev->dev.parent;
1551 + dma_addr_t addr = dpaa2_fd_get_addr(fd);
1552 + void *vaddr;
1553 + struct rtnl_link_stats64 *percpu_stats;
1554 + struct dpaa2_fas *fas;
1555 + u32 status = 0;
1556 + bool check_fas_errors = false;
1557 +
1558 + vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
1559 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
1560 +
1561 + /* check frame errors in the FD field */
1562 + if (fd->simple.ctrl & DPAA2_FD_RX_ERR_MASK) {
1563 + check_fas_errors = !!(fd->simple.ctrl & FD_CTRL_FAERR) &&
1564 + !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
1565 + if (net_ratelimit())
1566 +			netdev_dbg(priv->net_dev, "Rx frame FD err: 0x%08x\n",
1567 + fd->simple.ctrl & DPAA2_FD_RX_ERR_MASK);
1568 + }
1569 +
1570 + /* check frame errors in the FAS field */
1571 + if (check_fas_errors) {
1572 + fas = dpaa2_eth_get_fas(vaddr);
1573 + status = le32_to_cpu(fas->status);
1574 + if (net_ratelimit())
1575 + netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n",
1576 + status & DPAA2_FAS_RX_ERR_MASK);
1577 + }
1578 + free_rx_fd(priv, fd, vaddr);
1579 +
1580 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
1581 + percpu_stats->rx_errors++;
1582 +}
1583 +#endif
1584 +
1585 +/* Consume all frames pull-dequeued into the store. This is the simplest way to
1586 + * make sure we don't accidentally issue another volatile dequeue which would
1587 + * overwrite (leak) frames already in the store.
1588 + *
1589 + * The number of frames is returned using the last 2 output arguments,
1590 + * separately for Rx and Tx confirmations.
1591 + *
1592 + * Observance of NAPI budget is not our concern, leaving that to the caller.
1593 + */
1594 +static bool consume_frames(struct dpaa2_eth_channel *ch, int *rx_cleaned,
1595 + int *tx_conf_cleaned)
1596 +{
1597 + struct dpaa2_eth_priv *priv = ch->priv;
1598 + struct dpaa2_eth_fq *fq = NULL;
1599 + struct dpaa2_dq *dq;
1600 + const struct dpaa2_fd *fd;
1601 + int cleaned = 0;
1602 + int is_last;
1603 +
1604 + do {
1605 + dq = dpaa2_io_store_next(ch->store, &is_last);
1606 + if (unlikely(!dq)) {
1607 + /* If we're here, we *must* have placed a
1608 +			 * volatile dequeue command, so keep reading through
1609 + * the store until we get some sort of valid response
1610 + * token (either a valid frame or an "empty dequeue")
1611 + */
1612 + continue;
1613 + }
1614 +
1615 + fd = dpaa2_dq_fd(dq);
1616 +
1617 + /* prefetch the frame descriptor */
1618 + prefetch(fd);
1619 +
1620 + fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
1621 + fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
1622 + cleaned++;
1623 + } while (!is_last);
1624 +
1625 + if (!cleaned)
1626 + return false;
1627 +
1628 + /* All frames brought in store by a volatile dequeue
1629 + * come from the same queue
1630 + */
1631 + if (fq->type == DPAA2_TX_CONF_FQ)
1632 + *tx_conf_cleaned += cleaned;
1633 + else
1634 + *rx_cleaned += cleaned;
1635 +
1636 + fq->stats.frames += cleaned;
1637 + ch->stats.frames += cleaned;
1638 +
1639 + return true;
1640 +}
1641 +
1642 +/* Configure the egress frame annotation for timestamp update */
1643 +static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
1644 +{
1645 + struct dpaa2_faead *faead;
1646 + u32 ctrl;
1647 + u32 frc;
1648 +
1649 + /* Mark the egress frame annotation area as valid */
1650 + frc = dpaa2_fd_get_frc(fd);
1651 + dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
1652 +
1653 +	/* enable UPD (update prepended data) bit in FAEAD field of
1654 + * hardware frame annotation area
1655 + */
1656 + ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
1657 + faead = dpaa2_eth_get_faead(buf_start);
1658 + faead->ctrl = cpu_to_le32(ctrl);
1659 +}
1660 +
1661 +/* Create a frame descriptor based on a fragmented skb */
1662 +static int build_sg_fd(struct dpaa2_eth_priv *priv,
1663 + struct sk_buff *skb,
1664 + struct dpaa2_fd *fd)
1665 +{
1666 + struct device *dev = priv->net_dev->dev.parent;
1667 + void *sgt_buf = NULL;
1668 + dma_addr_t addr;
1669 + int nr_frags = skb_shinfo(skb)->nr_frags;
1670 + struct dpaa2_sg_entry *sgt;
1671 + int i, err;
1672 + int sgt_buf_size;
1673 + struct scatterlist *scl, *crt_scl;
1674 + int num_sg;
1675 + int num_dma_bufs;
1676 + struct dpaa2_fas *fas;
1677 + struct dpaa2_eth_swa *swa;
1678 +
1679 + /* Create and map scatterlist.
1680 + * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
1681 + * to go beyond nr_frags+1.
1682 + * Note: We don't support chained scatterlists
1683 + */
1684 + if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
1685 + return -EINVAL;
1686 +
1687 + scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
1688 + if (unlikely(!scl))
1689 + return -ENOMEM;
1690 +
1691 + sg_init_table(scl, nr_frags + 1);
1692 + num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
1693 + num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE);
1694 + if (unlikely(!num_dma_bufs)) {
1695 + err = -ENOMEM;
1696 + goto dma_map_sg_failed;
1697 + }
1698 +
1699 + /* Prepare the HW SGT structure */
1700 + sgt_buf_size = priv->tx_data_offset +
1701 + sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
1702 + sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
1703 + if (unlikely(!sgt_buf)) {
1704 + err = -ENOMEM;
1705 + goto sgt_buf_alloc_failed;
1706 + }
1707 + sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
1708 +
1709 + /* PTA from egress side is passed as is to the confirmation side so
1710 + * we need to clear some fields here in order to find consistent values
1711 + * on TX confirmation. We are clearing FAS (Frame Annotation Status)
1712 + * field from the hardware annotation area
1713 + */
1714 + fas = dpaa2_eth_get_fas(sgt_buf);
1715 + memset(fas, 0, DPAA2_FAS_SIZE);
1716 +
1717 + sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
1718 +
1719 + /* Fill in the HW SGT structure.
1720 + *
1721 + * sgt_buf is zeroed out, so the following fields are implicit
1722 + * in all sgt entries:
1723 + * - offset is 0
1724 + * - format is 'dpaa2_sg_single'
1725 + */
1726 + for_each_sg(scl, crt_scl, num_dma_bufs, i) {
1727 + dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
1728 + dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
1729 + }
1730 + dpaa2_sg_set_final(&sgt[i - 1], true);
1731 +
1732 + /* Store the skb backpointer in the SGT buffer.
1733 + * Fit the scatterlist and the number of buffers alongside the
1734 + * skb backpointer in the software annotation area. We'll need
1735 + * all of them on Tx Conf.
1736 + */
1737 + swa = (struct dpaa2_eth_swa *)sgt_buf;
1738 + swa->skb = skb;
1739 + swa->scl = scl;
1740 + swa->num_sg = num_sg;
1741 + swa->num_dma_bufs = num_dma_bufs;
1742 +
1743 + /* Separately map the SGT buffer */
1744 + addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
1745 + if (unlikely(dma_mapping_error(dev, addr))) {
1746 + err = -ENOMEM;
1747 + goto dma_map_single_failed;
1748 + }
1749 + dpaa2_fd_set_offset(fd, priv->tx_data_offset);
1750 + dpaa2_fd_set_format(fd, dpaa2_fd_sg);
1751 + dpaa2_fd_set_addr(fd, addr);
1752 + dpaa2_fd_set_len(fd, skb->len);
1753 +
1754 + fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA;
1755 +
1756 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
1757 + enable_tx_tstamp(fd, sgt_buf);
1758 +
1759 + return 0;
1760 +
1761 +dma_map_single_failed:
1762 + kfree(sgt_buf);
1763 +sgt_buf_alloc_failed:
1764 + dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
1765 +dma_map_sg_failed:
1766 + kfree(scl);
1767 + return err;
1768 +}
1769 +
1770 +/* Create a frame descriptor based on a linear skb */
1771 +static int build_single_fd(struct dpaa2_eth_priv *priv,
1772 + struct sk_buff *skb,
1773 + struct dpaa2_fd *fd)
1774 +{
1775 + struct device *dev = priv->net_dev->dev.parent;
1776 + u8 *buffer_start;
1777 + struct sk_buff **skbh;
1778 + dma_addr_t addr;
1779 + struct dpaa2_fas *fas;
1780 +
1781 + buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
1782 + DPAA2_ETH_TX_BUF_ALIGN,
1783 + DPAA2_ETH_TX_BUF_ALIGN);
1784 +
1785 + /* PTA from egress side is passed as is to the confirmation side so
1786 + * we need to clear some fields here in order to find consistent values
1787 + * on TX confirmation. We are clearing FAS (Frame Annotation Status)
1788 + * field from the hardware annotation area
1789 + */
1790 + fas = dpaa2_eth_get_fas(buffer_start);
1791 + memset(fas, 0, DPAA2_FAS_SIZE);
1792 +
1793 + /* Store a backpointer to the skb at the beginning of the buffer
1794 + * (in the private data area) such that we can release it
1795 + * on Tx confirm
1796 + */
1797 + skbh = (struct sk_buff **)buffer_start;
1798 + *skbh = skb;
1799 +
1800 + addr = dma_map_single(dev, buffer_start,
1801 + skb_tail_pointer(skb) - buffer_start,
1802 + DMA_BIDIRECTIONAL);
1803 + if (unlikely(dma_mapping_error(dev, addr)))
1804 + return -ENOMEM;
1805 +
1806 + dpaa2_fd_set_addr(fd, addr);
1807 + dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
1808 + dpaa2_fd_set_len(fd, skb->len);
1809 + dpaa2_fd_set_format(fd, dpaa2_fd_single);
1810 +
1811 + fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA;
1812 +
1813 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
1814 + enable_tx_tstamp(fd, buffer_start);
1815 +
1816 + return 0;
1817 +}
1818 +
1819 +/* FD freeing routine on the Tx path
1820 + *
1821 + * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
1822 + * back-pointed to is also freed.
1823 + * This can be called either from dpaa2_eth_tx_conf() or on the error path of
1824 + * dpaa2_eth_tx().
1825 + * Optionally, return the frame annotation status word (FAS), which needs
1826 + * to be checked if we're on the confirmation path.
1827 + */
1828 +static void free_tx_fd(const struct dpaa2_eth_priv *priv,
1829 + const struct dpaa2_fd *fd,
1830 + u32 *status, bool in_napi)
1831 +{
1832 + struct device *dev = priv->net_dev->dev.parent;
1833 + dma_addr_t fd_addr;
1834 + struct sk_buff **skbh, *skb;
1835 + unsigned char *buffer_start;
1836 + int unmap_size;
1837 + struct scatterlist *scl;
1838 + int num_sg, num_dma_bufs;
1839 + struct dpaa2_eth_swa *swa;
1840 + u8 fd_format = dpaa2_fd_get_format(fd);
1841 + struct dpaa2_fas *fas;
1842 +
1843 + fd_addr = dpaa2_fd_get_addr(fd);
1844 + skbh = dpaa2_eth_iova_to_virt(priv->iommu_domain, fd_addr);
1845 +
1846 + /* HWA - FAS, timestamp (for Tx confirmation frames) */
1847 + fas = dpaa2_eth_get_fas(skbh);
1848 + prefetch(fas);
1849 +
1850 + switch (fd_format) {
1851 + case dpaa2_fd_single:
1852 + skb = *skbh;
1853 + buffer_start = (unsigned char *)skbh;
1854 + /* Accessing the skb buffer is safe before dma unmap, because
1855 + * we didn't map the actual skb shell.
1856 + */
1857 + dma_unmap_single(dev, fd_addr,
1858 + skb_tail_pointer(skb) - buffer_start,
1859 + DMA_BIDIRECTIONAL);
1860 + break;
1861 + case dpaa2_fd_sg:
1862 + swa = (struct dpaa2_eth_swa *)skbh;
1863 + skb = swa->skb;
1864 + scl = swa->scl;
1865 + num_sg = swa->num_sg;
1866 + num_dma_bufs = swa->num_dma_bufs;
1867 +
1868 + /* Unmap the scatterlist */
1869 + dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
1870 + kfree(scl);
1871 +
1872 + /* Unmap the SGT buffer */
1873 + unmap_size = priv->tx_data_offset +
1874 + sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
1875 + dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL);
1876 + break;
1877 + default:
1878 + /* Unsupported format, mark it as errored and give up */
1879 + if (status)
1880 + *status = ~0;
1881 + return;
1882 + }
1883 +
1884 + /* Get the timestamp value */
1885 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1886 + struct skb_shared_hwtstamps shhwtstamps;
1887 + u64 *ns;
1888 +
1889 + memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1890 +
1891 + ns = (u64 *)dpaa2_eth_get_ts(skbh);
1892 + *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
1893 + shhwtstamps.hwtstamp = ns_to_ktime(*ns);
1894 + skb_tstamp_tx(skb, &shhwtstamps);
1895 + }
1896 +
1897 + /* Read the status from the Frame Annotation after we unmap the first
1898 + * buffer but before we free it. The caller function is responsible
1899 + * for checking the status value.
1900 + */
1901 + if (status)
1902 + *status = le32_to_cpu(fas->status);
1903 +
1904 + /* Free SGT buffer kmalloc'ed on tx */
1905 + if (fd_format != dpaa2_fd_single)
1906 + kfree(skbh);
1907 +
1908 + /* Move on with skb release */
1909 + napi_consume_skb(skb, in_napi);
1910 +}
1911 +
1912 +static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
1913 +{
1914 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1915 + struct device *dev = net_dev->dev.parent;
1916 + struct dpaa2_fd fd;
1917 + struct rtnl_link_stats64 *percpu_stats;
1918 + struct dpaa2_eth_drv_stats *percpu_extras;
1919 + struct dpaa2_eth_fq *fq;
1920 + u16 queue_mapping = skb_get_queue_mapping(skb);
1921 + int err, i;
1922 +
1923 + /* If we're congested, stop this tx queue; transmission of the
1924 + * current skb happens regardless of congestion state
1925 + */
1926 + fq = &priv->fq[queue_mapping];
1927 +
1928 + dma_sync_single_for_cpu(dev, priv->cscn_dma,
1929 + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
1930 + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
1931 + netif_stop_subqueue(net_dev, queue_mapping);
1932 + fq->stats.congestion_entry++;
1933 + }
1934 +
1935 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
1936 + percpu_extras = this_cpu_ptr(priv->percpu_extras);
1937 +
1938 + if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
1939 + struct sk_buff *ns;
1940 +
1941 + ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
1942 + if (unlikely(!ns)) {
1943 + percpu_stats->tx_dropped++;
1944 + goto err_alloc_headroom;
1945 + }
1946 + dev_kfree_skb(skb);
1947 + skb = ns;
1948 + }
1949 +
1950 + /* We'll be holding a back-reference to the skb until Tx Confirmation;
1951 + * we don't want that overwritten by a concurrent Tx with a cloned skb.
1952 + */
1953 + skb = skb_unshare(skb, GFP_ATOMIC);
1954 + if (unlikely(!skb)) {
1955 + /* skb_unshare() has already freed the skb */
1956 + percpu_stats->tx_dropped++;
1957 + return NETDEV_TX_OK;
1958 + }
1959 +
1960 + /* Setup the FD fields */
1961 + memset(&fd, 0, sizeof(fd));
1962 +
1963 + if (skb_is_nonlinear(skb)) {
1964 + err = build_sg_fd(priv, skb, &fd);
1965 + percpu_extras->tx_sg_frames++;
1966 + percpu_extras->tx_sg_bytes += skb->len;
1967 + } else {
1968 + err = build_single_fd(priv, skb, &fd);
1969 + }
1970 +
1971 + if (unlikely(err)) {
1972 + percpu_stats->tx_dropped++;
1973 + goto err_build_fd;
1974 + }
1975 +
1976 + /* Tracing point */
1977 + trace_dpaa2_tx_fd(net_dev, &fd);
1978 +
1979 + for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
1980 + err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
1981 + fq->tx_qdbin, &fd);
1982 + /* TODO: This doesn't work. Check on simulator.
1983 + * err = dpaa2_io_service_enqueue_fq(NULL,
1984 + * priv->fq[0].fqid_tx, &fd);
1985 + */
1986 + if (err != -EBUSY)
1987 + break;
1988 + }
1989 + percpu_extras->tx_portal_busy += i;
1990 + if (unlikely(err < 0)) {
1991 + percpu_stats->tx_errors++;
1992 + /* Clean up everything, including freeing the skb */
1993 + free_tx_fd(priv, &fd, NULL, false);
1994 + } else {
1995 + percpu_stats->tx_packets++;
1996 + percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
1997 + }
1998 +
1999 + return NETDEV_TX_OK;
2000 +
2001 +err_build_fd:
2002 +err_alloc_headroom:
2003 + dev_kfree_skb(skb);
2004 +
2005 + return NETDEV_TX_OK;
2006 +}
2007 +
2008 +/* Tx confirmation frame processing routine */
2009 +static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
2010 + struct dpaa2_eth_channel *ch,
2011 + const struct dpaa2_fd *fd,
2012 + struct napi_struct *napi __always_unused,
2013 + u16 queue_id)
2014 +{
2015 + struct device *dev = priv->net_dev->dev.parent;
2016 + struct rtnl_link_stats64 *percpu_stats;
2017 + struct dpaa2_eth_drv_stats *percpu_extras;
2018 + u32 status = 0;
2019 + bool errors = !!(fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK);
2020 + bool check_fas_errors = false;
2021 +
2022 + /* Tracing point */
2023 + trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
2024 +
2025 + percpu_extras = this_cpu_ptr(priv->percpu_extras);
2026 + percpu_extras->tx_conf_frames++;
2027 + percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
2028 +
2029 + /* Check congestion state and wake all queues if necessary */
2030 + if (unlikely(__netif_subqueue_stopped(priv->net_dev, queue_id))) {
2031 + dma_sync_single_for_cpu(dev, priv->cscn_dma,
2032 + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
2033 + if (!dpaa2_cscn_state_congested(priv->cscn_mem))
2034 + netif_tx_wake_all_queues(priv->net_dev);
2035 + }
2036 +
2037 + /* check frame errors in the FD field */
2038 + if (unlikely(errors)) {
2039 + check_fas_errors = !!(fd->simple.ctrl & FD_CTRL_FAERR) &&
2040 + !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
2041 + if (net_ratelimit())
2042 +			netdev_dbg(priv->net_dev, "Tx frame FD err: %08x\n",
2043 + fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK);
2044 + }
2045 +
2046 + free_tx_fd(priv, fd, check_fas_errors ? &status : NULL, true);
2047 +
2048 + /* if there are no errors, we're done */
2049 + if (likely(!errors))
2050 + return;
2051 +
2052 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
2053 + /* Tx-conf logically pertains to the egress path. */
2054 + percpu_stats->tx_errors++;
2055 +
2056 + if (net_ratelimit())
2057 +		netdev_dbg(priv->net_dev, "Tx frame FAS err: %08x\n",
2058 + status & DPAA2_FAS_TX_ERR_MASK);
2059 +}
2060 +
2061 +static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
2062 +{
2063 + int err;
2064 +
2065 + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
2066 + DPNI_OFF_RX_L3_CSUM, enable);
2067 + if (err) {
2068 + netdev_err(priv->net_dev,
2069 + "dpni_set_offload() DPNI_OFF_RX_L3_CSUM failed\n");
2070 + return err;
2071 + }
2072 +
2073 + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
2074 + DPNI_OFF_RX_L4_CSUM, enable);
2075 + if (err) {
2076 + netdev_err(priv->net_dev,
2077 + "dpni_set_offload() DPNI_OFF_RX_L4_CSUM failed\n");
2078 + return err;
2079 + }
2080 +
2081 + return 0;
2082 +}
2083 +
2084 +static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
2085 +{
2086 + int err;
2087 +
2088 + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
2089 + DPNI_OFF_TX_L3_CSUM, enable);
2090 + if (err) {
2091 + netdev_err(priv->net_dev,
2092 +			   "dpni_set_offload() DPNI_OFF_TX_L3_CSUM failed\n");
2093 + return err;
2094 + }
2095 +
2096 + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
2097 + DPNI_OFF_TX_L4_CSUM, enable);
2098 + if (err) {
2099 + netdev_err(priv->net_dev,
2100 +			   "dpni_set_offload() DPNI_OFF_TX_L4_CSUM failed\n");
2101 + return err;
2102 + }
2103 +
2104 + return 0;
2105 +}
2106 +
2107 +/* Perform a single release command to add buffers
2108 + * to the specified buffer pool
2109 + */
2110 +static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
2111 +{
2112 + struct device *dev = priv->net_dev->dev.parent;
2113 + u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
2114 + void *buf;
2115 + dma_addr_t addr;
2116 + int i, err;
2117 +
2118 + for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
2119 + /* Allocate buffer visible to WRIOP + skb shared info +
2120 + * alignment padding.
2121 + */
2122 + buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE(priv));
2123 + if (unlikely(!buf))
2124 + goto err_alloc;
2125 +
2126 + buf = PTR_ALIGN(buf, priv->rx_buf_align);
2127 +
2128 + addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
2129 + DMA_FROM_DEVICE);
2130 + if (unlikely(dma_mapping_error(dev, addr)))
2131 + goto err_map;
2132 +
2133 + buf_array[i] = addr;
2134 +
2135 + /* tracing point */
2136 + trace_dpaa2_eth_buf_seed(priv->net_dev,
2137 + buf, DPAA2_ETH_BUF_RAW_SIZE(priv),
2138 + addr, DPAA2_ETH_RX_BUF_SIZE,
2139 + bpid);
2140 + }
2141 +
2142 +release_bufs:
2143 + /* In case the portal is busy, retry until successful */
2144 + while ((err = dpaa2_io_service_release(NULL, bpid,
2145 + buf_array, i)) == -EBUSY)
2146 + cpu_relax();
2147 +
2148 + /* If release command failed, clean up and bail out; not much
2149 + * else we can do about it
2150 + */
2151 + if (unlikely(err)) {
2152 + free_bufs(priv, buf_array, i);
2153 + return 0;
2154 + }
2155 +
2156 + return i;
2157 +
2158 +err_map:
2159 + put_page(virt_to_head_page(buf));
2160 +err_alloc:
2161 + /* If we managed to allocate at least some buffers, release them */
2162 + if (i)
2163 + goto release_bufs;
2164 +
2165 + return 0;
2166 +}
2167 +
2168 +static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
2169 +{
2170 + int i, j;
2171 + int new_count;
2172 +
2173 + /* This is the lazy seeding of Rx buffer pools.
2174 +	 * add_bufs() is also used on the Rx hotpath and calls
2175 + * napi_alloc_frag(). The trouble with that is that it in turn ends up
2176 + * calling this_cpu_ptr(), which mandates execution in atomic context.
2177 + * Rather than splitting up the code, do a one-off preempt disable.
2178 + */
2179 + preempt_disable();
2180 + for (j = 0; j < priv->num_channels; j++) {
2181 + priv->channel[j]->buf_count = 0;
2182 + for (i = 0; i < priv->num_bufs;
2183 + i += DPAA2_ETH_BUFS_PER_CMD) {
2184 + new_count = add_bufs(priv, bpid);
2185 + priv->channel[j]->buf_count += new_count;
2186 +
2187 + if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
2188 + preempt_enable();
2189 + return -ENOMEM;
2190 + }
2191 + }
2192 + }
2193 + preempt_enable();
2194 +
2195 + return 0;
2196 +}
2197 +
2198 +/**
2199 + * Drain the specified number of buffers from the DPNI's private buffer pool.
2200 + * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
2201 + */
2202 +static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
2203 +{
2204 + u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
2205 + int ret;
2206 +
2207 + do {
2208 + ret = dpaa2_io_service_acquire(NULL, priv->bpid,
2209 + buf_array, count);
2210 + if (ret < 0) {
2211 + netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
2212 + return;
2213 + }
2214 + free_bufs(priv, buf_array, ret);
2215 + } while (ret);
2216 +}
2217 +
2218 +static void drain_pool(struct dpaa2_eth_priv *priv)
2219 +{
2220 + preempt_disable();
2221 + drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
2222 + drain_bufs(priv, 1);
2223 + preempt_enable();
2224 +}
2225 +
2226 +/* Function is called from softirq context only, so we don't need to guard
2227 + * the access to percpu count
2228 + */
2229 +static int refill_pool(struct dpaa2_eth_priv *priv,
2230 + struct dpaa2_eth_channel *ch,
2231 + u16 bpid)
2232 +{
2233 + int new_count;
2234 +
2235 + if (likely(ch->buf_count >= priv->refill_thresh))
2236 + return 0;
2237 +
2238 + do {
2239 + new_count = add_bufs(priv, bpid);
2240 + if (unlikely(!new_count)) {
2241 + /* Out of memory; abort for now, we'll try later on */
2242 + break;
2243 + }
2244 + ch->buf_count += new_count;
2245 + } while (ch->buf_count < priv->num_bufs);
2246 +
2247 + if (unlikely(ch->buf_count < priv->num_bufs))
2248 + return -ENOMEM;
2249 +
2250 + return 0;
2251 +}
2252 +
2253 +static int pull_channel(struct dpaa2_eth_channel *ch)
2254 +{
2255 + int err;
2256 + int dequeues = -1;
2257 +
2258 + /* Retry while portal is busy */
2259 + do {
2260 + err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
2261 + dequeues++;
2262 + cpu_relax();
2263 + } while (err == -EBUSY);
2264 +
2265 + ch->stats.dequeue_portal_busy += dequeues;
2266 + if (unlikely(err))
2267 + ch->stats.pull_err++;
2268 +
2269 + return err;
2270 +}
2271 +
2272 +/* NAPI poll routine
2273 + *
2274 + * Frames are dequeued from the QMan channel associated with this NAPI context.
2275 + * Rx and (if configured) Rx error frames count towards the NAPI budget. Tx
2276 + * confirmation frames are limited by a threshold per NAPI poll cycle.
2277 + */
2278 +static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
2279 +{
2280 + struct dpaa2_eth_channel *ch;
2281 + int rx_cleaned = 0, tx_conf_cleaned = 0;
2282 + bool store_cleaned;
2283 + struct dpaa2_eth_priv *priv;
2284 + int err;
2285 +
2286 + ch = container_of(napi, struct dpaa2_eth_channel, napi);
2287 + priv = ch->priv;
2288 +
2289 + do {
2290 + err = pull_channel(ch);
2291 + if (unlikely(err))
2292 + break;
2293 +
2294 + /* Refill pool if appropriate */
2295 + refill_pool(priv, ch, priv->bpid);
2296 +
2297 + store_cleaned = consume_frames(ch, &rx_cleaned,
2298 + &tx_conf_cleaned);
2299 +
2300 + /* If we've either consumed the budget with Rx frames,
2301 + * or reached the Tx conf threshold, we're done.
2302 + */
2303 + if (rx_cleaned >= budget ||
2304 + tx_conf_cleaned >= TX_CONF_PER_NAPI_POLL)
2305 + return budget;
2306 + } while (store_cleaned);
2307 +
2308 + /* We didn't consume the entire budget, finish napi and
2309 + * re-enable data availability notifications.
2310 + */
2311 + napi_complete(napi);
2312 + do {
2313 + err = dpaa2_io_service_rearm(NULL, &ch->nctx);
2314 + cpu_relax();
2315 + } while (err == -EBUSY);
2316 +
2317 + return max(rx_cleaned, 1);
2318 +}
2319 +
2320 +static void enable_ch_napi(struct dpaa2_eth_priv *priv)
2321 +{
2322 + struct dpaa2_eth_channel *ch;
2323 + int i;
2324 +
2325 + for (i = 0; i < priv->num_channels; i++) {
2326 + ch = priv->channel[i];
2327 + napi_enable(&ch->napi);
2328 + }
2329 +}
2330 +
2331 +static void disable_ch_napi(struct dpaa2_eth_priv *priv)
2332 +{
2333 + struct dpaa2_eth_channel *ch;
2334 + int i;
2335 +
2336 + for (i = 0; i < priv->num_channels; i++) {
2337 + ch = priv->channel[i];
2338 + napi_disable(&ch->napi);
2339 + }
2340 +}
2341 +
2342 +static int link_state_update(struct dpaa2_eth_priv *priv)
2343 +{
2344 + struct dpni_link_state state;
2345 + int err;
2346 +
2347 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
2348 + if (unlikely(err)) {
2349 + netdev_err(priv->net_dev,
2350 + "dpni_get_link_state() failed\n");
2351 + return err;
2352 + }
2353 +
2354 +	/* Check link state; speed / duplex changes are not treated yet */
2355 + if (priv->link_state.up == state.up)
2356 + return 0;
2357 +
2358 + priv->link_state = state;
2359 + if (state.up) {
2360 + netif_carrier_on(priv->net_dev);
2361 + netif_tx_start_all_queues(priv->net_dev);
2362 + } else {
2363 + netif_tx_stop_all_queues(priv->net_dev);
2364 + netif_carrier_off(priv->net_dev);
2365 + }
2366 +
2367 + netdev_info(priv->net_dev, "Link Event: state %s",
2368 + state.up ? "up" : "down");
2369 +
2370 + return 0;
2371 +}
2372 +
2373 +static int dpaa2_eth_open(struct net_device *net_dev)
2374 +{
2375 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2376 + int err;
2377 +
2378 + /* We'll only start the txqs when the link is actually ready; make sure
2379 + * we don't race against the link up notification, which may come
2380 + * immediately after dpni_enable();
2381 + */
2382 + netif_tx_stop_all_queues(net_dev);
2383 +
2384 + /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
2385 + * return true and cause 'ip link show' to report the LOWER_UP flag,
2386 + * even though the link notification wasn't even received.
2387 + */
2388 + netif_carrier_off(net_dev);
2389 +
2390 + err = seed_pool(priv, priv->bpid);
2391 + if (err) {
2392 + /* Not much to do; the buffer pool, though not filled up,
2393 + * may still contain some buffers which would enable us
2394 + * to limp on.
2395 + */
2396 + netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
2397 + priv->dpbp_dev->obj_desc.id, priv->bpid);
2398 + }
2399 +
2400 + if (priv->tx_pause_frames)
2401 + priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD;
2402 + else
2403 + priv->refill_thresh = DPAA2_ETH_REFILL_THRESH_TD;
2404 +
2405 + err = dpni_enable(priv->mc_io, 0, priv->mc_token);
2406 + if (err < 0) {
2407 + netdev_err(net_dev, "dpni_enable() failed\n");
2408 + goto enable_err;
2409 + }
2410 +
2411 + /* If the DPMAC object has already processed the link up interrupt,
2412 + * we have to learn the link state ourselves.
2413 + */
2414 + err = link_state_update(priv);
2415 + if (err < 0) {
2416 + netdev_err(net_dev, "Can't update link state\n");
2417 + goto link_state_err;
2418 + }
2419 +
2420 + return 0;
2421 +
2422 +link_state_err:
2423 +enable_err:
2424 + priv->refill_thresh = 0;
2425 + drain_pool(priv);
2426 + return err;
2427 +}
2428 +
2429 +static int dpaa2_eth_stop(struct net_device *net_dev)
2430 +{
2431 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2432 + int dpni_enabled;
2433 + int retries = 10, i;
2434 +
2435 + netif_tx_stop_all_queues(net_dev);
2436 + netif_carrier_off(net_dev);
2437 +
2438 + /* Loop while dpni_disable() attempts to drain the egress FQs
2439 + * and confirm them back to us.
2440 + */
2441 + do {
2442 + dpni_disable(priv->mc_io, 0, priv->mc_token);
2443 + dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
2444 + if (dpni_enabled)
2445 + /* Allow the MC some slack */
2446 + msleep(100);
2447 + } while (dpni_enabled && --retries);
2448 + if (!retries) {
2449 + netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
2450 + /* Must go on and disable NAPI nonetheless, so we don't crash at
2451 + * the next "ifconfig up"
2452 + */
2453 + }
2454 +
2455 + priv->refill_thresh = 0;
2456 +
2457 + /* Wait for all running napi poll routines to finish, so that no
2458 + * new refill operations are started.
2459 + */
2460 + for (i = 0; i < priv->num_channels; i++)
2461 + napi_synchronize(&priv->channel[i]->napi);
2462 +
2463 + /* Empty the buffer pool */
2464 + drain_pool(priv);
2465 +
2466 + return 0;
2467 +}
2468 +
2469 +static int dpaa2_eth_init(struct net_device *net_dev)
2470 +{
2471 + u64 supported = 0;
2472 + u64 not_supported = 0;
2473 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2474 + u32 options = priv->dpni_attrs.options;
2475 +
2476 + /* Capabilities listing */
2477 + supported |= IFF_LIVE_ADDR_CHANGE;
2478 +
2479 + if (options & DPNI_OPT_NO_MAC_FILTER)
2480 + not_supported |= IFF_UNICAST_FLT;
2481 + else
2482 + supported |= IFF_UNICAST_FLT;
2483 +
2484 + net_dev->priv_flags |= supported;
2485 + net_dev->priv_flags &= ~not_supported;
2486 +
2487 + /* Features */
2488 + net_dev->features = NETIF_F_RXCSUM |
2489 + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2490 + NETIF_F_SG | NETIF_F_HIGHDMA |
2491 + NETIF_F_LLTX;
2492 + net_dev->hw_features = net_dev->features;
2493 +
2494 + return 0;
2495 +}
2496 +
2497 +static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
2498 +{
2499 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2500 + struct device *dev = net_dev->dev.parent;
2501 + int err;
2502 +
2503 + err = eth_mac_addr(net_dev, addr);
2504 + if (err < 0) {
2505 + dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
2506 + return err;
2507 + }
2508 +
2509 + err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2510 + net_dev->dev_addr);
2511 + if (err) {
2512 + dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
2513 + return err;
2514 + }
2515 +
2516 + return 0;
2517 +}
2518 +
2519 +/** Fill in counters maintained by the GPP driver. These may be different from
2520 + * the hardware counters obtained by ethtool.
2521 + */
2522 +static struct rtnl_link_stats64 *dpaa2_eth_get_stats(struct net_device *net_dev,
2523 + struct rtnl_link_stats64 *stats)
2524 +{
2525 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2526 + struct rtnl_link_stats64 *percpu_stats;
2527 + u64 *cpustats;
2528 + u64 *netstats = (u64 *)stats;
2529 + int i, j;
2530 + int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
2531 +
2532 + for_each_possible_cpu(i) {
2533 + percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
2534 + cpustats = (u64 *)percpu_stats;
2535 + for (j = 0; j < num; j++)
2536 + netstats[j] += cpustats[j];
2537 + }
2538 + return stats;
2539 +}
2540 +
2541 +static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
2542 +{
2543 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2544 + int err;
2545 +
2546 + /* Set the maximum Rx frame length to match the transmit side;
2547 + * account for L2 headers when computing the MFL
2548 + */
2549 + err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
2550 + (u16)DPAA2_ETH_L2_MAX_FRM(mtu));
2551 + if (err) {
2552 + netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
2553 + return err;
2554 + }
2555 +
2556 + net_dev->mtu = mtu;
2557 + return 0;
2558 +}
2559 +
2560 +/* Copy mac unicast addresses from @net_dev to @priv.
2561 + * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
2562 + */
2563 +static void add_uc_hw_addr(const struct net_device *net_dev,
2564 + struct dpaa2_eth_priv *priv)
2565 +{
2566 + struct netdev_hw_addr *ha;
2567 + int err;
2568 +
2569 + netdev_for_each_uc_addr(ha, net_dev) {
2570 + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
2571 + ha->addr);
2572 + if (err)
2573 + netdev_warn(priv->net_dev,
2574 + "Could not add ucast MAC %pM to the filtering table (err %d)\n",
2575 + ha->addr, err);
2576 + }
2577 +}
2578 +
2579 +/* Copy mac multicast addresses from @net_dev to @priv
2580 + * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
2581 + */
2582 +static void add_mc_hw_addr(const struct net_device *net_dev,
2583 + struct dpaa2_eth_priv *priv)
2584 +{
2585 + struct netdev_hw_addr *ha;
2586 + int err;
2587 +
2588 + netdev_for_each_mc_addr(ha, net_dev) {
2589 + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
2590 + ha->addr);
2591 + if (err)
2592 + netdev_warn(priv->net_dev,
2593 + "Could not add mcast MAC %pM to the filtering table (err %d)\n",
2594 + ha->addr, err);
2595 + }
2596 +}
2597 +
2598 +static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
2599 +{
2600 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2601 + int uc_count = netdev_uc_count(net_dev);
2602 + int mc_count = netdev_mc_count(net_dev);
2603 + u8 max_mac = priv->dpni_attrs.mac_filter_entries;
2604 + u32 options = priv->dpni_attrs.options;
2605 + u16 mc_token = priv->mc_token;
2606 + struct fsl_mc_io *mc_io = priv->mc_io;
2607 + int err;
2608 +
2609 + /* Basic sanity checks; these probably indicate a misconfiguration */
2610 + if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
2611 + netdev_info(net_dev,
2612 + "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
2613 + max_mac);
2614 +
2615 + /* Force promiscuous if the uc or mc counts exceed our capabilities. */
2616 + if (uc_count > max_mac) {
2617 + netdev_info(net_dev,
2618 + "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
2619 + uc_count, max_mac);
2620 + goto force_promisc;
2621 + }
2622 + if (mc_count + uc_count > max_mac) {
2623 + netdev_info(net_dev,
2624 + "Unicast + Multicast addr count reached %d, max allowed is %d; forcing promisc\n",
2625 + uc_count + mc_count, max_mac);
2626 + goto force_mc_promisc;
2627 + }
2628 +
2629 + /* Adjust promisc settings due to flag combinations */
2630 + if (net_dev->flags & IFF_PROMISC)
2631 + goto force_promisc;
2632 + if (net_dev->flags & IFF_ALLMULTI) {
2633 + /* First, rebuild unicast filtering table. This should be done
2634 + * in promisc mode, in order to avoid frame loss while we
2635 + * progressively add entries to the table.
2636 + * We don't know whether we had been in promisc already, and
2637 + * making an MC call to find out is expensive; so set uc promisc
2638 + * nonetheless.
2639 + */
2640 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2641 + if (err)
2642 + netdev_warn(net_dev, "Can't set uc promisc\n");
2643 +
2644 + /* Actual uc table reconstruction. */
2645 + err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
2646 + if (err)
2647 + netdev_warn(net_dev, "Can't clear uc filters\n");
2648 + add_uc_hw_addr(net_dev, priv);
2649 +
2650 + /* Finally, clear uc promisc and set mc promisc as requested. */
2651 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2652 + if (err)
2653 + netdev_warn(net_dev, "Can't clear uc promisc\n");
2654 + goto force_mc_promisc;
2655 + }
2656 +
2657 + /* Neither unicast, nor multicast promisc will be on... eventually.
2658 + * For now, rebuild mac filtering tables while forcing both of them on.
2659 + */
2660 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2661 + if (err)
2662 + netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
2663 + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2664 + if (err)
2665 + netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
2666 +
2667 + /* Actual mac filtering tables reconstruction */
2668 + err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
2669 + if (err)
2670 + netdev_warn(net_dev, "Can't clear mac filters\n");
2671 + add_mc_hw_addr(net_dev, priv);
2672 + add_uc_hw_addr(net_dev, priv);
2673 +
2674 + /* Now we can clear both ucast and mcast promisc, without risking
2675 + * to drop legitimate frames anymore.
2676 + */
2677 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2678 + if (err)
2679 + netdev_warn(net_dev, "Can't clear ucast promisc\n");
2680 + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
2681 + if (err)
2682 + netdev_warn(net_dev, "Can't clear mcast promisc\n");
2683 +
2684 + return;
2685 +
2686 +force_promisc:
2687 + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2688 + if (err)
2689 + netdev_warn(net_dev, "Can't set ucast promisc\n");
2690 +force_mc_promisc:
2691 + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2692 + if (err)
2693 + netdev_warn(net_dev, "Can't set mcast promisc\n");
2694 +}
2695 +
2696 +static int dpaa2_eth_set_features(struct net_device *net_dev,
2697 + netdev_features_t features)
2698 +{
2699 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2700 + netdev_features_t changed = features ^ net_dev->features;
2701 + bool enable;
2702 + int err;
2703 +
2704 + if (changed & NETIF_F_RXCSUM) {
2705 + enable = !!(features & NETIF_F_RXCSUM);
2706 + err = set_rx_csum(priv, enable);
2707 + if (err)
2708 + return err;
2709 + }
2710 +
2711 + if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
2712 + enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
2713 + err = set_tx_csum(priv, enable);
2714 + if (err)
2715 + return err;
2716 + }
2717 +
2718 + return 0;
2719 +}
2720 +
2721 +static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2722 +{
2723 + struct dpaa2_eth_priv *priv = netdev_priv(dev);
2724 + struct hwtstamp_config config;
2725 +
2726 + if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
2727 + return -EFAULT;
2728 +
2729 + switch (config.tx_type) {
2730 + case HWTSTAMP_TX_OFF:
2731 + priv->ts_tx_en = false;
2732 + break;
2733 + case HWTSTAMP_TX_ON:
2734 + priv->ts_tx_en = true;
2735 + break;
2736 + default:
2737 + return -ERANGE;
2738 + }
2739 +
2740 + if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
2741 + priv->ts_rx_en = false;
2742 + } else {
2743 + priv->ts_rx_en = true;
2744 + /* TS is set for all frame types, not only those requested */
2745 + config.rx_filter = HWTSTAMP_FILTER_ALL;
2746 + }
2747 +
2748 + return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
2749 + -EFAULT : 0;
2750 +}
2751 +
2752 +static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2753 +{
2754 + if (cmd == SIOCSHWTSTAMP)
2755 + return dpaa2_eth_ts_ioctl(dev, rq, cmd);
2756 +
2757 + return -EINVAL;
2758 +}
2759 +
2760 +static const struct net_device_ops dpaa2_eth_ops = {
2761 + .ndo_open = dpaa2_eth_open,
2762 + .ndo_start_xmit = dpaa2_eth_tx,
2763 + .ndo_stop = dpaa2_eth_stop,
2764 + .ndo_init = dpaa2_eth_init,
2765 + .ndo_set_mac_address = dpaa2_eth_set_addr,
2766 + .ndo_get_stats64 = dpaa2_eth_get_stats,
2767 + .ndo_change_mtu = dpaa2_eth_change_mtu,
2768 + .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
2769 + .ndo_set_features = dpaa2_eth_set_features,
2770 + .ndo_do_ioctl = dpaa2_eth_ioctl,
2771 +};
2772 +
2773 +static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
2774 +{
2775 + struct dpaa2_eth_channel *ch;
2776 +
2777 + ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
2778 +
2779 + /* Update NAPI statistics */
2780 + ch->stats.cdan++;
2781 +
2782 + napi_schedule_irqoff(&ch->napi);
2783 +}
2784 +
2785 +/* Allocate and configure a DPCON object */
2786 +static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
2787 +{
2788 + struct fsl_mc_device *dpcon;
2789 + struct device *dev = priv->net_dev->dev.parent;
2790 + struct dpcon_attr attrs;
2791 + int err;
2792 +
2793 + err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
2794 + FSL_MC_POOL_DPCON, &dpcon);
2795 + if (err) {
2796 + dev_info(dev, "Not enough DPCONs, will go on as-is\n");
2797 + return NULL;
2798 + }
2799 +
2800 + err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
2801 + if (err) {
2802 + dev_err(dev, "dpcon_open() failed\n");
2803 + goto err_open;
2804 + }
2805 +
2806 + err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
2807 + if (err) {
2808 + dev_err(dev, "dpcon_reset() failed\n");
2809 + goto err_reset;
2810 + }
2811 +
2812 + err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
2813 + if (err) {
2814 + dev_err(dev, "dpcon_get_attributes() failed\n");
2815 + goto err_get_attr;
2816 + }
2817 +
2818 + err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
2819 + if (err) {
2820 + dev_err(dev, "dpcon_enable() failed\n");
2821 + goto err_enable;
2822 + }
2823 +
2824 + return dpcon;
2825 +
2826 +err_enable:
2827 +err_get_attr:
2828 +err_reset:
2829 + dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2830 +err_open:
2831 + fsl_mc_object_free(dpcon);
2832 +
2833 + return NULL;
2834 +}
2835 +
2836 +static void free_dpcon(struct dpaa2_eth_priv *priv,
2837 + struct fsl_mc_device *dpcon)
2838 +{
2839 + dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
2840 + dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2841 + fsl_mc_object_free(dpcon);
2842 +}
2843 +
2844 +static struct dpaa2_eth_channel *
2845 +alloc_channel(struct dpaa2_eth_priv *priv)
2846 +{
2847 + struct dpaa2_eth_channel *channel;
2848 + struct dpcon_attr attr;
2849 + struct device *dev = priv->net_dev->dev.parent;
2850 + int err;
2851 +
2852 + channel = kzalloc(sizeof(*channel), GFP_KERNEL);
2853 + if (!channel)
2854 + return NULL;
2855 +
2856 + channel->dpcon = setup_dpcon(priv);
2857 + if (!channel->dpcon)
2858 + goto err_setup;
2859 +
2860 + err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
2861 + &attr);
2862 + if (err) {
2863 + dev_err(dev, "dpcon_get_attributes() failed\n");
2864 + goto err_get_attr;
2865 + }
2866 +
2867 + channel->dpcon_id = attr.id;
2868 + channel->ch_id = attr.qbman_ch_id;
2869 + channel->priv = priv;
2870 +
2871 + return channel;
2872 +
2873 +err_get_attr:
2874 + free_dpcon(priv, channel->dpcon);
2875 +err_setup:
2876 + kfree(channel);
2877 + return NULL;
2878 +}
2879 +
2880 +static void free_channel(struct dpaa2_eth_priv *priv,
2881 + struct dpaa2_eth_channel *channel)
2882 +{
2883 + free_dpcon(priv, channel->dpcon);
2884 + kfree(channel);
2885 +}
2886 +
2887 +/* DPIO setup: allocate and configure QBMan channels, setup core affinity
2888 + * and register data availability notifications
2889 + */
2890 +static int setup_dpio(struct dpaa2_eth_priv *priv)
2891 +{
2892 + struct dpaa2_io_notification_ctx *nctx;
2893 + struct dpaa2_eth_channel *channel;
2894 + struct dpcon_notification_cfg dpcon_notif_cfg;
2895 + struct device *dev = priv->net_dev->dev.parent;
2896 + int i, err;
2897 +
2898 + /* We want the ability to spread ingress traffic (RX, TX conf) to as
2899 + * many cores as possible, so we need one channel for each core
2900 + * (unless there's fewer queues than cores, in which case the extra
2901 + * channels would be wasted).
2902 + * Allocate one channel per core and register it to the core's
2903 + * affine DPIO. If not enough channels are available for all cores
2904 + * or if some cores don't have an affine DPIO, there will be no
2905 + * ingress frame processing on those cores.
2906 + */
2907 + cpumask_clear(&priv->dpio_cpumask);
2908 + for_each_online_cpu(i) {
2909 + /* Try to allocate a channel */
2910 + channel = alloc_channel(priv);
2911 + if (!channel) {
2912 + dev_info(dev,
2913 + "No affine channel for cpu %d and above\n", i);
2914 + goto err_alloc_ch;
2915 + }
2916 +
2917 + priv->channel[priv->num_channels] = channel;
2918 +
2919 + nctx = &channel->nctx;
2920 + nctx->is_cdan = 1;
2921 + nctx->cb = cdan_cb;
2922 + nctx->id = channel->ch_id;
2923 + nctx->desired_cpu = i;
2924 +
2925 + /* Register the new context */
2926 + err = dpaa2_io_service_register(NULL, nctx);
2927 + if (err) {
2928 + dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
2929 + /* If no affine DPIO for this core, there's probably
2930 + * none available for next cores either.
2931 + */
2932 + goto err_service_reg;
2933 + }
2934 +
2935 + /* Register DPCON notification with MC */
2936 + dpcon_notif_cfg.dpio_id = nctx->dpio_id;
2937 + dpcon_notif_cfg.priority = 0;
2938 + dpcon_notif_cfg.user_ctx = nctx->qman64;
2939 + err = dpcon_set_notification(priv->mc_io, 0,
2940 + channel->dpcon->mc_handle,
2941 + &dpcon_notif_cfg);
2942 + if (err) {
2943 +			dev_err(dev, "dpcon_set_notification() failed\n");
2944 + goto err_set_cdan;
2945 + }
2946 +
2947 + /* If we managed to allocate a channel and also found an affine
2948 + * DPIO for this core, add it to the final mask
2949 + */
2950 + cpumask_set_cpu(i, &priv->dpio_cpumask);
2951 + priv->num_channels++;
2952 +
2953 + /* Stop if we already have enough channels to accommodate all
2954 + * RX and TX conf queues
2955 + */
2956 + if (priv->num_channels == dpaa2_eth_queue_count(priv))
2957 + break;
2958 + }
2959 +
2960 + /* Tx confirmation queues can only be serviced by cpus
2961 + * with an affine DPIO/channel
2962 + */
2963 + cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask);
2964 +
2965 + return 0;
2966 +
2967 +err_set_cdan:
2968 + dpaa2_io_service_deregister(NULL, nctx);
2969 +err_service_reg:
2970 + free_channel(priv, channel);
2971 +err_alloc_ch:
2972 + if (cpumask_empty(&priv->dpio_cpumask)) {
2973 + dev_dbg(dev, "No cpu with an affine DPIO/DPCON\n");
2974 + return -ENODEV;
2975 + }
2976 + cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask);
2977 +
2978 + dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
2979 + cpumask_pr_args(&priv->dpio_cpumask));
2980 +
2981 + return 0;
2982 +}
2983 +
2984 +static void free_dpio(struct dpaa2_eth_priv *priv)
2985 +{
2986 + int i;
2987 + struct dpaa2_eth_channel *ch;
2988 +
2989 + /* deregister CDAN notifications and free channels */
2990 + for (i = 0; i < priv->num_channels; i++) {
2991 + ch = priv->channel[i];
2992 + dpaa2_io_service_deregister(NULL, &ch->nctx);
2993 + free_channel(priv, ch);
2994 + }
2995 +}
2996 +
2997 +static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
2998 + int cpu)
2999 +{
3000 + struct device *dev = priv->net_dev->dev.parent;
3001 + int i;
3002 +
3003 + for (i = 0; i < priv->num_channels; i++)
3004 + if (priv->channel[i]->nctx.desired_cpu == cpu)
3005 + return priv->channel[i];
3006 +
3007 + /* We should never get here. Issue a warning and return
3008 + * the first channel, because it's still better than nothing
3009 + */
3010 + dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
3011 +
3012 + return priv->channel[0];
3013 +}
3014 +
3015 +static void set_fq_affinity(struct dpaa2_eth_priv *priv)
3016 +{
3017 + struct device *dev = priv->net_dev->dev.parent;
3018 + struct cpumask xps_mask = CPU_MASK_NONE;
3019 + struct dpaa2_eth_fq *fq;
3020 + int rx_cpu, txc_cpu;
3021 + int i, err;
3022 +
3023 + /* For each FQ, pick one channel/CPU to deliver frames to.
3024 + * This may well change at runtime, either through irqbalance or
3025 + * through direct user intervention.
3026 + */
3027 + rx_cpu = cpumask_first(&priv->dpio_cpumask);
3028 + txc_cpu = cpumask_first(&priv->txconf_cpumask);
3029 +
3030 + for (i = 0; i < priv->num_fqs; i++) {
3031 + fq = &priv->fq[i];
3032 + switch (fq->type) {
3033 + case DPAA2_RX_FQ:
3034 + case DPAA2_RX_ERR_FQ:
3035 + fq->target_cpu = rx_cpu;
3036 + rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
3037 + if (rx_cpu >= nr_cpu_ids)
3038 + rx_cpu = cpumask_first(&priv->dpio_cpumask);
3039 + break;
3040 + case DPAA2_TX_CONF_FQ:
3041 + fq->target_cpu = txc_cpu;
3042 +
3043 + /* register txc_cpu to XPS */
3044 + cpumask_set_cpu(txc_cpu, &xps_mask);
3045 + err = netif_set_xps_queue(priv->net_dev, &xps_mask,
3046 + fq->flowid);
3047 + if (err)
3048 + dev_info_once(dev,
3049 + "Tx: error setting XPS queue\n");
3050 + cpumask_clear_cpu(txc_cpu, &xps_mask);
3051 +
3052 + txc_cpu = cpumask_next(txc_cpu, &priv->txconf_cpumask);
3053 + if (txc_cpu >= nr_cpu_ids)
3054 + txc_cpu = cpumask_first(&priv->txconf_cpumask);
3055 + break;
3056 + default:
3057 + dev_err(dev, "Unknown FQ type: %d\n", fq->type);
3058 + }
3059 + fq->channel = get_affine_channel(priv, fq->target_cpu);
3060 + }
3061 +}
3062 +
3063 +static void setup_fqs(struct dpaa2_eth_priv *priv)
3064 +{
3065 + int i, j;
3066 +
3067 + /* We have one TxConf FQ per Tx flow. Tx queues MUST be at the
3068 + * beginning of the queue array.
3069 + * Number of Rx and Tx queues are the same.
3070 + * We only support one traffic class for now.
3071 + */
3072 + for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
3073 + priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
3074 + priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
3075 + priv->fq[priv->num_fqs++].flowid = (u16)i;
3076 + }
3077 +
3078 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++)
3079 + for (j = 0; j < dpaa2_eth_queue_count(priv); j++) {
3080 + priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
3081 + priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
3082 + priv->fq[priv->num_fqs].tc = (u8)i;
3083 + priv->fq[priv->num_fqs++].flowid = (u16)j;
3084 + }
3085 +
3086 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
3087 + /* We have exactly one Rx error queue per DPNI */
3088 + priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
3089 + priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
3090 +#endif
3091 +
3092 + /* For each FQ, decide on which core to process incoming frames */
3093 + set_fq_affinity(priv);
3094 +}
3095 +
3096 +/* Allocate and configure one buffer pool for each interface */
3097 +static int setup_dpbp(struct dpaa2_eth_priv *priv)
3098 +{
3099 + int err;
3100 + struct fsl_mc_device *dpbp_dev;
3101 + struct dpbp_attr dpbp_attrs;
3102 + struct device *dev = priv->net_dev->dev.parent;
3103 +
3104 + err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
3105 + &dpbp_dev);
3106 + if (err) {
3107 + dev_err(dev, "DPBP device allocation failed\n");
3108 + return err;
3109 + }
3110 +
3111 + priv->dpbp_dev = dpbp_dev;
3112 +
3113 + err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
3114 + &dpbp_dev->mc_handle);
3115 + if (err) {
3116 + dev_err(dev, "dpbp_open() failed\n");
3117 + goto err_open;
3118 + }
3119 +
3120 + err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
3121 + if (err) {
3122 + dev_err(dev, "dpbp_reset() failed\n");
3123 + goto err_reset;
3124 + }
3125 +
3126 + err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
3127 + if (err) {
3128 + dev_err(dev, "dpbp_enable() failed\n");
3129 + goto err_enable;
3130 + }
3131 +
3132 + err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
3133 + &dpbp_attrs);
3134 + if (err) {
3135 + dev_err(dev, "dpbp_get_attributes() failed\n");
3136 + goto err_get_attr;
3137 + }
3138 +
3139 + priv->bpid = dpbp_attrs.bpid;
3140 + priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC / priv->num_channels;
3141 +
3142 + return 0;
3143 +
3144 +err_get_attr:
3145 + dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
3146 +err_enable:
3147 +err_reset:
3148 + dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
3149 +err_open:
3150 + fsl_mc_object_free(dpbp_dev);
3151 +
3152 + return err;
3153 +}
3154 +
3155 +static void free_dpbp(struct dpaa2_eth_priv *priv)
3156 +{
3157 + drain_pool(priv);
3158 + dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
3159 + dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
3160 + fsl_mc_object_free(priv->dpbp_dev);
3161 +}
3162 +
3163 +static int setup_tx_congestion(struct dpaa2_eth_priv *priv)
3164 +{
3165 + struct dpni_congestion_notification_cfg cong_notif_cfg = { 0 };
3166 + struct device *dev = priv->net_dev->dev.parent;
3167 + int err;
3168 +
3169 + priv->cscn_unaligned = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
3170 + GFP_KERNEL);
3171 + if (!priv->cscn_unaligned)
3172 + return -ENOMEM;
3173 +
3174 + priv->cscn_mem = PTR_ALIGN(priv->cscn_unaligned, DPAA2_CSCN_ALIGN);
3175 + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem, DPAA2_CSCN_SIZE,
3176 + DMA_FROM_DEVICE);
3177 + if (dma_mapping_error(dev, priv->cscn_dma)) {
3178 + dev_err(dev, "Error mapping CSCN memory area\n");
3179 + err = -ENOMEM;
3180 + goto err_dma_map;
3181 + }
3182 +
3183 + cong_notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
3184 + cong_notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
3185 + cong_notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
3186 + cong_notif_cfg.message_ctx = (u64)priv;
3187 + cong_notif_cfg.message_iova = priv->cscn_dma;
3188 + cong_notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
3189 + DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
3190 + DPNI_CONG_OPT_COHERENT_WRITE;
3191 + err = dpni_set_congestion_notification(priv->mc_io, 0, priv->mc_token,
3192 + DPNI_QUEUE_TX, 0,
3193 + &cong_notif_cfg);
3194 + if (err) {
3195 + dev_err(dev, "dpni_set_congestion_notification failed\n");
3196 + goto err_set_cong;
3197 + }
3198 +
3199 + return 0;
3200 +
3201 +err_set_cong:
3202 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
3203 +err_dma_map:
3204 + kfree(priv->cscn_unaligned);
3205 +
3206 + return err;
3207 +}
3208 +
3209 +/* Configure the DPNI object this interface is associated with */
3210 +static int setup_dpni(struct fsl_mc_device *ls_dev)
3211 +{
3212 + struct device *dev = &ls_dev->dev;
3213 + struct dpaa2_eth_priv *priv;
3214 + struct net_device *net_dev;
3215 + struct dpni_buffer_layout buf_layout;
3216 + struct dpni_link_cfg cfg = {0};
3217 + int err;
3218 +
3219 + net_dev = dev_get_drvdata(dev);
3220 + priv = netdev_priv(net_dev);
3221 +
3222 + priv->dpni_id = ls_dev->obj_desc.id;
3223 +
3224 + /* get a handle for the DPNI object */
3225 + err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token);
3226 + if (err) {
3227 + dev_err(dev, "dpni_open() failed\n");
3228 + goto err_open;
3229 + }
3230 +
3231 + ls_dev->mc_io = priv->mc_io;
3232 + ls_dev->mc_handle = priv->mc_token;
3233 +
3234 + err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3235 + if (err) {
3236 + dev_err(dev, "dpni_reset() failed\n");
3237 + goto err_reset;
3238 + }
3239 +
3240 + err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
3241 + &priv->dpni_attrs);
3242 +
3243 + if (err) {
3244 + dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
3245 + goto err_get_attr;
3246 + }
3247 +
3248 + /* due to a limitation in WRIOP 1.0.0 (ERR009354), the Rx buf
3249 + * align value must be a multiple of 256.
3250 + */
3251 + priv->rx_buf_align =
3252 + priv->dpni_attrs.wriop_version & 0x3ff ?
3253 + DPAA2_ETH_RX_BUF_ALIGN : DPAA2_ETH_RX_BUF_ALIGN_V1;
3254 +
3255 + /* Update number of logical FQs in netdev */
3256 + err = netif_set_real_num_tx_queues(net_dev,
3257 + dpaa2_eth_queue_count(priv));
3258 + if (err) {
3259 + dev_err(dev, "netif_set_real_num_tx_queues failed (%d)\n", err);
3260 + goto err_set_tx_queues;
3261 + }
3262 +
3263 + err = netif_set_real_num_rx_queues(net_dev,
3264 + dpaa2_eth_queue_count(priv));
3265 + if (err) {
3266 + dev_err(dev, "netif_set_real_num_rx_queues failed (%d)\n", err);
3267 + goto err_set_rx_queues;
3268 + }
3269 +
3270 + /* Configure buffer layouts */
3271 + /* rx buffer */
3272 + buf_layout.pass_parser_result = true;
3273 + buf_layout.pass_frame_status = true;
3274 + buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
3275 + buf_layout.data_align = priv->rx_buf_align;
3276 + buf_layout.data_head_room = DPAA2_ETH_RX_HEAD_ROOM;
3277 + buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
3278 + DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3279 + DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
3280 + DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
3281 + DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
3282 + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3283 + DPNI_QUEUE_RX, &buf_layout);
3284 + if (err) {
3285 + dev_err(dev,
3286 + "dpni_set_buffer_layout(RX) failed\n");
3287 + goto err_buf_layout;
3288 + }
3289 +
3290 + /* tx buffer */
3291 + buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3292 + DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
3293 + DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
3294 + buf_layout.pass_timestamp = true;
3295 + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3296 + DPNI_QUEUE_TX, &buf_layout);
3297 + if (err) {
3298 + dev_err(dev,
3299 + "dpni_set_buffer_layout(TX) failed\n");
3300 + goto err_buf_layout;
3301 + }
3302 +
3303 + /* tx-confirm buffer */
3304 + buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3305 + DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
3306 + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3307 + DPNI_QUEUE_TX_CONFIRM, &buf_layout);
3308 + if (err) {
3309 + dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
3310 + goto err_buf_layout;
3311 + }
3312 +
3313 + /* Now that we've set our tx buffer layout, retrieve the minimum
3314 + * required tx data offset.
3315 + */
3316 + err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
3317 + &priv->tx_data_offset);
3318 + if (err) {
3319 + dev_err(dev, "dpni_get_tx_data_offset() failed (%d)\n", err);
3320 + goto err_data_offset;
3321 + }
3322 +
3323 + if ((priv->tx_data_offset % 64) != 0)
3324 +		dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
3325 + priv->tx_data_offset);
3326 +
3327 + /* Enable congestion notifications for Tx queues */
3328 + err = setup_tx_congestion(priv);
3329 + if (err)
3330 + goto err_tx_cong;
3331 +
3332 + /* allocate classification rule space */
3333 + priv->cls_rule = kzalloc(sizeof(*priv->cls_rule) *
3334 + dpaa2_eth_fs_count(priv), GFP_KERNEL);
3335 +	if (!priv->cls_rule) {
3336 +		err = -ENOMEM;
3337 +		goto err_cls_rule;
3338 +	}
3339 +	/* Enable flow control */
3340 +	cfg.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE;
3341 +	priv->tx_pause_frames = 1;
3342 + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
3343 + if (err) {
3344 +		netdev_err(net_dev, "ERROR %d setting link cfg\n", err);
3345 + goto err_set_link_cfg;
3346 + }
3347 +
3348 + return 0;
3349 +
3350 +err_set_link_cfg:
3351 +err_cls_rule:
3352 +err_tx_cong:
3353 +err_data_offset:
3354 +err_buf_layout:
3355 +err_set_rx_queues:
3356 +err_set_tx_queues:
3357 +err_get_attr:
3358 +err_reset:
3359 + dpni_close(priv->mc_io, 0, priv->mc_token);
3360 +err_open:
3361 + return err;
3362 +}
3363 +
3364 +static void free_dpni(struct dpaa2_eth_priv *priv)
3365 +{
3366 + struct device *dev = priv->net_dev->dev.parent;
3367 + int err;
3368 +
3369 + err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3370 + if (err)
3371 + netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
3372 + err);
3373 +
3374 + dpni_close(priv->mc_io, 0, priv->mc_token);
3375 +
3376 + kfree(priv->cls_rule);
3377 +
3378 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
3379 + kfree(priv->cscn_unaligned);
3380 +}
3381 +
3382 +static int set_queue_taildrop(struct dpaa2_eth_priv *priv,
3383 + struct dpni_taildrop *td)
3384 +{
3385 + struct device *dev = priv->net_dev->dev.parent;
3386 + int err, i;
3387 +
3388 +
3389 + for (i = 0; i < priv->num_fqs; i++) {
3390 + if (priv->fq[i].type != DPAA2_RX_FQ)
3391 + continue;
3392 +
3393 + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
3394 + DPNI_CP_QUEUE, DPNI_QUEUE_RX,
3395 + priv->fq[i].tc, priv->fq[i].flowid,
3396 + td);
3397 + if (err) {
3398 + dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
3399 + return err;
3400 + }
3401 + }
3402 +
3403 + return 0;
3404 +}
3405 +
3406 +static int set_group_taildrop(struct dpaa2_eth_priv *priv,
3407 + struct dpni_taildrop *td)
3408 +{
3409 + struct device *dev = priv->net_dev->dev.parent;
3410 + struct dpni_taildrop disable_td, *tc_td;
3411 + int i, err;
3412 +
3413 + memset(&disable_td, 0, sizeof(struct dpni_taildrop));
3414 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3415 + if (td->enable && dpaa2_eth_is_pfc_enabled(priv, i))
3416 + /* Do not set taildrop thresholds for PFC-enabled
3417 + * traffic classes. We will enable congestion
3418 + * notifications for them.
3419 + */
3420 + tc_td = &disable_td;
3421 + else
3422 + tc_td = td;
3423 +
3424 + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
3425 + DPNI_CP_GROUP, DPNI_QUEUE_RX,
3426 + i, 0, tc_td);
3427 + if (err) {
3428 + dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
3429 + return err;
3430 + }
3431 + }
3432 + return 0;
3433 +}
3434 +
3435 +/* Enable/disable Rx FQ taildrop
3436 + *
3437 + * Rx FQ taildrop is mutually exclusive with flow control and it only gets
3438 + * disabled when FC is active. Depending on FC status, we need to compute
3439 + * the maximum number of buffers in the pool differently, so use the
3440 + * opportunity to update max number of buffers as well.
3441 + */
3442 +int set_rx_taildrop(struct dpaa2_eth_priv *priv)
3443 +{
3444 + enum dpaa2_eth_td_cfg cfg = dpaa2_eth_get_td_type(priv);
3445 + struct dpni_taildrop td_queue, td_group;
3446 + int err = 0;
3447 +
3448 + switch (cfg) {
3449 + case DPAA2_ETH_TD_NONE:
3450 + memset(&td_queue, 0, sizeof(struct dpni_taildrop));
3451 + memset(&td_group, 0, sizeof(struct dpni_taildrop));
3452 + priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC /
3453 + priv->num_channels;
3454 + break;
3455 + case DPAA2_ETH_TD_QUEUE:
3456 + memset(&td_group, 0, sizeof(struct dpni_taildrop));
3457 + td_queue.enable = 1;
3458 + td_queue.units = DPNI_CONGESTION_UNIT_BYTES;
3459 + td_queue.threshold = DPAA2_ETH_TAILDROP_THRESH /
3460 + dpaa2_eth_tc_count(priv);
3461 + priv->num_bufs = DPAA2_ETH_NUM_BUFS_TD;
3462 + break;
3463 + case DPAA2_ETH_TD_GROUP:
3464 + memset(&td_queue, 0, sizeof(struct dpni_taildrop));
3465 + td_group.enable = 1;
3466 + td_group.units = DPNI_CONGESTION_UNIT_FRAMES;
3467 + td_group.threshold = NAPI_POLL_WEIGHT *
3468 + dpaa2_eth_queue_count(priv);
3469 + priv->num_bufs = NAPI_POLL_WEIGHT *
3470 + dpaa2_eth_tc_count(priv);
3471 + break;
3472 + default:
3473 + break;
3474 + }
3475 +
3476 + err = set_queue_taildrop(priv, &td_queue);
3477 + if (err)
3478 + return err;
3479 +
3480 + err = set_group_taildrop(priv, &td_group);
3481 + if (err)
3482 + return err;
3483 +
3484 + priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD;
3485 +
3486 + return 0;
3487 +}
3488 +
3489 +static int setup_rx_flow(struct dpaa2_eth_priv *priv,
3490 + struct dpaa2_eth_fq *fq)
3491 +{
3492 + struct device *dev = priv->net_dev->dev.parent;
3493 + struct dpni_queue q = { { 0 } };
3494 + struct dpni_queue_id qid;
3495 + u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3496 + int err;
3497 +
3498 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3499 + DPNI_QUEUE_RX, fq->tc, fq->flowid, &q, &qid);
3500 + if (err) {
3501 + dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3502 + return err;
3503 + }
3504 +
3505 + fq->fqid = qid.fqid;
3506 +
3507 + q.destination.id = fq->channel->dpcon_id;
3508 + q.destination.type = DPNI_DEST_DPCON;
3509 + q.destination.priority = 1;
3510 + q.user_context = (u64)fq;
3511 + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3512 + DPNI_QUEUE_RX, fq->tc, fq->flowid, q_opt, &q);
3513 + if (err) {
3514 + dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
3515 + return err;
3516 + }
3517 +
3518 + return 0;
3519 +}
3520 +
3521 +static int setup_tx_flow(struct dpaa2_eth_priv *priv,
3522 + struct dpaa2_eth_fq *fq)
3523 +{
3524 + struct device *dev = priv->net_dev->dev.parent;
3525 + struct dpni_queue q = { { 0 } };
3526 + struct dpni_queue_id qid;
3527 + u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3528 + int err;
3529 +
3530 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3531 + DPNI_QUEUE_TX, 0, fq->flowid, &q, &qid);
3532 + if (err) {
3533 + dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3534 + return err;
3535 + }
3536 +
3537 + fq->tx_qdbin = qid.qdbin;
3538 +
3539 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3540 + DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, &q, &qid);
3541 + if (err) {
3542 + dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3543 + return err;
3544 + }
3545 +
3546 + fq->fqid = qid.fqid;
3547 +
3548 + q.destination.id = fq->channel->dpcon_id;
3549 + q.destination.type = DPNI_DEST_DPCON;
3550 + q.destination.priority = 0;
3551 + q.user_context = (u64)fq;
3552 + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3553 + DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, q_opt, &q);
3554 + if (err) {
3555 +		dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
3556 + return err;
3557 + }
3558 +
3559 + return 0;
3560 +}
3561 +
3562 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
3563 +static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
3564 + struct dpaa2_eth_fq *fq)
3565 +{
3566 + struct device *dev = priv->net_dev->dev.parent;
3567 + struct dpni_queue q = { { 0 } };
3568 + struct dpni_queue_id qid;
3569 + u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3570 + int err;
3571 +
3572 + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3573 + DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
3574 + if (err) {
3575 + dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3576 + return err;
3577 + }
3578 +
3579 + fq->fqid = qid.fqid;
3580 +
3581 + q.destination.id = fq->channel->dpcon_id;
3582 + q.destination.type = DPNI_DEST_DPCON;
3583 + q.destination.priority = 1;
3584 + q.user_context = (u64)fq;
3585 + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3586 + DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
3587 + if (err) {
3588 + dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
3589 + return err;
3590 + }
3591 +
3592 + return 0;
3593 +}
3594 +#endif
3595 +
3596 +/* default hash key fields */
3597 +static struct dpaa2_eth_hash_fields default_hash_fields[] = {
3598 + {
3599 + /* L2 header */
3600 + .rxnfc_field = RXH_L2DA,
3601 + .cls_prot = NET_PROT_ETH,
3602 + .cls_field = NH_FLD_ETH_DA,
3603 + .size = 6,
3604 + }, {
3605 + .cls_prot = NET_PROT_ETH,
3606 + .cls_field = NH_FLD_ETH_SA,
3607 + .size = 6,
3608 + }, {
3609 + /* This is the last ethertype field parsed:
3610 + * depending on frame format, it can be the MAC ethertype
3611 + * or the VLAN etype.
3612 + */
3613 + .cls_prot = NET_PROT_ETH,
3614 + .cls_field = NH_FLD_ETH_TYPE,
3615 + .size = 2,
3616 + }, {
3617 + /* VLAN header */
3618 + .rxnfc_field = RXH_VLAN,
3619 + .cls_prot = NET_PROT_VLAN,
3620 + .cls_field = NH_FLD_VLAN_TCI,
3621 + .size = 2,
3622 + }, {
3623 + /* IP header */
3624 + .rxnfc_field = RXH_IP_SRC,
3625 + .cls_prot = NET_PROT_IP,
3626 + .cls_field = NH_FLD_IP_SRC,
3627 + .size = 4,
3628 + }, {
3629 + .rxnfc_field = RXH_IP_DST,
3630 + .cls_prot = NET_PROT_IP,
3631 + .cls_field = NH_FLD_IP_DST,
3632 + .size = 4,
3633 + }, {
3634 + .rxnfc_field = RXH_L3_PROTO,
3635 + .cls_prot = NET_PROT_IP,
3636 + .cls_field = NH_FLD_IP_PROTO,
3637 + .size = 1,
3638 + }, {
3639 + /* Using UDP ports, this is functionally equivalent to raw
3640 + * byte pairs from L4 header.
3641 + */
3642 + .rxnfc_field = RXH_L4_B_0_1,
3643 + .cls_prot = NET_PROT_UDP,
3644 + .cls_field = NH_FLD_UDP_PORT_SRC,
3645 + .size = 2,
3646 + }, {
3647 + .rxnfc_field = RXH_L4_B_2_3,
3648 + .cls_prot = NET_PROT_UDP,
3649 + .cls_field = NH_FLD_UDP_PORT_DST,
3650 + .size = 2,
3651 + },
3652 +};
3653 +
3654 +/* Set RX hash options */
3655 +static int set_hash(struct dpaa2_eth_priv *priv)
3656 +{
3657 + struct device *dev = priv->net_dev->dev.parent;
3658 + struct dpkg_profile_cfg cls_cfg;
3659 + struct dpni_rx_tc_dist_cfg dist_cfg;
3660 + u8 *dma_mem;
3661 + int i;
3662 + int err = 0;
3663 +
3664 + memset(&cls_cfg, 0, sizeof(cls_cfg));
3665 +
3666 + for (i = 0; i < priv->num_hash_fields; i++) {
3667 + struct dpkg_extract *key =
3668 + &cls_cfg.extracts[cls_cfg.num_extracts];
3669 +
3670 + key->type = DPKG_EXTRACT_FROM_HDR;
3671 + key->extract.from_hdr.prot = priv->hash_fields[i].cls_prot;
3672 + key->extract.from_hdr.type = DPKG_FULL_FIELD;
3673 + key->extract.from_hdr.field = priv->hash_fields[i].cls_field;
3674 + cls_cfg.num_extracts++;
3675 +
3676 + priv->rx_flow_hash |= priv->hash_fields[i].rxnfc_field;
3677 + }
3678 +
3679 + dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL);
3680 + if (!dma_mem)
3681 + return -ENOMEM;
3682 +
3683 + err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
3684 + if (err) {
3685 + dev_err(dev, "dpni_prepare_key_cfg() failed (%d)", err);
3686 + goto err_prep_key;
3687 + }
3688 +
3689 + memset(&dist_cfg, 0, sizeof(dist_cfg));
3690 +
3691 + /* Prepare for setting the rx dist */
3692 + dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
3693 + DPAA2_CLASSIFIER_DMA_SIZE,
3694 + DMA_TO_DEVICE);
3695 + if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
3696 + dev_err(dev, "DMA mapping failed\n");
3697 + err = -ENOMEM;
3698 + goto err_dma_map;
3699 + }
3700 +
3701 + dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3702 + if (dpaa2_eth_fs_enabled(priv)) {
3703 + dist_cfg.dist_mode = DPNI_DIST_MODE_FS;
3704 + dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH;
3705 + } else {
3706 + dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
3707 + }
3708 +
3709 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3710 + err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, i,
3711 + &dist_cfg);
3712 + if (err)
3713 + break;
3714 + }
3715 +
3716 + dma_unmap_single(dev, dist_cfg.key_cfg_iova,
3717 + DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
3718 + if (err)
3719 + dev_err(dev, "dpni_set_rx_tc_dist() failed (%d)\n", err);
3720 +
3721 +err_dma_map:
3722 +err_prep_key:
3723 + kfree(dma_mem);
3724 + return err;
3725 +}
3726 +
3727 +/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
3728 + * frame queues and channels
3729 + */
3730 +static int bind_dpni(struct dpaa2_eth_priv *priv)
3731 +{
3732 + struct net_device *net_dev = priv->net_dev;
3733 + struct device *dev = net_dev->dev.parent;
3734 + struct dpni_pools_cfg pools_params;
3735 + struct dpni_error_cfg err_cfg;
3736 + int err = 0;
3737 + int i;
3738 +
3739 + pools_params.num_dpbp = 1;
3740 + pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
3741 + pools_params.pools[0].backup_pool = 0;
3742 + pools_params.pools[0].priority_mask = 0xff;
3743 + pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
3744 + err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
3745 + if (err) {
3746 + dev_err(dev, "dpni_set_pools() failed\n");
3747 + return err;
3748 + }
3749 +
3750 + /* Verify classification options and disable hashing and/or
3751 + * flow steering support in case of invalid configuration values
3752 + */
3753 + priv->hash_fields = default_hash_fields;
3754 + priv->num_hash_fields = ARRAY_SIZE(default_hash_fields);
3755 + check_cls_support(priv);
3756 +
3757 + /* have the interface implicitly distribute traffic based on
3758 + * a static hash key
3759 + */
3760 + if (dpaa2_eth_hash_enabled(priv)) {
3761 + err = set_hash(priv);
3762 + if (err) {
3763 + dev_err(dev, "Hashing configuration failed\n");
3764 + return err;
3765 + }
3766 + }
3767 +
3768 + /* Configure handling of error frames */
3769 + err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
3770 + err_cfg.set_frame_annotation = 1;
3771 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
3772 + err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
3773 +#else
3774 + err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
3775 +#endif
3776 + err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
3777 + &err_cfg);
3778 + if (err) {
3779 + dev_err(dev, "dpni_set_errors_behavior() failed (%d)\n", err);
3780 + return err;
3781 + }
3782 +
3783 + /* Configure Rx and Tx conf queues to generate CDANs */
3784 + for (i = 0; i < priv->num_fqs; i++) {
3785 + switch (priv->fq[i].type) {
3786 + case DPAA2_RX_FQ:
3787 + err = setup_rx_flow(priv, &priv->fq[i]);
3788 + break;
3789 + case DPAA2_TX_CONF_FQ:
3790 + err = setup_tx_flow(priv, &priv->fq[i]);
3791 + break;
3792 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
3793 + case DPAA2_RX_ERR_FQ:
3794 + err = setup_rx_err_flow(priv, &priv->fq[i]);
3795 + break;
3796 +#endif
3797 + default:
3798 + dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
3799 + return -EINVAL;
3800 + }
3801 + if (err)
3802 + return err;
3803 + }
3804 +
3805 + err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, DPNI_QUEUE_TX,
3806 + &priv->tx_qdid);
3807 + if (err) {
3808 + dev_err(dev, "dpni_get_qdid() failed\n");
3809 + return err;
3810 + }
3811 +
3812 + return 0;
3813 +}
3814 +
3815 +/* Allocate rings for storing incoming frame descriptors */
3816 +static int alloc_rings(struct dpaa2_eth_priv *priv)
3817 +{
3818 + struct net_device *net_dev = priv->net_dev;
3819 + struct device *dev = net_dev->dev.parent;
3820 + int i;
3821 +
3822 + for (i = 0; i < priv->num_channels; i++) {
3823 + priv->channel[i]->store =
3824 + dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
3825 + if (!priv->channel[i]->store) {
3826 + netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
3827 + goto err_ring;
3828 + }
3829 + }
3830 +
3831 + return 0;
3832 +
3833 +err_ring:
3834 + for (i = 0; i < priv->num_channels; i++) {
3835 + if (!priv->channel[i]->store)
3836 + break;
3837 + dpaa2_io_store_destroy(priv->channel[i]->store);
3838 + }
3839 +
3840 + return -ENOMEM;
3841 +}
3842 +
3843 +static void free_rings(struct dpaa2_eth_priv *priv)
3844 +{
3845 + int i;
3846 +
3847 + for (i = 0; i < priv->num_channels; i++)
3848 + dpaa2_io_store_destroy(priv->channel[i]->store);
3849 +}
3850 +
3851 +static int netdev_init(struct net_device *net_dev)
3852 +{
3853 + int err;
3854 + struct device *dev = net_dev->dev.parent;
3855 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3856 + u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
3857 + u8 bcast_addr[ETH_ALEN];
3858 + u16 rx_headroom, rx_req_headroom;
3859 +
3860 + net_dev->netdev_ops = &dpaa2_eth_ops;
3861 +
3862 + /* Get firmware address, if any */
3863 + err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
3864 + if (err) {
3865 + dev_err(dev, "dpni_get_port_mac_addr() failed (%d)\n", err);
3866 + return err;
3867 + }
3868 +
3869 + /* Get DPNI attributes address, if any */
3870 + err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
3871 + dpni_mac_addr);
3872 + if (err) {
3873 + dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)\n", err);
3874 + return err;
3875 + }
3876 +
3877 + /* First check if firmware has any address configured by bootloader */
3878 + if (!is_zero_ether_addr(mac_addr)) {
3879 + /* If the DPMAC addr != the DPNI addr, update it */
3880 + if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
3881 + err = dpni_set_primary_mac_addr(priv->mc_io, 0,
3882 + priv->mc_token,
3883 + mac_addr);
3884 + if (err) {
3885 + dev_err(dev,
3886 + "dpni_set_primary_mac_addr() failed (%d)\n",
3887 + err);
3888 + return err;
3889 + }
3890 + }
3891 + memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
3892 + } else if (is_zero_ether_addr(dpni_mac_addr)) {
3893 + /* Fills in net_dev->dev_addr, as required by
3894 + * register_netdevice()
3895 + */
3896 + eth_hw_addr_random(net_dev);
3897 + /* Make the user aware, without cluttering the boot log */
3898 + dev_dbg_once(dev, " device(s) have all-zero hwaddr, replaced with random\n");
3899 + err = dpni_set_primary_mac_addr(priv->mc_io, 0,
3900 + priv->mc_token, net_dev->dev_addr);
3901 + if (err) {
3902 + dev_err(dev,
3903 + "dpni_set_primary_mac_addr() failed (%d)\n", err);
3904 + return err;
3905 + }
3906 + /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
3907 + * practical purposes, this will be our "permanent" mac address,
3908 + * at least until the next reboot. This move will also permit
3909 + * register_netdevice() to properly fill up net_dev->perm_addr.
3910 + */
3911 + net_dev->addr_assign_type = NET_ADDR_PERM;
3912 + /* If DPMAC address is non-zero, use that one */
3913 + } else {
3914 + /* NET_ADDR_PERM is default, all we have to do is
3915 + * fill in the device addr.
3916 + */
3917 + memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
3918 + }
3919 +
3920 + /* Explicitly add the broadcast address to the MAC filtering table;
3921 + * the MC won't do that for us.
3922 + */
3923 + eth_broadcast_addr(bcast_addr);
3924 + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
3925 + if (err) {
3926 + dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err);
3927 + /* Won't return an error; at least, we'd have egress traffic */
3928 + }
3929 +
3930 + /* Reserve enough space to align buffer as per hardware requirement;
3931 + * NOTE: priv->tx_data_offset MUST be initialized at this point.
3932 + */
3933 + net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);
3934 +
3935 + /* Set MTU limits */
3936 + net_dev->min_mtu = 68;
3937 + net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
3938 +
3939 + /* Required headroom for Rx skbs, to avoid reallocation on
3940 + * forwarding path.
3941 + */
3942 + rx_req_headroom = LL_RESERVED_SPACE(net_dev) - ETH_HLEN;
3943 + rx_headroom = ALIGN(DPAA2_ETH_RX_HWA_SIZE + DPAA2_ETH_SWA_SIZE +
3944 + DPAA2_ETH_RX_HEAD_ROOM, priv->rx_buf_align);
3945 + if (rx_req_headroom > rx_headroom)
3946 + dev_info_once(dev,
3947 + "Required headroom (%d) greater than available (%d).\n"
3948 + "This will impact performance due to reallocations.\n",
3949 + rx_req_headroom, rx_headroom);
3950 +
3951 + /* Our .ndo_init will be called herein */
3952 + err = register_netdev(net_dev);
3953 + if (err < 0) {
3954 + dev_err(dev, "register_netdev() failed (%d)\n", err);
3955 + return err;
3956 + }
3957 +
3958 + return 0;
3959 +}
3960 +
3961 +static int poll_link_state(void *arg)
3962 +{
3963 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
3964 + int err;
3965 +
3966 + while (!kthread_should_stop()) {
3967 + err = link_state_update(priv);
3968 + if (unlikely(err))
3969 + return err;
3970 +
3971 + msleep(DPAA2_ETH_LINK_STATE_REFRESH);
3972 + }
3973 +
3974 + return 0;
3975 +}
3976 +
3977 +static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
3978 +{
3979 + return IRQ_WAKE_THREAD;
3980 +}
3981 +
3982 +static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
3983 +{
3984 + u32 status = 0, clear = 0;
3985 + struct device *dev = (struct device *)arg;
3986 + struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
3987 + struct net_device *net_dev = dev_get_drvdata(dev);
3988 + int err;
3989 +
3990 + err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
3991 + DPNI_IRQ_INDEX, &status);
3992 + if (unlikely(err)) {
3993 + netdev_err(net_dev, "Can't get irq status (err %d)", err);
3994 + clear = 0xffffffff;
3995 + goto out;
3996 + }
3997 +
3998 + if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
3999 + clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
4000 + link_state_update(netdev_priv(net_dev));
4001 + }
4002 +
4003 +out:
4004 + dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
4005 + DPNI_IRQ_INDEX, clear);
4006 + return IRQ_HANDLED;
4007 +}
4008 +
4009 +static int setup_irqs(struct fsl_mc_device *ls_dev)
4010 +{
4011 + int err = 0;
4012 + struct fsl_mc_device_irq *irq;
4013 +
4014 + err = fsl_mc_allocate_irqs(ls_dev);
4015 + if (err) {
4016 + dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
4017 + return err;
4018 + }
4019 +
4020 + irq = ls_dev->irqs[0];
4021 + err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
4022 + dpni_irq0_handler,
4023 + dpni_irq0_handler_thread,
4024 + IRQF_NO_SUSPEND | IRQF_ONESHOT,
4025 + dev_name(&ls_dev->dev), &ls_dev->dev);
4026 + if (err < 0) {
4027 + dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d", err);
4028 + goto free_mc_irq;
4029 + }
4030 +
4031 + err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
4032 + DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
4033 + if (err < 0) {
4034 + dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d", err);
4035 + goto free_irq;
4036 + }
4037 +
4038 + err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
4039 + DPNI_IRQ_INDEX, 1);
4040 + if (err < 0) {
4041 + dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d", err);
4042 + goto free_irq;
4043 + }
4044 +
4045 + return 0;
4046 +
4047 +free_irq:
4048 + devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
4049 +free_mc_irq:
4050 + fsl_mc_free_irqs(ls_dev);
4051 +
4052 + return err;
4053 +}
4054 +
4055 +static void add_ch_napi(struct dpaa2_eth_priv *priv)
4056 +{
4057 + int i;
4058 + struct dpaa2_eth_channel *ch;
4059 +
4060 + for (i = 0; i < priv->num_channels; i++) {
4061 + ch = priv->channel[i];
4062 + /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
4063 + netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
4064 + NAPI_POLL_WEIGHT);
4065 + }
4066 +}
4067 +
4068 +static void del_ch_napi(struct dpaa2_eth_priv *priv)
4069 +{
4070 + int i;
4071 + struct dpaa2_eth_channel *ch;
4072 +
4073 + for (i = 0; i < priv->num_channels; i++) {
4074 + ch = priv->channel[i];
4075 + netif_napi_del(&ch->napi);
4076 + }
4077 +}
4078 +
4079 +/* SysFS support */
4080 +static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev,
4081 + struct device_attribute *attr,
4082 + char *buf)
4083 +{
4084 + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
4085 + /* No MC API for getting the shaping config. We're stateful. */
4086 + struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg;
4087 +
4088 + return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size);
4089 +}
4090 +
4091 +static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev,
4092 + struct device_attribute *attr,
4093 + const char *buf,
4094 + size_t count)
4095 +{
4096 + int err, items;
4097 + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
4098 + struct dpni_tx_shaping_cfg scfg;
4099 +
4100 + items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size);
4101 + if (items != 2) {
4102 + pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n");
4103 + return -EINVAL;
4104 + }
4105 + /* Size restriction as per MC API documentation */
4106 + if (scfg.max_burst_size > DPAA2_ETH_MAX_BURST_SIZE) {
4107 + pr_err("max_burst_size must be <= %d\n",
4108 + DPAA2_ETH_MAX_BURST_SIZE);
4109 + return -EINVAL;
4110 + }
4111 +
4112 + err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg);
4113 + if (err) {
4114 + dev_err(dev, "dpni_set_tx_shaping() failed\n");
4115 + return -EPERM;
4116 + }
4117 + /* If successful, save the current configuration for future inquiries */
4118 + priv->shaping_cfg = scfg;
4119 +
4120 + return count;
4121 +}
4122 +
4123 +static ssize_t dpaa2_eth_show_txconf_cpumask(struct device *dev,
4124 + struct device_attribute *attr,
4125 + char *buf)
4126 +{
4127 + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
4128 +
4129 + return cpumap_print_to_pagebuf(1, buf, &priv->txconf_cpumask);
4130 +}
4131 +
4132 +static ssize_t dpaa2_eth_write_txconf_cpumask(struct device *dev,
4133 + struct device_attribute *attr,
4134 + const char *buf,
4135 + size_t count)
4136 +{
4137 + struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
4138 + struct dpaa2_eth_fq *fq;
4139 + bool running = netif_running(priv->net_dev);
4140 + int i, err;
4141 +
4142 + err = cpulist_parse(buf, &priv->txconf_cpumask);
4143 + if (err)
4144 + return err;
4145 +
4146 + /* Only accept CPUs that have an affine DPIO */
4147 + if (!cpumask_subset(&priv->txconf_cpumask, &priv->dpio_cpumask)) {
4148 + netdev_info(priv->net_dev,
4149 + "cpumask must be a subset of 0x%lx\n",
4150 + *cpumask_bits(&priv->dpio_cpumask));
4151 + cpumask_and(&priv->txconf_cpumask, &priv->dpio_cpumask,
4152 + &priv->txconf_cpumask);
4153 + }
4154 +
4155 + /* Rewiring the TxConf FQs requires interface shutdown.
4156 + */
4157 + if (running) {
4158 + err = dpaa2_eth_stop(priv->net_dev);
4159 + if (err)
4160 + return -ENODEV;
4161 + }
4162 +
4163 + /* Set the new TxConf FQ affinities */
4164 + set_fq_affinity(priv);
4165 +
4166 + /* dpaa2_eth_open() below will *stop* the Tx queues until an explicit
4167 + * link up notification is received. Give the polling thread enough time
4168 + * to detect the link state change, or else we'll end up with the
4169 + * transmission side forever shut down.
4170 + */
4171 + if (priv->do_link_poll)
4172 + msleep(2 * DPAA2_ETH_LINK_STATE_REFRESH);
4173 +
4174 + for (i = 0; i < priv->num_fqs; i++) {
4175 + fq = &priv->fq[i];
4176 + if (fq->type != DPAA2_TX_CONF_FQ)
4177 + continue;
4178 + setup_tx_flow(priv, fq);
4179 + }
4180 +
4181 + if (running) {
4182 + err = dpaa2_eth_open(priv->net_dev);
4183 + if (err)
4184 + return -ENODEV;
4185 + }
4186 +
4187 + return count;
4188 +}
4189 +
4190 +static struct device_attribute dpaa2_eth_attrs[] = {
4191 + __ATTR(txconf_cpumask,
4192 + 0600,
4193 + dpaa2_eth_show_txconf_cpumask,
4194 + dpaa2_eth_write_txconf_cpumask),
4195 +
4196 + __ATTR(tx_shaping,
4197 + 0600,
4198 + dpaa2_eth_show_tx_shaping,
4199 + dpaa2_eth_write_tx_shaping),
4200 +};
4201 +
4202 +static void dpaa2_eth_sysfs_init(struct device *dev)
4203 +{
4204 + int i, err;
4205 +
4206 + for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) {
4207 + err = device_create_file(dev, &dpaa2_eth_attrs[i]);
4208 + if (err) {
4209 + dev_err(dev, "ERROR creating sysfs file\n");
4210 + goto undo;
4211 + }
4212 + }
4213 + return;
4214 +
4215 +undo:
4216 + while (i > 0)
4217 + device_remove_file(dev, &dpaa2_eth_attrs[--i]);
4218 +}
4219 +
4220 +static void dpaa2_eth_sysfs_remove(struct device *dev)
4221 +{
4222 + int i;
4223 +
4224 + for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++)
4225 + device_remove_file(dev, &dpaa2_eth_attrs[i]);
4226 +}
4227 +
4228 +#ifdef CONFIG_FSL_DPAA2_ETH_DCB
4229 +static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev,
4230 + struct ieee_pfc *pfc)
4231 +{
4232 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4233 + struct dpni_congestion_notification_cfg notification_cfg;
4234 + struct dpni_link_state state;
4235 + int err, i;
4236 +
4237 + pfc->pfc_cap = dpaa2_eth_tc_count(priv);
4238 +
4239 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
4240 + if (err) {
4241 + netdev_err(net_dev, "ERROR %d getting link state", err);
4242 + return err;
4243 + }
4244 +
4245 + if (!(state.options & DPNI_LINK_OPT_PFC_PAUSE))
4246 + return 0;
4247 +
4248 + priv->pfc.pfc_en = 0;
4249 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4250 + err = dpni_get_congestion_notification(priv->mc_io, 0,
4251 + priv->mc_token,
4252 + DPNI_QUEUE_RX,
4253 + i, &notification_cfg);
4254 + if (err) {
4255 + netdev_err(net_dev, "Error %d getting congestion notif",
4256 + err);
4257 + return err;
4258 + }
4259 +
4260 + if (notification_cfg.threshold_entry)
4261 + priv->pfc.pfc_en |= 1 << i;
4262 + }
4263 +
4264 + pfc->pfc_en = priv->pfc.pfc_en;
4265 + pfc->mbc = priv->pfc.mbc;
4266 + pfc->delay = priv->pfc.delay;
4267 +
4268 + return 0;
4269 +}
4270 +
4271 +/* Configure ingress classification based on VLAN PCP */
4272 +static int set_vlan_qos(struct dpaa2_eth_priv *priv)
4273 +{
4274 + struct device *dev = priv->net_dev->dev.parent;
4275 + struct dpkg_profile_cfg kg_cfg = {0};
4276 + struct dpni_qos_tbl_cfg qos_cfg = {0};
4277 + struct dpni_rule_cfg key_params;
4278 + u8 *params_iova;
4279 + __be16 key, mask = cpu_to_be16(VLAN_PRIO_MASK);
4280 + int err = 0, i, j = 0;
4281 +
4282 + if (priv->vlan_clsf_set)
4283 + return 0;
4284 +
4285 + params_iova = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
4286 + if (!params_iova)
4287 + return -ENOMEM;
4288 +
4289 + kg_cfg.num_extracts = 1;
4290 + kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
4291 + kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
4292 + kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
4293 + kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
4294 +
4295 + err = dpni_prepare_key_cfg(&kg_cfg, params_iova);
4296 + if (err) {
4297 + dev_err(dev, "dpkg_prepare_key_cfg failed: %d\n", err);
4298 + goto out_free;
4299 + }
4300 +
4301 + /* Set QoS table */
4302 + qos_cfg.default_tc = 0;
4303 + qos_cfg.discard_on_miss = 0;
4304 + qos_cfg.key_cfg_iova = dma_map_single(dev, params_iova,
4305 + DPAA2_CLASSIFIER_DMA_SIZE,
4306 + DMA_TO_DEVICE);
4307 + if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
4308 + dev_err(dev, "%s: DMA mapping failed\n", __func__);
4309 + err = -ENOMEM;
4310 + goto out_free;
4311 + }
4312 + err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
4313 + dma_unmap_single(dev, qos_cfg.key_cfg_iova,
4314 + DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
4315 +
4316 + if (err) {
4317 + dev_err(dev, "dpni_set_qos_table failed: %d\n", err);
4318 + goto out_free;
4319 + }
4320 +
4321 + key_params.key_size = sizeof(key);
4322 +
4323 + if (dpaa2_eth_fs_mask_enabled(priv)) {
4324 + key_params.mask_iova = dma_map_single(dev, &mask, sizeof(mask),
4325 + DMA_TO_DEVICE);
4326 + if (dma_mapping_error(dev, key_params.mask_iova)) {
4327 + dev_err(dev, "DMA mapping failed %s\n", __func__);
4328 + err = -ENOMEM;
4329 + goto out_free;
4330 + }
4331 + } else {
4332 + key_params.mask_iova = 0;
4333 + }
4334 +
4335 + key_params.key_iova = dma_map_single(dev, &key, sizeof(key),
4336 + DMA_TO_DEVICE);
4337 + if (dma_mapping_error(dev, key_params.key_iova)) {
4338 + dev_err(dev, "%s: DMA mapping failed\n", __func__);
4339 + err = -ENOMEM;
4340 + goto out_unmap_mask;
4341 + }
4342 +
4343 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4344 + key = cpu_to_be16(i << VLAN_PRIO_SHIFT);
4345 + dma_sync_single_for_device(dev, key_params.key_iova,
4346 + sizeof(key), DMA_TO_DEVICE);
4347 +
4348 + err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
4349 + &key_params, i, j++);
4350 + if (err) {
4351 + dev_err(dev, "dpni_add_qos_entry failed: %d\n", err);
4352 + goto out_unmap;
4353 + }
4354 + }
4355 +
4356 + priv->vlan_clsf_set = true;
4357 +
4358 +out_unmap:
4359 + dma_unmap_single(dev, key_params.key_iova, sizeof(key), DMA_TO_DEVICE);
4360 +out_unmap_mask:
4361 + if (key_params.mask_iova)
4362 + dma_unmap_single(dev, key_params.mask_iova, sizeof(mask),
4363 + DMA_TO_DEVICE);
4364 +out_free:
4365 + kfree(params_iova);
4366 + return err;
4367 +}
4368 +
4369 +static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev,
4370 + struct ieee_pfc *pfc)
4371 +{
4372 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4373 + struct dpni_congestion_notification_cfg notification_cfg = {0};
4374 + struct dpni_link_state state = {0};
4375 + struct dpni_link_cfg cfg = {0};
4376 + int err = 0, i;
4377 +
4378 + if (priv->pfc.pfc_en == pfc->pfc_en)
4379 + /* Same enabled mask, nothing to be done */
4380 + return 0;
4381 +
4382 + err = set_vlan_qos(priv);
4383 + if (err)
4384 + return err;
4385 +
4386 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
4387 + if (err) {
4388 + netdev_err(net_dev, "ERROR %d getting link state", err);
4389 + return err;
4390 + }
4391 +
4392 + cfg.rate = state.rate;
4393 + cfg.options = state.options;
4394 + if (pfc->pfc_en)
4395 + cfg.options |= DPNI_LINK_OPT_PFC_PAUSE;
4396 + else
4397 + cfg.options &= ~DPNI_LINK_OPT_PFC_PAUSE;
4398 +
4399 + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
4400 + if (err) {
4401 + netdev_err(net_dev, "ERROR %d setting link cfg", err);
4402 + return err;
4403 + }
4404 +
4405 + memcpy(&priv->pfc, pfc, sizeof(priv->pfc));
4406 +
4407 + err = set_rx_taildrop(priv);
4408 + if (err)
4409 + return err;
4410 +
4411 + /* configure congestion notifications */
4412 + notification_cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL;
4413 + notification_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
4414 + notification_cfg.message_iova = 0ULL;
4415 + notification_cfg.message_ctx = 0ULL;
4416 +
4417 + for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4418 + if (dpaa2_eth_is_pfc_enabled(priv, i)) {
4419 + notification_cfg.threshold_entry = NAPI_POLL_WEIGHT;
4420 + notification_cfg.threshold_exit = NAPI_POLL_WEIGHT / 2;
4421 + } else {
4422 + notification_cfg.threshold_entry = 0;
4423 + notification_cfg.threshold_exit = 0;
4424 + }
4425 +
4426 + err = dpni_set_congestion_notification(priv->mc_io, 0,
4427 + priv->mc_token,
4428 + DPNI_QUEUE_RX,
4429 + i, &notification_cfg);
4430 + if (err) {
4431 + netdev_err(net_dev, "Error %d setting congestion notif",
4432 + err);
4433 + return err;
4434 + }
4435 + }
4436 +
4437 + return 0;
4438 +}
4439 +
4440 +static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev)
4441 +{
4442 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4443 +
4444 + return priv->dcbx_mode;
4445 +}
4446 +
4447 +static u8 dpaa2_eth_dcbnl_setdcbx(struct net_device *net_dev, u8 mode)
4448 +{
4449 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4450 +
4451 + priv->dcbx_mode = mode;
4452 + return 0;
4453 +}
4454 +
4455 +static u8 dpaa2_eth_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap)
4456 +{
4457 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4458 +
4459 + switch (capid) {
4460 + case DCB_CAP_ATTR_PFC:
4461 + *cap = true;
4462 + break;
4463 + case DCB_CAP_ATTR_PFC_TCS:
4464 + *cap = 1 << dpaa2_eth_tc_count(priv);
4465 + break;
4466 + case DCB_CAP_ATTR_DCBX:
4467 + *cap = priv->dcbx_mode;
4468 + break;
4469 + default:
4470 + *cap = false;
4471 + break;
4472 + }
4473 +
4474 + return 0;
4475 +}
4476 +
4477 +const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops = {
4478 + .ieee_getpfc = dpaa2_eth_dcbnl_ieee_getpfc,
4479 + .ieee_setpfc = dpaa2_eth_dcbnl_ieee_setpfc,
4480 + .getdcbx = dpaa2_eth_dcbnl_getdcbx,
4481 + .setdcbx = dpaa2_eth_dcbnl_setdcbx,
4482 + .getcap = dpaa2_eth_dcbnl_getcap,
4483 +};
4484 +#endif
4485 +
4486 +static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
4487 +{
4488 + struct device *dev;
4489 + struct net_device *net_dev = NULL;
4490 + struct dpaa2_eth_priv *priv = NULL;
4491 + int err = 0;
4492 +
4493 + dev = &dpni_dev->dev;
4494 +
4495 + /* Net device */
4496 + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
4497 + if (!net_dev) {
4498 + dev_err(dev, "alloc_etherdev_mq() failed\n");
4499 + return -ENOMEM;
4500 + }
4501 +
4502 + SET_NETDEV_DEV(net_dev, dev);
4503 + dev_set_drvdata(dev, net_dev);
4504 +
4505 + priv = netdev_priv(net_dev);
4506 + priv->net_dev = net_dev;
4507 +
4508 + priv->iommu_domain = iommu_get_domain_for_dev(dev);
4509 +
4510 + /* Obtain a MC portal */
4511 + err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
4512 + &priv->mc_io);
4513 + if (err) {
4514 + dev_dbg(dev, "MC portal allocation failed\n");
4515 + err = -EPROBE_DEFER;
4516 + goto err_portal_alloc;
4517 + }
4518 +
4519 + /* MC objects initialization and configuration */
4520 + err = setup_dpni(dpni_dev);
4521 + if (err)
4522 + goto err_dpni_setup;
4523 +
4524 + err = setup_dpio(priv);
4525 + if (err) {
4526 + dev_info(dev, "Defer probing as no DPIO available\n");
4527 + err = -EPROBE_DEFER;
4528 + goto err_dpio_setup;
4529 + }
4530 +
4531 + setup_fqs(priv);
4532 +
4533 + err = setup_dpbp(priv);
4534 + if (err)
4535 + goto err_dpbp_setup;
4536 +
4537 + err = bind_dpni(priv);
4538 + if (err)
4539 + goto err_bind;
4540 +
4541 + /* Percpu statistics */
4542 + priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
4543 + if (!priv->percpu_stats) {
4544 + dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
4545 + err = -ENOMEM;
4546 + goto err_alloc_percpu_stats;
4547 + }
4548 + priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
4549 + if (!priv->percpu_extras) {
4550 + dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
4551 + err = -ENOMEM;
4552 + goto err_alloc_percpu_extras;
4553 + }
4554 +
4555 + snprintf(net_dev->name, IFNAMSIZ, "ni%d", dpni_dev->obj_desc.id);
4556 + if (!dev_valid_name(net_dev->name)) {
4557 + dev_warn(&net_dev->dev,
4558 + "netdevice name \"%s\" cannot be used, reverting to default..\n",
4559 + net_dev->name);
4560 + dev_alloc_name(net_dev, "eth%d");
4561 + dev_warn(&net_dev->dev, "using name \"%s\"\n", net_dev->name);
4562 + }
4563 +
4564 + err = netdev_init(net_dev);
4565 + if (err)
4566 + goto err_netdev_init;
4567 +
4568 + /* Configure checksum offload based on current interface flags */
4569 + err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
4570 + if (err)
4571 + goto err_csum;
4572 +
4573 + err = set_tx_csum(priv, !!(net_dev->features &
4574 + (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
4575 + if (err)
4576 + goto err_csum;
4577 +
4578 + err = alloc_rings(priv);
4579 + if (err)
4580 + goto err_alloc_rings;
4581 +
4582 + net_dev->ethtool_ops = &dpaa2_ethtool_ops;
4583 +#ifdef CONFIG_FSL_DPAA2_ETH_DCB
4584 + net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
4585 + priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
4586 +#endif
4587 +
4588 + /* Add a NAPI context for each channel */
4589 + add_ch_napi(priv);
4590 + enable_ch_napi(priv);
4591 +
4592 + err = setup_irqs(dpni_dev);
4593 + if (err) {
4594 + netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
4595 + priv->poll_thread = kthread_run(poll_link_state, priv,
4596 + "%s_poll_link", net_dev->name);
4597 + if (IS_ERR(priv->poll_thread)) {
4598 + netdev_err(net_dev, "Error starting polling thread\n");
4599 + goto err_poll_thread;
4600 + }
4601 + priv->do_link_poll = true;
4602 + }
4603 +
4604 + dpaa2_eth_sysfs_init(&net_dev->dev);
4605 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
4606 + dpaa2_dbg_add(priv);
4607 +#endif
4608 +
4609 + dev_info(dev, "Probed interface %s\n", net_dev->name);
4610 + return 0;
4611 +
4612 +err_poll_thread:
4613 + free_rings(priv);
4614 +err_alloc_rings:
4615 +err_csum:
4616 + unregister_netdev(net_dev);
4617 +err_netdev_init:
4618 + free_percpu(priv->percpu_extras);
4619 +err_alloc_percpu_extras:
4620 + free_percpu(priv->percpu_stats);
4621 +err_alloc_percpu_stats:
4622 + disable_ch_napi(priv);
4623 + del_ch_napi(priv);
4624 +err_bind:
4625 + free_dpbp(priv);
4626 +err_dpbp_setup:
4627 + free_dpio(priv);
4628 +err_dpio_setup:
4629 + free_dpni(priv);
4630 +err_dpni_setup:
4631 + fsl_mc_portal_free(priv->mc_io);
4632 +err_portal_alloc:
4633 + dev_set_drvdata(dev, NULL);
4634 + free_netdev(net_dev);
4635 +
4636 + return err;
4637 +}
4638 +
4639 +static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
4640 +{
4641 + struct device *dev;
4642 + struct net_device *net_dev;
4643 + struct dpaa2_eth_priv *priv;
4644 +
4645 + dev = &ls_dev->dev;
4646 + net_dev = dev_get_drvdata(dev);
4647 + priv = netdev_priv(net_dev);
4648 +
4649 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
4650 + dpaa2_dbg_remove(priv);
4651 +#endif
4652 + dpaa2_eth_sysfs_remove(&net_dev->dev);
4653 +
4654 + disable_ch_napi(priv);
4655 + del_ch_napi(priv);
4656 +
4657 + unregister_netdev(net_dev);
4658 + dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
4659 +
4660 + if (priv->do_link_poll)
4661 + kthread_stop(priv->poll_thread);
4662 + else
4663 + fsl_mc_free_irqs(ls_dev);
4664 +
4665 + free_rings(priv);
4666 + free_percpu(priv->percpu_stats);
4667 + free_percpu(priv->percpu_extras);
4668 + free_dpbp(priv);
4669 + free_dpio(priv);
4670 + free_dpni(priv);
4671 +
4672 + fsl_mc_portal_free(priv->mc_io);
4673 +
4674 + dev_set_drvdata(dev, NULL);
4675 + free_netdev(net_dev);
4676 +
4677 + return 0;
4678 +}
4679 +
4680 +static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
4681 + {
4682 + .vendor = FSL_MC_VENDOR_FREESCALE,
4683 + .obj_type = "dpni",
4684 + },
4685 + { .vendor = 0x0 }
4686 +};
4687 +MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
4688 +
4689 +static struct fsl_mc_driver dpaa2_eth_driver = {
4690 + .driver = {
4691 + .name = KBUILD_MODNAME,
4692 + .owner = THIS_MODULE,
4693 + },
4694 + .probe = dpaa2_eth_probe,
4695 + .remove = dpaa2_eth_remove,
4696 + .match_id_table = dpaa2_eth_match_id_table
4697 +};
4698 +
4699 +static int __init dpaa2_eth_driver_init(void)
4700 +{
4701 + int err;
4702 +
4703 + dpaa2_eth_dbg_init();
4704 + err = fsl_mc_driver_register(&dpaa2_eth_driver);
4705 + if (err) {
4706 + dpaa2_eth_dbg_exit();
4707 + return err;
4708 + }
4709 +
4710 + return 0;
4711 +}
4712 +
4713 +static void __exit dpaa2_eth_driver_exit(void)
4714 +{
4715 + dpaa2_eth_dbg_exit();
4716 + fsl_mc_driver_unregister(&dpaa2_eth_driver);
4717 +}
4718 +
4719 +module_init(dpaa2_eth_driver_init);
4720 +module_exit(dpaa2_eth_driver_exit);
4721 --- /dev/null
4722 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
4723 @@ -0,0 +1,499 @@
4724 +/* Copyright 2014-2015 Freescale Semiconductor Inc.
4725 + *
4726 + * Redistribution and use in source and binary forms, with or without
4727 + * modification, are permitted provided that the following conditions are met:
4728 + * * Redistributions of source code must retain the above copyright
4729 + * notice, this list of conditions and the following disclaimer.
4730 + * * Redistributions in binary form must reproduce the above copyright
4731 + * notice, this list of conditions and the following disclaimer in the
4732 + * documentation and/or other materials provided with the distribution.
4733 + * * Neither the name of Freescale Semiconductor nor the
4734 + * names of its contributors may be used to endorse or promote products
4735 + * derived from this software without specific prior written permission.
4736 + *
4737 + *
4738 + * ALTERNATIVELY, this software may be distributed under the terms of the
4739 + * GNU General Public License ("GPL") as published by the Free Software
4740 + * Foundation, either version 2 of that License or (at your option) any
4741 + * later version.
4742 + *
4743 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
4744 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
4745 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
4746 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
4747 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
4748 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
4749 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
4750 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
4751 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
4752 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4753 + */
4754 +
4755 +#ifndef __DPAA2_ETH_H
4756 +#define __DPAA2_ETH_H
4757 +
4758 +#include <linux/atomic.h>
4759 +#include <linux/dcbnl.h>
4760 +#include <linux/netdevice.h>
4761 +#include <linux/if_vlan.h>
4762 +#include "../../fsl-mc/include/dpaa2-io.h"
4763 +#include "dpni.h"
4764 +#include "net.h"
4765 +
4766 +#include "dpaa2-eth-debugfs.h"
4767 +
4768 +#define DPAA2_ETH_STORE_SIZE 16
4769 +
4770 +/* We set a max threshold for how many Tx confirmations we should process
4771 + * on a NAPI poll call, they take less processing time.
4772 + */
4773 +#define TX_CONF_PER_NAPI_POLL 256
4774 +
4775 +/* Maximum number of scatter-gather entries in an ingress frame,
4776 + * considering the maximum receive frame size is 64K
4777 + */
4778 +#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE)
4779 +
4780 +/* Maximum acceptable MTU value. It is in direct relation with the hardware
4781 + * enforced Max Frame Length (currently 10k).
4782 + */
4783 +#define DPAA2_ETH_MFL (10 * 1024)
4784 +#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN)
4785 +/* Convert L3 MTU to L2 MFL */
4786 +#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN)
4787 +
4788 +/* Maximum burst size value for Tx shaping */
4789 +#define DPAA2_ETH_MAX_BURST_SIZE 0xF7FF
4790 +
4791 +/* Maximum number of buffers that can be acquired/released through a single
4792 + * QBMan command
4793 + */
4794 +#define DPAA2_ETH_BUFS_PER_CMD 7
4795 +
4796 +/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
4797 + * frames in the Rx queues (length of the current frame is not
4798 + * taken into account when making the taildrop decision)
4799 + */
4800 +#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024)
4801 +
4802 +/* Buffer quota per queue. Must be large enough such that for minimum sized
4803 + * frames taildrop kicks in before the bpool gets depleted, so we compute
4804 + * how many 64B frames fit inside the taildrop threshold and add a margin
4805 + * to accommodate the buffer refill delay.
4806 + */
4807 +#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
4808 +#define DPAA2_ETH_NUM_BUFS_TD (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
4809 +#define DPAA2_ETH_REFILL_THRESH_TD \
4810 + (DPAA2_ETH_NUM_BUFS_TD - DPAA2_ETH_BUFS_PER_CMD)
4811 +
4812 +/* Buffer quota per queue to use when flow control is active. */
4813 +#define DPAA2_ETH_NUM_BUFS_FC 256
4814 +
4815 +/* Hardware requires alignment for ingress/egress buffer addresses
4816 + * and ingress buffer lengths.
4817 + */
4818 +#define DPAA2_ETH_RX_BUF_SIZE 2048
4819 +#define DPAA2_ETH_TX_BUF_ALIGN 64
4820 +#define DPAA2_ETH_RX_BUF_ALIGN 64
4821 +#define DPAA2_ETH_RX_BUF_ALIGN_V1 256
4822 +#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
4823 + ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN - HH_DATA_MOD)
4824 +
4825 +/* rx_extra_head prevents reallocations in L3 processing. */
4826 +#define DPAA2_ETH_SKB_SIZE \
4827 + (DPAA2_ETH_RX_BUF_SIZE + \
4828 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
4829 +
4830 +/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress
4831 + * buffers large enough to allow building an skb around them and also account
4832 + * for alignment restrictions.
4833 + */
4834 +#define DPAA2_ETH_BUF_RAW_SIZE(p_priv) \
4835 + (DPAA2_ETH_SKB_SIZE + \
4836 + (p_priv)->rx_buf_align)
4837 +
4838 +/* PTP nominal frequency 1GHz */
4839 +#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1
4840 +
4841 +/* We are accommodating a skb backpointer and some S/G info
4842 + * in the frame's software annotation. The hardware
4843 + * options are either 0 or 64, so we choose the latter.
4844 + */
4845 +#define DPAA2_ETH_SWA_SIZE 64
4846 +
4847 +/* Extra headroom space requested to hardware, in order to make sure there's
4848 + * no realloc'ing in forwarding scenarios
4849 + */
4850 +#define DPAA2_ETH_RX_HEAD_ROOM \
4851 + (DPAA2_ETH_TX_HWA_SIZE - DPAA2_ETH_RX_HWA_SIZE + \
4852 + DPAA2_ETH_TX_BUF_ALIGN)
4853 +
4854 +/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
4855 +struct dpaa2_eth_swa {
4856 + struct sk_buff *skb;
4857 + struct scatterlist *scl;
4858 + int num_sg;
4859 + int num_dma_bufs;
4860 +};
4861 +
4862 +/* Annotation valid bits in FD FRC */
4863 +#define DPAA2_FD_FRC_FASV 0x8000
4864 +#define DPAA2_FD_FRC_FAEADV 0x4000
4865 +#define DPAA2_FD_FRC_FAPRV 0x2000
4866 +#define DPAA2_FD_FRC_FAIADV 0x1000
4867 +#define DPAA2_FD_FRC_FASWOV 0x0800
4868 +#define DPAA2_FD_FRC_FAICFDV 0x0400
4869 +
4870 +#define DPAA2_FD_RX_ERR_MASK (FD_CTRL_SBE | FD_CTRL_FAERR)
4871 +#define DPAA2_FD_TX_ERR_MASK (FD_CTRL_UFD | \
4872 + FD_CTRL_SBE | \
4873 + FD_CTRL_FSE | \
4874 + FD_CTRL_FAERR)
4875 +
4876 +/* Annotation bits in FD CTRL */
4877 +#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
4878 +
4879 +/* Size of hardware annotation area based on the current buffer layout
4880 + * configuration
4881 + */
4882 +#define DPAA2_ETH_RX_HWA_SIZE 64
4883 +#define DPAA2_ETH_TX_HWA_SIZE 128
4884 +
4885 +/* Frame annotation status */
4886 +struct dpaa2_fas {
4887 + u8 reserved;
4888 + u8 ppid;
4889 + __le16 ifpid;
4890 + __le32 status;
4891 +} __packed;
4892 +
4893 +/* Frame annotation status word is located in the first 8 bytes
4894 + * of the buffer's hardware annotation area
4895 + */
4896 +#define DPAA2_FAS_OFFSET 0
4897 +#define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas))
4898 +
4899 +/* Timestamp is located in the next 8 bytes of the buffer's
4900 + * hardware annotation area
4901 + */
4902 +#define DPAA2_TS_OFFSET 0x8
4903 +
4904 +/* Frame annotation egress action descriptor */
4905 +#define DPAA2_FAEAD_OFFSET 0x58
4906 +
4907 +struct dpaa2_faead {
4908 + __le32 conf_fqid;
4909 + __le32 ctrl;
4910 +};
4911 +
4912 +#define DPAA2_FAEAD_A2V 0x20000000
4913 +#define DPAA2_FAEAD_UPDV 0x00001000
4914 +#define DPAA2_FAEAD_UPD 0x00000010
4915 +
4916 +/* accessors for the hardware annotation fields that we use */
4917 +#define dpaa2_eth_get_hwa(buf_addr) \
4918 + ((void *)(buf_addr) + DPAA2_ETH_SWA_SIZE)
4919 +
4920 +#define dpaa2_eth_get_fas(buf_addr) \
4921 + (struct dpaa2_fas *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_FAS_OFFSET)
4922 +
4923 +#define dpaa2_eth_get_ts(buf_addr) \
4924 + (u64 *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_TS_OFFSET)
4925 +
4926 +#define dpaa2_eth_get_faead(buf_addr) \
4927 + (struct dpaa2_faead *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_FAEAD_OFFSET)
4928 +
4929 +/* Error and status bits in the frame annotation status word */
4930 +/* Debug frame, otherwise supposed to be discarded */
4931 +#define DPAA2_FAS_DISC 0x80000000
4932 +/* MACSEC frame */
4933 +#define DPAA2_FAS_MS 0x40000000
4934 +#define DPAA2_FAS_PTP 0x08000000
4935 +/* Ethernet multicast frame */
4936 +#define DPAA2_FAS_MC 0x04000000
4937 +/* Ethernet broadcast frame */
4938 +#define DPAA2_FAS_BC 0x02000000
4939 +#define DPAA2_FAS_KSE 0x00040000
4940 +#define DPAA2_FAS_EOFHE 0x00020000
4941 +#define DPAA2_FAS_MNLE 0x00010000
4942 +#define DPAA2_FAS_TIDE 0x00008000
4943 +#define DPAA2_FAS_PIEE 0x00004000
4944 +/* Frame length error */
4945 +#define DPAA2_FAS_FLE 0x00002000
4946 +/* Frame physical error */
4947 +#define DPAA2_FAS_FPE 0x00001000
4948 +#define DPAA2_FAS_PTE 0x00000080
4949 +#define DPAA2_FAS_ISP 0x00000040
4950 +#define DPAA2_FAS_PHE 0x00000020
4951 +#define DPAA2_FAS_BLE 0x00000010
4952 +/* L3 csum validation performed */
4953 +#define DPAA2_FAS_L3CV 0x00000008
4954 +/* L3 csum error */
4955 +#define DPAA2_FAS_L3CE 0x00000004
4956 +/* L4 csum validation performed */
4957 +#define DPAA2_FAS_L4CV 0x00000002
4958 +/* L4 csum error */
4959 +#define DPAA2_FAS_L4CE 0x00000001
4960 +/* Possible errors on the ingress path */
4961 +#define DPAA2_FAS_RX_ERR_MASK ((DPAA2_FAS_KSE) | \
4962 + (DPAA2_FAS_EOFHE) | \
4963 + (DPAA2_FAS_MNLE) | \
4964 + (DPAA2_FAS_TIDE) | \
4965 + (DPAA2_FAS_PIEE) | \
4966 + (DPAA2_FAS_FLE) | \
4967 + (DPAA2_FAS_FPE) | \
4968 + (DPAA2_FAS_PTE) | \
4969 + (DPAA2_FAS_ISP) | \
4970 + (DPAA2_FAS_PHE) | \
4971 + (DPAA2_FAS_BLE) | \
4972 + (DPAA2_FAS_L3CE) | \
4973 + (DPAA2_FAS_L4CE))
4974 +/* Tx errors */
4975 +#define DPAA2_FAS_TX_ERR_MASK ((DPAA2_FAS_KSE) | \
4976 + (DPAA2_FAS_EOFHE) | \
4977 + (DPAA2_FAS_MNLE) | \
4978 + (DPAA2_FAS_TIDE))
4979 +
4980 +/* Time in milliseconds between link state updates */
4981 +#define DPAA2_ETH_LINK_STATE_REFRESH 1000
4982 +
4983 +/* Number of times to retry a frame enqueue before giving up.
4984 + * Value determined empirically, in order to minimize the number
4985 + * of frames dropped on Tx
4986 + */
4987 +#define DPAA2_ETH_ENQUEUE_RETRIES 10
4988 +
4989 +/* Tx congestion entry & exit thresholds, in number of bytes.
4990 + * We allow a maximum of 512KB worth of frames pending processing on the Tx
4991 + * queues of an interface
4992 + */
4993 +#define DPAA2_ETH_TX_CONG_ENTRY_THRESH (512 * 1024)
4994 +#define DPAA2_ETH_TX_CONG_EXIT_THRESH (DPAA2_ETH_TX_CONG_ENTRY_THRESH * 9/10)
4995 +
4996 +/* Driver statistics, other than those in struct rtnl_link_stats64.
4997 + * These are usually collected per-CPU and aggregated by ethtool.
4998 + */
4999 +struct dpaa2_eth_drv_stats {
5000 + __u64 tx_conf_frames;
5001 + __u64 tx_conf_bytes;
5002 + __u64 tx_sg_frames;
5003 + __u64 tx_sg_bytes;
5004 + __u64 rx_sg_frames;
5005 + __u64 rx_sg_bytes;
5006 + /* Enqueues retried due to portal busy */
5007 + __u64 tx_portal_busy;
5008 +};
5009 +
5010 +/* Per-FQ statistics */
5011 +struct dpaa2_eth_fq_stats {
5012 + /* Number of frames received on this queue */
5013 + __u64 frames;
5014 + /* Number of times this queue entered congestion */
5015 + __u64 congestion_entry;
5016 +};
5017 +
5018 +/* Per-channel statistics */
5019 +struct dpaa2_eth_ch_stats {
5020 + /* Volatile dequeues retried due to portal busy */
5021 + __u64 dequeue_portal_busy;
5022 + /* Number of CDANs; useful to estimate avg NAPI len */
5023 + __u64 cdan;
5024 + /* Number of frames received on queues from this channel */
5025 + __u64 frames;
5026 + /* Pull errors */
5027 + __u64 pull_err;
5028 +};
5029 +
5030 +#define DPAA2_ETH_MAX_DPCONS NR_CPUS
5031 +#define DPAA2_ETH_MAX_TCS 8
5032 +
5033 +/* Maximum number of queues associated with a DPNI */
5034 +#define DPAA2_ETH_MAX_RX_QUEUES (DPNI_MAX_DIST_SIZE * DPAA2_ETH_MAX_TCS)
5035 +#define DPAA2_ETH_MAX_TX_QUEUES DPNI_MAX_SENDERS
5036 +#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1
5037 +#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
5038 + DPAA2_ETH_MAX_TX_QUEUES + \
5039 + DPAA2_ETH_MAX_RX_ERR_QUEUES)
5040 +
5041 +enum dpaa2_eth_fq_type {
5042 + DPAA2_RX_FQ = 0,
5043 + DPAA2_TX_CONF_FQ,
5044 + DPAA2_RX_ERR_FQ
5045 +};
5046 +
5047 +struct dpaa2_eth_priv;
5048 +
5049 +struct dpaa2_eth_fq {
5050 + u32 fqid;
5051 + u32 tx_qdbin;
5052 + u16 flowid;
5053 + u8 tc;
5054 + int target_cpu;
5055 + struct dpaa2_eth_channel *channel;
5056 + enum dpaa2_eth_fq_type type;
5057 +
5058 + void (*consume)(struct dpaa2_eth_priv *,
5059 + struct dpaa2_eth_channel *,
5060 + const struct dpaa2_fd *,
5061 + struct napi_struct *,
5062 + u16 queue_id);
5063 + struct dpaa2_eth_fq_stats stats;
5064 +};
5065 +
5066 +struct dpaa2_eth_channel {
5067 + struct dpaa2_io_notification_ctx nctx;
5068 + struct fsl_mc_device *dpcon;
5069 + int dpcon_id;
5070 + int ch_id;
5071 + int dpio_id;
5072 + struct napi_struct napi;
5073 + struct dpaa2_io_store *store;
5074 + struct dpaa2_eth_priv *priv;
5075 + int buf_count;
5076 + struct dpaa2_eth_ch_stats stats;
5077 +};
5078 +
5079 +struct dpaa2_eth_cls_rule {
5080 + struct ethtool_rx_flow_spec fs;
5081 + bool in_use;
5082 +};
5083 +
5084 +struct dpaa2_eth_hash_fields {
5085 + u64 rxnfc_field;
5086 + enum net_prot cls_prot;
5087 + int cls_field;
5088 + int offset;
5089 + int size;
5090 +};
5091 +
5092 +/* Driver private data */
5093 +struct dpaa2_eth_priv {
5094 + struct net_device *net_dev;
5095 +
5096 + /* Standard statistics */
5097 + struct rtnl_link_stats64 __percpu *percpu_stats;
5098 + /* Extra stats, in addition to the ones known by the kernel */
5099 + struct dpaa2_eth_drv_stats __percpu *percpu_extras;
5100 + struct iommu_domain *iommu_domain;
5101 +
5102 + bool ts_tx_en; /* Tx timestamping enabled */
5103 + bool ts_rx_en; /* Rx timestamping enabled */
5104 +
5105 + u16 tx_data_offset;
5106 + u16 rx_buf_align;
5107 +
5108 + u16 bpid;
5109 + u16 tx_qdid;
5110 +
5111 + int tx_pause_frames;
5112 + int num_bufs;
5113 + int refill_thresh;
5114 +
5115 + /* Tx congestion notifications are written here */
5116 + void *cscn_mem;
5117 + void *cscn_unaligned;
5118 + dma_addr_t cscn_dma;
5119 +
5120 + u8 num_fqs;
5121 + /* Tx queues are at the beginning of the array */
5122 + struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
5123 +
5124 + u8 num_channels;
5125 + struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
5126 +
5127 + int dpni_id;
5128 + struct dpni_attr dpni_attrs;
5129 + struct fsl_mc_device *dpbp_dev;
5130 +
5131 + struct fsl_mc_io *mc_io;
5132 + /* SysFS-controlled affinity mask for TxConf FQs */
5133 + struct cpumask txconf_cpumask;
5134 + /* Cores which have an affine DPIO/DPCON.
5135 + * This is the cpu set on which Rx frames are processed;
5136 + * Tx confirmation frames are processed on a subset of this,
5137 + * depending on user settings.
5138 + */
5139 + struct cpumask dpio_cpumask;
5140 +
5141 + u16 mc_token;
5142 +
5143 + struct dpni_link_state link_state;
5144 + bool do_link_poll;
5145 + struct task_struct *poll_thread;
5146 +
5147 + struct dpaa2_eth_hash_fields *hash_fields;
5148 + u8 num_hash_fields;
5149 + /* enabled ethtool hashing bits */
5150 + u64 rx_flow_hash;
5151 +
5152 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
5153 + struct dpaa2_debugfs dbg;
5154 +#endif
5155 +
5156 + /* array of classification rules */
5157 + struct dpaa2_eth_cls_rule *cls_rule;
5158 +
5159 + struct dpni_tx_shaping_cfg shaping_cfg;
5160 +
5161 + u8 dcbx_mode;
5162 + struct ieee_pfc pfc;
5163 + bool vlan_clsf_set;
5164 +};
5165 +
5166 +#define dpaa2_eth_hash_enabled(priv) \
5167 + ((priv)->dpni_attrs.num_queues > 1)
5168 +
5169 +#define dpaa2_eth_fs_enabled(priv) \
5170 + (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
5171 +
5172 +#define dpaa2_eth_fs_mask_enabled(priv) \
5173 + ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
5174 +
5175 +#define dpaa2_eth_fs_count(priv) \
5176 + ((priv)->dpni_attrs.fs_entries)
5177 +
5178 +/* size of DMA memory used to pass configuration to classifier, in bytes */
5179 +#define DPAA2_CLASSIFIER_DMA_SIZE 256
5180 +
5181 +extern const struct ethtool_ops dpaa2_ethtool_ops;
5182 +extern const char dpaa2_eth_drv_version[];
5183 +
5184 +static inline int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
5185 +{
5186 + return priv->dpni_attrs.num_queues;
5187 +}
5188 +
5189 +static inline int dpaa2_eth_tc_count(struct dpaa2_eth_priv *priv)
5190 +{
5191 + return priv->dpni_attrs.num_tcs;
5192 +}
5193 +
5194 +static inline bool dpaa2_eth_is_pfc_enabled(struct dpaa2_eth_priv *priv,
5195 + int traffic_class)
5196 +{
5197 + return priv->pfc.pfc_en & (1 << traffic_class);
5198 +}
5199 +
5200 +enum dpaa2_eth_td_cfg {
5201 + DPAA2_ETH_TD_NONE,
5202 + DPAA2_ETH_TD_QUEUE,
5203 + DPAA2_ETH_TD_GROUP
5204 +};
5205 +
5206 +static inline enum dpaa2_eth_td_cfg
5207 +dpaa2_eth_get_td_type(struct dpaa2_eth_priv *priv)
5208 +{
5209 + bool pfc_enabled = !!(priv->pfc.pfc_en);
5210 +
5211 + if (pfc_enabled)
5212 + return DPAA2_ETH_TD_GROUP;
5213 + else if (priv->tx_pause_frames)
5214 + return DPAA2_ETH_TD_NONE;
5215 + else
5216 + return DPAA2_ETH_TD_QUEUE;
5217 +}
5218 +
5219 +void check_cls_support(struct dpaa2_eth_priv *priv);
5220 +
5221 +int set_rx_taildrop(struct dpaa2_eth_priv *priv);
5222 +#endif /* __DPAA2_ETH_H */
5223 --- /dev/null
5224 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
5225 @@ -0,0 +1,864 @@
5226 +/* Copyright 2014-2015 Freescale Semiconductor Inc.
5227 + *
5228 + * Redistribution and use in source and binary forms, with or without
5229 + * modification, are permitted provided that the following conditions are met:
5230 + * * Redistributions of source code must retain the above copyright
5231 + * notice, this list of conditions and the following disclaimer.
5232 + * * Redistributions in binary form must reproduce the above copyright
5233 + * notice, this list of conditions and the following disclaimer in the
5234 + * documentation and/or other materials provided with the distribution.
5235 + * * Neither the name of Freescale Semiconductor nor the
5236 + * names of its contributors may be used to endorse or promote products
5237 + * derived from this software without specific prior written permission.
5238 + *
5239 + *
5240 + * ALTERNATIVELY, this software may be distributed under the terms of the
5241 + * GNU General Public License ("GPL") as published by the Free Software
5242 + * Foundation, either version 2 of that License or (at your option) any
5243 + * later version.
5244 + *
5245 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
5246 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
5247 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
5248 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
5249 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
5250 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
5251 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
5252 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5253 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5254 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5255 + */
5256 +
5257 +#include "dpni.h" /* DPNI_LINK_OPT_* */
5258 +#include "dpaa2-eth.h"
5259 +
5260 +/* To be kept in sync with dpni_statistics */
5261 +static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
5262 + "rx frames",
5263 + "rx bytes",
5264 + "rx mcast frames",
5265 + "rx mcast bytes",
5266 + "rx bcast frames",
5267 + "rx bcast bytes",
5268 + "tx frames",
5269 + "tx bytes",
5270 + "tx mcast frames",
5271 + "tx mcast bytes",
5272 + "tx bcast frames",
5273 + "tx bcast bytes",
5274 + "rx filtered frames",
5275 + "rx discarded frames",
5276 + "rx nobuffer discards",
5277 + "tx discarded frames",
5278 + "tx confirmed frames",
5279 +};
5280 +
5281 +#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
5282 +
5283 +/* To be kept in sync with 'struct dpaa2_eth_drv_stats' */
5284 +static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
5285 + /* per-cpu stats */
5286 +
5287 + "tx conf frames",
5288 + "tx conf bytes",
5289 + "tx sg frames",
5290 + "tx sg bytes",
5291 + "rx sg frames",
5292 + "rx sg bytes",
5293 + /* how many times we had to retry the enqueue command */
5294 + "enqueue portal busy",
5295 +
5296 + /* Channel stats */
5297 + /* How many times we had to retry the volatile dequeue command */
5298 + "dequeue portal busy",
5299 + "channel pull errors",
5300 + /* Number of notifications received */
5301 + "cdan",
5302 + "tx congestion state",
5303 +#ifdef CONFIG_FSL_QBMAN_DEBUG
5304 + /* FQ stats */
5305 + "rx pending frames",
5306 + "rx pending bytes",
5307 + "tx conf pending frames",
5308 + "tx conf pending bytes",
5309 + "buffer count"
5310 +#endif
5311 +};
5312 +
5313 +#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
5314 +
5315 +static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
5316 + struct ethtool_drvinfo *drvinfo)
5317 +{
5318 + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
5319 + strlcpy(drvinfo->version, dpaa2_eth_drv_version,
5320 + sizeof(drvinfo->version));
5321 + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
5322 + strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
5323 + sizeof(drvinfo->bus_info));
5324 +}
5325 +
5326 +static int dpaa2_eth_get_settings(struct net_device *net_dev,
5327 + struct ethtool_cmd *cmd)
5328 +{
5329 + struct dpni_link_state state = {0};
5330 + int err = 0;
5331 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5332 +
5333 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
5334 + if (err) {
5335 + netdev_err(net_dev, "ERROR %d getting link state", err);
5336 + goto out;
5337 + }
5338 +
5339 + /* At the moment, we have no way of interrogating the DPMAC
5340 + * from the DPNI side - and for that matter there may exist
5341 + * no DPMAC at all. So for now we just don't report anything
5342 + * beyond the DPNI attributes.
5343 + */
5344 + if (state.options & DPNI_LINK_OPT_AUTONEG)
5345 + cmd->autoneg = AUTONEG_ENABLE;
5346 + if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
5347 + cmd->duplex = DUPLEX_FULL;
5348 + ethtool_cmd_speed_set(cmd, state.rate);
5349 +
5350 +out:
5351 + return err;
5352 +}
5353 +
5354 +static int dpaa2_eth_set_settings(struct net_device *net_dev,
5355 + struct ethtool_cmd *cmd)
5356 +{
5357 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5358 + struct dpni_link_state state = {0};
5359 + struct dpni_link_cfg cfg = {0};
5360 + int err = 0;
5361 +
5362 + netdev_dbg(net_dev, "Setting link parameters...");
5363 +
5364 + /* Need to interrogate on link state to get flow control params */
5365 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
5366 + if (err) {
5367 + netdev_err(net_dev, "ERROR %d getting link state", err);
5368 + goto out;
5369 + }
5370 +
5371 + cfg.options = state.options;
5372 + cfg.rate = ethtool_cmd_speed(cmd);
5373 + if (cmd->autoneg == AUTONEG_ENABLE)
5374 + cfg.options |= DPNI_LINK_OPT_AUTONEG;
5375 + else
5376 + cfg.options &= ~DPNI_LINK_OPT_AUTONEG;
5377 + if (cmd->duplex == DUPLEX_HALF)
5378 + cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX;
5379 + else
5380 + cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
5381 +
5382 + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
5383 + if (err)
5384 + /* ethtool will be loud enough if we return an error; no point
5385 + * in putting our own error message on the console by default
5386 + */
5387 + netdev_dbg(net_dev, "ERROR %d setting link cfg", err);
5388 +
5389 +out:
5390 + return err;
5391 +}
5392 +
5393 +static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
5394 + struct ethtool_pauseparam *pause)
5395 +{
5396 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5397 + struct dpni_link_state state = {0};
5398 + int err;
5399 +
5400 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
5401 + if (err)
5402 + netdev_dbg(net_dev, "ERROR %d getting link state", err);
5403 +
5404 + /* for now, pause frames autonegotiation is not separate */
5405 + pause->autoneg = !!(state.options & DPNI_LINK_OPT_AUTONEG);
5406 + pause->rx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE);
5407 + pause->tx_pause = pause->rx_pause ^
5408 + !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
5409 +}
5410 +
5411 +static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
5412 + struct ethtool_pauseparam *pause)
5413 +{
5414 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5415 + struct dpni_link_state state = {0};
5416 + struct dpni_link_cfg cfg = {0};
5417 + u32 current_tx_pause;
5418 + int err = 0;
5419 +
5420 + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
5421 + if (err) {
5422 + netdev_dbg(net_dev, "ERROR %d getting link state", err);
5423 + goto out;
5424 + }
5425 +
5426 + cfg.rate = state.rate;
5427 + cfg.options = state.options;
5428 + current_tx_pause = !!(cfg.options & DPNI_LINK_OPT_PAUSE) ^
5429 + !!(cfg.options & DPNI_LINK_OPT_ASYM_PAUSE);
5430 +
5431 + if (pause->autoneg != !!(state.options & DPNI_LINK_OPT_AUTONEG))
5432 + netdev_warn(net_dev,
5433 + "WARN: Can't change pause frames autoneg separately\n");
5434 +
5435 + if (pause->rx_pause)
5436 + cfg.options |= DPNI_LINK_OPT_PAUSE;
5437 + else
5438 + cfg.options &= ~DPNI_LINK_OPT_PAUSE;
5439 +
5440 + if (pause->rx_pause ^ pause->tx_pause)
5441 + cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
5442 + else
5443 + cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
5444 +
5445 + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
5446 + if (err) {
5447 + /* ethtool will be loud enough if we return an error; no point
5448 + * in putting our own error message on the console by default
5449 + */
5450 + netdev_dbg(net_dev, "ERROR %d setting link cfg", err);
5451 + goto out;
5452 + }
5453 +
5454 + /* Enable / disable taildrops if Tx pause frames have changed */
5455 + if (current_tx_pause == pause->tx_pause)
5456 + goto out;
5457 +
5458 + priv->tx_pause_frames = pause->tx_pause;
5459 + err = set_rx_taildrop(priv);
5460 + if (err)
5461 + netdev_dbg(net_dev, "ERROR %d configuring taildrop", err);
5462 +
5463 +
5464 +out:
5465 + return err;
5466 +}
5467 +
5468 +static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
5469 + u8 *data)
5470 +{
5471 + u8 *p = data;
5472 + int i;
5473 +
5474 + switch (stringset) {
5475 + case ETH_SS_STATS:
5476 + for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
5477 + strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
5478 + p += ETH_GSTRING_LEN;
5479 + }
5480 + for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
5481 + strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
5482 + p += ETH_GSTRING_LEN;
5483 + }
5484 + break;
5485 + }
5486 +}
5487 +
5488 +static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
5489 +{
5490 + switch (sset) {
5491 + case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
5492 + return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
5493 + default:
5494 + return -EOPNOTSUPP;
5495 + }
5496 +}
5497 +
5498 +/** Fill in hardware counters, as returned by MC.
5499 + */
5500 +static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
5501 + struct ethtool_stats *stats,
5502 + u64 *data)
5503 +{
5504 + int i = 0; /* Current index in the data array */
5505 + int j = 0, k, err;
5506 + union dpni_statistics dpni_stats;
5507 +
5508 +#ifdef CONFIG_FSL_QBMAN_DEBUG
5509 + u32 fcnt, bcnt;
5510 + u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
5511 + u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
5512 + u32 buf_cnt;
5513 +#endif
5514 + u64 cdan = 0;
5515 + u64 portal_busy = 0, pull_err = 0;
5516 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5517 + struct dpaa2_eth_drv_stats *extras;
5518 + struct dpaa2_eth_ch_stats *ch_stats;
5519 +
5520 + memset(data, 0,
5521 + sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
5522 +
5523 + /* Print standard counters, from DPNI statistics */
5524 + for (j = 0; j <= 2; j++) {
5525 + err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
5526 + j, &dpni_stats);
5527 + if (err != 0)
5528 + netdev_warn(net_dev, "Err %d getting DPNI stats page %d",
5529 + err, j);
5530 +
5531 + switch (j) {
5532 + case 0:
5533 + *(data + i++) = dpni_stats.page_0.ingress_all_frames;
5534 + *(data + i++) = dpni_stats.page_0.ingress_all_bytes;
5535 + *(data + i++) = dpni_stats.page_0.ingress_multicast_frames;
5536 + *(data + i++) = dpni_stats.page_0.ingress_multicast_bytes;
5537 + *(data + i++) = dpni_stats.page_0.ingress_broadcast_frames;
5538 + *(data + i++) = dpni_stats.page_0.ingress_broadcast_bytes;
5539 + break;
5540 + case 1:
5541 + *(data + i++) = dpni_stats.page_1.egress_all_frames;
5542 + *(data + i++) = dpni_stats.page_1.egress_all_bytes;
5543 + *(data + i++) = dpni_stats.page_1.egress_multicast_frames;
5544 + *(data + i++) = dpni_stats.page_1.egress_multicast_bytes;
5545 + *(data + i++) = dpni_stats.page_1.egress_broadcast_frames;
5546 + *(data + i++) = dpni_stats.page_1.egress_broadcast_bytes;
5547 + break;
5548 + case 2:
5549 + *(data + i++) = dpni_stats.page_2.ingress_filtered_frames;
5550 + *(data + i++) = dpni_stats.page_2.ingress_discarded_frames;
5551 + *(data + i++) = dpni_stats.page_2.ingress_nobuffer_discards;
5552 + *(data + i++) = dpni_stats.page_2.egress_discarded_frames;
5553 + *(data + i++) = dpni_stats.page_2.egress_confirmed_frames;
5554 + break;
5555 + default:
5556 + break;
5557 + }
5558 + }
5559 +
5560 + /* Print per-cpu extra stats */
5561 + for_each_online_cpu(k) {
5562 + extras = per_cpu_ptr(priv->percpu_extras, k);
5563 + for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
5564 + *((__u64 *)data + i + j) += *((__u64 *)extras + j);
5565 + }
5566 +
5567 + i += j;
5568 +
5569 + /* We may be using fewer DPIOs than actual CPUs */
5570 + for (j = 0; j < priv->num_channels; j++) {
5571 + ch_stats = &priv->channel[j]->stats;
5572 + cdan += ch_stats->cdan;
5573 + portal_busy += ch_stats->dequeue_portal_busy;
5574 + pull_err += ch_stats->pull_err;
5575 + }
5576 +
5577 + *(data + i++) = portal_busy;
5578 + *(data + i++) = pull_err;
5579 + *(data + i++) = cdan;
5580 +
5581 + *(data + i++) = dpaa2_cscn_state_congested(priv->cscn_mem);
5582 +
5583 +#ifdef CONFIG_FSL_QBMAN_DEBUG
5584 + for (j = 0; j < priv->num_fqs; j++) {
5585 + /* Print FQ instantaneous counts */
5586 + err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
5587 + &fcnt, &bcnt);
5588 + if (err) {
5589 + netdev_warn(net_dev, "FQ query error %d", err);
5590 + return;
5591 + }
5592 +
5593 + if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
5594 + fcnt_tx_total += fcnt;
5595 + bcnt_tx_total += bcnt;
5596 + } else {
5597 + fcnt_rx_total += fcnt;
5598 + bcnt_rx_total += bcnt;
5599 + }
5600 + }
5601 +
5602 + *(data + i++) = fcnt_rx_total;
5603 + *(data + i++) = bcnt_rx_total;
5604 + *(data + i++) = fcnt_tx_total;
5605 + *(data + i++) = bcnt_tx_total;
5606 +
5607 + err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
5608 + if (err) {
5609 + netdev_warn(net_dev, "Buffer count query error %d\n", err);
5610 + return;
5611 + }
5612 + *(data + i++) = buf_cnt;
5613 +#endif
5614 +}
5615 +
5616 +static int cls_key_off(struct dpaa2_eth_priv *priv, int prot, int field)
5617 +{
5618 + int i, off = 0;
5619 +
5620 + for (i = 0; i < priv->num_hash_fields; i++) {
5621 + if (priv->hash_fields[i].cls_prot == prot &&
5622 + priv->hash_fields[i].cls_field == field)
5623 + return off;
5624 + off += priv->hash_fields[i].size;
5625 + }
5626 +
5627 + return -1;
5628 +}
5629 +
5630 +static u8 cls_key_size(struct dpaa2_eth_priv *priv)
5631 +{
5632 + u8 i, size = 0;
5633 +
5634 + for (i = 0; i < priv->num_hash_fields; i++)
5635 + size += priv->hash_fields[i].size;
5636 +
5637 + return size;
5638 +}
5639 +
5640 +void check_cls_support(struct dpaa2_eth_priv *priv)
5641 +{
5642 + u8 key_size = cls_key_size(priv);
5643 + struct device *dev = priv->net_dev->dev.parent;
5644 +
5645 + if (dpaa2_eth_hash_enabled(priv)) {
5646 + if (priv->dpni_attrs.fs_key_size < key_size) {
5647 + dev_info(dev, "max_dist_key_size = %d, expected %d. Hashing and steering are disabled\n",
5648 + priv->dpni_attrs.fs_key_size,
5649 + key_size);
5650 + goto disable_fs;
5651 + }
5652 + if (priv->num_hash_fields > DPKG_MAX_NUM_OF_EXTRACTS) {
5653 + dev_info(dev, "Too many key fields (max = %d). Hashing and steering are disabled\n",
5654 + DPKG_MAX_NUM_OF_EXTRACTS);
5655 + goto disable_fs;
5656 + }
5657 + }
5658 +
5659 + if (dpaa2_eth_fs_enabled(priv)) {
5660 + if (!dpaa2_eth_hash_enabled(priv)) {
5661 + dev_info(dev, "Insufficient queues. Steering is disabled\n");
5662 + goto disable_fs;
5663 + }
5664 +
5665 + if (!dpaa2_eth_fs_mask_enabled(priv)) {
5666 + dev_info(dev, "Key masks not supported. Steering is disabled\n");
5667 + goto disable_fs;
5668 + }
5669 + }
5670 +
5671 + return;
5672 +
5673 +disable_fs:
5674 + priv->dpni_attrs.options |= DPNI_OPT_NO_FS;
5675 + priv->dpni_attrs.options &= ~DPNI_OPT_HAS_KEY_MASKING;
5676 +}
5677 +
5678 +static int prep_l4_rule(struct dpaa2_eth_priv *priv,
5679 + struct ethtool_tcpip4_spec *l4_value,
5680 + struct ethtool_tcpip4_spec *l4_mask,
5681 + void *key, void *mask, u8 l4_proto)
5682 +{
5683 + int offset;
5684 +
5685 + if (l4_mask->tos) {
5686 + netdev_err(priv->net_dev, "ToS is not supported for IPv4 L4\n");
5687 + return -EOPNOTSUPP;
5688 + }
5689 +
5690 + if (l4_mask->ip4src) {
5691 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
5692 + *(u32 *)(key + offset) = l4_value->ip4src;
5693 + *(u32 *)(mask + offset) = l4_mask->ip4src;
5694 + }
5695 +
5696 + if (l4_mask->ip4dst) {
5697 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
5698 + *(u32 *)(key + offset) = l4_value->ip4dst;
5699 + *(u32 *)(mask + offset) = l4_mask->ip4dst;
5700 + }
5701 +
5702 + if (l4_mask->psrc) {
5703 + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
5704 + *(u32 *)(key + offset) = l4_value->psrc;
5705 + *(u32 *)(mask + offset) = l4_mask->psrc;
5706 + }
5707 +
5708 + if (l4_mask->pdst) {
5709 + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
5710 + *(u32 *)(key + offset) = l4_value->pdst;
5711 + *(u32 *)(mask + offset) = l4_mask->pdst;
5712 + }
5713 +
5714 + /* Only apply the rule for the user-specified L4 protocol
5715 + * and if ethertype matches IPv4
5716 + */
5717 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
5718 + *(u16 *)(key + offset) = htons(ETH_P_IP);
5719 + *(u16 *)(mask + offset) = 0xFFFF;
5720 +
5721 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
5722 + *(u8 *)(key + offset) = l4_proto;
5723 + *(u8 *)(mask + offset) = 0xFF;
5724 +
5725 + /* TODO: check IP version */
5726 +
5727 + return 0;
5728 +}
5729 +
5730 +static int prep_eth_rule(struct dpaa2_eth_priv *priv,
5731 + struct ethhdr *eth_value, struct ethhdr *eth_mask,
5732 + void *key, void *mask)
5733 +{
5734 + int offset;
5735 +
5736 + if (eth_mask->h_proto) {
5737 + netdev_err(priv->net_dev, "Ethertype is not supported!\n");
5738 + return -EOPNOTSUPP;
5739 + }
5740 +
5741 + if (!is_zero_ether_addr(eth_mask->h_source)) {
5742 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_SA);
5743 + ether_addr_copy(key + offset, eth_value->h_source);
5744 + ether_addr_copy(mask + offset, eth_mask->h_source);
5745 + }
5746 +
5747 + if (!is_zero_ether_addr(eth_mask->h_dest)) {
5748 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
5749 + ether_addr_copy(key + offset, eth_value->h_dest);
5750 + ether_addr_copy(mask + offset, eth_mask->h_dest);
5751 + }
5752 +
5753 + return 0;
5754 +}
5755 +
5756 +static int prep_user_ip_rule(struct dpaa2_eth_priv *priv,
5757 + struct ethtool_usrip4_spec *uip_value,
5758 + struct ethtool_usrip4_spec *uip_mask,
5759 + void *key, void *mask)
5760 +{
5761 + int offset;
5762 +
5763 + if (uip_mask->tos)
5764 + return -EOPNOTSUPP;
5765 +
5766 + if (uip_mask->ip4src) {
5767 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
5768 + *(u32 *)(key + offset) = uip_value->ip4src;
5769 + *(u32 *)(mask + offset) = uip_mask->ip4src;
5770 + }
5771 +
5772 + if (uip_mask->ip4dst) {
5773 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
5774 + *(u32 *)(key + offset) = uip_value->ip4dst;
5775 + *(u32 *)(mask + offset) = uip_mask->ip4dst;
5776 + }
5777 +
5778 + if (uip_mask->proto) {
5779 + offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
5780 + *(u32 *)(key + offset) = uip_value->proto;
5781 + *(u32 *)(mask + offset) = uip_mask->proto;
5782 + }
5783 + if (uip_mask->l4_4_bytes) {
5784 + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
5785 + *(u16 *)(key + offset) = uip_value->l4_4_bytes << 16;
5786 + *(u16 *)(mask + offset) = uip_mask->l4_4_bytes << 16;
5787 +
5788 + offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
5789 + *(u16 *)(key + offset) = uip_value->l4_4_bytes & 0xFFFF;
5790 + *(u16 *)(mask + offset) = uip_mask->l4_4_bytes & 0xFFFF;
5791 + }
5792 +
5793 + /* Ethertype must be IP */
5794 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
5795 + *(u16 *)(key + offset) = htons(ETH_P_IP);
5796 + *(u16 *)(mask + offset) = 0xFFFF;
5797 +
5798 + return 0;
5799 +}
5800 +
5801 +static int prep_ext_rule(struct dpaa2_eth_priv *priv,
5802 + struct ethtool_flow_ext *ext_value,
5803 + struct ethtool_flow_ext *ext_mask,
5804 + void *key, void *mask)
5805 +{
5806 + int offset;
5807 +
5808 + if (ext_mask->vlan_etype)
5809 + return -EOPNOTSUPP;
5810 +
5811 + if (ext_mask->vlan_tci) {
5812 + offset = cls_key_off(priv, NET_PROT_VLAN, NH_FLD_VLAN_TCI);
5813 + *(u16 *)(key + offset) = ext_value->vlan_tci;
5814 + *(u16 *)(mask + offset) = ext_mask->vlan_tci;
5815 + }
5816 +
5817 + return 0;
5818 +}
5819 +
5820 +static int prep_mac_ext_rule(struct dpaa2_eth_priv *priv,
5821 + struct ethtool_flow_ext *ext_value,
5822 + struct ethtool_flow_ext *ext_mask,
5823 + void *key, void *mask)
5824 +{
5825 + int offset;
5826 +
5827 + if (!is_zero_ether_addr(ext_mask->h_dest)) {
5828 + offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
5829 + ether_addr_copy(key + offset, ext_value->h_dest);
5830 + ether_addr_copy(mask + offset, ext_mask->h_dest);
5831 + }
5832 +
5833 + return 0;
5834 +}
5835 +
5836 +static int prep_cls_rule(struct net_device *net_dev,
5837 + struct ethtool_rx_flow_spec *fs,
5838 + void *key)
5839 +{
5840 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5841 + const u8 key_size = cls_key_size(priv);
5842 + void *msk = key + key_size;
5843 + int err;
5844 +
5845 + memset(key, 0, key_size * 2);
5846 +
5847 + switch (fs->flow_type & 0xff) {
5848 + case TCP_V4_FLOW:
5849 + err = prep_l4_rule(priv, &fs->h_u.tcp_ip4_spec,
5850 + &fs->m_u.tcp_ip4_spec, key, msk,
5851 + IPPROTO_TCP);
5852 + break;
5853 + case UDP_V4_FLOW:
5854 + err = prep_l4_rule(priv, &fs->h_u.udp_ip4_spec,
5855 + &fs->m_u.udp_ip4_spec, key, msk,
5856 + IPPROTO_UDP);
5857 + break;
5858 + case SCTP_V4_FLOW:
5859 + err = prep_l4_rule(priv, &fs->h_u.sctp_ip4_spec,
5860 + &fs->m_u.sctp_ip4_spec, key, msk,
5861 + IPPROTO_SCTP);
5862 + break;
5863 + case ETHER_FLOW:
5864 + err = prep_eth_rule(priv, &fs->h_u.ether_spec,
5865 + &fs->m_u.ether_spec, key, msk);
5866 + break;
5867 + case IP_USER_FLOW:
5868 + err = prep_user_ip_rule(priv, &fs->h_u.usr_ip4_spec,
5869 + &fs->m_u.usr_ip4_spec, key, msk);
5870 + break;
5871 + default:
5872 + /* TODO: AH, ESP */
5873 + return -EOPNOTSUPP;
5874 + }
5875 + if (err)
5876 + return err;
5877 +
5878 + if (fs->flow_type & FLOW_EXT) {
5879 + err = prep_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
5880 + if (err)
5881 + return err;
5882 + }
5883 +
5884 + if (fs->flow_type & FLOW_MAC_EXT) {
5885 + err = prep_mac_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
5886 + if (err)
5887 + return err;
5888 + }
5889 +
5890 + return 0;
5891 +}
5892 +
5893 +static int del_cls(struct net_device *net_dev, int location);
5894 +
5895 +static int do_cls(struct net_device *net_dev,
5896 + struct ethtool_rx_flow_spec *fs,
5897 + bool add)
5898 +{
5899 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5900 + struct device *dev = net_dev->dev.parent;
5901 + const int rule_cnt = dpaa2_eth_fs_count(priv);
5902 + struct dpni_rule_cfg rule_cfg;
5903 + struct dpni_fs_action_cfg fs_act = { 0 };
5904 + void *dma_mem;
5905 + int err = 0, tc;
5906 +
5907 + if (!dpaa2_eth_fs_enabled(priv)) {
5908 + netdev_err(net_dev, "dev does not support steering!\n");
5909 + /* dev doesn't support steering */
5910 + return -EOPNOTSUPP;
5911 + }
5912 +
5913 + if ((fs->ring_cookie != RX_CLS_FLOW_DISC &&
5914 + fs->ring_cookie >= dpaa2_eth_queue_count(priv)) ||
5915 + fs->location >= rule_cnt)
5916 + return -EINVAL;
5917 +
5918 + /* When adding a new rule, check if location if available,
5919 + * and if not free the existing table entry before inserting
5920 + * the new one
5921 + */
5922 + if (add && (priv->cls_rule[fs->location].in_use == true))
5923 + del_cls(net_dev, fs->location);
5924 +
5925 + memset(&rule_cfg, 0, sizeof(rule_cfg));
5926 + rule_cfg.key_size = cls_key_size(priv);
5927 +
5928 + /* allocate twice the key size, for the actual key and for mask */
5929 + dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL);
5930 + if (!dma_mem)
5931 + return -ENOMEM;
5932 +
5933 + err = prep_cls_rule(net_dev, fs, dma_mem);
5934 + if (err)
5935 + goto err_free_mem;
5936 +
5937 + rule_cfg.key_iova = dma_map_single(dev, dma_mem,
5938 + rule_cfg.key_size * 2,
5939 + DMA_TO_DEVICE);
5940 +
5941 + rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size;
5942 +
5943 + if (fs->ring_cookie == RX_CLS_FLOW_DISC)
5944 + fs_act.options |= DPNI_FS_OPT_DISCARD;
5945 + else
5946 + fs_act.flow_id = fs->ring_cookie;
5947 +
5948 + for (tc = 0; tc < dpaa2_eth_tc_count(priv); tc++) {
5949 + if (add)
5950 + err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
5951 + tc, fs->location, &rule_cfg,
5952 + &fs_act);
5953 + else
5954 + err = dpni_remove_fs_entry(priv->mc_io, 0,
5955 + priv->mc_token, tc,
5956 + &rule_cfg);
5957 +
5958 + if (err)
5959 + break;
5960 + }
5961 +
5962 + dma_unmap_single(dev, rule_cfg.key_iova,
5963 + rule_cfg.key_size * 2, DMA_TO_DEVICE);
5964 +
5965 + if (err)
5966 + netdev_err(net_dev, "dpaa2_add/remove_cls() error %d\n", err);
5967 +
5968 +err_free_mem:
5969 + kfree(dma_mem);
5970 +
5971 + return err;
5972 +}
5973 +
5974 +static int add_cls(struct net_device *net_dev,
5975 + struct ethtool_rx_flow_spec *fs)
5976 +{
5977 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5978 + int err;
5979 +
5980 + err = do_cls(net_dev, fs, true);
5981 + if (err)
5982 + return err;
5983 +
5984 + priv->cls_rule[fs->location].in_use = true;
5985 + priv->cls_rule[fs->location].fs = *fs;
5986 +
5987 + return 0;
5988 +}
5989 +
5990 +static int del_cls(struct net_device *net_dev, int location)
5991 +{
5992 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
5993 + int err;
5994 +
5995 + err = do_cls(net_dev, &priv->cls_rule[location].fs, false);
5996 + if (err)
5997 + return err;
5998 +
5999 + priv->cls_rule[location].in_use = false;
6000 +
6001 + return 0;
6002 +}
6003 +
6004 +static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
6005 + struct ethtool_rxnfc *rxnfc)
6006 +{
6007 + int err = 0;
6008 +
6009 + switch (rxnfc->cmd) {
6010 + case ETHTOOL_SRXCLSRLINS:
6011 + err = add_cls(net_dev, &rxnfc->fs);
6012 + break;
6013 +
6014 + case ETHTOOL_SRXCLSRLDEL:
6015 + err = del_cls(net_dev, rxnfc->fs.location);
6016 + break;
6017 +
6018 + default:
6019 + err = -EOPNOTSUPP;
6020 + }
6021 +
6022 + return err;
6023 +}
6024 +
6025 +static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
6026 + struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
6027 +{
6028 + struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6029 + const int rule_cnt = dpaa2_eth_fs_count(priv);
6030 + int i, j;
6031 +
6032 + switch (rxnfc->cmd) {
6033 + case ETHTOOL_GRXFH:
6034 + /* we purposely ignore cmd->flow_type, because the hashing key
6035 + * is the same (and fixed) for all protocols
6036 + */
6037 + rxnfc->data = priv->rx_flow_hash;
6038 + break;
6039 +
6040 + case ETHTOOL_GRXRINGS:
6041 + rxnfc->data = dpaa2_eth_queue_count(priv);
6042 + break;
6043 +
6044 + case ETHTOOL_GRXCLSRLCNT:
6045 + for (i = 0, rxnfc->rule_cnt = 0; i < rule_cnt; i++)
6046 + if (priv->cls_rule[i].in_use)
6047 + rxnfc->rule_cnt++;
6048 + rxnfc->data = rule_cnt;
6049 + break;
6050 +
6051 + case ETHTOOL_GRXCLSRULE:
6052 + if (!priv->cls_rule[rxnfc->fs.location].in_use)
6053 + return -EINVAL;
6054 +
6055 + rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs;
6056 + break;
6057 +
6058 + case ETHTOOL_GRXCLSRLALL:
6059 + for (i = 0, j = 0; i < rule_cnt; i++) {
6060 + if (!priv->cls_rule[i].in_use)
6061 + continue;
6062 + if (j == rxnfc->rule_cnt)
6063 + return -EMSGSIZE;
6064 + rule_locs[j++] = i;
6065 + }
6066 + rxnfc->rule_cnt = j;
6067 + rxnfc->data = rule_cnt;
6068 + break;
6069 +
6070 + default:
6071 + return -EOPNOTSUPP;
6072 + }
6073 +
6074 + return 0;
6075 +}
6076 +
/* ethtool callbacks exported by the dpaa2 Ethernet driver; only the
 * operations implemented in this file are populated.
 */
const struct ethtool_ops dpaa2_ethtool_ops = {
	.get_drvinfo = dpaa2_eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_settings = dpaa2_eth_get_settings,
	.set_settings = dpaa2_eth_set_settings,
	.get_pauseparam = dpaa2_eth_get_pauseparam,
	.set_pauseparam = dpaa2_eth_set_pauseparam,
	.get_sset_count = dpaa2_eth_get_sset_count,
	.get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
	.get_strings = dpaa2_eth_get_strings,
	.get_rxnfc = dpaa2_eth_get_rxnfc,
	.set_rxnfc = dpaa2_eth_set_rxnfc,
};
6090 --- /dev/null
6091 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
6092 @@ -0,0 +1,176 @@
6093 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
6094 + *
6095 + * Redistribution and use in source and binary forms, with or without
6096 + * modification, are permitted provided that the following conditions are met:
6097 + * * Redistributions of source code must retain the above copyright
6098 + * notice, this list of conditions and the following disclaimer.
6099 + * * Redistributions in binary form must reproduce the above copyright
6100 + * notice, this list of conditions and the following disclaimer in the
6101 + * documentation and/or other materials provided with the distribution.
6102 + * * Neither the name of the above-listed copyright holders nor the
6103 + * names of any contributors may be used to endorse or promote products
6104 + * derived from this software without specific prior written permission.
6105 + *
6106 + *
6107 + * ALTERNATIVELY, this software may be distributed under the terms of the
6108 + * GNU General Public License ("GPL") as published by the Free Software
6109 + * Foundation, either version 2 of that License or (at your option) any
6110 + * later version.
6111 + *
6112 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
6113 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
6114 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
6115 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
6116 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
6117 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
6118 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
6119 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
6120 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
6121 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
6122 + * POSSIBILITY OF SUCH DAMAGE.
6123 + */
6124 +#ifndef __FSL_DPKG_H_
6125 +#define __FSL_DPKG_H_
6126 +
6127 +#include <linux/types.h>
6128 +#include "net.h"
6129 +
6130 +/* Data Path Key Generator API
6131 + * Contains initialization APIs and runtime APIs for the Key Generator
6132 + */
6133 +
6134 +/** Key Generator properties */
6135 +
6136 +/**
6137 + * Number of masks per key extraction
6138 + */
6139 +#define DPKG_NUM_OF_MASKS 4
6140 +/**
6141 + * Number of extractions per key profile
6142 + */
6143 +#define DPKG_MAX_NUM_OF_EXTRACTS 10
6144 +
6145 +/**
6146 + * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
6147 + * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
6148 + * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
6149 + * @DPKG_FULL_FIELD: Extract a full field
6150 + */
enum dpkg_extract_from_hdr_type {
	DPKG_FROM_HDR = 0,	/* raw bytes at an offset within the header */
	DPKG_FROM_FIELD = 1,	/* bytes at an offset within a named field */
	DPKG_FULL_FIELD = 2	/* the entire named field */
};
6156 +
6157 +/**
6158 + * enum dpkg_extract_type - Enumeration for selecting extraction type
6159 + * @DPKG_EXTRACT_FROM_HDR: Extract from the header
6160 + * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
6161 + * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result;
6162 + * e.g. can be used to extract header existence;
6163 + * please refer to 'Parse Result definition' section in the parser BG
6164 + */
enum dpkg_extract_type {
	DPKG_EXTRACT_FROM_HDR = 0,
	DPKG_EXTRACT_FROM_DATA = 1,
	/* note: value 2 is intentionally not defined in this API */
	DPKG_EXTRACT_FROM_PARSE = 3
};
6170 +
6171 +/**
6172 + * struct dpkg_mask - A structure for defining a single extraction mask
6173 + * @mask: Byte mask for the extracted content
6174 + * @offset: Offset within the extracted content
6175 + */
struct dpkg_mask {
	u8 mask;	/* byte mask applied to the extracted content */
	u8 offset;	/* byte offset within the extracted content */
};
6180 +
6181 +/**
6182 + * struct dpkg_extract - A structure for defining a single extraction
6183 + * @type: Determines how the union below is interpreted:
6184 + * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
6185 + * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
6186 + * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
6187 + * @extract: Selects extraction method
6188 + * @num_of_byte_masks: Defines the number of valid entries in the array below;
6189 + * This is also the number of bytes to be used as masks
6190 + * @masks: Masks parameters
6191 + */
struct dpkg_extract {
	enum dpkg_extract_type type;
	/**
	 * union extract - Selects extraction method
	 * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
	 * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
	 * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
	 */
	union {
		/**
		 * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
		 * @prot: Any of the supported headers
		 * @type: Defines the type of header extraction:
		 *	DPKG_FROM_HDR: use size & offset below;
		 *	DPKG_FROM_FIELD: use field, size and offset below;
		 *	DPKG_FULL_FIELD: use field below
		 * @field: One of the supported fields (NH_FLD_)
		 *
		 * @size: Size in bytes
		 * @offset: Byte offset
		 * @hdr_index: Clear for cases not listed below;
		 *	Used for protocols that may have more than a single
		 *	header, 0 indicates an outer header;
		 *	Supported protocols (possible values):
		 *	NET_PROT_VLAN (0, HDR_INDEX_LAST);
		 *	NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
		 *	NET_PROT_IP(0, HDR_INDEX_LAST);
		 *	NET_PROT_IPv4(0, HDR_INDEX_LAST);
		 *	NET_PROT_IPv6(0, HDR_INDEX_LAST);
		 */

		struct {
			enum net_prot prot;
			enum dpkg_extract_from_hdr_type type;
			u32 field;
			u8 size;
			u8 offset;
			u8 hdr_index;
		} from_hdr;
		/**
		 * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
		 * @size: Size in bytes
		 * @offset: Byte offset
		 */
		struct {
			u8 size;
			u8 offset;
		} from_data;

		/**
		 * struct from_parse - Used when
		 * 'type = DPKG_EXTRACT_FROM_PARSE'
		 * @size: Size in bytes
		 * @offset: Byte offset
		 */
		struct {
			u8 size;
			u8 offset;
		} from_parse;
	} extract;

	/* must not exceed DPKG_NUM_OF_MASKS */
	u8 num_of_byte_masks;
	struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
};
6256 +
6257 +/**
6258 + * struct dpkg_profile_cfg - A structure for defining a full Key Generation
6259 + * profile (rule)
6260 + * @num_extracts: Defines the number of valid entries in the array below
6261 + * @extracts: Array of required extractions
6262 + */
struct dpkg_profile_cfg {
	/* number of valid entries in extracts[]; at most
	 * DPKG_MAX_NUM_OF_EXTRACTS
	 */
	u8 num_extracts;
	struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
};
6267 +
6268 +#endif /* __FSL_DPKG_H_ */
6269 --- /dev/null
6270 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
6271 @@ -0,0 +1,658 @@
6272 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
6273 + * Copyright 2016 NXP
6274 + *
6275 + * Redistribution and use in source and binary forms, with or without
6276 + * modification, are permitted provided that the following conditions are met:
6277 + * * Redistributions of source code must retain the above copyright
6278 + * notice, this list of conditions and the following disclaimer.
6279 + * * Redistributions in binary form must reproduce the above copyright
6280 + * notice, this list of conditions and the following disclaimer in the
6281 + * documentation and/or other materials provided with the distribution.
6282 + * * Neither the name of the above-listed copyright holders nor the
6283 + * names of any contributors may be used to endorse or promote products
6284 + * derived from this software without specific prior written permission.
6285 + *
6286 + *
6287 + * ALTERNATIVELY, this software may be distributed under the terms of the
6288 + * GNU General Public License ("GPL") as published by the Free Software
6289 + * Foundation, either version 2 of that License or (at your option) any
6290 + * later version.
6291 + *
6292 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
6293 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
6294 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
6295 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
6296 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
6297 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
6298 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
6299 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
6300 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
6301 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
6302 + * POSSIBILITY OF SUCH DAMAGE.
6303 + */
6304 +#ifndef _FSL_DPNI_CMD_H
6305 +#define _FSL_DPNI_CMD_H
6306 +
6307 +/* DPNI Version */
6308 +#define DPNI_VER_MAJOR 7
6309 +#define DPNI_VER_MINOR 0
6310 +#define DPNI_CMD_BASE_VERSION 1
6311 +#define DPNI_CMD_2ND_VERSION 2
6312 +#define DPNI_CMD_ID_OFFSET 4
6313 +
6314 +#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
6315 +#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION)
6316 +
6317 +#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
6318 +#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
6319 +#define DPNI_CMDID_CREATE DPNI_CMD(0x901)
6320 +#define DPNI_CMDID_DESTROY DPNI_CMD(0x900)
6321 +#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01)
6322 +
6323 +#define DPNI_CMDID_ENABLE DPNI_CMD(0x002)
6324 +#define DPNI_CMDID_DISABLE DPNI_CMD(0x003)
6325 +#define DPNI_CMDID_GET_ATTR DPNI_CMD(0x004)
6326 +#define DPNI_CMDID_RESET DPNI_CMD(0x005)
6327 +#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006)
6328 +
6329 +#define DPNI_CMDID_SET_IRQ DPNI_CMD(0x010)
6330 +#define DPNI_CMDID_GET_IRQ DPNI_CMD(0x011)
6331 +#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012)
6332 +#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013)
6333 +#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014)
6334 +#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015)
6335 +#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
6336 +#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
6337 +
6338 +#define DPNI_CMDID_SET_POOLS DPNI_CMD_V2(0x200)
6339 +#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
6340 +
6341 +#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
6342 +#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212)
6343 +#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215)
6344 +#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
6345 +#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
6346 +#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A)
6347 +#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD(0x21B)
6348 +
6349 +#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
6350 +#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
6351 +#define DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222)
6352 +#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223)
6353 +#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224)
6354 +#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225)
6355 +#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD(0x226)
6356 +#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227)
6357 +#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228)
6358 +
6359 +#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
6360 +
6361 +#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240)
6362 +#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241)
6363 +#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
6364 +#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
6365 +#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
6366 +
6367 +#define DPNI_CMDID_GET_STATISTICS DPNI_CMD(0x25D)
6368 +#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E)
6369 +#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F)
6370 +#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260)
6371 +#define DPNI_CMDID_GET_TAILDROP DPNI_CMD(0x261)
6372 +#define DPNI_CMDID_SET_TAILDROP DPNI_CMD(0x262)
6373 +
6374 +#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263)
6375 +
6376 +#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD(0x264)
6377 +#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD(0x265)
6378 +
6379 +#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
6380 +#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD(0x267)
6381 +#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD(0x268)
6382 +#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD(0x269)
6383 +#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD(0x26A)
6384 +#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
6385 +#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
6386 +
6387 +/* Macros for accessing command fields smaller than 1byte */
6388 +#define DPNI_MASK(field) \
6389 + GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
6390 + DPNI_##field##_SHIFT)
6391 +
6392 +#define dpni_set_field(var, field, val) \
6393 + ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field)))
6394 +#define dpni_get_field(var, field) \
6395 + (((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT)
6396 +
6397 +struct dpni_cmd_open {
6398 + __le32 dpni_id;
6399 +};
6400 +
/* Sets bit 'order' of backup_pool_mask when the pool at that index is a
 * backup pool.
 */
#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
/* Command payload; struct name matches DPNI_CMDID_SET_POOLS (v2) above */
struct dpni_cmd_set_pools {
	u8 num_dpbp;
	u8 backup_pool_mask;
	__le16 pad;
	struct {
		__le16 dpbp_id;
		u8 priority_mask;
		u8 pad;
	} pool[DPNI_MAX_DPBP];
	__le16 buffer_size[DPNI_MAX_DPBP];
};
6413 +
6414 +/* The enable indication is always the least significant bit */
6415 +#define DPNI_ENABLE_SHIFT 0
6416 +#define DPNI_ENABLE_SIZE 1
6417 +
6418 +struct dpni_rsp_is_enabled {
6419 + u8 enabled;
6420 +};
6421 +
6422 +struct dpni_rsp_get_irq {
6423 + /* response word 0 */
6424 + __le32 irq_val;
6425 + __le32 pad;
6426 + /* response word 1 */
6427 + __le64 irq_addr;
6428 + /* response word 2 */
6429 + __le32 irq_num;
6430 + __le32 type;
6431 +};
6432 +
6433 +struct dpni_cmd_set_irq_enable {
6434 + u8 enable;
6435 + u8 pad[3];
6436 + u8 irq_index;
6437 +};
6438 +
6439 +struct dpni_cmd_get_irq_enable {
6440 + __le32 pad;
6441 + u8 irq_index;
6442 +};
6443 +
6444 +struct dpni_rsp_get_irq_enable {
6445 + u8 enabled;
6446 +};
6447 +
6448 +struct dpni_cmd_set_irq_mask {
6449 + __le32 mask;
6450 + u8 irq_index;
6451 +};
6452 +
6453 +struct dpni_cmd_get_irq_mask {
6454 + __le32 pad;
6455 + u8 irq_index;
6456 +};
6457 +
6458 +struct dpni_rsp_get_irq_mask {
6459 + __le32 mask;
6460 +};
6461 +
6462 +struct dpni_cmd_get_irq_status {
6463 + __le32 status;
6464 + u8 irq_index;
6465 +};
6466 +
6467 +struct dpni_rsp_get_irq_status {
6468 + __le32 status;
6469 +};
6470 +
6471 +struct dpni_cmd_clear_irq_status {
6472 + __le32 status;
6473 + u8 irq_index;
6474 +};
6475 +
/* Response layout; struct name matches DPNI_CMDID_GET_ATTR above.
 * Fields are little-endian as produced by the MC firmware.
 */
struct dpni_rsp_get_attr {
	/* response word 0 */
	__le32 options;
	u8 num_queues;
	u8 num_tcs;
	u8 mac_filter_entries;
	u8 pad0;
	/* response word 1 */
	u8 vlan_filter_entries;
	u8 pad1;
	u8 qos_entries;
	u8 pad2;
	__le16 fs_entries;
	__le16 pad3;
	/* response word 2 */
	u8 qos_key_size;
	u8 fs_key_size;
	__le16 wriop_version;
};
6495 +
6496 +#define DPNI_ERROR_ACTION_SHIFT 0
6497 +#define DPNI_ERROR_ACTION_SIZE 4
6498 +#define DPNI_FRAME_ANN_SHIFT 4
6499 +#define DPNI_FRAME_ANN_SIZE 1
6500 +
6501 +struct dpni_cmd_set_errors_behavior {
6502 + __le32 errors;
6503 + /* from least significant bit: error_action:4, set_frame_annotation:1 */
6504 + u8 flags;
6505 +};
6506 +
6507 +/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation
6508 + * buffer layouts, but they all share the same parameters.
6509 + * If one of the functions changes, below structure needs to be split.
6510 + */
6511 +
6512 +#define DPNI_PASS_TS_SHIFT 0
6513 +#define DPNI_PASS_TS_SIZE 1
6514 +#define DPNI_PASS_PR_SHIFT 1
6515 +#define DPNI_PASS_PR_SIZE 1
6516 +#define DPNI_PASS_FS_SHIFT 2
6517 +#define DPNI_PASS_FS_SIZE 1
6518 +
6519 +struct dpni_cmd_get_buffer_layout {
6520 + u8 qtype;
6521 +};
6522 +
6523 +struct dpni_rsp_get_buffer_layout {
6524 + /* response word 0 */
6525 + u8 pad0[6];
6526 + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
6527 + u8 flags;
6528 + u8 pad1;
6529 + /* response word 1 */
6530 + __le16 private_data_size;
6531 + __le16 data_align;
6532 + __le16 head_room;
6533 + __le16 tail_room;
6534 +};
6535 +
6536 +struct dpni_cmd_set_buffer_layout {
6537 + /* cmd word 0 */
6538 + u8 qtype;
6539 + u8 pad0[3];
6540 + __le16 options;
6541 + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
6542 + u8 flags;
6543 + u8 pad1;
6544 + /* cmd word 1 */
6545 + __le16 private_data_size;
6546 + __le16 data_align;
6547 + __le16 head_room;
6548 + __le16 tail_room;
6549 +};
6550 +
6551 +struct dpni_cmd_set_offload {
6552 + u8 pad[3];
6553 + u8 dpni_offload;
6554 + __le32 config;
6555 +};
6556 +
6557 +struct dpni_cmd_get_offload {
6558 + u8 pad[3];
6559 + u8 dpni_offload;
6560 +};
6561 +
6562 +struct dpni_rsp_get_offload {
6563 + __le32 pad;
6564 + __le32 config;
6565 +};
6566 +
6567 +struct dpni_cmd_get_qdid {
6568 + u8 qtype;
6569 +};
6570 +
6571 +struct dpni_rsp_get_qdid {
6572 + __le16 qdid;
6573 +};
6574 +
6575 +struct dpni_rsp_get_tx_data_offset {
6576 + __le16 data_offset;
6577 +};
6578 +
6579 +struct dpni_cmd_get_statistics {
6580 + u8 page_number;
6581 +};
6582 +
6583 +struct dpni_rsp_get_statistics {
6584 + __le64 counter[DPNI_STATISTICS_CNT];
6585 +};
6586 +
6587 +struct dpni_cmd_set_link_cfg {
6588 + /* cmd word 0 */
6589 + __le64 pad0;
6590 + /* cmd word 1 */
6591 + __le32 rate;
6592 + __le32 pad1;
6593 + /* cmd word 2 */
6594 + __le64 options;
6595 +};
6596 +
6597 +#define DPNI_LINK_STATE_SHIFT 0
6598 +#define DPNI_LINK_STATE_SIZE 1
6599 +
6600 +struct dpni_rsp_get_link_state {
6601 + /* response word 0 */
6602 + __le32 pad0;
6603 + /* from LSB: up:1 */
6604 + u8 flags;
6605 + u8 pad1[3];
6606 + /* response word 1 */
6607 + __le32 rate;
6608 + __le32 pad2;
6609 + /* response word 2 */
6610 + __le64 options;
6611 +};
6612 +
6613 +struct dpni_cmd_set_tx_shaping {
6614 + /* cmd word 0 */
6615 + __le16 max_burst_size;
6616 + __le16 pad0[3];
6617 + /* cmd word 1 */
6618 + __le32 rate_limit;
6619 +};
6620 +
6621 +struct dpni_cmd_set_max_frame_length {
6622 + __le16 max_frame_length;
6623 +};
6624 +
6625 +struct dpni_rsp_get_max_frame_length {
6626 + __le16 max_frame_length;
6627 +};
6628 +
6629 +struct dpni_cmd_set_multicast_promisc {
6630 + u8 enable;
6631 +};
6632 +
6633 +struct dpni_rsp_get_multicast_promisc {
6634 + u8 enabled;
6635 +};
6636 +
6637 +struct dpni_cmd_set_unicast_promisc {
6638 + u8 enable;
6639 +};
6640 +
6641 +struct dpni_rsp_get_unicast_promisc {
6642 + u8 enabled;
6643 +};
6644 +
6645 +struct dpni_cmd_set_primary_mac_addr {
6646 + __le16 pad;
6647 + u8 mac_addr[6];
6648 +};
6649 +
6650 +struct dpni_rsp_get_primary_mac_addr {
6651 + __le16 pad;
6652 + u8 mac_addr[6];
6653 +};
6654 +
6655 +struct dpni_rsp_get_port_mac_addr {
6656 + __le16 pad;
6657 + u8 mac_addr[6];
6658 +};
6659 +
6660 +struct dpni_cmd_add_mac_addr {
6661 + __le16 pad;
6662 + u8 mac_addr[6];
6663 +};
6664 +
6665 +struct dpni_cmd_remove_mac_addr {
6666 + __le16 pad;
6667 + u8 mac_addr[6];
6668 +};
6669 +
6670 +#define DPNI_UNICAST_FILTERS_SHIFT 0
6671 +#define DPNI_UNICAST_FILTERS_SIZE 1
6672 +#define DPNI_MULTICAST_FILTERS_SHIFT 1
6673 +#define DPNI_MULTICAST_FILTERS_SIZE 1
6674 +
6675 +struct dpni_cmd_clear_mac_filters {
6676 + /* from LSB: unicast:1, multicast:1 */
6677 + u8 flags;
6678 +};
6679 +
6680 +#define DPNI_DIST_MODE_SHIFT 0
6681 +#define DPNI_DIST_MODE_SIZE 4
6682 +#define DPNI_MISS_ACTION_SHIFT 4
6683 +#define DPNI_MISS_ACTION_SIZE 4
6684 +
6685 +struct dpni_cmd_set_rx_tc_dist {
6686 + /* cmd word 0 */
6687 + __le16 dist_size;
6688 + u8 tc_id;
6689 + /* from LSB: dist_mode:4, miss_action:4 */
6690 + u8 flags;
6691 + __le16 pad0;
6692 + __le16 default_flow_id;
6693 + /* cmd word 1..5 */
6694 + __le64 pad1[5];
6695 + /* cmd word 6 */
6696 + __le64 key_cfg_iova;
6697 +};
6698 +
6699 +/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at
6700 + * key_cfg_iova)
6701 + */
6702 +struct dpni_mask_cfg {
6703 + u8 mask;
6704 + u8 offset;
6705 +};
6706 +
6707 +#define DPNI_EFH_TYPE_SHIFT 0
6708 +#define DPNI_EFH_TYPE_SIZE 4
6709 +#define DPNI_EXTRACT_TYPE_SHIFT 0
6710 +#define DPNI_EXTRACT_TYPE_SIZE 4
6711 +
6712 +struct dpni_dist_extract {
6713 + /* word 0 */
6714 + u8 prot;
6715 + /* EFH type stored in the 4 least significant bits */
6716 + u8 efh_type;
6717 + u8 size;
6718 + u8 offset;
6719 + __le32 field;
6720 + /* word 1 */
6721 + u8 hdr_index;
6722 + u8 constant;
6723 + u8 num_of_repeats;
6724 + u8 num_of_byte_masks;
6725 + /* Extraction type is stored in the 4 LSBs */
6726 + u8 extract_type;
6727 + u8 pad[3];
6728 + /* word 2 */
6729 + struct dpni_mask_cfg masks[4];
6730 +};
6731 +
6732 +struct dpni_ext_set_rx_tc_dist {
6733 + /* extension word 0 */
6734 + u8 num_extracts;
6735 + u8 pad[7];
6736 + /* words 1..25 */
6737 + struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
6738 +};
6739 +
6740 +struct dpni_cmd_get_queue {
6741 + u8 qtype;
6742 + u8 tc;
6743 + u8 index;
6744 +};
6745 +
6746 +#define DPNI_DEST_TYPE_SHIFT 0
6747 +#define DPNI_DEST_TYPE_SIZE 4
6748 +#define DPNI_STASH_CTRL_SHIFT 6
6749 +#define DPNI_STASH_CTRL_SIZE 1
6750 +#define DPNI_HOLD_ACTIVE_SHIFT 7
6751 +#define DPNI_HOLD_ACTIVE_SIZE 1
6752 +
6753 +struct dpni_rsp_get_queue {
6754 + /* response word 0 */
6755 + __le64 pad0;
6756 + /* response word 1 */
6757 + __le32 dest_id;
6758 + __le16 pad1;
6759 + u8 dest_prio;
6760 + /* From LSB: dest_type:4, pad:2, flc_stash_ctrl:1, hold_active:1 */
6761 + u8 flags;
6762 + /* response word 2 */
6763 + __le64 flc;
6764 + /* response word 3 */
6765 + __le64 user_context;
6766 + /* response word 4 */
6767 + __le32 fqid;
6768 + __le16 qdbin;
6769 +};
6770 +
6771 +struct dpni_cmd_set_queue {
6772 + /* cmd word 0 */
6773 + u8 qtype;
6774 + u8 tc;
6775 + u8 index;
6776 + u8 options;
6777 + __le32 pad0;
6778 + /* cmd word 1 */
6779 + __le32 dest_id;
6780 + __le16 pad1;
6781 + u8 dest_prio;
6782 + u8 flags;
6783 + /* cmd word 2 */
6784 + __le64 flc;
6785 + /* cmd word 3 */
6786 + __le64 user_context;
6787 +};
6788 +
6789 +#define DPNI_DISCARD_ON_MISS_SHIFT 0
6790 +#define DPNI_DISCARD_ON_MISS_SIZE 1
6791 +
6792 +struct dpni_cmd_set_qos_table {
6793 + u32 pad;
6794 + u8 default_tc;
6795 + /* only the LSB */
6796 + u8 discard_on_miss;
6797 + u16 pad1[21];
6798 + u64 key_cfg_iova;
6799 +};
6800 +
6801 +struct dpni_cmd_add_qos_entry {
6802 + u16 pad;
6803 + u8 tc_id;
6804 + u8 key_size;
6805 + u16 index;
6806 + u16 pad2;
6807 + u64 key_iova;
6808 + u64 mask_iova;
6809 +};
6810 +
6811 +struct dpni_cmd_remove_qos_entry {
6812 + u8 pad1[3];
6813 + u8 key_size;
6814 + u32 pad2;
6815 + u64 key_iova;
6816 + u64 mask_iova;
6817 +};
6818 +
6819 +struct dpni_cmd_add_fs_entry {
6820 + /* cmd word 0 */
6821 + u16 options;
6822 + u8 tc_id;
6823 + u8 key_size;
6824 + u16 index;
6825 + u16 flow_id;
6826 + /* cmd word 1 */
6827 + u64 key_iova;
6828 + /* cmd word 2 */
6829 + u64 mask_iova;
6830 + /* cmd word 3 */
6831 + u64 flc;
6832 +};
6833 +
6834 +struct dpni_cmd_remove_fs_entry {
6835 + /* cmd word 0 */
6836 + __le16 pad0;
6837 + u8 tc_id;
6838 + u8 key_size;
6839 + __le32 pad1;
6840 + /* cmd word 1 */
6841 + u64 key_iova;
6842 + /* cmd word 2 */
6843 + u64 mask_iova;
6844 +};
6845 +
6846 +struct dpni_cmd_set_taildrop {
6847 + /* cmd word 0 */
6848 + u8 congestion_point;
6849 + u8 qtype;
6850 + u8 tc;
6851 + u8 index;
6852 + __le32 pad0;
6853 + /* cmd word 1 */
6854 + /* Only least significant bit is relevant */
6855 + u8 enable;
6856 + u8 pad1;
6857 + u8 units;
6858 + u8 pad2;
6859 + __le32 threshold;
6860 +};
6861 +
6862 +struct dpni_cmd_get_taildrop {
6863 + u8 congestion_point;
6864 + u8 qtype;
6865 + u8 tc;
6866 + u8 index;
6867 +};
6868 +
6869 +struct dpni_rsp_get_taildrop {
6870 + /* cmd word 0 */
6871 + __le64 pad0;
6872 + /* cmd word 1 */
6873 + /* only least significant bit is relevant */
6874 + u8 enable;
6875 + u8 pad1;
6876 + u8 units;
6877 + u8 pad2;
6878 + __le32 threshold;
6879 +};
6880 +
6881 +#define DPNI_DEST_TYPE_SHIFT 0
6882 +#define DPNI_DEST_TYPE_SIZE 4
6883 +#define DPNI_CONG_UNITS_SHIFT 4
6884 +#define DPNI_CONG_UNITS_SIZE 2
6885 +
6886 +struct dpni_cmd_set_congestion_notification {
6887 + /* cmd word 0 */
6888 + u8 qtype;
6889 + u8 tc;
6890 + u8 pad[6];
6891 + /* cmd word 1 */
6892 + u32 dest_id;
6893 + u16 notification_mode;
6894 + u8 dest_priority;
6895 + /* from LSB: dest_type: 4 units:2 */
6896 + u8 type_units;
6897 + /* cmd word 2 */
6898 + u64 message_iova;
6899 + /* cmd word 3 */
6900 + u64 message_ctx;
6901 + /* cmd word 4 */
6902 + u32 threshold_entry;
6903 + u32 threshold_exit;
6904 +};
6905 +
6906 +struct dpni_cmd_get_congestion_notification {
6907 + /* cmd word 0 */
6908 + u8 qtype;
6909 + u8 tc;
6910 +};
6911 +
6912 +struct dpni_rsp_get_congestion_notification {
6913 + /* cmd word 0 */
6914 + u64 pad;
6915 + /* cmd word 1 */
6916 + u32 dest_id;
6917 + u16 notification_mode;
6918 + u8 dest_priority;
6919 + /* from LSB: dest_type: 4 units:2 */
6920 + u8 type_units;
6921 + /* cmd word 2 */
6922 + u64 message_iova;
6923 + /* cmd word 3 */
6924 + u64 message_ctx;
6925 + /* cmd word 4 */
6926 + u32 threshold_entry;
6927 + u32 threshold_exit;
6928 +};
6929 +#endif /* _FSL_DPNI_CMD_H */
6930 --- /dev/null
6931 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
6932 @@ -0,0 +1,1903 @@
6933 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
6934 + * Copyright 2016 NXP
6935 + *
6936 + * Redistribution and use in source and binary forms, with or without
6937 + * modification, are permitted provided that the following conditions are met:
6938 + * * Redistributions of source code must retain the above copyright
6939 + * notice, this list of conditions and the following disclaimer.
6940 + * * Redistributions in binary form must reproduce the above copyright
6941 + * notice, this list of conditions and the following disclaimer in the
6942 + * documentation and/or other materials provided with the distribution.
6943 + * * Neither the name of the above-listed copyright holders nor the
6944 + * names of any contributors may be used to endorse or promote products
6945 + * derived from this software without specific prior written permission.
6946 + *
6947 + *
6948 + * ALTERNATIVELY, this software may be distributed under the terms of the
6949 + * GNU General Public License ("GPL") as published by the Free Software
6950 + * Foundation, either version 2 of that License or (at your option) any
6951 + * later version.
6952 + *
6953 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
6954 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
6955 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
6956 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
6957 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
6958 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
6959 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
6960 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
6961 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
6962 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
6963 + * POSSIBILITY OF SUCH DAMAGE.
6964 + */
6965 +#include "../../fsl-mc/include/mc-sys.h"
6966 +#include "../../fsl-mc/include/mc-cmd.h"
6967 +#include "dpni.h"
6968 +#include "dpni-cmd.h"
6969 +
6970 +/**
6971 + * dpni_prepare_key_cfg() - function prepare extract parameters
6972 + * @cfg: defining a full Key Generation profile (rule)
6973 + * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
6974 + *
6975 + * This function has to be called before the following functions:
6976 + * - dpni_set_rx_tc_dist()
6977 + * - dpni_set_qos_table()
6978 + */
6979 +int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf)
6980 +{
6981 + int i, j;
6982 + struct dpni_ext_set_rx_tc_dist *dpni_ext;
6983 + struct dpni_dist_extract *extr;
6984 +
6985 + if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
6986 + return -EINVAL;
6987 +
6988 + dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf;
6989 + dpni_ext->num_extracts = cfg->num_extracts;
6990 +
6991 + for (i = 0; i < cfg->num_extracts; i++) {
6992 + extr = &dpni_ext->extracts[i];
6993 +
6994 + switch (cfg->extracts[i].type) {
6995 + case DPKG_EXTRACT_FROM_HDR:
6996 + extr->prot = cfg->extracts[i].extract.from_hdr.prot;
6997 + dpni_set_field(extr->efh_type, EFH_TYPE,
6998 + cfg->extracts[i].extract.from_hdr.type);
6999 + extr->size = cfg->extracts[i].extract.from_hdr.size;
7000 + extr->offset = cfg->extracts[i].extract.from_hdr.offset;
7001 + extr->field = cpu_to_le32(
7002 + cfg->extracts[i].extract.from_hdr.field);
7003 + extr->hdr_index =
7004 + cfg->extracts[i].extract.from_hdr.hdr_index;
7005 + break;
7006 + case DPKG_EXTRACT_FROM_DATA:
7007 + extr->size = cfg->extracts[i].extract.from_data.size;
7008 + extr->offset =
7009 + cfg->extracts[i].extract.from_data.offset;
7010 + break;
7011 + case DPKG_EXTRACT_FROM_PARSE:
7012 + extr->size = cfg->extracts[i].extract.from_parse.size;
7013 + extr->offset =
7014 + cfg->extracts[i].extract.from_parse.offset;
7015 + break;
7016 + default:
7017 + return -EINVAL;
7018 + }
7019 +
7020 + extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
7021 + dpni_set_field(extr->extract_type, EXTRACT_TYPE,
7022 + cfg->extracts[i].type);
7023 +
7024 + for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
7025 + extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
7026 + extr->masks[j].offset =
7027 + cfg->extracts[i].masks[j].offset;
7028 + }
7029 + }
7030 +
7031 + return 0;
7032 +}
7033 +
7034 +/**
7035 + * dpni_open() - Open a control session for the specified object
7036 + * @mc_io: Pointer to MC portal's I/O object
7037 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7038 + * @dpni_id: DPNI unique ID
7039 + * @token: Returned token; use in subsequent API calls
7040 + *
7041 + * This function can be used to open a control session for an
7042 + * already created object; an object may have been declared in
7043 + * the DPL or by calling the dpni_create() function.
7044 + * This function returns a unique authentication token,
7045 + * associated with the specific object ID and the specific MC
7046 + * portal; this token must be used in all subsequent commands for
7047 + * this specific object.
7048 + *
7049 + * Return: '0' on Success; Error code otherwise.
7050 + */
7051 +int dpni_open(struct fsl_mc_io *mc_io,
7052 + u32 cmd_flags,
7053 + int dpni_id,
7054 + u16 *token)
7055 +{
7056 + struct mc_command cmd = { 0 };
7057 + struct dpni_cmd_open *cmd_params;
7058 +
7059 + int err;
7060 +
7061 + /* prepare command */
7062 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
7063 + cmd_flags,
7064 + 0);
7065 + cmd_params = (struct dpni_cmd_open *)cmd.params;
7066 + cmd_params->dpni_id = cpu_to_le32(dpni_id);
7067 +
7068 + /* send command to mc*/
7069 + err = mc_send_command(mc_io, &cmd);
7070 + if (err)
7071 + return err;
7072 +
7073 + /* retrieve response parameters */
7074 + *token = mc_cmd_hdr_read_token(&cmd);
7075 +
7076 + return 0;
7077 +}
7078 +
7079 +/**
7080 + * dpni_close() - Close the control session of the object
7081 + * @mc_io: Pointer to MC portal's I/O object
7082 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7083 + * @token: Token of DPNI object
7084 + *
7085 + * After this function is called, no further operations are
7086 + * allowed on the object without opening a new control session.
7087 + *
7088 + * Return: '0' on Success; Error code otherwise.
7089 + */
7090 +int dpni_close(struct fsl_mc_io *mc_io,
7091 + u32 cmd_flags,
7092 + u16 token)
7093 +{
7094 + struct mc_command cmd = { 0 };
7095 +
7096 + /* prepare command */
7097 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
7098 + cmd_flags,
7099 + token);
7100 +
7101 + /* send command to mc*/
7102 + return mc_send_command(mc_io, &cmd);
7103 +}
7104 +
7105 +/**
7106 + * dpni_set_pools() - Set buffer pools configuration
7107 + * @mc_io: Pointer to MC portal's I/O object
7108 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7109 + * @token: Token of DPNI object
7110 + * @cfg: Buffer pools configuration
7111 + *
7112 + * mandatory for DPNI operation
7113 + * warning:Allowed only when DPNI is disabled
7114 + *
7115 + * Return: '0' on Success; Error code otherwise.
7116 + */
7117 +int dpni_set_pools(struct fsl_mc_io *mc_io,
7118 + u32 cmd_flags,
7119 + u16 token,
7120 + const struct dpni_pools_cfg *cfg)
7121 +{
7122 + struct mc_command cmd = { 0 };
7123 + struct dpni_cmd_set_pools *cmd_params;
7124 + int i;
7125 +
7126 + /* prepare command */
7127 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
7128 + cmd_flags,
7129 + token);
7130 + cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
7131 + cmd_params->num_dpbp = cfg->num_dpbp;
7132 + for (i = 0; i < DPNI_MAX_DPBP; i++) {
7133 + cmd_params->pool[i].dpbp_id =
7134 + cpu_to_le16(cfg->pools[i].dpbp_id);
7135 + cmd_params->pool[i].priority_mask =
7136 + cfg->pools[i].priority_mask;
7137 + cmd_params->buffer_size[i] =
7138 + cpu_to_le16(cfg->pools[i].buffer_size);
7139 + cmd_params->backup_pool_mask |=
7140 + DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);
7141 + }
7142 +
7143 + /* send command to mc*/
7144 + return mc_send_command(mc_io, &cmd);
7145 +}
7146 +
7147 +/**
7148 + * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
7149 + * @mc_io: Pointer to MC portal's I/O object
7150 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7151 + * @token: Token of DPNI object
7152 + *
7153 + * Return: '0' on Success; Error code otherwise.
7154 + */
7155 +int dpni_enable(struct fsl_mc_io *mc_io,
7156 + u32 cmd_flags,
7157 + u16 token)
7158 +{
7159 + struct mc_command cmd = { 0 };
7160 +
7161 + /* prepare command */
7162 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
7163 + cmd_flags,
7164 + token);
7165 +
7166 + /* send command to mc*/
7167 + return mc_send_command(mc_io, &cmd);
7168 +}
7169 +
7170 +/**
7171 + * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
7172 + * @mc_io: Pointer to MC portal's I/O object
7173 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7174 + * @token: Token of DPNI object
7175 + *
7176 + * Return: '0' on Success; Error code otherwise.
7177 + */
7178 +int dpni_disable(struct fsl_mc_io *mc_io,
7179 + u32 cmd_flags,
7180 + u16 token)
7181 +{
7182 + struct mc_command cmd = { 0 };
7183 +
7184 + /* prepare command */
7185 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
7186 + cmd_flags,
7187 + token);
7188 +
7189 + /* send command to mc*/
7190 + return mc_send_command(mc_io, &cmd);
7191 +}
7192 +
7193 +/**
7194 + * dpni_is_enabled() - Check if the DPNI is enabled.
7195 + * @mc_io: Pointer to MC portal's I/O object
7196 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7197 + * @token: Token of DPNI object
7198 + * @en: Returns '1' if object is enabled; '0' otherwise
7199 + *
7200 + * Return: '0' on Success; Error code otherwise.
7201 + */
7202 +int dpni_is_enabled(struct fsl_mc_io *mc_io,
7203 + u32 cmd_flags,
7204 + u16 token,
7205 + int *en)
7206 +{
7207 + struct mc_command cmd = { 0 };
7208 + struct dpni_rsp_is_enabled *rsp_params;
7209 + int err;
7210 +
7211 + /* prepare command */
7212 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED,
7213 + cmd_flags,
7214 + token);
7215 +
7216 + /* send command to mc*/
7217 + err = mc_send_command(mc_io, &cmd);
7218 + if (err)
7219 + return err;
7220 +
7221 + /* retrieve response parameters */
7222 + rsp_params = (struct dpni_rsp_is_enabled *)cmd.params;
7223 + *en = dpni_get_field(rsp_params->enabled, ENABLE);
7224 +
7225 + return 0;
7226 +}
7227 +
7228 +/**
7229 + * dpni_reset() - Reset the DPNI, returns the object to initial state.
7230 + * @mc_io: Pointer to MC portal's I/O object
7231 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7232 + * @token: Token of DPNI object
7233 + *
7234 + * Return: '0' on Success; Error code otherwise.
7235 + */
7236 +int dpni_reset(struct fsl_mc_io *mc_io,
7237 + u32 cmd_flags,
7238 + u16 token)
7239 +{
7240 + struct mc_command cmd = { 0 };
7241 +
7242 + /* prepare command */
7243 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
7244 + cmd_flags,
7245 + token);
7246 +
7247 + /* send command to mc*/
7248 + return mc_send_command(mc_io, &cmd);
7249 +}
7250 +
7251 +/**
7252 + * dpni_set_irq_enable() - Set overall interrupt state.
7253 + * @mc_io: Pointer to MC portal's I/O object
7254 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7255 + * @token: Token of DPNI object
7256 + * @irq_index: The interrupt index to configure
7257 + * @en: Interrupt state: - enable = 1, disable = 0
7258 + *
7259 + * Allows GPP software to control when interrupts are generated.
7260 + * Each interrupt can have up to 32 causes. The enable/disable control's the
7261 + * overall interrupt state. if the interrupt is disabled no causes will cause
7262 + * an interrupt.
7263 + *
7264 + * Return: '0' on Success; Error code otherwise.
7265 + */
7266 +int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
7267 + u32 cmd_flags,
7268 + u16 token,
7269 + u8 irq_index,
7270 + u8 en)
7271 +{
7272 + struct mc_command cmd = { 0 };
7273 + struct dpni_cmd_set_irq_enable *cmd_params;
7274 +
7275 + /* prepare command */
7276 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE,
7277 + cmd_flags,
7278 + token);
7279 + cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params;
7280 + dpni_set_field(cmd_params->enable, ENABLE, en);
7281 + cmd_params->irq_index = irq_index;
7282 +
7283 + /* send command to mc*/
7284 + return mc_send_command(mc_io, &cmd);
7285 +}
7286 +
7287 +/**
7288 + * dpni_get_irq_enable() - Get overall interrupt state
7289 + * @mc_io: Pointer to MC portal's I/O object
7290 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7291 + * @token: Token of DPNI object
7292 + * @irq_index: The interrupt index to configure
7293 + * @en: Returned interrupt state - enable = 1, disable = 0
7294 + *
7295 + * Return: '0' on Success; Error code otherwise.
7296 + */
7297 +int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
7298 + u32 cmd_flags,
7299 + u16 token,
7300 + u8 irq_index,
7301 + u8 *en)
7302 +{
7303 + struct mc_command cmd = { 0 };
7304 + struct dpni_cmd_get_irq_enable *cmd_params;
7305 + struct dpni_rsp_get_irq_enable *rsp_params;
7306 +
7307 + int err;
7308 +
7309 + /* prepare command */
7310 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE,
7311 + cmd_flags,
7312 + token);
7313 + cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params;
7314 + cmd_params->irq_index = irq_index;
7315 +
7316 + /* send command to mc*/
7317 + err = mc_send_command(mc_io, &cmd);
7318 + if (err)
7319 + return err;
7320 +
7321 + /* retrieve response parameters */
7322 + rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params;
7323 + *en = dpni_get_field(rsp_params->enabled, ENABLE);
7324 +
7325 + return 0;
7326 +}
7327 +
7328 +/**
7329 + * dpni_set_irq_mask() - Set interrupt mask.
7330 + * @mc_io: Pointer to MC portal's I/O object
7331 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7332 + * @token: Token of DPNI object
7333 + * @irq_index: The interrupt index to configure
7334 + * @mask: event mask to trigger interrupt;
7335 + * each bit:
7336 + * 0 = ignore event
7337 + * 1 = consider event for asserting IRQ
7338 + *
7339 + * Every interrupt can have up to 32 causes and the interrupt model supports
7340 + * masking/unmasking each cause independently
7341 + *
7342 + * Return: '0' on Success; Error code otherwise.
7343 + */
7344 +int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
7345 + u32 cmd_flags,
7346 + u16 token,
7347 + u8 irq_index,
7348 + u32 mask)
7349 +{
7350 + struct mc_command cmd = { 0 };
7351 + struct dpni_cmd_set_irq_mask *cmd_params;
7352 +
7353 + /* prepare command */
7354 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK,
7355 + cmd_flags,
7356 + token);
7357 + cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params;
7358 + cmd_params->mask = cpu_to_le32(mask);
7359 + cmd_params->irq_index = irq_index;
7360 +
7361 + /* send command to mc*/
7362 + return mc_send_command(mc_io, &cmd);
7363 +}
7364 +
7365 +/**
7366 + * dpni_get_irq_mask() - Get interrupt mask.
7367 + * @mc_io: Pointer to MC portal's I/O object
7368 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7369 + * @token: Token of DPNI object
7370 + * @irq_index: The interrupt index to configure
7371 + * @mask: Returned event mask to trigger interrupt
7372 + *
7373 + * Every interrupt can have up to 32 causes and the interrupt model supports
7374 + * masking/unmasking each cause independently
7375 + *
7376 + * Return: '0' on Success; Error code otherwise.
7377 + */
7378 +int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
7379 + u32 cmd_flags,
7380 + u16 token,
7381 + u8 irq_index,
7382 + u32 *mask)
7383 +{
7384 + struct mc_command cmd = { 0 };
7385 + struct dpni_cmd_get_irq_mask *cmd_params;
7386 + struct dpni_rsp_get_irq_mask *rsp_params;
7387 + int err;
7388 +
7389 + /* prepare command */
7390 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK,
7391 + cmd_flags,
7392 + token);
7393 + cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params;
7394 + cmd_params->irq_index = irq_index;
7395 +
7396 + /* send command to mc*/
7397 + err = mc_send_command(mc_io, &cmd);
7398 + if (err)
7399 + return err;
7400 +
7401 + /* retrieve response parameters */
7402 + rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params;
7403 + *mask = le32_to_cpu(rsp_params->mask);
7404 +
7405 + return 0;
7406 +}
7407 +
7408 +/**
7409 + * dpni_get_irq_status() - Get the current status of any pending interrupts.
7410 + * @mc_io: Pointer to MC portal's I/O object
7411 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7412 + * @token: Token of DPNI object
7413 + * @irq_index: The interrupt index to configure
7414 + * @status: Returned interrupts status - one bit per cause:
7415 + * 0 = no interrupt pending
7416 + * 1 = interrupt pending
7417 + *
7418 + * Return: '0' on Success; Error code otherwise.
7419 + */
7420 +int dpni_get_irq_status(struct fsl_mc_io *mc_io,
7421 + u32 cmd_flags,
7422 + u16 token,
7423 + u8 irq_index,
7424 + u32 *status)
7425 +{
7426 + struct mc_command cmd = { 0 };
7427 + struct dpni_cmd_get_irq_status *cmd_params;
7428 + struct dpni_rsp_get_irq_status *rsp_params;
7429 + int err;
7430 +
7431 + /* prepare command */
7432 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS,
7433 + cmd_flags,
7434 + token);
7435 + cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params;
7436 + cmd_params->status = cpu_to_le32(*status);
7437 + cmd_params->irq_index = irq_index;
7438 +
7439 + /* send command to mc*/
7440 + err = mc_send_command(mc_io, &cmd);
7441 + if (err)
7442 + return err;
7443 +
7444 + /* retrieve response parameters */
7445 + rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params;
7446 + *status = le32_to_cpu(rsp_params->status);
7447 +
7448 + return 0;
7449 +}
7450 +
7451 +/**
7452 + * dpni_clear_irq_status() - Clear a pending interrupt's status
7453 + * @mc_io: Pointer to MC portal's I/O object
7454 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7455 + * @token: Token of DPNI object
7456 + * @irq_index: The interrupt index to configure
7457 + * @status: bits to clear (W1C) - one bit per cause:
7458 + * 0 = don't change
7459 + * 1 = clear status bit
7460 + *
7461 + * Return: '0' on Success; Error code otherwise.
7462 + */
7463 +int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
7464 + u32 cmd_flags,
7465 + u16 token,
7466 + u8 irq_index,
7467 + u32 status)
7468 +{
7469 + struct mc_command cmd = { 0 };
7470 + struct dpni_cmd_clear_irq_status *cmd_params;
7471 +
7472 + /* prepare command */
7473 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS,
7474 + cmd_flags,
7475 + token);
7476 + cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params;
7477 + cmd_params->irq_index = irq_index;
7478 + cmd_params->status = cpu_to_le32(status);
7479 +
7480 + /* send command to mc*/
7481 + return mc_send_command(mc_io, &cmd);
7482 +}
7483 +
7484 +/**
7485 + * dpni_get_attributes() - Retrieve DPNI attributes.
7486 + * @mc_io: Pointer to MC portal's I/O object
7487 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7488 + * @token: Token of DPNI object
7489 + * @attr: Object's attributes
7490 + *
7491 + * Return: '0' on Success; Error code otherwise.
7492 + */
7493 +int dpni_get_attributes(struct fsl_mc_io *mc_io,
7494 + u32 cmd_flags,
7495 + u16 token,
7496 + struct dpni_attr *attr)
7497 +{
7498 + struct mc_command cmd = { 0 };
7499 + struct dpni_rsp_get_attr *rsp_params;
7500 +
7501 + int err;
7502 +
7503 + /* prepare command */
7504 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR,
7505 + cmd_flags,
7506 + token);
7507 +
7508 + /* send command to mc*/
7509 + err = mc_send_command(mc_io, &cmd);
7510 + if (err)
7511 + return err;
7512 +
7513 + /* retrieve response parameters */
7514 + rsp_params = (struct dpni_rsp_get_attr *)cmd.params;
7515 + attr->options = le32_to_cpu(rsp_params->options);
7516 + attr->num_queues = rsp_params->num_queues;
7517 + attr->num_tcs = rsp_params->num_tcs;
7518 + attr->mac_filter_entries = rsp_params->mac_filter_entries;
7519 + attr->vlan_filter_entries = rsp_params->vlan_filter_entries;
7520 + attr->qos_entries = rsp_params->qos_entries;
7521 + attr->fs_entries = le16_to_cpu(rsp_params->fs_entries);
7522 + attr->qos_key_size = rsp_params->qos_key_size;
7523 + attr->fs_key_size = rsp_params->fs_key_size;
7524 + attr->wriop_version = le16_to_cpu(rsp_params->wriop_version);
7525 +
7526 + return 0;
7527 +}
7528 +
7529 +/**
7530 + * dpni_set_errors_behavior() - Set errors behavior
7531 + * @mc_io: Pointer to MC portal's I/O object
7532 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7533 + * @token: Token of DPNI object
7534 + * @cfg: Errors configuration
7535 + *
7536 + * this function may be called numerous times with different
7537 + * error masks
7538 + *
7539 + * Return: '0' on Success; Error code otherwise.
7540 + */
7541 +int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
7542 + u32 cmd_flags,
7543 + u16 token,
7544 + struct dpni_error_cfg *cfg)
7545 +{
7546 + struct mc_command cmd = { 0 };
7547 + struct dpni_cmd_set_errors_behavior *cmd_params;
7548 +
7549 + /* prepare command */
7550 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR,
7551 + cmd_flags,
7552 + token);
7553 + cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params;
7554 + cmd_params->errors = cpu_to_le32(cfg->errors);
7555 + dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action);
7556 + dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation);
7557 +
7558 + /* send command to mc*/
7559 + return mc_send_command(mc_io, &cmd);
7560 +}
7561 +
7562 +/**
7563 + * dpni_get_buffer_layout() - Retrieve buffer layout attributes.
7564 + * @mc_io: Pointer to MC portal's I/O object
7565 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7566 + * @token: Token of DPNI object
7567 + * @qtype: Type of queue to retrieve configuration for
7568 + * @layout: Returns buffer layout attributes
7569 + *
7570 + * Return: '0' on Success; Error code otherwise.
7571 + */
7572 +int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
7573 + u32 cmd_flags,
7574 + u16 token,
7575 + enum dpni_queue_type qtype,
7576 + struct dpni_buffer_layout *layout)
7577 +{
7578 + struct mc_command cmd = { 0 };
7579 + struct dpni_cmd_get_buffer_layout *cmd_params;
7580 + struct dpni_rsp_get_buffer_layout *rsp_params;
7581 + int err;
7582 +
7583 + /* prepare command */
7584 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT,
7585 + cmd_flags,
7586 + token);
7587 + cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params;
7588 + cmd_params->qtype = qtype;
7589 +
7590 + /* send command to mc*/
7591 + err = mc_send_command(mc_io, &cmd);
7592 + if (err)
7593 + return err;
7594 +
7595 + /* retrieve response parameters */
7596 + rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params;
7597 + layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS);
7598 + layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR);
7599 + layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS);
7600 + layout->private_data_size = le16_to_cpu(rsp_params->private_data_size);
7601 + layout->data_align = le16_to_cpu(rsp_params->data_align);
7602 + layout->data_head_room = le16_to_cpu(rsp_params->head_room);
7603 + layout->data_tail_room = le16_to_cpu(rsp_params->tail_room);
7604 +
7605 + return 0;
7606 +}
7607 +
7608 +/**
7609 + * dpni_set_buffer_layout() - Set buffer layout configuration.
7610 + * @mc_io: Pointer to MC portal's I/O object
7611 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7612 + * @token: Token of DPNI object
7613 + * @qtype: Type of queue this configuration applies to
7614 + * @layout: Buffer layout configuration
7615 + *
7616 + * Return: '0' on Success; Error code otherwise.
7617 + *
7618 + * @warning Allowed only when DPNI is disabled
7619 + */
7620 +int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
7621 + u32 cmd_flags,
7622 + u16 token,
7623 + enum dpni_queue_type qtype,
7624 + const struct dpni_buffer_layout *layout)
7625 +{
7626 + struct mc_command cmd = { 0 };
7627 + struct dpni_cmd_set_buffer_layout *cmd_params;
7628 +
7629 + /* prepare command */
7630 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT,
7631 + cmd_flags,
7632 + token);
7633 + cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params;
7634 + cmd_params->qtype = qtype;
7635 + cmd_params->options = cpu_to_le16(layout->options);
7636 + dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp);
7637 + dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result);
7638 + dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status);
7639 + cmd_params->private_data_size = cpu_to_le16(layout->private_data_size);
7640 + cmd_params->data_align = cpu_to_le16(layout->data_align);
7641 + cmd_params->head_room = cpu_to_le16(layout->data_head_room);
7642 + cmd_params->tail_room = cpu_to_le16(layout->data_tail_room);
7643 +
7644 + /* send command to mc*/
7645 + return mc_send_command(mc_io, &cmd);
7646 +}
7647 +
7648 +/**
7649 + * dpni_set_offload() - Set DPNI offload configuration.
7650 + * @mc_io: Pointer to MC portal's I/O object
7651 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7652 + * @token: Token of DPNI object
7653 + * @type: Type of DPNI offload
7654 + * @config: Offload configuration.
7655 + * For checksum offloads, non-zero value enables the offload
7656 + *
7657 + * Return: '0' on Success; Error code otherwise.
7658 + *
7659 + * @warning Allowed only when DPNI is disabled
7660 + */
7661 +
7662 +int dpni_set_offload(struct fsl_mc_io *mc_io,
7663 + u32 cmd_flags,
7664 + u16 token,
7665 + enum dpni_offload type,
7666 + u32 config)
7667 +{
7668 + struct mc_command cmd = { 0 };
7669 + struct dpni_cmd_set_offload *cmd_params;
7670 +
7671 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
7672 + cmd_flags,
7673 + token);
7674 + cmd_params = (struct dpni_cmd_set_offload *)cmd.params;
7675 + cmd_params->dpni_offload = type;
7676 + cmd_params->config = cpu_to_le32(config);
7677 +
7678 + return mc_send_command(mc_io, &cmd);
7679 +}
7680 +
7681 +int dpni_get_offload(struct fsl_mc_io *mc_io,
7682 + u32 cmd_flags,
7683 + u16 token,
7684 + enum dpni_offload type,
7685 + u32 *config)
7686 +{
7687 + struct mc_command cmd = { 0 };
7688 + struct dpni_cmd_get_offload *cmd_params;
7689 + struct dpni_rsp_get_offload *rsp_params;
7690 + int err;
7691 +
7692 + /* prepare command */
7693 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD,
7694 + cmd_flags,
7695 + token);
7696 + cmd_params = (struct dpni_cmd_get_offload *)cmd.params;
7697 + cmd_params->dpni_offload = type;
7698 +
7699 + /* send command to mc*/
7700 + err = mc_send_command(mc_io, &cmd);
7701 + if (err)
7702 + return err;
7703 +
7704 + /* retrieve response parameters */
7705 + rsp_params = (struct dpni_rsp_get_offload *)cmd.params;
7706 + *config = le32_to_cpu(rsp_params->config);
7707 +
7708 + return 0;
7709 +}
7710 +
7711 +/**
7712 + * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
7713 + * for enqueue operations
7714 + * @mc_io: Pointer to MC portal's I/O object
7715 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7716 + * @token: Token of DPNI object
7717 + * @qtype: Type of queue to receive QDID for
7718 + * @qdid: Returned virtual QDID value that should be used as an argument
7719 + * in all enqueue operations
7720 + *
7721 + * Return: '0' on Success; Error code otherwise.
7722 + */
7723 +int dpni_get_qdid(struct fsl_mc_io *mc_io,
7724 + u32 cmd_flags,
7725 + u16 token,
7726 + enum dpni_queue_type qtype,
7727 + u16 *qdid)
7728 +{
7729 + struct mc_command cmd = { 0 };
7730 + struct dpni_cmd_get_qdid *cmd_params;
7731 + struct dpni_rsp_get_qdid *rsp_params;
7732 + int err;
7733 +
7734 + /* prepare command */
7735 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
7736 + cmd_flags,
7737 + token);
7738 + cmd_params = (struct dpni_cmd_get_qdid *)cmd.params;
7739 + cmd_params->qtype = qtype;
7740 +
7741 + /* send command to mc*/
7742 + err = mc_send_command(mc_io, &cmd);
7743 + if (err)
7744 + return err;
7745 +
7746 + /* retrieve response parameters */
7747 + rsp_params = (struct dpni_rsp_get_qdid *)cmd.params;
7748 + *qdid = le16_to_cpu(rsp_params->qdid);
7749 +
7750 + return 0;
7751 +}
7752 +
7753 +/**
7754 + * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
7755 + * @mc_io: Pointer to MC portal's I/O object
7756 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7757 + * @token: Token of DPNI object
7758 + * @data_offset: Tx data offset (from start of buffer)
7759 + *
7760 + * Return: '0' on Success; Error code otherwise.
7761 + */
7762 +int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
7763 + u32 cmd_flags,
7764 + u16 token,
7765 + u16 *data_offset)
7766 +{
7767 + struct mc_command cmd = { 0 };
7768 + struct dpni_rsp_get_tx_data_offset *rsp_params;
7769 + int err;
7770 +
7771 + /* prepare command */
7772 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
7773 + cmd_flags,
7774 + token);
7775 +
7776 + /* send command to mc*/
7777 + err = mc_send_command(mc_io, &cmd);
7778 + if (err)
7779 + return err;
7780 +
7781 + /* retrieve response parameters */
7782 + rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params;
7783 + *data_offset = le16_to_cpu(rsp_params->data_offset);
7784 +
7785 + return 0;
7786 +}
7787 +
7788 +/**
7789 + * dpni_set_link_cfg() - set the link configuration.
7790 + * @mc_io: Pointer to MC portal's I/O object
7791 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7792 + * @token: Token of DPNI object
7793 + * @cfg: Link configuration
7794 + *
7795 + * Return: '0' on Success; Error code otherwise.
7796 + */
7797 +int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
7798 + u32 cmd_flags,
7799 + u16 token,
7800 + const struct dpni_link_cfg *cfg)
7801 +{
7802 + struct mc_command cmd = { 0 };
7803 + struct dpni_cmd_set_link_cfg *cmd_params;
7804 +
7805 + /* prepare command */
7806 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
7807 + cmd_flags,
7808 + token);
7809 + cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params;
7810 + cmd_params->rate = cpu_to_le32(cfg->rate);
7811 + cmd_params->options = cpu_to_le64(cfg->options);
7812 +
7813 + /* send command to mc*/
7814 + return mc_send_command(mc_io, &cmd);
7815 +}
7816 +
7817 +/**
7818 + * dpni_get_link_state() - Return the link state (either up or down)
7819 + * @mc_io: Pointer to MC portal's I/O object
7820 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7821 + * @token: Token of DPNI object
7822 + * @state: Returned link state;
7823 + *
7824 + * Return: '0' on Success; Error code otherwise.
7825 + */
7826 +int dpni_get_link_state(struct fsl_mc_io *mc_io,
7827 + u32 cmd_flags,
7828 + u16 token,
7829 + struct dpni_link_state *state)
7830 +{
7831 + struct mc_command cmd = { 0 };
7832 + struct dpni_rsp_get_link_state *rsp_params;
7833 + int err;
7834 +
7835 + /* prepare command */
7836 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
7837 + cmd_flags,
7838 + token);
7839 +
7840 + /* send command to mc*/
7841 + err = mc_send_command(mc_io, &cmd);
7842 + if (err)
7843 + return err;
7844 +
7845 + /* retrieve response parameters */
7846 + rsp_params = (struct dpni_rsp_get_link_state *)cmd.params;
7847 + state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
7848 + state->rate = le32_to_cpu(rsp_params->rate);
7849 + state->options = le64_to_cpu(rsp_params->options);
7850 +
7851 + return 0;
7852 +}
7853 +
7854 +/**
7855 + * dpni_set_tx_shaping() - Set the transmit shaping
7856 + * @mc_io: Pointer to MC portal's I/O object
7857 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7858 + * @token: Token of DPNI object
7859 + * @tx_shaper: tx shaping configuration
7860 + *
7861 + * Return: '0' on Success; Error code otherwise.
7862 + */
7863 +int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
7864 + u32 cmd_flags,
7865 + u16 token,
7866 + const struct dpni_tx_shaping_cfg *tx_shaper)
7867 +{
7868 + struct mc_command cmd = { 0 };
7869 + struct dpni_cmd_set_tx_shaping *cmd_params;
7870 +
7871 + /* prepare command */
7872 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
7873 + cmd_flags,
7874 + token);
7875 + cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params;
7876 + cmd_params->max_burst_size = cpu_to_le16(tx_shaper->max_burst_size);
7877 + cmd_params->rate_limit = cpu_to_le32(tx_shaper->rate_limit);
7878 +
7879 + /* send command to mc*/
7880 + return mc_send_command(mc_io, &cmd);
7881 +}
7882 +
7883 +/**
7884 + * dpni_set_max_frame_length() - Set the maximum received frame length.
7885 + * @mc_io: Pointer to MC portal's I/O object
7886 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7887 + * @token: Token of DPNI object
7888 + * @max_frame_length: Maximum received frame length (in
7889 + * bytes); frame is discarded if its
7890 + * length exceeds this value
7891 + *
7892 + * Return: '0' on Success; Error code otherwise.
7893 + */
7894 +int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
7895 + u32 cmd_flags,
7896 + u16 token,
7897 + u16 max_frame_length)
7898 +{
7899 + struct mc_command cmd = { 0 };
7900 + struct dpni_cmd_set_max_frame_length *cmd_params;
7901 +
7902 + /* prepare command */
7903 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH,
7904 + cmd_flags,
7905 + token);
7906 + cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params;
7907 + cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
7908 +
7909 + /* send command to mc*/
7910 + return mc_send_command(mc_io, &cmd);
7911 +}
7912 +
7913 +/**
7914 + * dpni_get_max_frame_length() - Get the maximum received frame length.
7915 + * @mc_io: Pointer to MC portal's I/O object
7916 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7917 + * @token: Token of DPNI object
7918 + * @max_frame_length: Maximum received frame length (in
7919 + * bytes); frame is discarded if its
7920 + * length exceeds this value
7921 + *
7922 + * Return: '0' on Success; Error code otherwise.
7923 + */
7924 +int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
7925 + u32 cmd_flags,
7926 + u16 token,
7927 + u16 *max_frame_length)
7928 +{
7929 + struct mc_command cmd = { 0 };
7930 + struct dpni_rsp_get_max_frame_length *rsp_params;
7931 + int err;
7932 +
7933 + /* prepare command */
7934 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
7935 + cmd_flags,
7936 + token);
7937 +
7938 + /* send command to mc*/
7939 + err = mc_send_command(mc_io, &cmd);
7940 + if (err)
7941 + return err;
7942 +
7943 + /* retrieve response parameters */
7944 + rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params;
7945 + *max_frame_length = le16_to_cpu(rsp_params->max_frame_length);
7946 +
7947 + return 0;
7948 +}
7949 +
7950 +/**
7951 + * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
7952 + * @mc_io: Pointer to MC portal's I/O object
7953 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7954 + * @token: Token of DPNI object
7955 + * @en: Set to '1' to enable; '0' to disable
7956 + *
7957 + * Return: '0' on Success; Error code otherwise.
7958 + */
7959 +int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
7960 + u32 cmd_flags,
7961 + u16 token,
7962 + int en)
7963 +{
7964 + struct mc_command cmd = { 0 };
7965 + struct dpni_cmd_set_multicast_promisc *cmd_params;
7966 +
7967 + /* prepare command */
7968 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC,
7969 + cmd_flags,
7970 + token);
7971 + cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params;
7972 + dpni_set_field(cmd_params->enable, ENABLE, en);
7973 +
7974 + /* send command to mc*/
7975 + return mc_send_command(mc_io, &cmd);
7976 +}
7977 +
7978 +/**
7979 + * dpni_get_multicast_promisc() - Get multicast promiscuous mode
7980 + * @mc_io: Pointer to MC portal's I/O object
7981 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
7982 + * @token: Token of DPNI object
7983 + * @en: Returns '1' if enabled; '0' otherwise
7984 + *
7985 + * Return: '0' on Success; Error code otherwise.
7986 + */
7987 +int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
7988 + u32 cmd_flags,
7989 + u16 token,
7990 + int *en)
7991 +{
7992 + struct mc_command cmd = { 0 };
7993 + struct dpni_rsp_get_multicast_promisc *rsp_params;
7994 + int err;
7995 +
7996 + /* prepare command */
7997 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
7998 + cmd_flags,
7999 + token);
8000 +
8001 + /* send command to mc*/
8002 + err = mc_send_command(mc_io, &cmd);
8003 + if (err)
8004 + return err;
8005 +
8006 + /* retrieve response parameters */
8007 + rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params;
8008 + *en = dpni_get_field(rsp_params->enabled, ENABLE);
8009 +
8010 + return 0;
8011 +}
8012 +
8013 +/**
8014 + * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
8015 + * @mc_io: Pointer to MC portal's I/O object
8016 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8017 + * @token: Token of DPNI object
8018 + * @en: Set to '1' to enable; '0' to disable
8019 + *
8020 + * Return: '0' on Success; Error code otherwise.
8021 + */
8022 +int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
8023 + u32 cmd_flags,
8024 + u16 token,
8025 + int en)
8026 +{
8027 + struct mc_command cmd = { 0 };
8028 + struct dpni_cmd_set_unicast_promisc *cmd_params;
8029 +
8030 + /* prepare command */
8031 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
8032 + cmd_flags,
8033 + token);
8034 + cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params;
8035 + dpni_set_field(cmd_params->enable, ENABLE, en);
8036 +
8037 + /* send command to mc*/
8038 + return mc_send_command(mc_io, &cmd);
8039 +}
8040 +
8041 +/**
8042 + * dpni_get_unicast_promisc() - Get unicast promiscuous mode
8043 + * @mc_io: Pointer to MC portal's I/O object
8044 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8045 + * @token: Token of DPNI object
8046 + * @en: Returns '1' if enabled; '0' otherwise
8047 + *
8048 + * Return: '0' on Success; Error code otherwise.
8049 + */
8050 +int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
8051 + u32 cmd_flags,
8052 + u16 token,
8053 + int *en)
8054 +{
8055 + struct mc_command cmd = { 0 };
8056 + struct dpni_rsp_get_unicast_promisc *rsp_params;
8057 + int err;
8058 +
8059 + /* prepare command */
8060 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
8061 + cmd_flags,
8062 + token);
8063 +
8064 + /* send command to mc*/
8065 + err = mc_send_command(mc_io, &cmd);
8066 + if (err)
8067 + return err;
8068 +
8069 + /* retrieve response parameters */
8070 + rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params;
8071 + *en = dpni_get_field(rsp_params->enabled, ENABLE);
8072 +
8073 + return 0;
8074 +}
8075 +
8076 +/**
8077 + * dpni_set_primary_mac_addr() - Set the primary MAC address
8078 + * @mc_io: Pointer to MC portal's I/O object
8079 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8080 + * @token: Token of DPNI object
8081 + * @mac_addr: MAC address to set as primary address
8082 + *
8083 + * Return: '0' on Success; Error code otherwise.
8084 + */
8085 +int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
8086 + u32 cmd_flags,
8087 + u16 token,
8088 + const u8 mac_addr[6])
8089 +{
8090 + struct mc_command cmd = { 0 };
8091 + struct dpni_cmd_set_primary_mac_addr *cmd_params;
8092 + int i;
8093 +
8094 + /* prepare command */
8095 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
8096 + cmd_flags,
8097 + token);
8098 + cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params;
8099 + for (i = 0; i < 6; i++)
8100 + cmd_params->mac_addr[i] = mac_addr[5 - i];
8101 +
8102 + /* send command to mc*/
8103 + return mc_send_command(mc_io, &cmd);
8104 +}
8105 +
8106 +/**
8107 + * dpni_get_primary_mac_addr() - Get the primary MAC address
8108 + * @mc_io: Pointer to MC portal's I/O object
8109 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8110 + * @token: Token of DPNI object
8111 + * @mac_addr: Returned MAC address
8112 + *
8113 + * Return: '0' on Success; Error code otherwise.
8114 + */
8115 +int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
8116 + u32 cmd_flags,
8117 + u16 token,
8118 + u8 mac_addr[6])
8119 +{
8120 + struct mc_command cmd = { 0 };
8121 + struct dpni_rsp_get_primary_mac_addr *rsp_params;
8122 + int i, err;
8123 +
8124 + /* prepare command */
8125 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
8126 + cmd_flags,
8127 + token);
8128 +
8129 + /* send command to mc*/
8130 + err = mc_send_command(mc_io, &cmd);
8131 + if (err)
8132 + return err;
8133 +
8134 + /* retrieve response parameters */
8135 + rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params;
8136 + for (i = 0; i < 6; i++)
8137 + mac_addr[5 - i] = rsp_params->mac_addr[i];
8138 +
8139 + return 0;
8140 +}
8141 +
8142 +/**
8143 + * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical
8144 + * port the DPNI is attached to
8145 + * @mc_io: Pointer to MC portal's I/O object
8146 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8147 + * @token: Token of DPNI object
8148 + * @mac_addr: MAC address of the physical port, if any, otherwise 0
8149 + *
8150 + * The primary MAC address is not cleared by this operation.
8151 + *
8152 + * Return: '0' on Success; Error code otherwise.
8153 + */
8154 +int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
8155 + u32 cmd_flags,
8156 + u16 token,
8157 + u8 mac_addr[6])
8158 +{
8159 + struct mc_command cmd = { 0 };
8160 + struct dpni_rsp_get_port_mac_addr *rsp_params;
8161 + int i, err;
8162 +
8163 + /* prepare command */
8164 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR,
8165 + cmd_flags,
8166 + token);
8167 +
8168 + /* send command to mc*/
8169 + err = mc_send_command(mc_io, &cmd);
8170 + if (err)
8171 + return err;
8172 +
8173 + /* retrieve response parameters */
8174 + rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params;
8175 + for (i = 0; i < 6; i++)
8176 + mac_addr[5 - i] = rsp_params->mac_addr[i];
8177 +
8178 + return 0;
8179 +}
8180 +
8181 +/**
8182 + * dpni_add_mac_addr() - Add MAC address filter
8183 + * @mc_io: Pointer to MC portal's I/O object
8184 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8185 + * @token: Token of DPNI object
8186 + * @mac_addr: MAC address to add
8187 + *
8188 + * Return: '0' on Success; Error code otherwise.
8189 + */
8190 +int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
8191 + u32 cmd_flags,
8192 + u16 token,
8193 + const u8 mac_addr[6])
8194 +{
8195 + struct mc_command cmd = { 0 };
8196 + struct dpni_cmd_add_mac_addr *cmd_params;
8197 + int i;
8198 +
8199 + /* prepare command */
8200 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
8201 + cmd_flags,
8202 + token);
8203 + cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params;
8204 + for (i = 0; i < 6; i++)
8205 + cmd_params->mac_addr[i] = mac_addr[5 - i];
8206 +
8207 + /* send command to mc*/
8208 + return mc_send_command(mc_io, &cmd);
8209 +}
8210 +
8211 +/**
8212 + * dpni_remove_mac_addr() - Remove MAC address filter
8213 + * @mc_io: Pointer to MC portal's I/O object
8214 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8215 + * @token: Token of DPNI object
8216 + * @mac_addr: MAC address to remove
8217 + *
8218 + * Return: '0' on Success; Error code otherwise.
8219 + */
8220 +int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
8221 + u32 cmd_flags,
8222 + u16 token,
8223 + const u8 mac_addr[6])
8224 +{
8225 + struct mc_command cmd = { 0 };
8226 + struct dpni_cmd_remove_mac_addr *cmd_params;
8227 + int i;
8228 +
8229 + /* prepare command */
8230 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
8231 + cmd_flags,
8232 + token);
8233 + cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params;
8234 + for (i = 0; i < 6; i++)
8235 + cmd_params->mac_addr[i] = mac_addr[5 - i];
8236 +
8237 + /* send command to mc*/
8238 + return mc_send_command(mc_io, &cmd);
8239 +}
8240 +
8241 +/**
8242 + * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
8243 + * @mc_io: Pointer to MC portal's I/O object
8244 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8245 + * @token: Token of DPNI object
8246 + * @unicast: Set to '1' to clear unicast addresses
8247 + * @multicast: Set to '1' to clear multicast addresses
8248 + *
8249 + * The primary MAC address is not cleared by this operation.
8250 + *
8251 + * Return: '0' on Success; Error code otherwise.
8252 + */
8253 +int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
8254 + u32 cmd_flags,
8255 + u16 token,
8256 + int unicast,
8257 + int multicast)
8258 +{
8259 + struct mc_command cmd = { 0 };
8260 + struct dpni_cmd_clear_mac_filters *cmd_params;
8261 +
8262 + /* prepare command */
8263 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
8264 + cmd_flags,
8265 + token);
8266 + cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params;
8267 + dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast);
8268 + dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast);
8269 +
8270 + /* send command to mc*/
8271 + return mc_send_command(mc_io, &cmd);
8272 +}
8273 +
8274 +/**
8275 + * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
8276 + * @mc_io: Pointer to MC portal's I/O object
8277 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8278 + * @token: Token of DPNI object
8279 + * @tc_id: Traffic class selection (0-7)
8280 + * @cfg: Traffic class distribution configuration
8281 + *
8282 + * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg()
8283 + * first to prepare the key_cfg_iova parameter
8284 + *
8285 + * Return: '0' on Success; error code otherwise.
8286 + */
8287 +int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
8288 + u32 cmd_flags,
8289 + u16 token,
8290 + u8 tc_id,
8291 + const struct dpni_rx_tc_dist_cfg *cfg)
8292 +{
8293 + struct mc_command cmd = { 0 };
8294 + struct dpni_cmd_set_rx_tc_dist *cmd_params;
8295 +
8296 + /* prepare command */
8297 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
8298 + cmd_flags,
8299 + token);
8300 + cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params;
8301 + cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
8302 + cmd_params->tc_id = tc_id;
8303 + dpni_set_field(cmd_params->flags, DIST_MODE, cfg->dist_mode);
8304 + dpni_set_field(cmd_params->flags, MISS_ACTION, cfg->fs_cfg.miss_action);
8305 + cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id);
8306 + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
8307 +
8308 + /* send command to mc*/
8309 + return mc_send_command(mc_io, &cmd);
8310 +}
8311 +
8312 +/*
8313 + * dpni_set_qos_table() - Set QoS mapping table
8314 + * @mc_io: Pointer to MC portal's I/O object
8315 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8316 + * @token: Token of DPNI object
8317 + * @cfg: QoS table configuration
8318 + *
8319 + * This function and all QoS-related functions require that
8320 + *'max_tcs > 1' was set at DPNI creation.
8321 + *
8322 + * warning: Before calling this function, call dpkg_prepare_key_cfg() to
8323 + * prepare the key_cfg_iova parameter
8324 + *
8325 + * Return: '0' on Success; Error code otherwise.
8326 + */
8327 +int dpni_set_qos_table(struct fsl_mc_io *mc_io,
8328 + u32 cmd_flags,
8329 + u16 token,
8330 + const struct dpni_qos_tbl_cfg *cfg)
8331 +{
8332 + struct dpni_cmd_set_qos_table *cmd_params;
8333 + struct mc_command cmd = { 0 };
8334 +
8335 + /* prepare command */
8336 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
8337 + cmd_flags,
8338 + token);
8339 + cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params;
8340 + cmd_params->default_tc = cfg->default_tc;
8341 + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
8342 + dpni_set_field(cmd_params->discard_on_miss,
8343 + ENABLE,
8344 + cfg->discard_on_miss);
8345 +
8346 + /* send command to mc*/
8347 + return mc_send_command(mc_io, &cmd);
8348 +}
8349 +
8350 +/**
8351 + * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
8352 + * @mc_io: Pointer to MC portal's I/O object
8353 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8354 + * @token: Token of DPNI object
8355 + * @cfg: QoS rule to add
8356 + * @tc_id: Traffic class selection (0-7)
8357 + * @index: Location in the QoS table where to insert the entry.
8358 + * Only relevant if MASKING is enabled for QoS classification on
8359 + * this DPNI, it is ignored for exact match.
8360 + *
8361 + * Return: '0' on Success; Error code otherwise.
8362 + */
8363 +int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
8364 + u32 cmd_flags,
8365 + u16 token,
8366 + const struct dpni_rule_cfg *cfg,
8367 + u8 tc_id,
8368 + u16 index)
8369 +{
8370 + struct dpni_cmd_add_qos_entry *cmd_params;
8371 + struct mc_command cmd = { 0 };
8372 +
8373 + /* prepare command */
8374 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
8375 + cmd_flags,
8376 + token);
8377 + cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params;
8378 + cmd_params->tc_id = tc_id;
8379 + cmd_params->key_size = cfg->key_size;
8380 + cmd_params->index = cpu_to_le16(index);
8381 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
8382 + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
8383 +
8384 + /* send command to mc*/
8385 + return mc_send_command(mc_io, &cmd);
8386 +}
8387 +
8388 +/**
8389 + * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
8390 + * (to select a flow ID)
8391 + * @mc_io: Pointer to MC portal's I/O object
8392 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8393 + * @token: Token of DPNI object
8394 + * @tc_id: Traffic class selection (0-7)
8395 + * @index: Location in the QoS table where to insert the entry.
8396 + * Only relevant if MASKING is enabled for QoS
8397 + * classification on this DPNI, it is ignored for exact match.
8398 + * @cfg: Flow steering rule to add
8399 + * @action: Action to be taken as result of a classification hit
8400 + *
8401 + * Return: '0' on Success; Error code otherwise.
8402 + */
8403 +int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
8404 + u32 cmd_flags,
8405 + u16 token,
8406 + u8 tc_id,
8407 + u16 index,
8408 + const struct dpni_rule_cfg *cfg,
8409 + const struct dpni_fs_action_cfg *action)
8410 +{
8411 + struct dpni_cmd_add_fs_entry *cmd_params;
8412 + struct mc_command cmd = { 0 };
8413 +
8414 + /* prepare command */
8415 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
8416 + cmd_flags,
8417 + token);
8418 + cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
8419 + cmd_params->tc_id = tc_id;
8420 + cmd_params->key_size = cfg->key_size;
8421 + cmd_params->index = cpu_to_le16(index);
8422 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
8423 + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
8424 + cmd_params->options = cpu_to_le16(action->options);
8425 + cmd_params->flow_id = cpu_to_le16(action->flow_id);
8426 + cmd_params->flc = cpu_to_le64(action->flc);
8427 +
8428 + /* send command to mc*/
8429 + return mc_send_command(mc_io, &cmd);
8430 +}
8431 +
8432 +/**
8433 + * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
8434 + * traffic class
8435 + * @mc_io: Pointer to MC portal's I/O object
8436 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8437 + * @token: Token of DPNI object
8438 + * @tc_id: Traffic class selection (0-7)
8439 + * @cfg: Flow steering rule to remove
8440 + *
8441 + * Return: '0' on Success; Error code otherwise.
8442 + */
8443 +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
8444 + u32 cmd_flags,
8445 + u16 token,
8446 + u8 tc_id,
8447 + const struct dpni_rule_cfg *cfg)
8448 +{
8449 + struct dpni_cmd_remove_fs_entry *cmd_params;
8450 + struct mc_command cmd = { 0 };
8451 +
8452 + /* prepare command */
8453 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
8454 + cmd_flags,
8455 + token);
8456 + cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
8457 + cmd_params->tc_id = tc_id;
8458 + cmd_params->key_size = cfg->key_size;
8459 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
8460 + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
8461 +
8462 + /* send command to mc*/
8463 + return mc_send_command(mc_io, &cmd);
8464 +}
8465 +
8466 +/**
8467 + * dpni_set_congestion_notification() - Set traffic class congestion
8468 + * notification configuration
8469 + * @mc_io: Pointer to MC portal's I/O object
8470 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8471 + * @token: Token of DPNI object
8472 + * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
8473 + * @tc_id: Traffic class selection (0-7)
8474 + * @cfg: congestion notification configuration
8475 + *
8476 + * Return: '0' on Success; error code otherwise.
8477 + */
8478 +int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
8479 + u32 cmd_flags,
8480 + u16 token,
8481 + enum dpni_queue_type qtype,
8482 + u8 tc_id,
8483 + const struct dpni_congestion_notification_cfg *cfg)
8484 +{
8485 + struct dpni_cmd_set_congestion_notification *cmd_params;
8486 + struct mc_command cmd = { 0 };
8487 +
8488 + /* prepare command */
8489 + cmd.header = mc_encode_cmd_header(
8490 + DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
8491 + cmd_flags,
8492 + token);
8493 + cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
8494 + cmd_params->qtype = qtype;
8495 + cmd_params->tc = tc_id;
8496 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
8497 + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
8498 + cmd_params->dest_priority = cfg->dest_cfg.priority;
8499 + dpni_set_field(cmd_params->type_units, DEST_TYPE,
8500 + cfg->dest_cfg.dest_type);
8501 + dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units);
8502 + cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
8503 + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
8504 + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
8505 + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
8506 +
8507 + /* send command to mc*/
8508 + return mc_send_command(mc_io, &cmd);
8509 +}
8510 +
8511 +/**
8512 + * dpni_get_congestion_notification() - Get traffic class congestion
8513 + * notification configuration
8514 + * @mc_io: Pointer to MC portal's I/O object
8515 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8516 + * @token: Token of DPNI object
8517 + * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
8518 + * @tc_id: Traffic class selection (0-7)
8519 + * @cfg: congestion notification configuration
8520 + *
8521 + * Return: '0' on Success; error code otherwise.
8522 + */
8523 +int dpni_get_congestion_notification(
8524 + struct fsl_mc_io *mc_io,
8525 + u32 cmd_flags,
8526 + u16 token,
8527 + enum dpni_queue_type qtype,
8528 + u8 tc_id,
8529 + struct dpni_congestion_notification_cfg *cfg)
8530 +{
8531 + struct dpni_rsp_get_congestion_notification *rsp_params;
8532 + struct dpni_cmd_get_congestion_notification *cmd_params;
8533 + struct mc_command cmd = { 0 };
8534 + int err;
8535 +
8536 + /* prepare command */
8537 + cmd.header = mc_encode_cmd_header(
8538 + DPNI_CMDID_GET_CONGESTION_NOTIFICATION,
8539 + cmd_flags,
8540 + token);
8541 + cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params;
8542 + cmd_params->qtype = qtype;
8543 + cmd_params->tc = tc_id;
8544 +
8545 + /* send command to mc*/
8546 + err = mc_send_command(mc_io, &cmd);
8547 + if (err)
8548 + return err;
8549 +
8550 + rsp_params = (struct dpni_rsp_get_congestion_notification *)cmd.params;
8551 + cfg->units = dpni_get_field(rsp_params->type_units, CONG_UNITS);
8552 + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
8553 + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
8554 + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
8555 + cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
8556 + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
8557 + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
8558 + cfg->dest_cfg.priority = rsp_params->dest_priority;
8559 + cfg->dest_cfg.dest_type = dpni_get_field(rsp_params->type_units,
8560 + DEST_TYPE);
8561 +
8562 + return 0;
8563 +}
8564 +
8565 +/**
8566 + * dpni_set_queue() - Set queue parameters
8567 + * @mc_io: Pointer to MC portal's I/O object
8568 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8569 + * @token: Token of DPNI object
8570 + * @qtype: Type of queue - all queue types are supported, although
8571 + * the command is ignored for Tx
8572 + * @tc: Traffic class, in range 0 to NUM_TCS - 1
8573 + * @index: Selects the specific queue out of the set allocated for the
8574 + * same TC. Value must be in range 0 to NUM_QUEUES - 1
8575 + * @options: A combination of DPNI_QUEUE_OPT_ values that control what
8576 + * configuration options are set on the queue
8577 + * @queue: Queue structure
8578 + *
8579 + * Return: '0' on Success; Error code otherwise.
8580 + */
8581 +int dpni_set_queue(struct fsl_mc_io *mc_io,
8582 + u32 cmd_flags,
8583 + u16 token,
8584 + enum dpni_queue_type qtype,
8585 + u8 tc,
8586 + u8 index,
8587 + u8 options,
8588 + const struct dpni_queue *queue)
8589 +{
8590 + struct mc_command cmd = { 0 };
8591 + struct dpni_cmd_set_queue *cmd_params;
8592 +
8593 + /* prepare command */
8594 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE,
8595 + cmd_flags,
8596 + token);
8597 + cmd_params = (struct dpni_cmd_set_queue *)cmd.params;
8598 + cmd_params->qtype = qtype;
8599 + cmd_params->tc = tc;
8600 + cmd_params->index = index;
8601 + cmd_params->options = options;
8602 + cmd_params->dest_id = cpu_to_le32(queue->destination.id);
8603 + cmd_params->dest_prio = queue->destination.priority;
8604 + dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type);
8605 + dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control);
8606 + dpni_set_field(cmd_params->flags, HOLD_ACTIVE,
8607 + queue->destination.hold_active);
8608 + cmd_params->flc = cpu_to_le64(queue->flc.value);
8609 + cmd_params->user_context = cpu_to_le64(queue->user_context);
8610 +
8611 + /* send command to mc */
8612 + return mc_send_command(mc_io, &cmd);
8613 +}
8614 +
8615 +/**
8616 + * dpni_get_queue() - Get queue parameters
8617 + * @mc_io: Pointer to MC portal's I/O object
8618 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8619 + * @token: Token of DPNI object
8620 + * @qtype: Type of queue - all queue types are supported
8621 + * @tc: Traffic class, in range 0 to NUM_TCS - 1
8622 + * @index: Selects the specific queue out of the set allocated for the
8623 + * same TC. Value must be in range 0 to NUM_QUEUES - 1
8624 + * @queue: Queue configuration structure
8625 + * @qid: Queue identification
8626 + *
8627 + * Return: '0' on Success; Error code otherwise.
8628 + */
8629 +int dpni_get_queue(struct fsl_mc_io *mc_io,
8630 + u32 cmd_flags,
8631 + u16 token,
8632 + enum dpni_queue_type qtype,
8633 + u8 tc,
8634 + u8 index,
8635 + struct dpni_queue *queue,
8636 + struct dpni_queue_id *qid)
8637 +{
8638 + struct mc_command cmd = { 0 };
8639 + struct dpni_cmd_get_queue *cmd_params;
8640 + struct dpni_rsp_get_queue *rsp_params;
8641 + int err;
8642 +
8643 + /* prepare command */
8644 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE,
8645 + cmd_flags,
8646 + token);
8647 + cmd_params = (struct dpni_cmd_get_queue *)cmd.params;
8648 + cmd_params->qtype = qtype;
8649 + cmd_params->tc = tc;
8650 + cmd_params->index = index;
8651 +
8652 + /* send command to mc */
8653 + err = mc_send_command(mc_io, &cmd);
8654 + if (err)
8655 + return err;
8656 +
8657 + /* retrieve response parameters */
8658 + rsp_params = (struct dpni_rsp_get_queue *)cmd.params;
8659 + queue->destination.id = le32_to_cpu(rsp_params->dest_id);
8660 + queue->destination.priority = rsp_params->dest_prio;
8661 + queue->destination.type = dpni_get_field(rsp_params->flags,
8662 + DEST_TYPE);
8663 + queue->flc.stash_control = dpni_get_field(rsp_params->flags,
8664 + STASH_CTRL);
8665 + queue->destination.hold_active = dpni_get_field(rsp_params->flags,
8666 + HOLD_ACTIVE);
8667 + queue->flc.value = le64_to_cpu(rsp_params->flc);
8668 + queue->user_context = le64_to_cpu(rsp_params->user_context);
8669 + qid->fqid = le32_to_cpu(rsp_params->fqid);
8670 + qid->qdbin = le16_to_cpu(rsp_params->qdbin);
8671 +
8672 + return 0;
8673 +}
8674 +
8675 +/**
8676 + * dpni_get_statistics() - Get DPNI statistics
8677 + * @mc_io: Pointer to MC portal's I/O object
8678 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8679 + * @token: Token of DPNI object
8680 + * @page: Selects the statistics page to retrieve, see
8681 + * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2.
8682 + * @stat: Structure containing the statistics
8683 + *
8684 + * Return: '0' on Success; Error code otherwise.
8685 + */
8686 +int dpni_get_statistics(struct fsl_mc_io *mc_io,
8687 + u32 cmd_flags,
8688 + u16 token,
8689 + u8 page,
8690 + union dpni_statistics *stat)
8691 +{
8692 + struct mc_command cmd = { 0 };
8693 + struct dpni_cmd_get_statistics *cmd_params;
8694 + struct dpni_rsp_get_statistics *rsp_params;
8695 + int i, err;
8696 +
8697 + /* prepare command */
8698 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS,
8699 + cmd_flags,
8700 + token);
8701 + cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
8702 + cmd_params->page_number = page;
8703 +
8704 + /* send command to mc */
8705 + err = mc_send_command(mc_io, &cmd);
8706 + if (err)
8707 + return err;
8708 +
8709 + /* retrieve response parameters */
8710 + rsp_params = (struct dpni_rsp_get_statistics *)cmd.params;
8711 + for (i = 0; i < DPNI_STATISTICS_CNT; i++)
8712 + stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]);
8713 +
8714 + return 0;
8715 +}
8716 +
8717 +/**
8718 + * dpni_reset_statistics() - Clears DPNI statistics
8719 + * @mc_io: Pointer to MC portal's I/O object
8720 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8721 + * @token: Token of DPNI object
8722 + *
8723 + * Return: '0' on Success; Error code otherwise.
8724 + */
8725 +int dpni_reset_statistics(struct fsl_mc_io *mc_io,
8726 + u32 cmd_flags,
8727 + u16 token)
8728 +{
8729 + struct mc_command cmd = { 0 };
8730 +
8731 + /* prepare command */
8732 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS,
8733 + cmd_flags,
8734 + token);
8735 +
8736 + /* send command to mc*/
8737 + return mc_send_command(mc_io, &cmd);
8738 +}
8739 +
8740 +/**
8741 + * dpni_set_taildrop() - Set taildrop per queue or TC
8742 + * @mc_io: Pointer to MC portal's I/O object
8743 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8744 + * @token: Token of DPNI object
8745 + * @cg_point: Congestion point
8746 + * @q_type: Queue type on which the taildrop is configured.
8747 + * Only Rx queues are supported for now
8748 + * @tc: Traffic class to apply this taildrop to
8749 + * @q_index: Index of the queue if the DPNI supports multiple queues for
8750 + * traffic distribution. Ignored if CONGESTION_POINT is not 0.
8751 + * @taildrop: Taildrop structure
8752 + *
8753 + * Return: '0' on Success; Error code otherwise.
8754 + */
8755 +int dpni_set_taildrop(struct fsl_mc_io *mc_io,
8756 + u32 cmd_flags,
8757 + u16 token,
8758 + enum dpni_congestion_point cg_point,
8759 + enum dpni_queue_type qtype,
8760 + u8 tc,
8761 + u8 index,
8762 + struct dpni_taildrop *taildrop)
8763 +{
8764 + struct mc_command cmd = { 0 };
8765 + struct dpni_cmd_set_taildrop *cmd_params;
8766 +
8767 + /* prepare command */
8768 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP,
8769 + cmd_flags,
8770 + token);
8771 + cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params;
8772 + cmd_params->congestion_point = cg_point;
8773 + cmd_params->qtype = qtype;
8774 + cmd_params->tc = tc;
8775 + cmd_params->index = index;
8776 + dpni_set_field(cmd_params->enable, ENABLE, taildrop->enable);
8777 + cmd_params->units = taildrop->units;
8778 + cmd_params->threshold = cpu_to_le32(taildrop->threshold);
8779 +
8780 + /* send command to mc */
8781 + return mc_send_command(mc_io, &cmd);
8782 +}
8783 +
8784 +/**
8785 + * dpni_get_taildrop() - Get taildrop information
8786 + * @mc_io: Pointer to MC portal's I/O object
8787 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
8788 + * @token: Token of DPNI object
8789 + * @cg_point: Congestion point
8790 + * @q_type: Queue type on which the taildrop is configured.
8791 + * Only Rx queues are supported for now
8792 + * @tc: Traffic class to apply this taildrop to
8793 + * @q_index: Index of the queue if the DPNI supports multiple queues for
8794 + * traffic distribution. Ignored if CONGESTION_POINT is not 0.
8795 + * @taildrop: Taildrop structure
8796 + *
8797 + * Return: '0' on Success; Error code otherwise.
8798 + */
8799 +int dpni_get_taildrop(struct fsl_mc_io *mc_io,
8800 + u32 cmd_flags,
8801 + u16 token,
8802 + enum dpni_congestion_point cg_point,
8803 + enum dpni_queue_type qtype,
8804 + u8 tc,
8805 + u8 index,
8806 + struct dpni_taildrop *taildrop)
8807 +{
8808 + struct mc_command cmd = { 0 };
8809 + struct dpni_cmd_get_taildrop *cmd_params;
8810 + struct dpni_rsp_get_taildrop *rsp_params;
8811 + int err;
8812 +
8813 + /* prepare command */
8814 + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP,
8815 + cmd_flags,
8816 + token);
8817 + cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params;
8818 + cmd_params->congestion_point = cg_point;
8819 + cmd_params->qtype = qtype;
8820 + cmd_params->tc = tc;
8821 + cmd_params->index = index;
8822 +
8823 + /* send command to mc */
8824 + err = mc_send_command(mc_io, &cmd);
8825 + if (err)
8826 + return err;
8827 +
8828 + /* retrieve response parameters */
8829 + rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params;
8830 + taildrop->enable = dpni_get_field(rsp_params->enable, ENABLE);
8831 + taildrop->units = rsp_params->units;
8832 + taildrop->threshold = le32_to_cpu(rsp_params->threshold);
8833 +
8834 + return 0;
8835 +}
8836 --- /dev/null
8837 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
8838 @@ -0,0 +1,1053 @@
8839 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
8840 + * Copyright 2016 NXP
8841 + *
8842 + * Redistribution and use in source and binary forms, with or without
8843 + * modification, are permitted provided that the following conditions are met:
8844 + * * Redistributions of source code must retain the above copyright
8845 + * notice, this list of conditions and the following disclaimer.
8846 + * * Redistributions in binary form must reproduce the above copyright
8847 + * notice, this list of conditions and the following disclaimer in the
8848 + * documentation and/or other materials provided with the distribution.
8849 + * * Neither the name of the above-listed copyright holders nor the
8850 + * names of any contributors may be used to endorse or promote products
8851 + * derived from this software without specific prior written permission.
8852 + *
8853 + *
8854 + * ALTERNATIVELY, this software may be distributed under the terms of the
8855 + * GNU General Public License ("GPL") as published by the Free Software
8856 + * Foundation, either version 2 of that License or (at your option) any
8857 + * later version.
8858 + *
8859 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
8860 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8861 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8862 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
8863 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
8864 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
8865 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
8866 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
8867 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
8868 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
8869 + * POSSIBILITY OF SUCH DAMAGE.
8870 + */
8871 +#ifndef __FSL_DPNI_H
8872 +#define __FSL_DPNI_H
8873 +
8874 +#include "dpkg.h"
8875 +
8876 +struct fsl_mc_io;
8877 +
8878 +/**
8879 + * Data Path Network Interface API
8880 + * Contains initialization APIs and runtime control APIs for DPNI
8881 + */
8882 +
8883 +/** General DPNI macros */
8884 +
8885 +/**
8886 + * Maximum number of traffic classes
8887 + */
8888 +#define DPNI_MAX_TC 8
8889 +/**
8890 + * Maximum number of buffer pools per DPNI
8891 + */
8892 +#define DPNI_MAX_DPBP 8
8893 +/**
8894 + * Maximum number of senders
8895 + */
8896 +#define DPNI_MAX_SENDERS 8
8897 +/**
8898 + * Maximum distribution size
8899 + */
8900 +#define DPNI_MAX_DIST_SIZE 8
8901 +
8902 +/**
8903 + * All traffic classes considered; see dpni_set_queue()
8904 + */
8905 +#define DPNI_ALL_TCS (u8)(-1)
8906 +/**
8907 + * All flows within traffic class considered; see dpni_set_queue()
8908 + */
8909 +#define DPNI_ALL_TC_FLOWS (u16)(-1)
8910 +/**
8911 + * Generate new flow ID; see dpni_set_queue()
8912 + */
8913 +#define DPNI_NEW_FLOW_ID (u16)(-1)
8914 +
8915 +/**
8916 + * Tx traffic is always released to a buffer pool on transmit, there are no
8917 + * resources allocated to have the frames confirmed back to the source after
8918 + * transmission.
8919 + */
8920 +#define DPNI_OPT_TX_FRM_RELEASE 0x000001
8921 +/**
8922 + * Disables support for MAC address filtering for addresses other than primary
8923 + * MAC address. This affects both unicast and multicast. Promiscuous mode can
8924 + * still be enabled/disabled for both unicast and multicast. If promiscuous mode
8925 + * is disabled, only traffic matching the primary MAC address will be accepted.
8926 + */
8927 +#define DPNI_OPT_NO_MAC_FILTER 0x000002
8928 +/**
8929 + * Allocate policers for this DPNI. They can be used to rate-limit traffic per
8930 + * traffic class (TC) basis.
8931 + */
8932 +#define DPNI_OPT_HAS_POLICING 0x000004
8933 +/**
8934 + * Congestion can be managed in several ways, allowing the buffer pool to
8935 + * deplete on ingress, taildrop on each queue or use congestion groups for sets
8936 + * of queues. If set, it configures a single congestion groups across all TCs.
8937 + * If reset, a congestion group is allocated for each TC. Only relevant if the
8938 + * DPNI has multiple traffic classes.
8939 + */
8940 +#define DPNI_OPT_SHARED_CONGESTION 0x000008
8941 +/**
8942 + * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all
8943 + * look-ups are exact match. Note that TCAM is not available on LS1088 and its
8944 + * variants. Setting this bit on these SoCs will trigger an error.
8945 + */
8946 +#define DPNI_OPT_HAS_KEY_MASKING 0x000010
8947 +/**
8948 + * Disables the flow steering table.
8949 + */
8950 +#define DPNI_OPT_NO_FS 0x000020
8951 +
8952 +int dpni_open(struct fsl_mc_io *mc_io,
8953 + u32 cmd_flags,
8954 + int dpni_id,
8955 + u16 *token);
8956 +
8957 +int dpni_close(struct fsl_mc_io *mc_io,
8958 + u32 cmd_flags,
8959 + u16 token);
8960 +
8961 +/**
8962 + * struct dpni_pools_cfg - Structure representing buffer pools configuration
8963 + * @num_dpbp: Number of DPBPs
8964 + * @pools: Array of buffer pools parameters; The number of valid entries
8965 + * must match 'num_dpbp' value
8966 + */
8967 +struct dpni_pools_cfg {
8968 + u8 num_dpbp;
8969 + /**
8970 + * struct pools - Buffer pools parameters
8971 + * @dpbp_id: DPBP object ID
8972 + * @priority_mask: priorities served by DPBP
8973 + * @buffer_size: Buffer size
8974 + * @backup_pool: Backup pool
8975 + */
8976 + struct {
8977 + u16 dpbp_id;
8978 + u8 priority_mask;
8979 + u16 buffer_size;
8980 + u8 backup_pool;
8981 + } pools[DPNI_MAX_DPBP];
8982 +};
8983 +
8984 +int dpni_set_pools(struct fsl_mc_io *mc_io,
8985 + u32 cmd_flags,
8986 + u16 token,
8987 + const struct dpni_pools_cfg *cfg);
8988 +
8989 +int dpni_enable(struct fsl_mc_io *mc_io,
8990 + u32 cmd_flags,
8991 + u16 token);
8992 +
8993 +int dpni_disable(struct fsl_mc_io *mc_io,
8994 + u32 cmd_flags,
8995 + u16 token);
8996 +
8997 +int dpni_is_enabled(struct fsl_mc_io *mc_io,
8998 + u32 cmd_flags,
8999 + u16 token,
9000 + int *en);
9001 +
9002 +int dpni_reset(struct fsl_mc_io *mc_io,
9003 + u32 cmd_flags,
9004 + u16 token);
9005 +
9006 +/**
9007 + * DPNI IRQ Index and Events
9008 + */
9009 +
9010 +/**
9011 + * IRQ index
9012 + */
9013 +#define DPNI_IRQ_INDEX 0
9014 +/**
9015 + * IRQ event - indicates a change in link state
9016 + */
9017 +#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
9018 +
9019 +int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
9020 + u32 cmd_flags,
9021 + u16 token,
9022 + u8 irq_index,
9023 + u8 en);
9024 +
9025 +int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
9026 + u32 cmd_flags,
9027 + u16 token,
9028 + u8 irq_index,
9029 + u8 *en);
9030 +
9031 +int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
9032 + u32 cmd_flags,
9033 + u16 token,
9034 + u8 irq_index,
9035 + u32 mask);
9036 +
9037 +int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
9038 + u32 cmd_flags,
9039 + u16 token,
9040 + u8 irq_index,
9041 + u32 *mask);
9042 +
9043 +int dpni_get_irq_status(struct fsl_mc_io *mc_io,
9044 + u32 cmd_flags,
9045 + u16 token,
9046 + u8 irq_index,
9047 + u32 *status);
9048 +
9049 +int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
9050 + u32 cmd_flags,
9051 + u16 token,
9052 + u8 irq_index,
9053 + u32 status);
9054 +
9055 +/**
9056 + * struct dpni_attr - Structure representing DPNI attributes
9057 + * @options: Any combination of the following options:
9058 + * DPNI_OPT_TX_FRM_RELEASE
9059 + * DPNI_OPT_NO_MAC_FILTER
9060 + * DPNI_OPT_HAS_POLICING
9061 + * DPNI_OPT_SHARED_CONGESTION
9062 + * DPNI_OPT_HAS_KEY_MASKING
9063 + * DPNI_OPT_NO_FS
9064 + * @num_queues: Number of Tx and Rx queues used for traffic distribution.
9065 + * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI.
9066 + * @mac_filter_entries: Number of entries in the MAC address filtering table.
9067 + * @vlan_filter_entries: Number of entries in the VLAN address filtering table.
9068 + * @qos_entries: Number of entries in the QoS classification table.
9069 + * @fs_entries: Number of entries in the flow steering table.
9070 + * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger
9071 + * than this when adding QoS entries will result in an error.
9072 + * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a
9073 + * key larger than this when composing the hash + FS key will
9074 + * result in an error.
9075 + * @wriop_version: Version of WRIOP HW block. The 3 version values are stored
9076 + * on 6, 5, 5 bits respectively.
9077 + */
9078 +struct dpni_attr {
9079 + u32 options;
9080 + u8 num_queues;
9081 + u8 num_tcs;
9082 + u8 mac_filter_entries;
9083 + u8 vlan_filter_entries;
9084 + u8 qos_entries;
9085 + u16 fs_entries;
9086 + u8 qos_key_size;
9087 + u8 fs_key_size;
9088 + u16 wriop_version;
9089 +};
9090 +
9091 +int dpni_get_attributes(struct fsl_mc_io *mc_io,
9092 + u32 cmd_flags,
9093 + u16 token,
9094 + struct dpni_attr *attr);
9095 +
9096 +/**
9097 + * DPNI errors
9098 + */
9099 +
9100 +/**
9101 + * Extract out of frame header error
9102 + */
9103 +#define DPNI_ERROR_EOFHE 0x00020000
9104 +/**
9105 + * Frame length error
9106 + */
9107 +#define DPNI_ERROR_FLE 0x00002000
9108 +/**
9109 + * Frame physical error
9110 + */
9111 +#define DPNI_ERROR_FPE 0x00001000
9112 +/**
9113 + * Parsing header error
9114 + */
9115 +#define DPNI_ERROR_PHE 0x00000020
9116 +/**
9117 + * Parser L3 checksum error
9118 + */
9119 +#define DPNI_ERROR_L3CE 0x00000004
9120 +/**
9121 + * Parser L3 checksum error
9122 + */
9123 +#define DPNI_ERROR_L4CE 0x00000001
9124 +
9125 +/**
9126 + * enum dpni_error_action - Defines DPNI behavior for errors
9127 + * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
9128 + * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
9129 + * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
9130 + */
9131 +enum dpni_error_action {
9132 + DPNI_ERROR_ACTION_DISCARD = 0,
9133 + DPNI_ERROR_ACTION_CONTINUE = 1,
9134 + DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
9135 +};
9136 +
9137 +/**
9138 + * struct dpni_error_cfg - Structure representing DPNI errors treatment
9139 + * @errors: Errors mask; use 'DPNI_ERROR__<X>
9140 + * @error_action: The desired action for the errors mask
9141 + * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
9142 + * status (FAS); relevant only for the non-discard action
9143 + */
9144 +struct dpni_error_cfg {
9145 + u32 errors;
9146 + enum dpni_error_action error_action;
9147 + int set_frame_annotation;
9148 +};
9149 +
9150 +int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
9151 + u32 cmd_flags,
9152 + u16 token,
9153 + struct dpni_error_cfg *cfg);
9154 +
9155 +/**
9156 + * DPNI buffer layout modification options
9157 + */
9158 +
9159 +/**
9160 + * Select to modify the time-stamp setting
9161 + */
9162 +#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
9163 +/**
9164 + * Select to modify the parser-result setting; not applicable for Tx
9165 + */
9166 +#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
9167 +/**
9168 + * Select to modify the frame-status setting
9169 + */
9170 +#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
9171 +/**
9172 + * Select to modify the private-data-size setting
9173 + */
9174 +#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
9175 +/**
9176 + * Select to modify the data-alignment setting
9177 + */
9178 +#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
9179 +/**
9180 + * Select to modify the data-head-room setting
9181 + */
9182 +#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
9183 +/**
9184 + * Select to modify the data-tail-room setting
9185 + */
9186 +#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
9187 +
9188 +/**
9189 + * struct dpni_buffer_layout - Structure representing DPNI buffer layout
9190 + * @options: Flags representing the suggested modifications to the buffer
9191 + * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
9192 + * @pass_timestamp: Pass timestamp value
9193 + * @pass_parser_result: Pass parser results
9194 + * @pass_frame_status: Pass frame status
9195 + * @private_data_size: Size kept for private data (in bytes)
9196 + * @data_align: Data alignment
9197 + * @data_head_room: Data head room
9198 + * @data_tail_room: Data tail room
9199 + */
9200 +struct dpni_buffer_layout {
9201 + u32 options;
9202 + int pass_timestamp;
9203 + int pass_parser_result;
9204 + int pass_frame_status;
9205 + u16 private_data_size;
9206 + u16 data_align;
9207 + u16 data_head_room;
9208 + u16 data_tail_room;
9209 +};
9210 +
9211 +/**
9212 + * enum dpni_queue_type - Identifies a type of queue targeted by the command
9213 + * @DPNI_QUEUE_RX: Rx queue
9214 + * @DPNI_QUEUE_TX: Tx queue
9215 + * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
9216 + * @DPNI_QUEUE_RX_ERR: Rx error queue
9217 + */enum dpni_queue_type {
9218 + DPNI_QUEUE_RX,
9219 + DPNI_QUEUE_TX,
9220 + DPNI_QUEUE_TX_CONFIRM,
9221 + DPNI_QUEUE_RX_ERR,
9222 +};
9223 +
9224 +int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
9225 + u32 cmd_flags,
9226 + u16 token,
9227 + enum dpni_queue_type qtype,
9228 + struct dpni_buffer_layout *layout);
9229 +
9230 +int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
9231 + u32 cmd_flags,
9232 + u16 token,
9233 + enum dpni_queue_type qtype,
9234 + const struct dpni_buffer_layout *layout);
9235 +
9236 +/**
9237 + * enum dpni_offload - Identifies a type of offload targeted by the command
9238 + * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation
9239 + * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation
9240 + * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation
9241 + * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation
9242 + */
9243 +enum dpni_offload {
9244 + DPNI_OFF_RX_L3_CSUM,
9245 + DPNI_OFF_RX_L4_CSUM,
9246 + DPNI_OFF_TX_L3_CSUM,
9247 + DPNI_OFF_TX_L4_CSUM,
9248 +};
9249 +
9250 +int dpni_set_offload(struct fsl_mc_io *mc_io,
9251 + u32 cmd_flags,
9252 + u16 token,
9253 + enum dpni_offload type,
9254 + u32 config);
9255 +
9256 +int dpni_get_offload(struct fsl_mc_io *mc_io,
9257 + u32 cmd_flags,
9258 + u16 token,
9259 + enum dpni_offload type,
9260 + u32 *config);
9261 +
9262 +int dpni_get_qdid(struct fsl_mc_io *mc_io,
9263 + u32 cmd_flags,
9264 + u16 token,
9265 + enum dpni_queue_type qtype,
9266 + u16 *qdid);
9267 +
9268 +int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
9269 + u32 cmd_flags,
9270 + u16 token,
9271 + u16 *data_offset);
9272 +
9273 +#define DPNI_STATISTICS_CNT 7
9274 +
9275 +union dpni_statistics {
9276 + /**
9277 + * struct page_0 - Page_0 statistics structure
9278 + * @ingress_all_frames: Ingress frame count
9279 + * @ingress_all_bytes: Ingress byte count
9280 + * @ingress_multicast_frames: Ingress multicast frame count
9281 + * @ingress_multicast_bytes: Ingress multicast byte count
9282 + * @ingress_broadcast_frames: Ingress broadcast frame count
9283 + * @ingress_broadcast_bytes: Ingress broadcast byte count
9284 + */
9285 + struct {
9286 + u64 ingress_all_frames;
9287 + u64 ingress_all_bytes;
9288 + u64 ingress_multicast_frames;
9289 + u64 ingress_multicast_bytes;
9290 + u64 ingress_broadcast_frames;
9291 + u64 ingress_broadcast_bytes;
9292 + } page_0;
9293 + /**
9294 + * struct page_1 - Page_1 statistics structure
9295 + * @egress_all_frames: Egress frame count
9296 + * @egress_all_bytes: Egress byte count
9297 + * @egress_multicast_frames: Egress multicast frame count
9298 + * @egress_multicast_bytes: Egress multicast byte count
9299 + * @egress_broadcast_frames: Egress broadcast frame count
9300 + * @egress_broadcast_bytes: Egress broadcast byte count
9301 + */
9302 + struct {
9303 + u64 egress_all_frames;
9304 + u64 egress_all_bytes;
9305 + u64 egress_multicast_frames;
9306 + u64 egress_multicast_bytes;
9307 + u64 egress_broadcast_frames;
9308 + u64 egress_broadcast_bytes;
9309 + } page_1;
9310 + /**
9311 + * struct page_2 - Page_2 statistics structure
9312 + * @ingress_filtered_frames: Ingress filtered frame count
9313 + * @ingress_discarded_frames: Ingress discarded frame count
9314 + * @ingress_nobuffer_discards: Ingress discarded frame count
9315 + * due to lack of buffers
9316 + * @egress_discarded_frames: Egress discarded frame count
9317 + * @egress_confirmed_frames: Egress confirmed frame count
9318 + */
9319 + struct {
9320 + u64 ingress_filtered_frames;
9321 + u64 ingress_discarded_frames;
9322 + u64 ingress_nobuffer_discards;
9323 + u64 egress_discarded_frames;
9324 + u64 egress_confirmed_frames;
9325 + } page_2;
9326 + /**
9327 + * struct raw - raw statistics structure
9328 + */
9329 + struct {
9330 + u64 counter[DPNI_STATISTICS_CNT];
9331 + } raw;
9332 +};
9333 +
9334 +int dpni_get_statistics(struct fsl_mc_io *mc_io,
9335 + u32 cmd_flags,
9336 + u16 token,
9337 + u8 page,
9338 + union dpni_statistics *stat);
9339 +
9340 +int dpni_reset_statistics(struct fsl_mc_io *mc_io,
9341 + u32 cmd_flags,
9342 + u16 token);
9343 +
9344 +/**
9345 + * Enable auto-negotiation
9346 + */
9347 +#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL
9348 +/**
9349 + * Enable half-duplex mode
9350 + */
9351 +#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
9352 +/**
9353 + * Enable pause frames
9354 + */
9355 +#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL
9356 +/**
9357 + * Enable a-symmetric pause frames
9358 + */
9359 +#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
9360 +/**
9361 + * Enable priority flow control pause frames
9362 + */
9363 +#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL
9364 +
9365 +/**
9366 + * struct - Structure representing DPNI link configuration
9367 + * @rate: Rate
9368 + * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
9369 + */
9370 +struct dpni_link_cfg {
9371 + u32 rate;
9372 + u64 options;
9373 +};
9374 +
9375 +int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
9376 + u32 cmd_flags,
9377 + u16 token,
9378 + const struct dpni_link_cfg *cfg);
9379 +
9380 +/**
9381 + * struct dpni_link_state - Structure representing DPNI link state
9382 + * @rate: Rate
9383 + * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
9384 + * @up: Link state; '0' for down, '1' for up
9385 + */
9386 +struct dpni_link_state {
9387 + u32 rate;
9388 + u64 options;
9389 + int up;
9390 +};
9391 +
9392 +int dpni_get_link_state(struct fsl_mc_io *mc_io,
9393 + u32 cmd_flags,
9394 + u16 token,
9395 + struct dpni_link_state *state);
9396 +
9397 +/**
9398 + * struct dpni_tx_shaping - Structure representing DPNI tx shaping configuration
9399 + * @rate_limit: rate in Mbps
9400 + * @max_burst_size: burst size in bytes (up to 64KB)
9401 + */
9402 +struct dpni_tx_shaping_cfg {
9403 + u32 rate_limit;
9404 + u16 max_burst_size;
9405 +};
9406 +
9407 +int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
9408 + u32 cmd_flags,
9409 + u16 token,
9410 + const struct dpni_tx_shaping_cfg *tx_shaper);
9411 +
9412 +int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
9413 + u32 cmd_flags,
9414 + u16 token,
9415 + u16 max_frame_length);
9416 +
9417 +int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
9418 + u32 cmd_flags,
9419 + u16 token,
9420 + u16 *max_frame_length);
9421 +
9422 +int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
9423 + u32 cmd_flags,
9424 + u16 token,
9425 + int en);
9426 +
9427 +int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
9428 + u32 cmd_flags,
9429 + u16 token,
9430 + int *en);
9431 +
9432 +int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
9433 + u32 cmd_flags,
9434 + u16 token,
9435 + int en);
9436 +
9437 +int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
9438 + u32 cmd_flags,
9439 + u16 token,
9440 + int *en);
9441 +
9442 +int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
9443 + u32 cmd_flags,
9444 + u16 token,
9445 + const u8 mac_addr[6]);
9446 +
9447 +int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
9448 + u32 cmd_flags,
9449 + u16 token,
9450 + u8 mac_addr[6]);
9451 +
9452 +int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
9453 + u32 cm_flags,
9454 + u16 token,
9455 + u8 mac_addr[6]);
9456 +
9457 +int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
9458 + u32 cmd_flags,
9459 + u16 token,
9460 + const u8 mac_addr[6]);
9461 +
9462 +int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
9463 + u32 cmd_flags,
9464 + u16 token,
9465 + const u8 mac_addr[6]);
9466 +
9467 +int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
9468 + u32 cmd_flags,
9469 + u16 token,
9470 + int unicast,
9471 + int multicast);
9472 +
9473 +/**
9474 + * enum dpni_dist_mode - DPNI distribution mode
9475 + * @DPNI_DIST_MODE_NONE: No distribution
9476 + * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
9477 + * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
9478 + * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
9479 + * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
9480 + */
9481 +enum dpni_dist_mode {
9482 + DPNI_DIST_MODE_NONE = 0,
9483 + DPNI_DIST_MODE_HASH = 1,
9484 + DPNI_DIST_MODE_FS = 2
9485 +};
9486 +
9487 +/**
9488 + * enum dpni_fs_miss_action - DPNI Flow Steering miss action
9489 + * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
9490 + * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
9491 + * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
9492 + */
9493 +enum dpni_fs_miss_action {
9494 + DPNI_FS_MISS_DROP = 0,
9495 + DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
9496 + DPNI_FS_MISS_HASH = 2
9497 +};
9498 +
9499 +/**
9500 + * struct dpni_fs_tbl_cfg - Flow Steering table configuration
9501 + * @miss_action: Miss action selection
9502 + * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
9503 + */
9504 +struct dpni_fs_tbl_cfg {
9505 + enum dpni_fs_miss_action miss_action;
9506 + u16 default_flow_id;
9507 +};
9508 +
9509 +int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
9510 + u8 *key_cfg_buf);
9511 +
9512 +/**
9513 + * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
9514 + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
9515 + * key extractions to be used as the QoS criteria by calling
9516 + * dpkg_prepare_key_cfg()
9517 + * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
9518 + * '0' to use the 'default_tc' in such cases
9519 + * @default_tc: Used in case of no-match and 'discard_on_miss'= 0
9520 + */
9521 +struct dpni_qos_tbl_cfg {
9522 + u64 key_cfg_iova;
9523 + int discard_on_miss;
9524 + u8 default_tc;
9525 +};
9526 +
9527 +int dpni_set_qos_table(struct fsl_mc_io *mc_io,
9528 + u32 cmd_flags,
9529 + u16 token,
9530 + const struct dpni_qos_tbl_cfg *cfg);
9531 +
9532 +/**
9533 + * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
9534 + * @dist_size: Set the distribution size;
9535 + * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
9536 + * 112,128,192,224,256,384,448,512,768,896,1024
9537 + * @dist_mode: Distribution mode
9538 + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
9539 + * the extractions to be used for the distribution key by calling
9540 + * dpni_prepare_key_cfg() relevant only when
9541 + * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
9542 + * @fs_cfg: Flow Steering table configuration; only relevant if
9543 + * 'dist_mode = DPNI_DIST_MODE_FS'
9544 + */
9545 +struct dpni_rx_tc_dist_cfg {
9546 + u16 dist_size;
9547 + enum dpni_dist_mode dist_mode;
9548 + u64 key_cfg_iova;
9549 + struct dpni_fs_tbl_cfg fs_cfg;
9550 +};
9551 +
9552 +int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
9553 + u32 cmd_flags,
9554 + u16 token,
9555 + u8 tc_id,
9556 + const struct dpni_rx_tc_dist_cfg *cfg);
9557 +
9558 +/**
9559 + * enum dpni_dest - DPNI destination types
9560 + * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
9561 + * does not generate FQDAN notifications; user is expected to
9562 + * dequeue from the queue based on polling or other user-defined
9563 + * method
9564 + * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
9565 + * notifications to the specified DPIO; user is expected to dequeue
9566 + * from the queue only after notification is received
9567 + * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
9568 + * FQDAN notifications, but is connected to the specified DPCON
9569 + * object; user is expected to dequeue from the DPCON channel
9570 + */
9571 +enum dpni_dest {
9572 + DPNI_DEST_NONE = 0,
9573 + DPNI_DEST_DPIO = 1,
9574 + DPNI_DEST_DPCON = 2
9575 +};
9576 +
9577 +/**
9578 + * struct dpni_queue - Queue structure
9579 + * @user_context: User data, presented to the user along with any frames from
9580 + * this queue. Not relevant for Tx queues.
9581 + */
9582 +struct dpni_queue {
9583 +/**
9584 + * struct destination - Destination structure
9585 + * @id: ID of the destination, only relevant if DEST_TYPE is > 0.
9586 + * Identifies either a DPIO or a DPCON object. Not relevant for
9587 + * Tx queues.
9588 + * @type: May be one of the following:
9589 + * 0 - No destination, queue can be manually queried, but will not
9590 + * push traffic or notifications to a DPIO;
9591 + * 1 - The destination is a DPIO. When traffic becomes available in
9592 + * the queue a FQDAN (FQ data available notification) will be
9593 + * generated to selected DPIO;
9594 + * 2 - The destination is a DPCON. The queue is associated with a
9595 + * DPCON object for the purpose of scheduling between multiple
9596 + * queues. The DPCON may be independently configured to
9597 + * generate notifications. Not relevant for Tx queues.
9598 + * @hold_active: Hold active, maintains a queue scheduled for longer
9599 + * in a DPIO during dequeue to reduce spread of traffic.
9600 + * Only relevant if queues are not affined to a single DPIO.
9601 + */
9602 + struct {
9603 + u16 id;
9604 + enum dpni_dest type;
9605 + char hold_active;
9606 + u8 priority;
9607 + } destination;
9608 + u64 user_context;
9609 + struct {
9610 + u64 value;
9611 + char stash_control;
9612 + } flc;
9613 +};
9614 +
9615 +/**
9616 + * struct dpni_queue_id - Queue identification, used for enqueue commands
9617 + * or queue control
9618 + * @fqid: FQID used for enqueueing to and/or configuration of this specific FQ
9619 + * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI. Only relevant
9620 + * for Tx queues.
9621 + */
9622 +struct dpni_queue_id {
9623 + u32 fqid;
9624 + u16 qdbin;
9625 +};
9626 +
9627 +/**
9628 + * Set User Context
9629 + */
9630 +#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
9631 +#define DPNI_QUEUE_OPT_DEST 0x00000002
9632 +#define DPNI_QUEUE_OPT_FLC 0x00000004
9633 +#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008
9634 +
9635 +int dpni_set_queue(struct fsl_mc_io *mc_io,
9636 + u32 cmd_flags,
9637 + u16 token,
9638 + enum dpni_queue_type qtype,
9639 + u8 tc,
9640 + u8 index,
9641 + u8 options,
9642 + const struct dpni_queue *queue);
9643 +
9644 +int dpni_get_queue(struct fsl_mc_io *mc_io,
9645 + u32 cmd_flags,
9646 + u16 token,
9647 + enum dpni_queue_type qtype,
9648 + u8 tc,
9649 + u8 index,
9650 + struct dpni_queue *queue,
9651 + struct dpni_queue_id *qid);
9652 +
9653 +/**
9654 + * enum dpni_congestion_unit - DPNI congestion units
9655 + * @DPNI_CONGESTION_UNIT_BYTES: bytes units
9656 + * @DPNI_CONGESTION_UNIT_FRAMES: frames units
9657 + */
9658 +enum dpni_congestion_unit {
9659 + DPNI_CONGESTION_UNIT_BYTES = 0,
9660 + DPNI_CONGESTION_UNIT_FRAMES
9661 +};
9662 +
9663 +/**
9664 + * enum dpni_congestion_point - Structure representing congestion point
9665 + * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and
9666 + * QUEUE_INDEX
9667 + * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used to
9668 + * define the DPNI this can be either per TC (default) or per
9669 + * interface (DPNI_OPT_SHARED_CONGESTION set at DPNI create).
9670 + * QUEUE_INDEX is ignored if this type is used.
9671 + */
9672 +enum dpni_congestion_point {
9673 + DPNI_CP_QUEUE,
9674 + DPNI_CP_GROUP,
9675 +};
9676 +
9677 +/**
9678 + * struct dpni_dest_cfg - Structure representing DPNI destination parameters
9679 + * @dest_type: Destination type
9680 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
9681 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
9682 + * are 0-1 or 0-7, depending on the number of priorities in that
9683 + * channel; not relevant for 'DPNI_DEST_NONE' option
9684 + */
9685 +struct dpni_dest_cfg {
9686 + enum dpni_dest dest_type;
9687 + int dest_id;
9688 + u8 priority;
9689 +};
9690 +
9691 +/* DPNI congestion options */
9692 +
9693 +/**
9694 + * CSCN message is written to message_iova once entering a
9695 + * congestion state (see 'threshold_entry')
9696 + */
9697 +#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001
9698 +/**
9699 + * CSCN message is written to message_iova once exiting a
9700 + * congestion state (see 'threshold_exit')
9701 + */
9702 +#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002
9703 +/**
9704 + * CSCN write will attempt to allocate into a cache (coherent write);
9705 + * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
9706 + */
9707 +#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004
9708 +/**
9709 + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
9710 + * DPIO/DPCON's WQ channel once entering a congestion state
9711 + * (see 'threshold_entry')
9712 + */
9713 +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008
9714 +/**
9715 + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
9716 + * DPIO/DPCON's WQ channel once exiting a congestion state
9717 + * (see 'threshold_exit')
9718 + */
9719 +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010
9720 +/**
9721 + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
9722 + * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
9723 + */
9724 +#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
9725 +/**
9726 + * This congestion will trigger flow control or priority flow control.
9727 + * This will have effect only if flow control is enabled with
9728 + * dpni_set_link_cfg().
9729 + */
9730 +#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040
9731 +
9732 +/**
9733 + * struct dpni_congestion_notification_cfg - congestion notification
9734 + * configuration
9735 + * @units: units type
9736 + * @threshold_entry: above this threshold we enter a congestion state.
9737 + * set it to '0' to disable it
9738 + * @threshold_exit: below this threshold we exit the congestion state.
9739 + * @message_ctx: The context that will be part of the CSCN message
9740 + * @message_iova: I/O virtual address (must be in DMA-able memory),
9741 + * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is
9742 + * contained in 'options'
 9743 + * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
9744 + * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
9745 + */
9746 +
9747 +struct dpni_congestion_notification_cfg {
9748 + enum dpni_congestion_unit units;
9749 + u32 threshold_entry;
9750 + u32 threshold_exit;
9751 + u64 message_ctx;
9752 + u64 message_iova;
9753 + struct dpni_dest_cfg dest_cfg;
9754 + u16 notification_mode;
9755 +};
9756 +
9757 +int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
9758 + u32 cmd_flags,
9759 + u16 token,
9760 + enum dpni_queue_type qtype,
9761 + u8 tc_id,
9762 + const struct dpni_congestion_notification_cfg *cfg);
9763 +
9764 +int dpni_get_congestion_notification(
9765 + struct fsl_mc_io *mc_io,
9766 + u32 cmd_flags,
9767 + u16 token,
9768 + enum dpni_queue_type qtype,
9769 + u8 tc_id,
9770 + struct dpni_congestion_notification_cfg *cfg);
9771 +
9772 +/**
9773 + * struct dpni_taildrop - Structure representing the taildrop
9774 + * @enable: Indicates whether the taildrop is active or not.
9775 + * @units: Indicates the unit of THRESHOLD. Queue taildrop only supports
9776 + * byte units, this field is ignored and assumed = 0 if
9777 + * CONGESTION_POINT is 0.
9778 + * @threshold: Threshold value, in units identified by UNITS field. Value 0
9779 + * cannot be used as a valid taildrop threshold, THRESHOLD must
9780 + * be > 0 if the taildrop is enabled.
9781 + */
9782 +struct dpni_taildrop {
9783 + char enable;
9784 + enum dpni_congestion_unit units;
9785 + u32 threshold;
9786 +};
9787 +
9788 +int dpni_set_taildrop(struct fsl_mc_io *mc_io,
9789 + u32 cmd_flags,
9790 + u16 token,
9791 + enum dpni_congestion_point cg_point,
9792 + enum dpni_queue_type q_type,
9793 + u8 tc,
9794 + u8 q_index,
9795 + struct dpni_taildrop *taildrop);
9796 +
9797 +int dpni_get_taildrop(struct fsl_mc_io *mc_io,
9798 + u32 cmd_flags,
9799 + u16 token,
9800 + enum dpni_congestion_point cg_point,
9801 + enum dpni_queue_type q_type,
9802 + u8 tc,
9803 + u8 q_index,
9804 + struct dpni_taildrop *taildrop);
9805 +
9806 +/**
9807 + * struct dpni_rule_cfg - Rule configuration for table lookup
9808 + * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
9809 + * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
9810 + * @key_size: key and mask size (in bytes)
9811 + */
9812 +struct dpni_rule_cfg {
9813 + u64 key_iova;
9814 + u64 mask_iova;
9815 + u8 key_size;
9816 +};
9817 +
9818 +int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
9819 + u32 cmd_flags,
9820 + u16 token,
9821 + const struct dpni_rule_cfg *cfg,
9822 + u8 tc_id,
9823 + u16 index);
9824 +
9825 +int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
9826 + u32 cmd_flags,
9827 + u16 token,
9828 + const struct dpni_rule_cfg *cfg);
9829 +
9830 +int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
9831 + u32 cmd_flags,
9832 + u16 token);
9833 +
9834 +/**
9835 + * Discard matching traffic. If set, this takes precedence over any other
9836 + * configuration and matching traffic is always discarded.
9837 + */
9838 + #define DPNI_FS_OPT_DISCARD 0x1
9839 +
9840 +/**
 9841 + * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
9842 + * override the FLC value set per queue.
9843 + * For more details check the Frame Descriptor section in the hardware
9844 + * documentation.
9845 + */
9846 +#define DPNI_FS_OPT_SET_FLC 0x2
9847 +
9848 +/*
9849 + * Indicates whether the 6 lowest significant bits of FLC are used for stash
9850 + * control. If set, the 6 least significant bits in value are interpreted as
9851 + * follows:
9852 + * - bits 0-1: indicates the number of 64 byte units of context that are
9853 + * stashed. FLC value is interpreted as a memory address in this case,
9854 + * excluding the 6 LS bits.
9855 + * - bits 2-3: indicates the number of 64 byte units of frame annotation
9856 + * to be stashed. Annotation is placed at FD[ADDR].
9857 + * - bits 4-5: indicates the number of 64 byte units of frame data to be
9858 + * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
9859 + * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
9860 + */
9861 +#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4
9862 +
9863 +/**
9864 + * struct dpni_fs_action_cfg - Action configuration for table look-up
9865 + * @flc: FLC value for traffic matching this rule. Please check the Frame
9866 + * Descriptor section in the hardware documentation for more information.
9867 + * @flow_id: Identifies the Rx queue used for matching traffic. Supported
9868 + * values are in range 0 to num_queue-1.
9869 + * @options: Any combination of DPNI_FS_OPT_ values.
9870 + */
9871 +struct dpni_fs_action_cfg {
9872 + u64 flc;
9873 + u16 flow_id;
9874 + u16 options;
9875 +};
9876 +
9877 +int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
9878 + u32 cmd_flags,
9879 + u16 token,
9880 + u8 tc_id,
9881 + u16 index,
9882 + const struct dpni_rule_cfg *cfg,
9883 + const struct dpni_fs_action_cfg *action);
9884 +
9885 +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
9886 + u32 cmd_flags,
9887 + u16 token,
9888 + u8 tc_id,
9889 + const struct dpni_rule_cfg *cfg);
9890 +
9891 +#endif /* __FSL_DPNI_H */
9892 --- /dev/null
9893 +++ b/drivers/staging/fsl-dpaa2/ethernet/net.h
9894 @@ -0,0 +1,480 @@
9895 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
9896 + *
9897 + * Redistribution and use in source and binary forms, with or without
9898 + * modification, are permitted provided that the following conditions are met:
9899 + * * Redistributions of source code must retain the above copyright
9900 + * notice, this list of conditions and the following disclaimer.
9901 + * * Redistributions in binary form must reproduce the above copyright
9902 + * notice, this list of conditions and the following disclaimer in the
9903 + * documentation and/or other materials provided with the distribution.
9904 + * * Neither the name of the above-listed copyright holders nor the
9905 + * names of any contributors may be used to endorse or promote products
9906 + * derived from this software without specific prior written permission.
9907 + *
9908 + *
9909 + * ALTERNATIVELY, this software may be distributed under the terms of the
9910 + * GNU General Public License ("GPL") as published by the Free Software
9911 + * Foundation, either version 2 of that License or (at your option) any
9912 + * later version.
9913 + *
9914 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
9915 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
9916 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
9917 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
9918 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
9919 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
9920 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
9921 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
9922 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
9923 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
9924 + * POSSIBILITY OF SUCH DAMAGE.
9925 + */
9926 +#ifndef __FSL_NET_H
9927 +#define __FSL_NET_H
9928 +
9929 +#define LAST_HDR_INDEX 0xFFFFFFFF
9930 +
9931 +/*****************************************************************************/
9932 +/* Protocol fields */
9933 +/*****************************************************************************/
9934 +
9935 +/************************* Ethernet fields *********************************/
9936 +#define NH_FLD_ETH_DA (1)
9937 +#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1)
9938 +#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2)
9939 +#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3)
9940 +#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4)
9941 +#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5)
9942 +#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1)
9943 +
9944 +#define NH_FLD_ETH_ADDR_SIZE 6
9945 +
9946 +/*************************** VLAN fields ***********************************/
9947 +#define NH_FLD_VLAN_VPRI (1)
9948 +#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1)
9949 +#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2)
9950 +#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3)
9951 +#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4)
9952 +#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1)
9953 +
9954 +#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
9955 + NH_FLD_VLAN_CFI | \
9956 + NH_FLD_VLAN_VID)
9957 +
9958 +/************************ IP (generic) fields ******************************/
9959 +#define NH_FLD_IP_VER (1)
9960 +#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2)
9961 +#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3)
9962 +#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4)
9963 +#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5)
9964 +#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6)
9965 +#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7)
9966 +#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8)
9967 +#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1)
9968 +
9969 +#define NH_FLD_IP_PROTO_SIZE 1
9970 +
9971 +/***************************** IPV4 fields *********************************/
9972 +#define NH_FLD_IPV4_VER (1)
9973 +#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1)
9974 +#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2)
9975 +#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3)
9976 +#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4)
9977 +#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5)
9978 +#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6)
9979 +#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7)
9980 +#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8)
9981 +#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9)
9982 +#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10)
9983 +#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11)
9984 +#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12)
9985 +#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13)
9986 +#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14)
9987 +#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1)
9988 +
9989 +#define NH_FLD_IPV4_ADDR_SIZE 4
9990 +#define NH_FLD_IPV4_PROTO_SIZE 1
9991 +
9992 +/***************************** IPV6 fields *********************************/
9993 +#define NH_FLD_IPV6_VER (1)
9994 +#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1)
9995 +#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2)
9996 +#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3)
9997 +#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4)
9998 +#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5)
9999 +#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6)
10000 +#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7)
10001 +#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1)
10002 +
10003 +#define NH_FLD_IPV6_ADDR_SIZE 16
10004 +#define NH_FLD_IPV6_NEXT_HDR_SIZE 1
10005 +
10006 +/***************************** ICMP fields *********************************/
10007 +#define NH_FLD_ICMP_TYPE (1)
10008 +#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1)
10009 +#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2)
10010 +#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3)
10011 +#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4)
10012 +#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1)
10013 +
10014 +#define NH_FLD_ICMP_CODE_SIZE 1
10015 +#define NH_FLD_ICMP_TYPE_SIZE 1
10016 +
10017 +/***************************** IGMP fields *********************************/
10018 +#define NH_FLD_IGMP_VERSION (1)
10019 +#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1)
10020 +#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2)
10021 +#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3)
10022 +#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1)
10023 +
10024 +/***************************** TCP fields **********************************/
10025 +#define NH_FLD_TCP_PORT_SRC (1)
10026 +#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1)
10027 +#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2)
10028 +#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3)
10029 +#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4)
10030 +#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5)
10031 +#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6)
10032 +#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7)
10033 +#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8)
10034 +#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9)
10035 +#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10)
10036 +#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1)
10037 +
10038 +#define NH_FLD_TCP_PORT_SIZE 2
10039 +
10040 +/***************************** UDP fields **********************************/
10041 +#define NH_FLD_UDP_PORT_SRC (1)
10042 +#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1)
10043 +#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2)
10044 +#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3)
10045 +#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1)
10046 +
10047 +#define NH_FLD_UDP_PORT_SIZE 2
10048 +
10049 +/*************************** UDP-lite fields *******************************/
10050 +#define NH_FLD_UDP_LITE_PORT_SRC (1)
10051 +#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1)
10052 +#define NH_FLD_UDP_LITE_ALL_FIELDS \
10053 + ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1)
10054 +
10055 +#define NH_FLD_UDP_LITE_PORT_SIZE 2
10056 +
10057 +/*************************** UDP-encap-ESP fields **************************/
10058 +#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1)
10059 +#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1)
10060 +#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2)
10061 +#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3)
10062 +#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4)
10063 +#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5)
10064 +#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \
10065 + ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1)
10066 +
10067 +#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2
10068 +#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4
10069 +
10070 +/***************************** SCTP fields *********************************/
10071 +#define NH_FLD_SCTP_PORT_SRC (1)
10072 +#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1)
10073 +#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2)
10074 +#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3)
10075 +#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1)
10076 +
10077 +#define NH_FLD_SCTP_PORT_SIZE 2
10078 +
10079 +/***************************** DCCP fields *********************************/
10080 +#define NH_FLD_DCCP_PORT_SRC (1)
10081 +#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1)
10082 +#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1)
10083 +
10084 +#define NH_FLD_DCCP_PORT_SIZE 2
10085 +
10086 +/***************************** IPHC fields *********************************/
10087 +#define NH_FLD_IPHC_CID (1)
10088 +#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1)
10089 +#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2)
10090 +#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3)
10091 +#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4)
10092 +#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1)
10093 +
10094 +/***************************** SCTP fields *********************************/
10095 +#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1)
10096 +#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1)
10097 +#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2)
10098 +#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3)
10099 +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4)
10100 +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
10101 +#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
10102 +#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
10103 +#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
10104 +#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
10105 +#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
10106 + ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
10107 +
10108 +/*************************** L2TPV2 fields *********************************/
10109 +#define NH_FLD_L2TPV2_TYPE_BIT (1)
10110 +#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1)
10111 +#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2)
10112 +#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3)
10113 +#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4)
10114 +#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5)
10115 +#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6)
10116 +#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7)
10117 +#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8)
10118 +#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9)
10119 +#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10)
10120 +#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11)
10121 +#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12)
10122 +#define NH_FLD_L2TPV2_ALL_FIELDS \
10123 + ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1)
10124 +
10125 +/*************************** L2TPV3 fields *********************************/
10126 +#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1)
10127 +#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1)
10128 +#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2)
10129 +#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3)
10130 +#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4)
10131 +#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5)
10132 +#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6)
10133 +#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7)
10134 +#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8)
10135 +#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \
10136 + ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1)
10137 +
10138 +#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1)
10139 +#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1)
10140 +#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2)
10141 +#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3)
10142 +#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \
10143 + ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1)
10144 +
10145 +/**************************** PPP fields ***********************************/
10146 +#define NH_FLD_PPP_PID (1)
10147 +#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1)
10148 +#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1)
10149 +
10150 +/************************** PPPoE fields ***********************************/
10151 +#define NH_FLD_PPPOE_VER (1)
10152 +#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1)
10153 +#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2)
10154 +#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3)
10155 +#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4)
10156 +#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5)
10157 +#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6)
10158 +#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1)
10159 +
10160 +/************************* PPP-Mux fields **********************************/
10161 +#define NH_FLD_PPPMUX_PID (1)
10162 +#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1)
10163 +#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2)
10164 +#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1)
10165 +
10166 +/*********************** PPP-Mux sub-frame fields **************************/
10167 +#define NH_FLD_PPPMUX_SUBFRM_PFF (1)
10168 +#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1)
10169 +#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2)
10170 +#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3)
10171 +#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4)
10172 +#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \
10173 + ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1)
10174 +
10175 +/*************************** LLC fields ************************************/
10176 +#define NH_FLD_LLC_DSAP (1)
10177 +#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1)
10178 +#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2)
10179 +#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1)
10180 +
10181 +/*************************** NLPID fields **********************************/
10182 +#define NH_FLD_NLPID_NLPID (1)
10183 +#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1)
10184 +
10185 +/*************************** SNAP fields ***********************************/
10186 +#define NH_FLD_SNAP_OUI (1)
10187 +#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1)
10188 +#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1)
10189 +
10190 +/*************************** LLC SNAP fields *******************************/
10191 +#define NH_FLD_LLC_SNAP_TYPE (1)
10192 +#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1)
10193 +
10194 +#define NH_FLD_ARP_HTYPE (1)
10195 +#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1)
10196 +#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2)
10197 +#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3)
10198 +#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4)
10199 +#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5)
10200 +#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6)
10201 +#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7)
10202 +#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8)
10203 +#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1)
10204 +
10205 +/*************************** RFC2684 fields ********************************/
10206 +#define NH_FLD_RFC2684_LLC (1)
10207 +#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1)
10208 +#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2)
10209 +#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3)
10210 +#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4)
10211 +#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5)
10212 +#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1)
10213 +
10214 +/*************************** User defined fields ***************************/
10215 +#define NH_FLD_USER_DEFINED_SRCPORT (1)
10216 +#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1)
10217 +#define NH_FLD_USER_DEFINED_ALL_FIELDS \
10218 + ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1)
10219 +
10220 +/*************************** Payload fields ********************************/
10221 +#define NH_FLD_PAYLOAD_BUFFER (1)
10222 +#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1)
10223 +#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2)
10224 +#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3)
10225 +#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4)
10226 +#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5)
10227 +#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1)
10228 +
10229 +/*************************** GRE fields ************************************/
10230 +#define NH_FLD_GRE_TYPE (1)
10231 +#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1)
10232 +
10233 +/*************************** MINENCAP fields *******************************/
10234 +#define NH_FLD_MINENCAP_SRC_IP (1)
10235 +#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1)
10236 +#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2)
10237 +#define NH_FLD_MINENCAP_ALL_FIELDS \
10238 + ((NH_FLD_MINENCAP_SRC_IP << 3) - 1)
10239 +
10240 +/*************************** IPSEC AH fields *******************************/
10241 +#define NH_FLD_IPSEC_AH_SPI (1)
10242 +#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1)
10243 +#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1)
10244 +
10245 +/*************************** IPSEC ESP fields ******************************/
10246 +#define NH_FLD_IPSEC_ESP_SPI (1)
10247 +#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1)
10248 +#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1)
10249 +
10250 +#define NH_FLD_IPSEC_ESP_SPI_SIZE 4
10251 +
10252 +/*************************** MPLS fields ***********************************/
10253 +#define NH_FLD_MPLS_LABEL_STACK (1)
10254 +#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \
10255 + ((NH_FLD_MPLS_LABEL_STACK << 1) - 1)
10256 +
10257 +/*************************** MACSEC fields *********************************/
10258 +#define NH_FLD_MACSEC_SECTAG (1)
10259 +#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1)
10260 +
10261 +/*************************** GTP fields ************************************/
10262 +#define NH_FLD_GTP_TEID (1)
10263 +
10264 +/* Protocol options */
10265 +
10266 +/* Ethernet options */
10267 +#define NH_OPT_ETH_BROADCAST 1
10268 +#define NH_OPT_ETH_MULTICAST 2
10269 +#define NH_OPT_ETH_UNICAST 3
10270 +#define NH_OPT_ETH_BPDU 4
10271 +
10272 +#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01)
10273 +/* also applicable for broadcast */
10274 +
10275 +/* VLAN options */
10276 +#define NH_OPT_VLAN_CFI 1
10277 +
10278 +/* IPV4 options */
10279 +#define NH_OPT_IPV4_UNICAST 1
10280 +#define NH_OPT_IPV4_MULTICAST 2
10281 +#define NH_OPT_IPV4_BROADCAST 3
10282 +#define NH_OPT_IPV4_OPTION 4
10283 +#define NH_OPT_IPV4_FRAG 5
10284 +#define NH_OPT_IPV4_INITIAL_FRAG 6
10285 +
10286 +/* IPV6 options */
10287 +#define NH_OPT_IPV6_UNICAST 1
10288 +#define NH_OPT_IPV6_MULTICAST 2
10289 +#define NH_OPT_IPV6_OPTION 3
10290 +#define NH_OPT_IPV6_FRAG 4
10291 +#define NH_OPT_IPV6_INITIAL_FRAG 5
10292 +
10293 +/* General IP options (may be used for any version) */
10294 +#define NH_OPT_IP_FRAG 1
10295 +#define NH_OPT_IP_INITIAL_FRAG 2
10296 +#define NH_OPT_IP_OPTION 3
10297 +
10298 +/* Minenc. options */
10299 +#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1
10300 +
10301 +/* GRE. options */
10302 +#define NH_OPT_GRE_ROUTING_PRESENT 1
10303 +
10304 +/* TCP options */
10305 +#define NH_OPT_TCP_OPTIONS 1
10306 +#define NH_OPT_TCP_CONTROL_HIGH_BITS 2
10307 +#define NH_OPT_TCP_CONTROL_LOW_BITS 3
10308 +
10309 +/* CAPWAP options */
10310 +#define NH_OPT_CAPWAP_DTLS 1
10311 +
10312 +enum net_prot {
10313 + NET_PROT_NONE = 0,
10314 + NET_PROT_PAYLOAD,
10315 + NET_PROT_ETH,
10316 + NET_PROT_VLAN,
10317 + NET_PROT_IPV4,
10318 + NET_PROT_IPV6,
10319 + NET_PROT_IP,
10320 + NET_PROT_TCP,
10321 + NET_PROT_UDP,
10322 + NET_PROT_UDP_LITE,
10323 + NET_PROT_IPHC,
10324 + NET_PROT_SCTP,
10325 + NET_PROT_SCTP_CHUNK_DATA,
10326 + NET_PROT_PPPOE,
10327 + NET_PROT_PPP,
10328 + NET_PROT_PPPMUX,
10329 + NET_PROT_PPPMUX_SUBFRM,
10330 + NET_PROT_L2TPV2,
10331 + NET_PROT_L2TPV3_CTRL,
10332 + NET_PROT_L2TPV3_SESS,
10333 + NET_PROT_LLC,
10334 + NET_PROT_LLC_SNAP,
10335 + NET_PROT_NLPID,
10336 + NET_PROT_SNAP,
10337 + NET_PROT_MPLS,
10338 + NET_PROT_IPSEC_AH,
10339 + NET_PROT_IPSEC_ESP,
10340 + NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
10341 + NET_PROT_MACSEC,
10342 + NET_PROT_GRE,
10343 + NET_PROT_MINENCAP,
10344 + NET_PROT_DCCP,
10345 + NET_PROT_ICMP,
10346 + NET_PROT_IGMP,
10347 + NET_PROT_ARP,
10348 + NET_PROT_CAPWAP_DATA,
10349 + NET_PROT_CAPWAP_CTRL,
10350 + NET_PROT_RFC2684,
10351 + NET_PROT_ICMPV6,
10352 + NET_PROT_FCOE,
10353 + NET_PROT_FIP,
10354 + NET_PROT_ISCSI,
10355 + NET_PROT_GTP,
10356 + NET_PROT_USER_DEFINED_L2,
10357 + NET_PROT_USER_DEFINED_L3,
10358 + NET_PROT_USER_DEFINED_L4,
10359 + NET_PROT_USER_DEFINED_L5,
10360 + NET_PROT_USER_DEFINED_SHIM1,
10361 + NET_PROT_USER_DEFINED_SHIM2,
10362 +
10363 + NET_PROT_DUMMY_LAST
10364 +};
10365 +
10366 +/*! IEEE 802.1Q */
10367 +#define NH_IEEE8021Q_ETYPE 0x8100
10368 +#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
10369 + ((((u32)((etype) & 0xFFFF)) << 16) | \
10370 + (((u32)((pcp) & 0x07)) << 13) | \
10371 + (((u32)((dei) & 0x01)) << 12) | \
10372 + (((u32)((vlan_id) & 0xFFF))))
10373 +
10374 +#endif /* __FSL_NET_H */
10375 --- /dev/null
10376 +++ b/drivers/staging/fsl-dpaa2/ethsw/Kconfig
10377 @@ -0,0 +1,6 @@
10378 +config FSL_DPAA2_ETHSW
10379 + tristate "DPAA2 Ethernet Switch"
10380 + depends on FSL_MC_BUS && FSL_DPAA2
10381 + default y
10382 + ---help---
10383 + Prototype driver for DPAA2 Ethernet Switch.
10384 --- /dev/null
10385 +++ b/drivers/staging/fsl-dpaa2/ethsw/Makefile
10386 @@ -0,0 +1,10 @@
10387 +
10388 +obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o
10389 +
10390 +dpaa2-ethsw-objs := switch.o dpsw.o
10391 +
10392 +all:
10393 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
10394 +
10395 +clean:
10396 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
10397 --- /dev/null
10398 +++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
10399 @@ -0,0 +1,851 @@
10400 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
10401 + *
10402 + * Redistribution and use in source and binary forms, with or without
10403 + * modification, are permitted provided that the following conditions are met:
10404 + * * Redistributions of source code must retain the above copyright
10405 + * notice, this list of conditions and the following disclaimer.
10406 + * * Redistributions in binary form must reproduce the above copyright
10407 + * notice, this list of conditions and the following disclaimer in the
10408 + * documentation and/or other materials provided with the distribution.
10409 + * * Neither the name of the above-listed copyright holders nor the
10410 + * names of any contributors may be used to endorse or promote products
10411 + * derived from this software without specific prior written permission.
10412 + *
10413 + *
10414 + * ALTERNATIVELY, this software may be distributed under the terms of the
10415 + * GNU General Public License ("GPL") as published by the Free Software
10416 + * Foundation, either version 2 of that License or (at your option) any
10417 + * later version.
10418 + *
10419 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
10420 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
10421 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
10422 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
10423 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
10424 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
10425 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
10426 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
10427 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
10428 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
10429 + * POSSIBILITY OF SUCH DAMAGE.
10430 + */
10431 +#ifndef __FSL_DPSW_CMD_H
10432 +#define __FSL_DPSW_CMD_H
10433 +
10434 +/* DPSW Version */
10435 +#define DPSW_VER_MAJOR 8
10436 +#define DPSW_VER_MINOR 0
10437 +
10438 +#define DPSW_CMD_BASE_VERSION 1
10439 +#define DPSW_CMD_ID_OFFSET 4
10440 +
10441 +#define DPSW_CMD_ID(id) (((id) << DPSW_CMD_ID_OFFSET) | DPSW_CMD_BASE_VERSION)
10442 +
10443 +/* Command IDs */
10444 +#define DPSW_CMDID_CLOSE DPSW_CMD_ID(0x800)
10445 +#define DPSW_CMDID_OPEN DPSW_CMD_ID(0x802)
10446 +
10447 +#define DPSW_CMDID_GET_API_VERSION DPSW_CMD_ID(0xa02)
10448 +
10449 +#define DPSW_CMDID_ENABLE DPSW_CMD_ID(0x002)
10450 +#define DPSW_CMDID_DISABLE DPSW_CMD_ID(0x003)
10451 +#define DPSW_CMDID_GET_ATTR DPSW_CMD_ID(0x004)
10452 +#define DPSW_CMDID_RESET DPSW_CMD_ID(0x005)
10453 +#define DPSW_CMDID_IS_ENABLED DPSW_CMD_ID(0x006)
10454 +
10455 +#define DPSW_CMDID_SET_IRQ DPSW_CMD_ID(0x010)
10456 +#define DPSW_CMDID_GET_IRQ DPSW_CMD_ID(0x011)
10457 +#define DPSW_CMDID_SET_IRQ_ENABLE DPSW_CMD_ID(0x012)
10458 +#define DPSW_CMDID_GET_IRQ_ENABLE DPSW_CMD_ID(0x013)
10459 +#define DPSW_CMDID_SET_IRQ_MASK DPSW_CMD_ID(0x014)
10460 +#define DPSW_CMDID_GET_IRQ_MASK DPSW_CMD_ID(0x015)
10461 +#define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016)
10462 +#define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017)
10463 +
10464 +#define DPSW_CMDID_SET_REFLECTION_IF DPSW_CMD_ID(0x022)
10465 +
10466 +#define DPSW_CMDID_ADD_CUSTOM_TPID DPSW_CMD_ID(0x024)
10467 +
10468 +#define DPSW_CMDID_REMOVE_CUSTOM_TPID DPSW_CMD_ID(0x026)
10469 +
10470 +#define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030)
10471 +#define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031)
10472 +#define DPSW_CMDID_IF_SET_ACCEPTED_FRAMES DPSW_CMD_ID(0x032)
10473 +#define DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN DPSW_CMD_ID(0x033)
10474 +#define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_ID(0x034)
10475 +#define DPSW_CMDID_IF_SET_COUNTER DPSW_CMD_ID(0x035)
10476 +#define DPSW_CMDID_IF_SET_TX_SELECTION DPSW_CMD_ID(0x036)
10477 +#define DPSW_CMDID_IF_ADD_REFLECTION DPSW_CMD_ID(0x037)
10478 +#define DPSW_CMDID_IF_REMOVE_REFLECTION DPSW_CMD_ID(0x038)
10479 +#define DPSW_CMDID_IF_SET_FLOODING_METERING DPSW_CMD_ID(0x039)
10480 +#define DPSW_CMDID_IF_SET_METERING DPSW_CMD_ID(0x03A)
10481 +#define DPSW_CMDID_IF_SET_EARLY_DROP DPSW_CMD_ID(0x03B)
10482 +
10483 +#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D)
10484 +#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E)
10485 +
10486 +#define DPSW_CMDID_IF_GET_ATTR DPSW_CMD_ID(0x042)
10487 +
10488 +#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x044)
10489 +#define DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x045)
10490 +#define DPSW_CMDID_IF_GET_LINK_STATE DPSW_CMD_ID(0x046)
10491 +#define DPSW_CMDID_IF_SET_FLOODING DPSW_CMD_ID(0x047)
10492 +#define DPSW_CMDID_IF_SET_BROADCAST DPSW_CMD_ID(0x048)
10493 +#define DPSW_CMDID_IF_SET_MULTICAST DPSW_CMD_ID(0x049)
10494 +#define DPSW_CMDID_IF_GET_TCI DPSW_CMD_ID(0x04A)
10495 +
10496 +#define DPSW_CMDID_IF_SET_LINK_CFG DPSW_CMD_ID(0x04C)
10497 +
10498 +#define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060)
10499 +#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_ID(0x061)
10500 +#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED DPSW_CMD_ID(0x062)
10501 +#define DPSW_CMDID_VLAN_ADD_IF_FLOODING DPSW_CMD_ID(0x063)
10502 +#define DPSW_CMDID_VLAN_REMOVE_IF DPSW_CMD_ID(0x064)
10503 +#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED DPSW_CMD_ID(0x065)
10504 +#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING DPSW_CMD_ID(0x066)
10505 +#define DPSW_CMDID_VLAN_REMOVE DPSW_CMD_ID(0x067)
10506 +#define DPSW_CMDID_VLAN_GET_IF DPSW_CMD_ID(0x068)
10507 +#define DPSW_CMDID_VLAN_GET_IF_FLOODING DPSW_CMD_ID(0x069)
10508 +#define DPSW_CMDID_VLAN_GET_IF_UNTAGGED DPSW_CMD_ID(0x06A)
10509 +#define DPSW_CMDID_VLAN_GET_ATTRIBUTES DPSW_CMD_ID(0x06B)
10510 +
10511 +#define DPSW_CMDID_FDB_GET_MULTICAST DPSW_CMD_ID(0x080)
10512 +#define DPSW_CMDID_FDB_GET_UNICAST DPSW_CMD_ID(0x081)
10513 +#define DPSW_CMDID_FDB_ADD DPSW_CMD_ID(0x082)
10514 +#define DPSW_CMDID_FDB_REMOVE DPSW_CMD_ID(0x083)
10515 +#define DPSW_CMDID_FDB_ADD_UNICAST DPSW_CMD_ID(0x084)
10516 +#define DPSW_CMDID_FDB_REMOVE_UNICAST DPSW_CMD_ID(0x085)
10517 +#define DPSW_CMDID_FDB_ADD_MULTICAST DPSW_CMD_ID(0x086)
10518 +#define DPSW_CMDID_FDB_REMOVE_MULTICAST DPSW_CMD_ID(0x087)
10519 +#define DPSW_CMDID_FDB_SET_LEARNING_MODE DPSW_CMD_ID(0x088)
10520 +#define DPSW_CMDID_FDB_GET_ATTR DPSW_CMD_ID(0x089)
10521 +
10522 +#define DPSW_CMDID_ACL_ADD DPSW_CMD_ID(0x090)
10523 +#define DPSW_CMDID_ACL_REMOVE DPSW_CMD_ID(0x091)
10524 +#define DPSW_CMDID_ACL_ADD_ENTRY DPSW_CMD_ID(0x092)
10525 +#define DPSW_CMDID_ACL_REMOVE_ENTRY DPSW_CMD_ID(0x093)
10526 +#define DPSW_CMDID_ACL_ADD_IF DPSW_CMD_ID(0x094)
10527 +#define DPSW_CMDID_ACL_REMOVE_IF DPSW_CMD_ID(0x095)
10528 +#define DPSW_CMDID_ACL_GET_ATTR DPSW_CMD_ID(0x096)
10529 +
10530 +#define DPSW_CMDID_CTRL_IF_GET_ATTR DPSW_CMD_ID(0x0A0)
10531 +#define DPSW_CMDID_CTRL_IF_SET_POOLS DPSW_CMD_ID(0x0A1)
10532 +#define DPSW_CMDID_CTRL_IF_ENABLE DPSW_CMD_ID(0x0A2)
10533 +#define DPSW_CMDID_CTRL_IF_DISABLE DPSW_CMD_ID(0x0A3)
10534 +
10535 +/* Macros for accessing command fields smaller than 1byte */
10536 +#define DPSW_MASK(field) \
10537 + GENMASK(DPSW_##field##_SHIFT + DPSW_##field##_SIZE - 1, \
10538 + DPSW_##field##_SHIFT)
10539 +#define dpsw_set_field(var, field, val) \
10540 + ((var) |= (((val) << DPSW_##field##_SHIFT) & DPSW_MASK(field)))
10541 +#define dpsw_get_field(var, field) \
10542 + (((var) & DPSW_MASK(field)) >> DPSW_##field##_SHIFT)
10543 +#define dpsw_get_bit(var, bit) \
10544 + (((var) >> (bit)) & GENMASK(0, 0))
10545 +
10546 +static inline u64 dpsw_set_bit(u64 var, unsigned int bit, u8 val)
10547 +{
10548 + var |= (u64)val << bit & GENMASK(bit, bit);
10549 + return var;
10550 +}
10551 +
10552 +struct dpsw_cmd_open {
10553 + __le32 dpsw_id;
10554 +};
10555 +
10556 +#define DPSW_COMPONENT_TYPE_SHIFT 0
10557 +#define DPSW_COMPONENT_TYPE_SIZE 4
10558 +
10559 +struct dpsw_cmd_create {
10560 + /* cmd word 0 */
10561 + __le16 num_ifs;
10562 + u8 max_fdbs;
10563 + u8 max_meters_per_if;
10564 + /* from LSB: only the first 4 bits */
10565 + u8 component_type;
10566 + u8 pad[3];
10567 + /* cmd word 1 */
10568 + __le16 max_vlans;
10569 + __le16 max_fdb_entries;
10570 + __le16 fdb_aging_time;
10571 + __le16 max_fdb_mc_groups;
10572 + /* cmd word 2 */
10573 + __le64 options;
10574 +};
10575 +
10576 +struct dpsw_cmd_destroy {
10577 + __le32 dpsw_id;
10578 +};
10579 +
10580 +#define DPSW_ENABLE_SHIFT 0
10581 +#define DPSW_ENABLE_SIZE 1
10582 +
10583 +struct dpsw_rsp_is_enabled {
10584 + /* from LSB: enable:1 */
10585 + u8 enabled;
10586 +};
10587 +
10588 +struct dpsw_cmd_set_irq {
10589 + /* cmd word 0 */
10590 + u8 irq_index;
10591 + u8 pad[3];
10592 + __le32 irq_val;
10593 + /* cmd word 1 */
10594 + __le64 irq_addr;
10595 + /* cmd word 2 */
10596 + __le32 irq_num;
10597 +};
10598 +
10599 +struct dpsw_cmd_get_irq {
10600 + __le32 pad;
10601 + u8 irq_index;
10602 +};
10603 +
10604 +struct dpsw_rsp_get_irq {
10605 + /* cmd word 0 */
10606 + __le32 irq_val;
10607 + __le32 pad;
10608 + /* cmd word 1 */
10609 + __le64 irq_addr;
10610 + /* cmd word 2 */
10611 + __le32 irq_num;
10612 + __le32 irq_type;
10613 +};
10614 +
10615 +struct dpsw_cmd_set_irq_enable {
10616 + u8 enable_state;
10617 + u8 pad[3];
10618 + u8 irq_index;
10619 +};
10620 +
10621 +struct dpsw_cmd_get_irq_enable {
10622 + __le32 pad;
10623 + u8 irq_index;
10624 +};
10625 +
10626 +struct dpsw_rsp_get_irq_enable {
10627 + u8 enable_state;
10628 +};
10629 +
10630 +struct dpsw_cmd_set_irq_mask {
10631 + __le32 mask;
10632 + u8 irq_index;
10633 +};
10634 +
10635 +struct dpsw_cmd_get_irq_mask {
10636 + __le32 pad;
10637 + u8 irq_index;
10638 +};
10639 +
10640 +struct dpsw_rsp_get_irq_mask {
10641 + __le32 mask;
10642 +};
10643 +
10644 +struct dpsw_cmd_get_irq_status {
10645 + __le32 status;
10646 + u8 irq_index;
10647 +};
10648 +
10649 +struct dpsw_rsp_get_irq_status {
10650 + __le32 status;
10651 +};
10652 +
10653 +struct dpsw_cmd_clear_irq_status {
10654 + __le32 status;
10655 + u8 irq_index;
10656 +};
10657 +
10658 +#define DPSW_COMPONENT_TYPE_SHIFT 0
10659 +#define DPSW_COMPONENT_TYPE_SIZE 4
10660 +
10661 +struct dpsw_rsp_get_attr {
10662 + /* cmd word 0 */
10663 + __le16 num_ifs;
10664 + u8 max_fdbs;
10665 + u8 num_fdbs;
10666 + __le16 max_vlans;
10667 + __le16 num_vlans;
10668 + /* cmd word 1 */
10669 + __le16 max_fdb_entries;
10670 + __le16 fdb_aging_time;
10671 + __le32 dpsw_id;
10672 + /* cmd word 2 */
10673 + __le16 mem_size;
10674 + __le16 max_fdb_mc_groups;
10675 + u8 max_meters_per_if;
10676 +	/* from LSB only the first 4 bits */
10677 + u8 component_type;
10678 + __le16 pad;
10679 + /* cmd word 3 */
10680 + __le64 options;
10681 +};
10682 +
10683 +struct dpsw_cmd_set_reflection_if {
10684 + __le16 if_id;
10685 +};
10686 +
10687 +struct dpsw_cmd_if_set_flooding {
10688 + __le16 if_id;
10689 + /* from LSB: enable:1 */
10690 + u8 enable;
10691 +};
10692 +
10693 +struct dpsw_cmd_if_set_broadcast {
10694 + __le16 if_id;
10695 + /* from LSB: enable:1 */
10696 + u8 enable;
10697 +};
10698 +
10699 +struct dpsw_cmd_if_set_multicast {
10700 + __le16 if_id;
10701 + /* from LSB: enable:1 */
10702 + u8 enable;
10703 +};
10704 +
10705 +#define DPSW_VLAN_ID_SHIFT 0
10706 +#define DPSW_VLAN_ID_SIZE 12
10707 +#define DPSW_DEI_SHIFT 12
10708 +#define DPSW_DEI_SIZE 1
10709 +#define DPSW_PCP_SHIFT 13
10710 +#define DPSW_PCP_SIZE 3
10711 +
10712 +struct dpsw_cmd_if_set_tci {
10713 + __le16 if_id;
10714 + /* from LSB: VLAN_ID:12 DEI:1 PCP:3 */
10715 + __le16 conf;
10716 +};
10717 +
10718 +struct dpsw_cmd_if_get_tci {
10719 + __le16 if_id;
10720 +};
10721 +
10722 +struct dpsw_rsp_if_get_tci {
10723 + __le16 pad;
10724 + __le16 vlan_id;
10725 + u8 dei;
10726 + u8 pcp;
10727 +};
10728 +
10729 +#define DPSW_STATE_SHIFT 0
10730 +#define DPSW_STATE_SIZE 4
10731 +
10732 +struct dpsw_cmd_if_set_stp {
10733 + __le16 if_id;
10734 + __le16 vlan_id;
10735 + /* only the first LSB 4 bits */
10736 + u8 state;
10737 +};
10738 +
10739 +#define DPSW_FRAME_TYPE_SHIFT 0
10740 +#define DPSW_FRAME_TYPE_SIZE 4
10741 +#define DPSW_UNACCEPTED_ACT_SHIFT 4
10742 +#define DPSW_UNACCEPTED_ACT_SIZE 4
10743 +
10744 +struct dpsw_cmd_if_set_accepted_frames {
10745 + __le16 if_id;
10746 + /* from LSB: type:4 unaccepted_act:4 */
10747 + u8 unaccepted;
10748 +};
10749 +
10750 +#define DPSW_ACCEPT_ALL_SHIFT 0
10751 +#define DPSW_ACCEPT_ALL_SIZE 1
10752 +
10753 +struct dpsw_cmd_if_set_accept_all_vlan {
10754 + __le16 if_id;
10755 + /* only the least significant bit */
10756 + u8 accept_all;
10757 +};
10758 +
10759 +#define DPSW_COUNTER_TYPE_SHIFT 0
10760 +#define DPSW_COUNTER_TYPE_SIZE 5
10761 +
10762 +struct dpsw_cmd_if_get_counter {
10763 + __le16 if_id;
10764 + /* from LSB: type:5 */
10765 + u8 type;
10766 +};
10767 +
10768 +struct dpsw_rsp_if_get_counter {
10769 + __le64 pad;
10770 + __le64 counter;
10771 +};
10772 +
10773 +struct dpsw_cmd_if_set_counter {
10774 + /* cmd word 0 */
10775 + __le16 if_id;
10776 + /* from LSB: type:5 */
10777 + u8 type;
10778 + /* cmd word 1 */
10779 + __le64 counter;
10780 +};
10781 +
10782 +#define DPSW_PRIORITY_SELECTOR_SHIFT 0
10783 +#define DPSW_PRIORITY_SELECTOR_SIZE 3
10784 +#define DPSW_SCHED_MODE_SHIFT 0
10785 +#define DPSW_SCHED_MODE_SIZE 4
10786 +
10787 +struct dpsw_cmd_if_set_tx_selection {
10788 + __le16 if_id;
10789 + /* from LSB: priority_selector:3 */
10790 + u8 priority_selector;
10791 + u8 pad[5];
10792 + u8 tc_id[8];
10793 +
10794 + struct dpsw_tc_sched {
10795 + __le16 delta_bandwidth;
10796 + u8 mode;
10797 + u8 pad;
10798 + } tc_sched[8];
10799 +};
10800 +
10801 +#define DPSW_FILTER_SHIFT 0
10802 +#define DPSW_FILTER_SIZE 2
10803 +
10804 +struct dpsw_cmd_if_reflection {
10805 + __le16 if_id;
10806 + __le16 vlan_id;
10807 + /* only 2 bits from the LSB */
10808 + u8 filter;
10809 +};
10810 +
10811 +#define DPSW_MODE_SHIFT 0
10812 +#define DPSW_MODE_SIZE 4
10813 +#define DPSW_UNITS_SHIFT 4
10814 +#define DPSW_UNITS_SIZE 4
10815 +
10816 +struct dpsw_cmd_if_set_flooding_metering {
10817 + /* cmd word 0 */
10818 + __le16 if_id;
10819 + u8 pad;
10820 + /* from LSB: mode:4 units:4 */
10821 + u8 mode_units;
10822 + __le32 cir;
10823 + /* cmd word 1 */
10824 + __le32 eir;
10825 + __le32 cbs;
10826 + /* cmd word 2 */
10827 + __le32 ebs;
10828 +};
10829 +
10830 +struct dpsw_cmd_if_set_metering {
10831 + /* cmd word 0 */
10832 + __le16 if_id;
10833 + u8 tc_id;
10834 + /* from LSB: mode:4 units:4 */
10835 + u8 mode_units;
10836 + __le32 cir;
10837 + /* cmd word 1 */
10838 + __le32 eir;
10839 + __le32 cbs;
10840 + /* cmd word 2 */
10841 + __le32 ebs;
10842 +};
10843 +
10844 +#define DPSW_EARLY_DROP_MODE_SHIFT 0
10845 +#define DPSW_EARLY_DROP_MODE_SIZE 2
10846 +#define DPSW_EARLY_DROP_UNIT_SHIFT 2
10847 +#define DPSW_EARLY_DROP_UNIT_SIZE 2
10848 +
10849 +struct dpsw_prep_early_drop {
10850 + /* from LSB: mode:2 units:2 */
10851 + u8 conf;
10852 + u8 pad0[3];
10853 + __le32 tail_drop_threshold;
10854 + u8 green_drop_probability;
10855 + u8 pad1[7];
10856 + __le64 green_max_threshold;
10857 + __le64 green_min_threshold;
10858 + __le64 pad2;
10859 + u8 yellow_drop_probability;
10860 + u8 pad3[7];
10861 + __le64 yellow_max_threshold;
10862 + __le64 yellow_min_threshold;
10863 +};
10864 +
10865 +struct dpsw_cmd_if_set_early_drop {
10866 + /* cmd word 0 */
10867 + u8 pad0;
10868 + u8 tc_id;
10869 + __le16 if_id;
10870 + __le32 pad1;
10871 + /* cmd word 1 */
10872 + __le64 early_drop_iova;
10873 +};
10874 +
10875 +struct dpsw_cmd_custom_tpid {
10876 + __le16 pad;
10877 + __le16 tpid;
10878 +};
10879 +
10880 +struct dpsw_cmd_if {
10881 + __le16 if_id;
10882 +};
10883 +
10884 +#define DPSW_ADMIT_UNTAGGED_SHIFT 0
10885 +#define DPSW_ADMIT_UNTAGGED_SIZE 4
10886 +#define DPSW_ENABLED_SHIFT 5
10887 +#define DPSW_ENABLED_SIZE 1
10888 +#define DPSW_ACCEPT_ALL_VLAN_SHIFT 6
10889 +#define DPSW_ACCEPT_ALL_VLAN_SIZE 1
10890 +
10891 +struct dpsw_rsp_if_get_attr {
10892 + /* cmd word 0 */
10893 + /* from LSB: admit_untagged:4 enabled:1 accept_all_vlan:1 */
10894 + u8 conf;
10895 + u8 pad1;
10896 + u8 num_tcs;
10897 + u8 pad2;
10898 + __le16 qdid;
10899 + /* cmd word 1 */
10900 + __le32 options;
10901 + __le32 pad3;
10902 + /* cmd word 2 */
10903 + __le32 rate;
10904 +};
10905 +
10906 +struct dpsw_cmd_if_set_max_frame_length {
10907 + __le16 if_id;
10908 + __le16 frame_length;
10909 +};
10910 +
10911 +struct dpsw_cmd_if_get_max_frame_length {
10912 + __le16 if_id;
10913 +};
10914 +
10915 +struct dpsw_rsp_if_get_max_frame_length {
10916 + __le16 pad;
10917 + __le16 frame_length;
10918 +};
10919 +
10920 +struct dpsw_cmd_if_set_link_cfg {
10921 + /* cmd word 0 */
10922 + __le16 if_id;
10923 + u8 pad[6];
10924 + /* cmd word 1 */
10925 + __le32 rate;
10926 + __le32 pad1;
10927 + /* cmd word 2 */
10928 + __le64 options;
10929 +};
10930 +
10931 +struct dpsw_cmd_if_get_link_state {
10932 + __le16 if_id;
10933 +};
10934 +
10935 +#define DPSW_UP_SHIFT 0
10936 +#define DPSW_UP_SIZE 1
10937 +
10938 +struct dpsw_rsp_if_get_link_state {
10939 + /* cmd word 0 */
10940 + __le32 pad0;
10941 + u8 up;
10942 + u8 pad1[3];
10943 + /* cmd word 1 */
10944 + __le32 rate;
10945 + __le32 pad2;
10946 + /* cmd word 2 */
10947 + __le64 options;
10948 +};
10949 +
10950 +struct dpsw_vlan_add {
10951 + __le16 fdb_id;
10952 + __le16 vlan_id;
10953 +};
10954 +
10955 +struct dpsw_cmd_vlan_manage_if {
10956 + /* cmd word 0 */
10957 + __le16 pad0;
10958 + __le16 vlan_id;
10959 + __le32 pad1;
10960 + /* cmd word 1 */
10961 + __le64 if_id[4];
10962 +};
10963 +
10964 +struct dpsw_cmd_vlan_remove {
10965 + __le16 pad;
10966 + __le16 vlan_id;
10967 +};
10968 +
10969 +struct dpsw_cmd_vlan_get_attr {
10970 + __le16 vlan_id;
10971 +};
10972 +
10973 +struct dpsw_rsp_vlan_get_attr {
10974 + /* cmd word 0 */
10975 + __le64 pad;
10976 + /* cmd word 1 */
10977 + __le16 fdb_id;
10978 + __le16 num_ifs;
10979 + __le16 num_untagged_ifs;
10980 + __le16 num_flooding_ifs;
10981 +};
10982 +
10983 +struct dpsw_cmd_vlan_get_if {
10984 + __le16 vlan_id;
10985 +};
10986 +
10987 +struct dpsw_rsp_vlan_get_if {
10988 + /* cmd word 0 */
10989 + __le16 pad0;
10990 + __le16 num_ifs;
10991 + u8 pad1[4];
10992 + /* cmd word 1 */
10993 + __le64 if_id[4];
10994 +};
10995 +
10996 +struct dpsw_cmd_vlan_get_if_untagged {
10997 + __le16 vlan_id;
10998 +};
10999 +
11000 +struct dpsw_rsp_vlan_get_if_untagged {
11001 + /* cmd word 0 */
11002 + __le16 pad0;
11003 + __le16 num_ifs;
11004 + u8 pad1[4];
11005 + /* cmd word 1 */
11006 + __le64 if_id[4];
11007 +};
11008 +
11009 +struct dpsw_cmd_vlan_get_if_flooding {
11010 + __le16 vlan_id;
11011 +};
11012 +
11013 +struct dpsw_rsp_vlan_get_if_flooding {
11014 + /* cmd word 0 */
11015 + __le16 pad0;
11016 + __le16 num_ifs;
11017 + u8 pad1[4];
11018 + /* cmd word 1 */
11019 + __le64 if_id[4];
11020 +};
11021 +
11022 +struct dpsw_cmd_fdb_add {
11023 + __le32 pad;
11024 + __le16 fdb_aging_time;
11025 + __le16 num_fdb_entries;
11026 +};
11027 +
11028 +struct dpsw_rsp_fdb_add {
11029 + __le16 fdb_id;
11030 +};
11031 +
11032 +struct dpsw_cmd_fdb_remove {
11033 + __le16 fdb_id;
11034 +};
11035 +
11036 +#define DPSW_ENTRY_TYPE_SHIFT 0
11037 +#define DPSW_ENTRY_TYPE_SIZE 4
11038 +
11039 +struct dpsw_cmd_fdb_add_unicast {
11040 + /* cmd word 0 */
11041 + __le16 fdb_id;
11042 + u8 mac_addr[6];
11043 + /* cmd word 1 */
11044 + u8 if_egress;
11045 + u8 pad;
11046 + /* only the first 4 bits from LSB */
11047 + u8 type;
11048 +};
11049 +
11050 +struct dpsw_cmd_fdb_get_unicast {
11051 + __le16 fdb_id;
11052 + u8 mac_addr[6];
11053 +};
11054 +
11055 +struct dpsw_rsp_fdb_get_unicast {
11056 + __le64 pad;
11057 + __le16 if_egress;
11058 + /* only first 4 bits from LSB */
11059 + u8 type;
11060 +};
11061 +
11062 +struct dpsw_cmd_fdb_remove_unicast {
11063 + /* cmd word 0 */
11064 + __le16 fdb_id;
11065 + u8 mac_addr[6];
11066 + /* cmd word 1 */
11067 + __le16 if_egress;
11068 + /* only the first 4 bits from LSB */
11069 + u8 type;
11070 +};
11071 +
11072 +struct dpsw_cmd_fdb_add_multicast {
11073 + /* cmd word 0 */
11074 + __le16 fdb_id;
11075 + __le16 num_ifs;
11076 + /* only the first 4 bits from LSB */
11077 + u8 type;
11078 + u8 pad[3];
11079 + /* cmd word 1 */
11080 + u8 mac_addr[6];
11081 + __le16 pad2;
11082 + /* cmd word 2 */
11083 + __le64 if_id[4];
11084 +};
11085 +
11086 +struct dpsw_cmd_fdb_get_multicast {
11087 + __le16 fdb_id;
11088 + u8 mac_addr[6];
11089 +};
11090 +
11091 +struct dpsw_rsp_fdb_get_multicast {
11092 + /* cmd word 0 */
11093 + __le64 pad0;
11094 + /* cmd word 1 */
11095 + __le16 num_ifs;
11096 + /* only the first 4 bits from LSB */
11097 + u8 type;
11098 + u8 pad1[5];
11099 + /* cmd word 2 */
11100 + __le64 if_id[4];
11101 +};
11102 +
11103 +struct dpsw_cmd_fdb_remove_multicast {
11104 + /* cmd word 0 */
11105 + __le16 fdb_id;
11106 + __le16 num_ifs;
11107 + /* only the first 4 bits from LSB */
11108 + u8 type;
11109 + u8 pad[3];
11110 + /* cmd word 1 */
11111 + u8 mac_addr[6];
11112 + __le16 pad2;
11113 + /* cmd word 2 */
11114 + __le64 if_id[4];
11115 +};
11116 +
11117 +#define DPSW_LEARNING_MODE_SHIFT 0
11118 +#define DPSW_LEARNING_MODE_SIZE 4
11119 +
11120 +struct dpsw_cmd_fdb_set_learning_mode {
11121 + __le16 fdb_id;
11122 + /* only the first 4 bits from LSB */
11123 + u8 mode;
11124 +};
11125 +
11126 +struct dpsw_cmd_fdb_get_attr {
11127 + __le16 fdb_id;
11128 +};
11129 +
11130 +struct dpsw_rsp_fdb_get_attr {
11131 + /* cmd word 0 */
11132 + __le16 pad;
11133 + __le16 max_fdb_entries;
11134 + __le16 fdb_aging_time;
11135 + __le16 num_fdb_mc_groups;
11136 + /* cmd word 1 */
11137 + __le16 max_fdb_mc_groups;
11138 + /* only the first 4 bits from LSB */
11139 + u8 learning_mode;
11140 +};
11141 +
11142 +struct dpsw_cmd_acl_add {
11143 + __le16 pad;
11144 + __le16 max_entries;
11145 +};
11146 +
11147 +struct dpsw_rsp_acl_add {
11148 + __le16 acl_id;
11149 +};
11150 +
11151 +struct dpsw_cmd_acl_remove {
11152 + __le16 acl_id;
11153 +};
11154 +
11155 +struct dpsw_prep_acl_entry {
11156 + u8 match_l2_dest_mac[6];
11157 + __le16 match_l2_tpid;
11158 +
11159 + u8 match_l2_source_mac[6];
11160 + __le16 match_l2_vlan_id;
11161 +
11162 + __le32 match_l3_dest_ip;
11163 + __le32 match_l3_source_ip;
11164 +
11165 + __le16 match_l4_dest_port;
11166 + __le16 match_l4_source_port;
11167 + __le16 match_l2_ether_type;
11168 + u8 match_l2_pcp_dei;
11169 + u8 match_l3_dscp;
11170 +
11171 + u8 mask_l2_dest_mac[6];
11172 + __le16 mask_l2_tpid;
11173 +
11174 + u8 mask_l2_source_mac[6];
11175 + __le16 mask_l2_vlan_id;
11176 +
11177 + __le32 mask_l3_dest_ip;
11178 + __le32 mask_l3_source_ip;
11179 +
11180 + __le16 mask_l4_dest_port;
11181 + __le16 mask_l4_source_port;
11182 + __le16 mask_l2_ether_type;
11183 + u8 mask_l2_pcp_dei;
11184 + u8 mask_l3_dscp;
11185 +
11186 + u8 match_l3_protocol;
11187 + u8 mask_l3_protocol;
11188 +};
11189 +
11190 +#define DPSW_RESULT_ACTION_SHIFT 0
11191 +#define DPSW_RESULT_ACTION_SIZE 4
11192 +
11193 +struct dpsw_cmd_acl_entry {
11194 + __le16 acl_id;
11195 + __le16 result_if_id;
11196 + __le32 precedence;
11197 + /* from LSB only the first 4 bits */
11198 + u8 result_action;
11199 + u8 pad[7];
11200 + __le64 pad2[4];
11201 + __le64 key_iova;
11202 +};
11203 +
11204 +struct dpsw_cmd_acl_if {
11205 + /* cmd word 0 */
11206 + __le16 acl_id;
11207 + __le16 num_ifs;
11208 + __le32 pad;
11209 + /* cmd word 1 */
11210 + __le64 if_id[4];
11211 +};
11212 +
11213 +struct dpsw_cmd_acl_get_attr {
11214 + __le16 acl_id;
11215 +};
11216 +
11217 +struct dpsw_rsp_acl_get_attr {
11218 + /* cmd word 0 */
11219 + __le64 pad;
11220 + /* cmd word 1 */
11221 + __le16 max_entries;
11222 + __le16 num_entries;
11223 + __le16 num_ifs;
11224 +};
11225 +
11226 +struct dpsw_rsp_ctrl_if_get_attr {
11227 + /* cmd word 0 */
11228 + __le64 pad;
11229 + /* cmd word 1 */
11230 + __le32 rx_fqid;
11231 + __le32 rx_err_fqid;
11232 + /* cmd word 2 */
11233 + __le32 tx_err_conf_fqid;
11234 +};
11235 +
11236 +struct dpsw_cmd_ctrl_if_set_pools {
11237 + u8 num_dpbp;
11238 +	/* from LSB: POOL0_BACKUP_POOL:1 ... POOL7_BACKUP_POOL:1 */
11239 + u8 backup_pool;
11240 + __le16 pad;
11241 + __le32 dpbp_id[8];
11242 + __le16 buffer_size[8];
11243 +};
11244 +
11245 +struct dpsw_rsp_get_api_version {
11246 + __le16 version_major;
11247 + __le16 version_minor;
11248 +};
11249 +
11250 +#endif /* __FSL_DPSW_CMD_H */
11251 --- /dev/null
11252 +++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
11253 @@ -0,0 +1,2762 @@
11254 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
11255 + *
11256 + * Redistribution and use in source and binary forms, with or without
11257 + * modification, are permitted provided that the following conditions are met:
11258 + * * Redistributions of source code must retain the above copyright
11259 + * notice, this list of conditions and the following disclaimer.
11260 + * * Redistributions in binary form must reproduce the above copyright
11261 + * notice, this list of conditions and the following disclaimer in the
11262 + * documentation and/or other materials provided with the distribution.
11263 + * * Neither the name of the above-listed copyright holders nor the
11264 + * names of any contributors may be used to endorse or promote products
11265 + * derived from this software without specific prior written permission.
11266 + *
11267 + *
11268 + * ALTERNATIVELY, this software may be distributed under the terms of the
11269 + * GNU General Public License ("GPL") as published by the Free Software
11270 + * Foundation, either version 2 of that License or (at your option) any
11271 + * later version.
11272 + *
11273 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
11274 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
11275 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
11276 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
11277 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
11278 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
11279 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
11280 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
11281 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
11282 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
11283 + * POSSIBILITY OF SUCH DAMAGE.
11284 + */
11285 +#include "../../fsl-mc/include/mc-sys.h"
11286 +#include "../../fsl-mc/include/mc-cmd.h"
11287 +#include "dpsw.h"
11288 +#include "dpsw-cmd.h"
11289 +
11290 +static void build_if_id_bitmap(__le64 *bmap,
11291 + const u16 *id,
11292 + const u16 num_ifs) {
11293 + int i;
11294 +
11295 + for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++)
11296 + bmap[id[i] / 64] = dpsw_set_bit(bmap[id[i] / 64],
11297 + (id[i] % 64),
11298 + 1);
11299 +}
11300 +
11301 +static void read_if_id_bitmap(u16 *if_id,
11302 + u16 *num_ifs,
11303 + __le64 *bmap) {
11304 + int bitmap[DPSW_MAX_IF] = { 0 };
11305 + int i, j = 0;
11306 + int count = 0;
11307 +
11308 + for (i = 0; i < DPSW_MAX_IF; i++) {
11309 + bitmap[i] = dpsw_get_bit(le64_to_cpu(bmap[i / 64]),
11310 + i % 64);
11311 + count += bitmap[i];
11312 + }
11313 +
11314 + *num_ifs = (u16)count;
11315 +
11316 + for (i = 0; (i < DPSW_MAX_IF) && (j < count); i++) {
11317 + if (bitmap[i]) {
11318 + if_id[j] = (u16)i;
11319 + j++;
11320 + }
11321 + }
11322 +}
11323 +
11324 +/**
11325 + * dpsw_open() - Open a control session for the specified object
11326 + * @mc_io: Pointer to MC portal's I/O object
11327 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11328 + * @dpsw_id: DPSW unique ID
11329 + * @token: Returned token; use in subsequent API calls
11330 + *
11331 + * This function can be used to open a control session for an
11332 + * already created object; an object may have been declared in
11333 + * the DPL or by calling the dpsw_create() function.
11334 + * This function returns a unique authentication token,
11335 + * associated with the specific object ID and the specific MC
11336 + * portal; this token must be used in all subsequent commands for
11337 + * this specific object
11338 + *
11339 + * Return: '0' on Success; Error code otherwise.
11340 + */
11341 +int dpsw_open(struct fsl_mc_io *mc_io,
11342 + u32 cmd_flags,
11343 + int dpsw_id,
11344 + u16 *token)
11345 +{
11346 + struct mc_command cmd = { 0 };
11347 + struct dpsw_cmd_open *cmd_params;
11348 + int err;
11349 +
11350 + /* prepare command */
11351 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN,
11352 + cmd_flags,
11353 + 0);
11354 + cmd_params = (struct dpsw_cmd_open *)cmd.params;
11355 + cmd_params->dpsw_id = cpu_to_le32(dpsw_id);
11356 +
11357 + /* send command to mc*/
11358 + err = mc_send_command(mc_io, &cmd);
11359 + if (err)
11360 + return err;
11361 +
11362 + /* retrieve response parameters */
11363 + *token = mc_cmd_hdr_read_token(&cmd);
11364 +
11365 + return 0;
11366 +}
11367 +
11368 +/**
11369 + * dpsw_close() - Close the control session of the object
11370 + * @mc_io: Pointer to MC portal's I/O object
11371 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11372 + * @token: Token of DPSW object
11373 + *
11374 + * After this function is called, no further operations are
11375 + * allowed on the object without opening a new control session.
11376 + *
11377 + * Return: '0' on Success; Error code otherwise.
11378 + */
11379 +int dpsw_close(struct fsl_mc_io *mc_io,
11380 + u32 cmd_flags,
11381 + u16 token)
11382 +{
11383 + struct mc_command cmd = { 0 };
11384 +
11385 + /* prepare command */
11386 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE,
11387 + cmd_flags,
11388 + token);
11389 +
11390 + /* send command to mc*/
11391 + return mc_send_command(mc_io, &cmd);
11392 +}
11393 +
11394 +/**
11395 + * dpsw_enable() - Enable DPSW functionality
11396 + * @mc_io: Pointer to MC portal's I/O object
11397 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11398 + * @token: Token of DPSW object
11399 + *
11400 + * Return: Completion status. '0' on Success; Error code otherwise.
11401 + */
11402 +int dpsw_enable(struct fsl_mc_io *mc_io,
11403 + u32 cmd_flags,
11404 + u16 token)
11405 +{
11406 + struct mc_command cmd = { 0 };
11407 +
11408 + /* prepare command */
11409 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE,
11410 + cmd_flags,
11411 + token);
11412 +
11413 + /* send command to mc*/
11414 + return mc_send_command(mc_io, &cmd);
11415 +}
11416 +
11417 +/**
11418 + * dpsw_disable() - Disable DPSW functionality
11419 + * @mc_io: Pointer to MC portal's I/O object
11420 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11421 + * @token: Token of DPSW object
11422 + *
11423 + * Return: Completion status. '0' on Success; Error code otherwise.
11424 + */
11425 +int dpsw_disable(struct fsl_mc_io *mc_io,
11426 + u32 cmd_flags,
11427 + u16 token)
11428 +{
11429 + struct mc_command cmd = { 0 };
11430 +
11431 + /* prepare command */
11432 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE,
11433 + cmd_flags,
11434 + token);
11435 +
11436 + /* send command to mc*/
11437 + return mc_send_command(mc_io, &cmd);
11438 +}
11439 +
11440 +/**
11441 + * dpsw_is_enabled() - Check if the DPSW is enabled
11442 + *
11443 + * @mc_io: Pointer to MC portal's I/O object
11444 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11445 + * @token: Token of DPSW object
11446 + * @en: Returns '1' if object is enabled; '0' otherwise
11447 + *
11448 + * Return: '0' on Success; Error code otherwise
11449 + */
11450 +int dpsw_is_enabled(struct fsl_mc_io *mc_io,
11451 + u32 cmd_flags,
11452 + u16 token,
11453 + int *en)
11454 +{
11455 + struct mc_command cmd = { 0 };
11456 + struct dpsw_rsp_is_enabled *cmd_rsp;
11457 + int err;
11458 +
11459 + /* prepare command */
11460 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IS_ENABLED, cmd_flags,
11461 + token);
11462 +
11463 + /* send command to mc*/
11464 + err = mc_send_command(mc_io, &cmd);
11465 + if (err)
11466 + return err;
11467 +
11468 + /* retrieve response parameters */
11469 + cmd_rsp = (struct dpsw_rsp_is_enabled *)cmd.params;
11470 + *en = dpsw_get_field(cmd_rsp->enabled, ENABLE);
11471 +
11472 + return 0;
11473 +}
11474 +
11475 +/**
11476 + * dpsw_reset() - Reset the DPSW, returns the object to initial state.
11477 + * @mc_io: Pointer to MC portal's I/O object
11478 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11479 + * @token: Token of DPSW object
11480 + *
11481 + * Return: '0' on Success; Error code otherwise.
11482 + */
11483 +int dpsw_reset(struct fsl_mc_io *mc_io,
11484 + u32 cmd_flags,
11485 + u16 token)
11486 +{
11487 + struct mc_command cmd = { 0 };
11488 +
11489 + /* prepare command */
11490 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET,
11491 + cmd_flags,
11492 + token);
11493 +
11494 + /* send command to mc*/
11495 + return mc_send_command(mc_io, &cmd);
11496 +}
11497 +
11498 +/**
11499 + * dpsw_set_irq() - Set IRQ information for the DPSW to trigger an interrupt.
11500 + * @mc_io: Pointer to MC portal's I/O object
11501 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11502 + * @token: Token of DPSW object
11503 + * @irq_index: Identifies the interrupt index to configure
11504 + * @irq_cfg: IRQ configuration
11505 + *
11506 + * Return: '0' on Success; Error code otherwise.
11507 + */
11508 +int dpsw_set_irq(struct fsl_mc_io *mc_io,
11509 + u32 cmd_flags,
11510 + u16 token,
11511 + u8 irq_index,
11512 + struct dpsw_irq_cfg *irq_cfg)
11513 +{
11514 + struct mc_command cmd = { 0 };
11515 + struct dpsw_cmd_set_irq *cmd_params;
11516 +
11517 + /* prepare command */
11518 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ,
11519 + cmd_flags,
11520 + token);
11521 + cmd_params = (struct dpsw_cmd_set_irq *)cmd.params;
11522 + cmd_params->irq_index = irq_index;
11523 + cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
11524 + cmd_params->irq_addr = cpu_to_le64(irq_cfg->addr);
11525 + cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
11526 +
11527 + /* send command to mc*/
11528 + return mc_send_command(mc_io, &cmd);
11529 +}
11530 +
11531 +/**
11532 + * dpsw_get_irq() - Get IRQ information from the DPSW
11533 + *
11534 + * @mc_io: Pointer to MC portal's I/O object
11535 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11536 + * @token: Token of DPSW object
11537 + * @irq_index: The interrupt index to configure
11538 + * @type: Interrupt type: 0 represents message interrupt
11539 + * type (both irq_addr and irq_val are valid)
11540 + * @irq_cfg: IRQ attributes
11541 + *
11542 + * Return: '0' on Success; Error code otherwise.
11543 + */
11544 +int dpsw_get_irq(struct fsl_mc_io *mc_io,
11545 + u32 cmd_flags,
11546 + u16 token,
11547 + u8 irq_index,
11548 + int *type,
11549 + struct dpsw_irq_cfg *irq_cfg)
11550 +{
11551 + struct mc_command cmd = { 0 };
11552 + struct dpsw_cmd_get_irq *cmd_params;
11553 + struct dpsw_rsp_get_irq *rsp_params;
11554 + int err;
11555 +
11556 + /* prepare command */
11557 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ,
11558 + cmd_flags,
11559 + token);
11560 + cmd_params = (struct dpsw_cmd_get_irq *)cmd.params;
11561 + cmd_params->irq_index = irq_index;
11562 +
11563 + /* send command to mc*/
11564 + err = mc_send_command(mc_io, &cmd);
11565 + if (err)
11566 + return err;
11567 +
11568 + /* retrieve response parameters */
11569 + rsp_params = (struct dpsw_rsp_get_irq *)cmd.params;
11570 + irq_cfg->addr = le64_to_cpu(rsp_params->irq_addr);
11571 + irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
11572 + irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
11573 + *type = le32_to_cpu(rsp_params->irq_type);
11574 +
11575 + return 0;
11576 +}
11577 +
11578 +/**
11579 + * dpsw_set_irq_enable() - Set overall interrupt state.
11580 + * @mc_io: Pointer to MC portal's I/O object
11581 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11582 + * @token: Token of DPCI object
11583 + * @irq_index: The interrupt index to configure
11584 + * @en: Interrupt state - enable = 1, disable = 0
11585 + *
11586 + * Allows GPP software to control when interrupts are generated.
11587 + * Each interrupt can have up to 32 causes. The enable/disable control's the
11588 + * overall interrupt state. if the interrupt is disabled no causes will cause
11589 + * an interrupt
11590 + *
11591 + * Return: '0' on Success; Error code otherwise.
11592 + */
11593 +int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
11594 + u32 cmd_flags,
11595 + u16 token,
11596 + u8 irq_index,
11597 + u8 en)
11598 +{
11599 + struct mc_command cmd = { 0 };
11600 + struct dpsw_cmd_set_irq_enable *cmd_params;
11601 +
11602 + /* prepare command */
11603 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE,
11604 + cmd_flags,
11605 + token);
11606 + cmd_params = (struct dpsw_cmd_set_irq_enable *)cmd.params;
11607 + dpsw_set_field(cmd_params->enable_state, ENABLE, en);
11608 + cmd_params->irq_index = irq_index;
11609 +
11610 + /* send command to mc*/
11611 + return mc_send_command(mc_io, &cmd);
11612 +}
11613 +
11614 +/**
11615 + * dpsw_set_irq_mask() - Set interrupt mask.
11616 + * @mc_io: Pointer to MC portal's I/O object
11617 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11618 + * @token: Token of DPCI object
11619 + * @irq_index: The interrupt index to configure
11620 + * @mask: Event mask to trigger interrupt;
11621 + * each bit:
11622 + * 0 = ignore event
11623 + * 1 = consider event for asserting IRQ
11624 + *
11625 + * Every interrupt can have up to 32 causes and the interrupt model supports
11626 + * masking/unmasking each cause independently
11627 + *
11628 + * Return: '0' on Success; Error code otherwise.
11629 + */
11630 +int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
11631 + u32 cmd_flags,
11632 + u16 token,
11633 + u8 irq_index,
11634 + u32 mask)
11635 +{
11636 + struct mc_command cmd = { 0 };
11637 + struct dpsw_cmd_set_irq_mask *cmd_params;
11638 +
11639 + /* prepare command */
11640 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK,
11641 + cmd_flags,
11642 + token);
11643 + cmd_params = (struct dpsw_cmd_set_irq_mask *)cmd.params;
11644 + cmd_params->mask = cpu_to_le32(mask);
11645 + cmd_params->irq_index = irq_index;
11646 +
11647 + /* send command to mc*/
11648 + return mc_send_command(mc_io, &cmd);
11649 +}
11650 +
11651 +/**
11652 + * dpsw_get_irq_status() - Get the current status of any pending interrupts
11653 + * @mc_io: Pointer to MC portal's I/O object
11654 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11655 + * @token: Token of DPSW object
11656 + * @irq_index: The interrupt index to configure
11657 + * @status: Returned interrupts status - one bit per cause:
11658 + * 0 = no interrupt pending
11659 + * 1 = interrupt pending
11660 + *
11661 + * Return: '0' on Success; Error code otherwise.
11662 + */
11663 +int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
11664 + u32 cmd_flags,
11665 + u16 token,
11666 + u8 irq_index,
11667 + u32 *status)
11668 +{
11669 + struct mc_command cmd = { 0 };
11670 + struct dpsw_cmd_get_irq_status *cmd_params;
11671 + struct dpsw_rsp_get_irq_status *rsp_params;
11672 + int err;
11673 +
11674 + /* prepare command */
11675 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS,
11676 + cmd_flags,
11677 + token);
11678 + cmd_params = (struct dpsw_cmd_get_irq_status *)cmd.params;
11679 + cmd_params->status = cpu_to_le32(*status);
11680 + cmd_params->irq_index = irq_index;
11681 +
11682 + /* send command to mc*/
11683 + err = mc_send_command(mc_io, &cmd);
11684 + if (err)
11685 + return err;
11686 +
11687 + /* retrieve response parameters */
11688 + rsp_params = (struct dpsw_rsp_get_irq_status *)cmd.params;
11689 + *status = le32_to_cpu(rsp_params->status);
11690 +
11691 + return 0;
11692 +}
11693 +
11694 +/**
11695 + * dpsw_clear_irq_status() - Clear a pending interrupt's status
11696 + * @mc_io: Pointer to MC portal's I/O object
11697 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11698 + * @token: Token of DPCI object
11699 + * @irq_index: The interrupt index to configure
11700 + * @status: bits to clear (W1C) - one bit per cause:
11701 + * 0 = don't change
11702 + * 1 = clear status bit
11703 + *
11704 + * Return: '0' on Success; Error code otherwise.
11705 + */
11706 +int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
11707 + u32 cmd_flags,
11708 + u16 token,
11709 + u8 irq_index,
11710 + u32 status)
11711 +{
11712 + struct mc_command cmd = { 0 };
11713 + struct dpsw_cmd_clear_irq_status *cmd_params;
11714 +
11715 + /* prepare command */
11716 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS,
11717 + cmd_flags,
11718 + token);
11719 + cmd_params = (struct dpsw_cmd_clear_irq_status *)cmd.params;
11720 + cmd_params->status = cpu_to_le32(status);
11721 + cmd_params->irq_index = irq_index;
11722 +
11723 + /* send command to mc*/
11724 + return mc_send_command(mc_io, &cmd);
11725 +}
11726 +
11727 +/**
11728 + * dpsw_get_attributes() - Retrieve DPSW attributes
11729 + * @mc_io: Pointer to MC portal's I/O object
11730 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11731 + * @token: Token of DPSW object
11732 + * @attr: Returned DPSW attributes
11733 + *
11734 + * Return: Completion status. '0' on Success; Error code otherwise.
11735 + */
11736 +int dpsw_get_attributes(struct fsl_mc_io *mc_io,
11737 + u32 cmd_flags,
11738 + u16 token,
11739 + struct dpsw_attr *attr)
11740 +{
11741 + struct mc_command cmd = { 0 };
11742 + struct dpsw_rsp_get_attr *rsp_params;
11743 + int err;
11744 +
11745 + /* prepare command */
11746 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR,
11747 + cmd_flags,
11748 + token);
11749 +
11750 + /* send command to mc*/
11751 + err = mc_send_command(mc_io, &cmd);
11752 + if (err)
11753 + return err;
11754 +
11755 + /* retrieve response parameters */
11756 + rsp_params = (struct dpsw_rsp_get_attr *)cmd.params;
11757 + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
11758 + attr->max_fdbs = rsp_params->max_fdbs;
11759 + attr->num_fdbs = rsp_params->num_fdbs;
11760 + attr->max_vlans = le16_to_cpu(rsp_params->max_vlans);
11761 + attr->num_vlans = le16_to_cpu(rsp_params->num_vlans);
11762 + attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
11763 + attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
11764 + attr->id = le32_to_cpu(rsp_params->dpsw_id);
11765 + attr->mem_size = le16_to_cpu(rsp_params->mem_size);
11766 + attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
11767 + attr->max_meters_per_if = rsp_params->max_meters_per_if;
11768 + attr->options = le64_to_cpu(rsp_params->options);
11769 + attr->component_type = dpsw_get_field(rsp_params->component_type,
11770 + COMPONENT_TYPE);
11771 +
11772 + return 0;
11773 +}
11774 +
11775 +/**
11776 + * dpsw_set_reflection_if() - Set target interface for reflected interfaces.
11777 + * @mc_io: Pointer to MC portal's I/O object
11778 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11779 + * @token: Token of DPSW object
11780 + * @if_id: Interface Id
11781 + *
11782 + * Only one reflection receive interface is allowed per switch
11783 + *
11784 + * Return: Completion status. '0' on Success; Error code otherwise.
11785 + */
11786 +int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
11787 + u32 cmd_flags,
11788 + u16 token,
11789 + u16 if_id)
11790 +{
11791 + struct mc_command cmd = { 0 };
11792 + struct dpsw_cmd_set_reflection_if *cmd_params;
11793 +
11794 + /* prepare command */
11795 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF,
11796 + cmd_flags,
11797 + token);
11798 + cmd_params = (struct dpsw_cmd_set_reflection_if *)cmd.params;
11799 + cmd_params->if_id = cpu_to_le16(if_id);
11800 +
11801 + /* send command to mc*/
11802 + return mc_send_command(mc_io, &cmd);
11803 +}
11804 +
11805 +/**
11806 + * dpsw_if_set_link_cfg() - Set the link configuration.
11807 + * @mc_io: Pointer to MC portal's I/O object
11808 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11809 + * @token: Token of DPSW object
11810 + * @if_id: Interface id
11811 + * @cfg: Link configuration
11812 + *
11813 + * Return: '0' on Success; Error code otherwise.
11814 + */
11815 +int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
11816 + u32 cmd_flags,
11817 + u16 token,
11818 + u16 if_id,
11819 + struct dpsw_link_cfg *cfg)
11820 +{
11821 + struct mc_command cmd = { 0 };
11822 + struct dpsw_cmd_if_set_link_cfg *cmd_params;
11823 +
11824 + /* prepare command */
11825 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG,
11826 + cmd_flags,
11827 + token);
11828 + cmd_params = (struct dpsw_cmd_if_set_link_cfg *)cmd.params;
11829 + cmd_params->if_id = cpu_to_le16(if_id);
11830 + cmd_params->rate = cpu_to_le32(cfg->rate);
11831 + cmd_params->options = cpu_to_le64(cfg->options);
11832 +
11833 + /* send command to mc*/
11834 + return mc_send_command(mc_io, &cmd);
11835 +}
11836 +
11837 +/**
11838 + * dpsw_if_get_link_state - Return the link state
11839 + * @mc_io: Pointer to MC portal's I/O object
11840 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11841 + * @token: Token of DPSW object
11842 + * @if_id: Interface id
11843 + * @state: Link state 1 - linkup, 0 - link down or disconnected
11844 + *
11845 + * @Return '0' on Success; Error code otherwise.
11846 + */
11847 +int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
11848 + u32 cmd_flags,
11849 + u16 token,
11850 + u16 if_id,
11851 + struct dpsw_link_state *state)
11852 +{
11853 + struct mc_command cmd = { 0 };
11854 + struct dpsw_cmd_if_get_link_state *cmd_params;
11855 + struct dpsw_rsp_if_get_link_state *rsp_params;
11856 + int err;
11857 +
11858 + /* prepare command */
11859 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE,
11860 + cmd_flags,
11861 + token);
11862 + cmd_params = (struct dpsw_cmd_if_get_link_state *)cmd.params;
11863 + cmd_params->if_id = cpu_to_le16(if_id);
11864 +
11865 + /* send command to mc*/
11866 + err = mc_send_command(mc_io, &cmd);
11867 + if (err)
11868 + return err;
11869 +
11870 + /* retrieve response parameters */
11871 + rsp_params = (struct dpsw_rsp_if_get_link_state *)cmd.params;
11872 + state->rate = le32_to_cpu(rsp_params->rate);
11873 + state->options = le64_to_cpu(rsp_params->options);
11874 + state->up = dpsw_get_field(rsp_params->up, UP);
11875 +
11876 + return 0;
11877 +}
11878 +
11879 +/**
11880 + * dpsw_if_set_flooding() - Enable Disable flooding for particular interface
11881 + * @mc_io: Pointer to MC portal's I/O object
11882 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11883 + * @token: Token of DPSW object
11884 + * @if_id: Interface Identifier
11885 + * @en: 1 - enable, 0 - disable
11886 + *
11887 + * Return: Completion status. '0' on Success; Error code otherwise.
11888 + */
11889 +int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
11890 + u32 cmd_flags,
11891 + u16 token,
11892 + u16 if_id,
11893 + int en)
11894 +{
11895 + struct mc_command cmd = { 0 };
11896 + struct dpsw_cmd_if_set_flooding *cmd_params;
11897 +
11898 + /* prepare command */
11899 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING,
11900 + cmd_flags,
11901 + token);
11902 + cmd_params = (struct dpsw_cmd_if_set_flooding *)cmd.params;
11903 + cmd_params->if_id = cpu_to_le16(if_id);
11904 + dpsw_set_field(cmd_params->enable, ENABLE, en);
11905 +
11906 + /* send command to mc*/
11907 + return mc_send_command(mc_io, &cmd);
11908 +}
11909 +
11910 +/**
11911 + * dpsw_if_set_broadcast() - Enable/disable broadcast for particular interface
11912 + * @mc_io: Pointer to MC portal's I/O object
11913 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11914 + * @token: Token of DPSW object
11915 + * @if_id: Interface Identifier
11916 + * @en: 1 - enable, 0 - disable
11917 + *
11918 + * Return: Completion status. '0' on Success; Error code otherwise.
11919 + */
11920 +int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
11921 + u32 cmd_flags,
11922 + u16 token,
11923 + u16 if_id,
11924 + int en)
11925 +{
11926 + struct mc_command cmd = { 0 };
11927 + struct dpsw_cmd_if_set_broadcast *cmd_params;
11928 +
11929 + /* prepare command */
11930 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST,
11931 + cmd_flags,
11932 + token);
11933 + cmd_params = (struct dpsw_cmd_if_set_broadcast *)cmd.params;
11934 + cmd_params->if_id = cpu_to_le16(if_id);
11935 + dpsw_set_field(cmd_params->enable, ENABLE, en);
11936 +
11937 + /* send command to mc*/
11938 + return mc_send_command(mc_io, &cmd);
11939 +}
11940 +
11941 +/**
11942 + * dpsw_if_set_multicast() - Enable/disable multicast for particular interface
11943 + * @mc_io: Pointer to MC portal's I/O object
11944 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11945 + * @token: Token of DPSW object
11946 + * @if_id: Interface Identifier
11947 + * @en: 1 - enable, 0 - disable
11948 + *
11949 + * Return: Completion status. '0' on Success; Error code otherwise.
11950 + */
11951 +int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
11952 + u32 cmd_flags,
11953 + u16 token,
11954 + u16 if_id,
11955 + int en)
11956 +{
11957 + struct mc_command cmd = { 0 };
11958 + struct dpsw_cmd_if_set_multicast *cmd_params;
11959 +
11960 + /* prepare command */
11961 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MULTICAST,
11962 + cmd_flags,
11963 + token);
11964 + cmd_params = (struct dpsw_cmd_if_set_multicast *)cmd.params;
11965 + cmd_params->if_id = cpu_to_le16(if_id);
11966 + dpsw_set_field(cmd_params->enable, ENABLE, en);
11967 +
11968 + /* send command to mc*/
11969 + return mc_send_command(mc_io, &cmd);
11970 +}
11971 +
11972 +/**
11973 + * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI)
11974 + * @mc_io: Pointer to MC portal's I/O object
11975 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
11976 + * @token: Token of DPSW object
11977 + * @if_id: Interface Identifier
11978 + * @cfg: Tag Control Information Configuration
11979 + *
11980 + * Return: Completion status. '0' on Success; Error code otherwise.
11981 + */
11982 +int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
11983 + u32 cmd_flags,
11984 + u16 token,
11985 + u16 if_id,
11986 + const struct dpsw_tci_cfg *cfg)
11987 +{
11988 + struct mc_command cmd = { 0 };
11989 + struct dpsw_cmd_if_set_tci *cmd_params;
11990 +
11991 + /* prepare command */
11992 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI,
11993 + cmd_flags,
11994 + token);
11995 + cmd_params = (struct dpsw_cmd_if_set_tci *)cmd.params;
11996 + cmd_params->if_id = cpu_to_le16(if_id);
11997 + dpsw_set_field(cmd_params->conf, VLAN_ID, cfg->vlan_id);
11998 + dpsw_set_field(cmd_params->conf, DEI, cfg->dei);
11999 + dpsw_set_field(cmd_params->conf, PCP, cfg->pcp);
12000 + cmd_params->conf = cpu_to_le16(cmd_params->conf);
12001 +
12002 + /* send command to mc*/
12003 + return mc_send_command(mc_io, &cmd);
12004 +}
12005 +
12006 +/**
12007 + * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI)
12008 + * @mc_io: Pointer to MC portal's I/O object
12009 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12010 + * @token: Token of DPSW object
12011 + * @if_id: Interface Identifier
12012 + * @cfg: Tag Control Information Configuration
12013 + *
12014 + * Return: Completion status. '0' on Success; Error code otherwise.
12015 + */
12016 +int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
12017 + u32 cmd_flags,
12018 + u16 token,
12019 + u16 if_id,
12020 + struct dpsw_tci_cfg *cfg)
12021 +{
12022 + struct mc_command cmd = { 0 };
12023 + struct dpsw_cmd_if_get_tci *cmd_params;
12024 + struct dpsw_rsp_if_get_tci *rsp_params;
12025 + int err;
12026 +
12027 + /* prepare command */
12028 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI,
12029 + cmd_flags,
12030 + token);
12031 + cmd_params = (struct dpsw_cmd_if_get_tci *)cmd.params;
12032 + cmd_params->if_id = cpu_to_le16(if_id);
12033 +
12034 + /* send command to mc*/
12035 + err = mc_send_command(mc_io, &cmd);
12036 + if (err)
12037 + return err;
12038 +
12039 + /* retrieve response parameters */
12040 + rsp_params = (struct dpsw_rsp_if_get_tci *)cmd.params;
12041 + cfg->pcp = rsp_params->pcp;
12042 + cfg->dei = rsp_params->dei;
12043 + cfg->vlan_id = le16_to_cpu(rsp_params->vlan_id);
12044 +
12045 + return 0;
12046 +}
12047 +
12048 +/**
12049 + * dpsw_if_set_stp() - Function sets Spanning Tree Protocol (STP) state.
12050 + * @mc_io: Pointer to MC portal's I/O object
12051 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12052 + * @token: Token of DPSW object
12053 + * @if_id: Interface Identifier
12054 + * @cfg: STP State configuration parameters
12055 + *
12056 + * The following STP states are supported -
12057 + * blocking, listening, learning, forwarding and disabled.
12058 + *
12059 + * Return: Completion status. '0' on Success; Error code otherwise.
12060 + */
12061 +int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
12062 + u32 cmd_flags,
12063 + u16 token,
12064 + u16 if_id,
12065 + const struct dpsw_stp_cfg *cfg)
12066 +{
12067 + struct mc_command cmd = { 0 };
12068 + struct dpsw_cmd_if_set_stp *cmd_params;
12069 +
12070 + /* prepare command */
12071 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP,
12072 + cmd_flags,
12073 + token);
12074 + cmd_params = (struct dpsw_cmd_if_set_stp *)cmd.params;
12075 + cmd_params->if_id = cpu_to_le16(if_id);
12076 + cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
12077 + dpsw_set_field(cmd_params->state, STATE, cfg->state);
12078 +
12079 + /* send command to mc*/
12080 + return mc_send_command(mc_io, &cmd);
12081 +}
12082 +
12083 +/**
12084 + * dpsw_if_set_accepted_frames()
12085 + * @mc_io: Pointer to MC portal's I/O object
12086 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12087 + * @token: Token of DPSW object
12088 + * @if_id: Interface Identifier
12089 + * @cfg: Frame types configuration
12090 + *
12091 + * When is admit_only_vlan_tagged- the device will discard untagged
12092 + * frames or Priority-Tagged frames received on this interface.
12093 + * When admit_only_untagged- untagged frames or Priority-Tagged
12094 + * frames received on this interface will be accepted and assigned
12095 + * to a VID based on the PVID and VID Set for this interface.
12096 + * When admit_all - the device will accept VLAN tagged, untagged
12097 + * and priority tagged frames.
12098 + * The default is admit_all
12099 + *
12100 + * Return: Completion status. '0' on Success; Error code otherwise.
12101 + */
12102 +int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
12103 + u32 cmd_flags,
12104 + u16 token,
12105 + u16 if_id,
12106 + const struct dpsw_accepted_frames_cfg *cfg)
12107 +{
12108 + struct mc_command cmd = { 0 };
12109 + struct dpsw_cmd_if_set_accepted_frames *cmd_params;
12110 +
12111 + /* prepare command */
12112 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_ACCEPTED_FRAMES,
12113 + cmd_flags,
12114 + token);
12115 + cmd_params = (struct dpsw_cmd_if_set_accepted_frames *)cmd.params;
12116 + cmd_params->if_id = cpu_to_le16(if_id);
12117 + dpsw_set_field(cmd_params->unaccepted, FRAME_TYPE, cfg->type);
12118 + dpsw_set_field(cmd_params->unaccepted, UNACCEPTED_ACT,
12119 + cfg->unaccept_act);
12120 +
12121 + /* send command to mc*/
12122 + return mc_send_command(mc_io, &cmd);
12123 +}
12124 +
12125 +/**
12126 + * dpsw_if_set_accept_all_vlan()
12127 + * @mc_io: Pointer to MC portal's I/O object
12128 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12129 + * @token: Token of DPSW object
12130 + * @if_id: Interface Identifier
12131 + * @accept_all: Accept or drop frames having different VLAN
12132 + *
12133 + * When this is accept (FALSE), the device will discard incoming
12134 + * frames for VLANs that do not include this interface in its
12135 + * Member set. When accept (TRUE), the interface will accept all incoming frames
12136 + *
12137 + * Return: Completion status. '0' on Success; Error code otherwise.
12138 + */
12139 +int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
12140 + u32 cmd_flags,
12141 + u16 token,
12142 + u16 if_id,
12143 + int accept_all)
12144 +{
12145 + struct mc_command cmd = { 0 };
12146 + struct dpsw_cmd_if_set_accept_all_vlan *cmd_params;
12147 +
12148 + /* prepare command */
12149 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN,
12150 + cmd_flags,
12151 + token);
12152 + cmd_params = (struct dpsw_cmd_if_set_accept_all_vlan *)cmd.params;
12153 + cmd_params->if_id = cpu_to_le16(if_id);
12154 + dpsw_set_field(cmd_params->accept_all, ACCEPT_ALL, accept_all);
12155 +
12156 + /* send command to mc*/
12157 + return mc_send_command(mc_io, &cmd);
12158 +}
12159 +
12160 +/**
12161 + * dpsw_if_get_counter() - Get specific counter of particular interface
12162 + * @mc_io: Pointer to MC portal's I/O object
12163 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12164 + * @token: Token of DPSW object
12165 + * @if_id: Interface Identifier
12166 + * @type: Counter type
12167 + * @counter: return value
12168 + *
12169 + * Return: Completion status. '0' on Success; Error code otherwise.
12170 + */
12171 +int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
12172 + u32 cmd_flags,
12173 + u16 token,
12174 + u16 if_id,
12175 + enum dpsw_counter type,
12176 + u64 *counter)
12177 +{
12178 + struct mc_command cmd = { 0 };
12179 + struct dpsw_cmd_if_get_counter *cmd_params;
12180 + struct dpsw_rsp_if_get_counter *rsp_params;
12181 + int err;
12182 +
12183 + /* prepare command */
12184 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER,
12185 + cmd_flags,
12186 + token);
12187 + cmd_params = (struct dpsw_cmd_if_get_counter *)cmd.params;
12188 + cmd_params->if_id = cpu_to_le16(if_id);
12189 + dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
12190 +
12191 + /* send command to mc*/
12192 + err = mc_send_command(mc_io, &cmd);
12193 + if (err)
12194 + return err;
12195 +
12196 + /* retrieve response parameters */
12197 + rsp_params = (struct dpsw_rsp_if_get_counter *)cmd.params;
12198 + *counter = le64_to_cpu(rsp_params->counter);
12199 +
12200 + return 0;
12201 +}
12202 +
12203 +/**
12204 + * dpsw_if_set_counter() - Set specific counter of particular interface
12205 + * @mc_io: Pointer to MC portal's I/O object
12206 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12207 + * @token: Token of DPSW object
12208 + * @if_id: Interface Identifier
12209 + * @type: Counter type
12210 + * @counter: New counter value
12211 + *
12212 + * Return: Completion status. '0' on Success; Error code otherwise.
12213 + */
12214 +int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
12215 + u32 cmd_flags,
12216 + u16 token,
12217 + u16 if_id,
12218 + enum dpsw_counter type,
12219 + u64 counter)
12220 +{
12221 + struct mc_command cmd = { 0 };
12222 + struct dpsw_cmd_if_set_counter *cmd_params;
12223 +
12224 + /* prepare command */
12225 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_COUNTER,
12226 + cmd_flags,
12227 + token);
12228 + cmd_params = (struct dpsw_cmd_if_set_counter *)cmd.params;
12229 + cmd_params->if_id = cpu_to_le16(if_id);
12230 + cmd_params->counter = cpu_to_le64(counter);
12231 + dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
12232 +
12233 + /* send command to mc*/
12234 + return mc_send_command(mc_io, &cmd);
12235 +}
12236 +
12237 +/**
12238 + * dpsw_if_set_tx_selection() - Function is used for mapping variety
12239 + * of frame fields
12240 + * @mc_io: Pointer to MC portal's I/O object
12241 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12242 + * @token: Token of DPSW object
12243 + * @if_id: Interface Identifier
12244 + * @cfg: Traffic class mapping configuration
12245 + *
12246 + * Function is used for mapping variety of frame fields (DSCP, PCP)
12247 + * to Traffic Class. Traffic class is a number
12248 + * in the range from 0 to 7
12249 + *
12250 + * Return: Completion status. '0' on Success; Error code otherwise.
12251 + */
12252 +int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
12253 + u32 cmd_flags,
12254 + u16 token,
12255 + u16 if_id,
12256 + const struct dpsw_tx_selection_cfg *cfg)
12257 +{
12258 + struct dpsw_cmd_if_set_tx_selection *cmd_params;
12259 + struct mc_command cmd = { 0 };
12260 + int i;
12261 +
12262 + /* prepare command */
12263 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TX_SELECTION,
12264 + cmd_flags,
12265 + token);
12266 + cmd_params = (struct dpsw_cmd_if_set_tx_selection *)cmd.params;
12267 + cmd_params->if_id = cpu_to_le16(if_id);
12268 + dpsw_set_field(cmd_params->priority_selector, PRIORITY_SELECTOR,
12269 + cfg->priority_selector);
12270 +
12271 + for (i = 0; i < 8; i++) {
12272 + cmd_params->tc_sched[i].delta_bandwidth =
12273 + cpu_to_le16(cfg->tc_sched[i].delta_bandwidth);
12274 + dpsw_set_field(cmd_params->tc_sched[i].mode, SCHED_MODE,
12275 + cfg->tc_sched[i].mode);
12276 + cmd_params->tc_id[i] = cfg->tc_id[i];
12277 + }
12278 +
12279 + /* send command to mc*/
12280 + return mc_send_command(mc_io, &cmd);
12281 +}
12282 +
12283 +/**
12284 + * dpsw_if_add_reflection() - Identify interface to be reflected or mirrored
12285 + * @mc_io: Pointer to MC portal's I/O object
12286 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12287 + * @token: Token of DPSW object
12288 + * @if_id: Interface Identifier
12289 + * @cfg: Reflection configuration
12290 + *
12291 + * Return: Completion status. '0' on Success; Error code otherwise.
12292 + */
12293 +int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
12294 + u32 cmd_flags,
12295 + u16 token,
12296 + u16 if_id,
12297 + const struct dpsw_reflection_cfg *cfg)
12298 +{
12299 + struct mc_command cmd = { 0 };
12300 + struct dpsw_cmd_if_reflection *cmd_params;
12301 +
12302 + /* prepare command */
12303 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION,
12304 + cmd_flags,
12305 + token);
12306 + cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
12307 + cmd_params->if_id = cpu_to_le16(if_id);
12308 + cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
12309 + dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
12310 +
12311 + /* send command to mc*/
12312 + return mc_send_command(mc_io, &cmd);
12313 +}
12314 +
12315 +/**
12316 + * dpsw_if_remove_reflection() - Remove interface to be reflected or mirrored
12317 + * @mc_io: Pointer to MC portal's I/O object
12318 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12319 + * @token: Token of DPSW object
12320 + * @if_id: Interface Identifier
12321 + * @cfg: Reflection configuration
12322 + *
12323 + * Return: Completion status. '0' on Success; Error code otherwise.
12324 + */
12325 +int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
12326 + u32 cmd_flags,
12327 + u16 token,
12328 + u16 if_id,
12329 + const struct dpsw_reflection_cfg *cfg)
12330 +{
12331 + struct mc_command cmd = { 0 };
12332 + struct dpsw_cmd_if_reflection *cmd_params;
12333 +
12334 + /* prepare command */
12335 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION,
12336 + cmd_flags,
12337 + token);
12338 + cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
12339 + cmd_params->if_id = cpu_to_le16(if_id);
12340 + cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
12341 + dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
12342 +
12343 + /* send command to mc*/
12344 + return mc_send_command(mc_io, &cmd);
12345 +}
12346 +
12347 +/**
12348 + * dpsw_if_set_flooding_metering() - Set flooding metering
12349 + * @mc_io: Pointer to MC portal's I/O object
12350 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12351 + * @token: Token of DPSW object
12352 + * @if_id: Interface Identifier
12353 + * @cfg: Metering parameters
12354 + *
12355 + * Return: Completion status. '0' on Success; Error code otherwise.
12356 + */
12357 +int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
12358 + u32 cmd_flags,
12359 + u16 token,
12360 + u16 if_id,
12361 + const struct dpsw_metering_cfg *cfg)
12362 +{
12363 + struct mc_command cmd = { 0 };
12364 + struct dpsw_cmd_if_set_flooding_metering *cmd_params;
12365 +
12366 + /* prepare command */
12367 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING_METERING,
12368 + cmd_flags,
12369 + token);
12370 + cmd_params = (struct dpsw_cmd_if_set_flooding_metering *)cmd.params;
12371 + cmd_params->if_id = cpu_to_le16(if_id);
12372 + dpsw_set_field(cmd_params->mode_units, MODE, cfg->mode);
12373 + dpsw_set_field(cmd_params->mode_units, UNITS, cfg->units);
12374 + cmd_params->cir = cpu_to_le32(cfg->cir);
12375 + cmd_params->eir = cpu_to_le32(cfg->eir);
12376 + cmd_params->cbs = cpu_to_le32(cfg->cbs);
12377 + cmd_params->ebs = cpu_to_le32(cfg->ebs);
12378 +
12379 + /* send command to mc*/
12380 + return mc_send_command(mc_io, &cmd);
12381 +}
12382 +
12383 +/**
12384 + * dpsw_if_set_metering() - Set interface metering for flooding
12385 + * @mc_io: Pointer to MC portal's I/O object
12386 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12387 + * @token: Token of DPSW object
12388 + * @if_id: Interface Identifier
12389 + * @tc_id: Traffic class ID
12390 + * @cfg: Metering parameters
12391 + *
12392 + * Return: Completion status. '0' on Success; Error code otherwise.
12393 + */
12394 +int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
12395 + u32 cmd_flags,
12396 + u16 token,
12397 + u16 if_id,
12398 + u8 tc_id,
12399 + const struct dpsw_metering_cfg *cfg)
12400 +{
12401 + struct mc_command cmd = { 0 };
12402 + struct dpsw_cmd_if_set_metering *cmd_params;
12403 +
12404 + /* prepare command */
12405 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_METERING,
12406 + cmd_flags,
12407 + token);
12408 + cmd_params = (struct dpsw_cmd_if_set_metering *)cmd.params;
12409 + cmd_params->if_id = cpu_to_le16(if_id);
12410 + cmd_params->tc_id = tc_id;
12411 + dpsw_set_field(cmd_params->mode_units, MODE, cfg->mode);
12412 + dpsw_set_field(cmd_params->mode_units, UNITS, cfg->units);
12413 + cmd_params->cir = cpu_to_le32(cfg->cir);
12414 + cmd_params->eir = cpu_to_le32(cfg->eir);
12415 + cmd_params->cbs = cpu_to_le32(cfg->cbs);
12416 + cmd_params->ebs = cpu_to_le32(cfg->ebs);
12417 +
12418 + /* send command to mc*/
12419 + return mc_send_command(mc_io, &cmd);
12420 +}
12421 +
12422 +/**
12423 + * dpsw_prepare_early_drop() - Prepare an early drop for setting in to interface
12424 + * @cfg: Early-drop configuration
12425 + * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA
12426 + *
12427 + * This function has to be called before dpsw_if_tc_set_early_drop
12428 + *
12429 + */
12430 +void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
12431 + u8 *early_drop_buf)
12432 +{
12433 + struct dpsw_prep_early_drop *ext_params;
12434 +
12435 + ext_params = (struct dpsw_prep_early_drop *)early_drop_buf;
12436 + dpsw_set_field(ext_params->conf, EARLY_DROP_MODE, cfg->drop_mode);
12437 + dpsw_set_field(ext_params->conf, EARLY_DROP_UNIT, cfg->units);
12438 + ext_params->tail_drop_threshold = cpu_to_le32(cfg->tail_drop_threshold);
12439 + ext_params->green_drop_probability = cfg->green.drop_probability;
12440 + ext_params->green_max_threshold = cpu_to_le64(cfg->green.max_threshold);
12441 + ext_params->green_min_threshold = cpu_to_le64(cfg->green.min_threshold);
12442 + ext_params->yellow_drop_probability = cfg->yellow.drop_probability;
12443 + ext_params->yellow_max_threshold =
12444 + cpu_to_le64(cfg->yellow.max_threshold);
12445 + ext_params->yellow_min_threshold =
12446 + cpu_to_le64(cfg->yellow.min_threshold);
12447 +}
12448 +
12449 +/**
12450 + * dpsw_if_set_early_drop() - Set interface traffic class early-drop
12451 + * configuration
12452 + * @mc_io: Pointer to MC portal's I/O object
12453 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12454 + * @token: Token of DPSW object
12455 + * @if_id: Interface Identifier
12456 + * @tc_id: Traffic class selection (0-7)
12457 + * @early_drop_iova: I/O virtual address of 64 bytes;
12458 + * Must be cacheline-aligned and DMA-able memory
12459 + *
12460 + * warning: Before calling this function, call dpsw_prepare_early_drop()
12461 + * to prepare the early_drop_iova parameter
12462 + *
12463 + * Return: '0' on Success; error code otherwise.
12464 + */
12465 +int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
12466 + u32 cmd_flags,
12467 + u16 token,
12468 + u16 if_id,
12469 + u8 tc_id,
12470 + u64 early_drop_iova)
12471 +{
12472 + struct mc_command cmd = { 0 };
12473 + struct dpsw_cmd_if_set_early_drop *cmd_params;
12474 +
12475 + /* prepare command */
12476 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_EARLY_DROP,
12477 + cmd_flags,
12478 + token);
12479 + cmd_params = (struct dpsw_cmd_if_set_early_drop *)cmd.params;
12480 + cmd_params->tc_id = tc_id;
12481 + cmd_params->if_id = cpu_to_le16(if_id);
12482 + cmd_params->early_drop_iova = cpu_to_le64(early_drop_iova);
12483 +
12484 + /* send command to mc*/
12485 + return mc_send_command(mc_io, &cmd);
12486 +}
12487 +
12488 +/**
12489 + * dpsw_add_custom_tpid() - API Configures a distinct Ethernet type value
12490 + * @mc_io: Pointer to MC portal's I/O object
12491 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12492 + * @token: Token of DPSW object
12493 + * @cfg: Tag Protocol identifier
12494 + *
12495 + * API Configures a distinct Ethernet type value (or TPID value)
12496 + * to indicate a VLAN tag in addition to the common
12497 + * TPID values 0x8100 and 0x88A8.
12498 + * Two additional TPID's are supported
12499 + *
12500 + * Return: Completion status. '0' on Success; Error code otherwise.
12501 + */
12502 +int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
12503 + u32 cmd_flags,
12504 + u16 token,
12505 + const struct dpsw_custom_tpid_cfg *cfg)
12506 +{
12507 + struct mc_command cmd = { 0 };
12508 + struct dpsw_cmd_custom_tpid *cmd_params;
12509 +
12510 + /* prepare command */
12511 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ADD_CUSTOM_TPID,
12512 + cmd_flags,
12513 + token);
12514 + cmd_params = (struct dpsw_cmd_custom_tpid *)cmd.params;
12515 + cmd_params->tpid = cpu_to_le16(cfg->tpid);
12516 +
12517 + /* send command to mc*/
12518 + return mc_send_command(mc_io, &cmd);
12519 +}
12520 +
12521 +/**
12522 + * dpsw_remove_custom_tpid() - API removes a distinct Ethernet type value
12523 + * @mc_io: Pointer to MC portal's I/O object
12524 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12525 + * @token: Token of DPSW object
12526 + * @cfg: Tag Protocol identifier
12527 + *
12528 + * Return: Completion status. '0' on Success; Error code otherwise.
12529 + */
12530 +int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
12531 + u32 cmd_flags,
12532 + u16 token,
12533 + const struct dpsw_custom_tpid_cfg *cfg)
12534 +{
12535 + struct mc_command cmd = { 0 };
12536 + struct dpsw_cmd_custom_tpid *cmd_params;
12537 +
12538 + /* prepare command */
12539 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_REMOVE_CUSTOM_TPID,
12540 + cmd_flags,
12541 + token);
12542 + cmd_params = (struct dpsw_cmd_custom_tpid *)cmd.params;
12543 + cmd_params->tpid = cpu_to_le16(cfg->tpid);
12544 +
12545 + /* send command to mc*/
12546 + return mc_send_command(mc_io, &cmd);
12547 +}
12548 +
12549 +/**
12550 + * dpsw_if_enable() - Enable Interface
12551 + * @mc_io: Pointer to MC portal's I/O object
12552 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12553 + * @token: Token of DPSW object
12554 + * @if_id: Interface Identifier
12555 + *
12556 + * Return: Completion status. '0' on Success; Error code otherwise.
12557 + */
12558 +int dpsw_if_enable(struct fsl_mc_io *mc_io,
12559 + u32 cmd_flags,
12560 + u16 token,
12561 + u16 if_id)
12562 +{
12563 + struct mc_command cmd = { 0 };
12564 + struct dpsw_cmd_if *cmd_params;
12565 +
12566 + /* prepare command */
12567 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE,
12568 + cmd_flags,
12569 + token);
12570 + cmd_params = (struct dpsw_cmd_if *)cmd.params;
12571 + cmd_params->if_id = cpu_to_le16(if_id);
12572 +
12573 + /* send command to mc*/
12574 + return mc_send_command(mc_io, &cmd);
12575 +}
12576 +
12577 +/**
12578 + * dpsw_if_disable() - Disable Interface
12579 + * @mc_io: Pointer to MC portal's I/O object
12580 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12581 + * @token: Token of DPSW object
12582 + * @if_id: Interface Identifier
12583 + *
12584 + * Return: Completion status. '0' on Success; Error code otherwise.
12585 + */
12586 +int dpsw_if_disable(struct fsl_mc_io *mc_io,
12587 + u32 cmd_flags,
12588 + u16 token,
12589 + u16 if_id)
12590 +{
12591 + struct mc_command cmd = { 0 };
12592 + struct dpsw_cmd_if *cmd_params;
12593 +
12594 + /* prepare command */
12595 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE,
12596 + cmd_flags,
12597 + token);
12598 + cmd_params = (struct dpsw_cmd_if *)cmd.params;
12599 + cmd_params->if_id = cpu_to_le16(if_id);
12600 +
12601 + /* send command to mc*/
12602 + return mc_send_command(mc_io, &cmd);
12603 +}
12604 +
12605 +/**
12606 + * dpsw_if_get_attributes() - Function obtains attributes of interface
12607 + * @mc_io: Pointer to MC portal's I/O object
12608 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12609 + * @token: Token of DPSW object
12610 + * @if_id: Interface Identifier
12611 + * @attr: Returned interface attributes
12612 + *
12613 + * Return: Completion status. '0' on Success; Error code otherwise.
12614 + */
12615 +int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
12616 + u32 cmd_flags,
12617 + u16 token,
12618 + u16 if_id,
12619 + struct dpsw_if_attr *attr)
12620 +{
12621 + struct dpsw_rsp_if_get_attr *rsp_params;
12622 + struct dpsw_cmd_if *cmd_params;
12623 + struct mc_command cmd = { 0 };
12624 + int err;
12625 +
12626 + /* prepare command */
12627 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR,
12628 + cmd_flags,
12629 + token);
12630 + cmd_params = (struct dpsw_cmd_if *)cmd.params;
12631 + cmd_params->if_id = cpu_to_le16(if_id);
12632 +
12633 + /* send command to mc*/
12634 + err = mc_send_command(mc_io, &cmd);
12635 + if (err)
12636 + return err;
12637 +
12638 + /* retrieve response parameters */
12639 + rsp_params = (struct dpsw_rsp_if_get_attr *)cmd.params;
12640 + attr->num_tcs = rsp_params->num_tcs;
12641 + attr->rate = le32_to_cpu(rsp_params->rate);
12642 + attr->options = le32_to_cpu(rsp_params->options);
12643 + attr->enabled = dpsw_get_field(rsp_params->conf, ENABLED);
12644 + attr->accept_all_vlan = dpsw_get_field(rsp_params->conf,
12645 + ACCEPT_ALL_VLAN);
12646 + attr->admit_untagged = dpsw_get_field(rsp_params->conf, ADMIT_UNTAGGED);
12647 + attr->qdid = le16_to_cpu(rsp_params->qdid);
12648 +
12649 + return 0;
12650 +}
12651 +
12652 +/**
12653 + * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length.
12654 + * @mc_io: Pointer to MC portal's I/O object
12655 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12656 + * @token: Token of DPSW object
12657 + * @if_id: Interface Identifier
12658 + * @frame_length: Maximum Frame Length
12659 + *
12660 + * Return: Completion status. '0' on Success; Error code otherwise.
12661 + */
12662 +int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
12663 + u32 cmd_flags,
12664 + u16 token,
12665 + u16 if_id,
12666 + u16 frame_length)
12667 +{
12668 + struct mc_command cmd = { 0 };
12669 + struct dpsw_cmd_if_set_max_frame_length *cmd_params;
12670 +
12671 + /* prepare command */
12672 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH,
12673 + cmd_flags,
12674 + token);
12675 + cmd_params = (struct dpsw_cmd_if_set_max_frame_length *)cmd.params;
12676 + cmd_params->if_id = cpu_to_le16(if_id);
12677 + cmd_params->frame_length = cpu_to_le16(frame_length);
12678 +
12679 + /* send command to mc*/
12680 + return mc_send_command(mc_io, &cmd);
12681 +}
12682 +
12683 +/**
12684 + * dpsw_if_get_max_frame_length() - Get Maximum Receive frame length.
12685 + * @mc_io: Pointer to MC portal's I/O object
12686 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12687 + * @token: Token of DPSW object
12688 + * @if_id: Interface Identifier
12689 + * @frame_length: Returned maximum Frame Length
12690 + *
12691 + * Return: Completion status. '0' on Success; Error code otherwise.
12692 + */
12693 +int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
12694 + u32 cmd_flags,
12695 + u16 token,
12696 + u16 if_id,
12697 + u16 *frame_length)
12698 +{
12699 + struct mc_command cmd = { 0 };
12700 + struct dpsw_cmd_if_get_max_frame_length *cmd_params;
12701 + struct dpsw_rsp_if_get_max_frame_length *rsp_params;
12702 + int err;
12703 +
12704 + /* prepare command */
12705 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH,
12706 + cmd_flags,
12707 + token);
12708 + cmd_params = (struct dpsw_cmd_if_get_max_frame_length *)cmd.params;
12709 + cmd_params->if_id = cpu_to_le16(if_id);
12710 +
12711 + /* send command to mc*/
12712 + err = mc_send_command(mc_io, &cmd);
12713 + if (err)
12714 + return err;
12715 +
12716 + rsp_params = (struct dpsw_rsp_if_get_max_frame_length *)cmd.params;
12717 + *frame_length = le16_to_cpu(rsp_params->frame_length);
12718 +
12719 + return 0;
12720 +}
12721 +
12722 +/**
12723 + * dpsw_vlan_add() - Adding new VLAN to DPSW.
12724 + * @mc_io: Pointer to MC portal's I/O object
12725 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12726 + * @token: Token of DPSW object
12727 + * @vlan_id: VLAN Identifier
12728 + * @cfg: VLAN configuration
12729 + *
12730 + * Only VLAN ID and FDB ID are required parameters here.
12731 + * 12 bit VLAN ID is defined in IEEE802.1Q.
12732 + * Adding a duplicate VLAN ID is not allowed.
12733 + * FDB ID can be shared across multiple VLANs. Shared learning
12734 + * is obtained by calling dpsw_vlan_add for multiple VLAN IDs
12735 + * with same fdb_id
12736 + *
12737 + * Return: Completion status. '0' on Success; Error code otherwise.
12738 + */
12739 +int dpsw_vlan_add(struct fsl_mc_io *mc_io,
12740 + u32 cmd_flags,
12741 + u16 token,
12742 + u16 vlan_id,
12743 + const struct dpsw_vlan_cfg *cfg)
12744 +{
12745 + struct mc_command cmd = { 0 };
12746 + struct dpsw_vlan_add *cmd_params;
12747 +
12748 + /* prepare command */
12749 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD,
12750 + cmd_flags,
12751 + token);
12752 + cmd_params = (struct dpsw_vlan_add *)cmd.params;
12753 + cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
12754 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12755 +
12756 + /* send command to mc*/
12757 + return mc_send_command(mc_io, &cmd);
12758 +}
12759 +
12760 +/**
12761 + * dpsw_vlan_add_if() - Adding a set of interfaces to an existing VLAN.
12762 + * @mc_io: Pointer to MC portal's I/O object
12763 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12764 + * @token: Token of DPSW object
12765 + * @vlan_id: VLAN Identifier
12766 + * @cfg: Set of interfaces to add
12767 + *
12768 + * It adds only interfaces not belonging to this VLAN yet,
12769 + * otherwise an error is generated and an entire command is
12770 + * ignored. This function can be called numerous times always
12771 + * providing required interfaces delta.
12772 + *
12773 + * Return: Completion status. '0' on Success; Error code otherwise.
12774 + */
12775 +int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
12776 + u32 cmd_flags,
12777 + u16 token,
12778 + u16 vlan_id,
12779 + const struct dpsw_vlan_if_cfg *cfg)
12780 +{
12781 + struct mc_command cmd = { 0 };
12782 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12783 +
12784 + /* prepare command */
12785 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF,
12786 + cmd_flags,
12787 + token);
12788 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12789 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12790 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12791 +
12792 + /* send command to mc*/
12793 + return mc_send_command(mc_io, &cmd);
12794 +}
12795 +
12796 +/**
12797 + * dpsw_vlan_add_if_untagged() - Defining a set of interfaces that should be
12798 + * transmitted as untagged.
12799 + * @mc_io: Pointer to MC portal's I/O object
12800 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12801 + * @token: Token of DPSW object
12802 + * @vlan_id: VLAN Identifier
12803 + * @cfg: Set of interfaces that should be transmitted as untagged
12804 + *
12805 + * These interfaces should already belong to this VLAN.
12806 + * By default all interfaces are transmitted as tagged.
12807 + * Providing a non-existent interface or an untagged interface that is
12808 + * configured untagged already generates an error and the entire
12809 + * command is ignored.
12810 + *
12811 + * Return: Completion status. '0' on Success; Error code otherwise.
12812 + */
12813 +int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
12814 + u32 cmd_flags,
12815 + u16 token,
12816 + u16 vlan_id,
12817 + const struct dpsw_vlan_if_cfg *cfg)
12818 +{
12819 + struct mc_command cmd = { 0 };
12820 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12821 +
12822 + /* prepare command */
12823 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED,
12824 + cmd_flags,
12825 + token);
12826 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12827 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12828 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12829 +
12830 + /* send command to mc*/
12831 + return mc_send_command(mc_io, &cmd);
12832 +}
12833 +
12834 +/**
12835 + * dpsw_vlan_add_if_flooding() - Define a set of interfaces that should be
12836 + * included in flooding when frame with unknown destination
12837 + * unicast MAC arrived.
12838 + * @mc_io: Pointer to MC portal's I/O object
12839 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12840 + * @token: Token of DPSW object
12841 + * @vlan_id: VLAN Identifier
12842 + * @cfg: Set of interfaces that should be used for flooding
12843 + *
12844 + * These interfaces should belong to this VLAN. By default all
12845 + * interfaces are included into flooding list. Providing
12846 + * a non-existent interface or an interface that is already in the
12847 + * flooding list generates an error and the entire command is
12848 + * ignored.
12849 + *
12850 + * Return: Completion status. '0' on Success; Error code otherwise.
12851 + */
12852 +int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
12853 + u32 cmd_flags,
12854 + u16 token,
12855 + u16 vlan_id,
12856 + const struct dpsw_vlan_if_cfg *cfg)
12857 +{
12858 + struct mc_command cmd = { 0 };
12859 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12860 +
12861 + /* prepare command */
12862 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_FLOODING,
12863 + cmd_flags,
12864 + token);
12865 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12866 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12867 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12868 +
12869 + /* send command to mc*/
12870 + return mc_send_command(mc_io, &cmd);
12871 +}
12872 +
12873 +/**
12874 + * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
12875 + * @mc_io: Pointer to MC portal's I/O object
12876 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12877 + * @token: Token of DPSW object
12878 + * @vlan_id: VLAN Identifier
12879 + * @cfg: Set of interfaces that should be removed
12880 + *
12881 + * Interfaces must belong to this VLAN, otherwise an error
12882 + * is returned and the command is ignored
12883 + *
12884 + * Return: Completion status. '0' on Success; Error code otherwise.
12885 + */
12886 +int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
12887 + u32 cmd_flags,
12888 + u16 token,
12889 + u16 vlan_id,
12890 + const struct dpsw_vlan_if_cfg *cfg)
12891 +{
12892 + struct mc_command cmd = { 0 };
12893 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12894 +
12895 + /* prepare command */
12896 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF,
12897 + cmd_flags,
12898 + token);
12899 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12900 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12901 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12902 +
12903 + /* send command to mc*/
12904 + return mc_send_command(mc_io, &cmd);
12905 +}
12906 +
12907 +/**
12908 + * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should be
12909 + * converted from transmitted as untagged to transmit as tagged.
12910 + * @mc_io: Pointer to MC portal's I/O object
12911 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12912 + * @token: Token of DPSW object
12913 + * @vlan_id: VLAN Identifier
12914 + * @cfg: Set of interfaces that should be removed
12915 + *
12916 + * Interfaces provided by API have to belong to this VLAN and
12917 + * configured untagged, otherwise an error is returned and the
12918 + * command is ignored
12919 + *
12920 + * Return: Completion status. '0' on Success; Error code otherwise.
12921 + */
12922 +int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
12923 + u32 cmd_flags,
12924 + u16 token,
12925 + u16 vlan_id,
12926 + const struct dpsw_vlan_if_cfg *cfg)
12927 +{
12928 + struct mc_command cmd = { 0 };
12929 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12930 +
12931 + /* prepare command */
12932 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED,
12933 + cmd_flags,
12934 + token);
12935 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12936 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12937 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12938 +
12939 + /* send command to mc*/
12940 + return mc_send_command(mc_io, &cmd);
12941 +}
12942 +
12943 +/**
12944 + * dpsw_vlan_remove_if_flooding() - Define a set of interfaces that should be
12945 + * removed from the flooding list.
12946 + * @mc_io: Pointer to MC portal's I/O object
12947 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12948 + * @token: Token of DPSW object
12949 + * @vlan_id: VLAN Identifier
12950 + * @cfg: Set of interfaces used for flooding
12951 + *
12952 + * Return: Completion status. '0' on Success; Error code otherwise.
12953 + */
12954 +int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
12955 + u32 cmd_flags,
12956 + u16 token,
12957 + u16 vlan_id,
12958 + const struct dpsw_vlan_if_cfg *cfg)
12959 +{
12960 + struct mc_command cmd = { 0 };
12961 + struct dpsw_cmd_vlan_manage_if *cmd_params;
12962 +
12963 + /* prepare command */
12964 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_FLOODING,
12965 + cmd_flags,
12966 + token);
12967 + cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
12968 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12969 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
12970 +
12971 + /* send command to mc*/
12972 + return mc_send_command(mc_io, &cmd);
12973 +}
12974 +
12975 +/**
12976 + * dpsw_vlan_remove() - Remove an entire VLAN
12977 + * @mc_io: Pointer to MC portal's I/O object
12978 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12979 + * @token: Token of DPSW object
12980 + * @vlan_id: VLAN Identifier
12981 + *
12982 + * Return: Completion status. '0' on Success; Error code otherwise.
12983 + */
12984 +int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
12985 + u32 cmd_flags,
12986 + u16 token,
12987 + u16 vlan_id)
12988 +{
12989 + struct mc_command cmd = { 0 };
12990 + struct dpsw_cmd_vlan_remove *cmd_params;
12991 +
12992 + /* prepare command */
12993 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE,
12994 + cmd_flags,
12995 + token);
12996 + cmd_params = (struct dpsw_cmd_vlan_remove *)cmd.params;
12997 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
12998 +
12999 + /* send command to mc*/
13000 + return mc_send_command(mc_io, &cmd);
13001 +}
13002 +
13003 +/**
13004 + * dpsw_vlan_get_attributes() - Get VLAN attributes
13005 + * @mc_io: Pointer to MC portal's I/O object
13006 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13007 + * @token: Token of DPSW object
13008 + * @vlan_id: VLAN Identifier
13009 + * @attr: Returned DPSW attributes
13010 + *
13011 + * Return: Completion status. '0' on Success; Error code otherwise.
13012 + */
13013 +int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
13014 + u32 cmd_flags,
13015 + u16 token,
13016 + u16 vlan_id,
13017 + struct dpsw_vlan_attr *attr)
13018 +{
13019 + struct mc_command cmd = { 0 };
13020 + struct dpsw_cmd_vlan_get_attr *cmd_params;
13021 + struct dpsw_rsp_vlan_get_attr *rsp_params;
13022 + int err;
13023 +
13024 + /* prepare command */
13025 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_ATTRIBUTES,
13026 + cmd_flags,
13027 + token);
13028 + cmd_params = (struct dpsw_cmd_vlan_get_attr *)cmd.params;
13029 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
13030 +
13031 + /* send command to mc*/
13032 + err = mc_send_command(mc_io, &cmd);
13033 + if (err)
13034 + return err;
13035 +
13036 + /* retrieve response parameters */
13037 + rsp_params = (struct dpsw_rsp_vlan_get_attr *)cmd.params;
13038 + attr->fdb_id = le16_to_cpu(rsp_params->fdb_id);
13039 + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
13040 + attr->num_untagged_ifs = le16_to_cpu(rsp_params->num_untagged_ifs);
13041 + attr->num_flooding_ifs = le16_to_cpu(rsp_params->num_flooding_ifs);
13042 +
13043 + return 0;
13044 +}
13045 +
13046 +/**
13047 + * dpsw_vlan_get_if() - Get interfaces belonging to this VLAN
13048 + * @mc_io: Pointer to MC portal's I/O object
13049 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13050 + * @token: Token of DPSW object
13051 + * @vlan_id: VLAN Identifier
13052 + * @cfg:	Returned set of interfaces belonging to this VLAN
13053 + *
13054 + * Return: Completion status. '0' on Success; Error code otherwise.
13055 + */
13056 +int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
13057 + u32 cmd_flags,
13058 + u16 token,
13059 + u16 vlan_id,
13060 + struct dpsw_vlan_if_cfg *cfg)
13061 +{
13062 + struct mc_command cmd = { 0 };
13063 + struct dpsw_cmd_vlan_get_if *cmd_params;
13064 + struct dpsw_rsp_vlan_get_if *rsp_params;
13065 + int err;
13066 +
13067 + /* prepare command */
13068 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF,
13069 + cmd_flags,
13070 + token);
13071 + cmd_params = (struct dpsw_cmd_vlan_get_if *)cmd.params;
13072 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
13073 +
13074 + /* send command to mc*/
13075 + err = mc_send_command(mc_io, &cmd);
13076 + if (err)
13077 + return err;
13078 +
13079 + /* retrieve response parameters */
13080 + rsp_params = (struct dpsw_rsp_vlan_get_if *)cmd.params;
13081 + cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
13082 + read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
13083 +
13084 + return 0;
13085 +}
13086 +
13087 +/**
13088 + * dpsw_vlan_get_if_flooding() - Get interfaces used in flooding for this VLAN
13089 + * @mc_io: Pointer to MC portal's I/O object
13090 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13091 + * @token: Token of DPSW object
13092 + * @vlan_id: VLAN Identifier
13093 + * @cfg: Returned set of flooding interfaces
13094 + *
13095 + * Return: Completion status. '0' on Success; Error code otherwise.
13096 + */
13097 +
13098 +int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
13099 + u32 cmd_flags,
13100 + u16 token,
13101 + u16 vlan_id,
13102 + struct dpsw_vlan_if_cfg *cfg)
13103 +{
13104 + struct mc_command cmd = { 0 };
13105 + struct dpsw_cmd_vlan_get_if_flooding *cmd_params;
13106 + struct dpsw_rsp_vlan_get_if_flooding *rsp_params;
13107 + int err;
13108 +
13109 + /* prepare command */
13110 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_FLOODING,
13111 + cmd_flags,
13112 + token);
13113 + cmd_params = (struct dpsw_cmd_vlan_get_if_flooding *)cmd.params;
13114 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
13115 +
13116 + /* send command to mc*/
13117 + err = mc_send_command(mc_io, &cmd);
13118 + if (err)
13119 + return err;
13120 +
13121 + /* retrieve response parameters */
13122 + rsp_params = (struct dpsw_rsp_vlan_get_if_flooding *)cmd.params;
13123 + cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
13124 + read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
13125 +
13126 + return 0;
13127 +}
13128 +
13129 +/**
13130 + * dpsw_vlan_get_if_untagged() - Get interfaces that should be transmitted as
13131 + * untagged
13132 + * @mc_io: Pointer to MC portal's I/O object
13133 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13134 + * @token: Token of DPSW object
13135 + * @vlan_id: VLAN Identifier
13136 + * @cfg: Returned set of untagged interfaces
13137 + *
13138 + * Return: Completion status. '0' on Success; Error code otherwise.
13139 + */
13140 +int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
13141 + u32 cmd_flags,
13142 + u16 token,
13143 + u16 vlan_id,
13144 + struct dpsw_vlan_if_cfg *cfg)
13145 +{
13146 + struct mc_command cmd = { 0 };
13147 + struct dpsw_cmd_vlan_get_if_untagged *cmd_params;
13148 + struct dpsw_rsp_vlan_get_if_untagged *rsp_params;
13149 + int err;
13150 +
13151 + /* prepare command */
13152 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_UNTAGGED,
13153 + cmd_flags,
13154 + token);
13155 + cmd_params = (struct dpsw_cmd_vlan_get_if_untagged *)cmd.params;
13156 + cmd_params->vlan_id = cpu_to_le16(vlan_id);
13157 +
13158 + /* send command to mc*/
13159 + err = mc_send_command(mc_io, &cmd);
13160 + if (err)
13161 + return err;
13162 +
13163 + /* retrieve response parameters */
13164 + rsp_params = (struct dpsw_rsp_vlan_get_if_untagged *)cmd.params;
13165 + cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
13166 + read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
13167 +
13168 + return 0;
13169 +}
13170 +
13171 +/**
13172 + * dpsw_fdb_add() - Add FDB to switch and Returns handle to FDB table for
13173 + * the reference
13174 + * @mc_io: Pointer to MC portal's I/O object
13175 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13176 + * @token: Token of DPSW object
13177 + * @fdb_id: Returned Forwarding Database Identifier
13178 + * @cfg: FDB Configuration
13179 + *
13180 + * Return: Completion status. '0' on Success; Error code otherwise.
13181 + */
13182 +int dpsw_fdb_add(struct fsl_mc_io *mc_io,
13183 + u32 cmd_flags,
13184 + u16 token,
13185 + u16 *fdb_id,
13186 + const struct dpsw_fdb_cfg *cfg)
13187 +{
13188 + struct mc_command cmd = { 0 };
13189 + struct dpsw_cmd_fdb_add *cmd_params;
13190 + struct dpsw_rsp_fdb_add *rsp_params;
13191 + int err;
13192 +
13193 + /* prepare command */
13194 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD,
13195 + cmd_flags,
13196 + token);
13197 + cmd_params = (struct dpsw_cmd_fdb_add *)cmd.params;
13198 + cmd_params->fdb_aging_time = cpu_to_le16(cfg->fdb_aging_time);
13199 + cmd_params->num_fdb_entries = cpu_to_le16(cfg->num_fdb_entries);
13200 +
13201 + /* send command to mc*/
13202 + err = mc_send_command(mc_io, &cmd);
13203 + if (err)
13204 + return err;
13205 +
13206 + /* retrieve response parameters */
13207 + rsp_params = (struct dpsw_rsp_fdb_add *)cmd.params;
13208 + *fdb_id = le16_to_cpu(rsp_params->fdb_id);
13209 +
13210 + return 0;
13211 +}
13212 +
13213 +/**
13214 + * dpsw_fdb_remove() - Remove FDB from switch
13215 + * @mc_io: Pointer to MC portal's I/O object
13216 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13217 + * @token: Token of DPSW object
13218 + * @fdb_id: Forwarding Database Identifier
13219 + *
13220 + * Return: Completion status. '0' on Success; Error code otherwise.
13221 + */
13222 +int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
13223 + u32 cmd_flags,
13224 + u16 token,
13225 + u16 fdb_id)
13226 +{
13227 + struct mc_command cmd = { 0 };
13228 + struct dpsw_cmd_fdb_remove *cmd_params;
13229 +
13230 + /* prepare command */
13231 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE,
13232 + cmd_flags,
13233 + token);
13234 + cmd_params = (struct dpsw_cmd_fdb_remove *)cmd.params;
13235 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
13236 +
13237 + /* send command to mc*/
13238 + return mc_send_command(mc_io, &cmd);
13239 +}
13240 +
13241 +/**
13242 + * dpsw_fdb_add_unicast() - Function adds a unicast entry into MAC lookup table
13243 + * @mc_io: Pointer to MC portal's I/O object
13244 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13245 + * @token: Token of DPSW object
13246 + * @fdb_id: Forwarding Database Identifier
13247 + * @cfg: Unicast entry configuration
13248 + *
13249 + * Return: Completion status. '0' on Success; Error code otherwise.
13250 + */
13251 +int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
13252 + u32 cmd_flags,
13253 + u16 token,
13254 + u16 fdb_id,
13255 + const struct dpsw_fdb_unicast_cfg *cfg)
13256 +{
13257 + struct mc_command cmd = { 0 };
13258 + struct dpsw_cmd_fdb_add_unicast *cmd_params;
13259 + int i;
13260 +
13261 + /* prepare command */
13262 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST,
13263 + cmd_flags,
13264 + token);
13265 + cmd_params = (struct dpsw_cmd_fdb_add_unicast *)cmd.params;
13266 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
13267 + cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
13268 + for (i = 0; i < 6; i++)
13269 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
13270 + dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
13271 +
13272 + /* send command to mc*/
13273 + return mc_send_command(mc_io, &cmd);
13274 +}
13275 +
13276 +/**
13277 + * dpsw_fdb_get_unicast() - Get unicast entry from MAC lookup table by
13278 + * unicast Ethernet address
13279 + * @mc_io: Pointer to MC portal's I/O object
13280 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13281 + * @token: Token of DPSW object
13282 + * @fdb_id: Forwarding Database Identifier
13283 + * @cfg: Returned unicast entry configuration
13284 + *
13285 + * Return: Completion status. '0' on Success; Error code otherwise.
13286 + */
13287 +int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
13288 + u32 cmd_flags,
13289 + u16 token,
13290 + u16 fdb_id,
13291 + struct dpsw_fdb_unicast_cfg *cfg)
13292 +{
13293 + struct mc_command cmd = { 0 };
13294 + struct dpsw_cmd_fdb_get_unicast *cmd_params;
13295 + struct dpsw_rsp_fdb_get_unicast *rsp_params;
13296 + int err, i;
13297 +
13298 + /* prepare command */
13299 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_UNICAST,
13300 + cmd_flags,
13301 + token);
13302 + cmd_params = (struct dpsw_cmd_fdb_get_unicast *)cmd.params;
13303 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
13304 + for (i = 0; i < 6; i++)
13305 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
13306 +
13307 + /* send command to mc*/
13308 + err = mc_send_command(mc_io, &cmd);
13309 + if (err)
13310 + return err;
13311 +
13312 + /* retrieve response parameters */
13313 + rsp_params = (struct dpsw_rsp_fdb_get_unicast *)cmd.params;
13314 + cfg->if_egress = le16_to_cpu(rsp_params->if_egress);
13315 + cfg->type = dpsw_get_field(rsp_params->type, ENTRY_TYPE);
13316 +
13317 + return 0;
13318 +}
13319 +
13320 +/**
13321 + * dpsw_fdb_remove_unicast() - removes an entry from MAC lookup table
13322 + * @mc_io: Pointer to MC portal's I/O object
13323 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13324 + * @token: Token of DPSW object
13325 + * @fdb_id: Forwarding Database Identifier
13326 + * @cfg: Unicast entry configuration
13327 + *
13328 + * Return: Completion status. '0' on Success; Error code otherwise.
13329 + */
13330 +int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
13331 + u32 cmd_flags,
13332 + u16 token,
13333 + u16 fdb_id,
13334 + const struct dpsw_fdb_unicast_cfg *cfg)
13335 +{
13336 + struct mc_command cmd = { 0 };
13337 + struct dpsw_cmd_fdb_remove_unicast *cmd_params;
13338 + int i;
13339 +
13340 + /* prepare command */
13341 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST,
13342 + cmd_flags,
13343 + token);
13344 + cmd_params = (struct dpsw_cmd_fdb_remove_unicast *)cmd.params;
13345 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
13346 + for (i = 0; i < 6; i++)
13347 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
13348 + cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
13349 + dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
13350 +
13351 + /* send command to mc*/
13352 + return mc_send_command(mc_io, &cmd);
13353 +}
13354 +
13355 +/**
13356 + * dpsw_fdb_add_multicast() - Add a set of egress interfaces to multi-cast group
13357 + * @mc_io: Pointer to MC portal's I/O object
13358 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13359 + * @token: Token of DPSW object
13360 + * @fdb_id: Forwarding Database Identifier
13361 + * @cfg: Multicast entry configuration
13362 + *
13363 + * If group doesn't exist, it will be created.
13364 + * It adds only interfaces not belonging to this multicast group
13365 + * yet, otherwise error will be generated and the command is
13366 + * ignored.
13367 + * This function may be called numerous times always providing
13368 + * required interfaces delta.
13369 + *
13370 + * Return: Completion status. '0' on Success; Error code otherwise.
13371 + */
13372 +int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
13373 + u32 cmd_flags,
13374 + u16 token,
13375 + u16 fdb_id,
13376 + const struct dpsw_fdb_multicast_cfg *cfg)
13377 +{
13378 + struct mc_command cmd = { 0 };
13379 + struct dpsw_cmd_fdb_add_multicast *cmd_params;
13380 + int i;
13381 +
13382 + /* prepare command */
13383 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST,
13384 + cmd_flags,
13385 + token);
13386 + cmd_params = (struct dpsw_cmd_fdb_add_multicast *)cmd.params;
13387 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
13388 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
13389 + dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
13390 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
13391 + for (i = 0; i < 6; i++)
13392 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
13393 +
13394 + /* send command to mc*/
13395 + return mc_send_command(mc_io, &cmd);
13396 +}
13397 +
13398 +/**
13399 + * dpsw_fdb_get_multicast() - Reading multi-cast group by multi-cast Ethernet
13400 + * address.
13401 + * @mc_io: Pointer to MC portal's I/O object
13402 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13403 + * @token: Token of DPSW object
13404 + * @fdb_id: Forwarding Database Identifier
13405 + * @cfg: Returned multicast entry configuration
13406 + *
13407 + * Return: Completion status. '0' on Success; Error code otherwise.
13408 + */
13409 +int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
13410 + u32 cmd_flags,
13411 + u16 token,
13412 + u16 fdb_id,
13413 + struct dpsw_fdb_multicast_cfg *cfg)
13414 +{
13415 + struct mc_command cmd = { 0 };
13416 + struct dpsw_cmd_fdb_get_multicast *cmd_params;
13417 + struct dpsw_rsp_fdb_get_multicast *rsp_params;
13418 + int err, i;
13419 +
13420 + /* prepare command */
13421 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_MULTICAST,
13422 + cmd_flags,
13423 + token);
13424 + cmd_params = (struct dpsw_cmd_fdb_get_multicast *)cmd.params;
13425 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
13426 + for (i = 0; i < 6; i++)
13427 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
13428 +
13429 + /* send command to mc*/
13430 + err = mc_send_command(mc_io, &cmd);
13431 + if (err)
13432 + return err;
13433 +
13434 + /* retrieve response parameters */
13435 + rsp_params = (struct dpsw_rsp_fdb_get_multicast *)cmd.params;
13436 + cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
13437 + cfg->type = dpsw_get_field(rsp_params->type, ENTRY_TYPE);
13438 + read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
13439 +
13440 + return 0;
13441 +}
13442 +
13443 +/**
13444 + * dpsw_fdb_remove_multicast() - Removing interfaces from an existing multicast
13445 + * group.
13446 + * @mc_io: Pointer to MC portal's I/O object
13447 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13448 + * @token: Token of DPSW object
13449 + * @fdb_id: Forwarding Database Identifier
13450 + * @cfg: Multicast entry configuration
13451 + *
13452 + * Interfaces provided by this API have to exist in the group,
13453 + * otherwise an error will be returned and an entire command
13454 + * ignored. If there is no interface left in the group,
13455 + * an entire group is deleted
13456 + *
13457 + * Return: Completion status. '0' on Success; Error code otherwise.
13458 + */
13459 +int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
13460 + u32 cmd_flags,
13461 + u16 token,
13462 + u16 fdb_id,
13463 + const struct dpsw_fdb_multicast_cfg *cfg)
13464 +{
13465 + struct mc_command cmd = { 0 };
13466 + struct dpsw_cmd_fdb_remove_multicast *cmd_params;
13467 + int i;
13468 +
13469 + /* prepare command */
13470 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST,
13471 + cmd_flags,
13472 + token);
13473 + cmd_params = (struct dpsw_cmd_fdb_remove_multicast *)cmd.params;
13474 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
13475 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
13476 + dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
13477 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
13478 + for (i = 0; i < 6; i++)
13479 + cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
13480 +
13481 + /* send command to mc*/
13482 + return mc_send_command(mc_io, &cmd);
13483 +}
13484 +
13485 +/**
13486 + * dpsw_fdb_set_learning_mode() - Define FDB learning mode
13487 + * @mc_io: Pointer to MC portal's I/O object
13488 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13489 + * @token: Token of DPSW object
13490 + * @fdb_id: Forwarding Database Identifier
13491 + * @mode: Learning mode
13492 + *
13493 + * Return: Completion status. '0' on Success; Error code otherwise.
13494 + */
13495 +int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
13496 + u32 cmd_flags,
13497 + u16 token,
13498 + u16 fdb_id,
13499 + enum dpsw_fdb_learning_mode mode)
13500 +{
13501 + struct mc_command cmd = { 0 };
13502 + struct dpsw_cmd_fdb_set_learning_mode *cmd_params;
13503 +
13504 + /* prepare command */
13505 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE,
13506 + cmd_flags,
13507 + token);
13508 + cmd_params = (struct dpsw_cmd_fdb_set_learning_mode *)cmd.params;
13509 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
13510 + dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode);
13511 +
13512 + /* send command to mc*/
13513 + return mc_send_command(mc_io, &cmd);
13514 +}
13515 +
13516 +/**
13517 + * dpsw_fdb_get_attributes() - Get FDB attributes
13518 + * @mc_io: Pointer to MC portal's I/O object
13519 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13520 + * @token: Token of DPSW object
13521 + * @fdb_id: Forwarding Database Identifier
13522 + * @attr: Returned FDB attributes
13523 + *
13524 + * Return: Completion status. '0' on Success; Error code otherwise.
13525 + */
13526 +int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
13527 + u32 cmd_flags,
13528 + u16 token,
13529 + u16 fdb_id,
13530 + struct dpsw_fdb_attr *attr)
13531 +{
13532 + struct mc_command cmd = { 0 };
13533 + struct dpsw_cmd_fdb_get_attr *cmd_params;
13534 + struct dpsw_rsp_fdb_get_attr *rsp_params;
13535 + int err;
13536 +
13537 + /* prepare command */
13538 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_ATTR,
13539 + cmd_flags,
13540 + token);
13541 + cmd_params = (struct dpsw_cmd_fdb_get_attr *)cmd.params;
13542 + cmd_params->fdb_id = cpu_to_le16(fdb_id);
13543 +
13544 + /* send command to mc*/
13545 + err = mc_send_command(mc_io, &cmd);
13546 + if (err)
13547 + return err;
13548 +
13549 + /* retrieve response parameters */
13550 + rsp_params = (struct dpsw_rsp_fdb_get_attr *)cmd.params;
13551 + attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
13552 + attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
13553 + attr->learning_mode = dpsw_get_field(rsp_params->learning_mode,
13554 + LEARNING_MODE);
13555 + attr->num_fdb_mc_groups = le16_to_cpu(rsp_params->num_fdb_mc_groups);
13556 + attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
13557 +
13558 + return 0;
13559 +}
13560 +
13561 +/**
13562 + * dpsw_acl_add() - Adds ACL to L2 switch.
13563 + * @mc_io: Pointer to MC portal's I/O object
13564 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13565 + * @token: Token of DPSW object
13566 + * @acl_id: Returned ACL ID, for the future reference
13567 + * @cfg: ACL configuration
13568 + *
13569 + * Create Access Control List. Multiple ACLs can be created and
13570 + * co-exist in L2 switch
13571 + *
13572 + * Return: '0' on Success; Error code otherwise.
13573 + */
13574 +int dpsw_acl_add(struct fsl_mc_io *mc_io,
13575 + u32 cmd_flags,
13576 + u16 token,
13577 + u16 *acl_id,
13578 + const struct dpsw_acl_cfg *cfg)
13579 +{
13580 + struct mc_command cmd = { 0 };
13581 + struct dpsw_cmd_acl_add *cmd_params;
13582 + struct dpsw_rsp_acl_add *rsp_params;
13583 + int err;
13584 +
13585 + /* prepare command */
13586 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD,
13587 + cmd_flags,
13588 + token);
13589 + cmd_params = (struct dpsw_cmd_acl_add *)cmd.params;
13590 + cmd_params->max_entries = cpu_to_le16(cfg->max_entries);
13591 +
13592 + /* send command to mc*/
13593 + err = mc_send_command(mc_io, &cmd);
13594 + if (err)
13595 + return err;
13596 +
13597 + /* retrieve response parameters */
13598 + rsp_params = (struct dpsw_rsp_acl_add *)cmd.params;
13599 + *acl_id = le16_to_cpu(rsp_params->acl_id);
13600 +
13601 + return 0;
13602 +}
13603 +
13604 +/**
13605 + * dpsw_acl_remove() - Removes ACL from L2 switch.
13606 + * @mc_io: Pointer to MC portal's I/O object
13607 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13608 + * @token: Token of DPSW object
13609 + * @acl_id: ACL ID
13610 + *
13611 + * Return: '0' on Success; Error code otherwise.
13612 + */
13613 +int dpsw_acl_remove(struct fsl_mc_io *mc_io,
13614 + u32 cmd_flags,
13615 + u16 token,
13616 + u16 acl_id)
13617 +{
13618 + struct mc_command cmd = { 0 };
13619 + struct dpsw_cmd_acl_remove *cmd_params;
13620 +
13621 + /* prepare command */
13622 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE,
13623 + cmd_flags,
13624 + token);
13625 + cmd_params = (struct dpsw_cmd_acl_remove *)cmd.params;
13626 + cmd_params->acl_id = cpu_to_le16(acl_id);
13627 +
13628 + /* send command to mc*/
13629 + return mc_send_command(mc_io, &cmd);
13630 +}
13631 +
13632 +/**
13633 + * dpsw_acl_prepare_entry_cfg() - Prepare an ACL entry configuration.
13634 + * @key: Key
13635 + * @entry_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
13636 + *
13637 + * This function has to be called before adding or removing acl_entry
13638 + *
13639 + */
13640 +void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
13641 + u8 *entry_cfg_buf)
13642 +{
13643 + struct dpsw_prep_acl_entry *ext_params;
13644 + int i;
13645 +
13646 + ext_params = (struct dpsw_prep_acl_entry *)entry_cfg_buf;
13647 +
13648 + for (i = 0; i < 6; i++) {
13649 + ext_params->match_l2_dest_mac[i] =
13650 + key->match.l2_dest_mac[5 - i];
13651 + ext_params->match_l2_source_mac[i] =
13652 + key->match.l2_source_mac[5 - i];
13653 + ext_params->mask_l2_dest_mac[i] =
13654 + key->mask.l2_dest_mac[5 - i];
13655 + ext_params->mask_l2_source_mac[i] =
13656 + key->mask.l2_source_mac[5 - i];
13657 + }
13658 +
13659 + ext_params->match_l2_tpid = cpu_to_le16(key->match.l2_tpid);
13660 + ext_params->match_l2_vlan_id = cpu_to_le16(key->match.l2_vlan_id);
13661 + ext_params->match_l3_dest_ip = cpu_to_le32(key->match.l3_dest_ip);
13662 + ext_params->match_l3_source_ip = cpu_to_le32(key->match.l3_source_ip);
13663 + ext_params->match_l4_dest_port = cpu_to_le16(key->match.l4_dest_port);
13664 + ext_params->match_l2_ether_type = cpu_to_le16(key->match.l2_ether_type);
13665 + ext_params->match_l2_pcp_dei = key->match.l2_pcp_dei;
13666 + ext_params->match_l3_dscp = key->match.l3_dscp;
13667 + ext_params->match_l4_source_port =
13668 + cpu_to_le16(key->match.l4_source_port);
13669 +
13670 + ext_params->mask_l2_tpid = cpu_to_le16(key->mask.l2_tpid);
13671 + ext_params->mask_l2_vlan_id = cpu_to_le16(key->mask.l2_vlan_id);
13672 + ext_params->mask_l3_dest_ip = cpu_to_le32(key->mask.l3_dest_ip);
13673 + ext_params->mask_l3_source_ip = cpu_to_le32(key->mask.l3_source_ip);
13674 + ext_params->mask_l4_dest_port = cpu_to_le16(key->mask.l4_dest_port);
13675 + ext_params->mask_l4_source_port = cpu_to_le16(key->mask.l4_source_port);
13676 + ext_params->mask_l2_ether_type = cpu_to_le16(key->mask.l2_ether_type);
13677 + ext_params->mask_l2_pcp_dei = key->mask.l2_pcp_dei;
13678 + ext_params->mask_l3_dscp = key->mask.l3_dscp;
13679 + ext_params->match_l3_protocol = key->match.l3_protocol;
13680 + ext_params->mask_l3_protocol = key->mask.l3_protocol;
13681 +}
13682 +
13683 +/**
13684 + * dpsw_acl_add_entry() - Adds an entry to ACL.
13685 + * @mc_io: Pointer to MC portal's I/O object
13686 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13687 + * @token: Token of DPSW object
13688 + * @acl_id: ACL ID
13689 + * @cfg: Entry configuration
13690 + *
13691 + * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
13692 + *
13693 + * Return: '0' on Success; Error code otherwise.
13694 + */
13695 +int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
13696 + u32 cmd_flags,
13697 + u16 token,
13698 + u16 acl_id,
13699 + const struct dpsw_acl_entry_cfg *cfg)
13700 +{
13701 + struct mc_command cmd = { 0 };
13702 + struct dpsw_cmd_acl_entry *cmd_params;
13703 +
13704 + /* prepare command */
13705 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_ENTRY,
13706 + cmd_flags,
13707 + token);
13708 + cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
13709 + cmd_params->acl_id = cpu_to_le16(acl_id);
13710 + cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
13711 + cmd_params->precedence = cpu_to_le32(cfg->precedence);
13712 + dpsw_set_field(cmd_params->result_action, RESULT_ACTION,
13713 + cfg->result.action);
13714 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
13715 +
13716 + /* send command to mc*/
13717 + return mc_send_command(mc_io, &cmd);
13718 +}
13719 +
13720 +/**
13721 + * dpsw_acl_remove_entry() - Removes an entry from ACL.
13722 + * @mc_io: Pointer to MC portal's I/O object
13723 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13724 + * @token: Token of DPSW object
13725 + * @acl_id: ACL ID
13726 + * @cfg: Entry configuration
13727 + *
13728 + * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
13729 + *
13730 + * Return: '0' on Success; Error code otherwise.
13731 + */
13732 +int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
13733 + u32 cmd_flags,
13734 + u16 token,
13735 + u16 acl_id,
13736 + const struct dpsw_acl_entry_cfg *cfg)
13737 +{
13738 + struct mc_command cmd = { 0 };
13739 + struct dpsw_cmd_acl_entry *cmd_params;
13740 +
13741 + /* prepare command */
13742 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY,
13743 + cmd_flags,
13744 + token);
13745 + cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
13746 + cmd_params->acl_id = cpu_to_le16(acl_id);
13747 + cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
13748 + cmd_params->precedence = cpu_to_le32(cfg->precedence);
13749 + dpsw_set_field(cmd_params->result_action, RESULT_ACTION,
13750 + cfg->result.action);
13751 + cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
13752 +
13753 + /* send command to mc*/
13754 + return mc_send_command(mc_io, &cmd);
13755 +}
13756 +
13757 +/**
13758 + * dpsw_acl_add_if() - Associate interface/interfaces with ACL.
13759 + * @mc_io: Pointer to MC portal's I/O object
13760 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13761 + * @token: Token of DPSW object
13762 + * @acl_id: ACL ID
13763 + * @cfg: Interfaces list
13764 + *
13765 + * Return: '0' on Success; Error code otherwise.
13766 + */
13767 +int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
13768 + u32 cmd_flags,
13769 + u16 token,
13770 + u16 acl_id,
13771 + const struct dpsw_acl_if_cfg *cfg)
13772 +{
13773 + struct mc_command cmd = { 0 };
13774 + struct dpsw_cmd_acl_if *cmd_params;
13775 +
13776 + /* prepare command */
13777 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_IF,
13778 + cmd_flags,
13779 + token);
13780 + cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
13781 + cmd_params->acl_id = cpu_to_le16(acl_id);
13782 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
13783 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
13784 +
13785 + /* send command to mc*/
13786 + return mc_send_command(mc_io, &cmd);
13787 +}
13788 +
13789 +/**
13790 + * dpsw_acl_remove_if() - De-associate interface/interfaces from ACL.
13791 + * @mc_io: Pointer to MC portal's I/O object
13792 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13793 + * @token: Token of DPSW object
13794 + * @acl_id: ACL ID
13795 + * @cfg: Interfaces list
13796 + *
13797 + * Return: '0' on Success; Error code otherwise.
13798 + */
13799 +int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
13800 + u32 cmd_flags,
13801 + u16 token,
13802 + u16 acl_id,
13803 + const struct dpsw_acl_if_cfg *cfg)
13804 +{
13805 + struct mc_command cmd = { 0 };
13806 + struct dpsw_cmd_acl_if *cmd_params;
13807 +
13808 + /* prepare command */
13809 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_IF,
13810 + cmd_flags,
13811 + token);
13812 + cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
13813 + cmd_params->acl_id = cpu_to_le16(acl_id);
13814 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
13815 + build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
13816 +
13817 + /* send command to mc*/
13818 + return mc_send_command(mc_io, &cmd);
13819 +}
13820 +
13821 +/**
13822 + * dpsw_acl_get_attributes() - Get ACL attributes
13823 + * @mc_io: Pointer to MC portal's I/O object
13824 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13825 + * @token: Token of DPSW object
13826 + * @acl_id: ACL Identifier
13827 + * @attr: Returned ACL attributes
13828 + *
13829 + * Return: '0' on Success; Error code otherwise.
13830 + */
13831 +int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
13832 + u32 cmd_flags,
13833 + u16 token,
13834 + u16 acl_id,
13835 + struct dpsw_acl_attr *attr)
13836 +{
13837 + struct mc_command cmd = { 0 };
13838 + struct dpsw_cmd_acl_get_attr *cmd_params;
13839 + struct dpsw_rsp_acl_get_attr *rsp_params;
13840 + int err;
13841 +
13842 + /* prepare command */
13843 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_GET_ATTR,
13844 + cmd_flags,
13845 + token);
13846 + cmd_params = (struct dpsw_cmd_acl_get_attr *)cmd.params;
13847 + cmd_params->acl_id = cpu_to_le16(acl_id);
13848 +
13849 + /* send command to mc*/
13850 + err = mc_send_command(mc_io, &cmd);
13851 + if (err)
13852 + return err;
13853 +
13854 + /* retrieve response parameters */
13855 + rsp_params = (struct dpsw_rsp_acl_get_attr *)cmd.params;
13856 + attr->max_entries = le16_to_cpu(rsp_params->max_entries);
13857 + attr->num_entries = le16_to_cpu(rsp_params->num_entries);
13858 + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
13859 +
13860 + return 0;
13861 +}
13862 +
13863 +/**
13864 + * dpsw_ctrl_if_get_attributes() - Obtain control interface attributes
13865 + * @mc_io: Pointer to MC portal's I/O object
13866 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13867 + * @token: Token of DPSW object
13868 + * @attr: Returned control interface attributes
13869 + *
13870 + * Return: '0' on Success; Error code otherwise.
13871 + */
13872 +int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
13873 + u32 cmd_flags,
13874 + u16 token,
13875 + struct dpsw_ctrl_if_attr *attr)
13876 +{
13877 + struct mc_command cmd = { 0 };
13878 + struct dpsw_rsp_ctrl_if_get_attr *rsp_params;
13879 + int err;
13880 +
13881 + /* prepare command */
13882 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR,
13883 + cmd_flags,
13884 + token);
13885 +
13886 + /* send command to mc*/
13887 + err = mc_send_command(mc_io, &cmd);
13888 + if (err)
13889 + return err;
13890 +
13891 + /* retrieve response parameters */
13892 + rsp_params = (struct dpsw_rsp_ctrl_if_get_attr *)cmd.params;
13893 + attr->rx_fqid = le32_to_cpu(rsp_params->rx_fqid);
13894 + attr->rx_err_fqid = le32_to_cpu(rsp_params->rx_err_fqid);
13895 + attr->tx_err_conf_fqid = le32_to_cpu(rsp_params->tx_err_conf_fqid);
13896 +
13897 + return 0;
13898 +}
13899 +
13900 +/**
13901 + * dpsw_ctrl_if_set_pools() - Set control interface buffer pools
13902 + * @mc_io: Pointer to MC portal's I/O object
13903 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13904 + * @token: Token of DPSW object
13905 + * @cfg: Buffer pools configuration
13906 + *
13907 + * Return: '0' on Success; Error code otherwise.
13908 + */
13909 +int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
13910 + u32 cmd_flags,
13911 + u16 token,
13912 + const struct dpsw_ctrl_if_pools_cfg *pools)
13913 +{
13914 + struct mc_command cmd = { 0 };
13915 + struct dpsw_cmd_ctrl_if_set_pools *cmd_params;
13916 + int i;
13917 +
13918 + /* prepare command */
13919 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS,
13920 + cmd_flags,
13921 + token);
13922 + cmd_params = (struct dpsw_cmd_ctrl_if_set_pools *)cmd.params;
13923 + cmd_params->num_dpbp = pools->num_dpbp;
13924 + for (i = 0; i < 8; i++) {
13925 + cmd_params->backup_pool = dpsw_set_bit(cmd_params->backup_pool,
13926 + i,
13927 + pools->pools[i].backup_pool);
13928 + cmd_params->buffer_size[i] =
13929 + cpu_to_le16(pools->pools[i].buffer_size);
13930 + cmd_params->dpbp_id[i] =
13931 + cpu_to_le32(pools->pools[i].dpbp_id);
13932 + }
13933 +
13934 + /* send command to mc*/
13935 + return mc_send_command(mc_io, &cmd);
13936 +}
13937 +
13938 +/**
13939 + * dpsw_ctrl_if_enable() - Enable control interface
13940 + * @mc_io: Pointer to MC portal's I/O object
13941 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13942 + * @token: Token of DPSW object
13943 + *
13944 + * Return: '0' on Success; Error code otherwise.
13945 + */
13946 +int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
13947 + u32 cmd_flags,
13948 + u16 token)
13949 +{
13950 + struct mc_command cmd = { 0 };
13951 +
13952 + /* prepare command */
13953 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE,
13954 + cmd_flags,
13955 + token);
13956 +
13957 + /* send command to mc*/
13958 + return mc_send_command(mc_io, &cmd);
13959 +}
13960 +
13961 +/**
13962 + * dpsw_ctrl_if_disable() - Function disables control interface
13963 + * @mc_io: Pointer to MC portal's I/O object
13964 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13965 + * @token: Token of DPSW object
13966 + *
13967 + * Return: '0' on Success; Error code otherwise.
13968 + */
13969 +int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
13970 + u32 cmd_flags,
13971 + u16 token)
13972 +{
13973 + struct mc_command cmd = { 0 };
13974 +
13975 + /* prepare command */
13976 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE,
13977 + cmd_flags,
13978 + token);
13979 +
13980 + /* send command to mc*/
13981 + return mc_send_command(mc_io, &cmd);
13982 +}
13983 +
13984 +/**
13985 + * dpsw_get_api_version() - Get Data Path Switch API version
13986 + * @mc_io: Pointer to MC portal's I/O object
13987 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13988 + * @major_ver: Major version of data path switch API
13989 + * @minor_ver: Minor version of data path switch API
13990 + *
13991 + * Return: '0' on Success; Error code otherwise.
13992 + */
13993 +int dpsw_get_api_version(struct fsl_mc_io *mc_io,
13994 + u32 cmd_flags,
13995 + u16 *major_ver,
13996 + u16 *minor_ver)
13997 +{
13998 + struct mc_command cmd = { 0 };
13999 + struct dpsw_rsp_get_api_version *rsp_params;
14000 + int err;
14001 +
14002 + cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_API_VERSION,
14003 + cmd_flags,
14004 + 0);
14005 +
14006 + err = mc_send_command(mc_io, &cmd);
14007 + if (err)
14008 + return err;
14009 +
14010 + rsp_params = (struct dpsw_rsp_get_api_version *)cmd.params;
14011 + *major_ver = le16_to_cpu(rsp_params->version_major);
14012 + *minor_ver = le16_to_cpu(rsp_params->version_minor);
14013 +
14014 + return 0;
14015 +}
14016 --- /dev/null
14017 +++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
14018 @@ -0,0 +1,1269 @@
14019 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
14020 + *
14021 + * Redistribution and use in source and binary forms, with or without
14022 + * modification, are permitted provided that the following conditions are met:
14023 + * * Redistributions of source code must retain the above copyright
14024 + * notice, this list of conditions and the following disclaimer.
14025 + * * Redistributions in binary form must reproduce the above copyright
14026 + * notice, this list of conditions and the following disclaimer in the
14027 + * documentation and/or other materials provided with the distribution.
14028 + * * Neither the name of the above-listed copyright holders nor the
14029 + * names of any contributors may be used to endorse or promote products
14030 + * derived from this software without specific prior written permission.
14031 + *
14032 + *
14033 + * ALTERNATIVELY, this software may be distributed under the terms of the
14034 + * GNU General Public License ("GPL") as published by the Free Software
14035 + * Foundation, either version 2 of that License or (at your option) any
14036 + * later version.
14037 + *
14038 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
14039 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
14040 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
14041 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
14042 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
14043 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
14044 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
14045 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
14046 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
14047 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
14048 + * POSSIBILITY OF SUCH DAMAGE.
14049 + */
14050 +#ifndef __FSL_DPSW_H
14051 +#define __FSL_DPSW_H
14052 +
14053 +/* Data Path L2-Switch API
14054 + * Contains API for handling DPSW topology and functionality
14055 + */
14056 +
14057 +struct fsl_mc_io;
14058 +
14059 +/**
14060 + * DPSW general definitions
14061 + */
14062 +
14063 +/**
14064 + * Maximum number of traffic class priorities
14065 + */
14066 +#define DPSW_MAX_PRIORITIES 8
14067 +/**
14068 + * Maximum number of interfaces
14069 + */
14070 +#define DPSW_MAX_IF 64
14071 +
14072 +int dpsw_open(struct fsl_mc_io *mc_io,
14073 + u32 cmd_flags,
14074 + int dpsw_id,
14075 + u16 *token);
14076 +
14077 +int dpsw_close(struct fsl_mc_io *mc_io,
14078 + u32 cmd_flags,
14079 + u16 token);
14080 +
14081 +/**
14082 + * DPSW options
14083 + */
14084 +
14085 +/**
14086 + * Disable flooding
14087 + */
14088 +#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL
14089 +/**
14090 + * Disable Multicast
14091 + */
14092 +#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL
14093 +/**
14094 + * Support control interface
14095 + */
14096 +#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL
14097 +/**
14098 + * Disable flooding metering
14099 + */
14100 +#define DPSW_OPT_FLOODING_METERING_DIS 0x0000000000000020ULL
14101 +/**
14102 + * Enable metering
14103 + */
14104 +#define DPSW_OPT_METERING_EN 0x0000000000000040ULL
14105 +
14106 +/**
14107 + * enum dpsw_component_type - component type of a bridge
14108 + * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an
14109 + * enterprise VLAN bridge or of a Provider Bridge used
14110 + * to process C-tagged frames
14111 + * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a
14112 + * Provider Bridge
14113 + *
14114 + */
14115 +enum dpsw_component_type {
14116 + DPSW_COMPONENT_TYPE_C_VLAN = 0,
14117 + DPSW_COMPONENT_TYPE_S_VLAN
14118 +};
14119 +
14120 +/**
14121 + * struct dpsw_cfg - DPSW configuration
14122 + * @num_ifs: Number of external and internal interfaces
14123 + * @adv: Advanced parameters; default is all zeros;
14124 + * use this structure to change default settings
14125 + */
14126 +struct dpsw_cfg {
14127 + u16 num_ifs;
14128 + /**
14129 + * struct adv - Advanced parameters
14130 + * @options: Enable/Disable DPSW features (bitmap)
14131 + * @max_vlans: Maximum Number of VLAN's; 0 - indicates default 16
14132 + * @max_meters_per_if: Number of meters per interface
14133 + * @max_fdbs: Maximum Number of FDB's; 0 - indicates default 16
14134 + * @max_fdb_entries: Number of FDB entries for default FDB table;
14135 + * 0 - indicates default 1024 entries.
14136 + * @fdb_aging_time: Default FDB aging time for default FDB table;
14137 + * 0 - indicates default 300 seconds
14138 + * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
14139 + * 0 - indicates default 32
14140 + * @component_type: Indicates the component type of this bridge
14141 + */
14142 + struct {
14143 + u64 options;
14144 + u16 max_vlans;
14145 + u8 max_meters_per_if;
14146 + u8 max_fdbs;
14147 + u16 max_fdb_entries;
14148 + u16 fdb_aging_time;
14149 + u16 max_fdb_mc_groups;
14150 + enum dpsw_component_type component_type;
14151 + } adv;
14152 +};
14153 +
14154 +int dpsw_create(struct fsl_mc_io *mc_io,
14155 + u16 dprc_token,
14156 + u32 cmd_flags,
14157 + const struct dpsw_cfg *cfg,
14158 + u32 *obj_id);
14159 +
14160 +int dpsw_destroy(struct fsl_mc_io *mc_io,
14161 + u16 dprc_token,
14162 + u32 cmd_flags,
14163 + u32 object_id);
14164 +
14165 +int dpsw_enable(struct fsl_mc_io *mc_io,
14166 + u32 cmd_flags,
14167 + u16 token);
14168 +
14169 +int dpsw_disable(struct fsl_mc_io *mc_io,
14170 + u32 cmd_flags,
14171 + u16 token);
14172 +
14173 +int dpsw_is_enabled(struct fsl_mc_io *mc_io,
14174 + u32 cmd_flags,
14175 + u16 token,
14176 + int *en);
14177 +
14178 +int dpsw_reset(struct fsl_mc_io *mc_io,
14179 + u32 cmd_flags,
14180 + u16 token);
14181 +
14182 +/**
14183 + * DPSW IRQ Index and Events
14184 + */
14185 +
14186 +#define DPSW_IRQ_INDEX_IF 0x0000
14187 +#define DPSW_IRQ_INDEX_L2SW 0x0001
14188 +
14189 +/**
14190 + * IRQ event - Indicates that the link state changed
14191 + */
14192 +#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001
14193 +
14194 +/**
14195 + * struct dpsw_irq_cfg - IRQ configuration
14196 + * @addr: Address that must be written to signal a message-based interrupt
14197 + * @val: Value to write into irq_addr address
14198 + * @irq_num: A user defined number associated with this IRQ
14199 + */
14200 +struct dpsw_irq_cfg {
14201 + u64 addr;
14202 + u32 val;
14203 + int irq_num;
14204 +};
14205 +
14206 +int dpsw_set_irq(struct fsl_mc_io *mc_io,
14207 + u32 cmd_flags,
14208 + u16 token,
14209 + u8 irq_index,
14210 + struct dpsw_irq_cfg *irq_cfg);
14211 +
14212 +int dpsw_get_irq(struct fsl_mc_io *mc_io,
14213 + u32 cmd_flags,
14214 + u16 token,
14215 + u8 irq_index,
14216 + int *type,
14217 + struct dpsw_irq_cfg *irq_cfg);
14218 +
14219 +int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
14220 + u32 cmd_flags,
14221 + u16 token,
14222 + u8 irq_index,
14223 + u8 en);
14224 +
14225 +int dpsw_get_irq_enable(struct fsl_mc_io *mc_io,
14226 + u32 cmd_flags,
14227 + u16 token,
14228 + u8 irq_index,
14229 + u8 *en);
14230 +
14231 +int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
14232 + u32 cmd_flags,
14233 + u16 token,
14234 + u8 irq_index,
14235 + u32 mask);
14236 +
14237 +int dpsw_get_irq_mask(struct fsl_mc_io *mc_io,
14238 + u32 cmd_flags,
14239 + u16 token,
14240 + u8 irq_index,
14241 + u32 *mask);
14242 +
14243 +int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
14244 + u32 cmd_flags,
14245 + u16 token,
14246 + u8 irq_index,
14247 + u32 *status);
14248 +
14249 +int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
14250 + u32 cmd_flags,
14251 + u16 token,
14252 + u8 irq_index,
14253 + u32 status);
14254 +
14255 +/**
14256 + * struct dpsw_attr - Structure representing DPSW attributes
14257 + * @id: DPSW object ID
14258 + * @options: Enable/Disable DPSW features
14259 + * @max_vlans: Maximum Number of VLANs
14260 + * @max_meters_per_if: Number of meters per interface
14261 + * @max_fdbs: Maximum Number of FDBs
14262 + * @max_fdb_entries: Number of FDB entries for default FDB table;
14263 + * 0 - indicates default 1024 entries.
14264 + * @fdb_aging_time: Default FDB aging time for default FDB table;
14265 + * 0 - indicates default 300 seconds
14266 + * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
14267 + * 0 - indicates default 32
14268 + * @mem_size: DPSW frame storage memory size
14269 + * @num_ifs: Number of interfaces
14270 + * @num_vlans: Current number of VLANs
14271 + * @num_fdbs: Current number of FDBs
14272 + * @component_type: Component type of this bridge
14273 + */
14274 +struct dpsw_attr {
14275 + int id;
14276 + u64 options;
14277 + u16 max_vlans;
14278 + u8 max_meters_per_if;
14279 + u8 max_fdbs;
14280 + u16 max_fdb_entries;
14281 + u16 fdb_aging_time;
14282 + u16 max_fdb_mc_groups;
14283 + u16 num_ifs;
14284 + u16 mem_size;
14285 + u16 num_vlans;
14286 + u8 num_fdbs;
14287 + enum dpsw_component_type component_type;
14288 +};
14289 +
14290 +int dpsw_get_attributes(struct fsl_mc_io *mc_io,
14291 + u32 cmd_flags,
14292 + u16 token,
14293 + struct dpsw_attr *attr);
14294 +
14295 +int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
14296 + u32 cmd_flags,
14297 + u16 token,
14298 + u16 if_id);
14299 +
14300 +/**
14301 + * enum dpsw_action - Action selection for special/control frames
14302 + * @DPSW_ACTION_DROP: Drop frame
14303 + * @DPSW_ACTION_REDIRECT: Redirect frame to control port
14304 + */
14305 +enum dpsw_action {
14306 + DPSW_ACTION_DROP = 0,
14307 + DPSW_ACTION_REDIRECT = 1
14308 +};
14309 +
14310 +/**
14311 + * Enable auto-negotiation
14312 + */
14313 +#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL
14314 +/**
14315 + * Enable half-duplex mode
14316 + */
14317 +#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
14318 +/**
14319 + * Enable pause frames
14320 + */
14321 +#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL
14322 +/**
14323 + * Enable asymmetric pause frames
14324 + */
14325 +#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
14326 +
14327 +/**
14328 + * struct dpsw_link_cfg - Structure representing DPSW link configuration
14329 + * @rate: Rate
14330 + * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
14331 + */
14332 +struct dpsw_link_cfg {
14333 + u32 rate;
14334 + u64 options;
14335 +};
14336 +
14337 +int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
14338 + u32 cmd_flags,
14339 + u16 token,
14340 + u16 if_id,
14341 + struct dpsw_link_cfg *cfg);
14342 +/**
14343 + * struct dpsw_link_state - Structure representing DPSW link state
14344 + * @rate: Rate
14345 + * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
14346 + * @up: 0 - covers two cases: down and disconnected, 1 - up
14347 + */
14348 +struct dpsw_link_state {
14349 + u32 rate;
14350 + u64 options;
14351 + int up;
14352 +};
14353 +
14354 +int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
14355 + u32 cmd_flags,
14356 + u16 token,
14357 + u16 if_id,
14358 + struct dpsw_link_state *state);
14359 +
14360 +int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
14361 + u32 cmd_flags,
14362 + u16 token,
14363 + u16 if_id,
14364 + int en);
14365 +
14366 +int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
14367 + u32 cmd_flags,
14368 + u16 token,
14369 + u16 if_id,
14370 + int en);
14371 +
14372 +int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
14373 + u32 cmd_flags,
14374 + u16 token,
14375 + u16 if_id,
14376 + int en);
14377 +
14378 +/**
14379 + * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration
14380 + * @pcp: Priority Code Point (PCP): a 3-bit field which refers
14381 + * to the IEEE 802.1p priority
14382 + * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used
14383 + * separately or in conjunction with PCP to indicate frames
14384 + * eligible to be dropped in the presence of congestion
14385 + * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN
14386 + * to which the frame belongs. The hexadecimal values
14387 + * of 0x000 and 0xFFF are reserved;
14388 + * all other values may be used as VLAN identifiers,
14389 + * allowing up to 4,094 VLANs
14390 + */
14391 +struct dpsw_tci_cfg {
14392 + u8 pcp;
14393 + u8 dei;
14394 + u16 vlan_id;
14395 +};
14396 +
14397 +int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
14398 + u32 cmd_flags,
14399 + u16 token,
14400 + u16 if_id,
14401 + const struct dpsw_tci_cfg *cfg);
14402 +
14403 +int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
14404 + u32 cmd_flags,
14405 + u16 token,
14406 + u16 if_id,
14407 + struct dpsw_tci_cfg *cfg);
14408 +
14409 +/**
14410 + * enum dpsw_stp_state - Spanning Tree Protocol (STP) states
14411 + * @DPSW_STP_STATE_BLOCKING: Blocking state
14412 + * @DPSW_STP_STATE_LISTENING: Listening state
14413 + * @DPSW_STP_STATE_LEARNING: Learning state
14414 + * @DPSW_STP_STATE_FORWARDING: Forwarding state
14415 + *
14416 + */
14417 +enum dpsw_stp_state {
14418 + DPSW_STP_STATE_BLOCKING = 0,
14419 + DPSW_STP_STATE_LISTENING = 1,
14420 + DPSW_STP_STATE_LEARNING = 2,
14421 + DPSW_STP_STATE_FORWARDING = 3
14422 +};
14423 +
14424 +/**
14425 + * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration
14426 + * @vlan_id: VLAN ID STP state
14427 + * @state: STP state
14428 + */
14429 +struct dpsw_stp_cfg {
14430 + u16 vlan_id;
14431 + enum dpsw_stp_state state;
14432 +};
14433 +
14434 +int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
14435 + u32 cmd_flags,
14436 + u16 token,
14437 + u16 if_id,
14438 + const struct dpsw_stp_cfg *cfg);
14439 +
14440 +/**
14441 + * enum dpsw_accepted_frames - Types of frames to accept
14442 + * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and
14443 + * priority tagged frames
14444 + * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
14445 + * Priority-Tagged frames received on this interface.
14446 + *
14447 + */
14448 +enum dpsw_accepted_frames {
14449 + DPSW_ADMIT_ALL = 1,
14450 + DPSW_ADMIT_ONLY_VLAN_TAGGED = 3
14451 +};
14452 +
14453 +/**
14454 + * struct dpsw_accepted_frames_cfg - Types of frames to accept configuration
14455 + * @type: Defines ingress accepted frames
14456 + * @unaccept_act: When a frame is not accepted, it may be discarded or
14457 + * redirected to control interface depending on this mode
14458 + */
14459 +struct dpsw_accepted_frames_cfg {
14460 + enum dpsw_accepted_frames type;
14461 + enum dpsw_action unaccept_act;
14462 +};
14463 +
14464 +int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
14465 + u32 cmd_flags,
14466 + u16 token,
14467 + u16 if_id,
14468 + const struct dpsw_accepted_frames_cfg *cfg);
14469 +
14470 +int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
14471 + u32 cmd_flags,
14472 + u16 token,
14473 + u16 if_id,
14474 + int accept_all);
14475 +
14476 +/**
14477 + * enum dpsw_counter - Counters types
14478 + * @DPSW_CNT_ING_FRAME: Counts ingress frames
14479 + * @DPSW_CNT_ING_BYTE: Counts ingress bytes
14480 + * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
14481 + * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frame
14482 + * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
14483 + * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
14484 + * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
14485 + * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
14486 + * @DPSW_CNT_EGR_FRAME: Counts egress frames
14487 + * @DPSW_CNT_EGR_BYTE: Counts egress bytes
14488 + * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
14489 + * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames
14490 + */
14491 +enum dpsw_counter {
14492 + DPSW_CNT_ING_FRAME = 0x0,
14493 + DPSW_CNT_ING_BYTE = 0x1,
14494 + DPSW_CNT_ING_FLTR_FRAME = 0x2,
14495 + DPSW_CNT_ING_FRAME_DISCARD = 0x3,
14496 + DPSW_CNT_ING_MCAST_FRAME = 0x4,
14497 + DPSW_CNT_ING_MCAST_BYTE = 0x5,
14498 + DPSW_CNT_ING_BCAST_FRAME = 0x6,
14499 + DPSW_CNT_ING_BCAST_BYTES = 0x7,
14500 + DPSW_CNT_EGR_FRAME = 0x8,
14501 + DPSW_CNT_EGR_BYTE = 0x9,
14502 + DPSW_CNT_EGR_FRAME_DISCARD = 0xa,
14503 + DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb
14504 +};
14505 +
14506 +int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
14507 + u32 cmd_flags,
14508 + u16 token,
14509 + u16 if_id,
14510 + enum dpsw_counter type,
14511 + u64 *counter);
14512 +
14513 +int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
14514 + u32 cmd_flags,
14515 + u16 token,
14516 + u16 if_id,
14517 + enum dpsw_counter type,
14518 + u64 counter);
14519 +
14520 +/**
14521 + * Maximum number of TC
14522 + */
14523 +#define DPSW_MAX_TC 8
14524 +
14525 +/**
14526 + * enum dpsw_priority_selector - User priority
14527 + * @DPSW_UP_PCP: Priority Code Point (PCP): a 3-bit field which
14528 + * refers to the IEEE 802.1p priority.
14529 + * @DPSW_UP_DSCP: Differentiated services Code Point (DSCP): 6 bit
14530 + * field from IP header
14531 + *
14532 + */
14533 +enum dpsw_priority_selector {
14534 + DPSW_UP_PCP = 0,
14535 + DPSW_UP_DSCP = 1
14536 +};
14537 +
14538 +/**
14539 + * enum dpsw_schedule_mode - Traffic classes scheduling
14540 + * @DPSW_SCHED_STRICT_PRIORITY: schedule strict priority
14541 + * @DPSW_SCHED_WEIGHTED: schedule based on token bucket created algorithm
14542 + */
14543 +enum dpsw_schedule_mode {
14544 + DPSW_SCHED_STRICT_PRIORITY,
14545 + DPSW_SCHED_WEIGHTED
14546 +};
14547 +
14548 +/**
14549 + * struct dpsw_tx_schedule_cfg - traffic class configuration
14550 + * @mode: Strict or weight-based scheduling
14551 + * @delta_bandwidth: weighted Bandwidth in range from 100 to 10000
14552 + */
14553 +struct dpsw_tx_schedule_cfg {
14554 + enum dpsw_schedule_mode mode;
14555 + u16 delta_bandwidth;
14556 +};
14557 +
14558 +/**
14559 + * struct dpsw_tx_selection_cfg - Mapping user priority into traffic
14560 + * class configuration
14561 + * @priority_selector: Source for user priority regeneration
14562 + * @tc_id: The Regenerated User priority that the incoming
14563 + * User Priority is mapped to for this interface
14564 + * @tc_sched: Traffic classes configuration
14565 + */
14566 +struct dpsw_tx_selection_cfg {
14567 + enum dpsw_priority_selector priority_selector;
14568 + u8 tc_id[DPSW_MAX_PRIORITIES];
14569 + struct dpsw_tx_schedule_cfg tc_sched[DPSW_MAX_TC];
14570 +};
14571 +
14572 +int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
14573 + u32 cmd_flags,
14574 + u16 token,
14575 + u16 if_id,
14576 + const struct dpsw_tx_selection_cfg *cfg);
14577 +
14578 +/**
14579 + * enum dpsw_reflection_filter - Filter type for frames to reflect
14580 + * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames
14581 + * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames belong to
14582 + * particular VLAN defined by vid parameter
14583 + *
14584 + */
14585 +enum dpsw_reflection_filter {
14586 + DPSW_REFLECTION_FILTER_INGRESS_ALL = 0,
14587 + DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1
14588 +};
14589 +
14590 +/**
14591 + * struct dpsw_reflection_cfg - Structure representing reflection information
14592 + * @filter: Filter type for frames to reflect
14593 + * @vlan_id: Vlan Id to reflect; valid only when filter type is
14594 + * DPSW_INGRESS_VLAN
14595 + */
14596 +struct dpsw_reflection_cfg {
14597 + enum dpsw_reflection_filter filter;
14598 + u16 vlan_id;
14599 +};
14600 +
14601 +int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
14602 + u32 cmd_flags,
14603 + u16 token,
14604 + u16 if_id,
14605 + const struct dpsw_reflection_cfg *cfg);
14606 +
14607 +int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
14608 + u32 cmd_flags,
14609 + u16 token,
14610 + u16 if_id,
14611 + const struct dpsw_reflection_cfg *cfg);
14612 +
14613 +/**
14614 + * enum dpsw_metering_mode - Metering modes
14615 + * @DPSW_METERING_MODE_NONE: metering disabled
14616 + * @DPSW_METERING_MODE_RFC2698: RFC 2698
14617 + * @DPSW_METERING_MODE_RFC4115: RFC 4115
14618 + */
14619 +enum dpsw_metering_mode {
14620 + DPSW_METERING_MODE_NONE = 0,
14621 + DPSW_METERING_MODE_RFC2698,
14622 + DPSW_METERING_MODE_RFC4115
14623 +};
14624 +
14625 +/**
14626 + * enum dpsw_metering_unit - Metering count
14627 + * @DPSW_METERING_UNIT_BYTES: count bytes
14628 + * @DPSW_METERING_UNIT_FRAMES: count frames
14629 + */
14630 +enum dpsw_metering_unit {
14631 + DPSW_METERING_UNIT_BYTES = 0,
14632 + DPSW_METERING_UNIT_FRAMES
14633 +};
14634 +
14635 +/**
14636 + * struct dpsw_metering_cfg - Metering configuration
14637 + * @mode: metering modes
14638 + * @units: Bytes or frame units
14639 + * @cir: Committed information rate (CIR) in Kbits/s
14640 + * @eir: Peak information rate (PIR) Kbit/s rfc2698
14641 + * Excess information rate (EIR) Kbit/s rfc4115
14642 + * @cbs: Committed burst size (CBS) in bytes
14643 + * @ebs: Peak burst size (PBS) in bytes for rfc2698
14644 + * Excess burst size (EBS) in bytes rfc4115
14645 + *
14646 + */
14647 +struct dpsw_metering_cfg {
14648 + enum dpsw_metering_mode mode;
14649 + enum dpsw_metering_unit units;
14650 + u32 cir;
14651 + u32 eir;
14652 + u32 cbs;
14653 + u32 ebs;
14654 +};
14655 +
14656 +int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
14657 + u32 cmd_flags,
14658 + u16 token,
14659 + u16 if_id,
14660 + const struct dpsw_metering_cfg *cfg);
14661 +
14662 +int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
14663 + u32 cmd_flags,
14664 + u16 token,
14665 + u16 if_id,
14666 + u8 tc_id,
14667 + const struct dpsw_metering_cfg *cfg);
14668 +
14669 +/**
14670 + * enum dpsw_early_drop_unit - DPSW early drop unit
14671 + * @DPSW_EARLY_DROP_UNIT_BYTE: count bytes
14672 + * @DPSW_EARLY_DROP_UNIT_FRAMES: count frames
14673 + */
14674 +enum dpsw_early_drop_unit {
14675 + DPSW_EARLY_DROP_UNIT_BYTE = 0,
14676 + DPSW_EARLY_DROP_UNIT_FRAMES
14677 +};
14678 +
14679 +/**
14680 + * enum dpsw_early_drop_mode - DPSW early drop mode
14681 + * @DPSW_EARLY_DROP_MODE_NONE: early drop is disabled
14682 + * @DPSW_EARLY_DROP_MODE_TAIL: early drop in taildrop mode
14683 + * @DPSW_EARLY_DROP_MODE_WRED: early drop in WRED mode
14684 + */
14685 +enum dpsw_early_drop_mode {
14686 + DPSW_EARLY_DROP_MODE_NONE = 0,
14687 + DPSW_EARLY_DROP_MODE_TAIL,
14688 + DPSW_EARLY_DROP_MODE_WRED
14689 +};
14690 +
14691 +/**
14692 + * struct dpsw_wred_cfg - WRED configuration
14693 + * @max_threshold: maximum threshold that packets may be discarded. Above this
14694 + * threshold all packets are discarded; must be less than 2^39;
14695 + * approximated to be expressed as (x+256)*2^(y-1) due to HW
14696 + * implementation.
14697 + * @min_threshold: minimum threshold that packets may be discarded at
14698 + * @drop_probability: probability that a packet will be discarded (1-100,
14699 + * associated with the maximum threshold)
14700 + */
14701 +struct dpsw_wred_cfg {
14702 + u64 min_threshold;
14703 + u64 max_threshold;
14704 + u8 drop_probability;
14705 +};
14706 +
14707 +/**
14708 + * struct dpsw_early_drop_cfg - early-drop configuration
14709 + * @drop_mode: drop mode
14710 + * @units: count units
14711 + * @yellow: WRED - 'yellow' configuration
14712 + * @green: WRED - 'green' configuration
14713 + * @tail_drop_threshold: tail drop threshold
14714 + */
14715 +struct dpsw_early_drop_cfg {
14716 + enum dpsw_early_drop_mode drop_mode;
14717 + enum dpsw_early_drop_unit units;
14718 + struct dpsw_wred_cfg yellow;
14719 + struct dpsw_wred_cfg green;
14720 + u32 tail_drop_threshold;
14721 +};
14722 +
14723 +void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
14724 + u8 *early_drop_buf);
14725 +
14726 +int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
14727 + u32 cmd_flags,
14728 + u16 token,
14729 + u16 if_id,
14730 + u8 tc_id,
14731 + u64 early_drop_iova);
14732 +
14733 +/**
14734 + * struct dpsw_custom_tpid_cfg - Structure representing tag Protocol identifier
14735 + * @tpid: An additional tag protocol identifier
14736 + */
14737 +struct dpsw_custom_tpid_cfg {
14738 + u16 tpid;
14739 +};
14740 +
14741 +int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
14742 + u32 cmd_flags,
14743 + u16 token,
14744 + const struct dpsw_custom_tpid_cfg *cfg);
14745 +
14746 +int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
14747 + u32 cmd_flags,
14748 + u16 token,
14749 + const struct dpsw_custom_tpid_cfg *cfg);
14750 +
14751 +int dpsw_if_enable(struct fsl_mc_io *mc_io,
14752 + u32 cmd_flags,
14753 + u16 token,
14754 + u16 if_id);
14755 +
14756 +int dpsw_if_disable(struct fsl_mc_io *mc_io,
14757 + u32 cmd_flags,
14758 + u16 token,
14759 + u16 if_id);
14760 +
14761 +/**
14762 + * struct dpsw_if_attr - Structure representing DPSW interface attributes
14763 + * @num_tcs: Number of traffic classes
14764 + * @rate: Transmit rate in bits per second
14765 + * @options: Interface configuration options (bitmap)
14766 + * @enabled: Indicates if interface is enabled
14767 + * @accept_all_vlan: The device discards/accepts incoming frames
14768 + * for VLANs that do not include this interface
14769 + * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device
14770 + * discards untagged frames or priority-tagged frames received on
14771 + * this interface;
14772 + * When set to 'DPSW_ADMIT_ALL', untagged frames or priority-
14773 + * tagged frames received on this interface are accepted
14774 + * @qdid: control frames transmit qdid
14775 + */
14776 +struct dpsw_if_attr {
14777 + u8 num_tcs;
14778 + u32 rate;
14779 + u32 options;
14780 + int enabled;
14781 + int accept_all_vlan;
14782 + enum dpsw_accepted_frames admit_untagged;
14783 + u16 qdid;
14784 +};
14785 +
14786 +int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
14787 + u32 cmd_flags,
14788 + u16 token,
14789 + u16 if_id,
14790 + struct dpsw_if_attr *attr);
14791 +
14792 +int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
14793 + u32 cmd_flags,
14794 + u16 token,
14795 + u16 if_id,
14796 + u16 frame_length);
14797 +
14798 +int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
14799 + u32 cmd_flags,
14800 + u16 token,
14801 + u16 if_id,
14802 + u16 *frame_length);
14803 +
14804 +/**
14805 + * struct dpsw_vlan_cfg - VLAN Configuration
14806 + * @fdb_id: Forwarding Data Base
14807 + */
14808 +struct dpsw_vlan_cfg {
14809 + u16 fdb_id;
14810 +};
14811 +
14812 +int dpsw_vlan_add(struct fsl_mc_io *mc_io,
14813 + u32 cmd_flags,
14814 + u16 token,
14815 + u16 vlan_id,
14816 + const struct dpsw_vlan_cfg *cfg);
14817 +
14818 +/**
14819 + * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces
14820 + * @num_ifs: The number of interfaces that are assigned to the egress
14821 + * list for this VLAN
14822 + * @if_id: The set of interfaces that are
14823 + * assigned to the egress list for this VLAN
14824 + */
14825 +struct dpsw_vlan_if_cfg {
14826 + u16 num_ifs;
14827 + u16 if_id[DPSW_MAX_IF];
14828 +};
14829 +
14830 +int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
14831 + u32 cmd_flags,
14832 + u16 token,
14833 + u16 vlan_id,
14834 + const struct dpsw_vlan_if_cfg *cfg);
14835 +
14836 +int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
14837 + u32 cmd_flags,
14838 + u16 token,
14839 + u16 vlan_id,
14840 + const struct dpsw_vlan_if_cfg *cfg);
14841 +
14842 +int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
14843 + u32 cmd_flags,
14844 + u16 token,
14845 + u16 vlan_id,
14846 + const struct dpsw_vlan_if_cfg *cfg);
14847 +
14848 +int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
14849 + u32 cmd_flags,
14850 + u16 token,
14851 + u16 vlan_id,
14852 + const struct dpsw_vlan_if_cfg *cfg);
14853 +
14854 +int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
14855 + u32 cmd_flags,
14856 + u16 token,
14857 + u16 vlan_id,
14858 + const struct dpsw_vlan_if_cfg *cfg);
14859 +
14860 +int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
14861 + u32 cmd_flags,
14862 + u16 token,
14863 + u16 vlan_id,
14864 + const struct dpsw_vlan_if_cfg *cfg);
14865 +
14866 +int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
14867 + u32 cmd_flags,
14868 + u16 token,
14869 + u16 vlan_id);
14870 +
14871 +/**
14872 + * struct dpsw_vlan_attr - VLAN attributes
14873 + * @fdb_id: Associated FDB ID
14874 + * @num_ifs: Number of interfaces
14875 + * @num_untagged_ifs: Number of untagged interfaces
14876 + * @num_flooding_ifs: Number of flooding interfaces
14877 + */
14878 +struct dpsw_vlan_attr {
14879 + u16 fdb_id;
14880 + u16 num_ifs;
14881 + u16 num_untagged_ifs;
14882 + u16 num_flooding_ifs;
14883 +};
14884 +
14885 +int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
14886 + u32 cmd_flags,
14887 + u16 token,
14888 + u16 vlan_id,
14889 + struct dpsw_vlan_attr *attr);
14890 +
14891 +int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
14892 + u32 cmd_flags,
14893 + u16 token,
14894 + u16 vlan_id,
14895 + struct dpsw_vlan_if_cfg *cfg);
14896 +
14897 +int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
14898 + u32 cmd_flags,
14899 + u16 token,
14900 + u16 vlan_id,
14901 + struct dpsw_vlan_if_cfg *cfg);
14902 +
14903 +int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
14904 + u32 cmd_flags,
14905 + u16 token,
14906 + u16 vlan_id,
14907 + struct dpsw_vlan_if_cfg *cfg);
14908 +
14909 +/**
14910 + * struct dpsw_fdb_cfg - FDB Configuration
14911 + * @num_fdb_entries: Number of FDB entries
14912 + * @fdb_aging_time: Aging time in seconds
14913 + */
14914 +struct dpsw_fdb_cfg {
14915 + u16 num_fdb_entries;
14916 + u16 fdb_aging_time;
14917 +};
14918 +
14919 +int dpsw_fdb_add(struct fsl_mc_io *mc_io,
14920 + u32 cmd_flags,
14921 + u16 token,
14922 + u16 *fdb_id,
14923 + const struct dpsw_fdb_cfg *cfg);
14924 +
14925 +int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
14926 + u32 cmd_flags,
14927 + u16 token,
14928 + u16 fdb_id);
14929 +
14930 +/**
14931 + * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic
14932 + * @DPSW_FDB_ENTRY_STATIC: Static entry
14933 + * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry
14934 + */
14935 +enum dpsw_fdb_entry_type {
14936 + DPSW_FDB_ENTRY_STATIC = 0,
14937 + DPSW_FDB_ENTRY_DINAMIC = 1
14938 +};
14939 +
14940 +/**
14941 + * struct dpsw_fdb_unicast_cfg - Unicast entry configuration
14942 + * @type: Select static or dynamic entry
14943 + * @mac_addr: MAC address
14944 + * @if_egress: Egress interface ID
14945 + */
14946 +struct dpsw_fdb_unicast_cfg {
14947 + enum dpsw_fdb_entry_type type;
14948 + u8 mac_addr[6];
14949 + u16 if_egress;
14950 +};
14951 +
14952 +int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
14953 + u32 cmd_flags,
14954 + u16 token,
14955 + u16 fdb_id,
14956 + const struct dpsw_fdb_unicast_cfg *cfg);
14957 +
14958 +int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
14959 + u32 cmd_flags,
14960 + u16 token,
14961 + u16 fdb_id,
14962 + struct dpsw_fdb_unicast_cfg *cfg);
14963 +
14964 +int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
14965 + u32 cmd_flags,
14966 + u16 token,
14967 + u16 fdb_id,
14968 + const struct dpsw_fdb_unicast_cfg *cfg);
14969 +
14970 +/**
14971 + * struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration
14972 + * @type: Select static or dynamic entry
14973 + * @mac_addr: MAC address
14974 + * @num_ifs: Number of external and internal interfaces
14975 + * @if_id: Egress interface IDs
14976 + */
14977 +struct dpsw_fdb_multicast_cfg {
14978 + enum dpsw_fdb_entry_type type;
14979 + u8 mac_addr[6];
14980 + u16 num_ifs;
14981 + u16 if_id[DPSW_MAX_IF];
14982 +};
14983 +
14984 +int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
14985 + u32 cmd_flags,
14986 + u16 token,
14987 + u16 fdb_id,
14988 + const struct dpsw_fdb_multicast_cfg *cfg);
14989 +
14990 +int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
14991 + u32 cmd_flags,
14992 + u16 token,
14993 + u16 fdb_id,
14994 + struct dpsw_fdb_multicast_cfg *cfg);
14995 +
14996 +int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
14997 + u32 cmd_flags,
14998 + u16 token,
14999 + u16 fdb_id,
15000 + const struct dpsw_fdb_multicast_cfg *cfg);
15001 +
15002 +/**
15003 + * enum dpsw_fdb_learning_mode - Auto-learning modes
15004 + * @DPSW_FDB_LEARNING_MODE_DIS: Disable Auto-learning
15005 + * @DPSW_FDB_LEARNING_MODE_HW: Enable HW auto-Learning
15006 + * @DPSW_FDB_LEARNING_MODE_NON_SECURE: Enable None secure learning by CPU
15007 + * @DPSW_FDB_LEARNING_MODE_SECURE: Enable secure learning by CPU
15008 + *
15009 + * NONE - SECURE LEARNING
15010 + * SMAC found DMAC found CTLU Action
15011 + * v v Forward frame to
15012 + * 1. DMAC destination
15013 + * - v Forward frame to
15014 + * 1. DMAC destination
15015 + * 2. Control interface
15016 + * v - Forward frame to
15017 + * 1. Flooding list of interfaces
15018 + * - - Forward frame to
15019 + * 1. Flooding list of interfaces
15020 + * 2. Control interface
15021 + * SECURE LEARNING
15022 + * SMAC found DMAC found CTLU Action
15023 + * v v Forward frame to
15024 + * 1. DMAC destination
15025 + * - v Forward frame to
15026 + * 1. Control interface
15027 + * v - Forward frame to
15028 + * 1. Flooding list of interfaces
15029 + * - - Forward frame to
15030 + * 1. Control interface
15031 + */
15032 +enum dpsw_fdb_learning_mode {
15033 + DPSW_FDB_LEARNING_MODE_DIS = 0,
15034 + DPSW_FDB_LEARNING_MODE_HW = 1,
15035 + DPSW_FDB_LEARNING_MODE_NON_SECURE = 2,
15036 + DPSW_FDB_LEARNING_MODE_SECURE = 3
15037 +};
15038 +
15039 +int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
15040 + u32 cmd_flags,
15041 + u16 token,
15042 + u16 fdb_id,
15043 + enum dpsw_fdb_learning_mode mode);
15044 +
15045 +/**
15046 + * struct dpsw_fdb_attr - FDB Attributes
15047 + * @max_fdb_entries: Number of FDB entries
15048 + * @fdb_aging_time: Aging time in seconds
15049 + * @learning_mode: Learning mode
15050 + * @num_fdb_mc_groups: Current number of multicast groups
15051 + * @max_fdb_mc_groups: Maximum number of multicast groups
15052 + */
15053 +struct dpsw_fdb_attr {
15054 + u16 max_fdb_entries;
15055 + u16 fdb_aging_time;
15056 + enum dpsw_fdb_learning_mode learning_mode;
15057 + u16 num_fdb_mc_groups;
15058 + u16 max_fdb_mc_groups;
15059 +};
15060 +
15061 +int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
15062 + u32 cmd_flags,
15063 + u16 token,
15064 + u16 fdb_id,
15065 + struct dpsw_fdb_attr *attr);
15066 +
15067 +/**
15068 + * struct dpsw_acl_cfg - ACL Configuration
15069 + * @max_entries: Number of FDB entries
15070 + */
15071 +struct dpsw_acl_cfg {
15072 + u16 max_entries;
15073 +};
15074 +
15075 +/**
15076 + * struct dpsw_acl_fields - ACL fields.
15077 + * @l2_dest_mac: Destination MAC address: BPDU, Multicast, Broadcast, Unicast,
15078 + * slow protocols, MVRP, STP
15079 + * @l2_source_mac: Source MAC address
15080 + * @l2_tpid: Layer 2 (Ethernet) protocol type, used to identify the following
15081 + * protocols: MPLS, PTP, PFC, ARP, Jumbo frames, LLDP, IEEE802.1ae,
15082 + * Q-in-Q, IPv4, IPv6, PPPoE
15083 + * @l2_pcp_dei: indicate which protocol is encapsulated in the payload
15084 + * @l2_vlan_id: layer 2 VLAN ID
15085 + * @l2_ether_type: layer 2 Ethernet type
15086 + * @l3_dscp: Layer 3 differentiated services code point
15087 + * @l3_protocol: Tells the Network layer at the destination host, to which
15088 + * Protocol this packet belongs to. The following protocol are
15089 + * supported: ICMP, IGMP, IPv4 (encapsulation), TCP, IPv6
15090 + * (encapsulation), GRE, PTP
15091 + * @l3_source_ip: Source IPv4 IP
15092 + * @l3_dest_ip: Destination IPv4 IP
15093 + * @l4_source_port: Source TCP/UDP Port
15094 + * @l4_dest_port: Destination TCP/UDP Port
15095 + */
15096 +struct dpsw_acl_fields {
15097 + u8 l2_dest_mac[6];
15098 + u8 l2_source_mac[6];
15099 + u16 l2_tpid;
15100 + u8 l2_pcp_dei;
15101 + u16 l2_vlan_id;
15102 + u16 l2_ether_type;
15103 + u8 l3_dscp;
15104 + u8 l3_protocol;
15105 + u32 l3_source_ip;
15106 + u32 l3_dest_ip;
15107 + u16 l4_source_port;
15108 + u16 l4_dest_port;
15109 +};
15110 +
15111 +/**
15112 + * struct dpsw_acl_key - ACL key
15113 + * @match: Match fields
15114 + * @mask: Mask: b'1 - valid, b'0 don't care
15115 + */
15116 +struct dpsw_acl_key {
15117 + struct dpsw_acl_fields match;
15118 + struct dpsw_acl_fields mask;
15119 +};
15120 +
15121 +/**
15122 + * enum dpsw_acl_action
15123 + * @DPSW_ACL_ACTION_DROP: Drop frame
15124 + * @DPSW_ACL_ACTION_REDIRECT: Redirect to certain port
15125 + * @DPSW_ACL_ACTION_ACCEPT: Accept frame
15126 + * @DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF: Redirect to control interface
15127 + */
15128 +enum dpsw_acl_action {
15129 + DPSW_ACL_ACTION_DROP,
15130 + DPSW_ACL_ACTION_REDIRECT,
15131 + DPSW_ACL_ACTION_ACCEPT,
15132 + DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF
15133 +};
15134 +
15135 +/**
15136 + * struct dpsw_acl_result - ACL action
15137 + * @action: Action should be taken when ACL entry hit
15138 + * @if_id: Interface IDs to redirect frame. Valid only if redirect selected for
15139 + * action
15140 + */
15141 +struct dpsw_acl_result {
15142 + enum dpsw_acl_action action;
15143 + u16 if_id;
15144 +};
15145 +
15146 +/**
15147 + * struct dpsw_acl_entry_cfg - ACL entry
15148 + * @key_iova: I/O virtual address of DMA-able memory filled with key after call
15149 + * to dpsw_acl_prepare_entry_cfg()
15150 + * @result: Required action when entry hit occurs
15151 + * @precedence: Precedence inside ACL 0 is lowest; This priority can not change
15152 + * during the lifetime of a Policy. It is user responsibility to
15153 + * space the priorities according to consequent rule additions.
15154 + */
15155 +struct dpsw_acl_entry_cfg {
15156 + u64 key_iova;
15157 + struct dpsw_acl_result result;
15158 + int precedence;
15159 +};
15160 +
15161 +int dpsw_acl_add(struct fsl_mc_io *mc_io,
15162 + u32 cmd_flags,
15163 + u16 token,
15164 + u16 *acl_id,
15165 + const struct dpsw_acl_cfg *cfg);
15166 +
15167 +int dpsw_acl_remove(struct fsl_mc_io *mc_io,
15168 + u32 cmd_flags,
15169 + u16 token,
15170 + u16 acl_id);
15171 +
15172 +void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
15173 + uint8_t *entry_cfg_buf);
15174 +
15175 +int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
15176 + u32 cmd_flags,
15177 + u16 token,
15178 + u16 acl_id,
15179 + const struct dpsw_acl_entry_cfg *cfg);
15180 +
15181 +int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
15182 + u32 cmd_flags,
15183 + u16 token,
15184 + u16 acl_id,
15185 + const struct dpsw_acl_entry_cfg *cfg);
15186 +
15187 +/**
15188 + * struct dpsw_acl_if_cfg - List of interfaces to Associate with ACL
15189 + * @num_ifs: Number of interfaces
15190 + * @if_id: List of interfaces
15191 + */
15192 +struct dpsw_acl_if_cfg {
15193 + u16 num_ifs;
15194 + u16 if_id[DPSW_MAX_IF];
15195 +};
15196 +
15197 +int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
15198 + u32 cmd_flags,
15199 + u16 token,
15200 + u16 acl_id,
15201 + const struct dpsw_acl_if_cfg *cfg);
15202 +
15203 +int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
15204 + u32 cmd_flags,
15205 + u16 token,
15206 + u16 acl_id,
15207 + const struct dpsw_acl_if_cfg *cfg);
15208 +
15209 +/**
15210 + * struct dpsw_acl_attr - ACL Attributes
15211 + * @max_entries: Max number of ACL entries
15212 + * @num_entries: Number of used ACL entries
15213 + * @num_ifs: Number of interfaces associated with ACL
15214 + */
15215 +struct dpsw_acl_attr {
15216 + u16 max_entries;
15217 + u16 num_entries;
15218 + u16 num_ifs;
15219 +};
15220 +
15221 +int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
15222 + u32 cmd_flags,
15223 + u16 token,
15224 + u16 acl_id,
15225 + struct dpsw_acl_attr *attr);
15226 +/**
15227 + * struct dpsw_ctrl_if_attr - Control interface attributes
15228 + * @rx_fqid: Receive FQID
15229 + * @rx_err_fqid: Receive error FQID
15230 + * @tx_err_conf_fqid: Transmit error and confirmation FQID
15231 + */
15232 +struct dpsw_ctrl_if_attr {
15233 + u32 rx_fqid;
15234 + u32 rx_err_fqid;
15235 + u32 tx_err_conf_fqid;
15236 +};
15237 +
15238 +int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
15239 + u32 cmd_flags,
15240 + u16 token,
15241 + struct dpsw_ctrl_if_attr *attr);
15242 +
15243 +/**
15244 + * Maximum number of DPBP
15245 + */
15246 +#define DPSW_MAX_DPBP 8
15247 +
15248 +/**
15249 + * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration
15250 + * @num_dpbp: Number of DPBPs
15251 + * @pools: Array of buffer pools parameters; The number of valid entries
15252 + * must match 'num_dpbp' value
15253 + */
15254 +struct dpsw_ctrl_if_pools_cfg {
15255 + u8 num_dpbp;
15256 + /**
15257 + * struct pools - Buffer pools parameters
15258 + * @dpbp_id: DPBP object ID
15259 + * @buffer_size: Buffer size
15260 + * @backup_pool: Backup pool
15261 + */
15262 + struct {
15263 + int dpbp_id;
15264 + u16 buffer_size;
15265 + int backup_pool;
15266 + } pools[DPSW_MAX_DPBP];
15267 +};
15268 +
15269 +int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
15270 + u32 cmd_flags,
15271 + u16 token,
15272 + const struct dpsw_ctrl_if_pools_cfg *cfg);
15273 +
15274 +int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
15275 + u32 cmd_flags,
15276 + u16 token);
15277 +
15278 +int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
15279 + u32 cmd_flags,
15280 + u16 token);
15281 +
15282 +int dpsw_get_api_version(struct fsl_mc_io *mc_io,
15283 + u32 cmd_flags,
15284 + u16 *major_ver,
15285 + u16 *minor_ver);
15286 +
15287 +#endif /* __FSL_DPSW_H */
15288 --- /dev/null
15289 +++ b/drivers/staging/fsl-dpaa2/ethsw/switch.c
15290 @@ -0,0 +1,1857 @@
15291 +/* Copyright 2014-2015 Freescale Semiconductor Inc.
15292 + *
15293 + * Redistribution and use in source and binary forms, with or without
15294 + * modification, are permitted provided that the following conditions are met:
15295 + * * Redistributions of source code must retain the above copyright
15296 + * notice, this list of conditions and the following disclaimer.
15297 + * * Redistributions in binary form must reproduce the above copyright
15298 + * notice, this list of conditions and the following disclaimer in the
15299 + * documentation and/or other materials provided with the distribution.
15300 + * * Neither the name of Freescale Semiconductor nor the
15301 + * names of its contributors may be used to endorse or promote products
15302 + * derived from this software without specific prior written permission.
15303 + *
15304 + *
15305 + * ALTERNATIVELY, this software may be distributed under the terms of the
15306 + * GNU General Public License ("GPL") as published by the Free Software
15307 + * Foundation, either version 2 of that License or (at your option) any
15308 + * later version.
15309 + *
15310 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
15311 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
15312 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
15313 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
15314 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
15315 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
15316 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
15317 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
15318 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
15319 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
15320 + */
15321 +
15322 +#include <linux/module.h>
15323 +#include <linux/msi.h>
15324 +
15325 +#include <linux/netdevice.h>
15326 +#include <linux/etherdevice.h>
15327 +#include <linux/rtnetlink.h>
15328 +#include <linux/if_vlan.h>
15329 +
15330 +#include <uapi/linux/if_bridge.h>
15331 +#include <net/netlink.h>
15332 +
15333 +#include "../../fsl-mc/include/mc.h"
15334 +#include "dpsw.h"
15335 +#include "dpsw-cmd.h"
15336 +
15337 +static const char ethsw_drv_version[] = "0.1";
15338 +
15339 +/* Minimal supported DPSW version */
15340 +#define DPSW_MIN_VER_MAJOR 8
15341 +#define DPSW_MIN_VER_MINOR 0
15342 +
15343 +/* IRQ index */
15344 +#define DPSW_MAX_IRQ_NUM 2
15345 +
15346 +#define ETHSW_VLAN_MEMBER 1
15347 +#define ETHSW_VLAN_UNTAGGED 2
15348 +#define ETHSW_VLAN_PVID 4
15349 +#define ETHSW_VLAN_GLOBAL 8
15350 +
15351 +/* Maximum Frame Length supported by HW (currently 10k) */
15352 +#define DPAA2_MFL (10 * 1024)
15353 +#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
15354 +#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
15355 +
15356 +struct ethsw_port_priv {
15357 + struct net_device *netdev;
15358 + struct list_head list;
15359 + u16 port_index;
15360 + struct ethsw_dev_priv *ethsw_priv;
15361 + u8 stp_state;
15362 +
15363 + char vlans[VLAN_VID_MASK + 1];
15364 +
15365 +};
15366 +
15367 +struct ethsw_dev_priv {
15368 + struct net_device *netdev;
15369 + struct fsl_mc_io *mc_io;
15370 + u16 dpsw_handle;
15371 + struct dpsw_attr sw_attr;
15372 + int dev_id;
15373 + /*TODO: redundant, we can use the slave dev list */
15374 + struct list_head port_list;
15375 +
15376 + bool flood;
15377 + bool learning;
15378 +
15379 + char vlans[VLAN_VID_MASK + 1];
15380 +};
15381 +
15382 +static int ethsw_port_stop(struct net_device *netdev);
15383 +static int ethsw_port_open(struct net_device *netdev);
15384 +
15385 +static inline void __get_priv(struct net_device *netdev,
15386 + struct ethsw_dev_priv **priv,
15387 + struct ethsw_port_priv **port_priv)
15388 +{
15389 + struct ethsw_dev_priv *_priv = NULL;
15390 + struct ethsw_port_priv *_port_priv = NULL;
15391 +
15392 + if (netdev->flags & IFF_MASTER) {
15393 + _priv = netdev_priv(netdev);
15394 + } else {
15395 + _port_priv = netdev_priv(netdev);
15396 + _priv = _port_priv->ethsw_priv;
15397 + }
15398 +
15399 + if (priv)
15400 + *priv = _priv;
15401 + if (port_priv)
15402 + *port_priv = _port_priv;
15403 +}
15404 +
15405 +/* -------------------------------------------------------------------------- */
15406 +/* ethsw netdevice ops */
15407 +
15408 +static netdev_tx_t ethsw_dropframe(struct sk_buff *skb, struct net_device *dev)
15409 +{
15410 + /* we don't support I/O for now, drop the frame */
15411 + dev_kfree_skb_any(skb);
15412 + return NETDEV_TX_OK;
15413 +}
15414 +
15415 +static int ethsw_open(struct net_device *netdev)
15416 +{
15417 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
15418 + struct list_head *pos;
15419 + struct ethsw_port_priv *port_priv = NULL;
15420 + int err;
15421 +
15422 + err = dpsw_enable(priv->mc_io, 0, priv->dpsw_handle);
15423 + if (err) {
15424 + netdev_err(netdev, "dpsw_enable err %d\n", err);
15425 + return err;
15426 + }
15427 +
15428 + list_for_each(pos, &priv->port_list) {
15429 + port_priv = list_entry(pos, struct ethsw_port_priv, list);
15430 + err = dev_open(port_priv->netdev);
15431 + if (err)
15432 + netdev_err(port_priv->netdev, "dev_open err %d\n", err);
15433 + }
15434 +
15435 + return 0;
15436 +}
15437 +
15438 +static int ethsw_stop(struct net_device *netdev)
15439 +{
15440 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
15441 + struct list_head *pos;
15442 + struct ethsw_port_priv *port_priv = NULL;
15443 + int err;
15444 +
15445 + err = dpsw_disable(priv->mc_io, 0, priv->dpsw_handle);
15446 + if (err) {
15447 + netdev_err(netdev, "dpsw_disable err %d\n", err);
15448 + return err;
15449 + }
15450 +
15451 + list_for_each(pos, &priv->port_list) {
15452 + port_priv = list_entry(pos, struct ethsw_port_priv, list);
15453 + err = dev_close(port_priv->netdev);
15454 + if (err)
15455 + netdev_err(port_priv->netdev,
15456 + "dev_close err %d\n", err);
15457 + }
15458 +
15459 + return 0;
15460 +}
15461 +
15462 +static int ethsw_add_vlan(struct net_device *netdev, u16 vid)
15463 +{
15464 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
15465 + int err;
15466 +
15467 + struct dpsw_vlan_cfg vcfg = {
15468 + /* TODO: add support for VLAN private FDBs */
15469 + .fdb_id = 0,
15470 + };
15471 + if (priv->vlans[vid]) {
15472 + netdev_err(netdev, "VLAN already configured\n");
15473 + return -EEXIST;
15474 + }
15475 +
15476 + err = dpsw_vlan_add(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg);
15477 + if (err) {
15478 + netdev_err(netdev, "dpsw_vlan_add err %d\n", err);
15479 + return err;
15480 + }
15481 + priv->vlans[vid] = ETHSW_VLAN_MEMBER;
15482 +
15483 + return 0;
15484 +}
15485 +
15486 +static int ethsw_port_add_vlan(struct net_device *netdev, u16 vid, u16 flags)
15487 +{
15488 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15489 + struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
15490 + int err;
15491 +
15492 + struct dpsw_vlan_if_cfg vcfg = {
15493 + .num_ifs = 1,
15494 + .if_id[0] = port_priv->port_index,
15495 + };
15496 +
15497 + if (port_priv->vlans[vid]) {
15498 + netdev_err(netdev, "VLAN already configured\n");
15499 + return -EEXIST;
15500 + }
15501 +
15502 + if (flags & BRIDGE_VLAN_INFO_PVID && netif_oper_up(netdev)) {
15503 + netdev_err(netdev, "interface must be down to change PVID!\n");
15504 + return -EBUSY;
15505 + }
15506 +
15507 + err = dpsw_vlan_add_if(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg);
15508 + if (err) {
15509 + netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
15510 + return err;
15511 + }
15512 + port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
15513 +
15514 + if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
15515 + err = dpsw_vlan_add_if_untagged(priv->mc_io, 0,
15516 + priv->dpsw_handle, vid, &vcfg);
15517 + if (err) {
15518 + netdev_err(netdev, "dpsw_vlan_add_if_untagged err %d\n",
15519 + err);
15520 + return err;
15521 + }
15522 + port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
15523 + }
15524 +
15525 + if (flags & BRIDGE_VLAN_INFO_PVID) {
15526 + struct dpsw_tci_cfg tci_cfg = {
15527 + /* TODO: at least add better defaults if these cannot
15528 + * be configured
15529 + */
15530 + .pcp = 0,
15531 + .dei = 0,
15532 + .vlan_id = vid,
15533 + };
15534 +
15535 + err = dpsw_if_set_tci(priv->mc_io, 0, priv->dpsw_handle,
15536 + port_priv->port_index, &tci_cfg);
15537 + if (err) {
15538 + netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
15539 + return err;
15540 + }
15541 + port_priv->vlans[vid] |= ETHSW_VLAN_PVID;
15542 + }
15543 +
15544 + return 0;
15545 +}
15546 +
15547 +static const struct nla_policy ifla_br_policy[IFLA_MAX + 1] = {
15548 + [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
15549 + [IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
15550 + [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
15551 + .len = sizeof(struct bridge_vlan_info), },
15552 +};
15553 +
15554 +static int ethsw_setlink_af_spec(struct net_device *netdev,
15555 + struct nlattr **tb)
15556 +{
15557 + struct bridge_vlan_info *vinfo;
15558 + struct ethsw_dev_priv *priv = NULL;
15559 + struct ethsw_port_priv *port_priv = NULL;
15560 + int err = 0;
15561 +
15562 + if (!tb[IFLA_BRIDGE_VLAN_INFO]) {
15563 + netdev_err(netdev, "no VLAN INFO in nlmsg\n");
15564 + return -EOPNOTSUPP;
15565 + }
15566 +
15567 + vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
15568 +
15569 + if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
15570 + return -EINVAL;
15571 +
15572 + __get_priv(netdev, &priv, &port_priv);
15573 +
15574 + if (!port_priv || !priv->vlans[vinfo->vid]) {
15575 + /* command targets switch device or this is a new VLAN */
15576 + err = ethsw_add_vlan(priv->netdev, vinfo->vid);
15577 + if (err)
15578 + return err;
15579 +
15580 + /* command targets switch device; mark it*/
15581 + if (!port_priv)
15582 + priv->vlans[vinfo->vid] |= ETHSW_VLAN_GLOBAL;
15583 + }
15584 +
15585 + if (port_priv) {
15586 + /* command targets switch port */
15587 + err = ethsw_port_add_vlan(netdev, vinfo->vid, vinfo->flags);
15588 + if (err)
15589 + return err;
15590 + }
15591 +
15592 + return 0;
15593 +}
15594 +
15595 +static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = {
15596 + [IFLA_BRPORT_STATE] = { .type = NLA_U8 },
15597 + [IFLA_BRPORT_COST] = { .type = NLA_U32 },
15598 + [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
15599 + [IFLA_BRPORT_MODE] = { .type = NLA_U8 },
15600 + [IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
15601 + [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
15602 + [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
15603 + [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
15604 +};
15605 +
15606 +static int ethsw_set_learning(struct net_device *netdev, u8 flag)
15607 +{
15608 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
15609 + enum dpsw_fdb_learning_mode learn_mode;
15610 + int err;
15611 +
15612 + if (flag)
15613 + learn_mode = DPSW_FDB_LEARNING_MODE_HW;
15614 + else
15615 + learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
15616 +
15617 + err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle,
15618 + 0, learn_mode);
15619 + if (err) {
15620 + netdev_err(netdev, "dpsw_fdb_set_learning_mode err %d\n", err);
15621 + return err;
15622 + }
15623 + priv->learning = !!flag;
15624 +
15625 + return 0;
15626 +}
15627 +
15628 +static int ethsw_port_set_flood(struct net_device *netdev, u8 flag)
15629 +{
15630 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15631 + struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
15632 + int err;
15633 +
15634 + err = dpsw_if_set_flooding(priv->mc_io, 0, priv->dpsw_handle,
15635 + port_priv->port_index, (int)flag);
15636 + if (err) {
15637 + netdev_err(netdev, "dpsw_if_set_flooding err %d\n", err);
15638 + return err;
15639 + }
15640 + priv->flood = !!flag;
15641 +
15642 + return 0;
15643 +}
15644 +
15645 +static int ethsw_port_set_state(struct net_device *netdev, u8 state)
15646 +{
15647 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
15648 + struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
15649 + u8 old_state = port_priv->stp_state;
15650 + int err;
15651 +
15652 + struct dpsw_stp_cfg stp_cfg = {
15653 + .vlan_id = 1,
15654 + .state = state,
15655 + };
15656 + /* TODO: check port state, interface may be down */
15657 +
15658 + if (state > BR_STATE_BLOCKING)
15659 + return -EINVAL;
15660 +
15661 + if (state == port_priv->stp_state)
15662 + return 0;
15663 +
15664 + if (state == BR_STATE_DISABLED) {
15665 + port_priv->stp_state = state;
15666 +
15667 + err = ethsw_port_stop(netdev);
15668 + if (err)
15669 + goto error;
15670 + } else {
15671 + err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle,
15672 + port_priv->port_index, &stp_cfg);
15673 + if (err) {
15674 + netdev_err(netdev, "dpsw_if_set_stp err %d\n", err);
15675 + return err;
15676 + }
15677 +
15678 + port_priv->stp_state = state;
15679 +
15680 + if (old_state == BR_STATE_DISABLED) {
15681 + err = ethsw_port_open(netdev);
15682 + if (err)
15683 + goto error;
15684 + }
15685 + }
15686 +
15687 + return 0;
15688 +error:
15689 + port_priv->stp_state = old_state;
15690 + return err;
15691 +}
15692 +
15693 +static int ethsw_setlink_protinfo(struct net_device *netdev,
15694 + struct nlattr **tb)
15695 +{
15696 + struct ethsw_dev_priv *priv;
15697 + struct ethsw_port_priv *port_priv = NULL;
15698 + int err = 0;
15699 +
15700 + __get_priv(netdev, &priv, &port_priv);
15701 +
15702 + if (tb[IFLA_BRPORT_LEARNING]) {
15703 + u8 flag = nla_get_u8(tb[IFLA_BRPORT_LEARNING]);
15704 +
15705 + if (port_priv)
15706 + netdev_warn(netdev,
15707 + "learning set on whole switch dev\n");
15708 +
15709 + err = ethsw_set_learning(priv->netdev, flag);
15710 + if (err)
15711 + return err;
15712 +
15713 + } else if (tb[IFLA_BRPORT_UNICAST_FLOOD] && port_priv) {
15714 + u8 flag = nla_get_u8(tb[IFLA_BRPORT_UNICAST_FLOOD]);
15715 +
15716 + err = ethsw_port_set_flood(port_priv->netdev, flag);
15717 + if (err)
15718 + return err;
15719 +
15720 + } else if (tb[IFLA_BRPORT_STATE] && port_priv) {
15721 + u8 state = nla_get_u8(tb[IFLA_BRPORT_STATE]);
15722 +
15723 + err = ethsw_port_set_state(port_priv->netdev, state);
15724 + if (err)
15725 + return err;
15726 +
15727 + } else {
15728 + return -EOPNOTSUPP;
15729 + }
15730 +
15731 + return 0;
15732 +}
15733 +
15734 +static int ethsw_setlink(struct net_device *netdev,
15735 + struct nlmsghdr *nlh,
15736 + u16 flags)
15737 +{
15738 + struct nlattr *attr;
15739 + struct nlattr *tb[((IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX) ?
15740 + IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX) + 1];
15741 + int err = 0;
15742 +
15743 + attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
15744 + if (attr) {
15745 + err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr,
15746 + ifla_br_policy);
15747 + if (err) {
15748 + netdev_err(netdev,
15749 + "nla_parse_nested for br_policy err %d\n",
15750 + err);
15751 + return err;
15752 + }
15753 +
15754 + err = ethsw_setlink_af_spec(netdev, tb);
15755 + return err;
15756 + }
15757 +
15758 + attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
15759 + if (attr) {
15760 + err = nla_parse_nested(tb, IFLA_BRPORT_MAX, attr,
15761 + ifla_brport_policy);
15762 + if (err) {
15763 + netdev_err(netdev,
15764 + "nla_parse_nested for brport_policy err %d\n",
15765 + err);
15766 + return err;
15767 + }
15768 +
15769 + err = ethsw_setlink_protinfo(netdev, tb);
15770 + return err;
15771 + }
15772 +
15773 + netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC/PROTINFO\n");
15774 + return -EOPNOTSUPP;
15775 +}
15776 +
15777 +static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev,
15778 + struct ethsw_dev_priv *priv)
15779 +{
15780 + u8 operstate = netif_running(netdev) ? netdev->operstate : IF_OPER_DOWN;
15781 + int iflink;
15782 + int err;
15783 +
15784 + err = nla_put_string(skb, IFLA_IFNAME, netdev->name);
15785 + if (err)
15786 + goto nla_put_err;
15787 + err = nla_put_u32(skb, IFLA_MASTER, priv->netdev->ifindex);
15788 + if (err)
15789 + goto nla_put_err;
15790 + err = nla_put_u32(skb, IFLA_MTU, netdev->mtu);
15791 + if (err)
15792 + goto nla_put_err;
15793 + err = nla_put_u8(skb, IFLA_OPERSTATE, operstate);
15794 + if (err)
15795 + goto nla_put_err;
15796 + if (netdev->addr_len) {
15797 + err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len,
15798 + netdev->dev_addr);
15799 + if (err)
15800 + goto nla_put_err;
15801 + }
15802 +
15803 + iflink = dev_get_iflink(netdev);
15804 + if (netdev->ifindex != iflink) {
15805 + err = nla_put_u32(skb, IFLA_LINK, iflink);
15806 + if (err)
15807 + goto nla_put_err;
15808 + }
15809 +
15810 + return 0;
15811 +
15812 +nla_put_err:
15813 + netdev_err(netdev, "nla_put_ err %d\n", err);
15814 + return err;
15815 +}
15816 +
15817 +static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev,
15818 + struct ethsw_port_priv *port_priv)
15819 +{
15820 + struct nlattr *nest;
15821 + int err;
15822 +
15823 + u8 stp_state = port_priv->stp_state;
15824 +
15825 + if (port_priv->stp_state == DPSW_STP_STATE_BLOCKING)
15826 + stp_state = BR_STATE_BLOCKING;
15827 +
15828 + nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
15829 + if (!nest) {
15830 + netdev_err(netdev, "nla_nest_start failed\n");
15831 + return -ENOMEM;
15832 + }
15833 +
15834 + err = nla_put_u8(skb, IFLA_BRPORT_STATE, stp_state);
15835 + if (err)
15836 + goto nla_put_err;
15837 + err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0);
15838 + if (err)
15839 + goto nla_put_err;
15840 + err = nla_put_u32(skb, IFLA_BRPORT_COST, 0);
15841 + if (err)
15842 + goto nla_put_err;
15843 + err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0);
15844 + if (err)
15845 + goto nla_put_err;
15846 + err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0);
15847 + if (err)
15848 + goto nla_put_err;
15849 + err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0);
15850 + if (err)
15851 + goto nla_put_err;
15852 + err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0);
15853 + if (err)
15854 + goto nla_put_err;
15855 + err = nla_put_u8(skb, IFLA_BRPORT_LEARNING,
15856 + port_priv->ethsw_priv->learning);
15857 + if (err)
15858 + goto nla_put_err;
15859 + err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
15860 + port_priv->ethsw_priv->flood);
15861 + if (err)
15862 + goto nla_put_err;
15863 + nla_nest_end(skb, nest);
15864 +
15865 + return 0;
15866 +
15867 +nla_put_err:
15868 + netdev_err(netdev, "nla_put_ err %d\n", err);
15869 + nla_nest_cancel(skb, nest);
15870 + return err;
15871 +}
15872 +
15873 +static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev,
15874 + struct ethsw_dev_priv *priv,
15875 + struct ethsw_port_priv *port_priv)
15876 +{
15877 + struct nlattr *nest;
15878 + struct bridge_vlan_info vinfo;
15879 + const char *vlans;
15880 + u16 i;
15881 + int err;
15882 +
15883 + nest = nla_nest_start(skb, IFLA_AF_SPEC);
15884 + if (!nest) {
15885 + netdev_err(netdev, "nla_nest_start failed");
15886 + return -ENOMEM;
15887 + }
15888 +
15889 + if (port_priv)
15890 + vlans = port_priv->vlans;
15891 + else
15892 + vlans = priv->vlans;
15893 +
15894 + for (i = 0; i < VLAN_VID_MASK + 1; i++) {
15895 + vinfo.flags = 0;
15896 + vinfo.vid = i;
15897 +
15898 + if (vlans[i] & ETHSW_VLAN_UNTAGGED)
15899 + vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
15900 +
15901 + if (vlans[i] & ETHSW_VLAN_PVID)
15902 + vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
15903 +
15904 + if (vlans[i] & ETHSW_VLAN_MEMBER) {
15905 + err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
15906 + sizeof(vinfo), &vinfo);
15907 + if (err)
15908 + goto nla_put_err;
15909 + }
15910 + }
15911 +
15912 + nla_nest_end(skb, nest);
15913 +
15914 + return 0;
15915 +nla_put_err:
15916 + netdev_err(netdev, "nla_put_ err %d\n", err);
15917 + nla_nest_cancel(skb, nest);
15918 + return err;
15919 +}
15920 +
15921 +static int ethsw_getlink(struct sk_buff *skb, u32 pid, u32 seq,
15922 + struct net_device *netdev, u32 filter_mask,
15923 + int nlflags)
15924 +{
15925 + struct ethsw_dev_priv *priv;
15926 + struct ethsw_port_priv *port_priv = NULL;
15927 + struct ifinfomsg *hdr;
15928 + struct nlmsghdr *nlh;
15929 + int err;
15930 +
15931 + __get_priv(netdev, &priv, &port_priv);
15932 +
15933 + nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI);
15934 + if (!nlh)
15935 + return -EMSGSIZE;
15936 +
15937 + hdr = nlmsg_data(nlh);
15938 + memset(hdr, 0, sizeof(*hdr));
15939 + hdr->ifi_family = AF_BRIDGE;
15940 + hdr->ifi_type = netdev->type;
15941 + hdr->ifi_index = netdev->ifindex;
15942 + hdr->ifi_flags = dev_get_flags(netdev);
15943 +
15944 + err = __nla_put_netdev(skb, netdev, priv);
15945 + if (err)
15946 + goto nla_put_err;
15947 +
15948 + if (port_priv) {
15949 + err = __nla_put_port(skb, netdev, port_priv);
15950 + if (err)
15951 + goto nla_put_err;
15952 + }
15953 +
15954 + /* Check if the VID information is requested */
15955 + if (filter_mask & RTEXT_FILTER_BRVLAN) {
15956 + err = __nla_put_vlan(skb, netdev, priv, port_priv);
15957 + if (err)
15958 + goto nla_put_err;
15959 + }
15960 +
15961 + nlmsg_end(skb, nlh);
15962 + return skb->len;
15963 +
15964 +nla_put_err:
15965 + nlmsg_cancel(skb, nlh);
15966 + return -EMSGSIZE;
15967 +}
15968 +
15969 +static int ethsw_dellink_switch(struct ethsw_dev_priv *priv, u16 vid)
15970 +{
15971 + struct list_head *pos;
15972 + struct ethsw_port_priv *ppriv_local = NULL;
15973 + int err = 0;
15974 +
15975 + if (!priv->vlans[vid])
15976 + return -ENOENT;
15977 +
15978 + err = dpsw_vlan_remove(priv->mc_io, 0, priv->dpsw_handle, vid);
15979 + if (err) {
15980 + netdev_err(priv->netdev, "dpsw_vlan_remove err %d\n", err);
15981 + return err;
15982 + }
15983 + priv->vlans[vid] = 0;
15984 +
15985 + list_for_each(pos, &priv->port_list) {
15986 + ppriv_local = list_entry(pos, struct ethsw_port_priv,
15987 + list);
15988 + ppriv_local->vlans[vid] = 0;
15989 + }
15990 +
15991 + return 0;
15992 +}
15993 +
15994 +static int ethsw_dellink_port(struct ethsw_dev_priv *priv,
15995 + struct ethsw_port_priv *port_priv,
15996 + u16 vid)
15997 +{
15998 + struct list_head *pos;
15999 + struct ethsw_port_priv *ppriv_local = NULL;
16000 + struct dpsw_vlan_if_cfg vcfg = {
16001 + .num_ifs = 1,
16002 + .if_id[0] = port_priv->port_index,
16003 + };
16004 + unsigned int count = 0;
16005 + int err = 0;
16006 +
16007 + if (!port_priv->vlans[vid])
16008 + return -ENOENT;
16009 +
16010 + /* VLAN will be deleted from switch if global flag is not set
16011 + * and is configured on only one port
16012 + */
16013 + if (!(priv->vlans[vid] & ETHSW_VLAN_GLOBAL)) {
16014 + list_for_each(pos, &priv->port_list) {
16015 + ppriv_local = list_entry(pos, struct ethsw_port_priv,
16016 + list);
16017 + if (ppriv_local->vlans[vid] & ETHSW_VLAN_MEMBER)
16018 + count++;
16019 + }
16020 +
16021 + if (count == 1)
16022 + return ethsw_dellink_switch(priv, vid);
16023 + }
16024 +
16025 + err = dpsw_vlan_remove_if(priv->mc_io, 0, priv->dpsw_handle,
16026 + vid, &vcfg);
16027 + if (err) {
16028 + netdev_err(priv->netdev, "dpsw_vlan_remove_if err %d\n", err);
16029 + return err;
16030 + }
16031 + port_priv->vlans[vid] = 0;
16032 + return 0;
16033 +}
16034 +
16035 +static int ethsw_dellink(struct net_device *netdev,
16036 + struct nlmsghdr *nlh,
16037 + u16 flags)
16038 +{
16039 + struct nlattr *tb[IFLA_BRIDGE_MAX + 1];
16040 + struct nlattr *spec;
16041 + struct bridge_vlan_info *vinfo;
16042 + struct ethsw_dev_priv *priv;
16043 + struct ethsw_port_priv *port_priv = NULL;
16044 + int err = 0;
16045 +
16046 + spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
16047 + if (!spec)
16048 + return 0;
16049 +
16050 + err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy);
16051 + if (err)
16052 + return err;
16053 +
16054 + if (!tb[IFLA_BRIDGE_VLAN_INFO])
16055 + return -EOPNOTSUPP;
16056 +
16057 + vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
16058 +
16059 + if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
16060 + return -EINVAL;
16061 +
16062 + __get_priv(netdev, &priv, &port_priv);
16063 +
16064 + /* decide if command targets switch device or port */
16065 + if (!port_priv)
16066 + err = ethsw_dellink_switch(priv, vinfo->vid);
16067 + else
16068 + err = ethsw_dellink_port(priv, port_priv, vinfo->vid);
16069 +
16070 + return err;
16071 +}
16072 +
16073 +static const struct net_device_ops ethsw_ops = {
16074 + .ndo_open = &ethsw_open,
16075 + .ndo_stop = &ethsw_stop,
16076 +
16077 + .ndo_bridge_setlink = &ethsw_setlink,
16078 + .ndo_bridge_getlink = &ethsw_getlink,
16079 + .ndo_bridge_dellink = &ethsw_dellink,
16080 +
16081 + .ndo_start_xmit = &ethsw_dropframe,
16082 +};
16083 +
16084 +/*--------------------------------------------------------------------------- */
16085 +/* switch port netdevice ops */
16086 +
16087 +static int _ethsw_port_carrier_state_sync(struct net_device *netdev)
16088 +{
16089 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
16090 + struct dpsw_link_state state;
16091 + int err;
16092 +
16093 + err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
16094 + port_priv->ethsw_priv->dpsw_handle,
16095 + port_priv->port_index, &state);
16096 + if (unlikely(err)) {
16097 + netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
16098 + return err;
16099 + }
16100 +
16101 + WARN_ONCE(state.up > 1, "Garbage read into link_state");
16102 +
16103 + if (state.up)
16104 + netif_carrier_on(port_priv->netdev);
16105 + else
16106 + netif_carrier_off(port_priv->netdev);
16107 +
16108 + return 0;
16109 +}
16110 +
16111 +static int ethsw_port_open(struct net_device *netdev)
16112 +{
16113 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
16114 + int err;
16115 +
16116 + err = dpsw_if_enable(port_priv->ethsw_priv->mc_io, 0,
16117 + port_priv->ethsw_priv->dpsw_handle,
16118 + port_priv->port_index);
16119 + if (err) {
16120 + netdev_err(netdev, "dpsw_if_enable err %d\n", err);
16121 + return err;
16122 + }
16123 +
16124 + /* sync carrier state */
16125 + err = _ethsw_port_carrier_state_sync(netdev);
16126 + if (err) {
16127 + netdev_err(netdev, "_ethsw_port_carrier_state_sync err %d\n",
16128 + err);
16129 + goto err_carrier_sync;
16130 + }
16131 +
16132 + return 0;
16133 +
16134 +err_carrier_sync:
16135 + dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0,
16136 + port_priv->ethsw_priv->dpsw_handle,
16137 + port_priv->port_index);
16138 + return err;
16139 +}
16140 +
16141 +static int ethsw_port_stop(struct net_device *netdev)
16142 +{
16143 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
16144 + int err;
16145 +
16146 + err = dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0,
16147 + port_priv->ethsw_priv->dpsw_handle,
16148 + port_priv->port_index);
16149 + if (err) {
16150 + netdev_err(netdev, "dpsw_if_disable err %d\n", err);
16151 + return err;
16152 + }
16153 +
16154 + return 0;
16155 +}
16156 +
16157 +static int ethsw_port_fdb_add_uc(struct net_device *netdev,
16158 + const unsigned char *addr)
16159 +{
16160 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
16161 + struct dpsw_fdb_unicast_cfg entry = {0};
16162 + int err;
16163 +
16164 + entry.if_egress = port_priv->port_index;
16165 + entry.type = DPSW_FDB_ENTRY_STATIC;
16166 + ether_addr_copy(entry.mac_addr, addr);
16167 +
16168 + err = dpsw_fdb_add_unicast(port_priv->ethsw_priv->mc_io, 0,
16169 + port_priv->ethsw_priv->dpsw_handle,
16170 + 0, &entry);
16171 + if (err)
16172 + netdev_err(netdev, "dpsw_fdb_add_unicast err %d\n", err);
16173 + return err;
16174 +}
16175 +
16176 +static int ethsw_port_fdb_del_uc(struct net_device *netdev,
16177 + const unsigned char *addr)
16178 +{
16179 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
16180 + struct dpsw_fdb_unicast_cfg entry = {0};
16181 + int err;
16182 +
16183 + entry.if_egress = port_priv->port_index;
16184 + entry.type = DPSW_FDB_ENTRY_STATIC;
16185 + ether_addr_copy(entry.mac_addr, addr);
16186 +
16187 + err = dpsw_fdb_remove_unicast(port_priv->ethsw_priv->mc_io, 0,
16188 + port_priv->ethsw_priv->dpsw_handle,
16189 + 0, &entry);
16190 + if (err)
16191 + netdev_err(netdev, "dpsw_fdb_remove_unicast err %d\n", err);
16192 + return err;
16193 +}
16194 +
16195 +static int ethsw_port_fdb_add_mc(struct net_device *netdev,
16196 + const unsigned char *addr)
16197 +{
16198 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
16199 + struct dpsw_fdb_multicast_cfg entry = {0};
16200 + int err;
16201 +
16202 + ether_addr_copy(entry.mac_addr, addr);
16203 + entry.type = DPSW_FDB_ENTRY_STATIC;
16204 + entry.num_ifs = 1;
16205 + entry.if_id[0] = port_priv->port_index;
16206 +
16207 + err = dpsw_fdb_add_multicast(port_priv->ethsw_priv->mc_io, 0,
16208 + port_priv->ethsw_priv->dpsw_handle,
16209 + 0, &entry);
16210 + if (err)
16211 + netdev_err(netdev, "dpsw_fdb_add_multicast err %d\n", err);
16212 + return err;
16213 +}
16214 +
16215 +static int ethsw_port_fdb_del_mc(struct net_device *netdev,
16216 + const unsigned char *addr)
16217 +{
16218 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
16219 + struct dpsw_fdb_multicast_cfg entry = {0};
16220 + int err;
16221 +
16222 + ether_addr_copy(entry.mac_addr, addr);
16223 + entry.type = DPSW_FDB_ENTRY_STATIC;
16224 + entry.num_ifs = 1;
16225 + entry.if_id[0] = port_priv->port_index;
16226 +
16227 + err = dpsw_fdb_remove_multicast(port_priv->ethsw_priv->mc_io, 0,
16228 + port_priv->ethsw_priv->dpsw_handle,
16229 + 0, &entry);
16230 + if (err)
16231 + netdev_err(netdev, "dpsw_fdb_remove_multicast err %d\n", err);
16232 + return err;
16233 +}
16234 +
16235 +static int _lookup_address(struct net_device *netdev, int is_uc,
16236 + const unsigned char *addr)
16237 +{
16238 + struct netdev_hw_addr *ha;
16239 + struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
16240 +
16241 + netif_addr_lock_bh(netdev);
16242 + list_for_each_entry(ha, &list->list, list) {
16243 + if (ether_addr_equal(ha->addr, addr)) {
16244 + netif_addr_unlock_bh(netdev);
16245 + return 1;
16246 + }
16247 + }
16248 + netif_addr_unlock_bh(netdev);
16249 + return 0;
16250 +}
16251 +
16252 +static int ethsw_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
16253 + struct net_device *netdev,
16254 + const unsigned char *addr, u16 vid,
16255 + u16 flags)
16256 +{
16257 + struct list_head *pos;
16258 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
16259 + struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
16260 + int err;
16261 +
16262 + /* TODO: add replace support when added to iproute bridge */
16263 + if (!(flags & NLM_F_REQUEST)) {
16264 + netdev_err(netdev,
16265 + "ethsw_port_fdb_add unexpected flags value %08x\n",
16266 + flags);
16267 + return -EINVAL;
16268 + }
16269 +
16270 + if (is_unicast_ether_addr(addr)) {
16271 + /* if entry cannot be replaced, return error if exists */
16272 + if (flags & NLM_F_EXCL || flags & NLM_F_APPEND) {
16273 + list_for_each(pos, &priv->port_list) {
16274 + port_priv = list_entry(pos,
16275 + struct ethsw_port_priv,
16276 + list);
16277 + if (_lookup_address(port_priv->netdev,
16278 + 1, addr))
16279 + return -EEXIST;
16280 + }
16281 + }
16282 +
16283 + err = ethsw_port_fdb_add_uc(netdev, addr);
16284 + if (err) {
16285 + netdev_err(netdev, "ethsw_port_fdb_add_uc err %d\n",
16286 + err);
16287 + return err;
16288 + }
16289 +
16290 + /* we might have replaced an existing entry for a different
16291 + * switch port, make sure the address doesn't linger in any
16292 + * port address list
16293 + */
16294 + list_for_each(pos, &priv->port_list) {
16295 + port_priv = list_entry(pos, struct ethsw_port_priv,
16296 + list);
16297 + dev_uc_del(port_priv->netdev, addr);
16298 + }
16299 +
16300 + err = dev_uc_add(netdev, addr);
16301 + if (err) {
16302 + netdev_err(netdev, "dev_uc_add err %d\n", err);
16303 + return err;
16304 + }
16305 + } else {
16306 + struct dpsw_fdb_multicast_cfg entry = {
16307 + .type = DPSW_FDB_ENTRY_STATIC,
16308 + .num_ifs = 0,
16309 + };
16310 +
16311 + /* check if address is already set on this port */
16312 + if (_lookup_address(netdev, 0, addr))
16313 + return -EEXIST;
16314 +
16315 + /* check if the address exists on other port */
16316 + ether_addr_copy(entry.mac_addr, addr);
16317 + err = dpsw_fdb_get_multicast(priv->mc_io, 0, priv->dpsw_handle,
16318 + 0, &entry);
16319 + if (!err) {
16320 + /* entry exists, can we replace it? */
16321 + if (flags & NLM_F_EXCL)
16322 + return -EEXIST;
16323 + } else if (err != -ENAVAIL) {
16324 + netdev_err(netdev, "dpsw_fdb_get_unicast err %d\n",
16325 + err);
16326 + return err;
16327 + }
16328 +
16329 + err = ethsw_port_fdb_add_mc(netdev, addr);
16330 + if (err) {
16331 + netdev_err(netdev, "ethsw_port_fdb_add_mc err %d\n",
16332 + err);
16333 + return err;
16334 + }
16335 +
16336 + err = dev_mc_add(netdev, addr);
16337 + if (err) {
16338 + netdev_err(netdev, "dev_mc_add err %d\n", err);
16339 + return err;
16340 + }
16341 + }
16342 +
16343 + return 0;
16344 +}
16345 +
16346 +static int ethsw_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
16347 + struct net_device *netdev,
16348 + const unsigned char *addr, u16 vid)
16349 +{
16350 + int err;
16351 +
16352 + if (is_unicast_ether_addr(addr)) {
16353 + err = ethsw_port_fdb_del_uc(netdev, addr);
16354 + if (err) {
16355 + netdev_err(netdev, "ethsw_port_fdb_del_uc err %d\n",
16356 + err);
16357 + return err;
16358 + }
16359 +
16360 + /* also delete if configured on port */
16361 + err = dev_uc_del(netdev, addr);
16362 + if (err && err != -ENOENT) {
16363 + netdev_err(netdev, "dev_uc_del err %d\n", err);
16364 + return err;
16365 + }
16366 + } else {
16367 + if (!_lookup_address(netdev, 0, addr))
16368 + return -ENOENT;
16369 +
16370 + err = dev_mc_del(netdev, addr);
16371 + if (err) {
16372 + netdev_err(netdev, "dev_mc_del err %d\n", err);
16373 + return err;
16374 + }
16375 +
16376 + err = ethsw_port_fdb_del_mc(netdev, addr);
16377 + if (err) {
16378 + netdev_err(netdev, "ethsw_port_fdb_del_mc err %d\n",
16379 + err);
16380 + return err;
16381 + }
16382 + }
16383 +
16384 + return 0;
16385 +}
16386 +
16387 +struct rtnl_link_stats64 *ethsw_port_get_stats(struct net_device *netdev,
16388 + struct rtnl_link_stats64 *storage)
16389 +{
16390 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
16391 + u64 tmp;
16392 + int err;
16393 +
16394 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
16395 + port_priv->ethsw_priv->dpsw_handle,
16396 + port_priv->port_index,
16397 + DPSW_CNT_ING_FRAME, &storage->rx_packets);
16398 + if (err)
16399 + goto error;
16400 +
16401 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
16402 + port_priv->ethsw_priv->dpsw_handle,
16403 + port_priv->port_index,
16404 + DPSW_CNT_EGR_FRAME, &storage->tx_packets);
16405 + if (err)
16406 + goto error;
16407 +
16408 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
16409 + port_priv->ethsw_priv->dpsw_handle,
16410 + port_priv->port_index,
16411 + DPSW_CNT_ING_BYTE, &storage->rx_bytes);
16412 + if (err)
16413 + goto error;
16414 +
16415 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
16416 + port_priv->ethsw_priv->dpsw_handle,
16417 + port_priv->port_index,
16418 + DPSW_CNT_EGR_BYTE, &storage->tx_bytes);
16419 + if (err)
16420 + goto error;
16421 +
16422 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
16423 + port_priv->ethsw_priv->dpsw_handle,
16424 + port_priv->port_index,
16425 + DPSW_CNT_ING_FRAME_DISCARD,
16426 + &storage->rx_dropped);
16427 + if (err)
16428 + goto error;
16429 +
16430 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
16431 + port_priv->ethsw_priv->dpsw_handle,
16432 + port_priv->port_index,
16433 + DPSW_CNT_ING_FLTR_FRAME,
16434 + &tmp);
16435 + if (err)
16436 + goto error;
16437 + storage->rx_dropped += tmp;
16438 +
16439 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
16440 + port_priv->ethsw_priv->dpsw_handle,
16441 + port_priv->port_index,
16442 + DPSW_CNT_EGR_FRAME_DISCARD,
16443 + &storage->tx_dropped);
16444 + if (err)
16445 + goto error;
16446 +
16447 + return storage;
16448 +
16449 +error:
16450 + netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
16451 +}
16452 +
16453 +static int ethsw_port_change_mtu(struct net_device *netdev, int mtu)
16454 +{
16455 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
16456 + int err;
16457 +
16458 + if (mtu < ETH_ZLEN || mtu > ETHSW_MAX_FRAME_LENGTH) {
16459 + netdev_err(netdev, "Invalid MTU %d. Valid range is: %d..%d\n",
16460 + mtu, ETH_ZLEN, ETHSW_MAX_FRAME_LENGTH);
16461 + return -EINVAL;
16462 + }
16463 +
16464 + err = dpsw_if_set_max_frame_length(port_priv->ethsw_priv->mc_io,
16465 + 0,
16466 + port_priv->ethsw_priv->dpsw_handle,
16467 + port_priv->port_index,
16468 + (u16)ETHSW_L2_MAX_FRM(mtu));
16469 + if (err) {
16470 + netdev_err(netdev,
16471 + "dpsw_if_set_max_frame_length() err %d\n", err);
16472 + return err;
16473 + }
16474 +
16475 + netdev->mtu = mtu;
16476 + return 0;
16477 +}
16478 +
16479 +static const struct net_device_ops ethsw_port_ops = {
16480 + .ndo_open = &ethsw_port_open,
16481 + .ndo_stop = &ethsw_port_stop,
16482 +
16483 + .ndo_fdb_add = &ethsw_port_fdb_add,
16484 + .ndo_fdb_del = &ethsw_port_fdb_del,
16485 + .ndo_fdb_dump = &ndo_dflt_fdb_dump,
16486 +
16487 + .ndo_get_stats64 = &ethsw_port_get_stats,
16488 + .ndo_change_mtu = &ethsw_port_change_mtu,
16489 +
16490 + .ndo_start_xmit = &ethsw_dropframe,
16491 +};
16492 +
16493 +static void ethsw_get_drvinfo(struct net_device *netdev,
16494 + struct ethtool_drvinfo *drvinfo)
16495 +{
16496 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
16497 + u16 version_major, version_minor;
16498 + int err;
16499 +
16500 + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
16501 + strlcpy(drvinfo->version, ethsw_drv_version, sizeof(drvinfo->version));
16502 +
16503 + err = dpsw_get_api_version(port_priv->ethsw_priv->mc_io, 0,
16504 + &version_major,
16505 + &version_minor);
16506 + if (err)
16507 + strlcpy(drvinfo->fw_version, "N/A",
16508 + sizeof(drvinfo->fw_version));
16509 + else
16510 + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
16511 + "%u.%u", version_major, version_minor);
16512 +
16513 + strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
16514 + sizeof(drvinfo->bus_info));
16515 +}
16516 +
16517 +static int ethsw_get_settings(struct net_device *netdev,
16518 + struct ethtool_cmd *cmd)
16519 +{
16520 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
16521 + struct dpsw_link_state state = {0};
16522 + int err = 0;
16523 +
16524 + err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
16525 + port_priv->ethsw_priv->dpsw_handle,
16526 + port_priv->port_index,
16527 + &state);
16528 + if (err) {
16529 + netdev_err(netdev, "ERROR %d getting link state", err);
16530 + goto out;
16531 + }
16532 +
16533 + /* At the moment, we have no way of interrogating the DPMAC
16534 + * from the DPSW side or there may not exist a DPMAC at all.
16535 + * Report only autoneg state, duplexity and speed.
16536 + */
16537 + if (state.options & DPSW_LINK_OPT_AUTONEG)
16538 + cmd->autoneg = AUTONEG_ENABLE;
16539 + if (!(state.options & DPSW_LINK_OPT_HALF_DUPLEX))
16540 + cmd->autoneg = DUPLEX_FULL;
16541 + ethtool_cmd_speed_set(cmd, state.rate);
16542 +
16543 +out:
16544 + return err;
16545 +}
16546 +
16547 +static int ethsw_set_settings(struct net_device *netdev,
16548 + struct ethtool_cmd *cmd)
16549 +{
16550 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
16551 + struct dpsw_link_state state = {0};
16552 + struct dpsw_link_cfg cfg = {0};
16553 + int err = 0;
16554 +
16555 + netdev_dbg(netdev, "Setting link parameters...");
16556 +
16557 + err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
16558 + port_priv->ethsw_priv->dpsw_handle,
16559 + port_priv->port_index,
16560 + &state);
16561 + if (err) {
16562 + netdev_err(netdev, "ERROR %d getting link state", err);
16563 + goto out;
16564 + }
16565 +
16566 + /* Due to a temporary MC limitation, the DPSW port must be down
16567 + * in order to be able to change link settings. Taking steps to let
16568 + * the user know that.
16569 + */
16570 + if (netif_running(netdev)) {
16571 + netdev_info(netdev,
16572 + "Sorry, interface must be brought down first.\n");
16573 + return -EACCES;
16574 + }
16575 +
16576 + cfg.options = state.options;
16577 + cfg.rate = ethtool_cmd_speed(cmd);
16578 + if (cmd->autoneg == AUTONEG_ENABLE)
16579 + cfg.options |= DPSW_LINK_OPT_AUTONEG;
16580 + else
16581 + cfg.options &= ~DPSW_LINK_OPT_AUTONEG;
16582 + if (cmd->duplex == DUPLEX_HALF)
16583 + cfg.options |= DPSW_LINK_OPT_HALF_DUPLEX;
16584 + else
16585 + cfg.options &= ~DPSW_LINK_OPT_HALF_DUPLEX;
16586 +
16587 + err = dpsw_if_set_link_cfg(port_priv->ethsw_priv->mc_io, 0,
16588 + port_priv->ethsw_priv->dpsw_handle,
16589 + port_priv->port_index,
16590 + &cfg);
16591 + if (err)
16592 + /* ethtool will be loud enough if we return an error; no point
16593 + * in putting our own error message on the console by default
16594 + */
16595 + netdev_dbg(netdev, "ERROR %d setting link cfg", err);
16596 +
16597 +out:
16598 + return err;
16599 +}
16600 +
16601 +static struct {
16602 + enum dpsw_counter id;
16603 + char name[ETH_GSTRING_LEN];
16604 +} ethsw_ethtool_counters[] = {
16605 + {DPSW_CNT_ING_FRAME, "rx frames"},
16606 + {DPSW_CNT_ING_BYTE, "rx bytes"},
16607 + {DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"},
16608 + {DPSW_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
16609 + {DPSW_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
16610 + {DPSW_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
16611 + {DPSW_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
16612 + {DPSW_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
16613 + {DPSW_CNT_EGR_FRAME, "tx frames"},
16614 + {DPSW_CNT_EGR_BYTE, "tx bytes"},
16615 + {DPSW_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
16616 +
16617 +};
16618 +
16619 +static int ethsw_ethtool_get_sset_count(struct net_device *dev, int sset)
16620 +{
16621 + switch (sset) {
16622 + case ETH_SS_STATS:
16623 + return ARRAY_SIZE(ethsw_ethtool_counters);
16624 + default:
16625 + return -EOPNOTSUPP;
16626 + }
16627 +}
16628 +
16629 +static void ethsw_ethtool_get_strings(struct net_device *netdev,
16630 + u32 stringset, u8 *data)
16631 +{
16632 + u32 i;
16633 +
16634 + switch (stringset) {
16635 + case ETH_SS_STATS:
16636 + for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++)
16637 + memcpy(data + i * ETH_GSTRING_LEN,
16638 + ethsw_ethtool_counters[i].name, ETH_GSTRING_LEN);
16639 + break;
16640 + }
16641 +}
16642 +
16643 +static void ethsw_ethtool_get_stats(struct net_device *netdev,
16644 + struct ethtool_stats *stats,
16645 + u64 *data)
16646 +{
16647 + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
16648 + u32 i;
16649 + int err;
16650 +
16651 + for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++) {
16652 + err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
16653 + port_priv->ethsw_priv->dpsw_handle,
16654 + port_priv->port_index,
16655 + ethsw_ethtool_counters[i].id,
16656 + &data[i]);
16657 + if (err)
16658 + netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n",
16659 + ethsw_ethtool_counters[i].name, err);
16660 + }
16661 +}
16662 +
16663 +static const struct ethtool_ops ethsw_port_ethtool_ops = {
16664 + .get_drvinfo = &ethsw_get_drvinfo,
16665 + .get_link = &ethtool_op_get_link,
16666 + .get_settings = &ethsw_get_settings,
16667 + .set_settings = &ethsw_set_settings,
16668 + .get_strings = &ethsw_ethtool_get_strings,
16669 + .get_ethtool_stats = &ethsw_ethtool_get_stats,
16670 + .get_sset_count = &ethsw_ethtool_get_sset_count,
16671 +};
16672 +
16673 +/* -------------------------------------------------------------------------- */
16674 +/* ethsw driver functions */
16675 +
16676 +static int ethsw_links_state_update(struct ethsw_dev_priv *priv)
16677 +{
16678 + struct list_head *pos;
16679 + struct ethsw_port_priv *port_priv;
16680 + int err;
16681 +
16682 + list_for_each(pos, &priv->port_list) {
16683 + port_priv = list_entry(pos, struct ethsw_port_priv,
16684 + list);
16685 +
16686 + err = _ethsw_port_carrier_state_sync(port_priv->netdev);
16687 + if (err)
16688 + netdev_err(port_priv->netdev,
16689 + "_ethsw_port_carrier_state_sync err %d\n",
16690 + err);
16691 + }
16692 +
16693 + return 0;
16694 +}
16695 +
16696 +static irqreturn_t ethsw_irq0_handler(int irq_num, void *arg)
16697 +{
16698 + return IRQ_WAKE_THREAD;
16699 +}
16700 +
16701 +static irqreturn_t _ethsw_irq0_handler_thread(int irq_num, void *arg)
16702 +{
16703 + struct device *dev = (struct device *)arg;
16704 + struct net_device *netdev = dev_get_drvdata(dev);
16705 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
16706 +
16707 + struct fsl_mc_io *io = priv->mc_io;
16708 + u16 token = priv->dpsw_handle;
16709 + int irq_index = DPSW_IRQ_INDEX_IF;
16710 +
16711 + /* Mask the events and the if_id reserved bits to be cleared on read */
16712 + u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
16713 + int err;
16714 +
16715 + err = dpsw_get_irq_status(io, 0, token, irq_index, &status);
16716 + if (unlikely(err)) {
16717 + netdev_err(netdev, "Can't get irq status (err %d)", err);
16718 +
16719 + err = dpsw_clear_irq_status(io, 0, token, irq_index,
16720 + 0xFFFFFFFF);
16721 + if (unlikely(err))
16722 + netdev_err(netdev, "Can't clear irq status (err %d)",
16723 + err);
16724 + goto out;
16725 + }
16726 +
16727 + if (status & DPSW_IRQ_EVENT_LINK_CHANGED) {
16728 + err = ethsw_links_state_update(priv);
16729 + if (unlikely(err))
16730 + goto out;
16731 + }
16732 +
16733 +out:
16734 + return IRQ_HANDLED;
16735 +}
16736 +
16737 +static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
16738 +{
16739 + struct device *dev = &sw_dev->dev;
16740 + struct net_device *netdev = dev_get_drvdata(dev);
16741 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
16742 + int err = 0;
16743 + struct fsl_mc_device_irq *irq;
16744 + const int irq_index = DPSW_IRQ_INDEX_IF;
16745 + u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
16746 +
16747 + err = fsl_mc_allocate_irqs(sw_dev);
16748 + if (unlikely(err)) {
16749 + dev_err(dev, "MC irqs allocation failed\n");
16750 + return err;
16751 + }
16752 +
16753 + if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_MAX_IRQ_NUM)) {
16754 + err = -EINVAL;
16755 + goto free_irq;
16756 + }
16757 +
16758 + err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
16759 + irq_index, 0);
16760 + if (unlikely(err)) {
16761 + dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
16762 + goto free_irq;
16763 + }
16764 +
16765 + irq = sw_dev->irqs[irq_index];
16766 +
16767 + err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
16768 + ethsw_irq0_handler,
16769 + _ethsw_irq0_handler_thread,
16770 + IRQF_NO_SUSPEND | IRQF_ONESHOT,
16771 + dev_name(dev), dev);
16772 + if (unlikely(err)) {
16773 + dev_err(dev, "devm_request_threaded_irq(): %d", err);
16774 + goto free_irq;
16775 + }
16776 +
16777 + err = dpsw_set_irq_mask(priv->mc_io, 0, priv->dpsw_handle,
16778 + irq_index, mask);
16779 + if (unlikely(err)) {
16780 + dev_err(dev, "dpsw_set_irq_mask(): %d", err);
16781 + goto free_devm_irq;
16782 + }
16783 +
16784 + err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
16785 + irq_index, 1);
16786 + if (unlikely(err)) {
16787 + dev_err(dev, "dpsw_set_irq_enable(): %d", err);
16788 + goto free_devm_irq;
16789 + }
16790 +
16791 + return 0;
16792 +
16793 +free_devm_irq:
16794 + devm_free_irq(dev, irq->msi_desc->irq, dev);
16795 +free_irq:
16796 + fsl_mc_free_irqs(sw_dev);
16797 + return err;
16798 +}
16799 +
16800 +static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
16801 +{
16802 + struct device *dev = &sw_dev->dev;
16803 + struct net_device *netdev = dev_get_drvdata(dev);
16804 + struct ethsw_dev_priv *priv = netdev_priv(netdev);
16805 +
16806 + dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
16807 + DPSW_IRQ_INDEX_IF, 0);
16808 + devm_free_irq(dev,
16809 + sw_dev->irqs[DPSW_IRQ_INDEX_IF]->msi_desc->irq,
16810 + dev);
16811 + fsl_mc_free_irqs(sw_dev);
16812 +}
16813 +
16814 +static int __cold
16815 +ethsw_init(struct fsl_mc_device *sw_dev)
16816 +{
16817 + struct device *dev = &sw_dev->dev;
16818 + struct ethsw_dev_priv *priv;
16819 + struct net_device *netdev;
16820 + int err = 0;
16821 + u16 i;
16822 + u16 version_major, version_minor;
16823 + const struct dpsw_stp_cfg stp_cfg = {
16824 + .vlan_id = 1,
16825 + .state = DPSW_STP_STATE_FORWARDING,
16826 + };
16827 +
16828 + netdev = dev_get_drvdata(dev);
16829 + priv = netdev_priv(netdev);
16830 +
16831 + priv->dev_id = sw_dev->obj_desc.id;
16832 +
16833 + err = dpsw_open(priv->mc_io, 0, priv->dev_id, &priv->dpsw_handle);
16834 + if (err) {
16835 + dev_err(dev, "dpsw_open err %d\n", err);
16836 + goto err_exit;
16837 + }
16838 + if (!priv->dpsw_handle) {
16839 + dev_err(dev, "dpsw_open returned null handle but no error\n");
16840 + err = -EFAULT;
16841 + goto err_exit;
16842 + }
16843 +
16844 + err = dpsw_get_attributes(priv->mc_io, 0, priv->dpsw_handle,
16845 + &priv->sw_attr);
16846 + if (err) {
16847 + dev_err(dev, "dpsw_get_attributes err %d\n", err);
16848 + goto err_close;
16849 + }
16850 +
16851 + err = dpsw_get_api_version(priv->mc_io, 0,
16852 + &version_major,
16853 + &version_minor);
16854 + if (err) {
16855 + dev_err(dev, "dpsw_get_api_version err %d\n", err);
16856 + goto err_close;
16857 + }
16858 +
16859 + /* Minimum supported DPSW version check */
16860 + if (version_major < DPSW_MIN_VER_MAJOR ||
16861 + (version_major == DPSW_MIN_VER_MAJOR &&
16862 + version_minor < DPSW_MIN_VER_MINOR)) {
16863 + dev_err(dev, "DPSW version %d:%d not supported. Use %d.%d or greater.\n",
16864 + version_major,
16865 + version_minor,
16866 + DPSW_MIN_VER_MAJOR, DPSW_MIN_VER_MINOR);
16867 + err = -ENOTSUPP;
16868 + goto err_close;
16869 + }
16870 +
16871 + err = dpsw_reset(priv->mc_io, 0, priv->dpsw_handle);
16872 + if (err) {
16873 + dev_err(dev, "dpsw_reset err %d\n", err);
16874 + goto err_close;
16875 + }
16876 +
16877 + err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle, 0,
16878 + DPSW_FDB_LEARNING_MODE_HW);
16879 + if (err) {
16880 + dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err);
16881 + goto err_close;
16882 + }
16883 +
16884 + for (i = 0; i < priv->sw_attr.num_ifs; i++) {
16885 + err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle, i,
16886 + &stp_cfg);
16887 + if (err) {
16888 + dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
16889 + err, i);
16890 + goto err_close;
16891 + }
16892 +
16893 + err = dpsw_if_set_broadcast(priv->mc_io, 0,
16894 + priv->dpsw_handle, i, 1);
16895 + if (err) {
16896 + dev_err(dev,
16897 + "dpsw_if_set_broadcast err %d for port %d\n",
16898 + err, i);
16899 + goto err_close;
16900 + }
16901 + }
16902 +
16903 + return 0;
16904 +
16905 +err_close:
16906 + dpsw_close(priv->mc_io, 0, priv->dpsw_handle);
16907 +err_exit:
16908 + return err;
16909 +}
16910 +
16911 +static int __cold
16912 +ethsw_takedown(struct fsl_mc_device *sw_dev)
16913 +{
16914 + struct device *dev = &sw_dev->dev;
16915 + struct net_device *netdev;
16916 + struct ethsw_dev_priv *priv;
16917 + int err;
16918 +
16919 + netdev = dev_get_drvdata(dev);
16920 + priv = netdev_priv(netdev);
16921 +
16922 + err = dpsw_close(priv->mc_io, 0, priv->dpsw_handle);
16923 + if (err)
16924 + dev_warn(dev, "dpsw_close err %d\n", err);
16925 +
16926 + return 0;
16927 +}
16928 +
16929 +static int __cold
16930 +ethsw_remove(struct fsl_mc_device *sw_dev)
16931 +{
16932 + struct device *dev;
16933 + struct net_device *netdev;
16934 + struct ethsw_dev_priv *priv;
16935 + struct ethsw_port_priv *port_priv;
16936 + struct list_head *pos;
16937 +
16938 + dev = &sw_dev->dev;
16939 + netdev = dev_get_drvdata(dev);
16940 + priv = netdev_priv(netdev);
16941 +
16942 + list_for_each(pos, &priv->port_list) {
16943 + port_priv = list_entry(pos, struct ethsw_port_priv, list);
16944 +
16945 + rtnl_lock();
16946 + netdev_upper_dev_unlink(port_priv->netdev, netdev);
16947 + rtnl_unlock();
16948 +
16949 + unregister_netdev(port_priv->netdev);
16950 + free_netdev(port_priv->netdev);
16951 + }
16952 +
16953 + ethsw_teardown_irqs(sw_dev);
16954 +
16955 + unregister_netdev(netdev);
16956 +
16957 + ethsw_takedown(sw_dev);
16958 + fsl_mc_portal_free(priv->mc_io);
16959 +
16960 + dev_set_drvdata(dev, NULL);
16961 + free_netdev(netdev);
16962 +
16963 + return 0;
16964 +}
16965 +
16966 +static int __cold
16967 +ethsw_probe(struct fsl_mc_device *sw_dev)
16968 +{
16969 + struct device *dev;
16970 + struct net_device *netdev = NULL;
16971 + struct ethsw_dev_priv *priv = NULL;
16972 + int err = 0;
16973 + u16 i;
16974 + const char def_mcast[ETH_ALEN] = {
16975 + 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01,
16976 + };
16977 + char port_name[IFNAMSIZ];
16978 +
16979 + dev = &sw_dev->dev;
16980 +
16981 + /* register switch device, it's for management only - no I/O */
16982 + netdev = alloc_etherdev(sizeof(*priv));
16983 + if (!netdev) {
16984 + dev_err(dev, "alloc_etherdev error\n");
16985 + return -ENOMEM;
16986 + }
16987 + netdev->netdev_ops = &ethsw_ops;
16988 +
16989 + SET_NETDEV_DEV(netdev, dev);
16990 + dev_set_drvdata(dev, netdev);
16991 +
16992 + priv = netdev_priv(netdev);
16993 + priv->netdev = netdev;
16994 +
16995 + err = fsl_mc_portal_allocate(sw_dev, 0, &priv->mc_io);
16996 + if (err) {
16997 + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
16998 + goto err_free_netdev;
16999 + }
17000 + if (!priv->mc_io) {
17001 + dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
17002 + err = -EFAULT;
17003 + goto err_free_netdev;
17004 + }
17005 +
17006 + err = ethsw_init(sw_dev);
17007 + if (err) {
17008 + dev_err(dev, "switch init err %d\n", err);
17009 + goto err_free_cmdport;
17010 + }
17011 +
17012 + netdev->flags = netdev->flags | IFF_PROMISC | IFF_MASTER;
17013 +
17014 + /* TODO: should we hold rtnl_lock here? We can't register_netdev under
17015 + * lock
17016 + */
17017 + dev_alloc_name(netdev, "sw%d");
17018 + err = register_netdev(netdev);
17019 + if (err < 0) {
17020 + dev_err(dev, "register_netdev error %d\n", err);
17021 + goto err_takedown;
17022 + }
17023 + if (err)
17024 + dev_info(dev, "register_netdev res %d\n", err);
17025 +
17026 + /* VLAN 1 is implicitly configured on the switch */
17027 + priv->vlans[1] = ETHSW_VLAN_MEMBER;
17028 + /* Flooding, learning are implicitly enabled */
17029 + priv->learning = true;
17030 + priv->flood = true;
17031 +
17032 + /* register switch ports */
17033 + snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name);
17034 +
17035 + INIT_LIST_HEAD(&priv->port_list);
17036 + for (i = 0; i < priv->sw_attr.num_ifs; i++) {
17037 + struct net_device *port_netdev;
17038 + struct ethsw_port_priv *port_priv;
17039 +
17040 + port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
17041 + if (!port_netdev) {
17042 + dev_err(dev, "alloc_etherdev error\n");
17043 + goto err_takedown;
17044 + }
17045 +
17046 + port_priv = netdev_priv(port_netdev);
17047 + port_priv->netdev = port_netdev;
17048 + port_priv->ethsw_priv = priv;
17049 +
17050 + port_priv->port_index = i;
17051 + port_priv->stp_state = BR_STATE_FORWARDING;
17052 + /* VLAN 1 is configured by default on all switch ports */
17053 + port_priv->vlans[1] = ETHSW_VLAN_MEMBER | ETHSW_VLAN_UNTAGGED |
17054 + ETHSW_VLAN_PVID;
17055 +
17056 + SET_NETDEV_DEV(port_netdev, dev);
17057 + port_netdev->netdev_ops = &ethsw_port_ops;
17058 + port_netdev->ethtool_ops = &ethsw_port_ethtool_ops;
17059 +
17060 + port_netdev->flags = port_netdev->flags |
17061 + IFF_PROMISC | IFF_SLAVE;
17062 +
17063 + dev_alloc_name(port_netdev, port_name);
17064 + err = register_netdev(port_netdev);
17065 + if (err < 0) {
17066 + dev_err(dev, "register_netdev error %d\n", err);
17067 + free_netdev(port_netdev);
17068 + goto err_takedown;
17069 + }
17070 +
17071 + rtnl_lock();
17072 +
17073 + err = netdev_master_upper_dev_link(port_netdev, netdev,
17074 + NULL, NULL);
17075 + if (err) {
17076 + dev_err(dev, "netdev_master_upper_dev_link error %d\n",
17077 + err);
17078 + unregister_netdev(port_netdev);
17079 + free_netdev(port_netdev);
17080 + rtnl_unlock();
17081 + goto err_takedown;
17082 + }
17083 +
17084 + rtmsg_ifinfo(RTM_NEWLINK, port_netdev, IFF_SLAVE, GFP_KERNEL);
17085 +
17086 + rtnl_unlock();
17087 +
17088 + list_add(&port_priv->list, &priv->port_list);
17089 +
17090 + /* TODO: implmenet set_rm_mode instead of this */
17091 + err = ethsw_port_fdb_add_mc(port_netdev, def_mcast);
17092 + if (err)
17093 + dev_warn(&netdev->dev,
17094 + "ethsw_port_fdb_add_mc err %d\n", err);
17095 + }
17096 +
17097 + /* the switch starts up enabled */
17098 + rtnl_lock();
17099 + err = dev_open(netdev);
17100 + rtnl_unlock();
17101 + if (err)
17102 + dev_warn(dev, "dev_open err %d\n", err);
17103 +
17104 + /* setup irqs */
17105 + err = ethsw_setup_irqs(sw_dev);
17106 + if (unlikely(err)) {
17107 + dev_warn(dev, "ethsw_setup_irqs err %d\n", err);
17108 + goto err_takedown;
17109 + }
17110 +
17111 + dev_info(&netdev->dev,
17112 + "probed %d port switch\n", priv->sw_attr.num_ifs);
17113 + return 0;
17114 +
17115 +err_takedown:
17116 + ethsw_remove(sw_dev);
17117 +err_free_cmdport:
17118 + fsl_mc_portal_free(priv->mc_io);
17119 +err_free_netdev:
17120 + dev_set_drvdata(dev, NULL);
17121 + free_netdev(netdev);
17122 +
17123 + return err;
17124 +}
17125 +
17126 +static const struct fsl_mc_device_id ethsw_match_id_table[] = {
17127 + {
17128 + .vendor = FSL_MC_VENDOR_FREESCALE,
17129 + .obj_type = "dpsw",
17130 + },
17131 + {}
17132 +};
17133 +
17134 +static struct fsl_mc_driver eth_sw_drv = {
17135 + .driver = {
17136 + .name = KBUILD_MODNAME,
17137 + .owner = THIS_MODULE,
17138 + },
17139 + .probe = ethsw_probe,
17140 + .remove = ethsw_remove,
17141 + .match_id_table = ethsw_match_id_table,
17142 +};
17143 +
17144 +module_fsl_mc_driver(eth_sw_drv);
17145 +
17146 +MODULE_LICENSE("GPL");
17147 +MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver (prototype)");
17148 --- /dev/null
17149 +++ b/drivers/staging/fsl-dpaa2/evb/Kconfig
17150 @@ -0,0 +1,7 @@
17151 +config FSL_DPAA2_EVB
17152 + tristate "DPAA2 Edge Virtual Bridge"
17153 + depends on FSL_MC_BUS && FSL_DPAA2
17154 + select VLAN_8021Q
17155 + default y
17156 + ---help---
17157 + Prototype driver for DPAA2 Edge Virtual Bridge.
17158 --- /dev/null
17159 +++ b/drivers/staging/fsl-dpaa2/evb/Makefile
17160 @@ -0,0 +1,10 @@
17161 +
17162 +obj-$(CONFIG_FSL_DPAA2_EVB) += dpaa2-evb.o
17163 +
17164 +dpaa2-evb-objs := evb.o dpdmux.o
17165 +
17166 +all:
17167 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
17168 +
17169 +clean:
17170 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
17171 --- /dev/null
17172 +++ b/drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
17173 @@ -0,0 +1,279 @@
17174 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
17175 + *
17176 + * Redistribution and use in source and binary forms, with or without
17177 + * modification, are permitted provided that the following conditions are met:
17178 + * * Redistributions of source code must retain the above copyright
17179 + * notice, this list of conditions and the following disclaimer.
17180 + * * Redistributions in binary form must reproduce the above copyright
17181 + * notice, this list of conditions and the following disclaimer in the
17182 + * documentation and/or other materials provided with the distribution.
17183 + * * Neither the name of the above-listed copyright holders nor the
17184 + * names of any contributors may be used to endorse or promote products
17185 + * derived from this software without specific prior written permission.
17186 + *
17187 + *
17188 + * ALTERNATIVELY, this software may be distributed under the terms of the
17189 + * GNU General Public License ("GPL") as published by the Free Software
17190 + * Foundation, either version 2 of that License or (at your option) any
17191 + * later version.
17192 + *
17193 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17194 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17195 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17196 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
17197 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
17198 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
17199 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
17200 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
17201 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
17202 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
17203 + * POSSIBILITY OF SUCH DAMAGE.
17204 + */
17205 +#ifndef _FSL_DPDMUX_CMD_H
17206 +#define _FSL_DPDMUX_CMD_H
17207 +
17208 +/* DPDMUX Version */
17209 +#define DPDMUX_VER_MAJOR 6
17210 +#define DPDMUX_VER_MINOR 1
17211 +
17212 +#define DPDMUX_CMD_BASE_VER 1
17213 +#define DPDMUX_CMD_ID_OFFSET 4
17214 +
17215 +#define DPDMUX_CMD(id) (((id) << DPDMUX_CMD_ID_OFFSET) | DPDMUX_CMD_BASE_VER)
17216 +
17217 +/* Command IDs */
17218 +#define DPDMUX_CMDID_CLOSE DPDMUX_CMD(0x800)
17219 +#define DPDMUX_CMDID_OPEN DPDMUX_CMD(0x806)
17220 +#define DPDMUX_CMDID_CREATE DPDMUX_CMD(0x906)
17221 +#define DPDMUX_CMDID_DESTROY DPDMUX_CMD(0x986)
17222 +#define DPDMUX_CMDID_GET_API_VERSION DPDMUX_CMD(0xa06)
17223 +
17224 +#define DPDMUX_CMDID_ENABLE DPDMUX_CMD(0x002)
17225 +#define DPDMUX_CMDID_DISABLE DPDMUX_CMD(0x003)
17226 +#define DPDMUX_CMDID_GET_ATTR DPDMUX_CMD(0x004)
17227 +#define DPDMUX_CMDID_RESET DPDMUX_CMD(0x005)
17228 +#define DPDMUX_CMDID_IS_ENABLED DPDMUX_CMD(0x006)
17229 +
17230 +#define DPDMUX_CMDID_SET_IRQ_ENABLE DPDMUX_CMD(0x012)
17231 +#define DPDMUX_CMDID_GET_IRQ_ENABLE DPDMUX_CMD(0x013)
17232 +#define DPDMUX_CMDID_SET_IRQ_MASK DPDMUX_CMD(0x014)
17233 +#define DPDMUX_CMDID_GET_IRQ_MASK DPDMUX_CMD(0x015)
17234 +#define DPDMUX_CMDID_GET_IRQ_STATUS DPDMUX_CMD(0x016)
17235 +#define DPDMUX_CMDID_CLEAR_IRQ_STATUS DPDMUX_CMD(0x017)
17236 +
17237 +#define DPDMUX_CMDID_SET_MAX_FRAME_LENGTH DPDMUX_CMD(0x0a1)
17238 +
17239 +#define DPDMUX_CMDID_UL_RESET_COUNTERS DPDMUX_CMD(0x0a3)
17240 +
17241 +#define DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES DPDMUX_CMD(0x0a7)
17242 +#define DPDMUX_CMDID_IF_GET_ATTR DPDMUX_CMD(0x0a8)
17243 +#define DPDMUX_CMDID_IF_ENABLE DPDMUX_CMD(0x0a9)
17244 +#define DPDMUX_CMDID_IF_DISABLE DPDMUX_CMD(0x0aa)
17245 +
17246 +#define DPDMUX_CMDID_IF_ADD_L2_RULE DPDMUX_CMD(0x0b0)
17247 +#define DPDMUX_CMDID_IF_REMOVE_L2_RULE DPDMUX_CMD(0x0b1)
17248 +#define DPDMUX_CMDID_IF_GET_COUNTER DPDMUX_CMD(0x0b2)
17249 +#define DPDMUX_CMDID_IF_SET_LINK_CFG DPDMUX_CMD(0x0b3)
17250 +#define DPDMUX_CMDID_IF_GET_LINK_STATE DPDMUX_CMD(0x0b4)
17251 +
17252 +#define DPDMUX_CMDID_SET_CUSTOM_KEY DPDMUX_CMD(0x0b5)
17253 +#define DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b6)
17254 +#define DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b7)
17255 +
17256 +#define DPDMUX_MASK(field) \
17257 + GENMASK(DPDMUX_##field##_SHIFT + DPDMUX_##field##_SIZE - 1, \
17258 + DPDMUX_##field##_SHIFT)
17259 +#define dpdmux_set_field(var, field, val) \
17260 + ((var) |= (((val) << DPDMUX_##field##_SHIFT) & DPDMUX_MASK(field)))
17261 +#define dpdmux_get_field(var, field) \
17262 + (((var) & DPDMUX_MASK(field)) >> DPDMUX_##field##_SHIFT)
17263 +
17264 +struct dpdmux_cmd_open {
17265 + u32 dpdmux_id;
17266 +};
17267 +
17268 +struct dpdmux_cmd_create {
17269 + u8 method;
17270 + u8 manip;
17271 + u16 num_ifs;
17272 + u32 pad;
17273 +
17274 + u16 adv_max_dmat_entries;
17275 + u16 adv_max_mc_groups;
17276 + u16 adv_max_vlan_ids;
17277 + u16 pad1;
17278 +
17279 + u64 options;
17280 +};
17281 +
17282 +struct dpdmux_cmd_destroy {
17283 + u32 dpdmux_id;
17284 +};
17285 +
17286 +#define DPDMUX_ENABLE_SHIFT 0
17287 +#define DPDMUX_ENABLE_SIZE 1
17288 +
17289 +struct dpdmux_rsp_is_enabled {
17290 + u8 en;
17291 +};
17292 +
17293 +struct dpdmux_cmd_set_irq_enable {
17294 + u8 enable;
17295 + u8 pad[3];
17296 + u8 irq_index;
17297 +};
17298 +
17299 +struct dpdmux_cmd_get_irq_enable {
17300 + u32 pad;
17301 + u8 irq_index;
17302 +};
17303 +
17304 +struct dpdmux_rsp_get_irq_enable {
17305 + u8 enable;
17306 +};
17307 +
17308 +struct dpdmux_cmd_set_irq_mask {
17309 + u32 mask;
17310 + u8 irq_index;
17311 +};
17312 +
17313 +struct dpdmux_cmd_get_irq_mask {
17314 + u32 pad;
17315 + u8 irq_index;
17316 +};
17317 +
17318 +struct dpdmux_rsp_get_irq_mask {
17319 + u32 mask;
17320 +};
17321 +
17322 +struct dpdmux_cmd_get_irq_status {
17323 + u32 status;
17324 + u8 irq_index;
17325 +};
17326 +
17327 +struct dpdmux_rsp_get_irq_status {
17328 + u32 status;
17329 +};
17330 +
17331 +struct dpdmux_cmd_clear_irq_status {
17332 + u32 status;
17333 + u8 irq_index;
17334 +};
17335 +
17336 +struct dpdmux_rsp_get_attr {
17337 + u8 method;
17338 + u8 manip;
17339 + u16 num_ifs;
17340 + u16 mem_size;
17341 + u16 pad;
17342 +
17343 + u64 pad1;
17344 +
17345 + u32 id;
17346 + u32 pad2;
17347 +
17348 + u64 options;
17349 +};
17350 +
17351 +struct dpdmux_cmd_set_max_frame_length {
17352 + u16 max_frame_length;
17353 +};
17354 +
17355 +#define DPDMUX_ACCEPTED_FRAMES_TYPE_SHIFT 0
17356 +#define DPDMUX_ACCEPTED_FRAMES_TYPE_SIZE 4
17357 +#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SHIFT 4
17358 +#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SIZE 4
17359 +
17360 +struct dpdmux_cmd_if_set_accepted_frames {
17361 + u16 if_id;
17362 + u8 frames_options;
17363 +};
17364 +
17365 +struct dpdmux_cmd_if {
17366 + u16 if_id;
17367 +};
17368 +
17369 +struct dpdmux_rsp_if_get_attr {
17370 + u8 pad[3];
17371 + u8 enabled;
17372 + u8 pad1[3];
17373 + u8 accepted_frames_type;
17374 + u32 rate;
17375 +};
17376 +
17377 +struct dpdmux_cmd_if_l2_rule {
17378 + u16 if_id;
17379 + u8 mac_addr5;
17380 + u8 mac_addr4;
17381 + u8 mac_addr3;
17382 + u8 mac_addr2;
17383 + u8 mac_addr1;
17384 + u8 mac_addr0;
17385 +
17386 + u32 pad;
17387 + u16 vlan_id;
17388 +};
17389 +
17390 +struct dpdmux_cmd_if_get_counter {
17391 + u16 if_id;
17392 + u8 counter_type;
17393 +};
17394 +
17395 +struct dpdmux_rsp_if_get_counter {
17396 + u64 pad;
17397 + u64 counter;
17398 +};
17399 +
17400 +struct dpdmux_cmd_if_set_link_cfg {
17401 + u16 if_id;
17402 + u16 pad[3];
17403 +
17404 + u32 rate;
17405 + u32 pad1;
17406 +
17407 + u64 options;
17408 +};
17409 +
17410 +struct dpdmux_cmd_if_get_link_state {
17411 + u16 if_id;
17412 +};
17413 +
17414 +struct dpdmux_rsp_if_get_link_state {
17415 + u32 pad;
17416 + u8 up;
17417 + u8 pad1[3];
17418 +
17419 + u32 rate;
17420 + u32 pad2;
17421 +
17422 + u64 options;
17423 +};
17424 +
17425 +struct dpdmux_rsp_get_api_version {
17426 + u16 major;
17427 + u16 minor;
17428 +};
17429 +
17430 +struct dpdmux_set_custom_key {
17431 + u64 pad[6];
17432 + u64 key_cfg_iova;
17433 +};
17434 +
17435 +struct dpdmux_cmd_add_custom_cls_entry {
17436 + u8 pad[3];
17437 + u8 key_size;
17438 + u16 pad1;
17439 + u16 dest_if;
17440 + u64 key_iova;
17441 + u64 mask_iova;
17442 +};
17443 +
17444 +struct dpdmux_cmd_remove_custom_cls_entry {
17445 + u8 pad[3];
17446 + u8 key_size;
17447 + u32 pad1;
17448 + u64 key_iova;
17449 + u64 mask_iova;
17450 +};
17451 +
17452 +#endif /* _FSL_DPDMUX_CMD_H */
17453 --- /dev/null
17454 +++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.c
17455 @@ -0,0 +1,1112 @@
17456 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
17457 + *
17458 + * Redistribution and use in source and binary forms, with or without
17459 + * modification, are permitted provided that the following conditions are met:
17460 + * * Redistributions of source code must retain the above copyright
17461 + * notice, this list of conditions and the following disclaimer.
17462 + * * Redistributions in binary form must reproduce the above copyright
17463 + * notice, this list of conditions and the following disclaimer in the
17464 + * documentation and/or other materials provided with the distribution.
17465 + * * Neither the name of the above-listed copyright holders nor the
17466 + * names of any contributors may be used to endorse or promote products
17467 + * derived from this software without specific prior written permission.
17468 + *
17469 + *
17470 + * ALTERNATIVELY, this software may be distributed under the terms of the
17471 + * GNU General Public License ("GPL") as published by the Free Software
17472 + * Foundation, either version 2 of that License or (at your option) any
17473 + * later version.
17474 + *
17475 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17476 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17477 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17478 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
17479 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
17480 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
17481 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
17482 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
17483 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
17484 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
17485 + * POSSIBILITY OF SUCH DAMAGE.
17486 + */
17487 +#include "../../fsl-mc/include/mc-sys.h"
17488 +#include "../../fsl-mc/include/mc-cmd.h"
17489 +#include "dpdmux.h"
17490 +#include "dpdmux-cmd.h"
17491 +
17492 +/**
17493 + * dpdmux_open() - Open a control session for the specified object
17494 + * @mc_io: Pointer to MC portal's I/O object
17495 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17496 + * @dpdmux_id: DPDMUX unique ID
17497 + * @token: Returned token; use in subsequent API calls
17498 + *
17499 + * This function can be used to open a control session for an
17500 + * already created object; an object may have been declared in
17501 + * the DPL or by calling the dpdmux_create() function.
17502 + * This function returns a unique authentication token,
17503 + * associated with the specific object ID and the specific MC
17504 + * portal; this token must be used in all subsequent commands for
17505 + * this specific object.
17506 + *
17507 + * Return: '0' on Success; Error code otherwise.
17508 + */
17509 +int dpdmux_open(struct fsl_mc_io *mc_io,
17510 + u32 cmd_flags,
17511 + int dpdmux_id,
17512 + u16 *token)
17513 +{
17514 + struct mc_command cmd = { 0 };
17515 + struct dpdmux_cmd_open *cmd_params;
17516 + int err;
17517 +
17518 + /* prepare command */
17519 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_OPEN,
17520 + cmd_flags,
17521 + 0);
17522 + cmd_params = (struct dpdmux_cmd_open *)cmd.params;
17523 + cmd_params->dpdmux_id = cpu_to_le32(dpdmux_id);
17524 +
17525 + /* send command to mc*/
17526 + err = mc_send_command(mc_io, &cmd);
17527 + if (err)
17528 + return err;
17529 +
17530 + /* retrieve response parameters */
17531 + *token = mc_cmd_hdr_read_token(&cmd);
17532 +
17533 + return 0;
17534 +}
17535 +
17536 +/**
17537 + * dpdmux_close() - Close the control session of the object
17538 + * @mc_io: Pointer to MC portal's I/O object
17539 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17540 + * @token: Token of DPDMUX object
17541 + *
17542 + * After this function is called, no further operations are
17543 + * allowed on the object without opening a new control session.
17544 + *
17545 + * Return: '0' on Success; Error code otherwise.
17546 + */
17547 +int dpdmux_close(struct fsl_mc_io *mc_io,
17548 + u32 cmd_flags,
17549 + u16 token)
17550 +{
17551 + struct mc_command cmd = { 0 };
17552 +
17553 + /* prepare command */
17554 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE,
17555 + cmd_flags,
17556 + token);
17557 +
17558 + /* send command to mc*/
17559 + return mc_send_command(mc_io, &cmd);
17560 +}
17561 +
17562 +/**
17563 + * dpdmux_create() - Create the DPDMUX object
17564 + * @mc_io: Pointer to MC portal's I/O object
17565 + * @dprc_token: Parent container token; '0' for default container
17566 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17567 + * @cfg: Configuration structure
17568 + * @obj_id: returned object id
17569 + *
17570 + * Create the DPDMUX object, allocate required resources and
17571 + * perform required initialization.
17572 + *
17573 + * The object can be created either by declaring it in the
17574 + * DPL file, or by calling this function.
17575 + *
17576 + * The function accepts an authentication token of a parent
17577 + * container that this object should be assigned to. The token
17578 + * can be '0' so the object will be assigned to the default container.
17579 + * The newly created object can be opened with the returned
17580 + * object id and using the container's associated tokens and MC portals.
17581 + *
17582 + * Return: '0' on Success; Error code otherwise.
17583 + */
17584 +int dpdmux_create(struct fsl_mc_io *mc_io,
17585 + u16 dprc_token,
17586 + u32 cmd_flags,
17587 + const struct dpdmux_cfg *cfg,
17588 + u32 *obj_id)
17589 +{
17590 + struct mc_command cmd = { 0 };
17591 + struct dpdmux_cmd_create *cmd_params;
17592 + int err;
17593 +
17594 + /* prepare command */
17595 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CREATE,
17596 + cmd_flags,
17597 + dprc_token);
17598 + cmd_params = (struct dpdmux_cmd_create *)cmd.params;
17599 + cmd_params->method = cfg->method;
17600 + cmd_params->manip = cfg->manip;
17601 + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
17602 + cmd_params->adv_max_dmat_entries =
17603 + cpu_to_le16(cfg->adv.max_dmat_entries);
17604 + cmd_params->adv_max_mc_groups = cpu_to_le16(cfg->adv.max_mc_groups);
17605 + cmd_params->adv_max_vlan_ids = cpu_to_le16(cfg->adv.max_vlan_ids);
17606 + cmd_params->options = cpu_to_le64(cfg->adv.options);
17607 +
17608 + /* send command to mc*/
17609 + err = mc_send_command(mc_io, &cmd);
17610 + if (err)
17611 + return err;
17612 +
17613 + /* retrieve response parameters */
17614 + *obj_id = mc_cmd_hdr_read_token(&cmd);
17615 +
17616 + return 0;
17617 +}
17618 +
17619 +/**
17620 + * dpdmux_destroy() - Destroy the DPDMUX object and release all its resources.
17621 + * @mc_io: Pointer to MC portal's I/O object
17622 + * @dprc_token: Parent container token; '0' for default container
17623 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17624 + * @object_id: The object id; it must be a valid id within the container that
17625 + * created this object;
17626 + *
17627 + * The function accepts the authentication token of the parent container that
17628 + * created the object (not the one that currently owns the object). The object
17629 + * is searched within parent using the provided 'object_id'.
17630 + * All tokens to the object must be closed before calling destroy.
17631 + *
17632 + * Return: '0' on Success; error code otherwise.
17633 + */
17634 +int dpdmux_destroy(struct fsl_mc_io *mc_io,
17635 + u16 dprc_token,
17636 + u32 cmd_flags,
17637 + u32 object_id)
17638 +{
17639 + struct mc_command cmd = { 0 };
17640 + struct dpdmux_cmd_destroy *cmd_params;
17641 +
17642 + /* prepare command */
17643 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DESTROY,
17644 + cmd_flags,
17645 + dprc_token);
17646 + cmd_params = (struct dpdmux_cmd_destroy *)cmd.params;
17647 + cmd_params->dpdmux_id = cpu_to_le32(object_id);
17648 +
17649 + /* send command to mc*/
17650 + return mc_send_command(mc_io, &cmd);
17651 +}
17652 +
17653 +/**
17654 + * dpdmux_enable() - Enable DPDMUX functionality
17655 + * @mc_io: Pointer to MC portal's I/O object
17656 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17657 + * @token: Token of DPDMUX object
17658 + *
17659 + * Return: '0' on Success; Error code otherwise.
17660 + */
17661 +int dpdmux_enable(struct fsl_mc_io *mc_io,
17662 + u32 cmd_flags,
17663 + u16 token)
17664 +{
17665 + struct mc_command cmd = { 0 };
17666 +
17667 + /* prepare command */
17668 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE,
17669 + cmd_flags,
17670 + token);
17671 +
17672 + /* send command to mc*/
17673 + return mc_send_command(mc_io, &cmd);
17674 +}
17675 +
17676 +/**
17677 + * dpdmux_disable() - Disable DPDMUX functionality
17678 + * @mc_io: Pointer to MC portal's I/O object
17679 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17680 + * @token: Token of DPDMUX object
17681 + *
17682 + * Return: '0' on Success; Error code otherwise.
17683 + */
17684 +int dpdmux_disable(struct fsl_mc_io *mc_io,
17685 + u32 cmd_flags,
17686 + u16 token)
17687 +{
17688 + struct mc_command cmd = { 0 };
17689 +
17690 + /* prepare command */
17691 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE,
17692 + cmd_flags,
17693 + token);
17694 +
17695 + /* send command to mc*/
17696 + return mc_send_command(mc_io, &cmd);
17697 +}
17698 +
17699 +/**
17700 + * dpdmux_is_enabled() - Check if the DPDMUX is enabled.
17701 + * @mc_io: Pointer to MC portal's I/O object
17702 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17703 + * @token: Token of DPDMUX object
17704 + * @en: Returns '1' if object is enabled; '0' otherwise
17705 + *
17706 + * Return: '0' on Success; Error code otherwise.
17707 + */
17708 +int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
17709 + u32 cmd_flags,
17710 + u16 token,
17711 + int *en)
17712 +{
17713 + struct mc_command cmd = { 0 };
17714 + struct dpdmux_rsp_is_enabled *rsp_params;
17715 + int err;
17716 +
17717 + /* prepare command */
17718 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IS_ENABLED,
17719 + cmd_flags,
17720 + token);
17721 +
17722 + /* send command to mc*/
17723 + err = mc_send_command(mc_io, &cmd);
17724 + if (err)
17725 + return err;
17726 +
17727 + /* retrieve response parameters */
17728 + rsp_params = (struct dpdmux_rsp_is_enabled *)cmd.params;
17729 + *en = dpdmux_get_field(rsp_params->en, ENABLE);
17730 +
17731 + return 0;
17732 +}
17733 +
17734 +/**
17735 + * dpdmux_reset() - Reset the DPDMUX, returns the object to initial state.
17736 + * @mc_io: Pointer to MC portal's I/O object
17737 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17738 + * @token: Token of DPDMUX object
17739 + *
17740 + * Return: '0' on Success; Error code otherwise.
17741 + */
17742 +int dpdmux_reset(struct fsl_mc_io *mc_io,
17743 + u32 cmd_flags,
17744 + u16 token)
17745 +{
17746 + struct mc_command cmd = { 0 };
17747 +
17748 + /* prepare command */
17749 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET,
17750 + cmd_flags,
17751 + token);
17752 +
17753 + /* send command to mc*/
17754 + return mc_send_command(mc_io, &cmd);
17755 +}
17756 +
17757 +/**
17758 + * dpdmux_set_irq_enable() - Set overall interrupt state.
17759 + * @mc_io: Pointer to MC portal's I/O object
17760 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17761 + * @token: Token of DPDMUX object
17762 + * @irq_index: The interrupt index to configure
17763 + * @en: Interrupt state - enable = 1, disable = 0
17764 + *
17765 + * Allows GPP software to control when interrupts are generated.
17766 + * Each interrupt can have up to 32 causes. The enable/disable control's the
17767 + * overall interrupt state. if the interrupt is disabled no causes will cause
17768 + * an interrupt.
17769 + *
17770 + * Return: '0' on Success; Error code otherwise.
17771 + */
17772 +int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
17773 + u32 cmd_flags,
17774 + u16 token,
17775 + u8 irq_index,
17776 + u8 en)
17777 +{
17778 + struct mc_command cmd = { 0 };
17779 + struct dpdmux_cmd_set_irq_enable *cmd_params;
17780 +
17781 + /* prepare command */
17782 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_ENABLE,
17783 + cmd_flags,
17784 + token);
17785 + cmd_params = (struct dpdmux_cmd_set_irq_enable *)cmd.params;
17786 + cmd_params->enable = en;
17787 + cmd_params->irq_index = irq_index;
17788 +
17789 + /* send command to mc*/
17790 + return mc_send_command(mc_io, &cmd);
17791 +}
17792 +
17793 +/**
17794 + * dpdmux_get_irq_enable() - Get overall interrupt state.
17795 + * @mc_io: Pointer to MC portal's I/O object
17796 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17797 + * @token: Token of DPDMUX object
17798 + * @irq_index: The interrupt index to configure
17799 + * @en: Returned interrupt state - enable = 1, disable = 0
17800 + *
17801 + * Return: '0' on Success; Error code otherwise.
17802 + */
17803 +int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
17804 + u32 cmd_flags,
17805 + u16 token,
17806 + u8 irq_index,
17807 + u8 *en)
17808 +{
17809 + struct mc_command cmd = { 0 };
17810 + struct dpdmux_cmd_get_irq_enable *cmd_params;
17811 + struct dpdmux_rsp_get_irq_enable *rsp_params;
17812 + int err;
17813 +
17814 + /* prepare command */
17815 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_ENABLE,
17816 + cmd_flags,
17817 + token);
17818 + cmd_params = (struct dpdmux_cmd_get_irq_enable *)cmd.params;
17819 + cmd_params->irq_index = irq_index;
17820 +
17821 + /* send command to mc*/
17822 + err = mc_send_command(mc_io, &cmd);
17823 + if (err)
17824 + return err;
17825 +
17826 + /* retrieve response parameters */
17827 + rsp_params = (struct dpdmux_rsp_get_irq_enable *)cmd.params;
17828 + *en = rsp_params->enable;
17829 +
17830 + return 0;
17831 +}
17832 +
17833 +/**
17834 + * dpdmux_set_irq_mask() - Set interrupt mask.
17835 + * @mc_io: Pointer to MC portal's I/O object
17836 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17837 + * @token: Token of DPDMUX object
17838 + * @irq_index: The interrupt index to configure
17839 + * @mask: event mask to trigger interrupt;
17840 + * each bit:
17841 + * 0 = ignore event
17842 + * 1 = consider event for asserting IRQ
17843 + *
17844 + * Every interrupt can have up to 32 causes and the interrupt model supports
17845 + * masking/unmasking each cause independently
17846 + *
17847 + * Return: '0' on Success; Error code otherwise.
17848 + */
17849 +int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
17850 + u32 cmd_flags,
17851 + u16 token,
17852 + u8 irq_index,
17853 + u32 mask)
17854 +{
17855 + struct mc_command cmd = { 0 };
17856 + struct dpdmux_cmd_set_irq_mask *cmd_params;
17857 +
17858 + /* prepare command */
17859 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_MASK,
17860 + cmd_flags,
17861 + token);
17862 + cmd_params = (struct dpdmux_cmd_set_irq_mask *)cmd.params;
17863 + cmd_params->mask = cpu_to_le32(mask);
17864 + cmd_params->irq_index = irq_index;
17865 +
17866 + /* send command to mc*/
17867 + return mc_send_command(mc_io, &cmd);
17868 +}
17869 +
17870 +/**
17871 + * dpdmux_get_irq_mask() - Get interrupt mask.
17872 + * @mc_io: Pointer to MC portal's I/O object
17873 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17874 + * @token: Token of DPDMUX object
17875 + * @irq_index: The interrupt index to configure
17876 + * @mask: Returned event mask to trigger interrupt
17877 + *
17878 + * Every interrupt can have up to 32 causes and the interrupt model supports
17879 + * masking/unmasking each cause independently
17880 + *
17881 + * Return: '0' on Success; Error code otherwise.
17882 + */
17883 +int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
17884 + u32 cmd_flags,
17885 + u16 token,
17886 + u8 irq_index,
17887 + u32 *mask)
17888 +{
17889 + struct mc_command cmd = { 0 };
17890 + struct dpdmux_cmd_get_irq_mask *cmd_params;
17891 + struct dpdmux_rsp_get_irq_mask *rsp_params;
17892 + int err;
17893 +
17894 + /* prepare command */
17895 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_MASK,
17896 + cmd_flags,
17897 + token);
17898 + cmd_params = (struct dpdmux_cmd_get_irq_mask *)cmd.params;
17899 + cmd_params->irq_index = irq_index;
17900 +
17901 + /* send command to mc*/
17902 + err = mc_send_command(mc_io, &cmd);
17903 + if (err)
17904 + return err;
17905 +
17906 + /* retrieve response parameters */
17907 + rsp_params = (struct dpdmux_rsp_get_irq_mask *)cmd.params;
17908 + *mask = le32_to_cpu(rsp_params->mask);
17909 +
17910 + return 0;
17911 +}
17912 +
17913 +/**
17914 + * dpdmux_get_irq_status() - Get the current status of any pending interrupts.
17915 + * @mc_io: Pointer to MC portal's I/O object
17916 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17917 + * @token: Token of DPDMUX object
17918 + * @irq_index: The interrupt index to configure
17919 + * @status: Returned interrupts status - one bit per cause:
17920 + * 0 = no interrupt pending
17921 + * 1 = interrupt pending
17922 + *
17923 + * Return: '0' on Success; Error code otherwise.
17924 + */
17925 +int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
17926 + u32 cmd_flags,
17927 + u16 token,
17928 + u8 irq_index,
17929 + u32 *status)
17930 +{
17931 + struct mc_command cmd = { 0 };
17932 + struct dpdmux_cmd_get_irq_status *cmd_params;
17933 + struct dpdmux_rsp_get_irq_status *rsp_params;
17934 + int err;
17935 +
17936 + /* prepare command */
17937 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_STATUS,
17938 + cmd_flags,
17939 + token);
17940 + cmd_params = (struct dpdmux_cmd_get_irq_status *)cmd.params;
17941 + cmd_params->status = cpu_to_le32(*status);
17942 + cmd_params->irq_index = irq_index;
17943 +
17944 + /* send command to mc*/
17945 + err = mc_send_command(mc_io, &cmd);
17946 + if (err)
17947 + return err;
17948 +
17949 + /* retrieve response parameters */
17950 + rsp_params = (struct dpdmux_rsp_get_irq_status *)cmd.params;
17951 + *status = le32_to_cpu(rsp_params->status);
17952 +
17953 + return 0;
17954 +}
17955 +
17956 +/**
17957 + * dpdmux_clear_irq_status() - Clear a pending interrupt's status
17958 + * @mc_io: Pointer to MC portal's I/O object
17959 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17960 + * @token: Token of DPDMUX object
17961 + * @irq_index: The interrupt index to configure
17962 + * @status: bits to clear (W1C) - one bit per cause:
17963 + * 0 = don't change
17964 + * 1 = clear status bit
17965 + *
17966 + * Return: '0' on Success; Error code otherwise.
17967 + */
17968 +int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
17969 + u32 cmd_flags,
17970 + u16 token,
17971 + u8 irq_index,
17972 + u32 status)
17973 +{
17974 + struct mc_command cmd = { 0 };
17975 + struct dpdmux_cmd_clear_irq_status *cmd_params;
17976 +
17977 + /* prepare command */
17978 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLEAR_IRQ_STATUS,
17979 + cmd_flags,
17980 + token);
17981 + cmd_params = (struct dpdmux_cmd_clear_irq_status *)cmd.params;
17982 + cmd_params->status = cpu_to_le32(status);
17983 + cmd_params->irq_index = irq_index;
17984 +
17985 + /* send command to mc*/
17986 + return mc_send_command(mc_io, &cmd);
17987 +}
17988 +
17989 +/**
17990 + * dpdmux_get_attributes() - Retrieve DPDMUX attributes
17991 + * @mc_io: Pointer to MC portal's I/O object
17992 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
17993 + * @token: Token of DPDMUX object
17994 + * @attr: Returned object's attributes
17995 + *
17996 + * Return: '0' on Success; Error code otherwise.
17997 + */
17998 +int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
17999 + u32 cmd_flags,
18000 + u16 token,
18001 + struct dpdmux_attr *attr)
18002 +{
18003 + struct mc_command cmd = { 0 };
18004 + struct dpdmux_rsp_get_attr *rsp_params;
18005 + int err;
18006 +
18007 + /* prepare command */
18008 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_ATTR,
18009 + cmd_flags,
18010 + token);
18011 +
18012 + /* send command to mc*/
18013 + err = mc_send_command(mc_io, &cmd);
18014 + if (err)
18015 + return err;
18016 +
18017 + /* retrieve response parameters */
18018 + rsp_params = (struct dpdmux_rsp_get_attr *)cmd.params;
18019 + attr->id = le32_to_cpu(rsp_params->id);
18020 + attr->options = le64_to_cpu(rsp_params->options);
18021 + attr->method = rsp_params->method;
18022 + attr->manip = rsp_params->manip;
18023 + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
18024 + attr->mem_size = le16_to_cpu(rsp_params->mem_size);
18025 +
18026 + return 0;
18027 +}
18028 +
18029 +/**
18030 + * dpdmux_if_enable() - Enable Interface
18031 + * @mc_io: Pointer to MC portal's I/O object
18032 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
18033 + * @token: Token of DPDMUX object
18034 + * @if_id: Interface Identifier
18035 + *
18036 + * Return: Completion status. '0' on Success; Error code otherwise.
18037 + */
18038 +int dpdmux_if_enable(struct fsl_mc_io *mc_io,
18039 + u32 cmd_flags,
18040 + u16 token,
18041 + u16 if_id)
18042 +{
18043 + struct dpdmux_cmd_if *cmd_params;
18044 + struct mc_command cmd = { 0 };
18045 +
18046 + /* prepare command */
18047 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ENABLE,
18048 + cmd_flags,
18049 + token);
18050 + cmd_params = (struct dpdmux_cmd_if *)cmd.params;
18051 + cmd_params->if_id = cpu_to_le16(if_id);
18052 +
18053 + /* send command to mc*/
18054 + return mc_send_command(mc_io, &cmd);
18055 +}
18056 +
18057 +/**
18058 + * dpdmux_if_disable() - Disable Interface
18059 + * @mc_io: Pointer to MC portal's I/O object
18060 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
18061 + * @token: Token of DPDMUX object
18062 + * @if_id: Interface Identifier
18063 + *
18064 + * Return: Completion status. '0' on Success; Error code otherwise.
18065 + */
18066 +int dpdmux_if_disable(struct fsl_mc_io *mc_io,
18067 + u32 cmd_flags,
18068 + u16 token,
18069 + u16 if_id)
18070 +{
18071 + struct dpdmux_cmd_if *cmd_params;
18072 + struct mc_command cmd = { 0 };
18073 +
18074 + /* prepare command */
18075 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_DISABLE,
18076 + cmd_flags,
18077 + token);
18078 + cmd_params = (struct dpdmux_cmd_if *)cmd.params;
18079 + cmd_params->if_id = cpu_to_le16(if_id);
18080 +
18081 + /* send command to mc*/
18082 + return mc_send_command(mc_io, &cmd);
18083 +}
18084 +
18085 +/**
18086 + * dpdmux_set_max_frame_length() - Set the maximum frame length in DPDMUX
18087 + * @mc_io: Pointer to MC portal's I/O object
18088 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
18089 + * @token: Token of DPDMUX object
18090 + * @max_frame_length: The required maximum frame length
18091 + *
18092 + * Update the maximum frame length on all DMUX interfaces.
18093 + * In case of VEPA, the maximum frame length on all dmux interfaces
18094 + * will be updated with the minimum value of the mfls of the connected
18095 + * dpnis and the actual value of dmux mfl.
18096 + *
18097 + * Return: '0' on Success; Error code otherwise.
18098 + */
18099 +int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
18100 + u32 cmd_flags,
18101 + u16 token,
18102 + u16 max_frame_length)
18103 +{
18104 + struct mc_command cmd = { 0 };
18105 + struct dpdmux_cmd_set_max_frame_length *cmd_params;
18106 +
18107 + /* prepare command */
18108 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_MAX_FRAME_LENGTH,
18109 + cmd_flags,
18110 + token);
18111 + cmd_params = (struct dpdmux_cmd_set_max_frame_length *)cmd.params;
18112 + cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
18113 +
18114 + /* send command to mc*/
18115 + return mc_send_command(mc_io, &cmd);
18116 +}
18117 +
18118 +/**
18119 + * dpdmux_ul_reset_counters() - Function resets the uplink counter
18120 + * @mc_io: Pointer to MC portal's I/O object
18121 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
18122 + * @token: Token of DPDMUX object
18123 + *
18124 + * Return: '0' on Success; Error code otherwise.
18125 + */
18126 +int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
18127 + u32 cmd_flags,
18128 + u16 token)
18129 +{
18130 + struct mc_command cmd = { 0 };
18131 +
18132 + /* prepare command */
18133 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS,
18134 + cmd_flags,
18135 + token);
18136 +
18137 + /* send command to mc*/
18138 + return mc_send_command(mc_io, &cmd);
18139 +}
18140 +
18141 +/**
18142 + * dpdmux_if_set_accepted_frames() - Set the accepted frame types
18143 + * @mc_io: Pointer to MC portal's I/O object
18144 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
18145 + * @token: Token of DPDMUX object
18146 + * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
18147 + * @cfg: Frame types configuration
18148 + *
18149 + * if 'DPDMUX_ADMIT_ONLY_VLAN_TAGGED' is set - untagged frames or
18150 + * priority-tagged frames are discarded.
18151 + * if 'DPDMUX_ADMIT_ONLY_UNTAGGED' is set - untagged frames or
18152 + * priority-tagged frames are accepted.
18153 + * if 'DPDMUX_ADMIT_ALL' is set (default mode) - all VLAN tagged,
18154 + * untagged and priority-tagged frame are accepted;
18155 + *
18156 + * Return: '0' on Success; Error code otherwise.
18157 + */
18158 +int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
18159 + u32 cmd_flags,
18160 + u16 token,
18161 + u16 if_id,
18162 + const struct dpdmux_accepted_frames *cfg)
18163 +{
18164 + struct mc_command cmd = { 0 };
18165 + struct dpdmux_cmd_if_set_accepted_frames *cmd_params;
18166 +
18167 + /* prepare command */
18168 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES,
18169 + cmd_flags,
18170 + token);
18171 + cmd_params = (struct dpdmux_cmd_if_set_accepted_frames *)cmd.params;
18172 + cmd_params->if_id = cpu_to_le16(if_id);
18173 + dpdmux_set_field(cmd_params->frames_options, ACCEPTED_FRAMES_TYPE,
18174 + cfg->type);
18175 + dpdmux_set_field(cmd_params->frames_options, UNACCEPTED_FRAMES_ACTION,
18176 + cfg->unaccept_act);
18177 +
18178 + /* send command to mc*/
18179 + return mc_send_command(mc_io, &cmd);
18180 +}
18181 +
18182 +/**
18183 + * dpdmux_if_get_attributes() - Obtain DPDMUX interface attributes
18184 + * @mc_io: Pointer to MC portal's I/O object
18185 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
18186 + * @token: Token of DPDMUX object
18187 + * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
18188 + * @attr: Interface attributes
18189 + *
18190 + * Return: '0' on Success; Error code otherwise.
18191 + */
18192 +int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
18193 + u32 cmd_flags,
18194 + u16 token,
18195 + u16 if_id,
18196 + struct dpdmux_if_attr *attr)
18197 +{
18198 + struct mc_command cmd = { 0 };
18199 + struct dpdmux_cmd_if *cmd_params;
18200 + struct dpdmux_rsp_if_get_attr *rsp_params;
18201 + int err;
18202 +
18203 + /* prepare command */
18204 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_ATTR,
18205 + cmd_flags,
18206 + token);
18207 + cmd_params = (struct dpdmux_cmd_if *)cmd.params;
18208 + cmd_params->if_id = cpu_to_le16(if_id);
18209 +
18210 + /* send command to mc*/
18211 + err = mc_send_command(mc_io, &cmd);
18212 + if (err)
18213 + return err;
18214 +
18215 + /* retrieve response parameters */
18216 + rsp_params = (struct dpdmux_rsp_if_get_attr *)cmd.params;
18217 + attr->rate = le32_to_cpu(rsp_params->rate);
18218 + attr->enabled = dpdmux_get_field(rsp_params->enabled, ENABLE);
18219 + attr->accept_frame_type =
18220 + dpdmux_get_field(rsp_params->accepted_frames_type,
18221 + ACCEPTED_FRAMES_TYPE);
18222 +
18223 + return 0;
18224 +}
18225 +
18226 +/**
18227 + * dpdmux_if_remove_l2_rule() - Remove L2 rule from DPDMUX table
18228 + * @mc_io: Pointer to MC portal's I/O object
18229 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
18230 + * @token: Token of DPDMUX object
18231 + * @if_id: Destination interface ID
18232 + * @rule: L2 rule
18233 + *
18234 + * Function removes a L2 rule from DPDMUX table
18235 + * or adds an interface to an existing multicast address
18236 + *
18237 + * Return: '0' on Success; Error code otherwise.
18238 + */
18239 +int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
18240 + u32 cmd_flags,
18241 + u16 token,
18242 + u16 if_id,
18243 + const struct dpdmux_l2_rule *rule)
18244 +{
18245 + struct mc_command cmd = { 0 };
18246 + struct dpdmux_cmd_if_l2_rule *cmd_params;
18247 +
18248 + /* prepare command */
18249 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_REMOVE_L2_RULE,
18250 + cmd_flags,
18251 + token);
18252 + cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
18253 + cmd_params->if_id = cpu_to_le16(if_id);
18254 + cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
18255 + cmd_params->mac_addr5 = rule->mac_addr[5];
18256 + cmd_params->mac_addr4 = rule->mac_addr[4];
18257 + cmd_params->mac_addr3 = rule->mac_addr[3];
18258 + cmd_params->mac_addr2 = rule->mac_addr[2];
18259 + cmd_params->mac_addr1 = rule->mac_addr[1];
18260 + cmd_params->mac_addr0 = rule->mac_addr[0];
18261 +
18262 + /* send command to mc*/
18263 + return mc_send_command(mc_io, &cmd);
18264 +}
18265 +
18266 +/**
18267 + * dpdmux_if_add_l2_rule() - Add L2 rule into DPDMUX table
18268 + * @mc_io: Pointer to MC portal's I/O object
18269 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
18270 + * @token: Token of DPDMUX object
18271 + * @if_id: Destination interface ID
18272 + * @rule: L2 rule
18273 + *
18274 + * Function adds a L2 rule into DPDMUX table
18275 + * or adds an interface to an existing multicast address
18276 + *
18277 + * Return: '0' on Success; Error code otherwise.
18278 + */
18279 +int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
18280 + u32 cmd_flags,
18281 + u16 token,
18282 + u16 if_id,
18283 + const struct dpdmux_l2_rule *rule)
18284 +{
18285 + struct mc_command cmd = { 0 };
18286 + struct dpdmux_cmd_if_l2_rule *cmd_params;
18287 +
18288 + /* prepare command */
18289 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ADD_L2_RULE,
18290 + cmd_flags,
18291 + token);
18292 + cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
18293 + cmd_params->if_id = cpu_to_le16(if_id);
18294 + cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
18295 + cmd_params->mac_addr5 = rule->mac_addr[5];
18296 + cmd_params->mac_addr4 = rule->mac_addr[4];
18297 + cmd_params->mac_addr3 = rule->mac_addr[3];
18298 + cmd_params->mac_addr2 = rule->mac_addr[2];
18299 + cmd_params->mac_addr1 = rule->mac_addr[1];
18300 + cmd_params->mac_addr0 = rule->mac_addr[0];
18301 +
18302 + /* send command to mc*/
18303 + return mc_send_command(mc_io, &cmd);
18304 +}
18305 +
18306 +/**
18307 + * dpdmux_if_get_counter() - Functions obtains specific counter of an interface
18308 + * @mc_io: Pointer to MC portal's I/O object
18309 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
18310 + * @token: Token of DPDMUX object
18311 + * @if_id: Interface Id
18312 + * @counter_type: counter type
18313 + * @counter: Returned specific counter information
18314 + *
18315 + * Return: '0' on Success; Error code otherwise.
18316 + */
18317 +int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
18318 + u32 cmd_flags,
18319 + u16 token,
18320 + u16 if_id,
18321 + enum dpdmux_counter_type counter_type,
18322 + u64 *counter)
18323 +{
18324 + struct mc_command cmd = { 0 };
18325 + struct dpdmux_cmd_if_get_counter *cmd_params;
18326 + struct dpdmux_rsp_if_get_counter *rsp_params;
18327 + int err;
18328 +
18329 + /* prepare command */
18330 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_COUNTER,
18331 + cmd_flags,
18332 + token);
18333 + cmd_params = (struct dpdmux_cmd_if_get_counter *)cmd.params;
18334 + cmd_params->if_id = cpu_to_le16(if_id);
18335 + cmd_params->counter_type = counter_type;
18336 +
18337 + /* send command to mc*/
18338 + err = mc_send_command(mc_io, &cmd);
18339 + if (err)
18340 + return err;
18341 +
18342 + /* retrieve response parameters */
18343 + rsp_params = (struct dpdmux_rsp_if_get_counter *)cmd.params;
18344 + *counter = le64_to_cpu(rsp_params->counter);
18345 +
18346 + return 0;
18347 +}
18348 +
18349 +/**
18350 + * dpdmux_if_set_link_cfg() - set the link configuration.
18351 + * @mc_io: Pointer to MC portal's I/O object
18352 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
18353 + * @token: Token of DPSW object
18354 + * @if_id: interface id
18355 + * @cfg: Link configuration
18356 + *
18357 + * Return: '0' on Success; Error code otherwise.
18358 + */
18359 +int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
18360 + u32 cmd_flags,
18361 + u16 token,
18362 + u16 if_id,
18363 + struct dpdmux_link_cfg *cfg)
18364 +{
18365 + struct mc_command cmd = { 0 };
18366 + struct dpdmux_cmd_if_set_link_cfg *cmd_params;
18367 +
18368 + /* prepare command */
18369 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_LINK_CFG,
18370 + cmd_flags,
18371 + token);
18372 + cmd_params = (struct dpdmux_cmd_if_set_link_cfg *)cmd.params;
18373 + cmd_params->if_id = cpu_to_le16(if_id);
18374 + cmd_params->rate = cpu_to_le32(cfg->rate);
18375 + cmd_params->options = cpu_to_le64(cfg->options);
18376 +
18377 + /* send command to mc*/
18378 + return mc_send_command(mc_io, &cmd);
18379 +}
18380 +
18381 +/**
18382 + * dpdmux_if_get_link_state - Return the link state
18383 + * @mc_io: Pointer to MC portal's I/O object
18384 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
18385 + * @token: Token of DPSW object
18386 + * @if_id: interface id
18387 + * @state: link state
18388 + *
18389 + * @returns '0' on Success; Error code otherwise.
18390 + */
18391 +int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
18392 + u32 cmd_flags,
18393 + u16 token,
18394 + u16 if_id,
18395 + struct dpdmux_link_state *state)
18396 +{
18397 + struct mc_command cmd = { 0 };
18398 + struct dpdmux_cmd_if_get_link_state *cmd_params;
18399 + struct dpdmux_rsp_if_get_link_state *rsp_params;
18400 + int err;
18401 +
18402 + /* prepare command */
18403 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_LINK_STATE,
18404 + cmd_flags,
18405 + token);
18406 + cmd_params = (struct dpdmux_cmd_if_get_link_state *)cmd.params;
18407 + cmd_params->if_id = cpu_to_le16(if_id);
18408 +
18409 + /* send command to mc*/
18410 + err = mc_send_command(mc_io, &cmd);
18411 + if (err)
18412 + return err;
18413 +
18414 + /* retrieve response parameters */
18415 + rsp_params = (struct dpdmux_rsp_if_get_link_state *)cmd.params;
18416 + state->rate = le32_to_cpu(rsp_params->rate);
18417 + state->options = le64_to_cpu(rsp_params->options);
18418 + state->up = dpdmux_get_field(rsp_params->up, ENABLE);
18419 +
18420 + return 0;
18421 +}
18422 +
18423 +/**
18424 + * dpdmux_set_custom_key - Set a custom classification key.
18425 + *
18426 + * This API is only available for DPDMUX instance created with
18427 + * DPDMUX_METHOD_CUSTOM. This API must be called before populating the
18428 + * classification table using dpdmux_add_custom_cls_entry.
18429 + *
18430 + * Calls to dpdmux_set_custom_key remove all existing classification entries
18431 + * that may have been added previously using dpdmux_add_custom_cls_entry.
18432 + *
18433 + * @mc_io: Pointer to MC portal's I/O object
18434 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
18435 + * @token: Token of DPSW object
18436 + * @if_id: interface id
18437 + * @key_cfg_iova: DMA address of a configuration structure set up using
18438 + * dpkg_prepare_key_cfg. Maximum key size is 24 bytes.
18439 + *
18440 + * @returns '0' on Success; Error code otherwise.
18441 + */
18442 +int dpdmux_set_custom_key(struct fsl_mc_io *mc_io,
18443 + u32 cmd_flags,
18444 + u16 token,
18445 + u64 key_cfg_iova)
18446 +{
18447 + struct dpdmux_set_custom_key *cmd_params;
18448 + struct mc_command cmd = { 0 };
18449 +
18450 + /* prepare command */
18451 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_CUSTOM_KEY,
18452 + cmd_flags,
18453 + token);
18454 + cmd_params = (struct dpdmux_set_custom_key *)cmd.params;
18455 + cmd_params->key_cfg_iova = cpu_to_le64(key_cfg_iova);
18456 +
18457 + /* send command to mc*/
18458 + return mc_send_command(mc_io, &cmd);
18459 +}
18460 +
18461 +/**
18462 + * dpdmux_add_custom_cls_entry - Adds a custom classification entry.
18463 + *
18464 + * This API is only available for DPDMUX instances created with
18465 + * DPDMUX_METHOD_CUSTOM. Before calling this function a classification key
18466 + * composition rule must be set up using dpdmux_set_custom_key.
18467 + *
18468 + * @mc_io: Pointer to MC portal's I/O object
18469 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
18470 + * @token: Token of DPSW object
18471 + * @rule: Classification rule to insert. Rules cannot be duplicated, if a
18472 + * matching rule already exists, the action will be replaced.
18473 + * @action: Action to perform for matching traffic.
18474 + *
18475 + * @returns '0' on Success; Error code otherwise.
18476 + */
18477 +int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io,
18478 + u32 cmd_flags,
18479 + u16 token,
18480 + struct dpdmux_rule_cfg *rule,
18481 + struct dpdmux_cls_action *action)
18482 +{
18483 + struct dpdmux_cmd_add_custom_cls_entry *cmd_params;
18484 + struct mc_command cmd = { 0 };
18485 +
18486 + /* prepare command */
18487 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY,
18488 + cmd_flags,
18489 + token);
18490 +
18491 + cmd_params = (struct dpdmux_cmd_add_custom_cls_entry *)cmd.params;
18492 + cmd_params->key_size = rule->key_size;
18493 + cmd_params->dest_if = cpu_to_le16(action->dest_if);
18494 + cmd_params->key_iova = cpu_to_le64(rule->key_iova);
18495 + cmd_params->mask_iova = cpu_to_le64(rule->mask_iova);
18496 +
18497 + /* send command to mc*/
18498 + return mc_send_command(mc_io, &cmd);
18499 +}
18500 +
18501 +/**
18502 + * dpdmux_remove_custom_cls_entry - Removes a custom classification entry.
18503 + *
18504 + * This API is only available for DPDMUX instances created with
18505 + * DPDMUX_METHOD_CUSTOM. The API can be used to remove classification
18506 + * entries previously inserted using dpdmux_add_custom_cls_entry.
18507 + *
18508 + * @mc_io: Pointer to MC portal's I/O object
18509 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
18510 + * @token: Token of DPSW object
18511 + * @rule: Classification rule to remove
18512 + *
18513 + * @returns '0' on Success; Error code otherwise.
18514 + */
18515 +int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io,
18516 + u32 cmd_flags,
18517 + u16 token,
18518 + struct dpdmux_rule_cfg *rule)
18519 +{
18520 + struct dpdmux_cmd_remove_custom_cls_entry *cmd_params;
18521 + struct mc_command cmd = { 0 };
18522 +
18523 + /* prepare command */
18524 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY,
18525 + cmd_flags,
18526 + token);
18527 + cmd_params = (struct dpdmux_cmd_remove_custom_cls_entry *)cmd.params;
18528 + cmd_params->key_size = rule->key_size;
18529 + cmd_params->key_iova = cpu_to_le64(rule->key_iova);
18530 + cmd_params->mask_iova = cpu_to_le64(rule->mask_iova);
18531 +
18532 + /* send command to mc*/
18533 + return mc_send_command(mc_io, &cmd);
18534 +}
18535 +
18536 +/**
18537 + * dpdmux_get_api_version() - Get Data Path Demux API version
18538 + * @mc_io: Pointer to MC portal's I/O object
18539 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
18540 + * @major_ver: Major version of data path demux API
18541 + * @minor_ver: Minor version of data path demux API
18542 + *
18543 + * Return: '0' on Success; Error code otherwise.
18544 + */
18545 +int dpdmux_get_api_version(struct fsl_mc_io *mc_io,
18546 + u32 cmd_flags,
18547 + u16 *major_ver,
18548 + u16 *minor_ver)
18549 +{
18550 + struct mc_command cmd = { 0 };
18551 + struct dpdmux_rsp_get_api_version *rsp_params;
18552 + int err;
18553 +
18554 + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_API_VERSION,
18555 + cmd_flags,
18556 + 0);
18557 +
18558 + err = mc_send_command(mc_io, &cmd);
18559 + if (err)
18560 + return err;
18561 +
18562 + rsp_params = (struct dpdmux_rsp_get_api_version *)cmd.params;
18563 + *major_ver = le16_to_cpu(rsp_params->major);
18564 + *minor_ver = le16_to_cpu(rsp_params->minor);
18565 +
18566 + return 0;
18567 +}
18568 --- /dev/null
18569 +++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.h
18570 @@ -0,0 +1,453 @@
18571 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
18572 + *
18573 + * Redistribution and use in source and binary forms, with or without
18574 + * modification, are permitted provided that the following conditions are met:
18575 + * * Redistributions of source code must retain the above copyright
18576 + * notice, this list of conditions and the following disclaimer.
18577 + * * Redistributions in binary form must reproduce the above copyright
18578 + * notice, this list of conditions and the following disclaimer in the
18579 + * documentation and/or other materials provided with the distribution.
18580 + * * Neither the name of the above-listed copyright holders nor the
18581 + * names of any contributors may be used to endorse or promote products
18582 + * derived from this software without specific prior written permission.
18583 + *
18584 + *
18585 + * ALTERNATIVELY, this software may be distributed under the terms of the
18586 + * GNU General Public License ("GPL") as published by the Free Software
18587 + * Foundation, either version 2 of that License or (at your option) any
18588 + * later version.
18589 + *
18590 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18591 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18592 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18593 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
18594 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
18595 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
18596 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
18597 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
18598 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
18599 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
18600 + * POSSIBILITY OF SUCH DAMAGE.
18601 + */
18602 +#ifndef __FSL_DPDMUX_H
18603 +#define __FSL_DPDMUX_H
18604 +
18605 +struct fsl_mc_io;
18606 +
18607 +/* Data Path Demux API
18608 + * Contains API for handling DPDMUX topology and functionality
18609 + */
18610 +
18611 +int dpdmux_open(struct fsl_mc_io *mc_io,
18612 + u32 cmd_flags,
18613 + int dpdmux_id,
18614 + u16 *token);
18615 +
18616 +int dpdmux_close(struct fsl_mc_io *mc_io,
18617 + u32 cmd_flags,
18618 + u16 token);
18619 +
18620 +/**
18621 + * DPDMUX general options
18622 + */
18623 +
18624 +/**
18625 + * Enable bridging between internal interfaces
18626 + */
18627 +#define DPDMUX_OPT_BRIDGE_EN 0x0000000000000002ULL
18628 +
18629 +/**
18630 + * Mask support for classification
18631 + */
18632 +#define DPDMUX_OPT_CLS_MASK_SUPPORT 0x0000000000000020ULL
18633 +
18634 +#define DPDMUX_IRQ_INDEX_IF 0x0000
18635 +#define DPDMUX_IRQ_INDEX 0x0001
18636 +
18637 +/**
18638 + * IRQ event - Indicates that the link state changed
18639 + */
18640 +#define DPDMUX_IRQ_EVENT_LINK_CHANGED 0x0001
18641 +
18642 +/**
18643 + * enum dpdmux_manip - DPDMUX manipulation operations
18644 + * @DPDMUX_MANIP_NONE: No manipulation on frames
18645 + * @DPDMUX_MANIP_ADD_REMOVE_S_VLAN: Add S-VLAN on egress, remove it on ingress
18646 + */
18647 +enum dpdmux_manip {
18648 + DPDMUX_MANIP_NONE = 0x0,
18649 + DPDMUX_MANIP_ADD_REMOVE_S_VLAN = 0x1
18650 +};
18651 +
18652 +/**
18653 + * enum dpdmux_method - DPDMUX method options
18654 + * @DPDMUX_METHOD_NONE: no DPDMUX method
18655 + * @DPDMUX_METHOD_C_VLAN_MAC: DPDMUX based on C-VLAN and MAC address
18656 + * @DPDMUX_METHOD_MAC: DPDMUX based on MAC address
18657 + * @DPDMUX_METHOD_C_VLAN: DPDMUX based on C-VLAN
18658 + * @DPDMUX_METHOD_S_VLAN: DPDMUX based on S-VLAN
18659 + */
18660 +enum dpdmux_method {
18661 + DPDMUX_METHOD_NONE = 0x0,
18662 + DPDMUX_METHOD_C_VLAN_MAC = 0x1,
18663 + DPDMUX_METHOD_MAC = 0x2,
18664 + DPDMUX_METHOD_C_VLAN = 0x3,
18665 + DPDMUX_METHOD_S_VLAN = 0x4,
18666 + DPDMUX_METHOD_CUSTOM = 0x5
18667 +};
18668 +
18669 +/**
18670 + * struct dpdmux_cfg - DPDMUX configuration parameters
18671 + * @method: Defines the operation method for the DPDMUX address table
18672 + * @manip: Required manipulation operation
18673 + * @num_ifs: Number of interfaces (excluding the uplink interface)
18674 + * @adv: Advanced parameters; default is all zeros;
18675 + * use this structure to change default settings
18676 + */
18677 +struct dpdmux_cfg {
18678 + enum dpdmux_method method;
18679 + enum dpdmux_manip manip;
18680 + u16 num_ifs;
18681 + /**
18682 + * struct adv - Advanced parameters
18683 + * @options: DPDMUX options - combination of 'DPDMUX_OPT_<X>' flags
18684 + * @max_dmat_entries: Maximum entries in DPDMUX address table
18685 + * 0 - indicates default: 64 entries per interface.
18686 + * @max_mc_groups: Number of multicast groups in DPDMUX table
18687 + * 0 - indicates default: 32 multicast groups
18688 + * @max_vlan_ids: max vlan ids allowed in the system -
18689 + * relevant only case of working in mac+vlan method.
18690 + * 0 - indicates default 16 vlan ids.
18691 + */
18692 + struct {
18693 + u64 options;
18694 + u16 max_dmat_entries;
18695 + u16 max_mc_groups;
18696 + u16 max_vlan_ids;
18697 + } adv;
18698 +};
18699 +
18700 +int dpdmux_create(struct fsl_mc_io *mc_io,
18701 + u16 dprc_token,
18702 + u32 cmd_flags,
18703 + const struct dpdmux_cfg *cfg,
18704 + u32 *obj_id);
18705 +
18706 +int dpdmux_destroy(struct fsl_mc_io *mc_io,
18707 + u16 dprc_token,
18708 + u32 cmd_flags,
18709 + u32 object_id);
18710 +
18711 +int dpdmux_enable(struct fsl_mc_io *mc_io,
18712 + u32 cmd_flags,
18713 + u16 token);
18714 +
18715 +int dpdmux_disable(struct fsl_mc_io *mc_io,
18716 + u32 cmd_flags,
18717 + u16 token);
18718 +
18719 +int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
18720 + u32 cmd_flags,
18721 + u16 token,
18722 + int *en);
18723 +
18724 +int dpdmux_reset(struct fsl_mc_io *mc_io,
18725 + u32 cmd_flags,
18726 + u16 token);
18727 +
18728 +int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
18729 + u32 cmd_flags,
18730 + u16 token,
18731 + u8 irq_index,
18732 + u8 en);
18733 +
18734 +int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
18735 + u32 cmd_flags,
18736 + u16 token,
18737 + u8 irq_index,
18738 + u8 *en);
18739 +
18740 +int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
18741 + u32 cmd_flags,
18742 + u16 token,
18743 + u8 irq_index,
18744 + u32 mask);
18745 +
18746 +int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
18747 + u32 cmd_flags,
18748 + u16 token,
18749 + u8 irq_index,
18750 + u32 *mask);
18751 +
18752 +int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
18753 + u32 cmd_flags,
18754 + u16 token,
18755 + u8 irq_index,
18756 + u32 *status);
18757 +
18758 +int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
18759 + u32 cmd_flags,
18760 + u16 token,
18761 + u8 irq_index,
18762 + u32 status);
18763 +
18764 +/**
18765 + * struct dpdmux_attr - Structure representing DPDMUX attributes
18766 + * @id: DPDMUX object ID
18767 + * @options: Configuration options (bitmap)
18768 + * @method: DPDMUX address table method
18769 + * @manip: DPDMUX manipulation type
18770 + * @num_ifs: Number of interfaces (excluding the uplink interface)
18771 + * @mem_size: DPDMUX frame storage memory size
18772 + */
18773 +struct dpdmux_attr {
18774 + int id;
18775 + u64 options;
18776 + enum dpdmux_method method;
18777 + enum dpdmux_manip manip;
18778 + u16 num_ifs;
18779 + u16 mem_size;
18780 +};
18781 +
18782 +int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
18783 + u32 cmd_flags,
18784 + u16 token,
18785 + struct dpdmux_attr *attr);
18786 +
18787 +int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
18788 + u32 cmd_flags,
18789 + u16 token,
18790 + u16 max_frame_length);
18791 +
18792 +/**
18793 + * enum dpdmux_counter_type - Counter types
18794 + * @DPDMUX_CNT_ING_FRAME: Counts ingress frames
18795 + * @DPDMUX_CNT_ING_BYTE: Counts ingress bytes
18796 + * @DPDMUX_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
18797 + * @DPDMUX_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
18798 + * @DPDMUX_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
18799 + * @DPDMUX_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
18800 + * @DPDMUX_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
18801 + * @DPDMUX_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
18802 + * @DPDMUX_CNT_EGR_FRAME: Counts egress frames
18803 + * @DPDMUX_CNT_EGR_BYTE: Counts egress bytes
18804 + * @DPDMUX_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
18805 + */
18806 +enum dpdmux_counter_type {
18807 + DPDMUX_CNT_ING_FRAME = 0x0,
18808 + DPDMUX_CNT_ING_BYTE = 0x1,
18809 + DPDMUX_CNT_ING_FLTR_FRAME = 0x2,
18810 + DPDMUX_CNT_ING_FRAME_DISCARD = 0x3,
18811 + DPDMUX_CNT_ING_MCAST_FRAME = 0x4,
18812 + DPDMUX_CNT_ING_MCAST_BYTE = 0x5,
18813 + DPDMUX_CNT_ING_BCAST_FRAME = 0x6,
18814 + DPDMUX_CNT_ING_BCAST_BYTES = 0x7,
18815 + DPDMUX_CNT_EGR_FRAME = 0x8,
18816 + DPDMUX_CNT_EGR_BYTE = 0x9,
18817 + DPDMUX_CNT_EGR_FRAME_DISCARD = 0xa
18818 +};
18819 +
18820 +/**
18821 + * enum dpdmux_accepted_frames_type - DPDMUX frame types
18822 + * @DPDMUX_ADMIT_ALL: The device accepts VLAN tagged, untagged and
18823 + * priority-tagged frames
18824 + * @DPDMUX_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
18825 + * priority-tagged frames that are received on this
18826 + * interface
18827 + * @DPDMUX_ADMIT_ONLY_UNTAGGED: Untagged frames or priority-tagged frames
18828 + * received on this interface are accepted
18829 + */
18830 +enum dpdmux_accepted_frames_type {
18831 + DPDMUX_ADMIT_ALL = 0,
18832 + DPDMUX_ADMIT_ONLY_VLAN_TAGGED = 1,
18833 + DPDMUX_ADMIT_ONLY_UNTAGGED = 2
18834 +};
18835 +
18836 +/**
18837 + * enum dpdmux_action - DPDMUX action for un-accepted frames
18838 + * @DPDMUX_ACTION_DROP: Drop un-accepted frames
18839 + * @DPDMUX_ACTION_REDIRECT_TO_CTRL: Redirect un-accepted frames to the
18840 + * control interface
18841 + */
18842 +enum dpdmux_action {
18843 + DPDMUX_ACTION_DROP = 0,
18844 + DPDMUX_ACTION_REDIRECT_TO_CTRL = 1
18845 +};
18846 +
18847 +/**
18848 + * struct dpdmux_accepted_frames - Frame types configuration
18849 + * @type: Defines ingress accepted frames
18850 + * @unaccept_act: Defines action on frames not accepted
18851 + */
18852 +struct dpdmux_accepted_frames {
18853 + enum dpdmux_accepted_frames_type type;
18854 + enum dpdmux_action unaccept_act;
18855 +};
18856 +
18857 +int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
18858 + u32 cmd_flags,
18859 + u16 token,
18860 + u16 if_id,
18861 + const struct dpdmux_accepted_frames *cfg);
18862 +
18863 +/**
18864 + * struct dpdmux_if_attr - Structure representing frame types configuration
18865 + * @rate: Configured interface rate (in bits per second)
18866 + * @enabled: Indicates if interface is enabled
18867 + * @accept_frame_type: Indicates type of accepted frames for the interface
18868 + */
18869 +struct dpdmux_if_attr {
18870 + u32 rate;
18871 + int enabled;
18872 + enum dpdmux_accepted_frames_type accept_frame_type;
18873 +};
18874 +
18875 +int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
18876 + u32 cmd_flags,
18877 + u16 token,
18878 + u16 if_id,
18879 + struct dpdmux_if_attr *attr);
18880 +
18881 +int dpdmux_if_enable(struct fsl_mc_io *mc_io,
18882 + u32 cmd_flags,
18883 + u16 token,
18884 + u16 if_id);
18885 +
18886 +int dpdmux_if_disable(struct fsl_mc_io *mc_io,
18887 + u32 cmd_flags,
18888 + u16 token,
18889 + u16 if_id);
18890 +
18891 +/**
18892 + * struct dpdmux_l2_rule - Structure representing L2 rule
18893 + * @mac_addr: MAC address
18894 + * @vlan_id: VLAN ID
18895 + */
18896 +struct dpdmux_l2_rule {
18897 + u8 mac_addr[6];
18898 + u16 vlan_id;
18899 +};
18900 +
18901 +int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
18902 + u32 cmd_flags,
18903 + u16 token,
18904 + u16 if_id,
18905 + const struct dpdmux_l2_rule *rule);
18906 +
18907 +int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
18908 + u32 cmd_flags,
18909 + u16 token,
18910 + u16 if_id,
18911 + const struct dpdmux_l2_rule *rule);
18912 +
18913 +int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
18914 + u32 cmd_flags,
18915 + u16 token,
18916 + u16 if_id,
18917 + enum dpdmux_counter_type counter_type,
18918 + u64 *counter);
18919 +
18920 +int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
18921 + u32 cmd_flags,
18922 + u16 token);
18923 +
18924 +/**
18925 + * Enable auto-negotiation
18926 + */
18927 +#define DPDMUX_LINK_OPT_AUTONEG 0x0000000000000001ULL
18928 +/**
18929 + * Enable half-duplex mode
18930 + */
18931 +#define DPDMUX_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
18932 +/**
18933 + * Enable pause frames
18934 + */
18935 +#define DPDMUX_LINK_OPT_PAUSE 0x0000000000000004ULL
18936 +/**
18937 + * Enable a-symmetric pause frames
18938 + */
18939 +#define DPDMUX_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
18940 +
18941 +/**
18942 + * struct dpdmux_link_cfg - Structure representing DPDMUX link configuration
18943 + * @rate: Rate
18944 + * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
18945 + */
18946 +struct dpdmux_link_cfg {
18947 + u32 rate;
18948 + u64 options;
18949 +};
18950 +
18951 +int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
18952 + u32 cmd_flags,
18953 + u16 token,
18954 + u16 if_id,
18955 + struct dpdmux_link_cfg *cfg);
18956 +/**
18957 + * struct dpdmux_link_state - Structure representing DPDMUX link state
18958 + * @rate: Rate
18959 + * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
18960 + * @up: 0 - down, 1 - up
18961 + */
18962 +struct dpdmux_link_state {
18963 + u32 rate;
18964 + u64 options;
18965 + int up;
18966 +};
18967 +
18968 +int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
18969 + u32 cmd_flags,
18970 + u16 token,
18971 + u16 if_id,
18972 + struct dpdmux_link_state *state);
18973 +
18974 +int dpdmux_set_custom_key(struct fsl_mc_io *mc_io,
18975 + u32 cmd_flags,
18976 + u16 token,
18977 + u64 key_cfg_iova);
18978 +
18979 +/**
18980 + * struct dpdmux_rule_cfg - Custom classification rule.
18981 + *
18982 + * @key_iova: DMA address of buffer storing the look-up value
18983 + * @mask_iova: DMA address of the mask used for TCAM classification
18984 + * @key_size: size, in bytes, of the look-up value. This must match the size
18985 + * of the look-up key defined using dpdmux_set_custom_key, otherwise the
18986 + * entry will never be hit
18987 + */
18988 +struct dpdmux_rule_cfg {
18989 + u64 key_iova;
18990 + u64 mask_iova;
18991 + u8 key_size;
18992 +};
18993 +
18994 +/**
18995 + * struct dpdmux_cls_action - Action to execute for frames matching the
18996 + * classification entry
18997 + *
18998 + * @dest_if: Interface to forward the frames to. Port numbering is similar to
18999 + * the one used to connect interfaces:
19000 + * - 0 is the uplink port,
19001 + * - all others are downlink ports.
19002 + */
19003 +struct dpdmux_cls_action {
19004 + u16 dest_if;
19005 +};
19006 +
19007 +int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io,
19008 + u32 cmd_flags,
19009 + u16 token,
19010 + struct dpdmux_rule_cfg *rule,
19011 + struct dpdmux_cls_action *action);
19012 +
19013 +int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io,
19014 + u32 cmd_flags,
19015 + u16 token,
19016 + struct dpdmux_rule_cfg *rule);
19017 +
19018 +int dpdmux_get_api_version(struct fsl_mc_io *mc_io,
19019 + u32 cmd_flags,
19020 + u16 *major_ver,
19021 + u16 *minor_ver);
19022 +
19023 +#endif /* __FSL_DPDMUX_H */
19024 --- /dev/null
19025 +++ b/drivers/staging/fsl-dpaa2/evb/evb.c
19026 @@ -0,0 +1,1350 @@
19027 +/* Copyright 2015 Freescale Semiconductor Inc.
19028 + *
19029 + * Redistribution and use in source and binary forms, with or without
19030 + * modification, are permitted provided that the following conditions are met:
19031 + * * Redistributions of source code must retain the above copyright
19032 + * notice, this list of conditions and the following disclaimer.
19033 + * * Redistributions in binary form must reproduce the above copyright
19034 + * notice, this list of conditions and the following disclaimer in the
19035 + * documentation and/or other materials provided with the distribution.
19036 + * * Neither the name of Freescale Semiconductor nor the
19037 + * names of its contributors may be used to endorse or promote products
19038 + * derived from this software without specific prior written permission.
19039 + *
19040 + *
19041 + * ALTERNATIVELY, this software may be distributed under the terms of the
19042 + * GNU General Public License ("GPL") as published by the Free Software
19043 + * Foundation, either version 2 of that License or (at your option) any
19044 + * later version.
19045 + *
19046 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
19047 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19048 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19049 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
19050 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
19051 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19052 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
19053 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
19054 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
19055 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19056 + */
19057 +#include <linux/module.h>
19058 +#include <linux/msi.h>
19059 +#include <linux/netdevice.h>
19060 +#include <linux/etherdevice.h>
19061 +#include <linux/rtnetlink.h>
19062 +#include <linux/if_vlan.h>
19063 +
19064 +#include <uapi/linux/if_bridge.h>
19065 +#include <net/netlink.h>
19066 +
19067 +#include "../../fsl-mc/include/mc.h"
19068 +
19069 +#include "dpdmux.h"
19070 +#include "dpdmux-cmd.h"
19071 +
19072 +static const char evb_drv_version[] = "0.1";
19073 +
19074 +/* Minimal supported DPDMUX version */
19075 +#define DPDMUX_MIN_VER_MAJOR 6
19076 +#define DPDMUX_MIN_VER_MINOR 0
19077 +
19078 +/* IRQ index */
19079 +#define DPDMUX_MAX_IRQ_NUM 2
19080 +
19081 +/* MAX FRAME LENGTH (currently 10k) */
19082 +#define EVB_MAX_FRAME_LENGTH (10 * 1024)
19083 +/* MIN FRAME LENGTH (64 bytes + 4 bytes CRC) */
19084 +#define EVB_MIN_FRAME_LENGTH 68
19085 +
19086 +struct evb_port_priv {
19087 + struct net_device *netdev;
19088 + struct list_head list;
19089 + u16 port_index;
19090 + struct evb_priv *evb_priv;
19091 + u8 vlans[VLAN_VID_MASK + 1];
19092 +};
19093 +
19094 +struct evb_priv {
19095 + /* keep first */
19096 + struct evb_port_priv uplink;
19097 +
19098 + struct fsl_mc_io *mc_io;
19099 + struct list_head port_list;
19100 + struct dpdmux_attr attr;
19101 + u16 mux_handle;
19102 + int dev_id;
19103 +};
19104 +
19105 +static int _evb_port_carrier_state_sync(struct net_device *netdev)
19106 +{
19107 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19108 + struct dpdmux_link_state state;
19109 + int err;
19110 +
19111 + err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
19112 + port_priv->evb_priv->mux_handle,
19113 + port_priv->port_index, &state);
19114 + if (unlikely(err)) {
19115 + netdev_err(netdev, "dpdmux_if_get_link_state() err %d\n", err);
19116 + return err;
19117 + }
19118 +
19119 + WARN_ONCE(state.up > 1, "Garbage read into link_state");
19120 +
19121 + if (state.up)
19122 + netif_carrier_on(port_priv->netdev);
19123 + else
19124 + netif_carrier_off(port_priv->netdev);
19125 +
19126 + return 0;
19127 +}
19128 +
19129 +static int evb_port_open(struct net_device *netdev)
19130 +{
19131 + int err;
19132 +
19133 + /* FIXME: enable port when support added */
19134 +
19135 + err = _evb_port_carrier_state_sync(netdev);
19136 + if (err) {
19137 + netdev_err(netdev, "ethsw_port_carrier_state_sync err %d\n",
19138 + err);
19139 + return err;
19140 + }
19141 +
19142 + return 0;
19143 +}
19144 +
19145 +static netdev_tx_t evb_dropframe(struct sk_buff *skb, struct net_device *dev)
19146 +{
19147 + /* we don't support I/O for now, drop the frame */
19148 + dev_kfree_skb_any(skb);
19149 + return NETDEV_TX_OK;
19150 +}
19151 +
19152 +static int evb_links_state_update(struct evb_priv *priv)
19153 +{
19154 + struct evb_port_priv *port_priv;
19155 + struct list_head *pos;
19156 + int err;
19157 +
19158 + list_for_each(pos, &priv->port_list) {
19159 + port_priv = list_entry(pos, struct evb_port_priv, list);
19160 +
19161 + err = _evb_port_carrier_state_sync(port_priv->netdev);
19162 + if (err)
19163 + netdev_err(port_priv->netdev,
19164 + "_evb_port_carrier_state_sync err %d\n",
19165 + err);
19166 + }
19167 +
19168 + return 0;
19169 +}
19170 +
19171 +static irqreturn_t evb_irq0_handler(int irq_num, void *arg)
19172 +{
19173 + return IRQ_WAKE_THREAD;
19174 +}
19175 +
19176 +static irqreturn_t _evb_irq0_handler_thread(int irq_num, void *arg)
19177 +{
19178 + struct device *dev = (struct device *)arg;
19179 + struct fsl_mc_device *evb_dev = to_fsl_mc_device(dev);
19180 + struct net_device *netdev = dev_get_drvdata(dev);
19181 + struct evb_priv *priv = netdev_priv(netdev);
19182 + struct fsl_mc_io *io = priv->mc_io;
19183 + u16 token = priv->mux_handle;
19184 + int irq_index = DPDMUX_IRQ_INDEX_IF;
19185 +
19186 + /* Mask the events and the if_id reserved bits to be cleared on read */
19187 + u32 status = DPDMUX_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
19188 + int err;
19189 +
19190 + /* Sanity check */
19191 + if (WARN_ON(!evb_dev || !evb_dev->irqs || !evb_dev->irqs[irq_index]))
19192 + goto out;
19193 + if (WARN_ON(evb_dev->irqs[irq_index]->msi_desc->irq != (u32)irq_num))
19194 + goto out;
19195 +
19196 + err = dpdmux_get_irq_status(io, 0, token, irq_index, &status);
19197 + if (unlikely(err)) {
19198 + netdev_err(netdev, "Can't get irq status (err %d)", err);
19199 + err = dpdmux_clear_irq_status(io, 0, token, irq_index,
19200 + 0xFFFFFFFF);
19201 + if (unlikely(err))
19202 + netdev_err(netdev, "Can't clear irq status (err %d)",
19203 + err);
19204 + goto out;
19205 + }
19206 +
19207 + if (status & DPDMUX_IRQ_EVENT_LINK_CHANGED) {
19208 + err = evb_links_state_update(priv);
19209 + if (unlikely(err))
19210 + goto out;
19211 + }
19212 +
19213 +out:
19214 + return IRQ_HANDLED;
19215 +}
19216 +
19217 +static int evb_setup_irqs(struct fsl_mc_device *evb_dev)
19218 +{
19219 + struct device *dev = &evb_dev->dev;
19220 + struct net_device *netdev = dev_get_drvdata(dev);
19221 + struct evb_priv *priv = netdev_priv(netdev);
19222 + int err = 0;
19223 + struct fsl_mc_device_irq *irq;
19224 + const int irq_index = DPDMUX_IRQ_INDEX_IF;
19225 + u32 mask = DPDMUX_IRQ_EVENT_LINK_CHANGED;
19226 +
19227 + err = fsl_mc_allocate_irqs(evb_dev);
19228 + if (unlikely(err)) {
19229 + dev_err(dev, "MC irqs allocation failed\n");
19230 + return err;
19231 + }
19232 +
19233 + if (WARN_ON(evb_dev->obj_desc.irq_count != DPDMUX_MAX_IRQ_NUM)) {
19234 + err = -EINVAL;
19235 + goto free_irq;
19236 + }
19237 +
19238 + err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
19239 + irq_index, 0);
19240 + if (unlikely(err)) {
19241 + dev_err(dev, "dpdmux_set_irq_enable err %d\n", err);
19242 + goto free_irq;
19243 + }
19244 +
19245 + irq = evb_dev->irqs[irq_index];
19246 +
19247 + err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
19248 + evb_irq0_handler,
19249 + _evb_irq0_handler_thread,
19250 + IRQF_NO_SUSPEND | IRQF_ONESHOT,
19251 + dev_name(dev), dev);
19252 + if (unlikely(err)) {
19253 + dev_err(dev, "devm_request_threaded_irq(): %d", err);
19254 + goto free_irq;
19255 + }
19256 +
19257 + err = dpdmux_set_irq_mask(priv->mc_io, 0, priv->mux_handle,
19258 + irq_index, mask);
19259 + if (unlikely(err)) {
19260 + dev_err(dev, "dpdmux_set_irq_mask(): %d", err);
19261 + goto free_devm_irq;
19262 + }
19263 +
19264 + err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
19265 + irq_index, 1);
19266 + if (unlikely(err)) {
19267 + dev_err(dev, "dpdmux_set_irq_enable(): %d", err);
19268 + goto free_devm_irq;
19269 + }
19270 +
19271 + return 0;
19272 +
19273 +free_devm_irq:
19274 + devm_free_irq(dev, irq->msi_desc->irq, dev);
19275 +free_irq:
19276 + fsl_mc_free_irqs(evb_dev);
19277 + return err;
19278 +}
19279 +
19280 +static void evb_teardown_irqs(struct fsl_mc_device *evb_dev)
19281 +{
19282 + struct device *dev = &evb_dev->dev;
19283 + struct net_device *netdev = dev_get_drvdata(dev);
19284 + struct evb_priv *priv = netdev_priv(netdev);
19285 +
19286 + dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
19287 + DPDMUX_IRQ_INDEX_IF, 0);
19288 +
19289 + devm_free_irq(dev,
19290 + evb_dev->irqs[DPDMUX_IRQ_INDEX_IF]->msi_desc->irq,
19291 + dev);
19292 + fsl_mc_free_irqs(evb_dev);
19293 +}
19294 +
19295 +static int evb_port_add_rule(struct net_device *netdev,
19296 + const unsigned char *addr, u16 vid)
19297 +{
19298 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19299 + struct dpdmux_l2_rule rule = { .vlan_id = vid };
19300 + int err;
19301 +
19302 + if (addr)
19303 + ether_addr_copy(rule.mac_addr, addr);
19304 +
19305 + err = dpdmux_if_add_l2_rule(port_priv->evb_priv->mc_io,
19306 + 0,
19307 + port_priv->evb_priv->mux_handle,
19308 + port_priv->port_index, &rule);
19309 + if (unlikely(err))
19310 + netdev_err(netdev, "dpdmux_if_add_l2_rule err %d\n", err);
19311 + return err;
19312 +}
19313 +
19314 +static int evb_port_del_rule(struct net_device *netdev,
19315 + const unsigned char *addr, u16 vid)
19316 +{
19317 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19318 + struct dpdmux_l2_rule rule = { .vlan_id = vid };
19319 + int err;
19320 +
19321 + if (addr)
19322 + ether_addr_copy(rule.mac_addr, addr);
19323 +
19324 + err = dpdmux_if_remove_l2_rule(port_priv->evb_priv->mc_io,
19325 + 0,
19326 + port_priv->evb_priv->mux_handle,
19327 + port_priv->port_index, &rule);
19328 + if (unlikely(err))
19329 + netdev_err(netdev, "dpdmux_if_remove_l2_rule err %d\n", err);
19330 + return err;
19331 +}
19332 +
19333 +static bool _lookup_address(struct net_device *netdev,
19334 + const unsigned char *addr)
19335 +{
19336 + struct netdev_hw_addr *ha;
19337 + struct netdev_hw_addr_list *list = (is_unicast_ether_addr(addr)) ?
19338 + &netdev->uc : &netdev->mc;
19339 +
19340 + netif_addr_lock_bh(netdev);
19341 + list_for_each_entry(ha, &list->list, list) {
19342 + if (ether_addr_equal(ha->addr, addr)) {
19343 + netif_addr_unlock_bh(netdev);
19344 + return true;
19345 + }
19346 + }
19347 + netif_addr_unlock_bh(netdev);
19348 + return false;
19349 +}
19350 +
19351 +static inline int evb_port_fdb_prep(struct nlattr *tb[],
19352 + struct net_device *netdev,
19353 + const unsigned char *addr, u16 *vid,
19354 + bool del)
19355 +{
19356 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19357 + struct evb_priv *evb_priv = port_priv->evb_priv;
19358 +
19359 + *vid = 0;
19360 +
19361 + if (evb_priv->attr.method != DPDMUX_METHOD_MAC &&
19362 + evb_priv->attr.method != DPDMUX_METHOD_C_VLAN_MAC) {
19363 + netdev_err(netdev,
19364 + "EVB mode does not support MAC classification\n");
19365 + return -EOPNOTSUPP;
19366 + }
19367 +
19368 + /* check if the address is configured on this port */
19369 + if (_lookup_address(netdev, addr)) {
19370 + if (!del)
19371 + return -EEXIST;
19372 + } else {
19373 + if (del)
19374 + return -ENOENT;
19375 + }
19376 +
19377 + if (tb[NDA_VLAN] && evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) {
19378 + if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) {
19379 + netdev_err(netdev, "invalid vlan size %d\n",
19380 + nla_len(tb[NDA_VLAN]));
19381 + return -EINVAL;
19382 + }
19383 +
19384 + *vid = nla_get_u16(tb[NDA_VLAN]);
19385 +
19386 + if (!*vid || *vid >= VLAN_VID_MASK) {
19387 + netdev_err(netdev, "invalid vid value 0x%04x\n", *vid);
19388 + return -EINVAL;
19389 + }
19390 + } else if (evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) {
19391 + netdev_err(netdev,
19392 + "EVB mode requires explicit VLAN configuration\n");
19393 + return -EINVAL;
19394 + } else if (tb[NDA_VLAN]) {
19395 + netdev_warn(netdev, "VLAN not supported, argument ignored\n");
19396 + }
19397 +
19398 + return 0;
19399 +}
19400 +
19401 +static int evb_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
19402 + struct net_device *netdev,
19403 + const unsigned char *addr, u16 vid, u16 flags)
19404 +{
19405 + u16 _vid;
19406 + int err;
19407 +
19408 + /* TODO: add replace support when added to iproute bridge */
19409 + if (!(flags & NLM_F_REQUEST)) {
19410 + netdev_err(netdev,
19411 + "evb_port_fdb_add unexpected flags value %08x\n",
19412 + flags);
19413 + return -EINVAL;
19414 + }
19415 +
19416 + err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 0);
19417 + if (unlikely(err))
19418 + return err;
19419 +
19420 + err = evb_port_add_rule(netdev, addr, _vid);
19421 + if (unlikely(err))
19422 + return err;
19423 +
19424 + if (is_unicast_ether_addr(addr)) {
19425 + err = dev_uc_add(netdev, addr);
19426 + if (unlikely(err)) {
19427 + netdev_err(netdev, "dev_uc_add err %d\n", err);
19428 + return err;
19429 + }
19430 + } else {
19431 + err = dev_mc_add(netdev, addr);
19432 + if (unlikely(err)) {
19433 + netdev_err(netdev, "dev_mc_add err %d\n", err);
19434 + return err;
19435 + }
19436 + }
19437 +
19438 + return 0;
19439 +}
19440 +
19441 +static int evb_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
19442 + struct net_device *netdev,
19443 + const unsigned char *addr, u16 vid)
19444 +{
19445 + u16 _vid;
19446 + int err;
19447 +
19448 + err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 1);
19449 + if (unlikely(err))
19450 + return err;
19451 +
19452 + err = evb_port_del_rule(netdev, addr, _vid);
19453 + if (unlikely(err))
19454 + return err;
19455 +
19456 + if (is_unicast_ether_addr(addr)) {
19457 + err = dev_uc_del(netdev, addr);
19458 + if (unlikely(err)) {
19459 + netdev_err(netdev, "dev_uc_del err %d\n", err);
19460 + return err;
19461 + }
19462 + } else {
19463 + err = dev_mc_del(netdev, addr);
19464 + if (unlikely(err)) {
19465 + netdev_err(netdev, "dev_mc_del err %d\n", err);
19466 + return err;
19467 + }
19468 + }
19469 +
19470 + return 0;
19471 +}
19472 +
19473 +static int evb_change_mtu(struct net_device *netdev,
19474 + int mtu)
19475 +{
19476 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19477 + struct evb_priv *evb_priv = port_priv->evb_priv;
19478 + struct list_head *pos;
19479 + int err = 0;
19480 +
19481 + /* This operation is not permitted on downlinks */
19482 + if (port_priv->port_index > 0)
19483 + return -EPERM;
19484 +
19485 + if (mtu < EVB_MIN_FRAME_LENGTH || mtu > EVB_MAX_FRAME_LENGTH) {
19486 + netdev_err(netdev, "Invalid MTU %d. Valid range is: %d..%d\n",
19487 + mtu, EVB_MIN_FRAME_LENGTH, EVB_MAX_FRAME_LENGTH);
19488 + return -EINVAL;
19489 + }
19490 +
19491 + err = dpdmux_set_max_frame_length(evb_priv->mc_io,
19492 + 0,
19493 + evb_priv->mux_handle,
19494 + (uint16_t)mtu);
19495 +
19496 + if (unlikely(err)) {
19497 + netdev_err(netdev, "dpdmux_ul_set_max_frame_length err %d\n",
19498 + err);
19499 + return err;
19500 + }
19501 +
19502 + /* Update the max frame length for downlinks */
19503 + list_for_each(pos, &evb_priv->port_list) {
19504 + port_priv = list_entry(pos, struct evb_port_priv, list);
19505 + port_priv->netdev->mtu = mtu;
19506 + }
19507 +
19508 + netdev->mtu = mtu;
19509 + return 0;
19510 +}
19511 +
19512 +static const struct nla_policy ifla_br_policy[IFLA_MAX + 1] = {
19513 + [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
19514 + [IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
19515 + [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
19516 + .len = sizeof(struct bridge_vlan_info), },
19517 +};
19518 +
19519 +static int evb_setlink_af_spec(struct net_device *netdev,
19520 + struct nlattr **tb)
19521 +{
19522 + struct bridge_vlan_info *vinfo;
19523 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19524 + int err = 0;
19525 +
19526 + if (!tb[IFLA_BRIDGE_VLAN_INFO]) {
19527 + netdev_err(netdev, "no VLAN INFO in nlmsg\n");
19528 + return -EOPNOTSUPP;
19529 + }
19530 +
19531 + vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
19532 +
19533 + if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
19534 + return -EINVAL;
19535 +
19536 + err = evb_port_add_rule(netdev, NULL, vinfo->vid);
19537 + if (unlikely(err))
19538 + return err;
19539 +
19540 + port_priv->vlans[vinfo->vid] = 1;
19541 +
19542 + return 0;
19543 +}
19544 +
19545 +static int evb_setlink(struct net_device *netdev,
19546 + struct nlmsghdr *nlh,
19547 + u16 flags)
19548 +{
19549 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19550 + struct evb_priv *evb_priv = port_priv->evb_priv;
19551 + struct nlattr *attr;
19552 + struct nlattr *tb[(IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX) ?
19553 + IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX + 1];
19554 + int err = 0;
19555 +
19556 + if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN &&
19557 + evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) {
19558 + netdev_err(netdev,
19559 + "EVB mode does not support VLAN only classification\n");
19560 + return -EOPNOTSUPP;
19561 + }
19562 +
19563 + attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
19564 + if (attr) {
19565 + err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr,
19566 + ifla_br_policy);
19567 + if (unlikely(err)) {
19568 + netdev_err(netdev,
19569 + "nla_parse_nested for br_policy err %d\n",
19570 + err);
19571 + return err;
19572 + }
19573 +
19574 + err = evb_setlink_af_spec(netdev, tb);
19575 + return err;
19576 + }
19577 +
19578 + netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC\n");
19579 + return -EOPNOTSUPP;
19580 +}
19581 +
19582 +static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev)
19583 +{
19584 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19585 + struct evb_priv *evb_priv = port_priv->evb_priv;
19586 + u8 operstate = netif_running(netdev) ?
19587 + netdev->operstate : IF_OPER_DOWN;
19588 + int iflink;
19589 + int err;
19590 +
19591 + err = nla_put_string(skb, IFLA_IFNAME, netdev->name);
19592 + if (unlikely(err))
19593 + goto nla_put_err;
19594 + err = nla_put_u32(skb, IFLA_MASTER, evb_priv->uplink.netdev->ifindex);
19595 + if (unlikely(err))
19596 + goto nla_put_err;
19597 + err = nla_put_u32(skb, IFLA_MTU, netdev->mtu);
19598 + if (unlikely(err))
19599 + goto nla_put_err;
19600 + err = nla_put_u8(skb, IFLA_OPERSTATE, operstate);
19601 + if (unlikely(err))
19602 + goto nla_put_err;
19603 + if (netdev->addr_len) {
19604 + err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len,
19605 + netdev->dev_addr);
19606 + if (unlikely(err))
19607 + goto nla_put_err;
19608 + }
19609 +
19610 + iflink = dev_get_iflink(netdev);
19611 + if (netdev->ifindex != iflink) {
19612 + err = nla_put_u32(skb, IFLA_LINK, iflink);
19613 + if (unlikely(err))
19614 + goto nla_put_err;
19615 + }
19616 +
19617 + return 0;
19618 +
19619 +nla_put_err:
19620 + netdev_err(netdev, "nla_put_ err %d\n", err);
19621 + return err;
19622 +}
19623 +
19624 +static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev)
19625 +{
19626 + struct nlattr *nest;
19627 + int err;
19628 +
19629 + nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
19630 + if (!nest) {
19631 + netdev_err(netdev, "nla_nest_start failed\n");
19632 + return -ENOMEM;
19633 + }
19634 +
19635 + err = nla_put_u8(skb, IFLA_BRPORT_STATE, BR_STATE_FORWARDING);
19636 + if (unlikely(err))
19637 + goto nla_put_err;
19638 + err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0);
19639 + if (unlikely(err))
19640 + goto nla_put_err;
19641 + err = nla_put_u32(skb, IFLA_BRPORT_COST, 0);
19642 + if (unlikely(err))
19643 + goto nla_put_err;
19644 + err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0);
19645 + if (unlikely(err))
19646 + goto nla_put_err;
19647 + err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0);
19648 + if (unlikely(err))
19649 + goto nla_put_err;
19650 + err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0);
19651 + if (unlikely(err))
19652 + goto nla_put_err;
19653 + err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0);
19654 + if (unlikely(err))
19655 + goto nla_put_err;
19656 + err = nla_put_u8(skb, IFLA_BRPORT_LEARNING, 0);
19657 + if (unlikely(err))
19658 + goto nla_put_err;
19659 + err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, 1);
19660 + if (unlikely(err))
19661 + goto nla_put_err;
19662 + nla_nest_end(skb, nest);
19663 +
19664 + return 0;
19665 +
19666 +nla_put_err:
19667 + netdev_err(netdev, "nla_put_ err %d\n", err);
19668 + nla_nest_cancel(skb, nest);
19669 + return err;
19670 +}
19671 +
19672 +static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev)
19673 +{
19674 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19675 + struct nlattr *nest;
19676 + struct bridge_vlan_info vinfo;
19677 + const u8 *vlans = port_priv->vlans;
19678 + u16 i;
19679 + int err;
19680 +
19681 + nest = nla_nest_start(skb, IFLA_AF_SPEC);
19682 + if (!nest) {
19683 + netdev_err(netdev, "nla_nest_start failed");
19684 + return -ENOMEM;
19685 + }
19686 +
19687 + for (i = 0; i < VLAN_VID_MASK + 1; i++) {
19688 + if (!vlans[i])
19689 + continue;
19690 +
19691 + vinfo.flags = 0;
19692 + vinfo.vid = i;
19693 +
19694 + err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
19695 + sizeof(vinfo), &vinfo);
19696 + if (unlikely(err))
19697 + goto nla_put_err;
19698 + }
19699 +
19700 + nla_nest_end(skb, nest);
19701 +
19702 + return 0;
19703 +
19704 +nla_put_err:
19705 + netdev_err(netdev, "nla_put_ err %d\n", err);
19706 + nla_nest_cancel(skb, nest);
19707 + return err;
19708 +}
19709 +
19710 +static int evb_getlink(struct sk_buff *skb, u32 pid, u32 seq,
19711 + struct net_device *netdev, u32 filter_mask, int nlflags)
19712 +{
19713 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19714 + struct evb_priv *evb_priv = port_priv->evb_priv;
19715 + struct ifinfomsg *hdr;
19716 + struct nlmsghdr *nlh;
19717 + int err;
19718 +
19719 + if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN &&
19720 + evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) {
19721 + return 0;
19722 + }
19723 +
19724 + nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI);
19725 + if (!nlh)
19726 + return -EMSGSIZE;
19727 +
19728 + hdr = nlmsg_data(nlh);
19729 + memset(hdr, 0, sizeof(*hdr));
19730 + hdr->ifi_family = AF_BRIDGE;
19731 + hdr->ifi_type = netdev->type;
19732 + hdr->ifi_index = netdev->ifindex;
19733 + hdr->ifi_flags = dev_get_flags(netdev);
19734 +
19735 + err = __nla_put_netdev(skb, netdev);
19736 + if (unlikely(err))
19737 + goto nla_put_err;
19738 +
19739 + err = __nla_put_port(skb, netdev);
19740 + if (unlikely(err))
19741 + goto nla_put_err;
19742 +
19743 + /* Check if the VID information is requested */
19744 + if (filter_mask & RTEXT_FILTER_BRVLAN) {
19745 + err = __nla_put_vlan(skb, netdev);
19746 + if (unlikely(err))
19747 + goto nla_put_err;
19748 + }
19749 +
19750 + nlmsg_end(skb, nlh);
19751 + return skb->len;
19752 +
19753 +nla_put_err:
19754 + nlmsg_cancel(skb, nlh);
19755 + return -EMSGSIZE;
19756 +}
19757 +
19758 +static int evb_dellink(struct net_device *netdev,
19759 + struct nlmsghdr *nlh,
19760 + u16 flags)
19761 +{
19762 + struct nlattr *tb[IFLA_BRIDGE_MAX + 1];
19763 + struct nlattr *spec;
19764 + struct bridge_vlan_info *vinfo;
19765 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19766 + int err = 0;
19767 +
19768 + spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
19769 + if (!spec)
19770 + return 0;
19771 +
19772 + err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy);
19773 + if (unlikely(err))
19774 + return err;
19775 +
19776 + if (!tb[IFLA_BRIDGE_VLAN_INFO])
19777 + return -EOPNOTSUPP;
19778 +
19779 + vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
19780 +
19781 + if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
19782 + return -EINVAL;
19783 +
19784 + err = evb_port_del_rule(netdev, NULL, vinfo->vid);
19785 + if (unlikely(err)) {
19786 + netdev_err(netdev, "evb_port_del_rule err %d\n", err);
19787 + return err;
19788 + }
19789 + port_priv->vlans[vinfo->vid] = 0;
19790 +
19791 + return 0;
19792 +}
19793 +
19794 +struct rtnl_link_stats64 *evb_port_get_stats(struct net_device *netdev,
19795 + struct rtnl_link_stats64 *storage)
19796 +{
19797 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19798 + u64 tmp;
19799 + int err;
19800 +
19801 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19802 + 0,
19803 + port_priv->evb_priv->mux_handle,
19804 + port_priv->port_index,
19805 + DPDMUX_CNT_ING_FRAME, &storage->rx_packets);
19806 + if (unlikely(err))
19807 + goto error;
19808 +
19809 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19810 + 0,
19811 + port_priv->evb_priv->mux_handle,
19812 + port_priv->port_index,
19813 + DPDMUX_CNT_ING_BYTE, &storage->rx_bytes);
19814 + if (unlikely(err))
19815 + goto error;
19816 +
19817 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19818 + 0,
19819 + port_priv->evb_priv->mux_handle,
19820 + port_priv->port_index,
19821 + DPDMUX_CNT_ING_FLTR_FRAME, &tmp);
19822 + if (unlikely(err))
19823 + goto error;
19824 +
19825 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19826 + 0,
19827 + port_priv->evb_priv->mux_handle,
19828 + port_priv->port_index,
19829 + DPDMUX_CNT_ING_FRAME_DISCARD,
19830 + &storage->rx_dropped);
19831 + if (unlikely(err)) {
19832 + storage->rx_dropped = tmp;
19833 + goto error;
19834 + }
19835 + storage->rx_dropped += tmp;
19836 +
19837 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19838 + 0,
19839 + port_priv->evb_priv->mux_handle,
19840 + port_priv->port_index,
19841 + DPDMUX_CNT_ING_MCAST_FRAME,
19842 + &storage->multicast);
19843 + if (unlikely(err))
19844 + goto error;
19845 +
19846 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19847 + 0,
19848 + port_priv->evb_priv->mux_handle,
19849 + port_priv->port_index,
19850 + DPDMUX_CNT_EGR_FRAME, &storage->tx_packets);
19851 + if (unlikely(err))
19852 + goto error;
19853 +
19854 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19855 + 0,
19856 + port_priv->evb_priv->mux_handle,
19857 + port_priv->port_index,
19858 + DPDMUX_CNT_EGR_BYTE, &storage->tx_bytes);
19859 + if (unlikely(err))
19860 + goto error;
19861 +
19862 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
19863 + 0,
19864 + port_priv->evb_priv->mux_handle,
19865 + port_priv->port_index,
19866 + DPDMUX_CNT_EGR_FRAME_DISCARD,
19867 + &storage->tx_dropped);
19868 + if (unlikely(err))
19869 + goto error;
19870 +
19871 + return storage;
19872 +
19873 +error:
19874 + netdev_err(netdev, "dpdmux_if_get_counter err %d\n", err);
19875 +}
19876 +
19877 +static const struct net_device_ops evb_port_ops = {
19878 + .ndo_open = &evb_port_open,
19879 +
19880 + .ndo_start_xmit = &evb_dropframe,
19881 +
19882 + .ndo_fdb_add = &evb_port_fdb_add,
19883 + .ndo_fdb_del = &evb_port_fdb_del,
19884 +
19885 + .ndo_get_stats64 = &evb_port_get_stats,
19886 + .ndo_change_mtu = &evb_change_mtu,
19887 +};
19888 +
19889 +static void evb_get_drvinfo(struct net_device *netdev,
19890 + struct ethtool_drvinfo *drvinfo)
19891 +{
19892 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19893 + u16 version_major, version_minor;
19894 + int err;
19895 +
19896 + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
19897 + strlcpy(drvinfo->version, evb_drv_version, sizeof(drvinfo->version));
19898 +
19899 + err = dpdmux_get_api_version(port_priv->evb_priv->mc_io, 0,
19900 + &version_major,
19901 + &version_minor);
19902 + if (err)
19903 + strlcpy(drvinfo->fw_version, "N/A",
19904 + sizeof(drvinfo->fw_version));
19905 + else
19906 + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
19907 + "%u.%u", version_major, version_minor);
19908 +
19909 + strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
19910 + sizeof(drvinfo->bus_info));
19911 +}
19912 +
19913 +static int evb_get_settings(struct net_device *netdev,
19914 + struct ethtool_cmd *cmd)
19915 +{
19916 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19917 + struct dpdmux_link_state state = {0};
19918 + int err = 0;
19919 +
19920 + err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
19921 + port_priv->evb_priv->mux_handle,
19922 + port_priv->port_index,
19923 + &state);
19924 + if (err) {
19925 + netdev_err(netdev, "ERROR %d getting link state", err);
19926 + goto out;
19927 + }
19928 +
19929 + /* At the moment, we have no way of interrogating the DPMAC
19930 + * from the DPDMUX side or there may not exist a DPMAC at all.
19931 + * Report only autoneg state, duplexity and speed.
19932 + */
19933 + if (state.options & DPDMUX_LINK_OPT_AUTONEG)
19934 + cmd->autoneg = AUTONEG_ENABLE;
19935 + if (!(state.options & DPDMUX_LINK_OPT_HALF_DUPLEX))
19936 + cmd->duplex = DUPLEX_FULL;
19937 + ethtool_cmd_speed_set(cmd, state.rate);
19938 +
19939 +out:
19940 + return err;
19941 +}
19942 +
19943 +static int evb_set_settings(struct net_device *netdev,
19944 + struct ethtool_cmd *cmd)
19945 +{
19946 + struct evb_port_priv *port_priv = netdev_priv(netdev);
19947 + struct dpdmux_link_state state = {0};
19948 + struct dpdmux_link_cfg cfg = {0};
19949 + int err = 0;
19950 +
19951 + netdev_dbg(netdev, "Setting link parameters...");
19952 +
19953 + err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
19954 + port_priv->evb_priv->mux_handle,
19955 + port_priv->port_index,
19956 + &state);
19957 + if (err) {
19958 + netdev_err(netdev, "ERROR %d getting link state", err);
19959 + goto out;
19960 + }
19961 +
19962 + /* Due to a temporary MC limitation, the DPDMUX port must be down
19963 + * in order to be able to change link settings. Taking steps to let
19964 + * the user know that.
19965 + */
19966 + if (netif_running(netdev)) {
19967 + netdev_info(netdev,
19968 + "Sorry, interface must be brought down first.\n");
19969 + return -EACCES;
19970 + }
19971 +
19972 + cfg.options = state.options;
19973 + cfg.rate = ethtool_cmd_speed(cmd);
19974 + if (cmd->autoneg == AUTONEG_ENABLE)
19975 + cfg.options |= DPDMUX_LINK_OPT_AUTONEG;
19976 + else
19977 + cfg.options &= ~DPDMUX_LINK_OPT_AUTONEG;
19978 + if (cmd->duplex == DUPLEX_HALF)
19979 + cfg.options |= DPDMUX_LINK_OPT_HALF_DUPLEX;
19980 + else
19981 + cfg.options &= ~DPDMUX_LINK_OPT_HALF_DUPLEX;
19982 +
19983 + err = dpdmux_if_set_link_cfg(port_priv->evb_priv->mc_io, 0,
19984 + port_priv->evb_priv->mux_handle,
19985 + port_priv->port_index,
19986 + &cfg);
19987 + if (err)
19988 + /* ethtool will be loud enough if we return an error; no point
19989 + * in putting our own error message on the console by default
19990 + */
19991 + netdev_dbg(netdev, "ERROR %d setting link cfg", err);
19992 +
19993 +out:
19994 + return err;
19995 +}
19996 +
19997 +static struct {
19998 + enum dpdmux_counter_type id;
19999 + char name[ETH_GSTRING_LEN];
20000 +} evb_ethtool_counters[] = {
20001 + {DPDMUX_CNT_ING_FRAME, "rx frames"},
20002 + {DPDMUX_CNT_ING_BYTE, "rx bytes"},
20003 + {DPDMUX_CNT_ING_FLTR_FRAME, "rx filtered frames"},
20004 + {DPDMUX_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
20005 + {DPDMUX_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
20006 + {DPDMUX_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
20007 + {DPDMUX_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
20008 + {DPDMUX_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
20009 + {DPDMUX_CNT_EGR_FRAME, "tx frames"},
20010 + {DPDMUX_CNT_EGR_BYTE, "tx bytes"},
20011 + {DPDMUX_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
20012 +};
20013 +
20014 +static int evb_ethtool_get_sset_count(struct net_device *dev, int sset)
20015 +{
20016 + switch (sset) {
20017 + case ETH_SS_STATS:
20018 + return ARRAY_SIZE(evb_ethtool_counters);
20019 + default:
20020 + return -EOPNOTSUPP;
20021 + }
20022 +}
20023 +
20024 +static void evb_ethtool_get_strings(struct net_device *netdev,
20025 + u32 stringset, u8 *data)
20026 +{
20027 + u32 i;
20028 +
20029 + switch (stringset) {
20030 + case ETH_SS_STATS:
20031 + for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++)
20032 + memcpy(data + i * ETH_GSTRING_LEN,
20033 + evb_ethtool_counters[i].name, ETH_GSTRING_LEN);
20034 + break;
20035 + }
20036 +}
20037 +
20038 +static void evb_ethtool_get_stats(struct net_device *netdev,
20039 + struct ethtool_stats *stats,
20040 + u64 *data)
20041 +{
20042 + struct evb_port_priv *port_priv = netdev_priv(netdev);
20043 + u32 i;
20044 + int err;
20045 +
20046 + for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++) {
20047 + err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
20048 + 0,
20049 + port_priv->evb_priv->mux_handle,
20050 + port_priv->port_index,
20051 + evb_ethtool_counters[i].id,
20052 + &data[i]);
20053 + if (err)
20054 + netdev_err(netdev, "dpdmux_if_get_counter[%s] err %d\n",
20055 + evb_ethtool_counters[i].name, err);
20056 + }
20057 +}
20058 +
20059 +static const struct ethtool_ops evb_port_ethtool_ops = {
20060 + .get_drvinfo = &evb_get_drvinfo,
20061 + .get_link = &ethtool_op_get_link,
20062 + .get_settings = &evb_get_settings,
20063 + .set_settings = &evb_set_settings,
20064 + .get_strings = &evb_ethtool_get_strings,
20065 + .get_ethtool_stats = &evb_ethtool_get_stats,
20066 + .get_sset_count = &evb_ethtool_get_sset_count,
20067 +};
20068 +
20069 +static int evb_open(struct net_device *netdev)
20070 +{
20071 + struct evb_priv *priv = netdev_priv(netdev);
20072 + int err = 0;
20073 +
20074 + err = dpdmux_enable(priv->mc_io, 0, priv->mux_handle);
20075 + if (unlikely(err))
20076 + netdev_err(netdev, "dpdmux_enable err %d\n", err);
20077 +
20078 + return err;
20079 +}
20080 +
20081 +static int evb_close(struct net_device *netdev)
20082 +{
20083 + struct evb_priv *priv = netdev_priv(netdev);
20084 + int err = 0;
20085 +
20086 + err = dpdmux_disable(priv->mc_io, 0, priv->mux_handle);
20087 + if (unlikely(err))
20088 + netdev_err(netdev, "dpdmux_disable err %d\n", err);
20089 +
20090 + return err;
20091 +}
20092 +
20093 +static const struct net_device_ops evb_ops = {
20094 + .ndo_start_xmit = &evb_dropframe,
20095 + .ndo_open = &evb_open,
20096 + .ndo_stop = &evb_close,
20097 +
20098 + .ndo_bridge_setlink = &evb_setlink,
20099 + .ndo_bridge_getlink = &evb_getlink,
20100 + .ndo_bridge_dellink = &evb_dellink,
20101 +
20102 + .ndo_get_stats64 = &evb_port_get_stats,
20103 + .ndo_change_mtu = &evb_change_mtu,
20104 +};
20105 +
20106 +static int evb_takedown(struct fsl_mc_device *evb_dev)
20107 +{
20108 + struct device *dev = &evb_dev->dev;
20109 + struct net_device *netdev = dev_get_drvdata(dev);
20110 + struct evb_priv *priv = netdev_priv(netdev);
20111 + int err;
20112 +
20113 + err = dpdmux_close(priv->mc_io, 0, priv->mux_handle);
20114 + if (unlikely(err))
20115 + dev_warn(dev, "dpdmux_close err %d\n", err);
20116 +
20117 + return 0;
20118 +}
20119 +
20120 +static int evb_init(struct fsl_mc_device *evb_dev)
20121 +{
20122 + struct device *dev = &evb_dev->dev;
20123 + struct net_device *netdev = dev_get_drvdata(dev);
20124 + struct evb_priv *priv = netdev_priv(netdev);
20125 + u16 version_major;
20126 + u16 version_minor;
20127 + int err = 0;
20128 +
20129 + priv->dev_id = evb_dev->obj_desc.id;
20130 +
20131 + err = dpdmux_open(priv->mc_io, 0, priv->dev_id, &priv->mux_handle);
20132 + if (unlikely(err)) {
20133 + dev_err(dev, "dpdmux_open err %d\n", err);
20134 + goto err_exit;
20135 + }
20136 + if (!priv->mux_handle) {
20137 + dev_err(dev, "dpdmux_open returned null handle but no error\n");
20138 + err = -EFAULT;
20139 + goto err_exit;
20140 + }
20141 +
20142 + err = dpdmux_get_attributes(priv->mc_io, 0, priv->mux_handle,
20143 + &priv->attr);
20144 + if (unlikely(err)) {
20145 + dev_err(dev, "dpdmux_get_attributes err %d\n", err);
20146 + goto err_close;
20147 + }
20148 +
20149 + err = dpdmux_get_api_version(priv->mc_io, 0,
20150 + &version_major,
20151 + &version_minor);
20152 + if (unlikely(err)) {
20153 + dev_err(dev, "dpdmux_get_api_version err %d\n", err);
20154 + goto err_close;
20155 + }
20156 +
20157 + /* Minimum supported DPDMUX version check */
20158 + if (version_major < DPDMUX_MIN_VER_MAJOR ||
20159 + (version_major == DPDMUX_MIN_VER_MAJOR &&
20160 + version_minor < DPDMUX_MIN_VER_MINOR)) {
20161 + dev_err(dev, "DPDMUX version %d.%d not supported. Use %d.%d or greater.\n",
20162 + version_major, version_minor,
 20163 + DPDMUX_MIN_VER_MAJOR, DPDMUX_MIN_VER_MINOR);
20164 + err = -ENOTSUPP;
20165 + goto err_close;
20166 + }
20167 +
20168 + err = dpdmux_reset(priv->mc_io, 0, priv->mux_handle);
20169 + if (unlikely(err)) {
20170 + dev_err(dev, "dpdmux_reset err %d\n", err);
20171 + goto err_close;
20172 + }
20173 +
20174 + return 0;
20175 +
20176 +err_close:
20177 + dpdmux_close(priv->mc_io, 0, priv->mux_handle);
20178 +err_exit:
20179 + return err;
20180 +}
20181 +
20182 +static int evb_remove(struct fsl_mc_device *evb_dev)
20183 +{
20184 + struct device *dev = &evb_dev->dev;
20185 + struct net_device *netdev = dev_get_drvdata(dev);
20186 + struct evb_priv *priv = netdev_priv(netdev);
20187 + struct evb_port_priv *port_priv;
20188 + struct list_head *pos;
20189 +
20190 + list_for_each(pos, &priv->port_list) {
20191 + port_priv = list_entry(pos, struct evb_port_priv, list);
20192 +
20193 + rtnl_lock();
20194 + netdev_upper_dev_unlink(port_priv->netdev, netdev);
20195 + rtnl_unlock();
20196 +
20197 + unregister_netdev(port_priv->netdev);
20198 + free_netdev(port_priv->netdev);
20199 + }
20200 +
20201 + evb_teardown_irqs(evb_dev);
20202 +
20203 + unregister_netdev(netdev);
20204 +
20205 + evb_takedown(evb_dev);
20206 + fsl_mc_portal_free(priv->mc_io);
20207 +
20208 + dev_set_drvdata(dev, NULL);
20209 + free_netdev(netdev);
20210 +
20211 + return 0;
20212 +}
20213 +
20214 +static int evb_probe(struct fsl_mc_device *evb_dev)
20215 +{
20216 + struct device *dev;
20217 + struct evb_priv *priv = NULL;
20218 + struct net_device *netdev = NULL;
20219 + char port_name[IFNAMSIZ];
20220 + int i;
20221 + int err = 0;
20222 +
20223 + dev = &evb_dev->dev;
20224 +
20225 + /* register switch device, it's for management only - no I/O */
20226 + netdev = alloc_etherdev(sizeof(*priv));
20227 + if (!netdev) {
20228 + dev_err(dev, "alloc_etherdev error\n");
20229 + return -ENOMEM;
20230 + }
20231 + netdev->netdev_ops = &evb_ops;
20232 +
20233 + dev_set_drvdata(dev, netdev);
20234 +
20235 + priv = netdev_priv(netdev);
20236 +
20237 + err = fsl_mc_portal_allocate(evb_dev, 0, &priv->mc_io);
20238 + if (unlikely(err)) {
20239 + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
20240 + goto err_free_netdev;
20241 + }
20242 + if (!priv->mc_io) {
20243 + dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
20244 + err = -EFAULT;
20245 + goto err_free_netdev;
20246 + }
20247 +
20248 + err = evb_init(evb_dev);
20249 + if (unlikely(err)) {
20250 + dev_err(dev, "evb init err %d\n", err);
20251 + goto err_free_cmdport;
20252 + }
20253 +
20254 + INIT_LIST_HEAD(&priv->port_list);
20255 + netdev->flags |= IFF_PROMISC | IFF_MASTER;
20256 +
20257 + dev_alloc_name(netdev, "evb%d");
20258 +
20259 + /* register switch ports */
20260 + snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name);
20261 +
20262 + /* only register downlinks? */
20263 + for (i = 0; i < priv->attr.num_ifs + 1; i++) {
20264 + struct net_device *port_netdev;
20265 + struct evb_port_priv *port_priv;
20266 +
20267 + if (i) {
20268 + port_netdev =
20269 + alloc_etherdev(sizeof(struct evb_port_priv));
20270 + if (!port_netdev) {
20271 + dev_err(dev, "alloc_etherdev error\n");
 20272 + err = -ENOMEM; goto err_takedown;
20273 + }
20274 +
20275 + port_priv = netdev_priv(port_netdev);
20276 +
20277 + port_netdev->flags |= IFF_PROMISC | IFF_SLAVE;
20278 +
20279 + dev_alloc_name(port_netdev, port_name);
20280 + } else {
20281 + port_netdev = netdev;
20282 + port_priv = &priv->uplink;
20283 + }
20284 +
20285 + port_priv->netdev = port_netdev;
20286 + port_priv->evb_priv = priv;
20287 + port_priv->port_index = i;
20288 +
20289 + SET_NETDEV_DEV(port_netdev, dev);
20290 +
20291 + if (i) {
20292 + port_netdev->netdev_ops = &evb_port_ops;
20293 +
20294 + err = register_netdev(port_netdev);
20295 + if (err < 0) {
20296 + dev_err(dev, "register_netdev err %d\n", err);
20297 + free_netdev(port_netdev);
20298 + goto err_takedown;
20299 + }
20300 +
20301 + rtnl_lock();
20302 + err = netdev_master_upper_dev_link(port_netdev, netdev,
20303 + NULL, NULL);
20304 + if (unlikely(err)) {
20305 + dev_err(dev, "netdev_master_upper_dev_link err %d\n",
20306 + err);
20307 + unregister_netdev(port_netdev);
20308 + free_netdev(port_netdev);
20309 + rtnl_unlock();
20310 + goto err_takedown;
20311 + }
20312 + rtmsg_ifinfo(RTM_NEWLINK, port_netdev,
20313 + IFF_SLAVE, GFP_KERNEL);
20314 + rtnl_unlock();
20315 +
20316 + list_add(&port_priv->list, &priv->port_list);
20317 + } else {
20318 + err = register_netdev(netdev);
20319 +
20320 + if (err < 0) {
20321 + dev_err(dev, "register_netdev error %d\n", err);
20322 + goto err_takedown;
20323 + }
20324 + }
20325 +
20326 + port_netdev->ethtool_ops = &evb_port_ethtool_ops;
20327 +
20328 + /* ports are up from init */
20329 + rtnl_lock();
20330 + err = dev_open(port_netdev);
20331 + rtnl_unlock();
20332 + if (unlikely(err))
20333 + dev_warn(dev, "dev_open err %d\n", err);
20334 + }
20335 +
20336 + /* setup irqs */
20337 + err = evb_setup_irqs(evb_dev);
20338 + if (unlikely(err)) {
20339 + dev_warn(dev, "evb_setup_irqs err %d\n", err);
20340 + goto err_takedown;
20341 + }
20342 +
20343 + dev_info(dev, "probed evb device with %d ports\n",
20344 + priv->attr.num_ifs);
20345 + return 0;
20346 +
20347 +err_takedown:
20348 + evb_remove(evb_dev);
20349 +err_free_cmdport:
20350 + fsl_mc_portal_free(priv->mc_io);
20351 +err_free_netdev:
20352 + return err;
20353 +}
20354 +
20355 +static const struct fsl_mc_device_id evb_match_id_table[] = {
20356 + {
20357 + .vendor = FSL_MC_VENDOR_FREESCALE,
20358 + .obj_type = "dpdmux",
20359 + },
20360 + {}
20361 +};
20362 +
20363 +static struct fsl_mc_driver evb_drv = {
20364 + .driver = {
20365 + .name = KBUILD_MODNAME,
20366 + .owner = THIS_MODULE,
20367 + },
20368 + .probe = evb_probe,
20369 + .remove = evb_remove,
20370 + .match_id_table = evb_match_id_table,
20371 +};
20372 +
20373 +module_fsl_mc_driver(evb_drv);
20374 +
20375 +MODULE_LICENSE("GPL");
20376 +MODULE_DESCRIPTION("Layerscape DPAA Edge Virtual Bridge driver (prototype)");
20377 --- /dev/null
20378 +++ b/drivers/staging/fsl-dpaa2/mac/Kconfig
20379 @@ -0,0 +1,23 @@
20380 +config FSL_DPAA2_MAC
20381 + tristate "DPAA2 MAC / PHY interface"
20382 + depends on FSL_MC_BUS && FSL_DPAA2
20383 + select MDIO_BUS_MUX_MMIOREG
20384 + select FSL_XGMAC_MDIO
20385 + select FIXED_PHY
20386 + ---help---
20387 + Prototype driver for DPAA2 MAC / PHY interface object.
20388 + This driver works as a proxy between phylib including phy drivers and
20389 + the MC firmware. It receives updates on link state changes from PHY
20390 + lib and forwards them to MC and receives interrupt from MC whenever
20391 + a request is made to change the link state.
20392 +
20393 +
20394 +config FSL_DPAA2_MAC_NETDEVS
20395 + bool "Expose net interfaces for PHYs"
20396 + default n
20397 + depends on FSL_DPAA2_MAC
20398 + ---help---
20399 + Exposes macX net interfaces which allow direct control over MACs and
20400 + PHYs.
 20401 +
20402 + Leave disabled if unsure.
20403 --- /dev/null
20404 +++ b/drivers/staging/fsl-dpaa2/mac/Makefile
20405 @@ -0,0 +1,10 @@
20406 +
20407 +obj-$(CONFIG_FSL_DPAA2_MAC) += dpaa2-mac.o
20408 +
20409 +dpaa2-mac-objs := mac.o dpmac.o
20410 +
20411 +all:
20412 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
20413 +
20414 +clean:
20415 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
20416 --- /dev/null
20417 +++ b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
20418 @@ -0,0 +1,172 @@
20419 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
20420 + *
20421 + * Redistribution and use in source and binary forms, with or without
20422 + * modification, are permitted provided that the following conditions are met:
20423 + * * Redistributions of source code must retain the above copyright
20424 + * notice, this list of conditions and the following disclaimer.
20425 + * * Redistributions in binary form must reproduce the above copyright
20426 + * notice, this list of conditions and the following disclaimer in the
20427 + * documentation and/or other materials provided with the distribution.
20428 + * * Neither the name of the above-listed copyright holders nor the
20429 + * names of any contributors may be used to endorse or promote products
20430 + * derived from this software without specific prior written permission.
20431 + *
20432 + *
20433 + * ALTERNATIVELY, this software may be distributed under the terms of the
20434 + * GNU General Public License ("GPL") as published by the Free Software
20435 + * Foundation, either version 2 of that License or (at your option) any
20436 + * later version.
20437 + *
20438 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20439 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20440 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20441 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
20442 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20443 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20444 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20445 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20446 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
20447 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
20448 + * POSSIBILITY OF SUCH DAMAGE.
20449 + */
20450 +#ifndef _FSL_DPMAC_CMD_H
20451 +#define _FSL_DPMAC_CMD_H
20452 +
20453 +/* DPMAC Version */
20454 +#define DPMAC_VER_MAJOR 4
20455 +#define DPMAC_VER_MINOR 2
20456 +#define DPMAC_CMD_BASE_VERSION 1
20457 +#define DPMAC_CMD_ID_OFFSET 4
20458 +
20459 +#define DPMAC_CMD(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_BASE_VERSION)
20460 +
20461 +/* Command IDs */
20462 +#define DPMAC_CMDID_CLOSE DPMAC_CMD(0x800)
20463 +#define DPMAC_CMDID_OPEN DPMAC_CMD(0x80c)
20464 +#define DPMAC_CMDID_CREATE DPMAC_CMD(0x90c)
20465 +#define DPMAC_CMDID_DESTROY DPMAC_CMD(0x98c)
20466 +#define DPMAC_CMDID_GET_API_VERSION DPMAC_CMD(0xa0c)
20467 +
20468 +#define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004)
20469 +#define DPMAC_CMDID_RESET DPMAC_CMD(0x005)
20470 +
20471 +#define DPMAC_CMDID_SET_IRQ_ENABLE DPMAC_CMD(0x012)
20472 +#define DPMAC_CMDID_GET_IRQ_ENABLE DPMAC_CMD(0x013)
20473 +#define DPMAC_CMDID_SET_IRQ_MASK DPMAC_CMD(0x014)
20474 +#define DPMAC_CMDID_GET_IRQ_MASK DPMAC_CMD(0x015)
20475 +#define DPMAC_CMDID_GET_IRQ_STATUS DPMAC_CMD(0x016)
20476 +#define DPMAC_CMDID_CLEAR_IRQ_STATUS DPMAC_CMD(0x017)
20477 +
20478 +#define DPMAC_CMDID_GET_LINK_CFG DPMAC_CMD(0x0c2)
20479 +#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD(0x0c3)
20480 +#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4)
20481 +
20482 +#define DPMAC_CMDID_SET_PORT_MAC_ADDR DPMAC_CMD(0x0c5)
20483 +
20484 +/* Macros for accessing command fields smaller than 1byte */
20485 +#define DPMAC_MASK(field) \
20486 + GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \
20487 + DPMAC_##field##_SHIFT)
20488 +#define dpmac_set_field(var, field, val) \
20489 + ((var) |= (((val) << DPMAC_##field##_SHIFT) & DPMAC_MASK(field)))
20490 +#define dpmac_get_field(var, field) \
20491 + (((var) & DPMAC_MASK(field)) >> DPMAC_##field##_SHIFT)
20492 +
20493 +struct dpmac_cmd_open {
20494 + u32 dpmac_id;
20495 +};
20496 +
20497 +struct dpmac_cmd_create {
20498 + u32 mac_id;
20499 +};
20500 +
20501 +struct dpmac_cmd_destroy {
20502 + u32 dpmac_id;
20503 +};
20504 +
20505 +struct dpmac_cmd_set_irq_enable {
20506 + u8 enable;
20507 + u8 pad[3];
20508 + u8 irq_index;
20509 +};
20510 +
20511 +struct dpmac_cmd_get_irq_enable {
20512 + u32 pad;
20513 + u8 irq_index;
20514 +};
20515 +
20516 +struct dpmac_rsp_get_irq_enable {
20517 + u8 enabled;
20518 +};
20519 +
20520 +struct dpmac_cmd_set_irq_mask {
20521 + u32 mask;
20522 + u8 irq_index;
20523 +};
20524 +
20525 +struct dpmac_cmd_get_irq_mask {
20526 + u32 pad;
20527 + u8 irq_index;
20528 +};
20529 +
20530 +struct dpmac_rsp_get_irq_mask {
20531 + u32 mask;
20532 +};
20533 +
20534 +struct dpmac_cmd_get_irq_status {
20535 + u32 status;
20536 + u8 irq_index;
20537 +};
20538 +
20539 +struct dpmac_rsp_get_irq_status {
20540 + u32 status;
20541 +};
20542 +
20543 +struct dpmac_cmd_clear_irq_status {
20544 + u32 status;
20545 + u8 irq_index;
20546 +};
20547 +
20548 +struct dpmac_rsp_get_attributes {
20549 + u8 eth_if;
20550 + u8 link_type;
20551 + u16 id;
20552 + u32 max_rate;
20553 +};
20554 +
20555 +struct dpmac_rsp_get_link_cfg {
20556 + u64 options;
20557 + u32 rate;
20558 +};
20559 +
20560 +#define DPMAC_STATE_SIZE 1
20561 +#define DPMAC_STATE_SHIFT 0
20562 +
20563 +struct dpmac_cmd_set_link_state {
20564 + u64 options;
20565 + u32 rate;
20566 + u32 pad;
20567 + /* only least significant bit is valid */
20568 + u8 up;
20569 +};
20570 +
20571 +struct dpmac_cmd_get_counter {
20572 + u8 type;
20573 +};
20574 +
20575 +struct dpmac_rsp_get_counter {
20576 + u64 pad;
20577 + u64 counter;
20578 +};
20579 +
20580 +struct dpmac_rsp_get_api_version {
20581 + u16 major;
20582 + u16 minor;
20583 +};
20584 +
20585 +struct dpmac_cmd_set_port_mac_addr {
20586 + u8 pad[2];
20587 + u8 addr[6];
20588 +};
20589 +
20590 +#endif /* _FSL_DPMAC_CMD_H */
20591 --- /dev/null
20592 +++ b/drivers/staging/fsl-dpaa2/mac/dpmac.c
20593 @@ -0,0 +1,620 @@
20594 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
20595 + *
20596 + * Redistribution and use in source and binary forms, with or without
20597 + * modification, are permitted provided that the following conditions are met:
20598 + * * Redistributions of source code must retain the above copyright
20599 + * notice, this list of conditions and the following disclaimer.
20600 + * * Redistributions in binary form must reproduce the above copyright
20601 + * notice, this list of conditions and the following disclaimer in the
20602 + * documentation and/or other materials provided with the distribution.
20603 + * * Neither the name of the above-listed copyright holders nor the
20604 + * names of any contributors may be used to endorse or promote products
20605 + * derived from this software without specific prior written permission.
20606 + *
20607 + *
20608 + * ALTERNATIVELY, this software may be distributed under the terms of the
20609 + * GNU General Public License ("GPL") as published by the Free Software
20610 + * Foundation, either version 2 of that License or (at your option) any
20611 + * later version.
20612 + *
20613 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20614 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20615 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20616 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
20617 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20618 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20619 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20620 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
20621 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
20622 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
20623 + * POSSIBILITY OF SUCH DAMAGE.
20624 + */
20625 +#include "../../fsl-mc/include/mc-sys.h"
20626 +#include "../../fsl-mc/include/mc-cmd.h"
20627 +#include "dpmac.h"
20628 +#include "dpmac-cmd.h"
20629 +
20630 +/**
20631 + * dpmac_open() - Open a control session for the specified object.
20632 + * @mc_io: Pointer to MC portal's I/O object
20633 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20634 + * @dpmac_id: DPMAC unique ID
20635 + * @token: Returned token; use in subsequent API calls
20636 + *
20637 + * This function can be used to open a control session for an
20638 + * already created object; an object may have been declared in
20639 + * the DPL or by calling the dpmac_create function.
20640 + * This function returns a unique authentication token,
20641 + * associated with the specific object ID and the specific MC
20642 + * portal; this token must be used in all subsequent commands for
20643 + * this specific object
20644 + *
20645 + * Return: '0' on Success; Error code otherwise.
20646 + */
20647 +int dpmac_open(struct fsl_mc_io *mc_io,
20648 + u32 cmd_flags,
20649 + int dpmac_id,
20650 + u16 *token)
20651 +{
20652 + struct dpmac_cmd_open *cmd_params;
20653 + struct mc_command cmd = { 0 };
20654 + int err;
20655 +
20656 + /* prepare command */
20657 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN,
20658 + cmd_flags,
20659 + 0);
20660 + cmd_params = (struct dpmac_cmd_open *)cmd.params;
20661 + cmd_params->dpmac_id = cpu_to_le32(dpmac_id);
20662 +
20663 + /* send command to mc*/
20664 + err = mc_send_command(mc_io, &cmd);
20665 + if (err)
20666 + return err;
20667 +
20668 + /* retrieve response parameters */
20669 + *token = mc_cmd_hdr_read_token(&cmd);
20670 +
20671 + return err;
20672 +}
20673 +
20674 +/**
20675 + * dpmac_close() - Close the control session of the object
20676 + * @mc_io: Pointer to MC portal's I/O object
20677 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20678 + * @token: Token of DPMAC object
20679 + *
20680 + * After this function is called, no further operations are
20681 + * allowed on the object without opening a new control session.
20682 + *
20683 + * Return: '0' on Success; Error code otherwise.
20684 + */
20685 +int dpmac_close(struct fsl_mc_io *mc_io,
20686 + u32 cmd_flags,
20687 + u16 token)
20688 +{
20689 + struct mc_command cmd = { 0 };
20690 +
20691 + /* prepare command */
20692 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags,
20693 + token);
20694 +
20695 + /* send command to mc*/
20696 + return mc_send_command(mc_io, &cmd);
20697 +}
20698 +
20699 +/**
20700 + * dpmac_create() - Create the DPMAC object.
20701 + * @mc_io: Pointer to MC portal's I/O object
20702 + * @dprc_token: Parent container token; '0' for default container
20703 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20704 + * @cfg: Configuration structure
20705 + * @obj_id: Returned object id
20706 + *
20707 + * Create the DPMAC object, allocate required resources and
20708 + * perform required initialization.
20709 + *
20710 + * The function accepts an authentication token of a parent
20711 + * container that this object should be assigned to. The token
20712 + * can be '0' so the object will be assigned to the default container.
20713 + * The newly created object can be opened with the returned
20714 + * object id and using the container's associated tokens and MC portals.
20715 + *
20716 + * Return: '0' on Success; Error code otherwise.
20717 + */
20718 +int dpmac_create(struct fsl_mc_io *mc_io,
20719 + u16 dprc_token,
20720 + u32 cmd_flags,
20721 + const struct dpmac_cfg *cfg,
20722 + u32 *obj_id)
20723 +{
20724 + struct dpmac_cmd_create *cmd_params;
20725 + struct mc_command cmd = { 0 };
20726 + int err;
20727 +
20728 + /* prepare command */
20729 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CREATE,
20730 + cmd_flags,
20731 + dprc_token);
20732 + cmd_params = (struct dpmac_cmd_create *)cmd.params;
20733 + cmd_params->mac_id = cpu_to_le32(cfg->mac_id);
20734 +
20735 + /* send command to mc*/
20736 + err = mc_send_command(mc_io, &cmd);
20737 + if (err)
20738 + return err;
20739 +
20740 + /* retrieve response parameters */
20741 + *obj_id = mc_cmd_read_object_id(&cmd);
20742 +
20743 + return 0;
20744 +}
20745 +
20746 +/**
20747 + * dpmac_destroy() - Destroy the DPMAC object and release all its resources.
20748 + * @mc_io: Pointer to MC portal's I/O object
20749 + * @dprc_token: Parent container token; '0' for default container
20750 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20751 + * @object_id: The object id; it must be a valid id within the container that
20752 + * created this object;
20753 + *
20754 + * The function accepts the authentication token of the parent container that
20755 + * created the object (not the one that currently owns the object). The object
20756 + * is searched within parent using the provided 'object_id'.
20757 + * All tokens to the object must be closed before calling destroy.
20758 + *
20759 + * Return: '0' on Success; error code otherwise.
20760 + */
20761 +int dpmac_destroy(struct fsl_mc_io *mc_io,
20762 + u16 dprc_token,
20763 + u32 cmd_flags,
20764 + u32 object_id)
20765 +{
20766 + struct dpmac_cmd_destroy *cmd_params;
20767 + struct mc_command cmd = { 0 };
20768 +
20769 + /* prepare command */
20770 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY,
20771 + cmd_flags,
20772 + dprc_token);
20773 + cmd_params = (struct dpmac_cmd_destroy *)cmd.params;
20774 + cmd_params->dpmac_id = cpu_to_le32(object_id);
20775 +
20776 + /* send command to mc*/
20777 + return mc_send_command(mc_io, &cmd);
20778 +}
20779 +
20780 +/**
20781 + * dpmac_set_irq_enable() - Set overall interrupt state.
20782 + * @mc_io: Pointer to MC portal's I/O object
20783 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20784 + * @token: Token of DPMAC object
20785 + * @irq_index: The interrupt index to configure
20786 + * @en: Interrupt state - enable = 1, disable = 0
20787 + *
20788 + * Allows GPP software to control when interrupts are generated.
20789 + * Each interrupt can have up to 32 causes. The enable/disable control's the
20790 + * overall interrupt state. if the interrupt is disabled no causes will cause
20791 + * an interrupt.
20792 + *
20793 + * Return: '0' on Success; Error code otherwise.
20794 + */
20795 +int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
20796 + u32 cmd_flags,
20797 + u16 token,
20798 + u8 irq_index,
20799 + u8 en)
20800 +{
20801 + struct dpmac_cmd_set_irq_enable *cmd_params;
20802 + struct mc_command cmd = { 0 };
20803 +
20804 + /* prepare command */
20805 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE,
20806 + cmd_flags,
20807 + token);
20808 + cmd_params = (struct dpmac_cmd_set_irq_enable *)cmd.params;
20809 + cmd_params->irq_index = irq_index;
20810 + cmd_params->enable = en;
20811 +
20812 + /* send command to mc*/
20813 + return mc_send_command(mc_io, &cmd);
20814 +}
20815 +
20816 +/**
20817 + * dpmac_get_irq_enable() - Get overall interrupt state
20818 + * @mc_io: Pointer to MC portal's I/O object
20819 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20820 + * @token: Token of DPMAC object
20821 + * @irq_index: The interrupt index to configure
20822 + * @en: Returned interrupt state - enable = 1, disable = 0
20823 + *
20824 + * Return: '0' on Success; Error code otherwise.
20825 + */
20826 +int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
20827 + u32 cmd_flags,
20828 + u16 token,
20829 + u8 irq_index,
20830 + u8 *en)
20831 +{
20832 + struct dpmac_cmd_get_irq_enable *cmd_params;
20833 + struct dpmac_rsp_get_irq_enable *rsp_params;
20834 + struct mc_command cmd = { 0 };
20835 + int err;
20836 +
20837 + /* prepare command */
20838 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_ENABLE,
20839 + cmd_flags,
20840 + token);
20841 + cmd_params = (struct dpmac_cmd_get_irq_enable *)cmd.params;
20842 + cmd_params->irq_index = irq_index;
20843 +
20844 + /* send command to mc*/
20845 + err = mc_send_command(mc_io, &cmd);
20846 + if (err)
20847 + return err;
20848 +
20849 + /* retrieve response parameters */
20850 + rsp_params = (struct dpmac_rsp_get_irq_enable *)cmd.params;
20851 + *en = rsp_params->enabled;
20852 +
20853 + return 0;
20854 +}
20855 +
20856 +/**
20857 + * dpmac_set_irq_mask() - Set interrupt mask.
20858 + * @mc_io: Pointer to MC portal's I/O object
20859 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20860 + * @token: Token of DPMAC object
20861 + * @irq_index: The interrupt index to configure
20862 + * @mask: Event mask to trigger interrupt;
20863 + * each bit:
20864 + * 0 = ignore event
20865 + * 1 = consider event for asserting IRQ
20866 + *
20867 + * Every interrupt can have up to 32 causes and the interrupt model supports
20868 + * masking/unmasking each cause independently
20869 + *
20870 + * Return: '0' on Success; Error code otherwise.
20871 + */
20872 +int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
20873 + u32 cmd_flags,
20874 + u16 token,
20875 + u8 irq_index,
20876 + u32 mask)
20877 +{
20878 + struct dpmac_cmd_set_irq_mask *cmd_params;
20879 + struct mc_command cmd = { 0 };
20880 +
20881 + /* prepare command */
20882 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK,
20883 + cmd_flags,
20884 + token);
20885 + cmd_params = (struct dpmac_cmd_set_irq_mask *)cmd.params;
20886 + cmd_params->mask = cpu_to_le32(mask);
20887 + cmd_params->irq_index = irq_index;
20888 +
20889 + /* send command to mc*/
20890 + return mc_send_command(mc_io, &cmd);
20891 +}
20892 +
20893 +/**
20894 + * dpmac_get_irq_mask() - Get interrupt mask.
20895 + * @mc_io: Pointer to MC portal's I/O object
20896 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20897 + * @token: Token of DPMAC object
20898 + * @irq_index: The interrupt index to configure
20899 + * @mask: Returned event mask to trigger interrupt
20900 + *
20901 + * Every interrupt can have up to 32 causes and the interrupt model supports
20902 + * masking/unmasking each cause independently
20903 + *
20904 + * Return: '0' on Success; Error code otherwise.
20905 + */
20906 +int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
20907 + u32 cmd_flags,
20908 + u16 token,
20909 + u8 irq_index,
20910 + u32 *mask)
20911 +{
20912 + struct dpmac_cmd_get_irq_mask *cmd_params;
20913 + struct dpmac_rsp_get_irq_mask *rsp_params;
20914 + struct mc_command cmd = { 0 };
20915 + int err;
20916 +
20917 + /* prepare command */
20918 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_MASK,
20919 + cmd_flags,
20920 + token);
20921 + cmd_params = (struct dpmac_cmd_get_irq_mask *)cmd.params;
20922 + cmd_params->irq_index = irq_index;
20923 +
20924 + /* send command to mc*/
20925 + err = mc_send_command(mc_io, &cmd);
20926 + if (err)
20927 + return err;
20928 +
20929 + /* retrieve response parameters */
20930 + rsp_params = (struct dpmac_rsp_get_irq_mask *)cmd.params;
20931 + *mask = le32_to_cpu(rsp_params->mask);
20932 +
20933 + return 0;
20934 +}
20935 +
20936 +/**
20937 + * dpmac_get_irq_status() - Get the current status of any pending interrupts.
20938 + *
20939 + * @mc_io: Pointer to MC portal's I/O object
20940 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20941 + * @token: Token of DPMAC object
20942 + * @irq_index: The interrupt index to configure
20943 + * @status: Returned interrupts status - one bit per cause:
20944 + * 0 = no interrupt pending
20945 + * 1 = interrupt pending
20946 + *
20947 + * Return: '0' on Success; Error code otherwise.
20948 + */
20949 +int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
20950 + u32 cmd_flags,
20951 + u16 token,
20952 + u8 irq_index,
20953 + u32 *status)
20954 +{
20955 + struct dpmac_cmd_get_irq_status *cmd_params;
20956 + struct dpmac_rsp_get_irq_status *rsp_params;
20957 + struct mc_command cmd = { 0 };
20958 + int err;
20959 +
20960 + /* prepare command */
20961 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_STATUS,
20962 + cmd_flags,
20963 + token);
20964 + cmd_params = (struct dpmac_cmd_get_irq_status *)cmd.params;
20965 + cmd_params->status = cpu_to_le32(*status);
20966 + cmd_params->irq_index = irq_index;
20967 +
20968 + /* send command to mc*/
20969 + err = mc_send_command(mc_io, &cmd);
20970 + if (err)
20971 + return err;
20972 +
20973 + /* retrieve response parameters */
20974 + rsp_params = (struct dpmac_rsp_get_irq_status *)cmd.params;
20975 + *status = le32_to_cpu(rsp_params->status);
20976 +
20977 + return 0;
20978 +}
20979 +
20980 +/**
20981 + * dpmac_clear_irq_status() - Clear a pending interrupt's status
20982 + *
20983 + * @mc_io: Pointer to MC portal's I/O object
20984 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
20985 + * @token: Token of DPMAC object
20986 + * @irq_index: The interrupt index to configure
20987 + * @status: Bits to clear (W1C) - one bit per cause:
20988 + * 0 = don't change
20989 + * 1 = clear status bit
20990 + *
20991 + * Return: '0' on Success; Error code otherwise.
20992 + */
20993 +int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
20994 + u32 cmd_flags,
20995 + u16 token,
20996 + u8 irq_index,
20997 + u32 status)
20998 +{
20999 + struct dpmac_cmd_clear_irq_status *cmd_params;
21000 + struct mc_command cmd = { 0 };
21001 +
21002 + /* prepare command */
21003 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS,
21004 + cmd_flags,
21005 + token);
21006 + cmd_params = (struct dpmac_cmd_clear_irq_status *)cmd.params;
21007 + cmd_params->status = cpu_to_le32(status);
21008 + cmd_params->irq_index = irq_index;
21009 +
21010 + /* send command to mc*/
21011 + return mc_send_command(mc_io, &cmd);
21012 +}
21013 +
21014 +/**
21015 + * dpmac_get_attributes - Retrieve DPMAC attributes.
21016 + *
21017 + * @mc_io: Pointer to MC portal's I/O object
21018 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
21019 + * @token: Token of DPMAC object
21020 + * @attr: Returned object's attributes
21021 + *
21022 + * Return: '0' on Success; Error code otherwise.
21023 + */
21024 +int dpmac_get_attributes(struct fsl_mc_io *mc_io,
21025 + u32 cmd_flags,
21026 + u16 token,
21027 + struct dpmac_attr *attr)
21028 +{
21029 + struct dpmac_rsp_get_attributes *rsp_params;
21030 + struct mc_command cmd = { 0 };
21031 + int err;
21032 +
21033 + /* prepare command */
21034 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR,
21035 + cmd_flags,
21036 + token);
21037 +
21038 + /* send command to mc*/
21039 + err = mc_send_command(mc_io, &cmd);
21040 + if (err)
21041 + return err;
21042 +
21043 + /* retrieve response parameters */
21044 + rsp_params = (struct dpmac_rsp_get_attributes *)cmd.params;
21045 + attr->eth_if = rsp_params->eth_if;
21046 + attr->link_type = rsp_params->link_type;
21047 + attr->id = le16_to_cpu(rsp_params->id);
21048 + attr->max_rate = le32_to_cpu(rsp_params->max_rate);
21049 +
21050 + return 0;
21051 +}
21052 +
21053 +/**
21054 + * dpmac_get_link_cfg() - Get Ethernet link configuration
21055 + * @mc_io: Pointer to opaque I/O object
21056 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
21057 + * @token: Token of DPMAC object
21058 + * @cfg: Returned structure with the link configuration
21059 + *
21060 + * Return: '0' on Success; Error code otherwise.
21061 + */
21062 +int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
21063 + u32 cmd_flags,
21064 + u16 token,
21065 + struct dpmac_link_cfg *cfg)
21066 +{
21067 + struct dpmac_rsp_get_link_cfg *rsp_params;
21068 + struct mc_command cmd = { 0 };
21069 + int err = 0;
21070 +
21071 + /* prepare command */
21072 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG,
21073 + cmd_flags,
21074 + token);
21075 +
21076 + /* send command to mc*/
21077 + err = mc_send_command(mc_io, &cmd);
21078 + if (err)
21079 + return err;
21080 +
21081 + rsp_params = (struct dpmac_rsp_get_link_cfg *)cmd.params;
21082 + cfg->options = le64_to_cpu(rsp_params->options);
21083 + cfg->rate = le32_to_cpu(rsp_params->rate);
21084 +
21085 + return 0;
21086 +}
21087 +
21088 +/**
21089 + * dpmac_set_link_state() - Set the Ethernet link status
21090 + * @mc_io: Pointer to opaque I/O object
21091 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
21092 + * @token: Token of DPMAC object
21093 + * @link_state: Link state configuration
21094 + *
21095 + * Return: '0' on Success; Error code otherwise.
21096 + */
21097 +int dpmac_set_link_state(struct fsl_mc_io *mc_io,
21098 + u32 cmd_flags,
21099 + u16 token,
21100 + struct dpmac_link_state *link_state)
21101 +{
21102 + struct dpmac_cmd_set_link_state *cmd_params;
21103 + struct mc_command cmd = { 0 };
21104 +
21105 + /* prepare command */
21106 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
21107 + cmd_flags,
21108 + token);
21109 + cmd_params = (struct dpmac_cmd_set_link_state *)cmd.params;
21110 + cmd_params->options = cpu_to_le64(link_state->options);
21111 + cmd_params->rate = cpu_to_le32(link_state->rate);
21112 + cmd_params->up = dpmac_get_field(link_state->up, STATE);
21113 +
21114 + /* send command to mc*/
21115 + return mc_send_command(mc_io, &cmd);
21116 +}
21117 +
21118 +/**
21119 + * dpmac_get_counter() - Read a specific DPMAC counter
21120 + * @mc_io: Pointer to opaque I/O object
21121 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
21122 + * @token: Token of DPMAC object
21123 + * @type: The requested counter
21124 + * @counter: Returned counter value
21125 + *
21126 + * Return: '0' on Success; Error code otherwise.
21127 + */
21128 +int dpmac_get_counter(struct fsl_mc_io *mc_io,
21129 + u32 cmd_flags,
21130 + u16 token,
21131 + enum dpmac_counter type,
21132 + u64 *counter)
21133 +{
21134 + struct dpmac_cmd_get_counter *dpmac_cmd;
21135 + struct dpmac_rsp_get_counter *dpmac_rsp;
21136 + struct mc_command cmd = { 0 };
21137 + int err = 0;
21138 +
21139 + /* prepare command */
21140 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER,
21141 + cmd_flags,
21142 + token);
21143 + dpmac_cmd = (struct dpmac_cmd_get_counter *)cmd.params;
21144 + dpmac_cmd->type = type;
21145 +
21146 + /* send command to mc*/
21147 + err = mc_send_command(mc_io, &cmd);
21148 + if (err)
21149 + return err;
21150 +
21151 + dpmac_rsp = (struct dpmac_rsp_get_counter *)cmd.params;
21152 + *counter = le64_to_cpu(dpmac_rsp->counter);
21153 +
21154 + return 0;
21155 +}
21156 +
21157 +/* untested */
21158 +int dpmac_set_port_mac_addr(struct fsl_mc_io *mc_io,
21159 + u32 cmd_flags,
21160 + u16 token,
21161 + const u8 addr[6])
21162 +{
21163 + struct dpmac_cmd_set_port_mac_addr *dpmac_cmd;
21164 + struct mc_command cmd = { 0 };
21165 +
21166 + /* prepare command */
21167 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_PORT_MAC_ADDR,
21168 + cmd_flags,
21169 + token);
21170 + dpmac_cmd = (struct dpmac_cmd_set_port_mac_addr *)cmd.params;
21171 + dpmac_cmd->addr[0] = addr[5];
21172 + dpmac_cmd->addr[1] = addr[4];
21173 + dpmac_cmd->addr[2] = addr[3];
21174 + dpmac_cmd->addr[3] = addr[2];
21175 + dpmac_cmd->addr[4] = addr[1];
21176 + dpmac_cmd->addr[5] = addr[0];
21177 +
21178 + /* send command to mc*/
21179 + return mc_send_command(mc_io, &cmd);
21180 +}
21181 +
21182 +/**
21183 + * dpmac_get_api_version() - Get Data Path MAC version
21184 + * @mc_io: Pointer to MC portal's I/O object
21185 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
21186 + * @major_ver: Major version of data path mac API
21187 + * @minor_ver: Minor version of data path mac API
21188 + *
21189 + * Return: '0' on Success; Error code otherwise.
21190 + */
21191 +int dpmac_get_api_version(struct fsl_mc_io *mc_io,
21192 + u32 cmd_flags,
21193 + u16 *major_ver,
21194 + u16 *minor_ver)
21195 +{
21196 + struct dpmac_rsp_get_api_version *rsp_params;
21197 + struct mc_command cmd = { 0 };
21198 + int err;
21199 +
21200 + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_API_VERSION,
21201 + cmd_flags,
21202 + 0);
21203 +
21204 + err = mc_send_command(mc_io, &cmd);
21205 + if (err)
21206 + return err;
21207 +
21208 + rsp_params = (struct dpmac_rsp_get_api_version *)cmd.params;
21209 + *major_ver = le16_to_cpu(rsp_params->major);
21210 + *minor_ver = le16_to_cpu(rsp_params->minor);
21211 +
21212 + return 0;
21213 +}
21214 --- /dev/null
21215 +++ b/drivers/staging/fsl-dpaa2/mac/dpmac.h
21216 @@ -0,0 +1,342 @@
21217 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
21218 + *
21219 + * Redistribution and use in source and binary forms, with or without
21220 + * modification, are permitted provided that the following conditions are met:
21221 + * * Redistributions of source code must retain the above copyright
21222 + * notice, this list of conditions and the following disclaimer.
21223 + * * Redistributions in binary form must reproduce the above copyright
21224 + * notice, this list of conditions and the following disclaimer in the
21225 + * documentation and/or other materials provided with the distribution.
21226 + * * Neither the name of the above-listed copyright holders nor the
21227 + * names of any contributors may be used to endorse or promote products
21228 + * derived from this software without specific prior written permission.
21229 + *
21230 + *
21231 + * ALTERNATIVELY, this software may be distributed under the terms of the
21232 + * GNU General Public License ("GPL") as published by the Free Software
21233 + * Foundation, either version 2 of that License or (at your option) any
21234 + * later version.
21235 + *
21236 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21237 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21238 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21239 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
21240 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21241 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21242 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21243 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21244 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21245 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
21246 + * POSSIBILITY OF SUCH DAMAGE.
21247 + */
21248 +#ifndef __FSL_DPMAC_H
21249 +#define __FSL_DPMAC_H
21250 +
21251 +/* Data Path MAC API
21252 + * Contains initialization APIs and runtime control APIs for DPMAC
21253 + */
21254 +
21255 +struct fsl_mc_io;
21256 +
21257 +int dpmac_open(struct fsl_mc_io *mc_io,
21258 + u32 cmd_flags,
21259 + int dpmac_id,
21260 + u16 *token);
21261 +
21262 +int dpmac_close(struct fsl_mc_io *mc_io,
21263 + u32 cmd_flags,
21264 + u16 token);
21265 +
21266 +/**
21267 + * enum dpmac_link_type - DPMAC link type
21268 + * @DPMAC_LINK_TYPE_NONE: No link
21269 + * @DPMAC_LINK_TYPE_FIXED: Link is fixed type
21270 + * @DPMAC_LINK_TYPE_PHY: Link by PHY ID
21271 + * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type
21272 + */
21273 +enum dpmac_link_type {
21274 + DPMAC_LINK_TYPE_NONE,
21275 + DPMAC_LINK_TYPE_FIXED,
21276 + DPMAC_LINK_TYPE_PHY,
21277 + DPMAC_LINK_TYPE_BACKPLANE
21278 +};
21279 +
21280 +/**
21281 + * enum dpmac_eth_if - DPMAC Ethernet interface
21282 + * @DPMAC_ETH_IF_MII: MII interface
21283 + * @DPMAC_ETH_IF_RMII: RMII interface
21284 + * @DPMAC_ETH_IF_SMII: SMII interface
21285 + * @DPMAC_ETH_IF_GMII: GMII interface
21286 + * @DPMAC_ETH_IF_RGMII: RGMII interface
21287 + * @DPMAC_ETH_IF_SGMII: SGMII interface
21288 + * @DPMAC_ETH_IF_QSGMII: QSGMII interface
21289 + * @DPMAC_ETH_IF_XAUI: XAUI interface
21290 + * @DPMAC_ETH_IF_XFI: XFI interface
21291 + */
21292 +enum dpmac_eth_if {
21293 + DPMAC_ETH_IF_MII,
21294 + DPMAC_ETH_IF_RMII,
21295 + DPMAC_ETH_IF_SMII,
21296 + DPMAC_ETH_IF_GMII,
21297 + DPMAC_ETH_IF_RGMII,
21298 + DPMAC_ETH_IF_SGMII,
21299 + DPMAC_ETH_IF_QSGMII,
21300 + DPMAC_ETH_IF_XAUI,
21301 + DPMAC_ETH_IF_XFI
21302 +};
21303 +
21304 +/**
21305 + * struct dpmac_cfg - Structure representing DPMAC configuration
21306 + * @mac_id: Represents the Hardware MAC ID; in case of multiple WRIOP,
21307 + * the MAC IDs are continuous.
21308 + * For example: 2 WRIOPs, 16 MACs in each:
21309 + * MAC IDs for the 1st WRIOP: 1-16,
21310 + * MAC IDs for the 2nd WRIOP: 17-32.
21311 + */
21312 +struct dpmac_cfg {
21313 + u16 mac_id;
21314 +};
21315 +
21316 +int dpmac_create(struct fsl_mc_io *mc_io,
21317 + u16 dprc_token,
21318 + u32 cmd_flags,
21319 + const struct dpmac_cfg *cfg,
21320 + u32 *obj_id);
21321 +
21322 +int dpmac_destroy(struct fsl_mc_io *mc_io,
21323 + u16 dprc_token,
21324 + u32 cmd_flags,
21325 + u32 object_id);
21326 +
21327 +/**
21328 + * DPMAC IRQ Index and Events
21329 + */
21330 +
21331 +/**
21332 + * IRQ index
21333 + */
21334 +#define DPMAC_IRQ_INDEX 0
21335 +/**
21336 + * IRQ event - indicates a change in link state
21337 + */
21338 +#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001
21339 +/**
21340 + * IRQ event - Indicates that the link state changed
21341 + */
21342 +#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002
21343 +
21344 +int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
21345 + u32 cmd_flags,
21346 + u16 token,
21347 + u8 irq_index,
21348 + u8 en);
21349 +
21350 +int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
21351 + u32 cmd_flags,
21352 + u16 token,
21353 + u8 irq_index,
21354 + u8 *en);
21355 +
21356 +int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
21357 + u32 cmd_flags,
21358 + u16 token,
21359 + u8 irq_index,
21360 + u32 mask);
21361 +
21362 +int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
21363 + u32 cmd_flags,
21364 + u16 token,
21365 + u8 irq_index,
21366 + u32 *mask);
21367 +
21368 +int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
21369 + u32 cmd_flags,
21370 + u16 token,
21371 + u8 irq_index,
21372 + u32 *status);
21373 +
21374 +int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
21375 + u32 cmd_flags,
21376 + u16 token,
21377 + u8 irq_index,
21378 + u32 status);
21379 +
21380 +/**
21381 + * struct dpmac_attr - Structure representing DPMAC attributes
21382 + * @id: DPMAC object ID
21383 + * @max_rate: Maximum supported rate - in Mbps
21384 + * @eth_if: Ethernet interface
21385 + * @link_type: link type
21386 + */
21387 +struct dpmac_attr {
21388 + u16 id;
21389 + u32 max_rate;
21390 + enum dpmac_eth_if eth_if;
21391 + enum dpmac_link_type link_type;
21392 +};
21393 +
21394 +int dpmac_get_attributes(struct fsl_mc_io *mc_io,
21395 + u32 cmd_flags,
21396 + u16 token,
21397 + struct dpmac_attr *attr);
21398 +
21399 +/**
21400 + * DPMAC link configuration/state options
21401 + */
21402 +
21403 +/**
21404 + * Enable auto-negotiation
21405 + */
21406 +#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL
21407 +/**
21408 + * Enable half-duplex mode
21409 + */
21410 +#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
21411 +/**
21412 + * Enable pause frames
21413 + */
21414 +#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL
21415 +/**
21416 + * Enable a-symmetric pause frames
21417 + */
21418 +#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
21419 +
21420 +/**
21421 + * struct dpmac_link_cfg - Structure representing DPMAC link configuration
21422 + * @rate: Link's rate - in Mbps
21423 + * @options: Enable/Disable DPMAC link cfg features (bitmap)
21424 + */
21425 +struct dpmac_link_cfg {
21426 + u32 rate;
21427 + u64 options;
21428 +};
21429 +
21430 +int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
21431 + u32 cmd_flags,
21432 + u16 token,
21433 + struct dpmac_link_cfg *cfg);
21434 +
21435 +/**
21436 + * struct dpmac_link_state - DPMAC link configuration request
21437 + * @rate: Rate in Mbps
21438 + * @options: Enable/Disable DPMAC link cfg features (bitmap)
21439 + * @up: Link state
21440 + */
21441 +struct dpmac_link_state {
21442 + u32 rate;
21443 + u64 options;
21444 + int up;
21445 +};
21446 +
21447 +int dpmac_set_link_state(struct fsl_mc_io *mc_io,
21448 + u32 cmd_flags,
21449 + u16 token,
21450 + struct dpmac_link_state *link_state);
21451 +
21452 +/**
21453 + * enum dpmac_counter - DPMAC counter types
21454 + * @DPMAC_CNT_ING_FRAME_64: counts 64-bytes frames, good or bad.
21455 + * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-bytes frames, good or bad.
21456 + * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-bytes frames, good or bad.
21457 + * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-bytes frames, good or bad.
21458 + * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-bytes frames, good or bad.
21459 + * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-bytes frames, good or bad.
21460 + * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-bytes frames and larger
21461 + * (up to max frame length specified),
21462 + * good or bad.
21463 + * @DPMAC_CNT_ING_FRAG: counts frames which are shorter than 64 bytes received
21464 + * with a wrong CRC
21465 + * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length
21466 + * specified, with a bad frame check sequence.
21467 + * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors.
21468 + * Occurs when a receive FIFO overflows.
21469 + * Includes also frames truncated as a result of
21470 + * the receive FIFO overflow.
21471 + * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error
21472 + * (optional used for wrong SFD).
21473 + * @DPMAC_CNT_EGR_UNDERSIZED: counts frames transmitted that were less than 64
21474 + * bytes long with a good CRC.
21475 + * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length
21476 + * specified, with a good frame check sequence.
21477 + * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC)
21478 + * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted
21479 + * (regular and PFC).
21480 + * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid
21481 + * frames and valid pause frames.
21482 + * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames.
21483 + * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames.
21484 + * @DPMAC_CNT_ING_ALL_FRAME: counts each good or bad frames received.
21485 + * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames.
21486 + * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error
21487 + * (except for undersized/fragment frame).
21488 + * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid
21489 + * frames and valid pause frames transmitted.
21490 + * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames.
21491 + * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames.
21492 + * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames.
21493 + * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error.
21494 + * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including
21495 + * pause frames.
21496 + * @DPMAC_CNT_ENG_GOOD_FRAME: counts frames transmitted without error, including
21497 + * pause frames.
21498 + */
21499 +enum dpmac_counter {
21500 + DPMAC_CNT_ING_FRAME_64,
21501 + DPMAC_CNT_ING_FRAME_127,
21502 + DPMAC_CNT_ING_FRAME_255,
21503 + DPMAC_CNT_ING_FRAME_511,
21504 + DPMAC_CNT_ING_FRAME_1023,
21505 + DPMAC_CNT_ING_FRAME_1518,
21506 + DPMAC_CNT_ING_FRAME_1519_MAX,
21507 + DPMAC_CNT_ING_FRAG,
21508 + DPMAC_CNT_ING_JABBER,
21509 + DPMAC_CNT_ING_FRAME_DISCARD,
21510 + DPMAC_CNT_ING_ALIGN_ERR,
21511 + DPMAC_CNT_EGR_UNDERSIZED,
21512 + DPMAC_CNT_ING_OVERSIZED,
21513 + DPMAC_CNT_ING_VALID_PAUSE_FRAME,
21514 + DPMAC_CNT_EGR_VALID_PAUSE_FRAME,
21515 + DPMAC_CNT_ING_BYTE,
21516 + DPMAC_CNT_ING_MCAST_FRAME,
21517 + DPMAC_CNT_ING_BCAST_FRAME,
21518 + DPMAC_CNT_ING_ALL_FRAME,
21519 + DPMAC_CNT_ING_UCAST_FRAME,
21520 + DPMAC_CNT_ING_ERR_FRAME,
21521 + DPMAC_CNT_EGR_BYTE,
21522 + DPMAC_CNT_EGR_MCAST_FRAME,
21523 + DPMAC_CNT_EGR_BCAST_FRAME,
21524 + DPMAC_CNT_EGR_UCAST_FRAME,
21525 + DPMAC_CNT_EGR_ERR_FRAME,
21526 + DPMAC_CNT_ING_GOOD_FRAME,
21527 + DPMAC_CNT_ENG_GOOD_FRAME
21528 +};
21529 +
21530 +int dpmac_get_counter(struct fsl_mc_io *mc_io,
21531 + u32 cmd_flags,
21532 + u16 token,
21533 + enum dpmac_counter type,
21534 + u64 *counter);
21535 +
21536 +/**
21537 + * dpmac_set_port_mac_addr() - Set a MAC address associated with the physical
21538 + * port. This is not used for filtering, MAC is always in
21539 + * promiscuous mode, it is passed to DPNIs through DPNI API for
21540 + *		application use.
21541 + * @mc_io: Pointer to opaque I/O object
21542 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
21543 + * @token: Token of DPMAC object
21544 + * @addr: MAC address to set
21545 + *
21546 + * Return: '0' on Success; Error code otherwise.
21547 + */
21548 +int dpmac_set_port_mac_addr(struct fsl_mc_io *mc_io,
21549 + u32 cmd_flags,
21550 + u16 token,
21551 + const u8 addr[6]);
21552 +
21553 +int dpmac_get_api_version(struct fsl_mc_io *mc_io,
21554 + u32 cmd_flags,
21555 + u16 *major_ver,
21556 + u16 *minor_ver);
21557 +
21558 +#endif /* __FSL_DPMAC_H */
21559 --- /dev/null
21560 +++ b/drivers/staging/fsl-dpaa2/mac/mac.c
21561 @@ -0,0 +1,669 @@
21562 +/* Copyright 2015 Freescale Semiconductor Inc.
21563 + *
21564 + * Redistribution and use in source and binary forms, with or without
21565 + * modification, are permitted provided that the following conditions are met:
21566 + * * Redistributions of source code must retain the above copyright
21567 + * notice, this list of conditions and the following disclaimer.
21568 + * * Redistributions in binary form must reproduce the above copyright
21569 + * notice, this list of conditions and the following disclaimer in the
21570 + * documentation and/or other materials provided with the distribution.
21571 + * * Neither the name of Freescale Semiconductor nor the
21572 + * names of its contributors may be used to endorse or promote products
21573 + * derived from this software without specific prior written permission.
21574 + *
21575 + *
21576 + * ALTERNATIVELY, this software may be distributed under the terms of the
21577 + * GNU General Public License ("GPL") as published by the Free Software
21578 + * Foundation, either version 2 of that License or (at your option) any
21579 + * later version.
21580 + *
21581 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
21582 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21583 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21584 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
21585 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21586 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21587 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
21588 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21589 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
21590 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
21591 + */
21592 +
21593 +#include <linux/module.h>
21594 +
21595 +#include <linux/netdevice.h>
21596 +#include <linux/etherdevice.h>
21597 +#include <linux/msi.h>
21598 +#include <linux/rtnetlink.h>
21599 +#include <linux/if_vlan.h>
21600 +
21601 +#include <uapi/linux/if_bridge.h>
21602 +#include <net/netlink.h>
21603 +
21604 +#include <linux/of.h>
21605 +#include <linux/of_mdio.h>
21606 +#include <linux/of_net.h>
21607 +#include <linux/phy.h>
21608 +#include <linux/phy_fixed.h>
21609 +
21610 +#include "../../fsl-mc/include/mc.h"
21611 +#include "../../fsl-mc/include/mc-sys.h"
21612 +
21613 +#include "dpmac.h"
21614 +#include "dpmac-cmd.h"
21615 +
21616 +struct dpaa2_mac_priv {
21617 + struct net_device *netdev;
21618 + struct fsl_mc_device *mc_dev;
21619 + struct dpmac_attr attr;
21620 + struct dpmac_link_state old_state;
21621 +};
21622 +
21623 +/* TODO: fix the 10G modes, mapping can't be right:
21624 + * XGMII is parallel
21625 + * XAUI is serial, using 8b/10b encoding
21626 + * XFI is also serial but using 64b/66b encoding
21627 + * they can't all map to XGMII...
21628 + *
21629 + * This must be kept in sync with enum dpmac_eth_if.
21630 + */
21631 +static phy_interface_t dpaa2_mac_iface_mode[] = {
21632 + PHY_INTERFACE_MODE_MII, /* DPMAC_ETH_IF_MII */
21633 + PHY_INTERFACE_MODE_RMII, /* DPMAC_ETH_IF_RMII */
21634 + PHY_INTERFACE_MODE_SMII, /* DPMAC_ETH_IF_SMII */
21635 + PHY_INTERFACE_MODE_GMII, /* DPMAC_ETH_IF_GMII */
21636 + PHY_INTERFACE_MODE_RGMII, /* DPMAC_ETH_IF_RGMII */
21637 + PHY_INTERFACE_MODE_SGMII, /* DPMAC_ETH_IF_SGMII */
21638 + PHY_INTERFACE_MODE_QSGMII, /* DPMAC_ETH_IF_QSGMII */
21639 + PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_XAUI */
21640 + PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_XFI */
21641 +};
21642 +
21643 +static void dpaa2_mac_link_changed(struct net_device *netdev)
21644 +{
21645 + struct phy_device *phydev;
21646 + struct dpmac_link_state state = { 0 };
21647 + struct dpaa2_mac_priv *priv = netdev_priv(netdev);
21648 + int err;
21649 +
21650 + /* the PHY just notified us of link state change */
21651 + phydev = netdev->phydev;
21652 +
21653 + state.up = !!phydev->link;
21654 + if (phydev->link) {
21655 + state.rate = phydev->speed;
21656 +
21657 + if (!phydev->duplex)
21658 + state.options |= DPMAC_LINK_OPT_HALF_DUPLEX;
21659 + if (phydev->autoneg)
21660 + state.options |= DPMAC_LINK_OPT_AUTONEG;
21661 +
21662 + netif_carrier_on(netdev);
21663 + } else {
21664 + netif_carrier_off(netdev);
21665 + }
21666 +
21667 + if (priv->old_state.up != state.up ||
21668 + priv->old_state.rate != state.rate ||
21669 + priv->old_state.options != state.options) {
21670 + priv->old_state = state;
21671 + phy_print_status(phydev);
21672 + }
21673 +
21674 + /* We must interrogate MC at all times, because we don't know
21675 + * when and whether a potential DPNI may have read the link state.
21676 + */
21677 + err = dpmac_set_link_state(priv->mc_dev->mc_io, 0,
21678 + priv->mc_dev->mc_handle, &state);
21679 + if (unlikely(err))
21680 + dev_err(&priv->mc_dev->dev, "dpmac_set_link_state: %d\n", err);
21681 +}
21682 +
21683 +static int dpaa2_mac_open(struct net_device *netdev)
21684 +{
21685 + /* start PHY state machine */
21686 + phy_start(netdev->phydev);
21687 +
21688 + return 0;
21689 +}
21690 +
21691 +static int dpaa2_mac_stop(struct net_device *netdev)
21692 +{
21693 + if (!netdev->phydev)
21694 + goto done;
21695 +
21696 + /* stop PHY state machine */
21697 + phy_stop(netdev->phydev);
21698 +
21699 + /* signal link down to firmware */
21700 + netdev->phydev->link = 0;
21701 + dpaa2_mac_link_changed(netdev);
21702 +
21703 +done:
21704 + return 0;
21705 +}
21706 +
21707 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
21708 +static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb,
21709 + struct net_device *dev)
21710 +{
21711 + /* we don't support I/O for now, drop the frame */
21712 + dev_kfree_skb_any(skb);
21713 + return NETDEV_TX_OK;
21714 +}
21715 +
21716 +static int dpaa2_mac_get_settings(struct net_device *netdev,
21717 + struct ethtool_cmd *cmd)
21718 +{
21719 + return phy_ethtool_gset(netdev->phydev, cmd);
21720 +}
21721 +
21722 +static int dpaa2_mac_set_settings(struct net_device *netdev,
21723 + struct ethtool_cmd *cmd)
21724 +{
21725 + return phy_ethtool_sset(netdev->phydev, cmd);
21726 +}
21727 +
21728 +static void dpaa2_mac_get_stats(struct net_device *netdev,
21729 + struct rtnl_link_stats64 *storage)
21730 +{
21731 + struct dpaa2_mac_priv *priv = netdev_priv(netdev);
21732 + u64 tmp;
21733 + int err;
21734 +
21735 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21736 + DPMAC_CNT_EGR_MCAST_FRAME,
21737 + &storage->tx_packets);
21738 + if (err)
21739 + goto error;
21740 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21741 + DPMAC_CNT_EGR_BCAST_FRAME, &tmp);
21742 + if (err)
21743 + goto error;
21744 + storage->tx_packets += tmp;
21745 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21746 + DPMAC_CNT_EGR_UCAST_FRAME, &tmp);
21747 + if (err)
21748 + goto error;
21749 + storage->tx_packets += tmp;
21750 +
21751 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21752 + DPMAC_CNT_EGR_UNDERSIZED, &storage->tx_dropped);
21753 + if (err)
21754 + goto error;
21755 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21756 + DPMAC_CNT_EGR_BYTE, &storage->tx_bytes);
21757 + if (err)
21758 + goto error;
21759 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21760 + DPMAC_CNT_EGR_ERR_FRAME, &storage->tx_errors);
21761 + if (err)
21762 + goto error;
21763 +
21764 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21765 + DPMAC_CNT_ING_ALL_FRAME, &storage->rx_packets);
21766 + if (err)
21767 + goto error;
21768 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21769 + DPMAC_CNT_ING_MCAST_FRAME, &storage->multicast);
21770 + if (err)
21771 + goto error;
21772 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21773 + DPMAC_CNT_ING_FRAME_DISCARD,
21774 + &storage->rx_dropped);
21775 + if (err)
21776 + goto error;
21777 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21778 + DPMAC_CNT_ING_ALIGN_ERR, &storage->rx_errors);
21779 + if (err)
21780 + goto error;
21781 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21782 + DPMAC_CNT_ING_OVERSIZED, &tmp);
21783 + if (err)
21784 + goto error;
21785 + storage->rx_errors += tmp;
21786 + err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
21787 + DPMAC_CNT_ING_BYTE, &storage->rx_bytes);
21788 + if (err)
21789 + goto error;
21790 +
21791 + return;
21792 +error:
21793 + netdev_err(netdev, "dpmac_get_counter err %d\n", err);
21794 +}
21795 +
21796 +static struct {
21797 + enum dpmac_counter id;
21798 + char name[ETH_GSTRING_LEN];
21799 +} dpaa2_mac_counters[] = {
21800 + {DPMAC_CNT_ING_ALL_FRAME, "rx all frames"},
21801 + {DPMAC_CNT_ING_GOOD_FRAME, "rx frames ok"},
21802 + {DPMAC_CNT_ING_ERR_FRAME, "rx frame errors"},
21803 + {DPMAC_CNT_ING_FRAME_DISCARD, "rx frame discards"},
21804 + {DPMAC_CNT_ING_UCAST_FRAME, "rx u-cast"},
21805 + {DPMAC_CNT_ING_BCAST_FRAME, "rx b-cast"},
21806 + {DPMAC_CNT_ING_MCAST_FRAME, "rx m-cast"},
21807 + {DPMAC_CNT_ING_FRAME_64, "rx 64 bytes"},
21808 + {DPMAC_CNT_ING_FRAME_127, "rx 65-127 bytes"},
21809 + {DPMAC_CNT_ING_FRAME_255, "rx 128-255 bytes"},
21810 + {DPMAC_CNT_ING_FRAME_511, "rx 256-511 bytes"},
21811 + {DPMAC_CNT_ING_FRAME_1023, "rx 512-1023 bytes"},
21812 + {DPMAC_CNT_ING_FRAME_1518, "rx 1024-1518 bytes"},
21813 + {DPMAC_CNT_ING_FRAME_1519_MAX, "rx 1519-max bytes"},
21814 + {DPMAC_CNT_ING_FRAG, "rx frags"},
21815 + {DPMAC_CNT_ING_JABBER, "rx jabber"},
21816 + {DPMAC_CNT_ING_ALIGN_ERR, "rx align errors"},
21817 + {DPMAC_CNT_ING_OVERSIZED, "rx oversized"},
21818 + {DPMAC_CNT_ING_VALID_PAUSE_FRAME, "rx pause"},
21819 + {DPMAC_CNT_ING_BYTE, "rx bytes"},
21820 + {DPMAC_CNT_ENG_GOOD_FRAME, "tx frames ok"},
21821 + {DPMAC_CNT_EGR_UCAST_FRAME, "tx u-cast"},
21822 + {DPMAC_CNT_EGR_MCAST_FRAME, "tx m-cast"},
21823 + {DPMAC_CNT_EGR_BCAST_FRAME, "tx b-cast"},
21824 + {DPMAC_CNT_EGR_ERR_FRAME, "tx frame errors"},
21825 + {DPMAC_CNT_EGR_UNDERSIZED, "tx undersized"},
21826 + {DPMAC_CNT_EGR_VALID_PAUSE_FRAME, "tx b-pause"},
21827 + {DPMAC_CNT_EGR_BYTE, "tx bytes"},
21828 +
21829 +};
21830 +
21831 +static void dpaa2_mac_get_strings(struct net_device *netdev,
21832 + u32 stringset, u8 *data)
21833 +{
21834 + int i;
21835 +
21836 + switch (stringset) {
21837 + case ETH_SS_STATS:
21838 + for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++)
21839 + memcpy(data + i * ETH_GSTRING_LEN,
21840 + dpaa2_mac_counters[i].name,
21841 + ETH_GSTRING_LEN);
21842 + break;
21843 + }
21844 +}
21845 +
21846 +static void dpaa2_mac_get_ethtool_stats(struct net_device *netdev,
21847 + struct ethtool_stats *stats,
21848 + u64 *data)
21849 +{
21850 + struct dpaa2_mac_priv *priv = netdev_priv(netdev);
21851 + int i;
21852 + int err;
21853 +
21854 + for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) {
21855 + err = dpmac_get_counter(priv->mc_dev->mc_io,
21856 + 0,
21857 + priv->mc_dev->mc_handle,
21858 + dpaa2_mac_counters[i].id, &data[i]);
21859 + if (err)
21860 + netdev_err(netdev, "dpmac_get_counter[%s] err %d\n",
21861 + dpaa2_mac_counters[i].name, err);
21862 + }
21863 +}
21864 +
21865 +static int dpaa2_mac_get_sset_count(struct net_device *dev, int sset)
21866 +{
21867 + switch (sset) {
21868 + case ETH_SS_STATS:
21869 + return ARRAY_SIZE(dpaa2_mac_counters);
21870 + default:
21871 + return -EOPNOTSUPP;
21872 + }
21873 +}
21874 +
21875 +static const struct net_device_ops dpaa2_mac_ndo_ops = {
21876 + .ndo_open = &dpaa2_mac_open,
21877 + .ndo_stop = &dpaa2_mac_stop,
21878 + .ndo_start_xmit = &dpaa2_mac_drop_frame,
21879 + .ndo_get_stats64 = &dpaa2_mac_get_stats,
21880 +};
21881 +
21882 +static const struct ethtool_ops dpaa2_mac_ethtool_ops = {
21883 + .get_settings = &dpaa2_mac_get_settings,
21884 + .set_settings = &dpaa2_mac_set_settings,
21885 + .get_strings = &dpaa2_mac_get_strings,
21886 + .get_ethtool_stats = &dpaa2_mac_get_ethtool_stats,
21887 + .get_sset_count = &dpaa2_mac_get_sset_count,
21888 +};
21889 +#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
21890 +
21891 +static void configure_link(struct dpaa2_mac_priv *priv,
21892 + struct dpmac_link_cfg *cfg)
21893 +{
21894 + struct phy_device *phydev = priv->netdev->phydev;
21895 +
21896 + if (unlikely(!phydev))
21897 + return;
21898 +
21899 + phydev->speed = cfg->rate;
21900 + phydev->duplex = !!(cfg->options & DPMAC_LINK_OPT_HALF_DUPLEX);
21901 +
21902 + if (cfg->options & DPMAC_LINK_OPT_AUTONEG) {
21903 + phydev->autoneg = 1;
21904 + phydev->advertising |= ADVERTISED_Autoneg;
21905 + } else {
21906 + phydev->autoneg = 0;
21907 + phydev->advertising &= ~ADVERTISED_Autoneg;
21908 + }
21909 +
21910 + phy_start_aneg(phydev);
21911 +}
21912 +
21913 +static irqreturn_t dpaa2_mac_irq_handler(int irq_num, void *arg)
21914 +{
21915 + struct device *dev = (struct device *)arg;
21916 + struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
21917 + struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
21918 + struct dpmac_link_cfg link_cfg;
21919 + u32 status;
21920 + int err;
21921 +
21922 + err = dpmac_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
21923 + DPMAC_IRQ_INDEX, &status);
21924 + if (unlikely(err || !status))
21925 + return IRQ_NONE;
21926 +
21927 + /* DPNI-initiated link configuration; 'ifconfig up' also calls this */
21928 + if (status & DPMAC_IRQ_EVENT_LINK_CFG_REQ) {
21929 + err = dpmac_get_link_cfg(mc_dev->mc_io, 0, mc_dev->mc_handle,
21930 + &link_cfg);
21931 + if (unlikely(err))
21932 + goto out;
21933 +
21934 + configure_link(priv, &link_cfg);
21935 + }
21936 +
21937 +out:
21938 + dpmac_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
21939 + DPMAC_IRQ_INDEX, status);
21940 +
21941 + return IRQ_HANDLED;
21942 +}
21943 +
21944 +static int setup_irqs(struct fsl_mc_device *mc_dev)
21945 +{
21946 + int err = 0;
21947 + struct fsl_mc_device_irq *irq;
21948 +
21949 + err = fsl_mc_allocate_irqs(mc_dev);
21950 + if (err) {
21951 + dev_err(&mc_dev->dev, "fsl_mc_allocate_irqs err %d\n", err);
21952 + return err;
21953 + }
21954 +
21955 + irq = mc_dev->irqs[0];
21956 + err = devm_request_threaded_irq(&mc_dev->dev, irq->msi_desc->irq,
21957 + NULL, &dpaa2_mac_irq_handler,
21958 + IRQF_NO_SUSPEND | IRQF_ONESHOT,
21959 + dev_name(&mc_dev->dev), &mc_dev->dev);
21960 + if (err) {
21961 + dev_err(&mc_dev->dev, "devm_request_threaded_irq err %d\n",
21962 + err);
21963 + goto free_irq;
21964 + }
21965 +
21966 + err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
21967 + DPMAC_IRQ_INDEX, DPMAC_IRQ_EVENT_LINK_CFG_REQ);
21968 + if (err) {
21969 + dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err);
21970 + goto free_irq;
21971 + }
21972 + err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
21973 + DPMAC_IRQ_INDEX, 1);
21974 + if (err) {
21975 + dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
21976 + goto free_irq;
21977 + }
21978 +
21979 + return 0;
21980 +
21981 +free_irq:
21982 + fsl_mc_free_irqs(mc_dev);
21983 +
21984 + return err;
21985 +}
21986 +
21987 +static void teardown_irqs(struct fsl_mc_device *mc_dev)
21988 +{
21989 + int err;
21990 +
21991 + err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
21992 + DPMAC_IRQ_INDEX, 0);
21993 + if (err)
21994 + dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
21995 +
21996 + fsl_mc_free_irqs(mc_dev);
21997 +}
21998 +
21999 +static struct device_node *find_dpmac_node(struct device *dev, u16 dpmac_id)
22000 +{
22001 + struct device_node *dpmacs, *dpmac = NULL;
22002 + struct device_node *mc_node = dev->of_node;
22003 + u32 id;
22004 + int err;
22005 +
22006 + dpmacs = of_find_node_by_name(mc_node, "dpmacs");
22007 + if (!dpmacs) {
22008 + dev_err(dev, "No dpmacs subnode in device-tree\n");
22009 + return NULL;
22010 + }
22011 +
22012 + while ((dpmac = of_get_next_child(dpmacs, dpmac))) {
22013 + err = of_property_read_u32(dpmac, "reg", &id);
22014 + if (err)
22015 + continue;
22016 + if (id == dpmac_id)
22017 + return dpmac;
22018 + }
22019 +
22020 + return NULL;
22021 +}
22022 +
22023 +static int dpaa2_mac_probe(struct fsl_mc_device *mc_dev)
22024 +{
22025 + struct device *dev;
22026 + struct dpaa2_mac_priv *priv = NULL;
22027 + struct device_node *phy_node, *dpmac_node;
22028 + struct net_device *netdev;
22029 + phy_interface_t if_mode;
22030 + int err = 0;
22031 +
22032 + dev = &mc_dev->dev;
22033 +
22034 + /* prepare a net_dev structure to make the phy lib API happy */
22035 + netdev = alloc_etherdev(sizeof(*priv));
22036 + if (!netdev) {
22037 + dev_err(dev, "alloc_etherdev error\n");
22038 + err = -ENOMEM;
22039 + goto err_exit;
22040 + }
22041 + priv = netdev_priv(netdev);
22042 + priv->mc_dev = mc_dev;
22043 + priv->netdev = netdev;
22044 +
22045 + SET_NETDEV_DEV(netdev, dev);
22046 +
22047 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
22048 + snprintf(netdev->name, IFNAMSIZ, "mac%d", mc_dev->obj_desc.id);
22049 +#endif
22050 +
22051 + dev_set_drvdata(dev, priv);
22052 +
22053 + err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
22054 + if (err || !mc_dev->mc_io) {
22055 + dev_err(dev, "fsl_mc_portal_allocate error: %d\n", err);
22056 + err = -ENODEV;
22057 + goto err_free_netdev;
22058 + }
22059 +
22060 + err = dpmac_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
22061 + &mc_dev->mc_handle);
22062 + if (err || !mc_dev->mc_handle) {
22063 + dev_err(dev, "dpmac_open error: %d\n", err);
22064 + err = -ENODEV;
22065 + goto err_free_mcp;
22066 + }
22067 +
22068 + err = dpmac_get_attributes(mc_dev->mc_io, 0,
22069 + mc_dev->mc_handle, &priv->attr);
22070 + if (err) {
22071 + dev_err(dev, "dpmac_get_attributes err %d\n", err);
22072 + err = -EINVAL;
22073 + goto err_close;
22074 + }
22075 +
22076 + /* Look up the DPMAC node in the device-tree. */
22077 + dpmac_node = find_dpmac_node(dev, priv->attr.id);
22078 + if (!dpmac_node) {
22079 + dev_err(dev, "No dpmac@%d subnode found.\n", priv->attr.id);
22080 + err = -ENODEV;
22081 + goto err_close;
22082 + }
22083 +
22084 + err = setup_irqs(mc_dev);
22085 + if (err) {
22086 + err = -EFAULT;
22087 + goto err_close;
22088 + }
22089 +
22090 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
22091 + /* OPTIONAL, register netdev just to make it visible to the user */
22092 + netdev->netdev_ops = &dpaa2_mac_ndo_ops;
22093 + netdev->ethtool_ops = &dpaa2_mac_ethtool_ops;
22094 +
22095 + /* phy starts up enabled so netdev should be up too */
22096 + netdev->flags |= IFF_UP;
22097 +
22098 + err = register_netdev(priv->netdev);
22099 + if (err < 0) {
22100 + dev_err(dev, "register_netdev error %d\n", err);
22101 + err = -ENODEV;
22102 + goto err_free_irq;
22103 + }
22104 +#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
22105 +
22106 + /* probe the PHY as a fixed-link if there's a phy-handle defined
22107 + * in the device tree
22108 + */
22109 + phy_node = of_parse_phandle(dpmac_node, "phy-handle", 0);
22110 + if (!phy_node) {
22111 + goto probe_fixed_link;
22112 + }
22113 +
22114 + if (priv->attr.eth_if < ARRAY_SIZE(dpaa2_mac_iface_mode)) {
22115 + if_mode = dpaa2_mac_iface_mode[priv->attr.eth_if];
22116 + dev_dbg(dev, "\tusing if mode %s for eth_if %d\n",
22117 + phy_modes(if_mode), priv->attr.eth_if);
22118 + } else {
22119 + dev_warn(dev, "Unexpected interface mode %d, will probe as fixed link\n",
22120 + priv->attr.eth_if);
22121 + goto probe_fixed_link;
22122 + }
22123 +
22124 + /* try to connect to the PHY */
22125 + netdev->phydev = of_phy_connect(netdev, phy_node,
22126 + &dpaa2_mac_link_changed, 0, if_mode);
22127 + if (!netdev->phydev) {
22128 + /* No need for dev_err(); the kernel's loud enough as it is. */
22129 + dev_dbg(dev, "Can't of_phy_connect() now.\n");
22130 + /* We might be waiting for the MDIO MUX to probe, so defer
22131 + * our own probing.
22132 + */
22133 + err = -EPROBE_DEFER;
22134 + goto err_defer;
22135 + }
22136 + dev_info(dev, "Connected to %s PHY.\n", phy_modes(if_mode));
22137 +
22138 +probe_fixed_link:
22139 + if (!netdev->phydev) {
22140 + struct fixed_phy_status status = {
22141 + .link = 1,
22142 + /* fixed-phys don't support 10Gbps speed for now */
22143 + .speed = 1000,
22144 + .duplex = 1,
22145 + };
22146 +
22147 + /* try to register a fixed link phy */
22148 + netdev->phydev = fixed_phy_register(PHY_POLL, &status, -1,
22149 + NULL);
22150 + if (!netdev->phydev || IS_ERR(netdev->phydev)) {
22151 + dev_err(dev, "error trying to register fixed PHY\n");
22152 + /* So we don't crash unregister_netdev() later on */
22153 + netdev->phydev = NULL;
22154 + err = -EFAULT;
22155 + goto err_no_phy;
22156 + }
22157 + dev_info(dev, "Registered fixed PHY.\n");
22158 + }
22159 +
22160 + dpaa2_mac_open(netdev);
22161 +
22162 + return 0;
22163 +
22164 +err_defer:
22165 +err_no_phy:
22166 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
22167 + unregister_netdev(netdev);
22168 +err_free_irq:
22169 +#endif
22170 + teardown_irqs(mc_dev);
22171 +err_close:
22172 + dpmac_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
22173 +err_free_mcp:
22174 + fsl_mc_portal_free(mc_dev->mc_io);
22175 +err_free_netdev:
22176 + free_netdev(netdev);
22177 +err_exit:
22178 + return err;
22179 +}
22180 +
22181 +static int dpaa2_mac_remove(struct fsl_mc_device *mc_dev)
22182 +{
22183 + struct device *dev = &mc_dev->dev;
22184 + struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
22185 + struct net_device *netdev = priv->netdev;
22186 +
22187 + dpaa2_mac_stop(netdev);
22188 +
22189 + if (phy_is_pseudo_fixed_link(netdev->phydev))
22190 + fixed_phy_unregister(netdev->phydev);
22191 + else
22192 + phy_disconnect(netdev->phydev);
22193 + netdev->phydev = NULL;
22194 +
22195 +#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
22196 + unregister_netdev(priv->netdev);
22197 +#endif
22198 + teardown_irqs(priv->mc_dev);
22199 + dpmac_close(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle);
22200 + fsl_mc_portal_free(priv->mc_dev->mc_io);
22201 + free_netdev(priv->netdev);
22202 +
22203 + dev_set_drvdata(dev, NULL);
22204 +
22205 + return 0;
22206 +}
22207 +
22208 +static const struct fsl_mc_device_id dpaa2_mac_match_id_table[] = {
22209 + {
22210 + .vendor = FSL_MC_VENDOR_FREESCALE,
22211 + .obj_type = "dpmac",
22212 + },
22213 + { .vendor = 0x0 }
22214 +};
22215 +MODULE_DEVICE_TABLE(fslmc, dpaa2_mac_match_id_table);
22216 +
22217 +static struct fsl_mc_driver dpaa2_mac_drv = {
22218 + .driver = {
22219 + .name = KBUILD_MODNAME,
22220 + .owner = THIS_MODULE,
22221 + },
22222 + .probe = dpaa2_mac_probe,
22223 + .remove = dpaa2_mac_remove,
22224 + .match_id_table = dpaa2_mac_match_id_table,
22225 +};
22226 +
22227 +module_fsl_mc_driver(dpaa2_mac_drv);
22228 +
22229 +MODULE_LICENSE("GPL");
22230 +MODULE_DESCRIPTION("DPAA2 PHY proxy interface driver");
22231 --- /dev/null
22232 +++ b/drivers/staging/fsl-dpaa2/rtc/Makefile
22233 @@ -0,0 +1,10 @@
22234 +
22235 +obj-$(CONFIG_PTP_1588_CLOCK_DPAA2) += dpaa2-rtc.o
22236 +
22237 +dpaa2-rtc-objs := rtc.o dprtc.o
22238 +
22239 +all:
22240 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
22241 +
22242 +clean:
22243 + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
22244 --- /dev/null
22245 +++ b/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
22246 @@ -0,0 +1,160 @@
22247 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
22248 + *
22249 + * Redistribution and use in source and binary forms, with or without
22250 + * modification, are permitted provided that the following conditions are met:
22251 + * * Redistributions of source code must retain the above copyright
22252 + * notice, this list of conditions and the following disclaimer.
22253 + * * Redistributions in binary form must reproduce the above copyright
22254 + * notice, this list of conditions and the following disclaimer in the
22255 + * documentation and/or other materials provided with the distribution.
22256 + * * Neither the name of the above-listed copyright holders nor the
22257 + * names of any contributors may be used to endorse or promote products
22258 + * derived from this software without specific prior written permission.
22259 + *
22260 + *
22261 + * ALTERNATIVELY, this software may be distributed under the terms of the
22262 + * GNU General Public License ("GPL") as published by the Free Software
22263 + * Foundation, either version 2 of that License or (at your option) any
22264 + * later version.
22265 + *
22266 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22267 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22268 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22269 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22270 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22271 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22272 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22273 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22274 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22275 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22276 + * POSSIBILITY OF SUCH DAMAGE.
22277 + */
22278 +#ifndef _FSL_DPRTC_CMD_H
22279 +#define _FSL_DPRTC_CMD_H
22280 +
22281 +/* DPRTC Version */
22282 +#define DPRTC_VER_MAJOR 2
22283 +#define DPRTC_VER_MINOR 0
22284 +
22285 +/* Command versioning */
22286 +#define DPRTC_CMD_BASE_VERSION 1
22287 +#define DPRTC_CMD_ID_OFFSET 4
22288 +
22289 +#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION)
22290 +
22291 +/* Command IDs */
22292 +#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800)
22293 +#define DPRTC_CMDID_OPEN DPRTC_CMD(0x810)
22294 +#define DPRTC_CMDID_CREATE DPRTC_CMD(0x910)
22295 +#define DPRTC_CMDID_DESTROY DPRTC_CMD(0x990)
22296 +#define DPRTC_CMDID_GET_API_VERSION DPRTC_CMD(0xa10)
22297 +
22298 +#define DPRTC_CMDID_ENABLE DPRTC_CMD(0x002)
22299 +#define DPRTC_CMDID_DISABLE DPRTC_CMD(0x003)
22300 +#define DPRTC_CMDID_GET_ATTR DPRTC_CMD(0x004)
22301 +#define DPRTC_CMDID_RESET DPRTC_CMD(0x005)
22302 +#define DPRTC_CMDID_IS_ENABLED DPRTC_CMD(0x006)
22303 +
22304 +#define DPRTC_CMDID_SET_IRQ_ENABLE DPRTC_CMD(0x012)
22305 +#define DPRTC_CMDID_GET_IRQ_ENABLE DPRTC_CMD(0x013)
22306 +#define DPRTC_CMDID_SET_IRQ_MASK DPRTC_CMD(0x014)
22307 +#define DPRTC_CMDID_GET_IRQ_MASK DPRTC_CMD(0x015)
22308 +#define DPRTC_CMDID_GET_IRQ_STATUS DPRTC_CMD(0x016)
22309 +#define DPRTC_CMDID_CLEAR_IRQ_STATUS DPRTC_CMD(0x017)
22310 +
22311 +#define DPRTC_CMDID_SET_CLOCK_OFFSET DPRTC_CMD(0x1d0)
22312 +#define DPRTC_CMDID_SET_FREQ_COMPENSATION DPRTC_CMD(0x1d1)
22313 +#define DPRTC_CMDID_GET_FREQ_COMPENSATION DPRTC_CMD(0x1d2)
22314 +#define DPRTC_CMDID_GET_TIME DPRTC_CMD(0x1d3)
22315 +#define DPRTC_CMDID_SET_TIME DPRTC_CMD(0x1d4)
22316 +#define DPRTC_CMDID_SET_ALARM DPRTC_CMD(0x1d5)
22317 +#define DPRTC_CMDID_SET_PERIODIC_PULSE DPRTC_CMD(0x1d6)
22318 +#define DPRTC_CMDID_CLEAR_PERIODIC_PULSE DPRTC_CMD(0x1d7)
22319 +#define DPRTC_CMDID_SET_EXT_TRIGGER DPRTC_CMD(0x1d8)
22320 +#define DPRTC_CMDID_CLEAR_EXT_TRIGGER DPRTC_CMD(0x1d9)
22321 +#define DPRTC_CMDID_GET_EXT_TRIGGER_TIMESTAMP DPRTC_CMD(0x1dA)
22322 +
22323 +/* Macros for accessing command fields smaller than 1byte */
22324 +#define DPRTC_MASK(field) \
22325 + GENMASK(DPRTC_##field##_SHIFT + DPRTC_##field##_SIZE - 1, \
22326 + DPRTC_##field##_SHIFT)
22327 +#define dprtc_get_field(var, field) \
22328 + (((var) & DPRTC_MASK(field)) >> DPRTC_##field##_SHIFT)
22329 +
22330 +#pragma pack(push, 1)
22331 +struct dprtc_cmd_open {
22332 + uint32_t dprtc_id;
22333 +};
22334 +
22335 +struct dprtc_cmd_destroy {
22336 + uint32_t object_id;
22337 +};
22338 +
22339 +#define DPRTC_ENABLE_SHIFT 0
22340 +#define DPRTC_ENABLE_SIZE 1
22341 +
22342 +struct dprtc_rsp_is_enabled {
22343 + uint8_t en;
22344 +};
22345 +
22346 +struct dprtc_cmd_get_irq {
22347 + uint32_t pad;
22348 + uint8_t irq_index;
22349 +};
22350 +
22351 +struct dprtc_cmd_set_irq_enable {
22352 + uint8_t en;
22353 + uint8_t pad[3];
22354 + uint8_t irq_index;
22355 +};
22356 +
22357 +struct dprtc_rsp_get_irq_enable {
22358 + uint8_t en;
22359 +};
22360 +
22361 +struct dprtc_cmd_set_irq_mask {
22362 + uint32_t mask;
22363 + uint8_t irq_index;
22364 +};
22365 +
22366 +struct dprtc_rsp_get_irq_mask {
22367 + uint32_t mask;
22368 +};
22369 +
22370 +struct dprtc_cmd_get_irq_status {
22371 + uint32_t status;
22372 + uint8_t irq_index;
22373 +};
22374 +
22375 +struct dprtc_rsp_get_irq_status {
22376 + uint32_t status;
22377 +};
22378 +
22379 +struct dprtc_cmd_clear_irq_status {
22380 + uint32_t status;
22381 + uint8_t irq_index;
22382 +};
22383 +
22384 +struct dprtc_rsp_get_attributes {
22385 + uint32_t pad;
22386 + uint32_t id;
22387 +};
22388 +
22389 +struct dprtc_cmd_set_clock_offset {
22390 + uint64_t offset;
22391 +};
22392 +
22393 +struct dprtc_get_freq_compensation {
22394 + uint32_t freq_compensation;
22395 +};
22396 +
22397 +struct dprtc_time {
22398 + uint64_t time;
22399 +};
22400 +
22401 +struct dprtc_rsp_get_api_version {
22402 + uint16_t major;
22403 + uint16_t minor;
22404 +};
22405 +#pragma pack(pop)
22406 +#endif /* _FSL_DPRTC_CMD_H */
22407 --- /dev/null
22408 +++ b/drivers/staging/fsl-dpaa2/rtc/dprtc.c
22409 @@ -0,0 +1,746 @@
22410 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
22411 + *
22412 + * Redistribution and use in source and binary forms, with or without
22413 + * modification, are permitted provided that the following conditions are met:
22414 + * * Redistributions of source code must retain the above copyright
22415 + * notice, this list of conditions and the following disclaimer.
22416 + * * Redistributions in binary form must reproduce the above copyright
22417 + * notice, this list of conditions and the following disclaimer in the
22418 + * documentation and/or other materials provided with the distribution.
22419 + * * Neither the name of the above-listed copyright holders nor the
22420 + * names of any contributors may be used to endorse or promote products
22421 + * derived from this software without specific prior written permission.
22422 + *
22423 + *
22424 + * ALTERNATIVELY, this software may be distributed under the terms of the
22425 + * GNU General Public License ("GPL") as published by the Free Software
22426 + * Foundation, either version 2 of that License or (at your option) any
22427 + * later version.
22428 + *
22429 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22430 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22431 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22432 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22433 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22434 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22435 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22436 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22437 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22438 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22439 + * POSSIBILITY OF SUCH DAMAGE.
22440 + */
22441 +#include "../../fsl-mc/include/mc-sys.h"
22442 +#include "../../fsl-mc/include/mc-cmd.h"
22443 +#include "dprtc.h"
22444 +#include "dprtc-cmd.h"
22445 +
22446 +/**
22447 + * dprtc_open() - Open a control session for the specified object.
22448 + * @mc_io: Pointer to MC portal's I/O object
22449 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22450 + * @dprtc_id: DPRTC unique ID
22451 + * @token: Returned token; use in subsequent API calls
22452 + *
22453 + * This function can be used to open a control session for an
22454 + * already created object; an object may have been declared in
22455 + * the DPL or by calling the dprtc_create function.
22456 + * This function returns a unique authentication token,
22457 + * associated with the specific object ID and the specific MC
22458 + * portal; this token must be used in all subsequent commands for
22459 + * this specific object
22460 + *
22461 + * Return: '0' on Success; Error code otherwise.
22462 + */
22463 +int dprtc_open(struct fsl_mc_io *mc_io,
22464 + uint32_t cmd_flags,
22465 + int dprtc_id,
22466 + uint16_t *token)
22467 +{
22468 + struct dprtc_cmd_open *cmd_params;
22469 + struct mc_command cmd = { 0 };
22470 + int err;
22471 +
22472 + /* prepare command */
22473 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN,
22474 + cmd_flags,
22475 + 0);
22476 + cmd_params = (struct dprtc_cmd_open *)cmd.params;
22477 + cmd_params->dprtc_id = cpu_to_le32(dprtc_id);
22478 +
22479 + /* send command to mc*/
22480 + err = mc_send_command(mc_io, &cmd);
22481 + if (err)
22482 + return err;
22483 +
22484 + /* retrieve response parameters */
22485 + *token = mc_cmd_hdr_read_token(&cmd);
22486 +
22487 + return err;
22488 +}
22489 +
22490 +/**
22491 + * dprtc_close() - Close the control session of the object
22492 + * @mc_io: Pointer to MC portal's I/O object
22493 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22494 + * @token: Token of DPRTC object
22495 + *
22496 + * After this function is called, no further operations are
22497 + * allowed on the object without opening a new control session.
22498 + *
22499 + * Return: '0' on Success; Error code otherwise.
22500 + */
22501 +int dprtc_close(struct fsl_mc_io *mc_io,
22502 + uint32_t cmd_flags,
22503 + uint16_t token)
22504 +{
22505 + struct mc_command cmd = { 0 };
22506 +
22507 + /* prepare command */
22508 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
22509 + token);
22510 +
22511 + /* send command to mc*/
22512 + return mc_send_command(mc_io, &cmd);
22513 +}
22514 +
22515 +/**
22516 + * dprtc_create() - Create the DPRTC object.
22517 + * @mc_io: Pointer to MC portal's I/O object
22518 + * @dprc_token: Parent container token; '0' for default container
22519 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22520 + * @cfg: Configuration structure
22521 + * @obj_id: Returned object id
22522 + *
22523 + * Create the DPRTC object, allocate required resources and
22524 + * perform required initialization.
22525 + *
22526 + * The function accepts an authentication token of a parent
22527 + * container that this object should be assigned to. The token
22528 + * can be '0' so the object will be assigned to the default container.
22529 + * The newly created object can be opened with the returned
22530 + * object id and using the container's associated tokens and MC portals.
22531 + *
22532 + * Return: '0' on Success; Error code otherwise.
22533 + */
22534 +int dprtc_create(struct fsl_mc_io *mc_io,
22535 + uint16_t dprc_token,
22536 + uint32_t cmd_flags,
22537 + const struct dprtc_cfg *cfg,
22538 + uint32_t *obj_id)
22539 +{
22540 + struct mc_command cmd = { 0 };
22541 + int err;
22542 +
22543 + (void)(cfg); /* unused */
22544 +
22545 + /* prepare command */
22546 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CREATE,
22547 + cmd_flags,
22548 + dprc_token);
22549 +
22550 + /* send command to mc*/
22551 + err = mc_send_command(mc_io, &cmd);
22552 + if (err)
22553 + return err;
22554 +
22555 + /* retrieve response parameters */
22556 + *obj_id = mc_cmd_read_object_id(&cmd);
22557 +
22558 + return 0;
22559 +}
22560 +
22561 +/**
22562 + * dprtc_destroy() - Destroy the DPRTC object and release all its resources.
22563 + * @mc_io: Pointer to MC portal's I/O object
22564 + * @dprc_token: Parent container token; '0' for default container
22565 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22566 + * @object_id: The object id; it must be a valid id within the container that
22567 + * created this object;
22568 + *
22569 + * The function accepts the authentication token of the parent container that
22570 + * created the object (not the one that currently owns the object). The object
22571 + * is searched within parent using the provided 'object_id'.
22572 + * All tokens to the object must be closed before calling destroy.
22573 + *
22574 + * Return: '0' on Success; error code otherwise.
22575 + */
22576 +int dprtc_destroy(struct fsl_mc_io *mc_io,
22577 + uint16_t dprc_token,
22578 + uint32_t cmd_flags,
22579 + uint32_t object_id)
22580 +{
22581 + struct dprtc_cmd_destroy *cmd_params;
22582 + struct mc_command cmd = { 0 };
22583 +
22584 + /* prepare command */
22585 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY,
22586 + cmd_flags,
22587 + dprc_token);
22588 + cmd_params = (struct dprtc_cmd_destroy *)cmd.params;
22589 + cmd_params->object_id = cpu_to_le32(object_id);
22590 +
22591 + /* send command to mc*/
22592 + return mc_send_command(mc_io, &cmd);
22593 +}
22594 +
22595 +int dprtc_enable(struct fsl_mc_io *mc_io,
22596 + uint32_t cmd_flags,
22597 + uint16_t token)
22598 +{
22599 + struct mc_command cmd = { 0 };
22600 +
22601 + /* prepare command */
22602 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags,
22603 + token);
22604 +
22605 + /* send command to mc*/
22606 + return mc_send_command(mc_io, &cmd);
22607 +}
22608 +
22609 +int dprtc_disable(struct fsl_mc_io *mc_io,
22610 + uint32_t cmd_flags,
22611 + uint16_t token)
22612 +{
22613 + struct mc_command cmd = { 0 };
22614 +
22615 + /* prepare command */
22616 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE,
22617 + cmd_flags,
22618 + token);
22619 +
22620 + /* send command to mc*/
22621 + return mc_send_command(mc_io, &cmd);
22622 +}
22623 +
22624 +int dprtc_is_enabled(struct fsl_mc_io *mc_io,
22625 + uint32_t cmd_flags,
22626 + uint16_t token,
22627 + int *en)
22628 +{
22629 + struct dprtc_rsp_is_enabled *rsp_params;
22630 + struct mc_command cmd = { 0 };
22631 + int err;
22632 +
22633 + /* prepare command */
22634 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_IS_ENABLED, cmd_flags,
22635 + token);
22636 +
22637 + /* send command to mc*/
22638 + err = mc_send_command(mc_io, &cmd);
22639 + if (err)
22640 + return err;
22641 +
22642 + /* retrieve response parameters */
22643 + rsp_params = (struct dprtc_rsp_is_enabled *)cmd.params;
22644 + *en = dprtc_get_field(rsp_params->en, ENABLE);
22645 +
22646 + return 0;
22647 +}
22648 +
22649 +int dprtc_reset(struct fsl_mc_io *mc_io,
22650 + uint32_t cmd_flags,
22651 + uint16_t token)
22652 +{
22653 + struct mc_command cmd = { 0 };
22654 +
22655 + /* prepare command */
22656 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET,
22657 + cmd_flags,
22658 + token);
22659 +
22660 + /* send command to mc*/
22661 + return mc_send_command(mc_io, &cmd);
22662 +}
22663 +
22664 +/**
22665 + * dprtc_set_irq_enable() - Set overall interrupt state.
22666 + * @mc_io: Pointer to MC portal's I/O object
22667 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22668 + * @token: Token of DPRTC object
22669 + * @irq_index: The interrupt index to configure
22670 + * @en: Interrupt state - enable = 1, disable = 0
22671 + *
22672 + * Allows GPP software to control when interrupts are generated.
22673 + * Each interrupt can have up to 32 causes. The enable/disable control's the
22674 + * overall interrupt state. if the interrupt is disabled no causes will cause
22675 + * an interrupt.
22676 + *
22677 + * Return: '0' on Success; Error code otherwise.
22678 + */
22679 +int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
22680 + uint32_t cmd_flags,
22681 + uint16_t token,
22682 + uint8_t irq_index,
22683 + uint8_t en)
22684 +{
22685 + struct dprtc_cmd_set_irq_enable *cmd_params;
22686 + struct mc_command cmd = { 0 };
22687 +
22688 + /* prepare command */
22689 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE,
22690 + cmd_flags,
22691 + token);
22692 + cmd_params = (struct dprtc_cmd_set_irq_enable *)cmd.params;
22693 + cmd_params->irq_index = irq_index;
22694 + cmd_params->en = en;
22695 +
22696 + /* send command to mc*/
22697 + return mc_send_command(mc_io, &cmd);
22698 +}
22699 +
22700 +/**
22701 + * dprtc_get_irq_enable() - Get overall interrupt state
22702 + * @mc_io: Pointer to MC portal's I/O object
22703 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22704 + * @token: Token of DPRTC object
22705 + * @irq_index: The interrupt index to configure
22706 + * @en: Returned interrupt state - enable = 1, disable = 0
22707 + *
22708 + * Return: '0' on Success; Error code otherwise.
22709 + */
22710 +int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
22711 + uint32_t cmd_flags,
22712 + uint16_t token,
22713 + uint8_t irq_index,
22714 + uint8_t *en)
22715 +{
22716 + struct dprtc_rsp_get_irq_enable *rsp_params;
22717 + struct dprtc_cmd_get_irq *cmd_params;
22718 + struct mc_command cmd = { 0 };
22719 + int err;
22720 +
22721 + /* prepare command */
22722 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_ENABLE,
22723 + cmd_flags,
22724 + token);
22725 + cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
22726 + cmd_params->irq_index = irq_index;
22727 +
22728 + /* send command to mc*/
22729 + err = mc_send_command(mc_io, &cmd);
22730 + if (err)
22731 + return err;
22732 +
22733 + /* retrieve response parameters */
22734 + rsp_params = (struct dprtc_rsp_get_irq_enable *)cmd.params;
22735 + *en = rsp_params->en;
22736 +
22737 + return 0;
22738 +}
22739 +
22740 +/**
22741 + * dprtc_set_irq_mask() - Set interrupt mask.
22742 + * @mc_io: Pointer to MC portal's I/O object
22743 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22744 + * @token: Token of DPRTC object
22745 + * @irq_index: The interrupt index to configure
22746 + * @mask: Event mask to trigger interrupt;
22747 + * each bit:
22748 + * 0 = ignore event
22749 + * 1 = consider event for asserting IRQ
22750 + *
22751 + * Every interrupt can have up to 32 causes and the interrupt model supports
22752 + * masking/unmasking each cause independently
22753 + *
22754 + * Return: '0' on Success; Error code otherwise.
22755 + */
22756 +int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
22757 + uint32_t cmd_flags,
22758 + uint16_t token,
22759 + uint8_t irq_index,
22760 + uint32_t mask)
22761 +{
22762 + struct dprtc_cmd_set_irq_mask *cmd_params;
22763 + struct mc_command cmd = { 0 };
22764 +
22765 + /* prepare command */
22766 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK,
22767 + cmd_flags,
22768 + token);
22769 + cmd_params = (struct dprtc_cmd_set_irq_mask *)cmd.params;
22770 + cmd_params->mask = cpu_to_le32(mask);
22771 + cmd_params->irq_index = irq_index;
22772 +
22773 + /* send command to mc*/
22774 + return mc_send_command(mc_io, &cmd);
22775 +}
22776 +
22777 +/**
22778 + * dprtc_get_irq_mask() - Get interrupt mask.
22779 + * @mc_io: Pointer to MC portal's I/O object
22780 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22781 + * @token: Token of DPRTC object
22782 + * @irq_index: The interrupt index to configure
22783 + * @mask: Returned event mask to trigger interrupt
22784 + *
22785 + * Every interrupt can have up to 32 causes and the interrupt model supports
22786 + * masking/unmasking each cause independently
22787 + *
22788 + * Return: '0' on Success; Error code otherwise.
22789 + */
22790 +int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
22791 + uint32_t cmd_flags,
22792 + uint16_t token,
22793 + uint8_t irq_index,
22794 + uint32_t *mask)
22795 +{
22796 + struct dprtc_rsp_get_irq_mask *rsp_params;
22797 + struct dprtc_cmd_get_irq *cmd_params;
22798 + struct mc_command cmd = { 0 };
22799 + int err;
22800 +
22801 + /* prepare command */
22802 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_MASK,
22803 + cmd_flags,
22804 + token);
22805 + cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
22806 + cmd_params->irq_index = irq_index;
22807 +
22808 + /* send command to mc*/
22809 + err = mc_send_command(mc_io, &cmd);
22810 + if (err)
22811 + return err;
22812 +
22813 + /* retrieve response parameters */
22814 + rsp_params = (struct dprtc_rsp_get_irq_mask *)cmd.params;
22815 + *mask = le32_to_cpu(rsp_params->mask);
22816 +
22817 + return 0;
22818 +}
22819 +
22820 +/**
22821 + * dprtc_get_irq_status() - Get the current status of any pending interrupts.
22822 + *
22823 + * @mc_io: Pointer to MC portal's I/O object
22824 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22825 + * @token: Token of DPRTC object
22826 + * @irq_index: The interrupt index to configure
22827 + * @status: Returned interrupts status - one bit per cause:
22828 + * 0 = no interrupt pending
22829 + * 1 = interrupt pending
22830 + *
22831 + * Return: '0' on Success; Error code otherwise.
22832 + */
22833 +int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
22834 + uint32_t cmd_flags,
22835 + uint16_t token,
22836 + uint8_t irq_index,
22837 + uint32_t *status)
22838 +{
22839 + struct dprtc_cmd_get_irq_status *cmd_params;
22840 + struct dprtc_rsp_get_irq_status *rsp_params;
22841 + struct mc_command cmd = { 0 };
22842 + int err;
22843 +
22844 + /* prepare command */
22845 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_STATUS,
22846 + cmd_flags,
22847 + token);
22848 + cmd_params = (struct dprtc_cmd_get_irq_status *)cmd.params;
22849 + cmd_params->status = cpu_to_le32(*status);
22850 + cmd_params->irq_index = irq_index;
22851 +
22852 + /* send command to mc*/
22853 + err = mc_send_command(mc_io, &cmd);
22854 + if (err)
22855 + return err;
22856 +
22857 + /* retrieve response parameters */
22858 + rsp_params = (struct dprtc_rsp_get_irq_status *)cmd.params;
22859 + *status = le32_to_cpu(rsp_params->status); /* MC responses are little-endian */
22860 +
22861 + return 0;
22862 +}
22863 +
22864 +/**
22865 + * dprtc_clear_irq_status() - Clear a pending interrupt's status
22866 + *
22867 + * @mc_io: Pointer to MC portal's I/O object
22868 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22869 + * @token: Token of DPRTC object
22870 + * @irq_index: The interrupt index to configure
22871 + * @status: Bits to clear (W1C) - one bit per cause:
22872 + * 0 = don't change
22873 + * 1 = clear status bit
22874 + *
22875 + * Return: '0' on Success; Error code otherwise.
22876 + */
22877 +int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
22878 + uint32_t cmd_flags,
22879 + uint16_t token,
22880 + uint8_t irq_index,
22881 + uint32_t status)
22882 +{
22883 + struct dprtc_cmd_clear_irq_status *cmd_params;
22884 + struct mc_command cmd = { 0 };
22885 +
22886 + /* prepare command */
22887 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS,
22888 + cmd_flags,
22889 + token);
22890 + cmd_params = (struct dprtc_cmd_clear_irq_status *)cmd.params;
22891 + cmd_params->irq_index = irq_index;
22892 + cmd_params->status = cpu_to_le32(status);
22893 +
22894 + /* send command to mc*/
22895 + return mc_send_command(mc_io, &cmd);
22896 +}
22897 +
22898 +/**
22899 + * dprtc_get_attributes - Retrieve DPRTC attributes.
22900 + *
22901 + * @mc_io: Pointer to MC portal's I/O object
22902 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22903 + * @token: Token of DPRTC object
22904 + * @attr: Returned object's attributes
22905 + *
22906 + * Return: '0' on Success; Error code otherwise.
22907 + */
22908 +int dprtc_get_attributes(struct fsl_mc_io *mc_io,
22909 + uint32_t cmd_flags,
22910 + uint16_t token,
22911 + struct dprtc_attr *attr)
22912 +{
22913 + struct dprtc_rsp_get_attributes *rsp_params;
22914 + struct mc_command cmd = { 0 };
22915 + int err;
22916 +
22917 + /* prepare command */
22918 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_ATTR,
22919 + cmd_flags,
22920 + token);
22921 +
22922 + /* send command to mc*/
22923 + err = mc_send_command(mc_io, &cmd);
22924 + if (err)
22925 + return err;
22926 +
22927 + /* retrieve response parameters */
22928 + rsp_params = (struct dprtc_rsp_get_attributes *)cmd.params;
22929 + attr->id = le32_to_cpu(rsp_params->id);
22930 +
22931 + return 0;
22932 +}
22933 +
22934 +/**
22935 + * dprtc_set_clock_offset() - Sets the clock's offset
22936 + * (usually relative to another clock).
22937 + *
22938 + * @mc_io: Pointer to MC portal's I/O object
22939 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22940 + * @token: Token of DPRTC object
22941 + * @offset: New clock offset (in nanoseconds).
22942 + *
22943 + * Return: '0' on Success; Error code otherwise.
22944 + */
22945 +int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
22946 + uint32_t cmd_flags,
22947 + uint16_t token,
22948 + int64_t offset)
22949 +{
22950 + struct dprtc_cmd_set_clock_offset *cmd_params;
22951 + struct mc_command cmd = { 0 };
22952 +
22953 + /* prepare command */
22954 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET,
22955 + cmd_flags,
22956 + token);
22957 + cmd_params = (struct dprtc_cmd_set_clock_offset *)cmd.params;
22958 + cmd_params->offset = cpu_to_le64(offset);
22959 +
22960 + /* send command to mc*/
22961 + return mc_send_command(mc_io, &cmd);
22962 +}
22963 +
22964 +/**
22965 + * dprtc_set_freq_compensation() - Sets a new frequency compensation value.
22966 + *
22967 + * @mc_io: Pointer to MC portal's I/O object
22968 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22969 + * @token: Token of DPRTC object
22970 + * @freq_compensation: The new frequency compensation value to set.
22971 + *
22972 + * Return: '0' on Success; Error code otherwise.
22973 + */
22974 +int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
22975 + uint32_t cmd_flags,
22976 + uint16_t token,
22977 + uint32_t freq_compensation)
22978 +{
22979 + struct dprtc_get_freq_compensation *cmd_params;
22980 + struct mc_command cmd = { 0 };
22981 +
22982 + /* prepare command */
22983 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
22984 + cmd_flags,
22985 + token);
22986 + cmd_params = (struct dprtc_get_freq_compensation *)cmd.params;
22987 + cmd_params->freq_compensation = cpu_to_le32(freq_compensation);
22988 +
22989 + /* send command to mc*/
22990 + return mc_send_command(mc_io, &cmd);
22991 +}
22992 +
22993 +/**
22994 + * dprtc_get_freq_compensation() - Retrieves the frequency compensation value
22995 + *
22996 + * @mc_io: Pointer to MC portal's I/O object
22997 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22998 + * @token: Token of DPRTC object
22999 + * @freq_compensation: Frequency compensation value
23000 + *
23001 + * Return: '0' on Success; Error code otherwise.
23002 + */
23003 +int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
23004 + uint32_t cmd_flags,
23005 + uint16_t token,
23006 + uint32_t *freq_compensation)
23007 +{
23008 + struct dprtc_get_freq_compensation *rsp_params;
23009 + struct mc_command cmd = { 0 };
23010 + int err;
23011 +
23012 + /* prepare command */
23013 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION,
23014 + cmd_flags,
23015 + token);
23016 +
23017 + /* send command to mc*/
23018 + err = mc_send_command(mc_io, &cmd);
23019 + if (err)
23020 + return err;
23021 +
23022 + /* retrieve response parameters */
23023 + rsp_params = (struct dprtc_get_freq_compensation *)cmd.params;
23024 + *freq_compensation = le32_to_cpu(rsp_params->freq_compensation);
23025 +
23026 + return 0;
23027 +}
23028 +
23029 +/**
23030 + * dprtc_get_time() - Returns the current RTC time.
23031 + *
23032 + * @mc_io: Pointer to MC portal's I/O object
23033 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23034 + * @token: Token of DPRTC object
23035 + * @time: Current RTC time.
23036 + *
23037 + * Return: '0' on Success; Error code otherwise.
23038 + */
23039 +int dprtc_get_time(struct fsl_mc_io *mc_io,
23040 + uint32_t cmd_flags,
23041 + uint16_t token,
23042 + uint64_t *time)
23043 +{
23044 + struct dprtc_time *rsp_params;
23045 + struct mc_command cmd = { 0 };
23046 + int err;
23047 +
23048 + /* prepare command */
23049 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME,
23050 + cmd_flags,
23051 + token);
23052 +
23053 + /* send command to mc*/
23054 + err = mc_send_command(mc_io, &cmd);
23055 + if (err)
23056 + return err;
23057 +
23058 + /* retrieve response parameters */
23059 + rsp_params = (struct dprtc_time *)cmd.params;
23060 + *time = le64_to_cpu(rsp_params->time);
23061 +
23062 + return 0;
23063 +}
23064 +
23065 +/**
23066 + * dprtc_set_time() - Updates current RTC time.
23067 + *
23068 + * @mc_io: Pointer to MC portal's I/O object
23069 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23070 + * @token: Token of DPRTC object
23071 + * @time: New RTC time.
23072 + *
23073 + * Return: '0' on Success; Error code otherwise.
23074 + */
23075 +int dprtc_set_time(struct fsl_mc_io *mc_io,
23076 + uint32_t cmd_flags,
23077 + uint16_t token,
23078 + uint64_t time)
23079 +{
23080 + struct dprtc_time *cmd_params;
23081 + struct mc_command cmd = { 0 };
23082 +
23083 + /* prepare command */
23084 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME,
23085 + cmd_flags,
23086 + token);
23087 + cmd_params = (struct dprtc_time *)cmd.params;
23088 + cmd_params->time = cpu_to_le64(time);
23089 +
23090 + /* send command to mc*/
23091 + return mc_send_command(mc_io, &cmd);
23092 +}
23093 +
23094 +/**
23095 + * dprtc_set_alarm() - Defines and sets alarm.
23096 + *
23097 + * @mc_io: Pointer to MC portal's I/O object
23098 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23099 + * @token: Token of DPRTC object
23100 + * @time: In nanoseconds, the time when the alarm
23101 + * should go off - must be a multiple of
23102 + * 1 microsecond
23103 + *
23104 + * Return: '0' on Success; Error code otherwise.
23105 + */
23106 +int dprtc_set_alarm(struct fsl_mc_io *mc_io,
23107 + uint32_t cmd_flags,
23108 + uint16_t token, uint64_t time)
23109 +{
23110 + struct dprtc_time *cmd_params;
23111 + struct mc_command cmd = { 0 };
23112 +
23113 + /* prepare command */
23114 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM,
23115 + cmd_flags,
23116 + token);
23117 + cmd_params = (struct dprtc_time *)cmd.params;
23118 + cmd_params->time = cpu_to_le64(time);
23119 +
23120 + /* send command to mc*/
23121 + return mc_send_command(mc_io, &cmd);
23122 +}
23123 +
23124 +/**
23125 + * dprtc_get_api_version() - Get Data Path Real Time Counter API version
23126 + * @mc_io: Pointer to MC portal's I/O object
23127 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23128 + * @major_ver: Major version of data path real time counter API
23129 + * @minor_ver: Minor version of data path real time counter API
23130 + *
23131 + * Return: '0' on Success; Error code otherwise.
23132 + */
23133 +int dprtc_get_api_version(struct fsl_mc_io *mc_io,
23134 + uint32_t cmd_flags,
23135 + uint16_t *major_ver,
23136 + uint16_t *minor_ver)
23137 +{
23138 + struct dprtc_rsp_get_api_version *rsp_params;
23139 + struct mc_command cmd = { 0 };
23140 + int err;
23141 +
23142 + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_API_VERSION,
23143 + cmd_flags,
23144 + 0);
23145 +
23146 + err = mc_send_command(mc_io, &cmd);
23147 + if (err)
23148 + return err;
23149 +
23150 + rsp_params = (struct dprtc_rsp_get_api_version *)cmd.params;
23151 + *major_ver = le16_to_cpu(rsp_params->major);
23152 + *minor_ver = le16_to_cpu(rsp_params->minor);
23153 +
23154 + return 0;
23155 +}
23156 --- /dev/null
23157 +++ b/drivers/staging/fsl-dpaa2/rtc/dprtc.h
23158 @@ -0,0 +1,172 @@
23159 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
23160 + *
23161 + * Redistribution and use in source and binary forms, with or without
23162 + * modification, are permitted provided that the following conditions are met:
23163 + * * Redistributions of source code must retain the above copyright
23164 + * notice, this list of conditions and the following disclaimer.
23165 + * * Redistributions in binary form must reproduce the above copyright
23166 + * notice, this list of conditions and the following disclaimer in the
23167 + * documentation and/or other materials provided with the distribution.
23168 + * * Neither the name of the above-listed copyright holders nor the
23169 + * names of any contributors may be used to endorse or promote products
23170 + * derived from this software without specific prior written permission.
23171 + *
23172 + *
23173 + * ALTERNATIVELY, this software may be distributed under the terms of the
23174 + * GNU General Public License ("GPL") as published by the Free Software
23175 + * Foundation, either version 2 of that License or (at your option) any
23176 + * later version.
23177 + *
23178 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23179 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23180 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23181 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
23182 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23183 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23184 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23185 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23186 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23187 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
23188 + * POSSIBILITY OF SUCH DAMAGE.
23189 + */
23190 +#ifndef __FSL_DPRTC_H
23191 +#define __FSL_DPRTC_H
23192 +
23193 +/* Data Path Real Time Counter API
23194 + * Contains initialization APIs and runtime control APIs for RTC
23195 + */
23196 +
23197 +struct fsl_mc_io;
23198 +
23199 +/**
23200 + * Number of irq's
23201 + */
23202 +#define DPRTC_MAX_IRQ_NUM 1
23203 +#define DPRTC_IRQ_INDEX 0
23204 +
23205 +/**
23206 + * Interrupt event masks:
23207 + */
23208 +
23209 +/**
23210 + * Interrupt event mask indicating alarm event had occurred
23211 + */
23212 +#define DPRTC_EVENT_ALARM 0x40000000
23213 +/**
23214 + * Interrupt event mask indicating periodic pulse event had occurred
23215 + */
23216 +#define DPRTC_EVENT_PPS 0x08000000
23217 +
23218 +int dprtc_open(struct fsl_mc_io *mc_io,
23219 + uint32_t cmd_flags,
23220 + int dprtc_id,
23221 + uint16_t *token);
23222 +
23223 +int dprtc_close(struct fsl_mc_io *mc_io,
23224 + uint32_t cmd_flags,
23225 + uint16_t token);
23226 +
23227 +/**
23228 + * struct dprtc_cfg - Structure representing DPRTC configuration
23229 + * @options: place holder
23230 + */
23231 +struct dprtc_cfg {
23232 + uint32_t options;
23233 +};
23234 +
23235 +int dprtc_create(struct fsl_mc_io *mc_io,
23236 + uint16_t dprc_token,
23237 + uint32_t cmd_flags,
23238 + const struct dprtc_cfg *cfg,
23239 + uint32_t *obj_id);
23240 +
23241 +int dprtc_destroy(struct fsl_mc_io *mc_io,
23242 + uint16_t dprc_token,
23243 + uint32_t cmd_flags,
23244 + uint32_t object_id);
23245 +
23246 +int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
23247 + uint32_t cmd_flags,
23248 + uint16_t token,
23249 + int64_t offset);
23250 +
23251 +int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
23252 + uint32_t cmd_flags,
23253 + uint16_t token,
23254 + uint32_t freq_compensation);
23255 +
23256 +int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
23257 + uint32_t cmd_flags,
23258 + uint16_t token,
23259 + uint32_t *freq_compensation);
23260 +
23261 +int dprtc_get_time(struct fsl_mc_io *mc_io,
23262 + uint32_t cmd_flags,
23263 + uint16_t token,
23264 + uint64_t *time);
23265 +
23266 +int dprtc_set_time(struct fsl_mc_io *mc_io,
23267 + uint32_t cmd_flags,
23268 + uint16_t token,
23269 + uint64_t time);
23270 +
23271 +int dprtc_set_alarm(struct fsl_mc_io *mc_io,
23272 + uint32_t cmd_flags,
23273 + uint16_t token,
23274 + uint64_t time);
23275 +
23276 +int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
23277 + uint32_t cmd_flags,
23278 + uint16_t token,
23279 + uint8_t irq_index,
23280 + uint8_t en);
23281 +
23282 +int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
23283 + uint32_t cmd_flags,
23284 + uint16_t token,
23285 + uint8_t irq_index,
23286 + uint8_t *en);
23287 +
23288 +int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
23289 + uint32_t cmd_flags,
23290 + uint16_t token,
23291 + uint8_t irq_index,
23292 + uint32_t mask);
23293 +
23294 +int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
23295 + uint32_t cmd_flags,
23296 + uint16_t token,
23297 + uint8_t irq_index,
23298 + uint32_t *mask);
23299 +
23300 +int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
23301 + uint32_t cmd_flags,
23302 + uint16_t token,
23303 + uint8_t irq_index,
23304 + uint32_t *status);
23305 +
23306 +int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
23307 + uint32_t cmd_flags,
23308 + uint16_t token,
23309 + uint8_t irq_index,
23310 + uint32_t status);
23311 +
23312 +/**
23313 + * struct dprtc_attr - Structure representing DPRTC attributes
23314 + * @id: DPRTC object ID
23315 + */
23316 +struct dprtc_attr {
23317 + int id;
23318 +};
23319 +
23320 +int dprtc_get_attributes(struct fsl_mc_io *mc_io,
23321 + uint32_t cmd_flags,
23322 + uint16_t token,
23323 + struct dprtc_attr *attr);
23324 +
23325 +int dprtc_get_api_version(struct fsl_mc_io *mc_io,
23326 + uint32_t cmd_flags,
23327 + uint16_t *major_ver,
23328 + uint16_t *minor_ver);
23329 +
23330 +#endif /* __FSL_DPRTC_H */
23331 --- /dev/null
23332 +++ b/drivers/staging/fsl-dpaa2/rtc/rtc.c
23333 @@ -0,0 +1,243 @@
23334 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
23335 + *
23336 + * Redistribution and use in source and binary forms, with or without
23337 + * modification, are permitted provided that the following conditions are met:
23338 + * * Redistributions of source code must retain the above copyright
23339 + * notice, this list of conditions and the following disclaimer.
23340 + * * Redistributions in binary form must reproduce the above copyright
23341 + * notice, this list of conditions and the following disclaimer in the
23342 + * documentation and/or other materials provided with the distribution.
23343 + * * Neither the name of the above-listed copyright holders nor the
23344 + * names of any contributors may be used to endorse or promote products
23345 + * derived from this software without specific prior written permission.
23346 + *
23347 + *
23348 + * ALTERNATIVELY, this software may be distributed under the terms of the
23349 + * GNU General Public License ("GPL") as published by the Free Software
23350 + * Foundation, either version 2 of that License or (at your option) any
23351 + * later version.
23352 + *
23353 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23354 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23355 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23356 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
23357 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23358 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23359 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23360 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23361 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23362 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
23363 + * POSSIBILITY OF SUCH DAMAGE.
23364 + */
23365 +
23366 +#include <linux/module.h>
23367 +#include <linux/ptp_clock_kernel.h>
23368 +
23369 +#include "../../fsl-mc/include/mc.h"
23370 +#include "../../fsl-mc/include/mc-sys.h"
23371 +
23372 +#include "dprtc.h"
23373 +#include "dprtc-cmd.h"
23374 +
23375 +#define N_EXT_TS 2
23376 +
23377 +struct ptp_clock *clock;
23378 +struct fsl_mc_device *rtc_mc_dev;
23379 +u32 freqCompensation;
23380 +
23381 +/* PTP clock operations */
23382 +static int ptp_dpaa2_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
23383 +{
23384 + u64 adj;
23385 + u32 diff, tmr_add;
23386 + int neg_adj = 0;
23387 + int err = 0;
23388 + struct fsl_mc_device *mc_dev = rtc_mc_dev;
23389 + struct device *dev = &mc_dev->dev;
23390 +
23391 + if (ppb < 0) {
23392 + neg_adj = 1;
23393 + ppb = -ppb;
23394 + }
23395 +
23396 + tmr_add = freqCompensation;
23397 + adj = tmr_add;
23398 + adj *= ppb;
23399 + diff = div_u64(adj, 1000000000ULL);
23400 +
23401 + tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
23402 +
23403 + err = dprtc_set_freq_compensation(mc_dev->mc_io, 0,
23404 + mc_dev->mc_handle, tmr_add);
23405 + if (err)
23406 + dev_err(dev, "dprtc_set_freq_compensation err %d\n", err);
23407 + return 0;
23408 +}
23409 +
23410 +static int ptp_dpaa2_adjtime(struct ptp_clock_info *ptp, s64 delta)
23411 +{
23412 + u64 now; /* dprtc_get_time()/dprtc_set_time() take uint64_t */
23413 + int err = 0;
23414 + struct fsl_mc_device *mc_dev = rtc_mc_dev;
23415 + struct device *dev = &mc_dev->dev;
23416 +
23417 + err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &now);
23418 + if (err) {
23419 + dev_err(dev, "dprtc_get_time err %d\n", err);
23420 + return 0;
23421 + }
23422 +
23423 + now += delta;
23424 +
23425 + err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, now);
23426 + if (err) {
23427 + dev_err(dev, "dprtc_set_time err %d\n", err);
23428 + return 0;
23429 + }
23430 + return 0;
23431 +}
23432 +
23433 +static int ptp_dpaa2_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
23434 +{
23435 + u64 ns;
23436 + u32 remainder;
23437 + int err = 0;
23438 + struct fsl_mc_device *mc_dev = rtc_mc_dev;
23439 + struct device *dev = &mc_dev->dev;
23440 +
23441 + err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &ns);
23442 + if (err) {
23443 + dev_err(dev, "dprtc_get_time err %d\n", err);
23444 + return 0;
23445 + }
23446 +
23447 + ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
23448 + ts->tv_nsec = remainder;
23449 + return 0;
23450 +}
23451 +
23452 +static int ptp_dpaa2_settime(struct ptp_clock_info *ptp,
23453 + const struct timespec *ts)
23454 +{
23455 + u64 ns;
23456 + int err = 0;
23457 + struct fsl_mc_device *mc_dev = rtc_mc_dev;
23458 + struct device *dev = &mc_dev->dev;
23459 +
23460 + ns = ts->tv_sec * 1000000000ULL;
23461 + ns += ts->tv_nsec;
23462 +
23463 + err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, ns);
23464 + if (err)
23465 + dev_err(dev, "dprtc_set_time err %d\n", err);
23466 + return 0;
23467 +}
23468 +
23469 +static struct ptp_clock_info ptp_dpaa2_caps = {
23470 + .owner = THIS_MODULE,
23471 + .name = "dpaa2 clock",
23472 + .max_adj = 512000,
23473 + .n_alarm = 0,
23474 + .n_ext_ts = N_EXT_TS,
23475 + .n_per_out = 0,
23476 + .n_pins = 0,
23477 + .pps = 1,
23478 + .adjfreq = ptp_dpaa2_adjfreq,
23479 + .adjtime = ptp_dpaa2_adjtime,
23480 + .gettime64 = ptp_dpaa2_gettime,
23481 + .settime64 = ptp_dpaa2_settime,
23482 +};
23483 +
23484 +static int rtc_probe(struct fsl_mc_device *mc_dev)
23485 +{
23486 + struct device *dev;
23487 + int err = 0;
23488 + int dpaa2_phc_index;
23489 + u32 tmr_add = 0;
23490 +
23491 + if (!mc_dev)
23492 + return -EFAULT;
23493 +
23494 + dev = &mc_dev->dev;
23495 +
23496 + err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
23497 + if (unlikely(err)) {
23498 + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
23499 + goto err_exit;
23500 + }
23501 + if (!mc_dev->mc_io) {
23502 + dev_err(dev,
23503 + "fsl_mc_portal_allocate returned null handle but no error\n");
23504 + err = -EFAULT;
23505 + goto err_exit;
23506 + }
23507 +
23508 + err = dprtc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
23509 + &mc_dev->mc_handle);
23510 + if (err) {
23511 + dev_err(dev, "dprtc_open err %d\n", err);
23512 + goto err_free_mcp;
23513 + }
23514 + if (!mc_dev->mc_handle) {
23515 + dev_err(dev, "dprtc_open returned null handle but no error\n");
23516 + err = -EFAULT;
23517 + goto err_free_mcp;
23518 + }
23519 +
23520 + rtc_mc_dev = mc_dev;
23521 +
23522 + err = dprtc_get_freq_compensation(mc_dev->mc_io, 0,
23523 + mc_dev->mc_handle, &tmr_add);
23524 + if (err) {
23525 + dev_err(dev, "dprtc_get_freq_compensation err %d\n", err);
23526 + goto err_close;
23527 + }
23528 + freqCompensation = tmr_add;
23529 +
23530 + clock = ptp_clock_register(&ptp_dpaa2_caps, dev);
23531 + if (IS_ERR(clock)) {
23532 + err = PTR_ERR(clock);
23533 + goto err_close;
23534 + }
23535 + dpaa2_phc_index = ptp_clock_index(clock);
23536 +
23537 + return 0;
23538 +err_close:
23539 + dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
23540 +err_free_mcp:
23541 + fsl_mc_portal_free(mc_dev->mc_io);
23542 +err_exit:
23543 + return err;
23544 +}
23545 +
23546 +static int rtc_remove(struct fsl_mc_device *mc_dev)
23547 +{
23548 + ptp_clock_unregister(clock);
23549 + dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
23550 + fsl_mc_portal_free(mc_dev->mc_io);
23551 +
23552 + return 0;
23553 +}
23554 +
23555 +static const struct fsl_mc_device_id rtc_match_id_table[] = {
23556 + {
23557 + .vendor = FSL_MC_VENDOR_FREESCALE,
23558 + .obj_type = "dprtc",
23559 + },
23560 + {}
23561 +};
23562 +
23563 +static struct fsl_mc_driver rtc_drv = {
23564 + .driver = {
23565 + .name = KBUILD_MODNAME,
23566 + .owner = THIS_MODULE,
23567 + },
23568 + .probe = rtc_probe,
23569 + .remove = rtc_remove,
23570 + .match_id_table = rtc_match_id_table,
23571 +};
23572 +
23573 +module_fsl_mc_driver(rtc_drv);
23574 +
23575 +MODULE_LICENSE("GPL");
23576 +MODULE_DESCRIPTION("DPAA2 RTC (PTP 1588 clock) driver (prototype)");