layerscape: add 64b/32b target for ls1043ardb device
target/linux/layerscape/patches-4.4/7016-dpa-add-dpaa_eth-driver.patch
1 From 2af9b49c7e6bad2dee75960ddf61fd52a4d3748f Mon Sep 17 00:00:00 2001
2 From: Zhao Qiang <qiang.zhao@nxp.com>
3 Date: Wed, 16 Dec 2015 22:00:36 +0200
4 Subject: [PATCH 16/70] dpa: add dpaa_eth driver
5
6 DPAA is the Data Path Acceleration Architecture; it provides
7 the infrastructure to support simplified sharing of networking
8 interfaces and accelerators by multiple CPUs.
9
10 Signed-off-by: Madalin Bucur <madalin.bucur@freescale.com>
11 Signed-off-by: Camelia Groza <camelia.groza@freescale.com>
12 Signed-off-by: Alex Porosanu <alexandru.porosanu@nxp.com>
13 Signed-off-by: Pan Jiafei <Jiafei.Pan@nxp.com>
14 Signed-off-by: Shaohui Xie <Shaohui.Xie@nxp.com>
15 Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
16 ---
17 drivers/net/ethernet/freescale/Kconfig | 2 +
18 drivers/net/ethernet/freescale/Makefile | 1 +
19 drivers/net/ethernet/freescale/sdk_dpaa/Kconfig | 187 ++
20 drivers/net/ethernet/freescale/sdk_dpaa/Makefile | 59 +
21 .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.c | 580 ++++++
22 .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.h | 138 ++
23 .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c | 180 ++
24 .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h | 43 +
25 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c | 1183 +++++++++++
26 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h | 695 +++++++
27 .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.c | 263 +++
28 .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.h | 50 +
29 .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c | 1719 ++++++++++++++++
30 .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h | 230 +++
31 .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.c | 1787 ++++++++++++++++
32 .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.h | 227 +++
33 .../ethernet/freescale/sdk_dpaa/dpaa_eth_generic.c | 1735 ++++++++++++++++
34 .../ethernet/freescale/sdk_dpaa/dpaa_eth_generic.h | 90 +
35 .../freescale/sdk_dpaa/dpaa_eth_generic_sysfs.c | 201 ++
36 .../ethernet/freescale/sdk_dpaa/dpaa_eth_macless.c | 499 +++++
37 .../ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.c | 2156 ++++++++++++++++++++
38 .../ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.h | 294 +++
39 .../ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c | 381 ++++
40 .../net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c | 1128 ++++++++++
41 .../ethernet/freescale/sdk_dpaa/dpaa_eth_shared.c | 914 +++++++++
42 .../ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c | 278 +++
43 .../ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h | 144 ++
44 .../net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c | 544 +++++
45 .../freescale/sdk_dpaa/dpaa_generic_ethtool.c | 286 +++
46 .../freescale/sdk_dpaa/dpaa_macsec_ethtool.c | 250 +++
47 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c | 287 +++
48 drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c | 915 +++++++++
49 drivers/net/ethernet/freescale/sdk_dpaa/mac.c | 470 +++++
50 drivers/net/ethernet/freescale/sdk_dpaa/mac.h | 134 ++
51 .../net/ethernet/freescale/sdk_dpaa/offline_port.c | 848 ++++++++
52 .../net/ethernet/freescale/sdk_dpaa/offline_port.h | 59 +
53 36 files changed, 18957 insertions(+)
54 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
55 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Makefile
56 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
57 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h
58 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c
59 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h
60 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
61 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
62 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c
63 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h
64 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
65 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h
66 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
67 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h
68 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic.c
69 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic.h
70 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic_sysfs.c
71 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macless.c
72 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.c
73 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.h
74 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c
75 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
76 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_shared.c
77 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c
78 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h
79 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
80 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_generic_ethtool.c
81 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_macsec_ethtool.c
82 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c
83 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
84 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.c
85 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.h
86 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c
87 create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h
88
89 --- a/drivers/net/ethernet/freescale/Kconfig
90 +++ b/drivers/net/ethernet/freescale/Kconfig
91 @@ -93,4 +93,6 @@ config GIANFAR
92 on the 8540.
93
94 source "drivers/net/ethernet/freescale/sdk_fman/Kconfig"
95 +source "drivers/net/ethernet/freescale/sdk_dpaa/Kconfig"
96 +
97 endif # NET_VENDOR_FREESCALE
98 --- a/drivers/net/ethernet/freescale/Makefile
99 +++ b/drivers/net/ethernet/freescale/Makefile
100 @@ -18,3 +18,4 @@ gianfar_driver-objs := gianfar.o \
101 obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
102 ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
103 obj-$(if $(CONFIG_FSL_SDK_FMAN),y) += sdk_fman/
104 +obj-$(if $(CONFIG_FSL_SDK_DPAA_ETH),y) += sdk_dpaa/
105 --- /dev/null
106 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
107 @@ -0,0 +1,187 @@
108 +menuconfig FSL_SDK_DPAA_ETH
109 + tristate "DPAA Ethernet"
110 + depends on (FSL_SOC || ARM64 || ARM) && FSL_BMAN && FSL_QMAN && FSL_SDK_FMAN
111 + select PHYLIB
112 + ---help---
113 + Data Path Acceleration Architecture Ethernet driver,
114 + supporting the Freescale QorIQ chips.
115 + Depends on the Freescale Buffer Manager, Queue Manager
116 + and Frame Manager drivers.
117 +
118 +if FSL_SDK_DPAA_ETH
119 +
120 +config FSL_DPAA_HOOKS
121 + bool "DPAA Ethernet driver hooks"
122 +
123 +config FSL_DPAA_MACSEC
124 + tristate "DPAA MACSEC"
125 + select FSL_DPAA_HOOKS
126 + ---help---
127 + Enable MACSEC support in DPAA.
128 +
129 +config FSL_DPAA_CEETM
130 + bool "DPAA CEETM QoS"
131 + select NET_SCHED
132 + default n
133 + ---help---
134 + Enable QoS offloading support through the CEETM hardware block.
135 +
136 +config FSL_DPAA_OFFLINE_PORTS
137 + bool "Offline Ports support"
138 + depends on FSL_SDK_DPAA_ETH
139 + default y
140 + ---help---
141 + The Offline Parsing / Host Command ports (short: OH ports, or Offline ports) provide
142 + most of the functionality of the regular, online ports, except they receive their
143 + frames from a core or an accelerator on the SoC, via QMan frame queues,
144 + rather than directly from the network.
145 + Offline ports are configured via PCD (Parse-Classify-Distribute) schemes, just like
146 + any online FMan port. They deliver the processed frames to frame queues, according
147 + to the applied PCD configurations.
148 +
149 + Choosing this feature will not impact the functionality and/or performance of the system,
150 + so it is safe to enable it.
151 +
152 +config FSL_DPAA_ADVANCED_DRIVERS
153 + bool "Advanced DPAA Ethernet drivers"
154 + depends on FSL_SDK_DPAA_ETH
155 + default y
156 + ---help---
157 + Besides the standard DPAA Ethernet driver, other flavours of DPAA
158 + drivers are available that support advanced scenarios:
159 + - DPAA Shared MAC driver
160 + - DPAA MAC-less driver
161 + - DPAA Proxy initialization driver (for USDPAA)
162 + Select this to also build the advanced drivers.
163 +
164 +config FSL_DPAA_GENERIC_DRIVER
165 + bool "Generic DPAA Ethernet driver"
166 + depends on FSL_SDK_DPAA_ETH
167 + default y
168 + ---help---
169 + This enables the DPAA Generic driver (oNIC).
170 +
171 +config FSL_DPAA_ETH_JUMBO_FRAME
172 + bool "Optimize for jumbo frames"
173 + depends on !ARM64 && !ARM
174 + default n
175 + ---help---
176 + Optimize the DPAA Ethernet driver throughput for large-frame
177 + termination traffic (e.g. 4K and above).
178 + NOTE: This option can only be used if FSL_FM_MAX_FRAME_SIZE
179 + is set to 9600 bytes.
180 + Using this option in combination with small frames significantly
181 + increases the driver's memory footprint and may even deplete
182 + the system memory.
183 + This option is not available on LS1043.
184 +
185 +config FSL_DPAA_TS
186 + bool "Linux compliant timestamping"
187 + depends on FSL_SDK_DPAA_ETH
188 + default n
189 + ---help---
190 + Enable Linux API compliant timestamping support.
191 +
192 +config FSL_DPAA_1588
193 + bool "IEEE 1588-compliant timestamping"
194 + depends on FSL_SDK_DPAA_ETH
195 + select FSL_DPAA_TS
196 + default n
197 + ---help---
198 + Enable IEEE1588 support code.
199 +
200 +config FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
201 + bool "Use driver's Tx queue selection mechanism"
202 + default y
203 + depends on FSL_SDK_DPAA_ETH
204 + ---help---
205 + The DPAA-Ethernet driver defines an ndo_select_queue() callback for optimal selection
206 + of the egress FQ. That will override the XPS support for this netdevice.
207 + If for whatever reason you want to be in control of the egress FQ-to-CPU selection and mapping,
208 + or simply don't want to use the driver's ndo_select_queue() callback, then unselect this
209 + and use the standard XPS support instead.
210 +
211 +config FSL_DPAA_ETH_MAX_BUF_COUNT
212 + int "Maximum number of buffers in private bpool"
213 + depends on FSL_SDK_DPAA_ETH
214 + range 64 2048
215 + default "128"
216 + ---help---
217 + The maximum number of buffers to be allocated by default in the DPAA-Ethernet private port's
218 + buffer pool. One needn't normally modify this, as it has probably been tuned for performance
219 + already. This cannot be lower than DPAA_ETH_REFILL_THRESHOLD.
220 +
221 +config FSL_DPAA_ETH_REFILL_THRESHOLD
222 + int "Private bpool refill threshold"
223 + depends on FSL_SDK_DPAA_ETH
224 + range 32 FSL_DPAA_ETH_MAX_BUF_COUNT
225 + default "80"
226 + ---help---
227 + The DPAA-Ethernet driver will start replenishing buffer pools whose count
228 + falls below this threshold. This must be related to DPAA_ETH_MAX_BUF_COUNT. One needn't normally
229 + modify this value unless one has very specific performance reasons.
230 +
231 +config FSL_DPAA_CS_THRESHOLD_1G
232 + hex "Egress congestion threshold on 1G ports"
233 + depends on FSL_SDK_DPAA_ETH
234 + range 0x1000 0x10000000
235 + default "0x06000000"
236 + ---help---
237 + The size in bytes of the egress Congestion State notification threshold on 1G ports.
238 + The 1G dTSECs can quite easily be flooded by cores doing Tx in a tight loop
239 + (e.g. by sending UDP datagrams at "while(1) speed"),
240 + and the larger the frame size, the more acute the problem.
241 + So we have to find a balance between these factors:
242 + - avoiding the device staying congested for a prolonged time (risking
243 + that the netdev watchdog fires - see also the tx_timeout module param);
244 + - affecting performance of protocols such as TCP, which otherwise
245 + behave well under the congestion notification mechanism;
246 + - preventing the Tx cores from tightly-looping (as if the congestion
247 + threshold was too low to be effective);
248 + - running out of memory if the CS threshold is set too high.
249 +
250 +config FSL_DPAA_CS_THRESHOLD_10G
251 + hex "Egress congestion threshold on 10G ports"
252 + depends on FSL_SDK_DPAA_ETH
253 + range 0x1000 0x20000000
254 + default "0x10000000"
255 +
256 +config FSL_DPAA_INGRESS_CS_THRESHOLD
257 + hex "Ingress congestion threshold on FMan ports"
258 + depends on FSL_SDK_DPAA_ETH
259 + default "0x10000000"
260 + ---help---
261 + The size in bytes of the ingress tail-drop threshold on FMan ports.
262 + Traffic piling up above this value will be rejected by QMan and discarded by FMan.
263 +
264 +config FSL_DPAA_ETH_DEBUGFS
265 + bool "DPAA Ethernet debugfs interface"
266 + depends on DEBUG_FS && FSL_SDK_DPAA_ETH
267 + default y
268 + ---help---
269 + This option compiles debugfs code for the DPAA Ethernet driver.
270 +
271 +config FSL_DPAA_ETH_DEBUG
272 + bool "DPAA Ethernet Debug Support"
273 + depends on FSL_SDK_DPAA_ETH
274 + default n
275 + ---help---
276 + This option compiles debug code for the DPAA Ethernet driver.
277 +
278 +config FSL_DPAA_DBG_LOOP
279 + bool "DPAA Ethernet Debug loopback"
280 + depends on FSL_DPAA_ETH_DEBUGFS && FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
281 + default n
282 + ---help---
283 + This option allows diverting all received traffic on a certain interface A towards a
284 + selected interface B. This option is used to benchmark the HW + Ethernet driver in
285 + isolation from the Linux networking stack. The loops are controlled by debugfs entries,
286 + one for each interface. By default all loops are disabled (target value is -1). E.g. to
287 + change the loop setting for interface 4 and divert all received traffic to interface 5,
288 + write the Tx interface number into the receive interface's debugfs file:
289 + # cat /sys/kernel/debug/fsl_dpa/eth4_loop
290 + 4->-1
291 + # echo 5 > /sys/kernel/debug/fsl_dpa/eth4_loop
292 + # cat /sys/kernel/debug/fsl_dpa/eth4_loop
293 + 4->5
294 +endif # FSL_SDK_DPAA_ETH
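
For illustration of the FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE help text above: the
policy it describes amounts to a fixed CPU-to-egress-queue mapping. The sketch
below is hypothetical (the function name and the simple modulo policy are
invented and this is not the driver's actual callback), but it uses the real
4.4-era ndo_select_queue() prototype.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/smp.h>

/* Hypothetical sketch: pick the egress queue by the transmitting CPU so
 * that each core keeps using "its" Tx queue; XPS would otherwise make
 * this mapping user-configurable.
 */
static u16 dpa_sketch_select_queue(struct net_device *dev,
				   struct sk_buff *skb,
				   void *accel_priv,
				   select_queue_fallback_t fallback)
{
	return (u16)(smp_processor_id() % dev->real_num_tx_queues);
}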
295 --- /dev/null
296 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/Makefile
297 @@ -0,0 +1,59 @@
298 +#
299 +# Makefile for the Freescale Ethernet controllers
300 +#
301 +ccflags-y += -DVERSION=\"\"
302 +#
303 +# Include netcomm SW specific definitions
304 +include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
305 +
306 +ccflags-y += -I$(NET_DPA)
307 +
308 +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_mac.o fsl_dpa.o
309 +obj-$(CONFIG_PTP_1588_CLOCK_DPAA) += dpaa_ptp.o
310 +
311 +fsl_dpa-objs += dpaa_ethtool.o dpaa_eth_sysfs.o dpaa_eth.o dpaa_eth_sg.o dpaa_eth_common.o
312 +ifeq ($(CONFIG_FSL_DPAA_DBG_LOOP),y)
313 +fsl_dpa-objs += dpaa_debugfs.o
314 +endif
315 +ifeq ($(CONFIG_FSL_DPAA_1588),y)
316 +fsl_dpa-objs += dpaa_1588.o
317 +endif
318 +ifeq ($(CONFIG_FSL_DPAA_CEETM),y)
319 +ccflags-y += -Idrivers/net/ethernet/freescale/sdk_fman/src/wrapper
320 +fsl_dpa-objs += dpaa_eth_ceetm.o
321 +endif
322 +
323 +fsl_mac-objs += mac.o mac-api.o
324 +
325 +# Advanced drivers
326 +ifeq ($(CONFIG_FSL_DPAA_ADVANCED_DRIVERS),y)
327 +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_advanced.o
328 +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_proxy.o
329 +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_dpa_shared.o
330 +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_dpa_macless.o
331 +obj-$(CONFIG_FSL_DPAA_MACSEC) += fsl_dpa_macsec.o
332 +
333 +fsl_advanced-objs += dpaa_eth_base.o
334 +# support for multiple drivers per kernel module comes in kernel 3.14
335 +# so we are forced to generate several modules for the advanced drivers
336 +fsl_proxy-objs += dpaa_eth_proxy.o
337 +fsl_dpa_shared-objs += dpaa_eth_shared.o
338 +fsl_dpa_macless-objs += dpaa_eth_macless.o
339 +fsl_dpa_macsec-objs += dpaa_eth_macsec.o dpaa_macsec_ethtool.o
340 +
341 +ifeq ($(CONFIG_FSL_DPAA_OFFLINE_PORTS),y)
342 +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_oh.o
343 +
344 +fsl_oh-objs += offline_port.o
345 +endif
346 +endif
347 +
348 +# Generic driver
349 +ifeq ($(CONFIG_FSL_DPAA_GENERIC_DRIVER),y)
350 +obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_generic.o
351 +
352 +fsl_generic-objs += dpaa_eth_generic.o dpaa_eth_generic_sysfs.o dpaa_generic_ethtool.o
353 +endif
354 +
355 +# Needed by the tracing framework
356 +CFLAGS_dpaa_eth.o := -I$(src)
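
The final CFLAGS line above is needed because kernel trace headers are
re-included by <trace/define_trace.h> relative to TRACE_INCLUDE_PATH, so the
module's own directory must be on the include path. A minimal, hypothetical
trace header (the names here are invented; dpaa_eth_trace.h follows the same
pattern) looks like this:

/* demo_trace.h - minimal trace header consumed via CREATE_TRACE_POINTS */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo

#if !defined(_DEMO_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _DEMO_TRACE_H

#include <linux/tracepoint.h>

TRACE_EVENT(demo_event,
	TP_PROTO(int value),
	TP_ARGS(value),
	TP_STRUCT__entry(__field(int, value)),
	TP_fast_assign(__entry->value = value;),
	TP_printk("value=%d", __entry->value)
);

#endif /* _DEMO_TRACE_H */

/* This tail is what forces the re-inclusion and thus the -I$(src) flag */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE demo_trace
#include <trace/define_trace.h>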
357 --- /dev/null
358 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
359 @@ -0,0 +1,580 @@
360 +/* Copyright (C) 2011 Freescale Semiconductor, Inc.
361 + * Copyright (C) 2009 IXXAT Automation, GmbH
362 + *
363 + * DPAA Ethernet Driver -- IEEE 1588 interface functionality
364 + *
365 + * This program is free software; you can redistribute it and/or modify
366 + * it under the terms of the GNU General Public License as published by
367 + * the Free Software Foundation; either version 2 of the License, or
368 + * (at your option) any later version.
369 + *
370 + * This program is distributed in the hope that it will be useful,
371 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
372 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
373 + * GNU General Public License for more details.
374 + *
375 + * You should have received a copy of the GNU General Public License along
376 + * with this program; if not, write to the Free Software Foundation, Inc.,
377 + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
378 + *
379 + */
380 +#include <linux/io.h>
381 +#include <linux/device.h>
382 +#include <linux/fs.h>
383 +#include <linux/vmalloc.h>
384 +#include <linux/spinlock.h>
385 +#include <linux/ip.h>
386 +#include <linux/ipv6.h>
387 +#include <linux/udp.h>
388 +#include <asm/div64.h>
389 +#include "dpaa_eth.h"
390 +#include "dpaa_eth_common.h"
391 +#include "dpaa_1588.h"
392 +#include "mac.h"
393 +
394 +static int dpa_ptp_init_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
395 +{
396 + struct circ_buf *circ_buf = &ptp_buf->circ_buf;
397 +
398 + circ_buf->buf = vmalloc(sizeof(struct dpa_ptp_data) * size);
399 + if (!circ_buf->buf)
400 + return 1;
401 +
402 + circ_buf->head = 0;
403 + circ_buf->tail = 0;
404 + ptp_buf->size = size;
405 + spin_lock_init(&ptp_buf->ptp_lock);
406 +
407 + return 0;
408 +}
409 +
410 +static void dpa_ptp_reset_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size)
411 +{
412 + struct circ_buf *circ_buf = &ptp_buf->circ_buf;
413 +
414 + circ_buf->head = 0;
415 + circ_buf->tail = 0;
416 + ptp_buf->size = size;
417 +}
418 +
419 +static int dpa_ptp_insert(struct dpa_ptp_circ_buf *ptp_buf,
420 + struct dpa_ptp_data *data)
421 +{
422 + struct circ_buf *circ_buf = &ptp_buf->circ_buf;
423 + int size = ptp_buf->size;
424 + struct dpa_ptp_data *tmp;
425 + unsigned long flags;
426 + int head, tail;
427 +
428 + spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
429 +
430 + head = circ_buf->head;
431 + tail = circ_buf->tail;
432 +
433 + if (CIRC_SPACE(head, tail, size) <= 0)
434 + circ_buf->tail = (tail + 1) & (size - 1);
435 +
436 + tmp = (struct dpa_ptp_data *)(circ_buf->buf) + head;
437 + memcpy(tmp, data, sizeof(struct dpa_ptp_data));
438 +
439 + circ_buf->head = (head + 1) & (size - 1);
440 +
441 + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
442 +
443 + return 0;
444 +}
445 +
446 +static int dpa_ptp_is_ident_match(struct dpa_ptp_ident *dst,
447 + struct dpa_ptp_ident *src)
448 +{
449 + int ret;
450 +
451 + if ((dst->version != src->version) || (dst->msg_type != src->msg_type))
452 + return 0;
453 +
454 + if ((dst->netw_prot == src->netw_prot)
455 + || src->netw_prot == DPA_PTP_PROT_DONTCARE) {
456 + if (dst->seq_id != src->seq_id)
457 + return 0;
458 +
459 + ret = memcmp(dst->snd_port_id, src->snd_port_id,
460 + DPA_PTP_SOURCE_PORT_LENGTH);
461 + if (ret)
462 + return 0;
463 + else
464 + return 1;
465 + }
466 +
467 + return 0;
468 +}
469 +
470 +static int dpa_ptp_find_and_remove(struct dpa_ptp_circ_buf *ptp_buf,
471 + struct dpa_ptp_ident *ident,
472 + struct dpa_ptp_time *ts)
473 +{
474 + struct circ_buf *circ_buf = &ptp_buf->circ_buf;
475 + int size = ptp_buf->size;
476 + int head, tail, idx;
477 + unsigned long flags;
478 + struct dpa_ptp_data *tmp, *tmp2;
479 + struct dpa_ptp_ident *tmp_ident;
480 +
481 + spin_lock_irqsave(&ptp_buf->ptp_lock, flags);
482 +
483 + head = circ_buf->head;
484 + tail = idx = circ_buf->tail;
485 +
486 + if (CIRC_CNT(head, tail, size) == 0) {
487 + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
488 + return 1;
489 + }
490 +
491 + while (idx != head) {
492 + tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
493 + tmp_ident = &tmp->ident;
494 + if (dpa_ptp_is_ident_match(tmp_ident, ident))
495 + break;
496 + idx = (idx + 1) & (size - 1);
497 + }
498 +
499 + if (idx == head) {
500 + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
501 + return 1;
502 + }
503 +
504 + ts->sec = tmp->ts.sec;
505 + ts->nsec = tmp->ts.nsec;
506 +
507 + if (idx != tail) {
508 + if (CIRC_CNT(idx, tail, size) > TS_ACCUMULATION_THRESHOLD) {
509 + tail = circ_buf->tail =
510 + (idx - TS_ACCUMULATION_THRESHOLD) & (size - 1);
511 + }
512 +
513 + while (CIRC_CNT(idx, tail, size) > 0) {
514 + tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
515 + idx = (idx - 1) & (size - 1);
516 + tmp2 = (struct dpa_ptp_data *)(circ_buf->buf) + idx;
517 + *tmp = *tmp2;
518 + }
519 + }
520 + circ_buf->tail = (tail + 1) & (size - 1);
521 +
522 + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags);
523 +
524 + return 0;
525 +}
526 +
527 +/* Parse the PTP packets
528 + *
529 + * The PTP header can be found in an IPv4 packet, an IPv6 packet or in
530 + * an IEEE802.3 ethernet frame. This function returns the position of
531 + * the PTP header, or NULL if no PTP header is found
532 + */
533 +static u8 *dpa_ptp_parse_packet(struct sk_buff *skb, u16 *eth_type)
534 +{
535 + u8 *pos = skb->data + ETH_ALEN + ETH_ALEN;
536 + u8 *ptp_loc = NULL;
537 + u8 msg_type;
538 + u32 access_len = ETH_ALEN + ETH_ALEN + DPA_ETYPE_LEN;
539 + struct iphdr *iph;
540 + struct udphdr *udph;
541 + struct ipv6hdr *ipv6h;
542 +
543 + /* when we can receive S/G frames we need to check that the data we
544 + * want to access is in the linear skb buffer
545 + */
546 + if (!pskb_may_pull(skb, access_len))
547 + return NULL;
548 +
549 + *eth_type = ntohs(*((u16 *)pos)); /* host order for the comparisons below */
550 +
551 + /* Check if inner tag is here */
552 + if (*eth_type == ETH_P_8021Q) {
553 + access_len += DPA_VLAN_TAG_LEN;
554 +
555 + if (!pskb_may_pull(skb, access_len))
556 + return NULL;
557 +
558 + pos += DPA_VLAN_TAG_LEN;
559 + *eth_type = ntohs(*((u16 *)pos));
560 + }
561 +
562 + pos += DPA_ETYPE_LEN;
563 +
564 + switch (*eth_type) {
565 + /* Transport of PTP over Ethernet */
566 + case ETH_P_1588:
567 + ptp_loc = pos;
568 +
569 + if (!pskb_may_pull(skb, access_len + PTP_OFFS_MSG_TYPE + 1))
570 + return NULL;
571 +
572 + msg_type = *((u8 *)(ptp_loc + PTP_OFFS_MSG_TYPE)) & 0xf;
573 + if ((msg_type == PTP_MSGTYPE_SYNC)
574 + || (msg_type == PTP_MSGTYPE_DELREQ)
575 + || (msg_type == PTP_MSGTYPE_PDELREQ)
576 + || (msg_type == PTP_MSGTYPE_PDELRESP))
577 + return ptp_loc;
578 + break;
579 + /* Transport of PTP over IPv4 */
580 + case ETH_P_IP:
581 + iph = (struct iphdr *)pos;
582 + access_len += sizeof(struct iphdr);
583 +
584 + if (!pskb_may_pull(skb, access_len))
585 + return NULL;
586 +
587 + if (iph->protocol != IPPROTO_UDP) /* single byte, no ntohs needed */
588 + return NULL;
589 +
590 + access_len += iph->ihl * 4 - sizeof(struct iphdr) +
591 + sizeof(struct udphdr);
592 +
593 + if (!pskb_may_pull(skb, access_len))
594 + return NULL;
595 +
596 + pos += iph->ihl * 4;
597 + udph = (struct udphdr *)pos;
598 + if (ntohs(udph->dest) != 319)
599 + return NULL;
600 + ptp_loc = pos + sizeof(struct udphdr);
601 + break;
602 + /* Transport of PTP over IPv6 */
603 + case ETH_P_IPV6:
604 + ipv6h = (struct ipv6hdr *)pos;
605 +
606 + access_len += sizeof(struct ipv6hdr) + sizeof(struct udphdr);
607 +
608 + if (ipv6h->nexthdr != IPPROTO_UDP) /* single byte, no ntohs needed */
609 + return NULL;
610 +
611 + pos += sizeof(struct ipv6hdr);
612 + udph = (struct udphdr *)pos;
613 + if (ntohs(udph->dest) != 319)
614 + return NULL;
615 + ptp_loc = pos + sizeof(struct udphdr);
616 + break;
617 + default:
618 + break;
619 + }
620 +
621 + return ptp_loc;
622 +}
623 +
624 +static int dpa_ptp_store_stamp(const struct dpa_priv_s *priv,
625 + struct sk_buff *skb, void *data, enum port_type rx_tx,
626 + struct dpa_ptp_data *ptp_data)
627 +{
628 + u64 nsec;
629 + u32 mod;
630 + u8 *ptp_loc;
631 + u16 eth_type;
632 +
633 + ptp_loc = dpa_ptp_parse_packet(skb, &eth_type);
634 + if (!ptp_loc)
635 + return -EINVAL;
636 +
637 + switch (eth_type) {
638 + case ETH_P_IP:
639 + ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV4;
640 + break;
641 + case ETH_P_IPV6:
642 + ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV6;
643 + break;
644 + case ETH_P_1588:
645 + ptp_data->ident.netw_prot = DPA_PTP_PROT_802_3;
646 + break;
647 + default:
648 + return -EINVAL;
649 + }
650 +
651 + if (!pskb_may_pull(skb, ptp_loc - skb->data + PTP_OFFS_SEQ_ID + 2))
652 + return -EINVAL;
653 +
654 + ptp_data->ident.version = *(ptp_loc + PTP_OFFS_VER_PTP) & 0xf;
655 + ptp_data->ident.msg_type = *(ptp_loc + PTP_OFFS_MSG_TYPE) & 0xf;
656 + ptp_data->ident.seq_id = *((u16 *)(ptp_loc + PTP_OFFS_SEQ_ID));
657 + memcpy(ptp_data->ident.snd_port_id, ptp_loc + PTP_OFFS_SRCPRTID,
658 + DPA_PTP_SOURCE_PORT_LENGTH);
659 +
660 + nsec = dpa_get_timestamp_ns(priv, rx_tx, data);
661 + mod = do_div(nsec, NANOSEC_PER_SECOND);
662 + ptp_data->ts.sec = nsec;
663 + ptp_data->ts.nsec = mod;
664 +
665 + return 0;
666 +}
667 +
668 +void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv,
669 + struct sk_buff *skb, void *data)
670 +{
671 + struct dpa_ptp_tsu *tsu = priv->tsu;
672 + struct dpa_ptp_data ptp_tx_data;
673 +
674 + if (dpa_ptp_store_stamp(priv, skb, data, TX, &ptp_tx_data))
675 + return;
676 +
677 + dpa_ptp_insert(&tsu->tx_timestamps, &ptp_tx_data);
678 +}
679 +
680 +void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv,
681 + struct sk_buff *skb, void *data)
682 +{
683 + struct dpa_ptp_tsu *tsu = priv->tsu;
684 + struct dpa_ptp_data ptp_rx_data;
685 +
686 + if (dpa_ptp_store_stamp(priv, skb, data, RX, &ptp_rx_data))
687 + return;
688 +
689 + dpa_ptp_insert(&tsu->rx_timestamps, &ptp_rx_data);
690 +}
691 +
692 +static int dpa_get_tx_timestamp(struct dpa_ptp_tsu *ptp_tsu,
693 + struct dpa_ptp_ident *ident,
694 + struct dpa_ptp_time *ts)
695 +{
696 + struct dpa_ptp_tsu *tsu = ptp_tsu;
697 + struct dpa_ptp_time tmp;
698 + int flag;
699 +
700 + flag = dpa_ptp_find_and_remove(&tsu->tx_timestamps, ident, &tmp);
701 + if (!flag) {
702 + ts->sec = tmp.sec;
703 + ts->nsec = tmp.nsec;
704 + return 0;
705 + }
706 +
707 + return -1;
708 +}
709 +
710 +static int dpa_get_rx_timestamp(struct dpa_ptp_tsu *ptp_tsu,
711 + struct dpa_ptp_ident *ident,
712 + struct dpa_ptp_time *ts)
713 +{
714 + struct dpa_ptp_tsu *tsu = ptp_tsu;
715 + struct dpa_ptp_time tmp;
716 + int flag;
717 +
718 + flag = dpa_ptp_find_and_remove(&tsu->rx_timestamps, ident, &tmp);
719 + if (!flag) {
720 + ts->sec = tmp.sec;
721 + ts->nsec = tmp.nsec;
722 + return 0;
723 + }
724 +
725 + return -1;
726 +}
727 +
728 +static void dpa_set_fiper_alarm(struct dpa_ptp_tsu *tsu,
729 + struct dpa_ptp_time *cnt_time)
730 +{
731 + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
732 + u64 tmp, fiper;
733 +
734 + if (mac_dev->fm_rtc_disable)
735 + mac_dev->fm_rtc_disable(get_fm_handle(tsu->dpa_priv->net_dev));
736 +
737 + /* TMR_FIPER1 will pulse every second after ALARM1 expires */
738 + tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
739 + fiper = NANOSEC_PER_SECOND - DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
740 + if (mac_dev->fm_rtc_set_alarm)
741 + mac_dev->fm_rtc_set_alarm(get_fm_handle(tsu->dpa_priv->net_dev),
742 + 0, tmp);
743 + if (mac_dev->fm_rtc_set_fiper)
744 + mac_dev->fm_rtc_set_fiper(get_fm_handle(tsu->dpa_priv->net_dev),
745 + 0, fiper);
746 +
747 + if (mac_dev->fm_rtc_enable)
748 + mac_dev->fm_rtc_enable(get_fm_handle(tsu->dpa_priv->net_dev));
749 +}
750 +
751 +static void dpa_get_curr_cnt(struct dpa_ptp_tsu *tsu,
752 + struct dpa_ptp_time *curr_time)
753 +{
754 + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
755 + u64 tmp;
756 + u32 mod;
757 +
758 + if (mac_dev->fm_rtc_get_cnt)
759 + mac_dev->fm_rtc_get_cnt(get_fm_handle(tsu->dpa_priv->net_dev),
760 + &tmp);
761 +
762 + mod = do_div(tmp, NANOSEC_PER_SECOND);
763 + curr_time->sec = (u32)tmp;
764 + curr_time->nsec = mod;
765 +}
766 +
767 +static void dpa_set_1588cnt(struct dpa_ptp_tsu *tsu,
768 + struct dpa_ptp_time *cnt_time)
769 +{
770 + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
771 + u64 tmp;
772 +
773 + tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec;
774 +
775 + if (mac_dev->fm_rtc_set_cnt)
776 + mac_dev->fm_rtc_set_cnt(get_fm_handle(tsu->dpa_priv->net_dev),
777 + tmp);
778 +
779 + /* Restart fiper two seconds later */
780 + cnt_time->sec += 2;
781 + cnt_time->nsec = 0;
782 + dpa_set_fiper_alarm(tsu, cnt_time);
783 +}
784 +
785 +static void dpa_get_drift(struct dpa_ptp_tsu *tsu, u32 *addend)
786 +{
787 + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
788 + u32 drift;
789 +
790 + if (mac_dev->fm_rtc_get_drift)
791 + mac_dev->fm_rtc_get_drift(get_fm_handle(tsu->dpa_priv->net_dev),
792 + &drift);
793 +
794 + *addend = drift;
795 +}
796 +
797 +static void dpa_set_drift(struct dpa_ptp_tsu *tsu, u32 addend)
798 +{
799 + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev;
800 +
801 + if (mac_dev->fm_rtc_set_drift)
802 + mac_dev->fm_rtc_set_drift(get_fm_handle(tsu->dpa_priv->net_dev),
803 + addend);
804 +}
805 +
806 +static void dpa_flush_timestamp(struct dpa_ptp_tsu *tsu)
807 +{
808 + dpa_ptp_reset_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ);
809 + dpa_ptp_reset_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ);
810 +}
811 +
812 +int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd)
813 +{
814 + struct dpa_priv_s *priv = netdev_priv(dev);
815 + struct dpa_ptp_tsu *tsu = priv->tsu;
816 + struct mac_device *mac_dev = priv->mac_dev;
817 + struct dpa_ptp_data ptp_data;
818 + struct dpa_ptp_data *ptp_data_user;
819 + struct dpa_ptp_time act_time;
820 + u32 addend;
821 + int retval = 0;
822 +
823 + if (!tsu || !tsu->valid)
824 + return -ENODEV;
825 +
826 + switch (cmd) {
827 + case PTP_ENBL_TXTS_IOCTL:
828 + tsu->hwts_tx_en_ioctl = 1;
829 + if (mac_dev->fm_rtc_enable)
830 + mac_dev->fm_rtc_enable(get_fm_handle(dev));
831 + if (mac_dev->ptp_enable)
832 + mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
833 + break;
834 + case PTP_DSBL_TXTS_IOCTL:
835 + tsu->hwts_tx_en_ioctl = 0;
836 + if (mac_dev->fm_rtc_disable)
837 + mac_dev->fm_rtc_disable(get_fm_handle(dev));
838 + if (mac_dev->ptp_disable)
839 + mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
840 + break;
841 + case PTP_ENBL_RXTS_IOCTL:
842 + tsu->hwts_rx_en_ioctl = 1;
843 + break;
844 + case PTP_DSBL_RXTS_IOCTL:
845 + tsu->hwts_rx_en_ioctl = 0;
846 + break;
847 + case PTP_GET_RX_TIMESTAMP:
848 + ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data;
849 + if (copy_from_user(&ptp_data.ident,
850 + &ptp_data_user->ident, sizeof(ptp_data.ident)))
851 + return -EFAULT;
852 +
853 + if (dpa_get_rx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts))
854 + return -EAGAIN;
855 +
856 + if (copy_to_user((void __user *)&ptp_data_user->ts,
857 + &ptp_data.ts, sizeof(ptp_data.ts)))
858 + return -EFAULT;
859 + break;
860 + case PTP_GET_TX_TIMESTAMP:
861 + ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data;
862 + if (copy_from_user(&ptp_data.ident,
863 + &ptp_data_user->ident, sizeof(ptp_data.ident)))
864 + return -EFAULT;
865 +
866 + if (dpa_get_tx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts))
867 + return -EAGAIN;
868 +
869 + if (copy_to_user((void __user *)&ptp_data_user->ts,
870 + &ptp_data.ts, sizeof(ptp_data.ts)))
871 + return -EFAULT;
872 + break;
873 + case PTP_GET_TIME:
874 + dpa_get_curr_cnt(tsu, &act_time);
875 + if (copy_to_user(ifr->ifr_data, &act_time, sizeof(act_time)))
876 + return -EFAULT;
877 + break;
878 + case PTP_SET_TIME:
879 + if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time)))
880 + return -EFAULT;
881 + dpa_set_1588cnt(tsu, &act_time);
882 + break;
883 + case PTP_GET_ADJ:
884 + dpa_get_drift(tsu, &addend);
885 + if (copy_to_user(ifr->ifr_data, &addend, sizeof(addend)))
886 + return -EFAULT;
887 + break;
888 + case PTP_SET_ADJ:
889 + if (copy_from_user(&addend, ifr->ifr_data, sizeof(addend)))
890 + return -EFAULT;
891 + dpa_set_drift(tsu, addend);
892 + break;
893 + case PTP_SET_FIPER_ALARM:
894 + if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time)))
895 + return -EFAULT;
896 + dpa_set_fiper_alarm(tsu, &act_time);
897 + break;
898 + case PTP_CLEANUP_TS:
899 + dpa_flush_timestamp(tsu);
900 + break;
901 + default:
902 + return -EINVAL;
903 + }
904 +
905 + return retval;
906 +}
907 +
908 +int dpa_ptp_init(struct dpa_priv_s *priv)
909 +{
910 + struct dpa_ptp_tsu *tsu;
911 +
912 + /* Allocate memory for PTP structure */
913 + tsu = kzalloc(sizeof(struct dpa_ptp_tsu), GFP_KERNEL);
914 + if (!tsu)
915 + return -ENOMEM;
916 +
917 + tsu->valid = TRUE;
918 + tsu->dpa_priv = priv;
919 +
920 + dpa_ptp_init_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ);
921 + dpa_ptp_init_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ);
922 +
923 + priv->tsu = tsu;
924 +
925 + return 0;
926 +}
927 +EXPORT_SYMBOL(dpa_ptp_init);
928 +
929 +void dpa_ptp_cleanup(struct dpa_priv_s *priv)
930 +{
931 + struct dpa_ptp_tsu *tsu = priv->tsu;
932 +
933 + tsu->valid = FALSE;
934 + vfree(tsu->rx_timestamps.circ_buf.buf);
935 + vfree(tsu->tx_timestamps.circ_buf.buf);
936 +
937 + kfree(tsu);
938 +}
939 +EXPORT_SYMBOL(dpa_ptp_cleanup);
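
The timestamp rings above (dpa_ptp_insert(), dpa_ptp_find_and_remove()) follow
the kernel's circ_buf convention: the ring size must be a power of two (the
256-entry DEFAULT_PTP_RX/TX_BUF_SZ defaults qualify) so that all index
arithmetic reduces to masking with size - 1. A standalone sketch of the same
arithmetic, with the two macros copied from <linux/circ_buf.h> so it builds as
a plain userspace program:

#include <stdio.h>

#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
	int size = 8, head = 0, tail = 0, i;

	/* Push more entries than the ring holds; once full, the oldest
	 * entry is dropped by advancing tail, exactly as dpa_ptp_insert()
	 * does when CIRC_SPACE() reaches zero.
	 */
	for (i = 0; i < 10; i++) {
		if (CIRC_SPACE(head, tail, size) <= 0)
			tail = (tail + 1) & (size - 1);
		head = (head + 1) & (size - 1);
	}

	/* A circ_buf ring reports at most size - 1 used slots. */
	printf("head=%d tail=%d count=%d\n",
	       head, tail, CIRC_CNT(head, tail, size));
	return 0;
}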
940 --- /dev/null
941 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.h
942 @@ -0,0 +1,138 @@
943 +/* Copyright (C) 2011 Freescale Semiconductor, Inc.
944 + *
945 + * This program is free software; you can redistribute it and/or modify
946 + * it under the terms of the GNU General Public License as published by
947 + * the Free Software Foundation; either version 2 of the License, or
948 + * (at your option) any later version.
949 + *
950 + * This program is distributed in the hope that it will be useful,
951 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
952 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
953 + * GNU General Public License for more details.
954 + *
955 + * You should have received a copy of the GNU General Public License along
956 + * with this program; if not, write to the Free Software Foundation, Inc.,
957 + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
958 + *
959 + */
960 +#ifndef __DPAA_1588_H__
961 +#define __DPAA_1588_H__
962 +
963 +#include <linux/netdevice.h>
964 +#include <linux/etherdevice.h>
965 +#include <linux/circ_buf.h>
966 +#include <linux/fsl_qman.h>
967 +
968 +#define DEFAULT_PTP_RX_BUF_SZ 256
969 +#define DEFAULT_PTP_TX_BUF_SZ 256
970 +
971 +/* 1588 private ioctl calls */
972 +#define PTP_ENBL_TXTS_IOCTL SIOCDEVPRIVATE
973 +#define PTP_DSBL_TXTS_IOCTL (SIOCDEVPRIVATE + 1)
974 +#define PTP_ENBL_RXTS_IOCTL (SIOCDEVPRIVATE + 2)
975 +#define PTP_DSBL_RXTS_IOCTL (SIOCDEVPRIVATE + 3)
976 +#define PTP_GET_TX_TIMESTAMP (SIOCDEVPRIVATE + 4)
977 +#define PTP_GET_RX_TIMESTAMP (SIOCDEVPRIVATE + 5)
978 +#define PTP_SET_TIME (SIOCDEVPRIVATE + 6)
979 +#define PTP_GET_TIME (SIOCDEVPRIVATE + 7)
980 +#define PTP_SET_FIPER_ALARM (SIOCDEVPRIVATE + 8)
981 +#define PTP_SET_ADJ (SIOCDEVPRIVATE + 9)
982 +#define PTP_GET_ADJ (SIOCDEVPRIVATE + 10)
983 +#define PTP_CLEANUP_TS (SIOCDEVPRIVATE + 11)
984 +
985 +/* PTP V2 message type */
986 +enum {
987 + PTP_MSGTYPE_SYNC = 0x0,
988 + PTP_MSGTYPE_DELREQ = 0x1,
989 + PTP_MSGTYPE_PDELREQ = 0x2,
990 + PTP_MSGTYPE_PDELRESP = 0x3,
991 + PTP_MSGTYPE_FLWUP = 0x8,
992 + PTP_MSGTYPE_DELRESP = 0x9,
993 + PTP_MSGTYPE_PDELRES_FLWUP = 0xA,
994 + PTP_MSGTYPE_ANNOUNCE = 0xB,
995 + PTP_MSGTYPE_SGNLNG = 0xC,
996 + PTP_MSGTYPE_MNGMNT = 0xD,
997 +};
998 +
999 +/* Byte offset of data in the PTP V2 headers */
1000 +#define PTP_OFFS_MSG_TYPE 0
1001 +#define PTP_OFFS_VER_PTP 1
1002 +#define PTP_OFFS_MSG_LEN 2
1003 +#define PTP_OFFS_DOM_NMB 4
1004 +#define PTP_OFFS_FLAGS 6
1005 +#define PTP_OFFS_CORFIELD 8
1006 +#define PTP_OFFS_SRCPRTID 20
1007 +#define PTP_OFFS_SEQ_ID 30
1008 +#define PTP_OFFS_CTRL 32
1009 +#define PTP_OFFS_LOGMEAN 33
1010 +
1011 +#define PTP_IP_OFFS 14
1012 +#define PTP_UDP_OFFS 34
1013 +#define PTP_HEADER_OFFS 42
1014 +#define PTP_MSG_TYPE_OFFS (PTP_HEADER_OFFS + PTP_OFFS_MSG_TYPE)
1015 +#define PTP_SPORT_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SRCPRTID)
1016 +#define PTP_SEQ_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SEQ_ID)
1017 +#define PTP_CTRL_OFFS (PTP_HEADER_OFFS + PTP_OFFS_CTRL)
1018 +
1019 +/* 1588-2008 network protocol enumeration values */
1020 +#define DPA_PTP_PROT_IPV4 1
1021 +#define DPA_PTP_PROT_IPV6 2
1022 +#define DPA_PTP_PROT_802_3 3
1023 +#define DPA_PTP_PROT_DONTCARE 0xFFFF
1024 +
1025 +#define DPA_PTP_SOURCE_PORT_LENGTH 10
1026 +#define DPA_PTP_HEADER_SZE 34
1027 +#define DPA_ETYPE_LEN 2
1028 +#define DPA_VLAN_TAG_LEN 4
1029 +#define NANOSEC_PER_SECOND 1000000000
1030 +
1031 +/* The threshold between the current found one and the oldest one */
1032 +#define TS_ACCUMULATION_THRESHOLD 50
1033 +
1034 +/* Struct needed to identify a timestamp */
1035 +struct dpa_ptp_ident {
1036 + u8 version;
1037 + u8 msg_type;
1038 + u16 netw_prot;
1039 + u16 seq_id;
1040 + u8 snd_port_id[DPA_PTP_SOURCE_PORT_LENGTH];
1041 +};
1042 +
1043 +/* Timestamp format in 1588-2008 */
1044 +struct dpa_ptp_time {
1045 + u64 sec; /* only 48 bits used */
1046 + u32 nsec;
1047 +};
1048 +
1049 +/* needed for timestamp data over ioctl */
1050 +struct dpa_ptp_data {
1051 + struct dpa_ptp_ident ident;
1052 + struct dpa_ptp_time ts;
1053 +};
1054 +
1055 +struct dpa_ptp_circ_buf {
1056 + struct circ_buf circ_buf;
1057 + u32 size;
1058 + spinlock_t ptp_lock;
1059 +};
1060 +
1061 +/* PTP TSU control structure */
1062 +struct dpa_ptp_tsu {
1063 + struct dpa_priv_s *dpa_priv;
1064 + bool valid;
1065 + struct dpa_ptp_circ_buf rx_timestamps;
1066 + struct dpa_ptp_circ_buf tx_timestamps;
1067 +
1068 + /* HW timestamping over ioctl enabled flag */
1069 + int hwts_tx_en_ioctl;
1070 + int hwts_rx_en_ioctl;
1071 +};
1072 +
1073 +extern int dpa_ptp_init(struct dpa_priv_s *priv);
1074 +extern void dpa_ptp_cleanup(struct dpa_priv_s *priv);
1075 +extern void dpa_ptp_store_txstamp(const struct dpa_priv_s *priv,
1076 + struct sk_buff *skb, void *data);
1077 +extern void dpa_ptp_store_rxstamp(const struct dpa_priv_s *priv,
1078 + struct sk_buff *skb, void *data);
1079 +extern int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd);
1080 +#endif
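
As a usage illustration for the private ioctls declared above, the userspace
sketch below queries the 1588 counter through PTP_GET_TIME, the path served by
dpa_ioctl_1588(). The interface name is a placeholder, the local struct simply
mirrors the dpa_ptp_time layout above, and error handling is trimmed to the
essentials.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <net/if.h>

#define PTP_GET_TIME (SIOCDEVPRIVATE + 7)	/* as defined above */

struct dpa_ptp_time {
	uint64_t sec;	/* only 48 bits used */
	uint32_t nsec;
};

int main(void)
{
	struct ifreq ifr;
	struct dpa_ptp_time t;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder name */
	ifr.ifr_data = (char *)&t;	/* dpa_ioctl_1588() copies the time here */

	if (ioctl(fd, PTP_GET_TIME, &ifr) == 0)
		printf("1588 counter: %llu.%09u\n",
		       (unsigned long long)t.sec, t.nsec);
	close(fd);
	return 0;
}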
1081 --- /dev/null
1082 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c
1083 @@ -0,0 +1,180 @@
1084 +/* Copyright 2008-2013 Freescale Semiconductor Inc.
1085 + *
1086 + * Redistribution and use in source and binary forms, with or without
1087 + * modification, are permitted provided that the following conditions are met:
1088 + * * Redistributions of source code must retain the above copyright
1089 + * notice, this list of conditions and the following disclaimer.
1090 + * * Redistributions in binary form must reproduce the above copyright
1091 + * notice, this list of conditions and the following disclaimer in the
1092 + * documentation and/or other materials provided with the distribution.
1093 + * * Neither the name of Freescale Semiconductor nor the
1094 + * names of its contributors may be used to endorse or promote products
1095 + * derived from this software without specific prior written permission.
1096 + *
1097 + *
1098 + * ALTERNATIVELY, this software may be distributed under the terms of the
1099 + * GNU General Public License ("GPL") as published by the Free Software
1100 + * Foundation, either version 2 of that License or (at your option) any
1101 + * later version.
1102 + *
1103 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1104 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1105 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1106 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1107 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1108 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1109 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1110 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1111 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1112 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1113 + */
1114 +
1115 +#include <linux/module.h>
1116 +#include <linux/fsl_qman.h> /* struct qm_mcr_querycgr */
1117 +#include <linux/debugfs.h>
1118 +#include "dpaa_debugfs.h"
1119 +#include "dpaa_eth.h" /* struct dpa_priv_s, dpa_percpu_priv_s, dpa_bp */
1120 +
1121 +#define DPA_DEBUGFS_DESCRIPTION "FSL DPAA Ethernet debugfs entries"
1122 +#define DPA_ETH_DEBUGFS_ROOT "fsl_dpa"
1123 +
1124 +static struct dentry *dpa_debugfs_root;
1125 +
1126 +static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file);
1127 +static ssize_t dpa_loop_write(struct file *f,
1128 + const char __user *buf, size_t count, loff_t *off);
1129 +
1130 +static const struct file_operations dpa_debugfs_lp_fops = {
1131 + .open = dpa_debugfs_loop_open,
1132 + .write = dpa_loop_write,
1133 + .read = seq_read,
1134 + .llseek = seq_lseek,
1135 + .release = single_release,
1136 +};
1137 +
1138 +static int dpa_debugfs_loop_show(struct seq_file *file, void *offset)
1139 +{
1140 + struct dpa_priv_s *priv;
1141 +
1142 + BUG_ON(offset == NULL);
1143 +
1144 + priv = netdev_priv((struct net_device *)file->private);
1145 + seq_printf(file, "%d->%d\n", priv->loop_id, priv->loop_to);
1146 +
1147 + return 0;
1148 +}
1149 +
1150 +static int user_input_convert(const char __user *user_buf, size_t count,
1151 + long *val)
1152 +{
1153 + char buf[12];
1154 +
1155 + if (count > sizeof(buf) - 1)
1156 + return -EINVAL;
1157 + if (copy_from_user(buf, user_buf, count))
1158 + return -EFAULT;
1159 + buf[count] = '\0';
1160 + if (kstrtol(buf, 0, val))
1161 + return -EINVAL;
1162 + return 0;
1163 +}
1164 +
1165 +static ssize_t dpa_loop_write(struct file *f,
1166 + const char __user *buf, size_t count, loff_t *off)
1167 +{
1168 + struct dpa_priv_s *priv;
1169 + struct net_device *netdev;
1170 + struct seq_file *sf;
1171 + int ret;
1172 + long val;
1173 +
1174 + ret = user_input_convert(buf, count, &val);
1175 + if (ret)
1176 + return ret;
1177 +
1178 + sf = (struct seq_file *)f->private_data;
1179 + netdev = (struct net_device *)sf->private;
1180 + priv = netdev_priv(netdev);
1181 +
1182 + priv->loop_to = ((val < 0) || (val > 20)) ? -1 : val;
1183 +
1184 + return count;
1185 +}
1186 +
1187 +static int __cold dpa_debugfs_loop_open(struct inode *inode, struct file *file)
1188 +{
1189 + int _errno;
1190 + const struct net_device *net_dev;
1191 +
1192 + _errno = single_open(file, dpa_debugfs_loop_show, inode->i_private);
1193 + if (unlikely(_errno < 0)) {
1194 + net_dev = (struct net_device *)inode->i_private;
1195 +
1196 + if (netif_msg_drv((struct dpa_priv_s *)netdev_priv(net_dev)))
1197 + netdev_err(net_dev, "single_open() = %d\n",
1198 + _errno);
1199 + }
1200 +
1201 + return _errno;
1202 +}
1203 +
1204 +
1205 +int dpa_netdev_debugfs_create(struct net_device *net_dev)
1206 +{
1207 + struct dpa_priv_s *priv = netdev_priv(net_dev);
1208 + static int cnt;
1209 + char loop_file_name[100];
1210 +
1211 + if (unlikely(dpa_debugfs_root == NULL)) {
1212 + pr_err(KBUILD_MODNAME ": %s:%hu:%s(): \t%s\n",
1213 + KBUILD_BASENAME".c", __LINE__, __func__,
1214 + "root debugfs missing, possible module ordering issue");
1215 + return -ENOMEM;
1216 + }
1217 +
1218 + sprintf(loop_file_name, "eth%d_loop", ++cnt);
1219 + priv->debugfs_loop_file = debugfs_create_file(loop_file_name,
1220 + S_IRUGO | S_IWUSR,
1221 + dpa_debugfs_root,
1222 + net_dev,
1223 + &dpa_debugfs_lp_fops);
1224 + if (unlikely(priv->debugfs_loop_file == NULL)) {
1225 + netdev_err(net_dev, "debugfs_create_file(%s/%s) failed\n",
1226 + dpa_debugfs_root->d_iname,
1227 + loop_file_name);
1228 +
1229 + return -ENOMEM;
1230 + }
1231 + return 0;
1232 +}
1233 +
1234 +void dpa_netdev_debugfs_remove(struct net_device *net_dev)
1235 +{
1236 + struct dpa_priv_s *priv = netdev_priv(net_dev);
1237 +
1238 + debugfs_remove(priv->debugfs_loop_file);
1239 +}
1240 +
1241 +int __init dpa_debugfs_module_init(void)
1242 +{
1243 + int _errno = 0;
1244 +
1245 + pr_info(KBUILD_MODNAME ": " DPA_DEBUGFS_DESCRIPTION "\n");
1246 +
1247 + dpa_debugfs_root = debugfs_create_dir(DPA_ETH_DEBUGFS_ROOT, NULL);
1248 +
1249 + if (unlikely(dpa_debugfs_root == NULL)) {
1250 + _errno = -ENOMEM;
1251 + pr_err(KBUILD_MODNAME ": %s:%hu:%s():\n",
1252 + KBUILD_BASENAME".c", __LINE__, __func__);
1253 + pr_err("\tdebugfs_create_dir(%s/"KBUILD_MODNAME") = %d\n",
1254 + DPA_ETH_DEBUGFS_ROOT, _errno);
1255 + }
1256 +
1257 + return _errno;
1258 +}
1259 +
1260 +void __exit dpa_debugfs_module_exit(void)
1261 +{
1262 + debugfs_remove(dpa_debugfs_root);
1263 +}
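
The Kconfig help drives these loop entries from the shell; the same control
path from a C program is plain open/write/read on the debugfs file. The path
below assumes the "fsl_dpa" directory created by dpa_debugfs_module_init() and
a debugfs mount at /sys/kernel/debug; both are assumptions of this sketch.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/fsl_dpa/eth4_loop";
	char buf[16] = { 0 };
	int fd;

	/* Divert eth4's Rx traffic to interface 5; the value is parsed by
	 * user_input_convert() in dpa_loop_write() above.
	 */
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, "5", 1) != 1) {
		close(fd);
		return 1;
	}
	close(fd);

	/* Read the mapping back; dpa_debugfs_loop_show() prints "4->5". */
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 1;
	if (read(fd, buf, sizeof(buf) - 1) > 0)
		printf("%s", buf);
	close(fd);
	return 0;
}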
1264 --- /dev/null
1265 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h
1266 @@ -0,0 +1,43 @@
1267 +/* Copyright 2008-2013 Freescale Semiconductor Inc.
1268 + *
1269 + * Redistribution and use in source and binary forms, with or without
1270 + * modification, are permitted provided that the following conditions are met:
1271 + * * Redistributions of source code must retain the above copyright
1272 + * notice, this list of conditions and the following disclaimer.
1273 + * * Redistributions in binary form must reproduce the above copyright
1274 + * notice, this list of conditions and the following disclaimer in the
1275 + * documentation and/or other materials provided with the distribution.
1276 + * * Neither the name of Freescale Semiconductor nor the
1277 + * names of its contributors may be used to endorse or promote products
1278 + * derived from this software without specific prior written permission.
1279 + *
1280 + *
1281 + * ALTERNATIVELY, this software may be distributed under the terms of the
1282 + * GNU General Public License ("GPL") as published by the Free Software
1283 + * Foundation, either version 2 of that License or (at your option) any
1284 + * later version.
1285 + *
1286 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1287 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1288 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1289 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1290 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1291 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1292 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1293 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1294 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1295 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1296 + */
1297 +
1298 +#ifndef DPAA_DEBUGFS_H_
1299 +#define DPAA_DEBUGFS_H_
1300 +
1301 +#include <linux/netdevice.h>
1302 +#include <linux/dcache.h> /* struct dentry needed in dpaa_eth.h */
1303 +
1304 +int dpa_netdev_debugfs_create(struct net_device *net_dev);
1305 +void dpa_netdev_debugfs_remove(struct net_device *net_dev);
1306 +int __init dpa_debugfs_module_init(void);
1307 +void __exit dpa_debugfs_module_exit(void);
1308 +
1309 +#endif /* DPAA_DEBUGFS_H_ */
1310 --- /dev/null
1311 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c
1312 @@ -0,0 +1,1183 @@
1313 +/* Copyright 2008-2013 Freescale Semiconductor Inc.
1314 + *
1315 + * Redistribution and use in source and binary forms, with or without
1316 + * modification, are permitted provided that the following conditions are met:
1317 + * * Redistributions of source code must retain the above copyright
1318 + * notice, this list of conditions and the following disclaimer.
1319 + * * Redistributions in binary form must reproduce the above copyright
1320 + * notice, this list of conditions and the following disclaimer in the
1321 + * documentation and/or other materials provided with the distribution.
1322 + * * Neither the name of Freescale Semiconductor nor the
1323 + * names of its contributors may be used to endorse or promote products
1324 + * derived from this software without specific prior written permission.
1325 + *
1326 + *
1327 + * ALTERNATIVELY, this software may be distributed under the terms of the
1328 + * GNU General Public License ("GPL") as published by the Free Software
1329 + * Foundation, either version 2 of that License or (at your option) any
1330 + * later version.
1331 + *
1332 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1333 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1334 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1335 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1336 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1337 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1338 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1339 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1340 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1341 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1342 + */
1343 +
1344 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
1345 +#define pr_fmt(fmt) \
1346 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
1347 + KBUILD_BASENAME".c", __LINE__, __func__
1348 +#else
1349 +#define pr_fmt(fmt) \
1350 + KBUILD_MODNAME ": " fmt
1351 +#endif
1352 +
1353 +#include <linux/init.h>
1354 +#include <linux/module.h>
1355 +#include <linux/of_mdio.h>
1356 +#include <linux/of_net.h>
1357 +#include <linux/kthread.h>
1358 +#include <linux/io.h>
1359 +#include <linux/if_arp.h> /* arp_hdr_len() */
1360 +#include <linux/if_vlan.h> /* VLAN_HLEN */
1361 +#include <linux/icmp.h> /* struct icmphdr */
1362 +#include <linux/ip.h> /* struct iphdr */
1363 +#include <linux/ipv6.h> /* struct ipv6hdr */
1364 +#include <linux/udp.h> /* struct udphdr */
1365 +#include <linux/tcp.h> /* struct tcphdr */
1366 +#include <linux/net.h> /* net_ratelimit() */
1367 +#include <linux/if_ether.h> /* ETH_P_IP and ETH_P_IPV6 */
1368 +#include <linux/highmem.h>
1369 +#include <linux/percpu.h>
1370 +#include <linux/dma-mapping.h>
1371 +#include <linux/fsl_bman.h>
1372 +
1373 +#include "fsl_fman.h"
1374 +#include "fm_ext.h"
1375 +#include "fm_port_ext.h"
1376 +
1377 +#include "mac.h"
1378 +#include "dpaa_eth.h"
1379 +#include "dpaa_eth_common.h"
1380 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
1381 +#include "dpaa_debugfs.h"
1382 +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
1383 +
1384 +/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
1385 + * using trace events only need to #include "dpaa_eth_trace.h"
1386 + */
1387 +#define CREATE_TRACE_POINTS
1388 +#include "dpaa_eth_trace.h"
1389 +
1390 +#define DPA_NAPI_WEIGHT 64
1391 +
1392 +/* Valid checksum indication */
1393 +#define DPA_CSUM_VALID 0xFFFF
1394 +
1395 +#define DPA_DESCRIPTION "FSL DPAA Ethernet driver"
1396 +
1397 +MODULE_LICENSE("Dual BSD/GPL");
1398 +
1399 +MODULE_AUTHOR("Andy Fleming <afleming@freescale.com>");
1400 +
1401 +MODULE_DESCRIPTION(DPA_DESCRIPTION);
1402 +
1403 +static uint8_t debug = -1;
1404 +module_param(debug, byte, S_IRUGO);
1405 +MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
1406 +
1407 +/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
1408 +static uint16_t tx_timeout = 1000;
1409 +module_param(tx_timeout, ushort, S_IRUGO);
1410 +MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
1411 +
1412 +static const char rtx[][3] = {
1413 + [RX] = "RX",
1414 + [TX] = "TX"
1415 +};
1416 +
1417 +/* BM */
1418 +
1419 +#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8)
1420 +
1421 +static uint8_t dpa_priv_common_bpid;
1422 +
1423 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
1424 +struct net_device *dpa_loop_netdevs[20];
1425 +#endif
1426 +
1427 +#ifdef CONFIG_PM
1428 +
1429 +static int dpaa_suspend(struct device *dev)
1430 +{
1431 + struct net_device *net_dev;
1432 + struct dpa_priv_s *priv;
1433 + struct mac_device *mac_dev;
1434 + int err = 0;
1435 +
1436 + net_dev = dev_get_drvdata(dev);
1437 +
1438 + if (net_dev->flags & IFF_UP) {
1439 + priv = netdev_priv(net_dev);
1440 + mac_dev = priv->mac_dev;
1441 +
1442 + if (priv->wol & DPAA_WOL_MAGIC) {
1443 + err = priv->mac_dev->set_wol(mac_dev->port_dev[RX],
1444 + priv->mac_dev->get_mac_handle(mac_dev), true);
1445 + if (err) {
1446 + netdev_err(net_dev, "set_wol() = %d\n", err);
1447 + goto set_wol_failed;
1448 + }
1449 + }
1450 +
1451 + err = fm_port_suspend(mac_dev->port_dev[RX]);
1452 + if (err) {
1453 + netdev_err(net_dev, "fm_port_suspend(RX) = %d\n", err);
1454 + goto rx_port_suspend_failed;
1455 + }
1456 +
1457 + err = fm_port_suspend(mac_dev->port_dev[TX]);
1458 + if (err) {
1459 + netdev_err(net_dev, "fm_port_suspend(TX) = %d\n", err);
1460 + goto tx_port_suspend_failed;
1461 + }
1462 + }
1463 +
1464 + return 0;
1465 +
1466 +tx_port_suspend_failed:
1467 + fm_port_resume(mac_dev->port_dev[RX]);
1468 +rx_port_suspend_failed:
1469 + if (priv->wol & DPAA_WOL_MAGIC) {
1470 + priv->mac_dev->set_wol(mac_dev->port_dev[RX],
1471 + priv->mac_dev->get_mac_handle(mac_dev), false);
1472 + }
1473 +set_wol_failed:
1474 + return err;
1475 +}
1476 +
1477 +static int dpaa_resume(struct device *dev)
1478 +{
1479 + struct net_device *net_dev;
1480 + struct dpa_priv_s *priv;
1481 + struct mac_device *mac_dev;
1482 + int err = 0;
1483 +
1484 + net_dev = dev_get_drvdata(dev);
1485 +
1486 + if (net_dev->flags & IFF_UP) {
1487 + priv = netdev_priv(net_dev);
1488 + mac_dev = priv->mac_dev;
1489 +
1490 + err = fm_port_resume(mac_dev->port_dev[TX]);
1491 + if (err) {
1492 + netdev_err(net_dev, "fm_port_resume(TX) = %d\n", err);
1493 + goto resume_failed;
1494 + }
1495 +
1496 + err = fm_port_resume(mac_dev->port_dev[RX]);
1497 + if (err) {
1498 + netdev_err(net_dev, "fm_port_resume(RX) = %d\n", err);
1499 + goto resume_failed;
1500 + }
1501 +
1502 + if (priv->wol & DPAA_WOL_MAGIC) {
1503 + err = priv->mac_dev->set_wol(mac_dev->port_dev[RX],
1504 + priv->mac_dev->get_mac_handle(mac_dev), false);
1505 + if (err) {
1506 + netdev_err(net_dev, "set_wol() = %d\n", err);
1507 + goto resume_failed;
1508 + }
1509 + }
1510 + }
1511 +
1512 + return 0;
1513 +
1514 +resume_failed:
1515 + return err;
1516 +}
1517 +
1518 +static const struct dev_pm_ops dpaa_pm_ops = {
1519 + .suspend = dpaa_suspend,
1520 + .resume = dpaa_resume,
1521 +};
1522 +
1523 +#define DPAA_PM_OPS (&dpaa_pm_ops)
1524 +
1525 +#else /* CONFIG_PM */
1526 +
1527 +#define DPAA_PM_OPS NULL
1528 +
1529 +#endif /* CONFIG_PM */
1530 +
1531 +/* Checks whether the checksum field in the Parse Results array is valid
1532 + * (equals 0xFFFF) and increments the .cse counter otherwise
1533 + */
1534 +static inline void
1535 +dpa_csum_validation(const struct dpa_priv_s *priv,
1536 + struct dpa_percpu_priv_s *percpu_priv,
1537 + const struct qm_fd *fd)
1538 +{
1539 + dma_addr_t addr = qm_fd_addr(fd);
1540 + struct dpa_bp *dpa_bp = priv->dpa_bp;
1541 + void *frm = phys_to_virt(addr);
1542 + fm_prs_result_t *parse_result;
1543 +
1544 + if (unlikely(!frm))
1545 + return;
1546 +
1547 + dma_sync_single_for_cpu(dpa_bp->dev, addr, DPA_RX_PRIV_DATA_SIZE +
1548 + DPA_PARSE_RESULTS_SIZE, DMA_BIDIRECTIONAL);
1549 +
1550 + parse_result = (fm_prs_result_t *)(frm + DPA_RX_PRIV_DATA_SIZE);
1551 +
1552 + if (parse_result->cksum != DPA_CSUM_VALID)
1553 + percpu_priv->rx_errors.cse++;
1554 +}
1555 +
1556 +static void _dpa_rx_error(struct net_device *net_dev,
1557 + const struct dpa_priv_s *priv,
1558 + struct dpa_percpu_priv_s *percpu_priv,
1559 + const struct qm_fd *fd,
1560 + u32 fqid)
1561 +{
1562 + /* limit common, possibly innocuous Rx FIFO Overflow errors'
1563 + * interference with zero-loss convergence benchmark results.
1564 + */
1565 + if (likely(fd->status & FM_FD_STAT_ERR_PHYSICAL))
1566 + pr_warn_once("fsl-dpa: non-zero error counters in fman statistics (sysfs)\n");
1567 + else
1568 + if (netif_msg_hw(priv) && net_ratelimit())
1569 + netdev_dbg(net_dev, "Err FD status = 0x%08x\n",
1570 + fd->status & FM_FD_STAT_RX_ERRORS);
1571 +#ifdef CONFIG_FSL_DPAA_HOOKS
1572 + if (dpaa_eth_hooks.rx_error &&
1573 + dpaa_eth_hooks.rx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN)
1574 + /* it's up to the hook to perform resource cleanup */
1575 + return;
1576 +#endif
1577 + percpu_priv->stats.rx_errors++;
1578 +
1579 + if (fd->status & FM_PORT_FRM_ERR_DMA)
1580 + percpu_priv->rx_errors.dme++;
1581 + if (fd->status & FM_PORT_FRM_ERR_PHYSICAL)
1582 + percpu_priv->rx_errors.fpe++;
1583 + if (fd->status & FM_PORT_FRM_ERR_SIZE)
1584 + percpu_priv->rx_errors.fse++;
1585 + if (fd->status & FM_PORT_FRM_ERR_PRS_HDR_ERR)
1586 + percpu_priv->rx_errors.phe++;
1587 + if (fd->status & FM_FD_STAT_L4CV)
1588 + dpa_csum_validation(priv, percpu_priv, fd);
1589 +
1590 + dpa_fd_release(net_dev, fd);
1591 +}
1592 +
1593 +static void _dpa_tx_error(struct net_device *net_dev,
1594 + const struct dpa_priv_s *priv,
1595 + struct dpa_percpu_priv_s *percpu_priv,
1596 + const struct qm_fd *fd,
1597 + u32 fqid)
1598 +{
1599 + struct sk_buff *skb;
1600 +
1601 + if (netif_msg_hw(priv) && net_ratelimit())
1602 + netdev_warn(net_dev, "FD status = 0x%08x\n",
1603 + fd->status & FM_FD_STAT_TX_ERRORS);
1604 +#ifdef CONFIG_FSL_DPAA_HOOKS
1605 + if (dpaa_eth_hooks.tx_error &&
1606 + dpaa_eth_hooks.tx_error(net_dev, fd, fqid) == DPAA_ETH_STOLEN)
1607 + /* now the hook must ensure proper cleanup */
1608 + return;
1609 +#endif
1610 + percpu_priv->stats.tx_errors++;
1611 +
1612 + /* If we intended the buffers from this frame to go into the bpools
1613 +	 * when the FMan transmit was done, we need to put them back manually.
1614 + */
1615 + if (fd->bpid != 0xff) {
1616 + dpa_fd_release(net_dev, fd);
1617 + return;
1618 + }
1619 +
1620 + skb = _dpa_cleanup_tx_fd(priv, fd);
1621 + dev_kfree_skb(skb);
1622 +}
1623 +
1624 +/* Helper function to factor out frame validation logic on all Rx paths. Its
1625 + * purpose is to extract from the Parse Results structure information about
1626 + * the integrity of the frame, its checksum, the length of the parsed headers
1627 + * and whether the frame is suitable for GRO.
1628 + *
1629 + * Assumes no parser errors, since any error frame is dropped before this
1630 + * function is called.
1631 + *
1632 + * @skb will have its ip_summed field overwritten;
1633 + * @use_gro will only be written with 0 if the frame is definitely not
1634 + * GRO-able; otherwise, it will be left unchanged;
1635 + * @hdr_size will be written with a safe value, at least the size of the
1636 + * headers' length.
1637 + */
1638 +void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results,
1639 + const struct qm_fd *fd,
1640 + struct sk_buff *skb, int *use_gro)
1641 +{
1642 + if (fd->status & FM_FD_STAT_L4CV) {
1643 + /* The parser has run and performed L4 checksum validation.
1644 + * We know there were no parser errors (and implicitly no
1645 + * L4 csum error), otherwise we wouldn't be here.
1646 + */
1647 + skb->ip_summed = CHECKSUM_UNNECESSARY;
1648 +
1649 + /* Don't go through GRO for certain types of traffic that
1650 + * we know are not GRO-able, such as dgram-based protocols.
1651 + * In the worst-case scenarios, such as small-pkt terminating
1652 + * UDP, the extra GRO processing would be overkill.
1653 + *
1654 + * The only protocol the Parser supports that is also GRO-able
1655 + * is currently TCP.
1656 + */
1657 + if (!fm_l4_frame_is_tcp(parse_results))
1658 + *use_gro = 0;
1659 +
1660 + return;
1661 + }
1662 +
1663 + /* We're here because either the parser didn't run or the L4 checksum
1664 + * was not verified. This may include the case of a UDP frame with
1665 +	 * checksum zero or an L4 proto other than TCP/UDP.
1666 + */
1667 + skb->ip_summed = CHECKSUM_NONE;
1668 +
1669 + /* Bypass GRO for unknown traffic or if no PCDs are applied */
1670 + *use_gro = 0;
1671 +}
1672 +
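For orientation, a minimal sketch of how an Rx path is expected to consume this helper; the buffer pointer (vaddr), the napi portal handle (np) and the final dispatch are illustrative assumptions, not the driver's exact receive code:

	int use_gro = 1;
	/* parse results sit right after the private area at the buffer start */
	const fm_prs_result_t *pr =
		(const fm_prs_result_t *)(vaddr + DPA_RX_PRIV_DATA_SIZE);

	_dpa_process_parse_results(pr, fd, skb, &use_gro);
	if (use_gro)
		napi_gro_receive(&np->napi, skb);	/* TCP with valid csum */
	else
		netif_receive_skb(skb);			/* everything else */
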
1673 +int dpaa_eth_poll(struct napi_struct *napi, int budget)
1674 +{
1675 + struct dpa_napi_portal *np =
1676 + container_of(napi, struct dpa_napi_portal, napi);
1677 +
1678 + int cleaned = qman_p_poll_dqrr(np->p, budget);
1679 +
1680 + if (cleaned < budget) {
1681 + int tmp;
1682 + napi_complete(napi);
1683 + tmp = qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
1684 + DPA_BUG_ON(tmp);
1685 + }
1686 +
1687 + return cleaned;
1688 +}
1689 +EXPORT_SYMBOL(dpaa_eth_poll);
1690 +
1691 +static void __hot _dpa_tx_conf(struct net_device *net_dev,
1692 + const struct dpa_priv_s *priv,
1693 + struct dpa_percpu_priv_s *percpu_priv,
1694 + const struct qm_fd *fd,
1695 + u32 fqid)
1696 +{
1697 + struct sk_buff *skb;
1698 +
1699 + /* do we need the timestamp for the error frames? */
1700 +
1701 +	if (unlikely((fd->status & FM_FD_STAT_TX_ERRORS) != 0)) {
1702 + if (netif_msg_hw(priv) && net_ratelimit())
1703 + netdev_warn(net_dev, "FD status = 0x%08x\n",
1704 + fd->status & FM_FD_STAT_TX_ERRORS);
1705 +
1706 + percpu_priv->stats.tx_errors++;
1707 + }
1708 +
1709 + /* hopefully we need not get the timestamp before the hook */
1710 +#ifdef CONFIG_FSL_DPAA_HOOKS
1711 + if (dpaa_eth_hooks.tx_confirm && dpaa_eth_hooks.tx_confirm(net_dev,
1712 + fd, fqid) == DPAA_ETH_STOLEN)
1713 + /* it's the hook that must now perform cleanup */
1714 + return;
1715 +#endif
1716 +	/* This might not perfectly reflect reality if the core dequeuing
1717 + * the Tx confirmation is different from the one that did the enqueue,
1718 + * but at least it'll show up in the total count.
1719 + */
1720 + percpu_priv->tx_confirm++;
1721 +
1722 + skb = _dpa_cleanup_tx_fd(priv, fd);
1723 +
1724 + dev_kfree_skb(skb);
1725 +}
1726 +
1727 +enum qman_cb_dqrr_result
1728 +priv_rx_error_dqrr(struct qman_portal *portal,
1729 + struct qman_fq *fq,
1730 + const struct qm_dqrr_entry *dq)
1731 +{
1732 + struct net_device *net_dev;
1733 + struct dpa_priv_s *priv;
1734 + struct dpa_percpu_priv_s *percpu_priv;
1735 + int *count_ptr;
1736 +
1737 + net_dev = ((struct dpa_fq *)fq)->net_dev;
1738 + priv = netdev_priv(net_dev);
1739 +
1740 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
1741 + count_ptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
1742 +
1743 + if (dpaa_eth_napi_schedule(percpu_priv, portal))
1744 + return qman_cb_dqrr_stop;
1745 +
1746 + if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp, count_ptr)))
1747 + /* Unable to refill the buffer pool due to insufficient
1748 + * system memory. Just release the frame back into the pool,
1749 + * otherwise we'll soon end up with an empty buffer pool.
1750 + */
1751 + dpa_fd_release(net_dev, &dq->fd);
1752 + else
1753 + _dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
1754 +
1755 + return qman_cb_dqrr_consume;
1756 +}
1757 +
1758 +
1759 +enum qman_cb_dqrr_result __hot
1760 +priv_rx_default_dqrr(struct qman_portal *portal,
1761 + struct qman_fq *fq,
1762 + const struct qm_dqrr_entry *dq)
1763 +{
1764 + struct net_device *net_dev;
1765 + struct dpa_priv_s *priv;
1766 + struct dpa_percpu_priv_s *percpu_priv;
1767 + int *count_ptr;
1768 + struct dpa_bp *dpa_bp;
1769 +
1770 + net_dev = ((struct dpa_fq *)fq)->net_dev;
1771 + priv = netdev_priv(net_dev);
1772 + dpa_bp = priv->dpa_bp;
1773 +
1774 + /* Trace the Rx fd */
1775 + trace_dpa_rx_fd(net_dev, fq, &dq->fd);
1776 +
1777 + /* IRQ handler, non-migratable; safe to use raw_cpu_ptr here */
1778 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
1779 + count_ptr = raw_cpu_ptr(dpa_bp->percpu_count);
1780 +
1781 + if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
1782 + return qman_cb_dqrr_stop;
1783 +
1784 + /* Vale of plenty: make sure we didn't run out of buffers */
1785 +
1786 + if (unlikely(dpaa_eth_refill_bpools(dpa_bp, count_ptr)))
1787 + /* Unable to refill the buffer pool due to insufficient
1788 + * system memory. Just release the frame back into the pool,
1789 + * otherwise we'll soon end up with an empty buffer pool.
1790 + */
1791 + dpa_fd_release(net_dev, &dq->fd);
1792 + else
1793 + _dpa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid,
1794 + count_ptr);
1795 +
1796 + return qman_cb_dqrr_consume;
1797 +}
1798 +
1799 +enum qman_cb_dqrr_result
1800 +priv_tx_conf_error_dqrr(struct qman_portal *portal,
1801 + struct qman_fq *fq,
1802 + const struct qm_dqrr_entry *dq)
1803 +{
1804 + struct net_device *net_dev;
1805 + struct dpa_priv_s *priv;
1806 + struct dpa_percpu_priv_s *percpu_priv;
1807 +
1808 + net_dev = ((struct dpa_fq *)fq)->net_dev;
1809 + priv = netdev_priv(net_dev);
1810 +
1811 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
1812 +
1813 + if (dpaa_eth_napi_schedule(percpu_priv, portal))
1814 + return qman_cb_dqrr_stop;
1815 +
1816 + _dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
1817 +
1818 + return qman_cb_dqrr_consume;
1819 +}
1820 +
1821 +enum qman_cb_dqrr_result __hot
1822 +priv_tx_conf_default_dqrr(struct qman_portal *portal,
1823 + struct qman_fq *fq,
1824 + const struct qm_dqrr_entry *dq)
1825 +{
1826 + struct net_device *net_dev;
1827 + struct dpa_priv_s *priv;
1828 + struct dpa_percpu_priv_s *percpu_priv;
1829 +
1830 + net_dev = ((struct dpa_fq *)fq)->net_dev;
1831 + priv = netdev_priv(net_dev);
1832 +
1833 + /* Trace the fd */
1834 + trace_dpa_tx_conf_fd(net_dev, fq, &dq->fd);
1835 +
1836 + /* Non-migratable context, safe to use raw_cpu_ptr */
1837 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
1838 +
1839 + if (dpaa_eth_napi_schedule(percpu_priv, portal))
1840 + return qman_cb_dqrr_stop;
1841 +
1842 + _dpa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
1843 +
1844 + return qman_cb_dqrr_consume;
1845 +}
1846 +
1847 +void priv_ern(struct qman_portal *portal,
1848 + struct qman_fq *fq,
1849 + const struct qm_mr_entry *msg)
1850 +{
1851 + struct net_device *net_dev;
1852 + const struct dpa_priv_s *priv;
1853 + struct sk_buff *skb;
1854 + struct dpa_percpu_priv_s *percpu_priv;
1855 + struct qm_fd fd = msg->ern.fd;
1856 +
1857 + net_dev = ((struct dpa_fq *)fq)->net_dev;
1858 + priv = netdev_priv(net_dev);
1859 + /* Non-migratable context, safe to use raw_cpu_ptr */
1860 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
1861 +
1862 + percpu_priv->stats.tx_dropped++;
1863 + percpu_priv->stats.tx_fifo_errors++;
1864 + count_ern(percpu_priv, msg);
1865 +
1866 + /* If we intended this buffer to go into the pool
1867 + * when the FM was done, we need to put it in
1868 + * manually.
1869 + */
1870 + if (msg->ern.fd.bpid != 0xff) {
1871 + dpa_fd_release(net_dev, &fd);
1872 + return;
1873 + }
1874 +
1875 + skb = _dpa_cleanup_tx_fd(priv, &fd);
1876 + dev_kfree_skb_any(skb);
1877 +}
1878 +
1879 +const struct dpa_fq_cbs_t private_fq_cbs = {
1880 + .rx_defq = { .cb = { .dqrr = priv_rx_default_dqrr } },
1881 + .tx_defq = { .cb = { .dqrr = priv_tx_conf_default_dqrr } },
1882 + .rx_errq = { .cb = { .dqrr = priv_rx_error_dqrr } },
1883 + .tx_errq = { .cb = { .dqrr = priv_tx_conf_error_dqrr } },
1884 + .egress_ern = { .cb = { .ern = priv_ern } }
1885 +};
1886 +EXPORT_SYMBOL(private_fq_cbs);
1887 +
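This table only bundles the callback templates; the code that consumes it, dpa_fq_setup(), lives in dpaa_eth_common.c elsewhere in this patch. As an assumption-level illustration (not the actual dpa_fq_setup() body), each FQ inherits the template matching its role before qman_init_fq() runs:

	switch (dpa_fq->fq_type) {
	case FQ_TYPE_RX_DEFAULT:
		dpa_fq->fq_base = private_fq_cbs.rx_defq;	/* .cb.dqrr */
		break;
	case FQ_TYPE_RX_ERROR:
		dpa_fq->fq_base = private_fq_cbs.rx_errq;
		break;
	/* ... Tx confirm/error and egress ERN are wired the same way ... */
	}
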
1888 +static void dpaa_eth_napi_enable(struct dpa_priv_s *priv)
1889 +{
1890 + struct dpa_percpu_priv_s *percpu_priv;
1891 + int i, j;
1892 +
1893 + for_each_possible_cpu(i) {
1894 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
1895 +
1896 + for (j = 0; j < qman_portal_max; j++)
1897 + napi_enable(&percpu_priv->np[j].napi);
1898 + }
1899 +}
1900 +
1901 +static void dpaa_eth_napi_disable(struct dpa_priv_s *priv)
1902 +{
1903 + struct dpa_percpu_priv_s *percpu_priv;
1904 + int i, j;
1905 +
1906 + for_each_possible_cpu(i) {
1907 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
1908 +
1909 + for (j = 0; j < qman_portal_max; j++)
1910 + napi_disable(&percpu_priv->np[j].napi);
1911 + }
1912 +}
1913 +
1914 +static int __cold dpa_eth_priv_start(struct net_device *net_dev)
1915 +{
1916 + int err;
1917 + struct dpa_priv_s *priv;
1918 +
1919 + priv = netdev_priv(net_dev);
1920 +
1921 + dpaa_eth_napi_enable(priv);
1922 +
1923 + err = dpa_start(net_dev);
1924 + if (err < 0)
1925 + dpaa_eth_napi_disable(priv);
1926 +
1927 + return err;
1928 +}
1929 +
1930 +
1931 +
1932 +static int __cold dpa_eth_priv_stop(struct net_device *net_dev)
1933 +{
1934 + int _errno;
1935 + struct dpa_priv_s *priv;
1936 +
1937 + _errno = dpa_stop(net_dev);
1938 + /* Allow NAPI to consume any frame still in the Rx/TxConfirm
1939 + * ingress queues. This is to avoid a race between the current
1940 + * context and ksoftirqd which could leave NAPI disabled while
1941 + * in fact there's still Rx traffic to be processed.
1942 + */
1943 + usleep_range(5000, 10000);
1944 +
1945 + priv = netdev_priv(net_dev);
1946 + dpaa_eth_napi_disable(priv);
1947 +
1948 + return _errno;
1949 +}
1950 +
1951 +#ifdef CONFIG_NET_POLL_CONTROLLER
1952 +static void dpaa_eth_poll_controller(struct net_device *net_dev)
1953 +{
1954 + struct dpa_priv_s *priv = netdev_priv(net_dev);
1955 + struct dpa_percpu_priv_s *percpu_priv =
1956 + raw_cpu_ptr(priv->percpu_priv);
1957 + struct qman_portal *p;
1958 + const struct qman_portal_config *pc;
1959 + struct dpa_napi_portal *np;
1960 +
1961 + p = (struct qman_portal *)qman_get_affine_portal(smp_processor_id());
1962 + pc = qman_p_get_portal_config(p);
1963 + np = &percpu_priv->np[pc->index];
1964 +
1965 + qman_p_irqsource_remove(np->p, QM_PIRQ_DQRI);
1966 + qman_p_poll_dqrr(np->p, np->napi.weight);
1967 + qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
1968 +}
1969 +#endif
1970 +
1971 +static const struct net_device_ops dpa_private_ops = {
1972 + .ndo_open = dpa_eth_priv_start,
1973 + .ndo_start_xmit = dpa_tx,
1974 + .ndo_stop = dpa_eth_priv_stop,
1975 + .ndo_tx_timeout = dpa_timeout,
1976 + .ndo_get_stats64 = dpa_get_stats64,
1977 + .ndo_set_mac_address = dpa_set_mac_address,
1978 + .ndo_validate_addr = eth_validate_addr,
1979 +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
1980 + .ndo_select_queue = dpa_select_queue,
1981 +#endif
1982 + .ndo_change_mtu = dpa_change_mtu,
1983 + .ndo_set_rx_mode = dpa_set_rx_mode,
1984 + .ndo_init = dpa_ndo_init,
1985 + .ndo_set_features = dpa_set_features,
1986 + .ndo_fix_features = dpa_fix_features,
1987 + .ndo_do_ioctl = dpa_ioctl,
1988 +#ifdef CONFIG_NET_POLL_CONTROLLER
1989 + .ndo_poll_controller = dpaa_eth_poll_controller,
1990 +#endif
1991 +};
1992 +
1993 +static int dpa_private_napi_add(struct net_device *net_dev)
1994 +{
1995 + struct dpa_priv_s *priv = netdev_priv(net_dev);
1996 + struct dpa_percpu_priv_s *percpu_priv;
1997 + int i, cpu;
1998 +
1999 + for_each_possible_cpu(cpu) {
2000 + percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
2001 +
2002 + percpu_priv->np = devm_kzalloc(net_dev->dev.parent,
2003 + qman_portal_max * sizeof(struct dpa_napi_portal),
2004 + GFP_KERNEL);
2005 +
2006 + if (unlikely(percpu_priv->np == NULL)) {
2007 + dev_err(net_dev->dev.parent, "devm_kzalloc() failed\n");
2008 + return -ENOMEM;
2009 + }
2010 +
2011 + for (i = 0; i < qman_portal_max; i++)
2012 + netif_napi_add(net_dev, &percpu_priv->np[i].napi,
2013 + dpaa_eth_poll, DPA_NAPI_WEIGHT);
2014 + }
2015 +
2016 + return 0;
2017 +}
2018 +
2019 +void dpa_private_napi_del(struct net_device *net_dev)
2020 +{
2021 + struct dpa_priv_s *priv = netdev_priv(net_dev);
2022 + struct dpa_percpu_priv_s *percpu_priv;
2023 + int i, cpu;
2024 +
2025 + for_each_possible_cpu(cpu) {
2026 + percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
2027 +
2028 + if (percpu_priv->np) {
2029 + for (i = 0; i < qman_portal_max; i++)
2030 + netif_napi_del(&percpu_priv->np[i].napi);
2031 +
2032 + devm_kfree(net_dev->dev.parent, percpu_priv->np);
2033 + }
2034 + }
2035 +}
2036 +EXPORT_SYMBOL(dpa_private_napi_del);
2037 +
2038 +static int dpa_private_netdev_init(struct net_device *net_dev)
2039 +{
2040 + int i;
2041 + struct dpa_priv_s *priv = netdev_priv(net_dev);
2042 + struct dpa_percpu_priv_s *percpu_priv;
2043 + const uint8_t *mac_addr;
2044 +
2045 +	/* Although we access another CPU's private data here,
2046 +	 * we do it at initialization, so it is safe
2047 + */
2048 + for_each_possible_cpu(i) {
2049 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
2050 + percpu_priv->net_dev = net_dev;
2051 + }
2052 +
2053 + net_dev->netdev_ops = &dpa_private_ops;
2054 + mac_addr = priv->mac_dev->addr;
2055 +
2056 + net_dev->mem_start = priv->mac_dev->res->start;
2057 + net_dev->mem_end = priv->mac_dev->res->end;
2058 +
2059 + net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2060 + NETIF_F_LLTX);
2061 +
2062 + /* Advertise S/G and HIGHDMA support for private interfaces */
2063 + net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
2064 +	/* Recent kernels enable GSO automatically if
2065 + * we declare NETIF_F_SG. For conformity, we'll
2066 + * still declare GSO explicitly.
2067 + */
2068 + net_dev->features |= NETIF_F_GSO;
2069 +
2070 + /* Advertise GRO support */
2071 + net_dev->features |= NETIF_F_GRO;
2072 +
2073 + return dpa_netdev_init(net_dev, mac_addr, tx_timeout);
2074 +}
2075 +
2076 +static struct dpa_bp * __cold
2077 +dpa_priv_bp_probe(struct device *dev)
2078 +{
2079 + struct dpa_bp *dpa_bp;
2080 +
2081 + dpa_bp = devm_kzalloc(dev, sizeof(*dpa_bp), GFP_KERNEL);
2082 + if (unlikely(dpa_bp == NULL)) {
2083 + dev_err(dev, "devm_kzalloc() failed\n");
2084 + return ERR_PTR(-ENOMEM);
2085 + }
2086 +
2087 + dpa_bp->percpu_count = devm_alloc_percpu(dev, *dpa_bp->percpu_count);
2088 + dpa_bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
2089 +
2090 + dpa_bp->seed_cb = dpa_bp_priv_seed;
2091 + dpa_bp->free_buf_cb = _dpa_bp_free_pf;
2092 +
2093 + return dpa_bp;
2094 +}
2095 +
2096 +/* Place all ingress FQs (Rx Default, Rx Error, PCD FQs) in a dedicated CGR.
2097 + * We won't be sending congestion notifications to FMan; for now, we just use
2098 + * this CGR to generate enqueue rejections to FMan in order to drop the frames
2099 + * before they reach our ingress queues and eat up memory.
2100 + */
2101 +static int dpaa_eth_priv_ingress_cgr_init(struct dpa_priv_s *priv)
2102 +{
2103 + struct qm_mcc_initcgr initcgr;
2104 + u32 cs_th;
2105 + int err;
2106 +
2107 + err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
2108 + if (err < 0) {
2109 + pr_err("Error %d allocating CGR ID\n", err);
2110 + goto out_error;
2111 + }
2112 +
2113 + /* Enable CS TD, but disable Congestion State Change Notifications. */
2114 + initcgr.we_mask = QM_CGR_WE_CS_THRES;
2115 + initcgr.cgr.cscn_en = QM_CGR_EN;
2116 + cs_th = CONFIG_FSL_DPAA_INGRESS_CS_THRESHOLD;
2117 + qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
2118 +
2119 + initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
2120 + initcgr.cgr.cstd_en = QM_CGR_EN;
2121 +
2122 + /* This is actually a hack, because this CGR will be associated with
2123 + * our affine SWP. However, we'll place our ingress FQs in it.
2124 + */
2125 + err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
2126 + &initcgr);
2127 + if (err < 0) {
2128 + pr_err("Error %d creating ingress CGR with ID %d\n", err,
2129 + priv->ingress_cgr.cgrid);
2130 + qman_release_cgrid(priv->ingress_cgr.cgrid);
2131 + goto out_error;
2132 + }
2133 + pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
2134 + priv->ingress_cgr.cgrid, priv->mac_dev->addr);
2135 +
2136 + /* struct qman_cgr allows special cgrid values (i.e. outside the 0..255
2137 + * range), but we have no common initialization path between the
2138 + * different variants of the DPAA Eth driver, so we do it here rather
2139 +	 * than modifying every variant other than "private Eth".
2140 + */
2141 + priv->use_ingress_cgr = true;
2142 +
2143 +out_error:
2144 + return err;
2145 +}
2146 +
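Note that the threshold above is not programmed as a raw byte count: qm_cgr_cs_thres_set64() packs it into a mantissa/exponent pair, threshold ~ TA * 2^Tn with an 8-bit mantissa. A rough, assumption-level sketch of the round-up encoding (field names follow the QMan API):

	u64 val = cs_th;		/* desired threshold, in bytes */
	u32 e = 0;
	int oddbit = 0;

	while (val > 0xff) {		/* shrink to fit the 8-bit mantissa */
		oddbit = val & 1;
		val >>= 1;
		e++;
		if (oddbit)		/* round up, never undershoot */
			val++;
	}
	/* th->TA = val; th->Tn = e;   =>  threshold ~= TA << Tn */
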
2147 +static int dpa_priv_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
2148 + size_t count)
2149 +{
2150 + struct dpa_priv_s *priv = netdev_priv(net_dev);
2151 + int i;
2152 +
2153 + if (netif_msg_probe(priv))
2154 + dev_dbg(net_dev->dev.parent,
2155 + "Using private BM buffer pools\n");
2156 +
2157 + priv->bp_count = count;
2158 +
2159 + for (i = 0; i < count; i++) {
2160 + int err;
2161 + err = dpa_bp_alloc(&dpa_bp[i]);
2162 + if (err < 0) {
2163 + dpa_bp_free(priv);
2164 + priv->dpa_bp = NULL;
2165 + return err;
2166 + }
2167 +
2168 + priv->dpa_bp = &dpa_bp[i];
2169 + }
2170 +
2171 + dpa_priv_common_bpid = priv->dpa_bp->bpid;
2172 + return 0;
2173 +}
2174 +
2175 +static const struct of_device_id dpa_match[];
2176 +
2177 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2178 +static int dpa_new_loop_id(void)
2179 +{
2180 + static int if_id;
2181 +
2182 + return if_id++;
2183 +}
2184 +#endif
2185 +
2186 +static int
2187 +dpaa_eth_priv_probe(struct platform_device *_of_dev)
2188 +{
2189 + int err = 0, i, channel;
2190 + struct device *dev;
2191 + struct device_node *dpa_node;
2192 + struct dpa_bp *dpa_bp;
2193 + struct dpa_fq *dpa_fq, *tmp;
2194 + size_t count = 1;
2195 + struct net_device *net_dev = NULL;
2196 + struct dpa_priv_s *priv = NULL;
2197 + struct dpa_percpu_priv_s *percpu_priv;
2198 + struct fm_port_fqs port_fqs;
2199 + struct dpa_buffer_layout_s *buf_layout = NULL;
2200 + struct mac_device *mac_dev;
2201 + struct task_struct *kth;
2202 +
2203 + dev = &_of_dev->dev;
2204 +
2205 + dpa_node = dev->of_node;
2206 +
2207 + if (!of_device_is_available(dpa_node))
2208 + return -ENODEV;
2209 +
2210 + /* Get the buffer pools assigned to this interface;
2211 +	 * run the default pool probing code only once
2212 + */
2213 + dpa_bp = (dpa_bpid2pool(dpa_priv_common_bpid)) ? :
2214 + dpa_priv_bp_probe(dev);
2215 + if (IS_ERR(dpa_bp))
2216 + return PTR_ERR(dpa_bp);
2217 +
2218 + /* Allocate this early, so we can store relevant information in
2219 + * the private area (needed by 1588 code in dpa_mac_probe)
2220 + */
2221 + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
2222 + if (!net_dev) {
2223 + dev_err(dev, "alloc_etherdev_mq() failed\n");
2224 +		err = -ENOMEM;
+		goto alloc_etherdev_mq_failed;
2225 + }
2226 +
2227 + /* Do this here, so we can be verbose early */
2228 + SET_NETDEV_DEV(net_dev, dev);
2229 + dev_set_drvdata(dev, net_dev);
2230 +
2231 + priv = netdev_priv(net_dev);
2232 + priv->net_dev = net_dev;
2233 + strcpy(priv->if_type, "private");
2234 +
2235 + priv->msg_enable = netif_msg_init(debug, -1);
2236 +
2237 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2238 + priv->loop_id = dpa_new_loop_id();
2239 + priv->loop_to = -1; /* disabled by default */
2240 + dpa_loop_netdevs[priv->loop_id] = net_dev;
2241 +#endif
2242 +
2243 + mac_dev = dpa_mac_probe(_of_dev);
2244 + if (IS_ERR(mac_dev) || !mac_dev) {
2245 +		err = mac_dev ? PTR_ERR(mac_dev) : -ENODEV;
2246 + goto mac_probe_failed;
2247 + }
2248 +
2249 + /* We have physical ports, so we need to establish
2250 + * the buffer layout.
2251 + */
2252 + buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
2253 + GFP_KERNEL);
2254 + if (!buf_layout) {
2255 + dev_err(dev, "devm_kzalloc() failed\n");
2256 + goto alloc_failed;
2257 + }
2258 + dpa_set_buffers_layout(mac_dev, buf_layout);
2259 +
2260 +	/* For private ports, we need to compute the size of the default
2261 +	 * buffer pool, based on FMan port buffer layout; also update
2262 + * the maximum buffer size for private ports if necessary
2263 + */
2264 + dpa_bp->size = dpa_bp_size(&buf_layout[RX]);
2265 +
2266 +#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
2267 + /* We only want to use jumbo frame optimization if we actually have
2268 + * L2 MAX FRM set for jumbo frames as well.
2269 + */
2270 + if (fm_get_max_frm() < 9600)
2271 + dev_warn(dev,
2272 + "Invalid configuration: if jumbo frames support is on, FSL_FM_MAX_FRAME_SIZE should be set to 9600\n");
2273 +#endif
2274 +
2275 + INIT_LIST_HEAD(&priv->dpa_fq_list);
2276 +
2277 + memset(&port_fqs, 0, sizeof(port_fqs));
2278 +
2279 + err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs, true, RX);
2280 + if (!err)
2281 + err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
2282 + &port_fqs, true, TX);
2283 +
2284 + if (err < 0)
2285 + goto fq_probe_failed;
2286 +
2287 + /* bp init */
2288 +
2289 + err = dpa_priv_bp_create(net_dev, dpa_bp, count);
2290 +
2291 + if (err < 0)
2292 + goto bp_create_failed;
2293 +
2294 + priv->mac_dev = mac_dev;
2295 +
2296 + channel = dpa_get_channel();
2297 +
2298 + if (channel < 0) {
2299 + err = channel;
2300 + goto get_channel_failed;
2301 + }
2302 +
2303 + priv->channel = (uint16_t)channel;
2304 +
2305 + /* Start a thread that will walk the cpus with affine portals
2306 +	 * and add this pool channel to each one's dequeue mask.
2307 + */
2308 + kth = kthread_run(dpaa_eth_add_channel,
2309 + (void *)(unsigned long)priv->channel,
2310 + "dpaa_%p:%d", net_dev, priv->channel);
2311 +	if (IS_ERR(kth)) {	/* kthread_run() never returns NULL */
2312 +		err = PTR_ERR(kth);
2313 + goto add_channel_failed;
2314 + }
2315 +
2316 + dpa_fq_setup(priv, &private_fq_cbs, priv->mac_dev->port_dev[TX]);
2317 +
2318 + /* Create a congestion group for this netdev, with
2319 +	 * a dynamically allocated CGR ID.
2320 + * Must be executed after probing the MAC, but before
2321 + * assigning the egress FQs to the CGRs.
2322 + */
2323 + err = dpaa_eth_cgr_init(priv);
2324 + if (err < 0) {
2325 + dev_err(dev, "Error initializing CGR\n");
2326 + goto tx_cgr_init_failed;
2327 + }
2328 + err = dpaa_eth_priv_ingress_cgr_init(priv);
2329 + if (err < 0) {
2330 + dev_err(dev, "Error initializing ingress CGR\n");
2331 + goto rx_cgr_init_failed;
2332 + }
2333 +
2334 + /* Add the FQs to the interface, and make them active */
2335 + list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
2336 + err = dpa_fq_init(dpa_fq, false);
2337 + if (err < 0)
2338 + goto fq_alloc_failed;
2339 + }
2340 +
2341 + priv->buf_layout = buf_layout;
2342 + priv->tx_headroom = dpa_get_headroom(&priv->buf_layout[TX]);
2343 + priv->rx_headroom = dpa_get_headroom(&priv->buf_layout[RX]);
2344 +
2345 + /* All real interfaces need their ports initialized */
2346 + dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
2347 + buf_layout, dev);
2348 +
2349 +#ifdef CONFIG_FMAN_PFC
2350 + for (i = 0; i < CONFIG_FMAN_PFC_COS_COUNT; i++) {
2351 + err = fm_port_set_pfc_priorities_mapping_to_qman_wq(
2352 + mac_dev->port_dev[TX], i, i);
2353 + if (unlikely(err != 0)) {
2354 +			dev_err(dev, "Error mapping PFC %u to WQ %u\n", i, i);
2355 + goto pfc_mapping_failed;
2356 + }
2357 + }
2358 +#endif
2359 +
2360 + priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
2361 +
2362 + if (priv->percpu_priv == NULL) {
2363 + dev_err(dev, "devm_alloc_percpu() failed\n");
2364 + err = -ENOMEM;
2365 + goto alloc_percpu_failed;
2366 + }
2367 + for_each_possible_cpu(i) {
2368 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
2369 + memset(percpu_priv, 0, sizeof(*percpu_priv));
2370 + }
2371 +
2372 + /* Initialize NAPI */
2373 + err = dpa_private_napi_add(net_dev);
2374 +
2375 + if (err < 0)
2376 + goto napi_add_failed;
2377 +
2378 + err = dpa_private_netdev_init(net_dev);
2379 +
2380 + if (err < 0)
2381 + goto netdev_init_failed;
2382 +
2383 + dpaa_eth_sysfs_init(&net_dev->dev);
2384 +
2385 +#ifdef CONFIG_PM
2386 + device_set_wakeup_capable(dev, true);
2387 +#endif
2388 +
2389 + pr_info("fsl_dpa: Probed interface %s\n", net_dev->name);
2390 +
2391 + return 0;
2392 +
2393 +netdev_init_failed:
2394 +napi_add_failed:
2395 + dpa_private_napi_del(net_dev);
2396 +alloc_percpu_failed:
2397 +#ifdef CONFIG_FMAN_PFC
2398 +pfc_mapping_failed:
2399 +#endif
2400 + dpa_fq_free(dev, &priv->dpa_fq_list);
2401 +fq_alloc_failed:
2402 + qman_delete_cgr_safe(&priv->ingress_cgr);
2403 + qman_release_cgrid(priv->ingress_cgr.cgrid);
2404 +rx_cgr_init_failed:
2405 + qman_delete_cgr_safe(&priv->cgr_data.cgr);
2406 + qman_release_cgrid(priv->cgr_data.cgr.cgrid);
2407 +tx_cgr_init_failed:
2408 +add_channel_failed:
2409 +get_channel_failed:
2410 + dpa_bp_free(priv);
2411 +bp_create_failed:
2412 +fq_probe_failed:
2413 +alloc_failed:
2414 +mac_probe_failed:
2415 + dev_set_drvdata(dev, NULL);
2416 + free_netdev(net_dev);
2417 +alloc_etherdev_mq_failed:
2418 + if (atomic_read(&dpa_bp->refs) == 0)
2419 + devm_kfree(dev, dpa_bp);
2420 +
2421 + return err;
2422 +}
2423 +
2424 +static const struct of_device_id dpa_match[] = {
2425 + {
2426 + .compatible = "fsl,dpa-ethernet"
2427 + },
2428 + {}
2429 +};
2430 +MODULE_DEVICE_TABLE(of, dpa_match);
2431 +
2432 +static struct platform_driver dpa_driver = {
2433 + .driver = {
2434 + .name = KBUILD_MODNAME,
2435 + .of_match_table = dpa_match,
2436 + .owner = THIS_MODULE,
2437 + .pm = DPAA_PM_OPS,
2438 + },
2439 + .probe = dpaa_eth_priv_probe,
2440 + .remove = dpa_remove
2441 +};
2442 +
2443 +static int __init __cold dpa_load(void)
2444 +{
2445 + int _errno;
2446 +
2447 + pr_info(DPA_DESCRIPTION "\n");
2448 +
2449 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2450 + dpa_debugfs_module_init();
2451 +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
2452 +
2453 + /* initialise dpaa_eth mirror values */
2454 + dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
2455 + dpa_max_frm = fm_get_max_frm();
2456 + dpa_num_cpus = num_possible_cpus();
2457 +
2458 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2459 + memset(dpa_loop_netdevs, 0, sizeof(dpa_loop_netdevs));
2460 +#endif
2461 +
2462 + _errno = platform_driver_register(&dpa_driver);
2463 + if (unlikely(_errno < 0)) {
2464 + pr_err(KBUILD_MODNAME
2465 + ": %s:%hu:%s(): platform_driver_register() = %d\n",
2466 + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
2467 + }
2468 +
2469 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
2470 + KBUILD_BASENAME".c", __func__);
2471 +
2472 + return _errno;
2473 +}
2474 +module_init(dpa_load);
2475 +
2476 +static void __exit __cold dpa_unload(void)
2477 +{
2478 + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
2479 + KBUILD_BASENAME".c", __func__);
2480 +
2481 + platform_driver_unregister(&dpa_driver);
2482 +
2483 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2484 + dpa_debugfs_module_exit();
2485 +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
2486 +
2487 +	/* Only one channel is used and needs to be released after all
2488 + * interfaces are removed
2489 + */
2490 + dpa_release_channel();
2491 +
2492 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
2493 + KBUILD_BASENAME".c", __func__);
2494 +}
2495 +module_exit(dpa_unload);
2496 --- /dev/null
2497 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
2498 @@ -0,0 +1,695 @@
2499 +/* Copyright 2008-2012 Freescale Semiconductor Inc.
2500 + *
2501 + * Redistribution and use in source and binary forms, with or without
2502 + * modification, are permitted provided that the following conditions are met:
2503 + * * Redistributions of source code must retain the above copyright
2504 + * notice, this list of conditions and the following disclaimer.
2505 + * * Redistributions in binary form must reproduce the above copyright
2506 + * notice, this list of conditions and the following disclaimer in the
2507 + * documentation and/or other materials provided with the distribution.
2508 + * * Neither the name of Freescale Semiconductor nor the
2509 + * names of its contributors may be used to endorse or promote products
2510 + * derived from this software without specific prior written permission.
2511 + *
2512 + *
2513 + * ALTERNATIVELY, this software may be distributed under the terms of the
2514 + * GNU General Public License ("GPL") as published by the Free Software
2515 + * Foundation, either version 2 of that License or (at your option) any
2516 + * later version.
2517 + *
2518 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
2519 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
2520 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
2521 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
2522 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
2523 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
2524 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
2525 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2526 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2527 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2528 + */
2529 +
2530 +#ifndef __DPA_H
2531 +#define __DPA_H
2532 +
2533 +#include <linux/netdevice.h>
2534 +#include <linux/fsl_qman.h> /* struct qman_fq */
2535 +
2536 +#include "fm_ext.h"
2537 +#include "dpaa_eth_trace.h"
2538 +
2539 +extern int dpa_rx_extra_headroom;
2540 +extern int dpa_max_frm;
2541 +extern int dpa_num_cpus;
2542 +
2543 +#define dpa_get_rx_extra_headroom() dpa_rx_extra_headroom
2544 +#define dpa_get_max_frm() dpa_max_frm
2545 +
2546 +#define dpa_get_max_mtu() \
2547 + (dpa_get_max_frm() - (VLAN_ETH_HLEN + ETH_FCS_LEN))
2548 +
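A worked example, assuming the common non-jumbo configuration where the FMan maximum frame length is 1522 bytes:

	/* fm_get_max_frm()            = 1522
	 * VLAN_ETH_HLEN + ETH_FCS_LEN = 18 + 4 = 22
	 * dpa_get_max_mtu()           = 1522 - 22 = 1500
	 */
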
2549 +#define __hot
2550 +
2551 +/* Simple enum of FQ types - used for array indexing */
2552 +enum port_type {RX, TX};
2553 +
2554 +/* TODO: This structure should be renamed & moved to the FMD wrapper */
2555 +struct dpa_buffer_layout_s {
2556 + uint16_t priv_data_size;
2557 + bool parse_results;
2558 + bool time_stamp;
2559 + bool hash_results;
2560 + uint8_t manip_extra_space;
2561 + uint16_t data_align;
2562 +};
2563 +
2564 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
2565 +#define DPA_BUG_ON(cond) BUG_ON(cond)
2566 +#else
2567 +#define DPA_BUG_ON(cond)
2568 +#endif
2569 +
2570 +#define DPA_TX_PRIV_DATA_SIZE 16
2571 +#define DPA_PARSE_RESULTS_SIZE sizeof(fm_prs_result_t)
2572 +#define DPA_TIME_STAMP_SIZE 8
2573 +#define DPA_HASH_RESULTS_SIZE 8
2574 +#define DPA_RX_PRIV_DATA_SIZE (DPA_TX_PRIV_DATA_SIZE + \
2575 + dpa_get_rx_extra_headroom())
2576 +
2577 +#define FM_FD_STAT_RX_ERRORS \
2578 + (FM_PORT_FRM_ERR_DMA | FM_PORT_FRM_ERR_PHYSICAL | \
2579 + FM_PORT_FRM_ERR_SIZE | FM_PORT_FRM_ERR_CLS_DISCARD | \
2580 + FM_PORT_FRM_ERR_EXTRACTION | FM_PORT_FRM_ERR_NO_SCHEME | \
2581 + FM_PORT_FRM_ERR_ILL_PLCR | FM_PORT_FRM_ERR_PRS_TIMEOUT | \
2582 + FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | FM_PORT_FRM_ERR_PRS_HDR_ERR)
2583 +
2584 +#define FM_FD_STAT_TX_ERRORS \
2585 + (FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT | \
2586 + FM_PORT_FRM_ERR_LENGTH | FM_PORT_FRM_ERR_DMA)
2587 +
2588 +#ifndef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
2589 +/* The raw buffer size must be cacheline aligned.
2590 + * Normally we use 2K buffers.
2591 + */
2592 +#define DPA_BP_RAW_SIZE 2048
2593 +#else
2594 +/* For jumbo frame optimizations, use buffers large enough to accommodate
2595 + * 9.6K frames, FD maximum offset, skb sh_info overhead and some extra
2596 + * space to account for further alignments.
2597 + */
2598 +#define DPA_MAX_FRM_SIZE 9600
2599 +#define DPA_BP_RAW_SIZE \
2600 + ((DPA_MAX_FRM_SIZE + DPA_MAX_FD_OFFSET + \
2601 + sizeof(struct skb_shared_info) + 128) & ~(SMP_CACHE_BYTES - 1))
2602 +#endif
2603 +
2604 +/* This is all that FMan is ever allowed to use.
2605 + * FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
2606 + * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
2607 + * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
2608 + * half-page-aligned buffers (can we?), so we reserve some more space
2609 + * for start-of-buffer alignment.
2610 + */
2611 +#define dpa_bp_size(buffer_layout) (SKB_WITH_OVERHEAD(DPA_BP_RAW_SIZE) - \
2612 + SMP_CACHE_BYTES)
2613 +/* We must ensure that skb_shinfo is always cacheline-aligned. */
2614 +#define DPA_SKB_SIZE(size) ((size) & ~(SMP_CACHE_BYTES - 1))
2615 +
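To make the sizing concrete, one possible instantiation; the shared-info overhead is an assumption (it varies with kernel version and architecture):

	/* With DPA_BP_RAW_SIZE = 2048, SMP_CACHE_BYTES = 64 and, say,
	 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) = 320:
	 *   SKB_WITH_OVERHEAD(2048) = 2048 - 320 = 1728
	 *   dpa_bp_size()           = 1728 - 64  = 1664 usable bytes
	 */
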
2616 +/* Maximum size of a buffer for which recycling is allowed.
2617 + * We need an upper limit such that forwarded skbs that get reallocated on Tx
2618 + * aren't allowed to grow unboundedly. On the other hand, we need to make sure
2619 + * that skbs allocated by us will not fail to be recycled due to their size.
2620 + *
2621 + * For a requested size, the kernel allocator provides the next power of two
2622 + * sized block, which the stack will use as is, regardless of the actual size
2623 + * it required; since we must accommodate at most 9.6K buffers (L2 maximum
2624 + * supported frame size), set the recycling upper limit to 16K.
2625 + */
2626 +#define DPA_RECYCLE_MAX_SIZE 16384
2627 +
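One concrete case of the power-of-two effect described above:

	/* An skb needing ~9.6K of data is served from a 16K slab block
	 * (the next power of two), which lands exactly on the cap; anything
	 * that would spill into a 32K block is refused for recycling.
	 */
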
2628 +#if defined(CONFIG_FSL_SDK_FMAN_TEST)
2629 +/* TODO: temporary for fman pcd testing */
2630 +#define FMAN_PCD_TESTS_MAX_NUM_RANGES 20
2631 +#endif
2632 +
2633 +#define DPAA_ETH_FQ_DELTA 0x10000
2634 +
2635 +#define DPAA_ETH_PCD_FQ_BASE(device_addr) \
2636 + (((device_addr) & 0x1fffff) >> 6)
2637 +
2638 +#define DPAA_ETH_PCD_FQ_HI_PRIO_BASE(device_addr) \
2639 + (DPAA_ETH_FQ_DELTA + DPAA_ETH_PCD_FQ_BASE(device_addr))
2640 +
2641 +/* Largest value that the FQD's OAL field can hold.
2642 + * This is DPAA-1.x specific.
2643 + * TODO: This rather belongs in fsl_qman.h
2644 + */
2645 +#define FSL_QMAN_MAX_OAL 127
2646 +
2647 +/* Maximum offset value for a contig or sg FD (represented on 9 bits) */
2648 +#define DPA_MAX_FD_OFFSET ((1 << 9) - 1)
2649 +
2650 +/* Default alignment for start of data in an Rx FD */
2651 +#define DPA_FD_DATA_ALIGNMENT 16
2652 +
2653 +/* Values for the L3R field of the FM Parse Results
2654 + */
2655 +/* L3 Type field: First IP Present IPv4 */
2656 +#define FM_L3_PARSE_RESULT_IPV4 0x8000
2657 +/* L3 Type field: First IP Present IPv6 */
2658 +#define FM_L3_PARSE_RESULT_IPV6 0x4000
2659 +
2660 +/* Values for the L4R field of the FM Parse Results
2661 + * See §8.8.4.7.20 - L4 HXS - L4 Results from the DPAA-Rev2 Reference Manual.
2662 + */
2663 +/* L4 Type field: UDP */
2664 +#define FM_L4_PARSE_RESULT_UDP 0x40
2665 +/* L4 Type field: TCP */
2666 +#define FM_L4_PARSE_RESULT_TCP 0x20
2667 +/* FD status field indicating whether the FM Parser has attempted to validate
2668 + * the L4 csum of the frame.
2669 + * Note that having this bit set doesn't necessarily imply that the checksum
2670 + * is valid. One would have to check the parse results to find that out.
2671 + */
2672 +#define FM_FD_STAT_L4CV 0x00000004
2673 +
2674 +
2675 +#define FM_FD_STAT_ERR_PHYSICAL FM_PORT_FRM_ERR_PHYSICAL
2676 +
2677 +/* Check if the parsed frame was found to be a TCP segment.
2678 + *
2679 + * @parse_result_ptr must be of type (fm_prs_result_t *).
2680 + */
2681 +#define fm_l4_frame_is_tcp(parse_result_ptr) \
2682 + ((parse_result_ptr)->l4r & FM_L4_PARSE_RESULT_TCP)
2683 +
2684 +/* number of Tx queues to FMan */
2685 +#ifdef CONFIG_FMAN_PFC
2686 +#define DPAA_ETH_TX_QUEUES (NR_CPUS * CONFIG_FMAN_PFC_COS_COUNT)
2687 +#else
2688 +#define DPAA_ETH_TX_QUEUES NR_CPUS
2689 +#endif
2690 +
2691 +#define DPAA_ETH_RX_QUEUES 128
2692 +
2693 +/* Convenience macros for storing/retrieving the skb back-pointers. They must
2694 + * accommodate both recycling and confirmation paths - i.e. both buffers
2695 + * allocated by ourselves and buffers coming from the stack. In the former
2696 + * case, we could store the skb at a negative offset; in the latter case,
2697 + * we can't, so we'll use 0 as the offset.
2698 + *
2699 + * NB: @off is an offset from a (struct sk_buff **) pointer!
2700 + */
2701 +#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off) \
2702 +{ \
2703 + skbh = (struct sk_buff **)addr; \
2704 + *(skbh + (off)) = skb; \
2705 +}
2706 +#define DPA_READ_SKB_PTR(skb, skbh, addr, off) \
2707 +{ \
2708 + skbh = (struct sk_buff **)addr; \
2709 + skb = *(skbh + (off)); \
2710 +}
2711 +
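A minimal usage sketch; the buffer virtual address (vaddr) and the surrounding Tx flow are hypothetical. The skb pointer is parked at the start of the buffer before enqueue, so the confirmation path can recover it from the FD address alone:

	struct sk_buff **skbh;

	DPA_WRITE_SKB_PTR(skb, skbh, vaddr, 0);	/* stash before enqueue */
	/* ... FD enqueued, frame sent, Tx confirmation dequeued ... */
	DPA_READ_SKB_PTR(skb, skbh, vaddr, 0);	/* recover on Tx confirm */
	dev_kfree_skb(skb);
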
2712 +#ifdef CONFIG_PM
2713 +/* Magic Packet wakeup */
2714 +#define DPAA_WOL_MAGIC 0x00000001
2715 +#endif
2716 +
2717 +#if defined(CONFIG_FSL_SDK_FMAN_TEST)
2718 +struct pcd_range {
2719 + uint32_t base;
2720 + uint32_t count;
2721 +};
2722 +#endif
2723 +
2724 +/* More detailed FQ types - used for fine-grained WQ assignments */
2725 +enum dpa_fq_type {
2726 + FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
2727 + FQ_TYPE_RX_ERROR, /* Rx Error FQs */
2728 + FQ_TYPE_RX_PCD, /* User-defined PCDs */
2729 + FQ_TYPE_TX, /* "Real" Tx FQs */
2730 + FQ_TYPE_TX_CONFIRM, /* Tx default Conf FQ (actually an Rx FQ) */
2731 + FQ_TYPE_TX_CONF_MQ, /* Tx conf FQs (one for each Tx FQ) */
2732 + FQ_TYPE_TX_ERROR, /* Tx Error FQs (these are actually Rx FQs) */
2733 + FQ_TYPE_RX_PCD_HI_PRIO, /* User-defined high-priority PCDs */
2734 +};
2735 +
2736 +struct dpa_fq {
2737 + struct qman_fq fq_base;
2738 + struct list_head list;
2739 + struct net_device *net_dev;
2740 + bool init;
2741 + uint32_t fqid;
2742 + uint32_t flags;
2743 + uint16_t channel;
2744 + uint8_t wq;
2745 + enum dpa_fq_type fq_type;
2746 +};
2747 +
2748 +struct dpa_fq_cbs_t {
2749 + struct qman_fq rx_defq;
2750 + struct qman_fq tx_defq;
2751 + struct qman_fq rx_errq;
2752 + struct qman_fq tx_errq;
2753 + struct qman_fq egress_ern;
2754 +};
2755 +
2756 +struct fqid_cell {
2757 + uint32_t start;
2758 + uint32_t count;
2759 +};
2760 +
2761 +struct dpa_bp {
2762 + struct bman_pool *pool;
2763 + uint8_t bpid;
2764 + struct device *dev;
2765 + union {
2766 + /* The buffer pools used for the private ports are initialized
2767 + * with target_count buffers for each CPU; at runtime the
2768 + * number of buffers per CPU is constantly brought back to this
2769 + * level
2770 + */
2771 + int target_count;
2772 + /* The configured value for the number of buffers in the pool,
2773 + * used for shared port buffer pools
2774 + */
2775 + int config_count;
2776 + };
2777 + size_t size;
2778 + bool seed_pool;
2779 + /* physical address of the contiguous memory used by the pool to store
2780 + * the buffers
2781 + */
2782 + dma_addr_t paddr;
2783 + /* virtual address of the contiguous memory used by the pool to store
2784 + * the buffers
2785 + */
2786 + void __iomem *vaddr;
2787 +	/* current number of buffers in the bpool allotted to this CPU */
2788 + int __percpu *percpu_count;
2789 + atomic_t refs;
2790 + /* some bpools need to be seeded before use by this cb */
2791 + int (*seed_cb)(struct dpa_bp *);
2792 + /* some bpools need to be emptied before freeing; this cb is used
2793 + * for freeing of individual buffers taken from the pool
2794 + */
2795 + void (*free_buf_cb)(void *addr);
2796 +};
2797 +
2798 +struct dpa_rx_errors {
2799 + u64 dme; /* DMA Error */
2800 + u64 fpe; /* Frame Physical Error */
2801 + u64 fse; /* Frame Size Error */
2802 + u64 phe; /* Header Error */
2803 + u64 cse; /* Checksum Validation Error */
2804 +};
2805 +
2806 +/* Counters for QMan ERN frames - one counter per rejection code */
2807 +struct dpa_ern_cnt {
2808 + u64 cg_tdrop; /* Congestion group taildrop */
2809 + u64 wred; /* WRED congestion */
2810 + u64 err_cond; /* Error condition */
2811 + u64 early_window; /* Order restoration, frame too early */
2812 + u64 late_window; /* Order restoration, frame too late */
2813 + u64 fq_tdrop; /* FQ taildrop */
2814 + u64 fq_retired; /* FQ is retired */
2815 + u64 orp_zero; /* ORP disabled */
2816 +};
2817 +
2818 +struct dpa_napi_portal {
2819 + struct napi_struct napi;
2820 + struct qman_portal *p;
2821 +};
2822 +
2823 +struct dpa_percpu_priv_s {
2824 + struct net_device *net_dev;
2825 + struct dpa_napi_portal *np;
2826 + u64 in_interrupt;
2827 + u64 tx_returned;
2828 + u64 tx_confirm;
2829 + /* fragmented (non-linear) skbuffs received from the stack */
2830 + u64 tx_frag_skbuffs;
2831 + /* number of S/G frames received */
2832 + u64 rx_sg;
2833 +
2834 + struct rtnl_link_stats64 stats;
2835 + struct dpa_rx_errors rx_errors;
2836 + struct dpa_ern_cnt ern_cnt;
2837 +};
2838 +
2839 +struct dpa_priv_s {
2840 + struct dpa_percpu_priv_s __percpu *percpu_priv;
2841 + struct dpa_bp *dpa_bp;
2842 + /* Store here the needed Tx headroom for convenience and speed
2843 + * (even though it can be computed based on the fields of buf_layout)
2844 + */
2845 + uint16_t tx_headroom;
2846 + struct net_device *net_dev;
2847 + struct mac_device *mac_dev;
2848 + struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];
2849 + struct qman_fq *conf_fqs[DPAA_ETH_TX_QUEUES];
2850 +
2851 + size_t bp_count;
2852 +
2853 + uint16_t channel; /* "fsl,qman-channel-id" */
2854 + struct list_head dpa_fq_list;
2855 +
2856 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2857 + struct dentry *debugfs_loop_file;
2858 +#endif
2859 +
2860 + uint32_t msg_enable; /* net_device message level */
2861 +#ifdef CONFIG_FSL_DPAA_1588
2862 + struct dpa_ptp_tsu *tsu;
2863 +#endif
2864 +
2865 +#if defined(CONFIG_FSL_SDK_FMAN_TEST)
2866 +/* TODO: this is temporary until pcd support is implemented in dpaa */
2867 + int priv_pcd_num_ranges;
2868 + struct pcd_range priv_pcd_ranges[FMAN_PCD_TESTS_MAX_NUM_RANGES];
2869 +#endif
2870 +
2871 + struct {
2872 + /**
2873 + * All egress queues to a given net device belong to one
2874 + * (and the same) congestion group.
2875 + */
2876 + struct qman_cgr cgr;
2877 + /* If congested, when it began. Used for performance stats. */
2878 + u32 congestion_start_jiffies;
2879 + /* Number of jiffies the Tx port was congested. */
2880 + u32 congested_jiffies;
2881 + /**
2882 + * Counter for the number of times the CGR
2883 + * entered congestion state
2884 + */
2885 + u32 cgr_congested_count;
2886 + } cgr_data;
2887 + /* Use a per-port CGR for ingress traffic. */
2888 + bool use_ingress_cgr;
2889 + struct qman_cgr ingress_cgr;
2890 +
2891 +#ifdef CONFIG_FSL_DPAA_TS
2892 + bool ts_tx_en; /* Tx timestamping enabled */
2893 + bool ts_rx_en; /* Rx timestamping enabled */
2894 +#endif /* CONFIG_FSL_DPAA_TS */
2895 +
2896 + struct dpa_buffer_layout_s *buf_layout;
2897 + uint16_t rx_headroom;
2898 + char if_type[30];
2899 +
2900 + void *peer;
2901 +#ifdef CONFIG_PM
2902 + u32 wol;
2903 +#endif
2904 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2905 + int loop_id;
2906 + int loop_to;
2907 +#endif
2908 +#ifdef CONFIG_FSL_DPAA_CEETM
2909 + bool ceetm_en; /* CEETM QoS enabled */
2910 +#endif
2911 +};
2912 +
2913 +struct fm_port_fqs {
2914 + struct dpa_fq *tx_defq;
2915 + struct dpa_fq *tx_errq;
2916 + struct dpa_fq *rx_defq;
2917 + struct dpa_fq *rx_errq;
2918 +};
2919 +
2920 +
2921 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
2922 +extern struct net_device *dpa_loop_netdevs[20];
2923 +#endif
2924 +
2925 +/* functions with different implementation for SG and non-SG: */
2926 +int dpa_bp_priv_seed(struct dpa_bp *dpa_bp);
2927 +int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *count_ptr);
2928 +void __hot _dpa_rx(struct net_device *net_dev,
2929 + struct qman_portal *portal,
2930 + const struct dpa_priv_s *priv,
2931 + struct dpa_percpu_priv_s *percpu_priv,
2932 + const struct qm_fd *fd,
2933 + u32 fqid,
2934 + int *count_ptr);
2935 +int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev);
2936 +int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev,
2937 + struct qman_fq *egress_fq, struct qman_fq *conf_fq);
2938 +struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
2939 + const struct qm_fd *fd);
2940 +void __hot _dpa_process_parse_results(const fm_prs_result_t *parse_results,
2941 + const struct qm_fd *fd,
2942 + struct sk_buff *skb,
2943 + int *use_gro);
2944 +#ifndef CONFIG_FSL_DPAA_TS
2945 +bool dpa_skb_is_recyclable(struct sk_buff *skb);
2946 +bool dpa_buf_is_recyclable(struct sk_buff *skb,
2947 + uint32_t min_size,
2948 + uint16_t min_offset,
2949 + unsigned char **new_buf_start);
2950 +#endif
2951 +int __hot skb_to_contig_fd(struct dpa_priv_s *priv,
2952 + struct sk_buff *skb, struct qm_fd *fd,
2953 + int *count_ptr, int *offset);
2954 +int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
2955 + struct sk_buff *skb, struct qm_fd *fd);
2956 +int __cold __attribute__((nonnull))
2957 + _dpa_fq_free(struct device *dev, struct qman_fq *fq);
2958 +
2959 +/* Turn on HW checksum computation for this outgoing frame.
2960 + * If the current protocol is not something we support in this regard
2961 + * (or if the stack has already computed the SW checksum), we do nothing.
2962 + *
2963 + * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
2964 + * otherwise.
2965 + *
2966 + * Note that this function may modify the fd->cmd field and the skb data buffer
2967 + * (the Parse Results area).
2968 + */
2969 +int dpa_enable_tx_csum(struct dpa_priv_s *priv,
2970 + struct sk_buff *skb, struct qm_fd *fd, char *parse_results);
2971 +
2972 +static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv,
2973 + struct qman_portal *portal)
2974 +{
2975 +	/* On RT-enabled kernels with threaded ISRs,
2976 +	 * in_irq() does not return the appropriate value, so use
2977 +	 * in_serving_softirq() to distinguish softirq from irq context.
2978 + */
2979 + if (unlikely(in_irq() || !in_serving_softirq())) {
2980 + /* Disable QMan IRQ and invoke NAPI */
2981 + int ret = qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
2982 + if (likely(!ret)) {
2983 + const struct qman_portal_config *pc =
2984 + qman_p_get_portal_config(portal);
2985 + struct dpa_napi_portal *np =
2986 + &percpu_priv->np[pc->index];
2987 +
2988 + np->p = portal;
2989 + napi_schedule(&np->napi);
2990 + percpu_priv->in_interrupt++;
2991 + return 1;
2992 + }
2993 + }
2994 + return 0;
2995 +}
2996 +
2997 +static inline ssize_t __const __must_check __attribute__((nonnull))
2998 +dpa_fd_length(const struct qm_fd *fd)
2999 +{
3000 + return fd->length20;
3001 +}
3002 +
3003 +static inline ssize_t __const __must_check __attribute__((nonnull))
3004 +dpa_fd_offset(const struct qm_fd *fd)
3005 +{
3006 + return fd->offset;
3007 +}
3008 +
3009 +/* Verifies that the skb length does not exceed the interface MTU */
3010 +static inline int dpa_check_rx_mtu(struct sk_buff *skb, int mtu)
3011 +{
3012 + if (unlikely(skb->len > mtu))
3013 + if ((skb->protocol != htons(ETH_P_8021Q))
3014 + || (skb->len > mtu + 4))
3015 + return -1;
3016 +
3017 + return 0;
3018 +}
3019 +
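Example of the 802.1Q allowance, with mtu = 1500:

	/* a 1504-byte VLAN-tagged skb passes (1504 <= 1500 + 4),
	 * while a 1504-byte untagged skb is rejected with -1
	 */
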
3020 +static inline uint16_t dpa_get_headroom(struct dpa_buffer_layout_s *bl)
3021 +{
3022 + uint16_t headroom;
3023 + /* The frame headroom must accommodate:
3024 + * - the driver private data area
3025 + * - parse results, hash results, timestamp if selected
3026 + * - manip extra space
3027 +	 * If either hash results or the time stamp is selected, both will
3028 +	 * be copied to/from the frame headroom, as TS is located between PR and
3029 +	 * HR in the IC and the IC copy size has a granularity of 16 bytes
3030 +	 * (see description of the FMBM_RICP and FMBM_TICP registers in DPAARM).
3031 + *
3032 + * Also make sure the headroom is a multiple of data_align bytes
3033 + */
3034 + headroom = (uint16_t)(bl->priv_data_size +
3035 + (bl->parse_results ? DPA_PARSE_RESULTS_SIZE : 0) +
3036 + (bl->hash_results || bl->time_stamp ?
3037 + DPA_TIME_STAMP_SIZE + DPA_HASH_RESULTS_SIZE : 0) +
3038 + bl->manip_extra_space);
3039 +
3040 + return bl->data_align ? ALIGN(headroom, bl->data_align) : headroom;
3041 +}
3042 +
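A worked instance under assumed values: priv_data_size = 16, parse results and hash results selected (so TS + HR space is reserved), no manip space, sizeof(fm_prs_result_t) = 32 and data_align = 16:

	/* headroom = 16 + 32 + (8 + 8) + 0 = 64
	 * ALIGN(64, 16) = 64 bytes reserved in front of the frame data
	 */
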
3043 +int fm_mac_dump_regs(struct mac_device *h_dev, char *buf, int n);
3044 +int fm_mac_dump_rx_stats(struct mac_device *h_dev, char *buf, int n);
3045 +int fm_mac_dump_tx_stats(struct mac_device *h_dev, char *buf, int n);
3046 +
3047 +void dpaa_eth_sysfs_remove(struct device *dev);
3048 +void dpaa_eth_sysfs_init(struct device *dev);
3049 +int dpaa_eth_poll(struct napi_struct *napi, int budget);
3050 +
3051 +void dpa_private_napi_del(struct net_device *net_dev);
3052 +
3053 +/* Equivalent to a memset(0), but works faster */
3054 +static inline void clear_fd(struct qm_fd *fd)
3055 +{
3056 + fd->opaque_addr = 0;
3057 + fd->opaque = 0;
3058 + fd->cmd = 0;
3059 +}
3060 +
3061 +static inline int _dpa_tx_fq_to_id(const struct dpa_priv_s *priv,
3062 + struct qman_fq *tx_fq)
3063 +{
3064 + int i;
3065 +
3066 + for (i = 0; i < DPAA_ETH_TX_QUEUES; i++)
3067 + if (priv->egress_fqs[i] == tx_fq)
3068 + return i;
3069 +
3070 + return -EINVAL;
3071 +}
3072 +
3073 +static inline int __hot dpa_xmit(struct dpa_priv_s *priv,
3074 + struct rtnl_link_stats64 *percpu_stats,
3075 + struct qm_fd *fd, struct qman_fq *egress_fq,
3076 + struct qman_fq *conf_fq)
3077 +{
3078 + int err, i;
3079 +
3080 + if (fd->bpid == 0xff)
3081 + fd->cmd |= qman_fq_fqid(conf_fq);
3082 +
3083 + /* Trace this Tx fd */
3084 + trace_dpa_tx_fd(priv->net_dev, egress_fq, fd);
3085 +
3086 + for (i = 0; i < 100000; i++) {
3087 + err = qman_enqueue(egress_fq, fd, 0);
3088 + if (err != -EBUSY)
3089 + break;
3090 + }
3091 +
3092 + if (unlikely(err < 0)) {
3093 + /* TODO differentiate b/w -EBUSY (EQCR full) and other codes? */
3094 + percpu_stats->tx_errors++;
3095 + percpu_stats->tx_fifo_errors++;
3096 + return err;
3097 + }
3098 +
3099 + percpu_stats->tx_packets++;
3100 + percpu_stats->tx_bytes += dpa_fd_length(fd);
3101 +
3102 + return 0;
3103 +}
3104 +
3105 +/* Use multiple WQs for FQ assignment:
3106 + * - Tx Confirmation queues go to WQ1.
3107 + * - Rx Default, Tx and PCD queues go to WQ3 (no differentiation between
3108 + * Rx and Tx traffic, or between Rx Default and Rx PCD frames).
3109 + * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
3110 + * to be scheduled, in case there are many more FQs in WQ3).
3111 + * This ensures that Tx-confirmed buffers are timely released. In particular,
3112 + * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
3113 + * are greatly outnumbered by other FQs in the system (usually PCDs), while
3114 + * dequeue scheduling is round-robin.
3115 + */
3116 +static inline void _dpa_assign_wq(struct dpa_fq *fq)
3117 +{
3118 + switch (fq->fq_type) {
3119 + case FQ_TYPE_TX_CONFIRM:
3120 + case FQ_TYPE_TX_CONF_MQ:
3121 + fq->wq = 1;
3122 + break;
3123 + case FQ_TYPE_RX_DEFAULT:
3124 + case FQ_TYPE_TX:
3125 + fq->wq = 3;
3126 + break;
3127 + case FQ_TYPE_RX_ERROR:
3128 + case FQ_TYPE_TX_ERROR:
3129 + case FQ_TYPE_RX_PCD_HI_PRIO:
3130 + fq->wq = 2;
3131 + break;
3132 + case FQ_TYPE_RX_PCD:
3133 + fq->wq = 5;
3134 + break;
3135 + default:
3136 + WARN(1, "Invalid FQ type %d for FQID %d!\n",
3137 + fq->fq_type, fq->fqid);
3138 + }
3139 +}
3140 +
3141 +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
3142 +/* Use in lieu of skb_get_queue_mapping() */
3143 +#ifdef CONFIG_FMAN_PFC
3144 +#define dpa_get_queue_mapping(skb) \
3145 + (((skb)->priority < CONFIG_FMAN_PFC_COS_COUNT) ? \
3146 + ((skb)->priority * dpa_num_cpus + smp_processor_id()) : \
3147 + ((CONFIG_FMAN_PFC_COS_COUNT - 1) * \
3148 +	 dpa_num_cpus + smp_processor_id()))
3149 +
3150 +#else
3151 +#define dpa_get_queue_mapping(skb) \
3152 + raw_smp_processor_id()
3153 +#endif
3154 +#else
3155 +/* Use the queue selected by XPS */
3156 +#define dpa_get_queue_mapping(skb) \
3157 + skb_get_queue_mapping(skb)
3158 +#endif
3159 +
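A worked example of the PFC mapping, under assumed values CONFIG_FMAN_PFC_COS_COUNT = 3 and dpa_num_cpus = 4:

	/* skb->priority = 1 on CPU 2  ->  Tx queue 1 * 4 + 2 = 6
	 * skb->priority = 7 on CPU 2  ->  clamped to (3 - 1) * 4 + 2 = 10
	 */
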
3160 +#ifdef CONFIG_PTP_1588_CLOCK_DPAA
3161 +struct ptp_priv_s {
3162 + struct device_node *node;
3163 + struct platform_device *of_dev;
3164 + struct mac_device *mac_dev;
3165 +};
3166 +extern struct ptp_priv_s ptp_priv;
3167 +#endif
3168 +
3169 +static inline void _dpa_bp_free_pf(void *addr)
3170 +{
3171 + put_page(virt_to_head_page(addr));
3172 +}
3173 +
3174 +/* TODO: the LS1043A SoC has a HW issue regarding FMan DMA transactions; the
3175 + * issue manifests itself at high traffic rates when frames cross 4K memory
3176 + * boundaries. For the moment, we use a SW workaround to avoid frames larger
3177 + * than 4K or that cross 4K alignment boundaries.
3178 + */
3179 +
3180 +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
3181 +#define DPAA_LS1043A_DMA_4K_ISSUE 1
3182 +#endif
3183 +
3184 +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
3185 +#define HAS_DMA_ISSUE(start, size) \
3186 + (((unsigned long)(start) ^ ((unsigned long)(start) + \
3187 + (unsigned long)(size))) & ~0xFFF)
3188 +
3189 +#define BOUNDARY_4K(start, size) (((unsigned long)(start) + \
3190 + (unsigned long)(size)) & ~0xFFF)
3191 +#endif /* DPAA_LS1043A_DMA_4K_ISSUE */
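Concretely, with hypothetical addresses:

	/* start = 0x2F80, size = 0x100: start + size = 0x3080 crosses into
	 * the next 4K page, so start ^ end = 0x1F00 and & ~0xFFF leaves
	 * 0x1000 -- HAS_DMA_ISSUE() flags the frame. The same frame at
	 * 0x2E00 ends at 0x2F00, stays in one 4K window and passes.
	 */
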
3192 +
3193 +#endif /* __DPA_H */
3194 --- /dev/null
3195 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.c
3196 @@ -0,0 +1,263 @@
3197 +/* Copyright 2008-2013 Freescale Semiconductor, Inc.
3198 + *
3199 + * Redistribution and use in source and binary forms, with or without
3200 + * modification, are permitted provided that the following conditions are met:
3201 + * * Redistributions of source code must retain the above copyright
3202 + * notice, this list of conditions and the following disclaimer.
3203 + * * Redistributions in binary form must reproduce the above copyright
3204 + * notice, this list of conditions and the following disclaimer in the
3205 + * documentation and/or other materials provided with the distribution.
3206 + * * Neither the name of Freescale Semiconductor nor the
3207 + * names of its contributors may be used to endorse or promote products
3208 + * derived from this software without specific prior written permission.
3209 + *
3210 + *
3211 + * ALTERNATIVELY, this software may be distributed under the terms of the
3212 + * GNU General Public License ("GPL") as published by the Free Software
3213 + * Foundation, either version 2 of that License or (at your option) any
3214 + * later version.
3215 + *
3216 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
3217 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
3218 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
3219 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
3220 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
3221 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
3222 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
3223 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3224 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3225 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3226 + */
3227 +
3228 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
3229 +#define pr_fmt(fmt) \
3230 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
3231 + KBUILD_BASENAME".c", __LINE__, __func__
3232 +#else
3233 +#define pr_fmt(fmt) \
3234 + KBUILD_MODNAME ": " fmt
3235 +#endif
3236 +
3237 +#include <linux/init.h>
3238 +#include <linux/module.h>
3239 +#include <linux/io.h>
3240 +#include <linux/of_platform.h>
3241 +#include <linux/of_net.h>
3242 +#include <linux/etherdevice.h>
3243 +#include <linux/kthread.h>
3244 +#include <linux/percpu.h>
3245 +#include <linux/highmem.h>
3246 +#include <linux/sort.h>
3247 +#include <linux/fsl_qman.h>
3248 +#include "dpaa_eth.h"
3249 +#include "dpaa_eth_common.h"
3250 +#include "dpaa_eth_base.h"
3251 +
3252 +#define DPA_DESCRIPTION "FSL DPAA Advanced drivers:"
3253 +
3254 +MODULE_LICENSE("Dual BSD/GPL");
3255 +
3256 +uint8_t advanced_debug = -1;
3257 +module_param(advanced_debug, byte, S_IRUGO);
3258 +MODULE_PARM_DESC(advanced_debug, "Module/Driver verbosity level");
3259 +EXPORT_SYMBOL(advanced_debug);
3260 +
3261 +static int dpa_bp_cmp(const void *dpa_bp0, const void *dpa_bp1)
3262 +{
3263 + return ((struct dpa_bp *)dpa_bp0)->size -
3264 + ((struct dpa_bp *)dpa_bp1)->size;
3265 +}
3266 +
3267 +struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
3268 +dpa_bp_probe(struct platform_device *_of_dev, size_t *count)
3269 +{
3270 + int i, lenp, na, ns, err;
3271 + struct device *dev;
3272 + struct device_node *dev_node;
3273 + const __be32 *bpool_cfg;
3274 + struct dpa_bp *dpa_bp;
3275 + u32 bpid;
3276 +
3277 + dev = &_of_dev->dev;
3278 +
3279 +	err = of_count_phandle_with_args(dev->of_node,
3280 +			"fsl,bman-buffer-pools", NULL);
3281 +	if (err < 1) {
3282 +		dev_err(dev, "missing fsl,bman-buffer-pools device tree entry\n");
3283 +		return ERR_PTR(-EINVAL);
3284 +	}
3285 +	*count = err;
3286 + dpa_bp = devm_kzalloc(dev, *count * sizeof(*dpa_bp), GFP_KERNEL);
3287 + if (dpa_bp == NULL) {
3288 + dev_err(dev, "devm_kzalloc() failed\n");
3289 + return ERR_PTR(-ENOMEM);
3290 + }
3291 +
3292 + dev_node = of_find_node_by_path("/");
3293 + if (unlikely(dev_node == NULL)) {
3294 + dev_err(dev, "of_find_node_by_path(/) failed\n");
3295 + return ERR_PTR(-EINVAL);
3296 + }
3297 +
3298 + na = of_n_addr_cells(dev_node);
3299 + ns = of_n_size_cells(dev_node);
3300 +
3301 + for (i = 0; i < *count; i++) {
3302 + of_node_put(dev_node);
3303 +
3304 + dev_node = of_parse_phandle(dev->of_node,
3305 + "fsl,bman-buffer-pools", i);
3306 + if (dev_node == NULL) {
3307 +			dev_err(dev, "of_parse_phandle() failed\n");
3308 + return ERR_PTR(-EFAULT);
3309 + }
3310 +
3311 + if (unlikely(!of_device_is_compatible(dev_node, "fsl,bpool"))) {
3312 + dev_err(dev,
3313 + "!of_device_is_compatible(%s, fsl,bpool)\n",
3314 + dev_node->full_name);
3315 + dpa_bp = ERR_PTR(-EINVAL);
3316 + goto _return_of_node_put;
3317 + }
3318 +
3319 + err = of_property_read_u32(dev_node, "fsl,bpid", &bpid);
3320 + if (err) {
3321 + dev_err(dev, "Cannot find buffer pool ID in the device tree\n");
3322 + dpa_bp = ERR_PTR(-EINVAL);
3323 + goto _return_of_node_put;
3324 + }
3325 + dpa_bp[i].bpid = (uint8_t)bpid;
3326 +
3327 + bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg",
3328 + &lenp);
3329 + if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) {
3330 + const uint32_t *seed_pool;
3331 +
3332 + dpa_bp[i].config_count =
3333 + (int)of_read_number(bpool_cfg, ns);
3334 + dpa_bp[i].size =
3335 + (size_t)of_read_number(bpool_cfg + ns, ns);
3336 + dpa_bp[i].paddr =
3337 + of_read_number(bpool_cfg + 2 * ns, na);
3338 +
3339 + seed_pool = of_get_property(dev_node,
3340 + "fsl,bpool-ethernet-seeds", &lenp);
3341 + dpa_bp[i].seed_pool = !!seed_pool;
3342 +
3343 + } else {
3344 + dev_err(dev,
3345 + "Missing/invalid fsl,bpool-ethernet-cfg device tree entry for node %s\n",
3346 + dev_node->full_name);
3347 + dpa_bp = ERR_PTR(-EINVAL);
3348 + goto _return_of_node_put;
3349 + }
3350 + }
3351 +
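+	/* Sort the pools in ascending order of their buffer size */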
3352 + sort(dpa_bp, *count, sizeof(*dpa_bp), dpa_bp_cmp, NULL);
3353 +
3354 + return dpa_bp;
3355 +
3356 +_return_of_node_put:
3357 + if (dev_node)
3358 + of_node_put(dev_node);
3359 +
3360 + return dpa_bp;
3361 +}
3362 +EXPORT_SYMBOL(dpa_bp_probe);
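+
+/* Sketch of the device tree layout consumed by dpa_bp_probe() above,
+ * assuming one address cell and one size cell; the label and values are
+ * illustrative only:
+ *
+ *	bp7: buffer-pool@7 {
+ *		compatible = "fsl,bpool";
+ *		fsl,bpid = <7>;
+ *		fsl,bpool-ethernet-cfg = <count size paddr>;
+ *		fsl,bpool-ethernet-seeds;
+ *	};
+ *
+ * The Ethernet device node points at it through
+ * fsl,bman-buffer-pools = <&bp7>;.
+ */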
3363 +
3364 +int dpa_bp_shared_port_seed(struct dpa_bp *bp)
3365 +{
3366 + void __iomem **ptr;
3367 +
3368 +	/* In MAC-less and Shared-MAC scenarios the physical
3369 +	 * address of the buffer pool in the device tree is set
3370 +	 * to 0 to specify that another entity (USDPAA) will
3371 +	 * allocate and seed the buffers.
3372 +	 */
3373 + if (!bp->paddr)
3374 + return 0;
3375 +
3376 + /* allocate memory region for buffers */
3377 + devm_request_mem_region(bp->dev, bp->paddr,
3378 + bp->size * bp->config_count, KBUILD_MODNAME);
3379 + /* managed ioremap unmapping */
3380 + ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
3381 + if (!ptr)
3382 + return -EIO;
3383 +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
3384 + bp->vaddr = ioremap_cache_ns(bp->paddr, bp->size * bp->config_count);
3385 +#else
3386 + bp->vaddr = ioremap_prot(bp->paddr, bp->size * bp->config_count, 0);
3387 +#endif
3388 + if (bp->vaddr == NULL) {
3389 + pr_err("Could not map memory for pool %d\n", bp->bpid);
3390 + devres_free(ptr);
3391 + return -EIO;
3392 + }
3393 + *ptr = bp->vaddr;
3394 + devres_add(bp->dev, ptr);
3395 +
3396 + /* seed pool with buffers from that memory region */
3397 + if (bp->seed_pool) {
3398 + int count = bp->target_count;
3399 + dma_addr_t addr = bp->paddr;
3400 +
3401 + while (count) {
3402 + struct bm_buffer bufs[8];
3403 + uint8_t num_bufs = 0;
3404 +
3405 + do {
3406 + BUG_ON(addr > 0xffffffffffffull);
3407 + bufs[num_bufs].bpid = bp->bpid;
3408 + bm_buffer_set64(&bufs[num_bufs++], addr);
3409 + addr += bp->size;
3410 +
3411 + } while (--count && (num_bufs < 8));
3412 +
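+			/* Retry until BMan accepts the release, e.g. while
+			 * the release command ring is temporarily full */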
3413 + while (bman_release(bp->pool, bufs, num_bufs, 0))
3414 + cpu_relax();
3415 + }
3416 + }
3417 +
3418 + return 0;
3419 +}
3420 +EXPORT_SYMBOL(dpa_bp_shared_port_seed);
3421 +
3422 +int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
3423 + size_t count)
3424 +{
3425 + struct dpa_priv_s *priv = netdev_priv(net_dev);
3426 + int i;
3427 +
3428 + priv->dpa_bp = dpa_bp;
3429 + priv->bp_count = count;
3430 +
3431 + for (i = 0; i < count; i++) {
3432 + int err;
3433 + err = dpa_bp_alloc(&dpa_bp[i]);
3434 + if (err < 0) {
3435 + dpa_bp_free(priv);
3436 + priv->dpa_bp = NULL;
3437 + return err;
3438 + }
3439 + }
3440 +
3441 + return 0;
3442 +}
3443 +EXPORT_SYMBOL(dpa_bp_create);
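+
+/* Typical call flow in the advanced drivers (a sketch inferred from the
+ * exported functions above): dpa_bp_probe() parses the buffer pools from
+ * the device tree, dpa_bp_create() allocates them for a net device, and
+ * dpa_bp_shared_port_seed() maps and optionally seeds a shared pool.
+ */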
3444 +
3445 +static int __init __cold dpa_advanced_load(void)
3446 +{
3447 + pr_info(DPA_DESCRIPTION "\n");
3448 +
3449 + return 0;
3450 +}
3451 +module_init(dpa_advanced_load);
3452 +
3453 +static void __exit __cold dpa_advanced_unload(void)
3454 +{
3455 + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
3456 + KBUILD_BASENAME".c", __func__);
3457 +
3458 +}
3459 +module_exit(dpa_advanced_unload);
3460 --- /dev/null
3461 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_base.h
3462 @@ -0,0 +1,50 @@
3463 +/* Copyright 2008-2013 Freescale Semiconductor, Inc.
3464 + *
3465 + * Redistribution and use in source and binary forms, with or without
3466 + * modification, are permitted provided that the following conditions are met:
3467 + * * Redistributions of source code must retain the above copyright
3468 + * notice, this list of conditions and the following disclaimer.
3469 + * * Redistributions in binary form must reproduce the above copyright
3470 + * notice, this list of conditions and the following disclaimer in the
3471 + * documentation and/or other materials provided with the distribution.
3472 + * * Neither the name of Freescale Semiconductor nor the
3473 + * names of its contributors may be used to endorse or promote products
3474 + * derived from this software without specific prior written permission.
3475 + *
3476 + *
3477 + * ALTERNATIVELY, this software may be distributed under the terms of the
3478 + * GNU General Public License ("GPL") as published by the Free Software
3479 + * Foundation, either version 2 of that License or (at your option) any
3480 + * later version.
3481 + *
3482 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
3483 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
3484 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
3485 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
3486 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
3487 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
3488 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
3489 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3490 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3491 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3492 + */
3493 +
3494 +#ifndef __DPAA_ETH_BASE_H
3495 +#define __DPAA_ETH_BASE_H
3496 +
3497 +#include <linux/etherdevice.h> /* struct net_device */
3498 +#include <linux/fsl_bman.h> /* struct bm_buffer */
3499 +#include <linux/of_platform.h> /* struct platform_device */
3500 +#include <linux/net_tstamp.h> /* struct hwtstamp_config */
3501 +
3502 +extern uint8_t advanced_debug;
3503 +extern const struct dpa_fq_cbs_t shared_fq_cbs;
3504 +extern int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev);
3505 +
3506 +struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
3507 +dpa_bp_probe(struct platform_device *_of_dev, size_t *count);
3508 +int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
3509 + size_t count);
3510 +int dpa_bp_shared_port_seed(struct dpa_bp *bp);
3511 +
3512 +#endif /* __DPAA_ETH_BASE_H */
3513 --- /dev/null
3514 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
3515 @@ -0,0 +1,1719 @@
3516 +/* Copyright 2008-2016 Freescale Semiconductor Inc.
3517 + *
3518 + * Redistribution and use in source and binary forms, with or without
3519 + * modification, are permitted provided that the following conditions are met:
3520 + * * Redistributions of source code must retain the above copyright
3521 + * notice, this list of conditions and the following disclaimer.
3522 + * * Redistributions in binary form must reproduce the above copyright
3523 + * notice, this list of conditions and the following disclaimer in the
3524 + * documentation and/or other materials provided with the distribution.
3525 + * * Neither the name of Freescale Semiconductor nor the
3526 + * names of its contributors may be used to endorse or promote products
3527 + * derived from this software without specific prior written permission.
3528 + *
3529 + *
3530 + * ALTERNATIVELY, this software may be distributed under the terms of the
3531 + * GNU General Public License ("GPL") as published by the Free Software
3532 + * Foundation, either version 2 of that License or (at your option) any
3533 + * later version.
3534 + *
3535 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
3536 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
3537 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
3538 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
3539 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
3540 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
3541 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
3542 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3543 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3544 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3545 + */
3546 +
3547 +#include <linux/init.h>
3548 +#include "dpaa_eth_ceetm.h"
3549 +
3550 +#define DPA_CEETM_DESCRIPTION "FSL DPAA CEETM qdisc"
3551 +
3552 +const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1] = {
3553 + [TCA_CEETM_COPT] = { .len = sizeof(struct tc_ceetm_copt) },
3554 + [TCA_CEETM_QOPS] = { .len = sizeof(struct tc_ceetm_qopt) },
3555 +};
3556 +
3557 +struct Qdisc_ops ceetm_qdisc_ops;
3558 +
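+/* Mapping between the tc constructs and the CEETM hardware resources, as
+ * implemented below: the root ceetm qdisc claims a sub-portal (SP) and a
+ * logical network interface (LNI); each root ceetm class claims a class
+ * scheduler channel; each prio or wbfs class claims a class queue (CQ),
+ * a class congestion group (CCG) and a logical frame queue (LFQ).
+ */
+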
3559 +/* Obtain the DCP and the SP ids from the FMan port */
3560 +static void get_dcp_and_sp(struct net_device *dev, enum qm_dc_portal *dcp_id,
3561 + unsigned int *sp_id)
3562 +{
3563 + uint32_t channel;
3564 + t_LnxWrpFmPortDev *port_dev;
3565 + struct dpa_priv_s *dpa_priv = netdev_priv(dev);
3566 + struct mac_device *mac_dev = dpa_priv->mac_dev;
3567 +
3568 + port_dev = (t_LnxWrpFmPortDev *)mac_dev->port_dev[TX];
3569 + channel = port_dev->txCh;
3570 +
3571 + *sp_id = channel & CHANNEL_SP_MASK;
3572 + pr_debug(KBUILD_BASENAME " : FM sub-portal ID %d\n", *sp_id);
3573 +
3574 + if (channel < DCP0_MAX_CHANNEL) {
3575 + *dcp_id = qm_dc_portal_fman0;
3576 + pr_debug(KBUILD_BASENAME " : DCP ID 0\n");
3577 + } else {
3578 + *dcp_id = qm_dc_portal_fman1;
3579 + pr_debug(KBUILD_BASENAME " : DCP ID 1\n");
3580 + }
3581 +}
3582 +
3583 +/* Enqueue Rejection Notification callback */
3584 +static void ceetm_ern(struct qman_portal *portal, struct qman_fq *fq,
3585 + const struct qm_mr_entry *msg)
3586 +{
3587 + struct net_device *net_dev;
3588 + struct ceetm_class *cls;
3589 + struct ceetm_class_stats *cstats = NULL;
3590 + const struct dpa_priv_s *dpa_priv;
3591 + struct dpa_percpu_priv_s *dpa_percpu_priv;
3592 + struct sk_buff *skb;
3593 + struct qm_fd fd = msg->ern.fd;
3594 +
3595 + net_dev = ((struct ceetm_fq *)fq)->net_dev;
3596 + dpa_priv = netdev_priv(net_dev);
3597 + dpa_percpu_priv = raw_cpu_ptr(dpa_priv->percpu_priv);
3598 +
3599 + /* Increment DPA counters */
3600 + dpa_percpu_priv->stats.tx_dropped++;
3601 + dpa_percpu_priv->stats.tx_fifo_errors++;
3602 +
3603 + /* Increment CEETM counters */
3604 + cls = ((struct ceetm_fq *)fq)->ceetm_cls;
3605 + switch (cls->type) {
3606 + case CEETM_PRIO:
3607 + cstats = this_cpu_ptr(cls->prio.cstats);
3608 + break;
3609 + case CEETM_WBFS:
3610 + cstats = this_cpu_ptr(cls->wbfs.cstats);
3611 + break;
3612 + }
3613 +
3614 + if (cstats)
3615 + cstats->ern_drop_count++;
3616 +
3617 + if (fd.bpid != 0xff) {
3618 + dpa_fd_release(net_dev, &fd);
3619 + return;
3620 + }
3621 +
3622 + skb = _dpa_cleanup_tx_fd(dpa_priv, &fd);
3623 + dev_kfree_skb_any(skb);
3624 +}
3625 +
3626 +/* Congestion State Change Notification callback */
3627 +static void ceetm_cscn(struct qm_ceetm_ccg *ccg, void *cb_ctx, int congested)
3628 +{
3629 + struct ceetm_fq *ceetm_fq = (struct ceetm_fq *)cb_ctx;
3630 + struct dpa_priv_s *dpa_priv = netdev_priv(ceetm_fq->net_dev);
3631 + struct ceetm_class *cls = ceetm_fq->ceetm_cls;
3632 + struct ceetm_class_stats *cstats = NULL;
3633 +
3634 + switch (cls->type) {
3635 + case CEETM_PRIO:
3636 + cstats = this_cpu_ptr(cls->prio.cstats);
3637 + break;
3638 + case CEETM_WBFS:
3639 + cstats = this_cpu_ptr(cls->wbfs.cstats);
3640 + break;
3641 + }
3642 +
3643 + if (congested) {
3644 + dpa_priv->cgr_data.congestion_start_jiffies = jiffies;
3645 + netif_tx_stop_all_queues(dpa_priv->net_dev);
3646 + dpa_priv->cgr_data.cgr_congested_count++;
3647 + if (cstats)
3648 + cstats->cgr_congested_count++;
3649 + } else {
3650 + dpa_priv->cgr_data.congested_jiffies +=
3651 + (jiffies - dpa_priv->cgr_data.congestion_start_jiffies);
3652 + netif_tx_wake_all_queues(dpa_priv->net_dev);
3653 + }
3654 +}
3655 +
3656 +/* Allocate a ceetm fq */
3657 +static int ceetm_alloc_fq(struct ceetm_fq **fq,
3658 + struct net_device *dev,
3659 + struct ceetm_class *cls)
3660 +{
3661 + *fq = kzalloc(sizeof(**fq), GFP_KERNEL);
3662 + if (!*fq)
3663 + return -ENOMEM;
3664 +
3665 + (*fq)->net_dev = dev;
3666 + (*fq)->ceetm_cls = cls;
3667 + return 0;
3668 +}
3669 +
3670 +/* Configure a ceetm Class Congestion Group */
3671 +static int ceetm_config_ccg(struct qm_ceetm_ccg **ccg,
3672 + struct qm_ceetm_channel *channel,
3673 + unsigned int id,
3674 + struct ceetm_fq *fq,
3675 + u32 if_support)
3676 +{
3677 + int err;
3678 + u32 cs_th;
3679 + u16 ccg_mask;
3680 + struct qm_ceetm_ccg_params ccg_params;
3681 +
3682 + err = qman_ceetm_ccg_claim(ccg, channel, id, ceetm_cscn, fq);
3683 + if (err)
3684 + return err;
3685 +
3686 + /* Configure the count mode (frames/bytes), enable
3687 + * notifications, enable tail-drop, and configure the tail-drop
3688 + * mode and threshold */
3689 + ccg_mask = QM_CCGR_WE_MODE | QM_CCGR_WE_CSCN_EN |
3690 + QM_CCGR_WE_TD_EN | QM_CCGR_WE_TD_MODE |
3691 + QM_CCGR_WE_TD_THRES;
3692 +
3693 + ccg_params.mode = 0; /* count bytes */
3694 + ccg_params.cscn_en = 1; /* generate notifications */
3695 + ccg_params.td_en = 1; /* enable tail-drop */
3696 + ccg_params.td_mode = 1; /* tail-drop on threshold */
3697 +
3698 + /* Configure the tail-drop threshold according to the link
3699 + * speed */
3700 + if (if_support & SUPPORTED_10000baseT_Full)
3701 + cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G;
3702 + else
3703 + cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G;
3704 + qm_cgr_cs_thres_set64(&ccg_params.td_thres, cs_th, 1);
3705 +
3706 + err = qman_ceetm_ccg_set(*ccg, ccg_mask, &ccg_params);
3707 + if (err)
3708 + return err;
3709 +
3710 + return 0;
3711 +}
3712 +
3713 +/* Configure a ceetm Logical Frame Queue */
3714 +static int ceetm_config_lfq(struct qm_ceetm_cq *cq, struct ceetm_fq *fq,
3715 + struct qm_ceetm_lfq **lfq)
3716 +{
3717 + int err;
3718 + u64 context_a;
3719 + u32 context_b;
3720 +
3721 + err = qman_ceetm_lfq_claim(lfq, cq);
3722 + if (err)
3723 + return err;
3724 +
3725 + /* Get the former contexts in order to preserve context B */
3726 + err = qman_ceetm_lfq_get_context(*lfq, &context_a, &context_b);
3727 + if (err)
3728 + return err;
3729 +
3730 + context_a = CEETM_CONTEXT_A;
3731 + err = qman_ceetm_lfq_set_context(*lfq, context_a, context_b);
3732 + if (err)
3733 + return err;
3734 +
3735 + (*lfq)->ern = ceetm_ern;
3736 +
3737 + err = qman_ceetm_create_fq(*lfq, &fq->fq);
3738 + if (err)
3739 + return err;
3740 +
3741 + return 0;
3742 +}
3743 +
3744 +/* Configure a prio ceetm class */
3745 +static int ceetm_config_prio_cls(struct ceetm_class *cls, struct net_device *dev,
3746 + struct qm_ceetm_channel *channel, unsigned int id)
3747 +{
3748 + int err;
3749 + struct dpa_priv_s *dpa_priv = netdev_priv(dev);
3750 +
3751 + err = ceetm_alloc_fq(&cls->prio.fq, dev, cls);
3752 + if (err)
3753 + return err;
3754 +
3755 + /* Claim and configure the CCG */
3756 + err = ceetm_config_ccg(&cls->prio.ccg, channel, id, cls->prio.fq,
3757 + dpa_priv->mac_dev->if_support);
3758 + if (err)
3759 + return err;
3760 +
3761 + /* Claim and configure the CQ */
3762 + err = qman_ceetm_cq_claim(&cls->prio.cq, channel, id, cls->prio.ccg);
3763 + if (err)
3764 + return err;
3765 +
3766 + if (cls->shaped) {
3767 + err = qman_ceetm_channel_set_cq_cr_eligibility(channel, id, 1);
3768 + if (err)
3769 + return err;
3770 +
3771 + err = qman_ceetm_channel_set_cq_er_eligibility(channel, id, 1);
3772 + if (err)
3773 + return err;
3774 + }
3775 +
3776 + /* Claim and configure a LFQ */
3777 + err = ceetm_config_lfq(cls->prio.cq, cls->prio.fq, &cls->prio.lfq);
3778 + if (err)
3779 + return err;
3780 +
3781 + return 0;
3782 +}
3783 +
3784 +/* Configure a wbfs ceetm class */
3785 +static int ceetm_config_wbfs_cls(struct ceetm_class *cls, struct net_device *dev,
3786 + struct qm_ceetm_channel *channel, unsigned int id, int type)
3787 +{
3788 + int err;
3789 + struct dpa_priv_s *dpa_priv = netdev_priv(dev);
3790 +
3791 + err = ceetm_alloc_fq(&cls->wbfs.fq, dev, cls);
3792 + if (err)
3793 + return err;
3794 +
3795 + /* Claim and configure the CCG */
3796 + err = ceetm_config_ccg(&cls->wbfs.ccg, channel, id, cls->wbfs.fq,
3797 + dpa_priv->mac_dev->if_support);
3798 + if (err)
3799 + return err;
3800 +
3801 + /* Claim and configure the CQ */
3802 + if (type == WBFS_GRP_B)
3803 + err = qman_ceetm_cq_claim_B(&cls->wbfs.cq, channel, id,
3804 + cls->wbfs.ccg);
3805 + else
3806 + err = qman_ceetm_cq_claim_A(&cls->wbfs.cq, channel, id,
3807 + cls->wbfs.ccg);
3808 + if (err)
3809 + return err;
3810 +
3811 +	/* Configure the CQ weight: the real-number weight is multiplied by
3812 +	 * 100 to get rid of the fraction */
3813 + err = qman_ceetm_set_queue_weight_in_ratio(cls->wbfs.cq,
3814 + cls->wbfs.weight * 100);
3815 + if (err)
3816 + return err;
3817 +
3818 + /* Claim and configure a LFQ */
3819 + err = ceetm_config_lfq(cls->wbfs.cq, cls->wbfs.fq, &cls->wbfs.lfq);
3820 + if (err)
3821 + return err;
3822 +
3823 + return 0;
3824 +}
3825 +
3826 +/* Find class in qdisc hash table using given handle */
3827 +static inline struct ceetm_class *ceetm_find(u32 handle, struct Qdisc *sch)
3828 +{
3829 + struct ceetm_qdisc *priv = qdisc_priv(sch);
3830 + struct Qdisc_class_common *clc;
3831 +
3832 + pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n",
3833 + __func__, handle, sch->handle);
3834 +
3835 + clc = qdisc_class_find(&priv->clhash, handle);
3836 + return clc ? container_of(clc, struct ceetm_class, common) : NULL;
3837 +}
3838 +
3839 +/* Insert a class in the qdisc's class hash */
3840 +static void ceetm_link_class(struct Qdisc *sch,
3841 + struct Qdisc_class_hash *clhash,
3842 + struct Qdisc_class_common *common)
3843 +{
3844 + sch_tree_lock(sch);
3845 + qdisc_class_hash_insert(clhash, common);
3846 + sch_tree_unlock(sch);
3847 + qdisc_class_hash_grow(sch, clhash);
3848 +}
3849 +
3850 +/* Destroy a ceetm class */
3851 +static void ceetm_cls_destroy(struct Qdisc *sch, struct ceetm_class *cl)
3852 +{
3853 + if (!cl)
3854 + return;
3855 +
3856 + pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n",
3857 + __func__, cl->common.classid, sch->handle);
3858 +
3859 + switch (cl->type) {
3860 + case CEETM_ROOT:
3861 + if (cl->root.child) {
3862 + qdisc_destroy(cl->root.child);
3863 + cl->root.child = NULL;
3864 + }
3865 +
3866 + if (cl->root.ch && qman_ceetm_channel_release(cl->root.ch))
3867 + pr_err(KBUILD_BASENAME
3868 + " : %s : error releasing the channel %d\n",
3869 + __func__, cl->root.ch->idx);
3870 +
3871 + break;
3872 +
3873 + case CEETM_PRIO:
3874 + if (cl->prio.child) {
3875 + qdisc_destroy(cl->prio.child);
3876 + cl->prio.child = NULL;
3877 + }
3878 +
3879 + if (cl->prio.lfq && qman_ceetm_lfq_release(cl->prio.lfq))
3880 + pr_err(KBUILD_BASENAME
3881 + " : %s : error releasing the LFQ %d\n",
3882 + __func__, cl->prio.lfq->idx);
3883 +
3884 + if (cl->prio.cq && qman_ceetm_cq_release(cl->prio.cq))
3885 + pr_err(KBUILD_BASENAME
3886 + " : %s : error releasing the CQ %d\n",
3887 + __func__, cl->prio.cq->idx);
3888 +
3889 + if (cl->prio.ccg && qman_ceetm_ccg_release(cl->prio.ccg))
3890 + pr_err(KBUILD_BASENAME
3891 + " : %s : error releasing the CCG %d\n",
3892 + __func__, cl->prio.ccg->idx);
3893 +
3894 + if (cl->prio.fq)
3895 + kfree(cl->prio.fq);
3896 +
3897 + if (cl->prio.cstats)
3898 + free_percpu(cl->prio.cstats);
3899 +
3900 + break;
3901 +
3902 + case CEETM_WBFS:
3903 + if (cl->wbfs.lfq && qman_ceetm_lfq_release(cl->wbfs.lfq))
3904 + pr_err(KBUILD_BASENAME
3905 + " : %s : error releasing the LFQ %d\n",
3906 + __func__, cl->wbfs.lfq->idx);
3907 +
3908 + if (cl->wbfs.cq && qman_ceetm_cq_release(cl->wbfs.cq))
3909 + pr_err(KBUILD_BASENAME
3910 + " : %s : error releasing the CQ %d\n",
3911 + __func__, cl->wbfs.cq->idx);
3912 +
3913 + if (cl->wbfs.ccg && qman_ceetm_ccg_release(cl->wbfs.ccg))
3914 + pr_err(KBUILD_BASENAME
3915 + " : %s : error releasing the CCG %d\n",
3916 + __func__, cl->wbfs.ccg->idx);
3917 +
3918 + if (cl->wbfs.fq)
3919 + kfree(cl->wbfs.fq);
3920 +
3921 + if (cl->wbfs.cstats)
3922 + free_percpu(cl->wbfs.cstats);
3923 + }
3924 +
3925 + tcf_destroy_chain(&cl->filter_list);
3926 + kfree(cl);
3927 +}
3928 +
3929 +/* Destroy a ceetm qdisc */
3930 +static void ceetm_destroy(struct Qdisc *sch)
3931 +{
3932 + unsigned int ntx, i;
3933 + struct hlist_node *next;
3934 + struct ceetm_class *cl;
3935 + struct ceetm_qdisc *priv = qdisc_priv(sch);
3936 + struct net_device *dev = qdisc_dev(sch);
3937 +
3938 + pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n",
3939 + __func__, sch->handle);
3940 +
3941 + /* All filters need to be removed before destroying the classes */
3942 + tcf_destroy_chain(&priv->filter_list);
3943 +
3944 + for (i = 0; i < priv->clhash.hashsize; i++) {
3945 + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
3946 + tcf_destroy_chain(&cl->filter_list);
3947 + }
3948 +
3949 + for (i = 0; i < priv->clhash.hashsize; i++) {
3950 + hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i],
3951 + common.hnode)
3952 + ceetm_cls_destroy(sch, cl);
3953 + }
3954 +
3955 + qdisc_class_hash_destroy(&priv->clhash);
3956 +
3957 + switch (priv->type) {
3958 + case CEETM_ROOT:
3959 + dpa_disable_ceetm(dev);
3960 +
3961 + if (priv->root.lni && qman_ceetm_lni_release(priv->root.lni))
3962 + pr_err(KBUILD_BASENAME
3963 + " : %s : error releasing the LNI %d\n",
3964 + __func__, priv->root.lni->idx);
3965 +
3966 + if (priv->root.sp && qman_ceetm_sp_release(priv->root.sp))
3967 + pr_err(KBUILD_BASENAME
3968 + " : %s : error releasing the SP %d\n",
3969 + __func__, priv->root.sp->idx);
3970 +
3971 + if (priv->root.qstats)
3972 + free_percpu(priv->root.qstats);
3973 +
3974 + if (!priv->root.qdiscs)
3975 + break;
3976 +
3977 + /* Remove the pfifo qdiscs */
3978 + for (ntx = 0; ntx < dev->num_tx_queues; ntx++)
3979 + if (priv->root.qdiscs[ntx])
3980 + qdisc_destroy(priv->root.qdiscs[ntx]);
3981 +
3982 + kfree(priv->root.qdiscs);
3983 + break;
3984 +
3985 + case CEETM_PRIO:
3986 + if (priv->prio.parent)
3987 + priv->prio.parent->root.child = NULL;
3988 + break;
3989 +
3990 + case CEETM_WBFS:
3991 + if (priv->wbfs.parent)
3992 + priv->wbfs.parent->prio.child = NULL;
3993 + break;
3994 + }
3995 +}
3996 +
3997 +static int ceetm_dump(struct Qdisc *sch, struct sk_buff *skb)
3998 +{
3999 + struct Qdisc *qdisc;
4000 + unsigned int ntx, i;
4001 + struct nlattr *nest;
4002 + struct tc_ceetm_qopt qopt;
4003 + struct ceetm_qdisc_stats *qstats;
4004 + struct net_device *dev = qdisc_dev(sch);
4005 + struct ceetm_qdisc *priv = qdisc_priv(sch);
4006 +
4007 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
4008 +
4009 + sch_tree_lock(sch);
4010 + memset(&qopt, 0, sizeof(qopt));
4011 + qopt.type = priv->type;
4012 + qopt.shaped = priv->shaped;
4013 +
4014 + switch (priv->type) {
4015 + case CEETM_ROOT:
4016 + /* Gather statistics from the underlying pfifo qdiscs */
4017 + sch->q.qlen = 0;
4018 + memset(&sch->bstats, 0, sizeof(sch->bstats));
4019 + memset(&sch->qstats, 0, sizeof(sch->qstats));
4020 +
4021 + for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
4022 + qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
4023 + sch->q.qlen += qdisc->q.qlen;
4024 + sch->bstats.bytes += qdisc->bstats.bytes;
4025 + sch->bstats.packets += qdisc->bstats.packets;
4026 + sch->qstats.qlen += qdisc->qstats.qlen;
4027 + sch->qstats.backlog += qdisc->qstats.backlog;
4028 + sch->qstats.drops += qdisc->qstats.drops;
4029 + sch->qstats.requeues += qdisc->qstats.requeues;
4030 + sch->qstats.overlimits += qdisc->qstats.overlimits;
4031 + }
4032 +
4033 + for_each_online_cpu(i) {
4034 + qstats = per_cpu_ptr(priv->root.qstats, i);
4035 + sch->qstats.drops += qstats->drops;
4036 + }
4037 +
4038 + qopt.rate = priv->root.rate;
4039 + qopt.ceil = priv->root.ceil;
4040 + qopt.overhead = priv->root.overhead;
4041 + break;
4042 +
4043 + case CEETM_PRIO:
4044 + qopt.qcount = priv->prio.qcount;
4045 + break;
4046 +
4047 + case CEETM_WBFS:
4048 + qopt.qcount = priv->wbfs.qcount;
4049 + qopt.cr = priv->wbfs.cr;
4050 + qopt.er = priv->wbfs.er;
4051 + break;
4052 +
4053 + default:
4054 + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
4055 + sch_tree_unlock(sch);
4056 + return -EINVAL;
4057 + }
4058 +
4059 + nest = nla_nest_start(skb, TCA_OPTIONS);
4060 + if (nest == NULL)
4061 + goto nla_put_failure;
4062 + if (nla_put(skb, TCA_CEETM_QOPS, sizeof(qopt), &qopt))
4063 + goto nla_put_failure;
4064 + nla_nest_end(skb, nest);
4065 +
4066 + sch_tree_unlock(sch);
4067 + return skb->len;
4068 +
4069 +nla_put_failure:
4070 + sch_tree_unlock(sch);
4071 + nla_nest_cancel(skb, nest);
4072 + return -EMSGSIZE;
4073 +}
4074 +
4075 +/* Configure a root ceetm qdisc */
4076 +static int ceetm_init_root(struct Qdisc *sch, struct ceetm_qdisc *priv,
4077 + struct tc_ceetm_qopt *qopt)
4078 +{
4079 + struct netdev_queue *dev_queue;
4080 + struct Qdisc *qdisc;
4081 + enum qm_dc_portal dcp_id;
4082 + unsigned int i, sp_id;
4083 + int err;
4084 + u64 bps;
4085 + struct qm_ceetm_sp *sp;
4086 + struct qm_ceetm_lni *lni;
4087 + struct net_device *dev = qdisc_dev(sch);
4088 + struct dpa_priv_s *dpa_priv = netdev_priv(dev);
4089 + struct mac_device *mac_dev = dpa_priv->mac_dev;
4090 +
4091 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
4092 +
4093 + /* Validate inputs */
4094 + if (sch->parent != TC_H_ROOT) {
4095 + pr_err("CEETM: a root ceetm qdisc can not be attached to a class\n");
4096 + tcf_destroy_chain(&priv->filter_list);
4097 + qdisc_class_hash_destroy(&priv->clhash);
4098 + return -EINVAL;
4099 + }
4100 +
4101 + if (!mac_dev) {
4102 + pr_err("CEETM: the interface is lacking a mac\n");
4103 + err = -EINVAL;
4104 + goto err_init_root;
4105 + }
4106 +
4107 + /* pre-allocate underlying pfifo qdiscs */
4108 + priv->root.qdiscs = kcalloc(dev->num_tx_queues,
4109 + sizeof(priv->root.qdiscs[0]),
4110 + GFP_KERNEL);
4111 + if (priv->root.qdiscs == NULL) {
4112 + err = -ENOMEM;
4113 + goto err_init_root;
4114 + }
4115 +
4116 + for (i = 0; i < dev->num_tx_queues; i++) {
4117 + dev_queue = netdev_get_tx_queue(dev, i);
4118 + qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
4119 + TC_H_MAKE(TC_H_MAJ(sch->handle),
4120 + TC_H_MIN(i + PFIFO_MIN_OFFSET)));
4121 + if (qdisc == NULL) {
4122 + err = -ENOMEM;
4123 + goto err_init_root;
4124 + }
4125 +
4126 + priv->root.qdiscs[i] = qdisc;
4127 + qdisc->flags |= TCQ_F_ONETXQUEUE;
4128 + }
4129 +
4130 + sch->flags |= TCQ_F_MQROOT;
4131 +
4132 + priv->root.qstats = alloc_percpu(struct ceetm_qdisc_stats);
4133 + if (!priv->root.qstats) {
4134 + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
4135 + __func__);
4136 + err = -ENOMEM;
4137 + goto err_init_root;
4138 + }
4139 +
4140 + priv->shaped = qopt->shaped;
4141 + priv->root.rate = qopt->rate;
4142 + priv->root.ceil = qopt->ceil;
4143 + priv->root.overhead = qopt->overhead;
4144 +
4145 + /* Claim the SP */
4146 + get_dcp_and_sp(dev, &dcp_id, &sp_id);
4147 + err = qman_ceetm_sp_claim(&sp, dcp_id, sp_id);
4148 + if (err) {
4149 + pr_err(KBUILD_BASENAME " : %s : failed to claim the SP\n",
4150 + __func__);
4151 + goto err_init_root;
4152 + }
4153 +
4154 + priv->root.sp = sp;
4155 +
4156 + /* Claim the LNI - will use the same id as the SP id since SPs 0-7
4157 + * are connected to the TX FMan ports */
4158 + err = qman_ceetm_lni_claim(&lni, dcp_id, sp_id);
4159 + if (err) {
4160 + pr_err(KBUILD_BASENAME " : %s : failed to claim the LNI\n",
4161 + __func__);
4162 + goto err_init_root;
4163 + }
4164 +
4165 + priv->root.lni = lni;
4166 +
4167 + err = qman_ceetm_sp_set_lni(sp, lni);
4168 + if (err) {
4169 + pr_err(KBUILD_BASENAME " : %s : failed to link the SP and "
4170 + "LNI\n", __func__);
4171 + goto err_init_root;
4172 + }
4173 +
4174 + lni->sp = sp;
4175 +
4176 + /* Configure the LNI shaper */
4177 + if (priv->shaped) {
4178 + err = qman_ceetm_lni_enable_shaper(lni, 1, priv->root.overhead);
4179 + if (err) {
4180 + pr_err(KBUILD_BASENAME " : %s : failed to configure "
4181 + "the LNI shaper\n", __func__);
4182 + goto err_init_root;
4183 + }
4184 +
4185 + bps = priv->root.rate << 3; /* Bps -> bps */
4186 + err = qman_ceetm_lni_set_commit_rate_bps(lni, bps, dev->mtu);
4187 + if (err) {
4188 + pr_err(KBUILD_BASENAME " : %s : failed to configure "
4189 + "the LNI shaper\n", __func__);
4190 + goto err_init_root;
4191 + }
4192 +
4193 + bps = priv->root.ceil << 3; /* Bps -> bps */
4194 + err = qman_ceetm_lni_set_excess_rate_bps(lni, bps, dev->mtu);
4195 + if (err) {
4196 + pr_err(KBUILD_BASENAME " : %s : failed to configure "
4197 + "the LNI shaper\n", __func__);
4198 + goto err_init_root;
4199 + }
4200 + }
4201 +
4202 + /* TODO default configuration */
4203 +
4204 + dpa_enable_ceetm(dev);
4205 + return 0;
4206 +
4207 +err_init_root:
4208 + ceetm_destroy(sch);
4209 + return err;
4210 +}
4211 +
4212 +/* Configure a prio ceetm qdisc */
4213 +static int ceetm_init_prio(struct Qdisc *sch, struct ceetm_qdisc *priv,
4214 + struct tc_ceetm_qopt *qopt)
4215 +{
4216 + int err;
4217 + unsigned int i;
4218 + struct ceetm_class *parent_cl, *child_cl;
4219 + struct Qdisc *parent_qdisc;
4220 + struct net_device *dev = qdisc_dev(sch);
4221 +
4222 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
4223 +
4224 + if (sch->parent == TC_H_ROOT) {
4225 + pr_err("CEETM: a prio ceetm qdisc can not be root\n");
4226 + err = -EINVAL;
4227 + goto err_init_prio;
4228 + }
4229 +
4230 + parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
4231 +	if (!parent_qdisc || strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
4232 + pr_err("CEETM: a ceetm qdisc can not be attached to other "
4233 + "qdisc/class types\n");
4234 + err = -EINVAL;
4235 + goto err_init_prio;
4236 + }
4237 +
4238 + /* Obtain the parent root ceetm_class */
4239 + parent_cl = ceetm_find(sch->parent, parent_qdisc);
4240 +
4241 + if (!parent_cl || parent_cl->type != CEETM_ROOT) {
4242 +		pr_err("CEETM: a prio ceetm qdisc can be added only under a "
4243 +			"root ceetm class\n");
4244 + err = -EINVAL;
4245 + goto err_init_prio;
4246 + }
4247 +
4248 + priv->prio.parent = parent_cl;
4249 + parent_cl->root.child = sch;
4250 +
4251 + priv->shaped = parent_cl->shaped;
4252 + priv->prio.qcount = qopt->qcount;
4253 +
4254 + /* Create and configure qcount child classes */
4255 + for (i = 0; i < priv->prio.qcount; i++) {
4256 + child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL);
4257 + if (!child_cl) {
4258 + pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n",
4259 + __func__);
4260 + err = -ENOMEM;
4261 + goto err_init_prio;
4262 + }
4263 +
4264 + child_cl->prio.cstats = alloc_percpu(struct ceetm_class_stats);
4265 + if (!child_cl->prio.cstats) {
4266 + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
4267 + __func__);
4268 + err = -ENOMEM;
4269 + goto err_init_prio_cls;
4270 + }
4271 +
4272 + child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1));
4273 + child_cl->refcnt = 1;
4274 + child_cl->parent = sch;
4275 + child_cl->type = CEETM_PRIO;
4276 + child_cl->shaped = priv->shaped;
4277 + child_cl->prio.child = NULL;
4278 +
4279 + /* All shaped CQs have CR and ER enabled by default */
4280 + child_cl->prio.cr = child_cl->shaped;
4281 + child_cl->prio.er = child_cl->shaped;
4282 + child_cl->prio.fq = NULL;
4283 + child_cl->prio.cq = NULL;
4284 +
4285 + /* Configure the corresponding hardware CQ */
4286 + err = ceetm_config_prio_cls(child_cl, dev,
4287 + parent_cl->root.ch, i);
4288 + if (err) {
4289 + pr_err(KBUILD_BASENAME " : %s : failed to configure "
4290 + "the ceetm prio class %X\n",
4291 + __func__,
4292 + child_cl->common.classid);
4293 + goto err_init_prio_cls;
4294 + }
4295 +
4296 + /* Add class handle in Qdisc */
4297 + ceetm_link_class(sch, &priv->clhash, &child_cl->common);
4298 + pr_debug(KBUILD_BASENAME " : %s : added ceetm prio class %X "
4299 + "associated with CQ %d and CCG %d\n",
4300 + __func__,
4301 + child_cl->common.classid,
4302 + child_cl->prio.cq->idx,
4303 + child_cl->prio.ccg->idx);
4304 + }
4305 +
4306 + return 0;
4307 +
4308 +err_init_prio_cls:
4309 + ceetm_cls_destroy(sch, child_cl);
4310 +err_init_prio:
4311 + ceetm_destroy(sch);
4312 + return err;
4313 +}
4314 +
4315 +/* Configure a wbfs ceetm qdisc */
4316 +static int ceetm_init_wbfs(struct Qdisc *sch, struct ceetm_qdisc *priv,
4317 + struct tc_ceetm_qopt *qopt)
4318 +{
4319 + int err, group_b, small_group;
4320 + unsigned int i, id, prio_a, prio_b;
4321 + struct ceetm_class *parent_cl, *child_cl, *root_cl;
4322 + struct Qdisc *parent_qdisc;
4323 + struct ceetm_qdisc *parent_priv;
4324 + struct qm_ceetm_channel *channel;
4325 + struct net_device *dev = qdisc_dev(sch);
4326 +
4327 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
4328 +
4329 + /* Validate inputs */
4330 + if (sch->parent == TC_H_ROOT) {
4331 +		pr_err("CEETM: a wbfs ceetm qdisc can not be root\n");
4332 + err = -EINVAL;
4333 + goto err_init_wbfs;
4334 + }
4335 +
4336 + /* Obtain the parent prio ceetm qdisc */
4337 + parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
4338 +	if (!parent_qdisc || strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
4339 + pr_err("CEETM: a ceetm qdisc can not be attached to other "
4340 + "qdisc/class types\n");
4341 + err = -EINVAL;
4342 + goto err_init_wbfs;
4343 + }
4344 +
4345 + /* Obtain the parent prio ceetm class */
4346 + parent_cl = ceetm_find(sch->parent, parent_qdisc);
4347 + parent_priv = qdisc_priv(parent_qdisc);
4348 +
4349 + if (!parent_cl || parent_cl->type != CEETM_PRIO) {
4350 +		pr_err("CEETM: a wbfs ceetm qdisc can be added only under a "
4351 +			"prio ceetm class\n");
4352 + err = -EINVAL;
4353 + goto err_init_wbfs;
4354 + }
4355 +
4356 + priv->shaped = parent_cl->shaped;
4357 +
4358 + if (!priv->shaped && (qopt->cr || qopt->er)) {
4359 + pr_err("CEETM: CR/ER can be enabled only for shaped wbfs "
4360 + "ceetm qdiscs\n");
4361 + err = -EINVAL;
4362 + goto err_init_wbfs;
4363 + }
4364 +
4365 + if (priv->shaped && !(qopt->cr || qopt->er)) {
4366 + pr_err("CEETM: either CR or ER must be enabled for shaped "
4367 + "wbfs ceetm qdiscs\n");
4368 + err = -EINVAL;
4369 + goto err_init_wbfs;
4370 + }
4371 +
4372 + /* Obtain the parent root ceetm class */
4373 + root_cl = parent_priv->prio.parent;
4374 + if ((root_cl->root.wbfs_grp_a && root_cl->root.wbfs_grp_b)
4375 + || root_cl->root.wbfs_grp_large) {
4376 + pr_err("CEETM: no more wbfs classes are available\n");
4377 + err = -EINVAL;
4378 + goto err_init_wbfs;
4379 + }
4380 +
4381 + if ((root_cl->root.wbfs_grp_a || root_cl->root.wbfs_grp_b)
4382 + && qopt->qcount == CEETM_MAX_WBFS_QCOUNT) {
4383 + pr_err("CEETM: only %d wbfs classes are available\n",
4384 + CEETM_MIN_WBFS_QCOUNT);
4385 + err = -EINVAL;
4386 + goto err_init_wbfs;
4387 + }
4388 +
4389 + priv->wbfs.parent = parent_cl;
4390 + parent_cl->prio.child = sch;
4391 +
4392 + priv->wbfs.qcount = qopt->qcount;
4393 + priv->wbfs.cr = qopt->cr;
4394 + priv->wbfs.er = qopt->er;
4395 +
4396 + channel = root_cl->root.ch;
4397 +
4398 + /* Configure the hardware wbfs channel groups */
4399 + if (priv->wbfs.qcount == CEETM_MAX_WBFS_QCOUNT) {
4400 + /* Configure the large group A */
4401 + priv->wbfs.group_type = WBFS_GRP_LARGE;
4402 + small_group = false;
4403 + group_b = false;
4404 + prio_a = TC_H_MIN(parent_cl->common.classid) - 1;
4405 + prio_b = prio_a;
4406 +
4407 + } else if (root_cl->root.wbfs_grp_a) {
4408 + /* Configure the group B */
4409 + priv->wbfs.group_type = WBFS_GRP_B;
4410 +
4411 + err = qman_ceetm_channel_get_group(channel, &small_group,
4412 + &prio_a, &prio_b);
4413 + if (err) {
4414 + pr_err(KBUILD_BASENAME " : %s : failed to get group "
4415 + "details\n", __func__);
4416 + goto err_init_wbfs;
4417 + }
4418 +
4419 + small_group = true;
4420 + group_b = true;
4421 + prio_b = TC_H_MIN(parent_cl->common.classid) - 1;
4422 +		/* If group A isn't configured yet, give it the same priority as group B */
4423 + prio_a = prio_a ? : prio_b;
4424 +
4425 + } else {
4426 + /* Configure the small group A */
4427 + priv->wbfs.group_type = WBFS_GRP_A;
4428 +
4429 + err = qman_ceetm_channel_get_group(channel, &small_group,
4430 + &prio_a, &prio_b);
4431 + if (err) {
4432 + pr_err(KBUILD_BASENAME " : %s : failed to get group "
4433 + "details\n", __func__);
4434 + goto err_init_wbfs;
4435 + }
4436 +
4437 + small_group = true;
4438 + group_b = false;
4439 + prio_a = TC_H_MIN(parent_cl->common.classid) - 1;
4440 +		/* If group B isn't configured yet, give it the same priority as group A */
4441 + prio_b = prio_b ? : prio_a;
4442 + }
4443 +
4444 + err = qman_ceetm_channel_set_group(channel, small_group, prio_a, prio_b);
4445 + if (err)
4446 + goto err_init_wbfs;
4447 +
4448 + if (priv->shaped) {
4449 + err = qman_ceetm_channel_set_group_cr_eligibility(channel,
4450 + group_b,
4451 + priv->wbfs.cr);
4452 + if (err) {
4453 + pr_err(KBUILD_BASENAME " : %s : failed to set group "
4454 + "CR eligibility\n", __func__);
4455 + goto err_init_wbfs;
4456 + }
4457 +
4458 + err = qman_ceetm_channel_set_group_er_eligibility(channel,
4459 + group_b,
4460 + priv->wbfs.er);
4461 + if (err) {
4462 + pr_err(KBUILD_BASENAME " : %s : failed to set group "
4463 + "ER eligibility\n", __func__);
4464 + goto err_init_wbfs;
4465 + }
4466 + }
4467 +
4468 + /* Create qcount child classes */
4469 + for (i = 0; i < priv->wbfs.qcount; i++) {
4470 + child_cl = kzalloc(sizeof(*child_cl), GFP_KERNEL);
4471 + if (!child_cl) {
4472 + pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n",
4473 + __func__);
4474 + err = -ENOMEM;
4475 + goto err_init_wbfs;
4476 + }
4477 +
4478 + child_cl->wbfs.cstats = alloc_percpu(struct ceetm_class_stats);
4479 + if (!child_cl->wbfs.cstats) {
4480 + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
4481 + __func__);
4482 + err = -ENOMEM;
4483 + goto err_init_wbfs_cls;
4484 + }
4485 +
4486 + child_cl->common.classid = TC_H_MAKE(sch->handle, (i + 1));
4487 + child_cl->refcnt = 1;
4488 + child_cl->parent = sch;
4489 + child_cl->type = CEETM_WBFS;
4490 + child_cl->shaped = priv->shaped;
4491 + child_cl->wbfs.fq = NULL;
4492 + child_cl->wbfs.cq = NULL;
4493 + child_cl->wbfs.weight = qopt->qweight[i];
4494 +
4495 + if (priv->wbfs.group_type == WBFS_GRP_B)
4496 + id = WBFS_GRP_B_OFFSET + i;
4497 + else
4498 + id = WBFS_GRP_A_OFFSET + i;
4499 +
4500 + err = ceetm_config_wbfs_cls(child_cl, dev, channel, id,
4501 + priv->wbfs.group_type);
4502 + if (err) {
4503 + pr_err(KBUILD_BASENAME " : %s : failed to configure "
4504 + "the ceetm wbfs class %X\n",
4505 + __func__,
4506 + child_cl->common.classid);
4507 + goto err_init_wbfs_cls;
4508 + }
4509 +
4510 + /* Add class handle in Qdisc */
4511 + ceetm_link_class(sch, &priv->clhash, &child_cl->common);
4512 + pr_debug(KBUILD_BASENAME " : %s : added ceetm wbfs class %X "
4513 + "associated with CQ %d and CCG %d\n",
4514 + __func__,
4515 + child_cl->common.classid,
4516 + child_cl->wbfs.cq->idx,
4517 + child_cl->wbfs.ccg->idx);
4518 + }
4519 +
4520 + /* Signal the root class that a group has been configured */
4521 + switch (priv->wbfs.group_type) {
4522 + case WBFS_GRP_LARGE:
4523 + root_cl->root.wbfs_grp_large = true;
4524 + break;
4525 + case WBFS_GRP_A:
4526 + root_cl->root.wbfs_grp_a = true;
4527 + break;
4528 + case WBFS_GRP_B:
4529 + root_cl->root.wbfs_grp_b = true;
4530 + break;
4531 + }
4532 +
4533 + return 0;
4534 +
4535 +err_init_wbfs_cls:
4536 + ceetm_cls_destroy(sch, child_cl);
4537 +err_init_wbfs:
4538 + ceetm_destroy(sch);
4539 + return err;
4540 +}
4541 +
4542 +/* Configure a generic ceetm qdisc */
4543 +static int ceetm_init(struct Qdisc *sch, struct nlattr *opt)
4544 +{
4545 + struct tc_ceetm_qopt *qopt;
4546 + struct nlattr *tb[TCA_CEETM_QOPS + 1];
4547 + int ret;
4548 + struct ceetm_qdisc *priv = qdisc_priv(sch);
4549 + struct net_device *dev = qdisc_dev(sch);
4550 +
4551 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
4552 +
4553 + if (!netif_is_multiqueue(dev))
4554 + return -EOPNOTSUPP;
4555 +
4556 + ret = nla_parse_nested(tb, TCA_CEETM_QOPS, opt, ceetm_policy);
4557 + if (ret < 0) {
4558 + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
4559 + return ret;
4560 + }
4561 +
4562 + if (tb[TCA_CEETM_QOPS] == NULL) {
4563 + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
4564 + return -EINVAL;
4565 + }
4566 +
4567 + if (TC_H_MIN(sch->handle)) {
4568 + pr_err("CEETM: a qdisc should not have a minor\n");
4569 + return -EINVAL;
4570 + }
4571 +
4572 + qopt = nla_data(tb[TCA_CEETM_QOPS]);
4573 +
4574 + /* Initialize the class hash list. Each qdisc has its own class hash */
4575 + ret = qdisc_class_hash_init(&priv->clhash);
4576 + if (ret < 0) {
4577 + pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init "
4578 + "failed\n", __func__);
4579 + return ret;
4580 + }
4581 +
4582 + priv->type = qopt->type;
4583 +
4584 + switch (priv->type) {
4585 + case CEETM_ROOT:
4586 + ret = ceetm_init_root(sch, priv, qopt);
4587 + break;
4588 + case CEETM_PRIO:
4589 + ret = ceetm_init_prio(sch, priv, qopt);
4590 + break;
4591 + case CEETM_WBFS:
4592 + ret = ceetm_init_wbfs(sch, priv, qopt);
4593 + break;
4594 + default:
4595 + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
4596 + ceetm_destroy(sch);
4597 + ret = -EINVAL;
4598 + }
4599 +
4600 + return ret;
4601 +}
4602 +
4603 +/* Attach the underlying pfifo qdiscs */
4604 +static void ceetm_attach(struct Qdisc *sch)
4605 +{
4606 + struct net_device *dev = qdisc_dev(sch);
4607 + struct ceetm_qdisc *priv = qdisc_priv(sch);
4608 + struct Qdisc *qdisc, *old_qdisc;
4609 + unsigned int i;
4610 +
4611 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
4612 +
4613 + for (i = 0; i < dev->num_tx_queues; i++) {
4614 + qdisc = priv->root.qdiscs[i];
4615 + old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
4616 + if (old_qdisc)
4617 + qdisc_destroy(old_qdisc);
4618 + }
4619 +}
4620 +
4621 +static unsigned long ceetm_cls_get(struct Qdisc *sch, u32 classid)
4622 +{
4623 + struct ceetm_class *cl;
4624 + pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
4625 + __func__, classid, sch->handle);
4626 + cl = ceetm_find(classid, sch);
4627 +
4628 + if (cl)
4629 + cl->refcnt++; /* Will decrement in put() */
4630 + return (unsigned long)cl;
4631 +}
4632 +
4633 +static void ceetm_cls_put(struct Qdisc *sch, unsigned long arg)
4634 +{
4635 + struct ceetm_class *cl = (struct ceetm_class *)arg;
4636 + pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
4637 + __func__, cl->common.classid, sch->handle);
4638 + cl->refcnt--;
4639 +
4640 + if (cl->refcnt == 0)
4641 + ceetm_cls_destroy(sch, cl);
4642 +}
4643 +
4644 +/* Add a ceetm root class or configure a ceetm prio class */
4645 +static int ceetm_cls_change(struct Qdisc *sch, u32 classid,
4646 + u32 parentid, struct nlattr **tca,
4647 + unsigned long *arg)
4648 +{
4649 + int err;
4650 + u64 bps;
4651 + struct ceetm_qdisc *priv;
4652 + struct ceetm_class *cl = (struct ceetm_class *)*arg;
4653 + struct nlattr *opt = tca[TCA_OPTIONS];
4654 + struct nlattr *tb[__TCA_CEETM_MAX];
4655 + struct tc_ceetm_copt *copt;
4656 + struct qm_ceetm_channel *channel;
4657 + struct net_device *dev = qdisc_dev(sch);
4658 +
4659 + pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n",
4660 + __func__, classid, sch->handle);
4661 +
4662 + if (strcmp(sch->ops->id, ceetm_qdisc_ops.id)) {
4663 + pr_err("CEETM: a ceetm class can not be attached to other "
4664 + "qdisc/class types\n");
4665 + return -EINVAL;
4666 + }
4667 +
4668 + priv = qdisc_priv(sch);
4669 +
4670 + if (!opt) {
4671 + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
4672 + return -EINVAL;
4673 + }
4674 +
4675 + if (!cl && sch->handle != parentid) {
4676 + pr_err("CEETM: classes can be attached to the root ceetm "
4677 + "qdisc only\n");
4678 + return -EINVAL;
4679 + }
4680 +
4681 + if (!cl && priv->type != CEETM_ROOT) {
4682 + pr_err("CEETM: only root ceetm classes can be attached to the "
4683 + "root ceetm qdisc\n");
4684 + return -EINVAL;
4685 + }
4686 +
4687 + err = nla_parse_nested(tb, TCA_CEETM_COPT, opt, ceetm_policy);
4688 + if (err < 0) {
4689 + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
4690 + return -EINVAL;
4691 + }
4692 +
4693 + if (tb[TCA_CEETM_COPT] == NULL) {
4694 + pr_err(KBUILD_BASENAME " : %s : tc error\n", __func__);
4695 + return -EINVAL;
4696 + }
4697 +
4698 + if (TC_H_MIN(classid) >= PFIFO_MIN_OFFSET) {
4699 + pr_err("CEETM: only minors 0x01 to 0x20 can be used for ceetm "
4700 + "root classes\n");
4701 + return -EINVAL;
4702 + }
4703 +
4704 + copt = nla_data(tb[TCA_CEETM_COPT]);
4705 +
4706 + /* Configure an existing ceetm prio class */
4707 + if (cl) {
4708 + if (copt->type != CEETM_PRIO) {
4709 + pr_err("CEETM: only prio ceetm classes can be changed\n");
4710 + return -EINVAL;
4711 + }
4712 +
4713 + if (!cl->shaped && (copt->cr || copt->er)) {
4714 + pr_err("CEETM: only shaped classes can have CR and "
4715 + "ER enabled\n");
4716 + return -EINVAL;
4717 + }
4718 +
4719 + if (cl->prio.cr != (bool)copt->cr)
4720 + err = qman_ceetm_channel_set_cq_cr_eligibility(
4721 + cl->prio.cq->parent,
4722 + cl->prio.cq->idx,
4723 + copt->cr);
4724 +
4725 + if (!err && cl->prio.er != (bool)copt->er)
4726 + err = qman_ceetm_channel_set_cq_er_eligibility(
4727 + cl->prio.cq->parent,
4728 + cl->prio.cq->idx,
4729 + copt->er);
4730 +
4731 + if (err) {
4732 + pr_err(KBUILD_BASENAME " : %s : failed to configure "
4733 + "the ceetm prio class %X\n",
4734 + __func__,
4735 + cl->common.classid);
4736 + return err;
4737 + }
4738 +
4739 + cl->prio.cr = copt->cr;
4740 + cl->prio.er = copt->er;
4741 + return 0;
4742 + }
4743 +
4744 + /* Add a new root ceetm class */
4745 + if (copt->type != CEETM_ROOT) {
4746 + pr_err("CEETM: only root ceetm classes can be attached to the "
4747 + "root ceetm qdisc\n");
4748 + return -EINVAL;
4749 + }
4750 +
4751 + if (copt->shaped && !priv->shaped) {
4752 + pr_err("CEETM: can not add a shaped ceetm root class under an "
4753 + "unshaped ceetm root qdisc\n");
4754 + return -EINVAL;
4755 + }
4756 +
4757 + cl = kzalloc(sizeof(*cl), GFP_KERNEL);
4758 + if (!cl) {
4759 + pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n", __func__);
4760 + return -ENOMEM;
4761 + }
4762 +
4763 + cl->type = copt->type;
4764 + cl->shaped = copt->shaped;
4765 + cl->root.rate = copt->rate;
4766 + cl->root.ceil = copt->ceil;
4767 + cl->root.tbl = copt->tbl;
4768 +
4769 + cl->common.classid = classid;
4770 + cl->refcnt = 1;
4771 + cl->parent = sch;
4772 + cl->root.child = NULL;
4773 + cl->root.wbfs_grp_a = false;
4774 + cl->root.wbfs_grp_b = false;
4775 + cl->root.wbfs_grp_large = false;
4776 +
4777 + /* Claim a CEETM channel */
4778 + err = qman_ceetm_channel_claim(&channel, priv->root.lni);
4779 + if (err) {
4780 + pr_err(KBUILD_BASENAME " : %s : failed to claim a channel\n",
4781 + __func__);
4782 + goto claim_err;
4783 + }
4784 +
4785 + cl->root.ch = channel;
4786 +
4787 + if (cl->shaped) {
4788 + /* Configure the channel shaper */
4789 + err = qman_ceetm_channel_enable_shaper(channel, 1);
4790 + if (err)
4791 + goto channel_err;
4792 +
4793 + bps = cl->root.rate << 3; /* Bps -> bps */
4794 + err = qman_ceetm_channel_set_commit_rate_bps(channel, bps,
4795 + dev->mtu);
4796 + if (err)
4797 + goto channel_err;
4798 +
4799 + bps = cl->root.ceil << 3; /* Bps -> bps */
4800 + err = qman_ceetm_channel_set_excess_rate_bps(channel, bps,
4801 + dev->mtu);
4802 + if (err)
4803 + goto channel_err;
4804 +
4805 + } else {
4806 + /* Configure the uFQ algorithm */
4807 + err = qman_ceetm_channel_set_weight(channel, cl->root.tbl);
4808 + if (err)
4809 + goto channel_err;
4810 + }
4811 +
4812 + /* Add class handle in Qdisc */
4813 + ceetm_link_class(sch, &priv->clhash, &cl->common);
4814 +
4815 + pr_debug(KBUILD_BASENAME " : %s : configured class %X associated with "
4816 + "channel %d\n", __func__, classid, channel->idx);
4817 + *arg = (unsigned long)cl;
4818 + return 0;
4819 +
4820 +channel_err:
4821 + pr_err(KBUILD_BASENAME " : %s : failed to configure the channel %d\n",
4822 + __func__, channel->idx);
4823 + if (qman_ceetm_channel_release(channel))
4824 + pr_err(KBUILD_BASENAME " : %s : failed to release the channel "
4825 + "%d\n", __func__, channel->idx);
4826 +claim_err:
4827 + if (cl) {
4828 + kfree(cl);
4829 + }
4830 + return err;
4831 +}
4832 +
4833 +static void ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg)
4834 +{
4835 + struct ceetm_qdisc *priv = qdisc_priv(sch);
4836 + struct ceetm_class *cl;
4837 + unsigned int i;
4838 +
4839 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
4840 +
4841 + if (arg->stop)
4842 + return;
4843 +
4844 + for (i = 0; i < priv->clhash.hashsize; i++) {
4845 + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
4846 + if (arg->count < arg->skip) {
4847 + arg->count++;
4848 + continue;
4849 + }
4850 + if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
4851 + arg->stop = 1;
4852 + return;
4853 + }
4854 + arg->count++;
4855 + }
4856 + }
4857 +}
4858 +
4859 +static int ceetm_cls_dump(struct Qdisc *sch, unsigned long arg,
4860 + struct sk_buff *skb, struct tcmsg *tcm)
4861 +{
4862 + struct ceetm_class *cl = (struct ceetm_class *)arg;
4863 + struct nlattr *nest;
4864 + struct tc_ceetm_copt copt;
4865 +
4866 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
4867 + __func__, cl->common.classid, sch->handle);
4868 +
4869 + sch_tree_lock(sch);
4870 +
4871 + tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle;
4872 + tcm->tcm_handle = cl->common.classid;
4873 +
4874 + memset(&copt, 0, sizeof(copt));
4875 +
4876 + copt.shaped = cl->shaped;
4877 + copt.type = cl->type;
4878 +
4879 + switch (cl->type) {
4880 + case CEETM_ROOT:
4881 + if (cl->root.child)
4882 + tcm->tcm_info = cl->root.child->handle;
4883 +
4884 + copt.rate = cl->root.rate;
4885 + copt.ceil = cl->root.ceil;
4886 + copt.tbl = cl->root.tbl;
4887 + break;
4888 +
4889 + case CEETM_PRIO:
4890 + if (cl->prio.child)
4891 + tcm->tcm_info = cl->prio.child->handle;
4892 +
4893 + copt.cr = cl->prio.cr;
4894 + copt.er = cl->prio.er;
4895 + break;
4896 +
4897 + case CEETM_WBFS:
4898 + copt.weight = cl->wbfs.weight;
4899 + break;
4900 + }
4901 +
4902 + nest = nla_nest_start(skb, TCA_OPTIONS);
4903 + if (nest == NULL)
4904 + goto nla_put_failure;
4905 + if (nla_put(skb, TCA_CEETM_COPT, sizeof(copt), &copt))
4906 + goto nla_put_failure;
4907 + nla_nest_end(skb, nest);
4908 + sch_tree_unlock(sch);
4909 + return skb->len;
4910 +
4911 +nla_put_failure:
4912 + sch_tree_unlock(sch);
4913 + nla_nest_cancel(skb, nest);
4914 + return -EMSGSIZE;
4915 +}
4916 +
4917 +static int ceetm_cls_delete(struct Qdisc *sch, unsigned long arg)
4918 +{
4919 + struct ceetm_qdisc *priv = qdisc_priv(sch);
4920 + struct ceetm_class *cl = (struct ceetm_class *)arg;
4921 +
4922 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
4923 + __func__, cl->common.classid, sch->handle);
4924 +
4925 + sch_tree_lock(sch);
4926 + qdisc_class_hash_remove(&priv->clhash, &cl->common);
4927 + cl->refcnt--;
4928 +
4929 +	/* The refcnt should be at least 1, since we have incremented it in
4930 +	 * get(). It will be decremented again in put(), which calls destroy()
4931 +	 * to actually free the memory once it reaches 0. */
4932 + BUG_ON(cl->refcnt == 0);
4933 +
4934 + sch_tree_unlock(sch);
4935 + return 0;
4936 +}
4937 +
4938 +/* Get the class' child qdisc, if any */
4939 +static struct Qdisc *ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg)
4940 +{
4941 + struct ceetm_class *cl = (struct ceetm_class *)arg;
4942 +
4943 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
4944 + __func__, cl->common.classid, sch->handle);
4945 +
4946 + switch (cl->type) {
4947 + case CEETM_ROOT:
4948 + return cl->root.child;
4949 + break;
4950 +
4951 + case CEETM_PRIO:
4952 + return cl->prio.child;
4953 + break;
4954 + }
4955 +
4956 + return NULL;
4957 +}
4958 +
4959 +static int ceetm_cls_graft(struct Qdisc *sch, unsigned long arg,
4960 + struct Qdisc *new, struct Qdisc **old)
4961 +{
4962 + if (new && strcmp(new->ops->id, ceetm_qdisc_ops.id)) {
4963 + pr_err("CEETM: only ceetm qdiscs can be attached to ceetm "
4964 + "classes\n");
4965 + return -EOPNOTSUPP;
4966 + }
4967 +
4968 + return 0;
4969 +}
4970 +
4971 +static int ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg,
4972 + struct gnet_dump *d)
4973 +{
4974 + unsigned int i;
4975 + struct ceetm_class *cl = (struct ceetm_class *)arg;
4976 + struct gnet_stats_basic_packed tmp_bstats;
4977 + struct ceetm_class_stats *cstats = NULL;
4978 + struct qm_ceetm_cq *cq = NULL;
4979 + struct tc_ceetm_xstats xstats;
4980 +
4981 + memset(&xstats, 0, sizeof(xstats));
4982 + memset(&tmp_bstats, 0, sizeof(tmp_bstats));
4983 +
4984 + switch (cl->type) {
4985 + case CEETM_ROOT:
4986 + return 0;
4987 + case CEETM_PRIO:
4988 + cq = cl->prio.cq;
4989 + break;
4990 + case CEETM_WBFS:
4991 + cq = cl->wbfs.cq;
4992 + break;
4993 + }
4994 +
4995 + for_each_online_cpu(i) {
4996 + switch (cl->type) {
4997 + case CEETM_PRIO:
4998 + cstats = per_cpu_ptr(cl->prio.cstats, i);
4999 + break;
5000 + case CEETM_WBFS:
5001 + cstats = per_cpu_ptr(cl->wbfs.cstats, i);
5002 + break;
5003 + }
5004 +
5005 + if (cstats) {
5006 + xstats.ern_drop_count += cstats->ern_drop_count;
5007 + xstats.cgr_congested_count += cstats->cgr_congested_count;
5008 + tmp_bstats.bytes += cstats->bstats.bytes;
5009 + tmp_bstats.packets += cstats->bstats.packets;
5010 + }
5011 + }
5012 +
5013 + if (gnet_stats_copy_basic(d, NULL, &tmp_bstats) < 0)
5014 + return -1;
5015 +
5016 + if (cq && qman_ceetm_cq_get_dequeue_statistics(cq, 0,
5017 + &xstats.frame_count, &xstats.byte_count))
5018 + return -1;
5019 +
5020 + return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
5021 +}
5022 +
5023 +static struct tcf_proto **ceetm_tcf_chain(struct Qdisc *sch, unsigned long arg)
5024 +{
5025 + struct ceetm_qdisc *priv = qdisc_priv(sch);
5026 + struct ceetm_class *cl = (struct ceetm_class *)arg;
5027 + struct tcf_proto **fl = cl ? &cl->filter_list : &priv->filter_list;
5028 +
5029 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
5030 + cl ? cl->common.classid : 0, sch->handle);
5031 + return fl;
5032 +}
5033 +
5034 +static unsigned long ceetm_tcf_bind(struct Qdisc *sch, unsigned long parent,
5035 + u32 classid)
5036 +{
5037 +	struct ceetm_class *cl = ceetm_find(classid, sch);
5038 +
5039 +	pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
5039 + cl ? cl->common.classid : 0, sch->handle);
5040 + return (unsigned long)cl;
5041 +}
5042 +
5043 +static void ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg)
5044 +{
5045 +	struct ceetm_class *cl = (struct ceetm_class *)arg;
5046 +
5047 +	pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
5047 + cl ? cl->common.classid : 0, sch->handle);
5048 +}
5049 +
5050 +const struct Qdisc_class_ops ceetm_cls_ops = {
5051 + .graft = ceetm_cls_graft,
5052 + .leaf = ceetm_cls_leaf,
5053 + .get = ceetm_cls_get,
5054 + .put = ceetm_cls_put,
5055 + .change = ceetm_cls_change,
5056 + .delete = ceetm_cls_delete,
5057 + .walk = ceetm_cls_walk,
5058 + .tcf_chain = ceetm_tcf_chain,
5059 + .bind_tcf = ceetm_tcf_bind,
5060 + .unbind_tcf = ceetm_tcf_unbind,
5061 + .dump = ceetm_cls_dump,
5062 + .dump_stats = ceetm_cls_dump_stats,
5063 +};
5064 +
5065 +struct Qdisc_ops ceetm_qdisc_ops __read_mostly = {
5066 + .id = "ceetm",
5067 + .priv_size = sizeof(struct ceetm_qdisc),
5068 + .cl_ops = &ceetm_cls_ops,
5069 + .init = ceetm_init,
5070 + .destroy = ceetm_destroy,
5071 + .dump = ceetm_dump,
5072 + .attach = ceetm_attach,
5073 + .owner = THIS_MODULE,
5074 +};
5075 +
5076 +/* Run the filters and classifiers attached to the qdisc on the provided skb */
5077 +static struct ceetm_class *ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
5078 + int *qerr, bool *act_drop)
5079 +{
5080 + struct ceetm_qdisc *priv = qdisc_priv(sch);
5081 + struct ceetm_class *cl = NULL, *wbfs_cl;
5082 + struct tcf_result res;
5083 + struct tcf_proto *tcf;
5084 + int result;
5085 +
5086 + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
5087 + tcf = priv->filter_list;
5088 + while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
5089 +#ifdef CONFIG_NET_CLS_ACT
5090 + switch (result) {
5091 + case TC_ACT_QUEUED:
5092 + case TC_ACT_STOLEN:
5093 +			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
5094 +			/* fall through */
5095 +		case TC_ACT_SHOT:
5095 + /* No valid class found due to action */
5096 + *act_drop = true;
5097 + return NULL;
5098 + }
5099 +#endif
5100 + cl = (void *)res.class;
5101 + if (!cl) {
5102 + if (res.classid == sch->handle) {
5103 + /* The filter leads to the qdisc */
5104 + /* TODO default qdisc */
5105 + return NULL;
5106 + }
5107 +
5108 + cl = ceetm_find(res.classid, sch);
5109 + if (!cl)
5110 + /* The filter leads to an invalid class */
5111 + break;
5112 + }
5113 +
5114 + /* The class might have its own filters attached */
5115 + tcf = cl->filter_list;
5116 + }
5117 +
5118 + if (!cl) {
5119 + /* No valid class found */
5120 + /* TODO default qdisc */
5121 + return NULL;
5122 + }
5123 +
5124 + switch (cl->type) {
5125 + case CEETM_ROOT:
5126 + if (cl->root.child) {
5127 + /* Run the prio qdisc classifiers */
5128 + return ceetm_classify(skb, cl->root.child, qerr,
5129 + act_drop);
5130 + } else {
5131 + /* The root class does not have a child prio qdisc */
5132 + /* TODO default qdisc */
5133 + return NULL;
5134 + }
5135 + case CEETM_PRIO:
5136 + if (cl->prio.child) {
5137 + /* If filters lead to a wbfs class, return it.
5138 + * Otherwise, return the prio class */
5139 + wbfs_cl = ceetm_classify(skb, cl->prio.child, qerr,
5140 + act_drop);
5141 + /* A NULL result might indicate either an erroneous
5142 + * filter, or no filters at all. We will assume the
5143 + * latter */
5144 + return wbfs_cl ? : cl;
5145 + }
5146 + }
5147 +
5148 + /* For wbfs and childless prio classes, return the class directly */
5149 + return cl;
5150 +}
5151 +
5152 +int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev)
5153 +{
5154 + int ret;
5155 + bool act_drop = false;
5156 + struct Qdisc *sch = net_dev->qdisc;
5157 + struct ceetm_class *cl;
5158 + struct dpa_priv_s *priv_dpa;
5159 + struct qman_fq *egress_fq, *conf_fq;
5160 + struct ceetm_qdisc *priv = qdisc_priv(sch);
5161 + struct ceetm_qdisc_stats *qstats = this_cpu_ptr(priv->root.qstats);
5162 + struct ceetm_class_stats *cstats;
5163 + const int queue_mapping = dpa_get_queue_mapping(skb);
5164 + spinlock_t *root_lock = qdisc_lock(sch);
5165 +
5166 + spin_lock(root_lock);
5167 + cl = ceetm_classify(skb, sch, &ret, &act_drop);
5168 + spin_unlock(root_lock);
5169 +
5170 +#ifdef CONFIG_NET_CLS_ACT
5171 + if (act_drop) {
5172 + if (ret & __NET_XMIT_BYPASS)
5173 + qstats->drops++;
5174 + goto drop;
5175 + }
5176 +#endif
5177 + /* TODO default class */
5178 + if (unlikely(!cl)) {
5179 + qstats->drops++;
5180 + goto drop;
5181 + }
5182 +
5183 + priv_dpa = netdev_priv(net_dev);
5184 + conf_fq = priv_dpa->conf_fqs[queue_mapping];
5185 +
5186 + /* Choose the proper tx fq and update the basic stats (bytes and
5187 + * packets sent by the class) */
5188 + switch (cl->type) {
5189 + case CEETM_PRIO:
5190 + egress_fq = &(cl->prio.fq->fq);
5191 + cstats = this_cpu_ptr(cl->prio.cstats);
5192 + break;
5193 + case CEETM_WBFS:
5194 + egress_fq = &(cl->wbfs.fq->fq);
5195 + cstats = this_cpu_ptr(cl->wbfs.cstats);
5196 + break;
5197 + default:
5198 + qstats->drops++;
5199 + goto drop;
5200 + }
5201 +
5202 + bstats_update(&cstats->bstats, skb);
5203 + return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq);
5204 +
5205 +drop:
5206 + dev_kfree_skb_any(skb);
5207 + return NET_XMIT_SUCCESS;
5208 +}
5209 +
5210 +static int __init ceetm_register(void)
5211 +{
5212 + int _errno = 0;
5213 +
5214 + pr_info(KBUILD_MODNAME ": " DPA_CEETM_DESCRIPTION "\n");
5215 +
5216 + _errno = register_qdisc(&ceetm_qdisc_ops);
5217 + if (unlikely(_errno))
5218 + pr_err(KBUILD_MODNAME
5219 + ": %s:%hu:%s(): register_qdisc() = %d\n",
5220 + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
5221 +
5222 + return _errno;
5223 +}
5224 +
5225 +static void __exit ceetm_unregister(void)
5226 +{
5227 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
5228 + KBUILD_BASENAME".c", __func__);
5229 +
5230 + unregister_qdisc(&ceetm_qdisc_ops);
5231 +}
5232 +
5233 +module_init(ceetm_register);
5234 +module_exit(ceetm_unregister);
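For orientation, this hierarchy is driven entirely from userspace through tc; the option names below are inferred from the tc_ceetm_qopt/tc_ceetm_copt layouts declared in the header that follows (the matching iproute2 support ships with the NXP SDK, not with this patch), so treat the exact syntax as an illustrative sketch:

/* illustrative tc sequence (assumed syntax, not verified against the
 * SDK's iproute2 patches):
 *   tc qdisc add dev fm1-mac1 root handle 1: ceetm type root
 *   tc class add dev fm1-mac1 parent 1: classid 1:1 ceetm type root \
 *           rate 1000mbit ceil 1000mbit
 *   tc qdisc add dev fm1-mac1 parent 1:1 handle 2: ceetm type prio qcount 8
 */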
5235 --- /dev/null
5236 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h
5237 @@ -0,0 +1,230 @@
5238 +/* Copyright 2008-2016 Freescale Semiconductor Inc.
5239 + *
5240 + * Redistribution and use in source and binary forms, with or without
5241 + * modification, are permitted provided that the following conditions are met:
5242 + * * Redistributions of source code must retain the above copyright
5243 + * notice, this list of conditions and the following disclaimer.
5244 + * * Redistributions in binary form must reproduce the above copyright
5245 + * notice, this list of conditions and the following disclaimer in the
5246 + * documentation and/or other materials provided with the distribution.
5247 + * * Neither the name of Freescale Semiconductor nor the
5248 + * names of its contributors may be used to endorse or promote products
5249 + * derived from this software without specific prior written permission.
5250 + *
5251 + *
5252 + * ALTERNATIVELY, this software may be distributed under the terms of the
5253 + * GNU General Public License ("GPL") as published by the Free Software
5254 + * Foundation, either version 2 of that License or (at your option) any
5255 + * later version.
5256 + *
5257 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
5258 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
5259 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
5260 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
5261 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
5262 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
5263 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
5264 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5265 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5266 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5267 + */
5268 +
5269 +#ifndef __DPAA_ETH_CEETM_H
5270 +#define __DPAA_ETH_CEETM_H
5271 +
5272 +#include <net/pkt_sched.h>
5273 +#include <net/netlink.h>
5274 +#include <lnxwrp_fm.h>
5275 +
5276 +#include "mac.h"
5277 +#include "dpaa_eth_common.h"
5278 +
5279 +/* Mask to determine the sub-portal id from a channel number */
5280 +#define CHANNEL_SP_MASK 0x1f
5281 +/* The number of the last channel that services DCP0, connected to FMan 0.
5282 + * Value validated for B4 and T series platforms.
5283 + */
5284 +#define DCP0_MAX_CHANNEL 0x80f
5285 +/* A2V=1 - field A2 is valid
5286 + * A0V=1 - field A0 is valid - enables frame confirmation
5287 + * OVOM=1 - override operation mode bits with values from A2
5288 + * EBD=1 - external buffers are deallocated at the end of the FMan flow
5289 + * NL=0 - the BMI releases all the internal buffers
5290 + */
5291 +#define CEETM_CONTEXT_A 0x1a00000080000000
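To make the constant auditable, the named bits can be broken out of the 64-bit value; the positions below are inferred from the value itself and from the 0x1e000000/0x80000000 context_a pair programmed in dpa_fq_init() later in this patch, not from an FMan reference manual:

/* inferred decomposition (assumption, see note above):
 *   top byte 0x1a = bits 60/59/57 -> A2V, OVOM and A0V set;
 *   bit 58 (0x04 in the top byte) would be B0V -- present in the
 *   0x1e000000 context_a.hi used by dpa_fq_init(), deliberately clear here;
 *   bit 31 (0x80000000) falls inside the A2 field and is EBD;
 *   NL stays 0, so the BMI releases all internal buffers.
 */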
5292 +
5293 +/* For functional purposes, there are num_tx_queues pfifo qdiscs through which
5294 + * frames reach the driver. Their handles start from 1:21. Handles 1:1 to 1:20
5295 + * are reserved for the maximum 32 CEETM channels (majors and minors are in
5296 + * hex).
5297 + */
5298 +#define PFIFO_MIN_OFFSET 0x21
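Concretely, the i-th Tx pfifo attached under a ceetm root with handle 1: ends up with minor PFIFO_MIN_OFFSET + i; a sketch with the standard TC_H_* macros (the actual assignment happens in ceetm_attach()):

/* sketch: handle of the i-th tx pfifo under the ceetm root qdisc */
u32 pfifo_handle = TC_H_MAKE(TC_H_MAJ(sch->handle),
			     TC_H_MIN(i + PFIFO_MIN_OFFSET));
/* i == 0 yields 1:21, matching the comment above */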
5299 +
5300 +/* A maximum of 8 CQs can be linked to a CQ channel or to a WBFS scheduler. */
5301 +#define CEETM_MAX_PRIO_QCOUNT 8
5302 +#define CEETM_MAX_WBFS_QCOUNT 8
5303 +#define CEETM_MIN_WBFS_QCOUNT 4
5304 +
5305 +/* The CQ ids used by the WBFS groups: group A uses ids 8-11 (8-15 when
5306 + * configured as a single large group) and group B uses ids 12-15.
5307 + */
5308 +#define WBFS_GRP_A_OFFSET 8
5309 +#define WBFS_GRP_B_OFFSET 12
5310 +
5311 +#define WBFS_GRP_A 1
5312 +#define WBFS_GRP_B 2
5313 +#define WBFS_GRP_LARGE 3
5314 +
5315 +enum {
5316 + TCA_CEETM_UNSPEC,
5317 + TCA_CEETM_COPT,
5318 + TCA_CEETM_QOPS,
5319 + __TCA_CEETM_MAX,
5320 +};
5321 +
5322 +/* CEETM configuration types */
5323 +enum {
5324 + CEETM_ROOT = 1,
5325 + CEETM_PRIO,
5326 + CEETM_WBFS
5327 +};
5328 +
5329 +#define TCA_CEETM_MAX (__TCA_CEETM_MAX - 1)
5330 +extern const struct nla_policy ceetm_policy[TCA_CEETM_MAX + 1];
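The enum above names the attributes nested under TCA_OPTIONS in a ceetm netlink message. A minimal sketch of how a change() callback would unpack TCA_CEETM_COPT against ceetm_policy (illustrative only; the real parsing lives in dpaa_eth_ceetm.c):

static int example_parse_copt(struct nlattr *opt, struct tc_ceetm_copt **copt)
{
	struct nlattr *tb[__TCA_CEETM_MAX];
	int err;

	/* validate the nested attributes against ceetm_policy */
	err = nla_parse_nested(tb, TCA_CEETM_MAX, opt, ceetm_policy);
	if (err < 0)
		return err;

	if (!tb[TCA_CEETM_COPT])
		return -EINVAL;

	*copt = nla_data(tb[TCA_CEETM_COPT]);
	return 0;
}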
5331 +
5332 +struct ceetm_class;
5333 +struct ceetm_qdisc_stats;
5334 +struct ceetm_class_stats;
5335 +
5336 +struct ceetm_fq {
5337 + struct qman_fq fq;
5338 + struct net_device *net_dev;
5339 + struct ceetm_class *ceetm_cls;
5340 +};
5341 +
5342 +struct root_q {
5343 + struct Qdisc **qdiscs;
5344 + __u16 overhead;
5345 + __u32 rate;
5346 + __u32 ceil;
5347 + struct qm_ceetm_sp *sp;
5348 + struct qm_ceetm_lni *lni;
5349 + struct ceetm_qdisc_stats __percpu *qstats;
5350 +};
5351 +
5352 +struct prio_q {
5353 + __u16 qcount;
5354 + struct ceetm_class *parent;
5355 +};
5356 +
5357 +struct wbfs_q {
5358 + __u16 qcount;
5359 + int group_type;
5360 + struct ceetm_class *parent;
5361 + __u16 cr;
5362 + __u16 er;
5363 +};
5364 +
5365 +struct ceetm_qdisc {
5366 + int type; /* LNI/CHNL/WBFS */
5367 + bool shaped;
5368 + union {
5369 + struct root_q root;
5370 + struct prio_q prio;
5371 + struct wbfs_q wbfs;
5372 + };
5373 + struct Qdisc_class_hash clhash;
5374 + struct tcf_proto *filter_list; /* qdisc attached filters */
5375 +};
5376 +
5377 +/* CEETM Qdisc configuration parameters */
5378 +struct tc_ceetm_qopt {
5379 + __u32 type;
5380 + __u16 shaped;
5381 + __u16 qcount;
5382 + __u16 overhead;
5383 + __u32 rate;
5384 + __u32 ceil;
5385 + __u16 cr;
5386 + __u16 er;
5387 + __u8 qweight[CEETM_MAX_WBFS_QCOUNT];
5388 +};
5389 +
5390 +struct root_c {
5391 + unsigned int rate;
5392 + unsigned int ceil;
5393 + unsigned int tbl;
5394 + bool wbfs_grp_a;
5395 + bool wbfs_grp_b;
5396 + bool wbfs_grp_large;
5397 + struct Qdisc *child;
5398 + struct qm_ceetm_channel *ch;
5399 +};
5400 +
5401 +struct prio_c {
5402 + bool cr;
5403 + bool er;
5404 + struct ceetm_fq *fq; /* Hardware FQ instance Handle */
5405 + struct qm_ceetm_lfq *lfq;
5406 + struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */
5407 + struct qm_ceetm_ccg *ccg;
5408 + /* only one wbfs can be linked to one priority CQ */
5409 + struct Qdisc *child;
5410 + struct ceetm_class_stats __percpu *cstats;
5411 +};
5412 +
5413 +struct wbfs_c {
5414 + __u8 weight; /* The weight of the class between 1 and 248 */
5415 + struct ceetm_fq *fq; /* Hardware FQ instance Handle */
5416 + struct qm_ceetm_lfq *lfq;
5417 + struct qm_ceetm_cq *cq; /* Hardware Class Queue instance Handle */
5418 + struct qm_ceetm_ccg *ccg;
5419 + struct ceetm_class_stats __percpu *cstats;
5420 +};
5421 +
5422 +struct ceetm_class {
5423 + struct Qdisc_class_common common;
5424 + int refcnt; /* usage count of this class */
5425 + struct tcf_proto *filter_list; /* class attached filters */
5426 + struct Qdisc *parent;
5427 + bool shaped;
5428 + int type; /* ROOT/PRIO/WBFS */
5429 + union {
5430 + struct root_c root;
5431 + struct prio_c prio;
5432 + struct wbfs_c wbfs;
5433 + };
5434 +};
5435 +
5436 +/* CEETM Class configuration parameters */
5437 +struct tc_ceetm_copt {
5438 + __u32 type;
5439 + __u16 shaped;
5440 + __u32 rate;
5441 + __u32 ceil;
5442 + __u16 tbl;
5443 + __u16 cr;
5444 + __u16 er;
5445 + __u8 weight;
5446 +};
5447 +
5448 +/* CEETM stats */
5449 +struct ceetm_qdisc_stats {
5450 + __u32 drops;
5451 +};
5452 +
5453 +struct ceetm_class_stats {
5454 + struct gnet_stats_basic_packed bstats;
5455 + __u32 ern_drop_count;
5456 + __u32 cgr_congested_count;
5457 +};
5458 +
5459 +struct tc_ceetm_xstats {
5460 + __u32 ern_drop_count;
5461 + __u32 cgr_congested_count;
5462 + __u64 frame_count;
5463 + __u64 byte_count;
5464 +};
5465 +
5466 +int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev);
5467 +#endif
5468 --- /dev/null
5469 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
5470 @@ -0,0 +1,1787 @@
5471 +/* Copyright 2008-2013 Freescale Semiconductor, Inc.
5472 + *
5473 + * Redistribution and use in source and binary forms, with or without
5474 + * modification, are permitted provided that the following conditions are met:
5475 + * * Redistributions of source code must retain the above copyright
5476 + * notice, this list of conditions and the following disclaimer.
5477 + * * Redistributions in binary form must reproduce the above copyright
5478 + * notice, this list of conditions and the following disclaimer in the
5479 + * documentation and/or other materials provided with the distribution.
5480 + * * Neither the name of Freescale Semiconductor nor the
5481 + * names of its contributors may be used to endorse or promote products
5482 + * derived from this software without specific prior written permission.
5483 + *
5484 + *
5485 + * ALTERNATIVELY, this software may be distributed under the terms of the
5486 + * GNU General Public License ("GPL") as published by the Free Software
5487 + * Foundation, either version 2 of that License or (at your option) any
5488 + * later version.
5489 + *
5490 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
5491 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
5492 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
5493 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
5494 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
5495 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
5496 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
5497 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5498 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5499 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5500 + */
5501 +
5502 +#include <linux/init.h>
5503 +#include <linux/module.h>
5504 +#include <linux/of_platform.h>
5505 +#include <linux/of_net.h>
5506 +#include <linux/etherdevice.h>
5507 +#include <linux/kthread.h>
5508 +#include <linux/percpu.h>
5509 +#include <linux/highmem.h>
5510 +#include <linux/sort.h>
5511 +#include <linux/fsl_qman.h>
5512 +#include <linux/ip.h>
5513 +#include <linux/ipv6.h>
5514 +#include <linux/if_vlan.h> /* vlan_eth_hdr */
5515 +#include "dpaa_eth.h"
5516 +#include "dpaa_eth_common.h"
5517 +#ifdef CONFIG_FSL_DPAA_1588
5518 +#include "dpaa_1588.h"
5519 +#endif
5520 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
5521 +#include "dpaa_debugfs.h"
5522 +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
5523 +#include "mac.h"
5524 +
5525 +/* DPAA platforms benefit from hardware-assisted queue management */
5526 +#define DPA_NETIF_FEATURES NETIF_F_HW_ACCEL_MQ
5527 +
5528 +/* Size in bytes of the FQ taildrop threshold */
5529 +#define DPA_FQ_TD 0x200000
5530 +
5531 +#ifdef CONFIG_PTP_1588_CLOCK_DPAA
5532 +struct ptp_priv_s ptp_priv;
5533 +#endif
5534 +
5535 +static struct dpa_bp *dpa_bp_array[64];
5536 +
5537 +int dpa_max_frm;
5538 +EXPORT_SYMBOL(dpa_max_frm);
5539 +
5540 +int dpa_rx_extra_headroom;
5541 +EXPORT_SYMBOL(dpa_rx_extra_headroom);
5542 +
5543 +int dpa_num_cpus = NR_CPUS;
5544 +
5545 +static const struct fqid_cell tx_confirm_fqids[] = {
5546 + {0, DPAA_ETH_TX_QUEUES}
5547 +};
5548 +
5549 +static struct fqid_cell default_fqids[][3] = {
5550 + [RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} },
5551 + [TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} }
5552 +};
5553 +
5554 +static const char fsl_qman_frame_queues[][25] = {
5555 + [RX] = "fsl,qman-frame-queues-rx",
5556 + [TX] = "fsl,qman-frame-queues-tx"
5557 +};
5558 +#ifdef CONFIG_FSL_DPAA_HOOKS
5559 +/* A set of callbacks for hooking into the fastpath at different points. */
5560 +struct dpaa_eth_hooks_s dpaa_eth_hooks;
5561 +EXPORT_SYMBOL(dpaa_eth_hooks);
5562 +/* This function should only be called on the probe paths, since it makes no
5563 + * effort to guarantee consistency of the destination hooks structure.
5564 + */
5565 +void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks)
5566 +{
5567 + if (hooks)
5568 + dpaa_eth_hooks = *hooks;
5569 + else
5570 + pr_err("NULL pointer to hooks!\n");
5571 +}
5572 +EXPORT_SYMBOL(fsl_dpaa_eth_set_hooks);
5573 +#endif
5574 +
5575 +int dpa_netdev_init(struct net_device *net_dev,
5576 + const uint8_t *mac_addr,
5577 + uint16_t tx_timeout)
5578 +{
5579 + int err;
5580 + struct dpa_priv_s *priv = netdev_priv(net_dev);
5581 + struct device *dev = net_dev->dev.parent;
5582 +
5583 + net_dev->hw_features |= DPA_NETIF_FEATURES;
5584 +
5585 + net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
5586 +
5587 + net_dev->features |= net_dev->hw_features;
5588 + net_dev->vlan_features = net_dev->features;
5589 +
5590 + memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
5591 + memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
5592 +
5593 + net_dev->ethtool_ops = &dpa_ethtool_ops;
5594 +
5595 + net_dev->needed_headroom = priv->tx_headroom;
5596 + net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
5597 +
5598 + err = register_netdev(net_dev);
5599 + if (err < 0) {
5600 + dev_err(dev, "register_netdev() = %d\n", err);
5601 + return err;
5602 + }
5603 +
5604 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
5605 + /* create debugfs entry for this net_device */
5606 + err = dpa_netdev_debugfs_create(net_dev);
5607 + if (err) {
5608 + unregister_netdev(net_dev);
5609 + return err;
5610 + }
5611 +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
5612 +
5613 + return 0;
5614 +}
5615 +EXPORT_SYMBOL(dpa_netdev_init);
5616 +
5617 +int __cold dpa_start(struct net_device *net_dev)
5618 +{
5619 + int err, i;
5620 + struct dpa_priv_s *priv;
5621 + struct mac_device *mac_dev;
5622 +
5623 + priv = netdev_priv(net_dev);
5624 + mac_dev = priv->mac_dev;
5625 +
5626 + err = mac_dev->init_phy(net_dev, priv->mac_dev);
5627 + if (err < 0) {
5628 + if (netif_msg_ifup(priv))
5629 + netdev_err(net_dev, "init_phy() = %d\n", err);
5630 + return err;
5631 + }
5632 +
5633 + for_each_port_device(i, mac_dev->port_dev) {
5634 + err = fm_port_enable(mac_dev->port_dev[i]);
5635 + if (err)
5636 + goto mac_start_failed;
5637 + }
5638 +
5639 + err = priv->mac_dev->start(mac_dev);
5640 + if (err < 0) {
5641 + if (netif_msg_ifup(priv))
5642 + netdev_err(net_dev, "mac_dev->start() = %d\n", err);
5643 + goto mac_start_failed;
5644 + }
5645 +
5646 + netif_tx_start_all_queues(net_dev);
5647 +
5648 + return 0;
5649 +
5650 +mac_start_failed:
5651 + for_each_port_device(i, mac_dev->port_dev)
5652 + fm_port_disable(mac_dev->port_dev[i]);
5653 +
5654 + return err;
5655 +}
5656 +EXPORT_SYMBOL(dpa_start);
5657 +
5658 +int __cold dpa_stop(struct net_device *net_dev)
5659 +{
5660 + int _errno, i, err;
5661 + struct dpa_priv_s *priv;
5662 + struct mac_device *mac_dev;
5663 +
5664 + priv = netdev_priv(net_dev);
5665 + mac_dev = priv->mac_dev;
5666 +
5667 + netif_tx_stop_all_queues(net_dev);
5668 + /* Allow the Fman (Tx) port to process in-flight frames before we
5669 + * try switching it off.
5670 + */
5671 + usleep_range(5000, 10000);
5672 +
5673 + _errno = mac_dev->stop(mac_dev);
5674 + if (unlikely(_errno < 0))
5675 + if (netif_msg_ifdown(priv))
5676 + netdev_err(net_dev, "mac_dev->stop() = %d\n",
5677 + _errno);
5678 +
5679 + for_each_port_device(i, mac_dev->port_dev) {
5680 + err = fm_port_disable(mac_dev->port_dev[i]);
5681 + _errno = err ? err : _errno;
5682 + }
5683 +
5684 + if (mac_dev->phy_dev)
5685 + phy_disconnect(mac_dev->phy_dev);
5686 + mac_dev->phy_dev = NULL;
5687 +
5688 + return _errno;
5689 +}
5690 +EXPORT_SYMBOL(dpa_stop);
5691 +
5692 +void __cold dpa_timeout(struct net_device *net_dev)
5693 +{
5694 + const struct dpa_priv_s *priv;
5695 + struct dpa_percpu_priv_s *percpu_priv;
5696 +
5697 + priv = netdev_priv(net_dev);
5698 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
5699 +
5700 + if (netif_msg_timer(priv))
5701 + netdev_crit(net_dev, "Transmit timeout latency: %u ms\n",
5702 + jiffies_to_msecs(jiffies - net_dev->trans_start));
5703 +
5704 + percpu_priv->stats.tx_errors++;
5705 +}
5706 +EXPORT_SYMBOL(dpa_timeout);
5707 +
5708 +/* net_device */
5709 +
5710 +/**
5711 + * dpa_get_stats64 - compute the device's statistics
5712 + * @net_dev: the device for which statistics are calculated
5713 + * @stats: filled in with the device's statistics; its address is returned
5714 + *
5715 + * Calculates the statistics for the given device by adding the statistics
5716 + * collected by each CPU.
5717 + */
5718 +struct rtnl_link_stats64 * __cold
5719 +dpa_get_stats64(struct net_device *net_dev,
5720 + struct rtnl_link_stats64 *stats)
5721 +{
5722 + struct dpa_priv_s *priv = netdev_priv(net_dev);
5723 + u64 *cpustats;
5724 + u64 *netstats = (u64 *)stats;
5725 + int i, j;
5726 + struct dpa_percpu_priv_s *percpu_priv;
5727 + int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
5728 +
5729 + for_each_possible_cpu(i) {
5730 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
5731 +
5732 + cpustats = (u64 *)&percpu_priv->stats;
5733 +
5734 + for (j = 0; j < numstats; j++)
5735 + netstats[j] += cpustats[j];
5736 + }
5737 +
5738 + return stats;
5739 +}
5740 +EXPORT_SYMBOL(dpa_get_stats64);
5741 +
5742 +int dpa_change_mtu(struct net_device *net_dev, int new_mtu)
5743 +{
5744 + const int max_mtu = dpa_get_max_mtu();
5745 +
5746 + /* Make sure we don't exceed the Ethernet controller's MAXFRM */
5747 + if (new_mtu < 68 || new_mtu > max_mtu) {
5748 + netdev_err(net_dev, "Invalid L3 mtu %d (must be between %d and %d).\n",
5749 + new_mtu, 68, max_mtu);
5750 + return -EINVAL;
5751 + }
5752 + net_dev->mtu = new_mtu;
5753 +
5754 + return 0;
5755 +}
5756 +EXPORT_SYMBOL(dpa_change_mtu);
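The arithmetic behind dpa_get_max_mtu() is worth spelling out. Assuming the helper (defined elsewhere in this patch) subtracts the VLAN-tagged Ethernet overhead from the configured MAXFRM, the defaults line up as follows:

/* assumed: max_mtu = fsl_fm_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN)
 *                  = 1522 - (18 + 4) = 1500 for the default MAXFRM of 1522,
 * so the initial MTU picked in dpa_ndo_init() below is exactly the largest
 * value dpa_change_mtu() accepts out of the box.
 */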
5757 +
5758 +/* .ndo_init callback */
5759 +int dpa_ndo_init(struct net_device *net_dev)
5760 +{
5761 +	/* If fsl_fm_max_frm allows more than the common Ethernet default of
5762 +	 * 1500 (ETH_DATA_LEN), we choose conservatively and let the user
5763 +	 * explicitly raise the MTU via ifconfig; otherwise, hosts on the same
5764 +	 * LAN could end up with mismatched MTUs.
5765 +	 * If, on the other hand, fsl_fm_max_frm is below 1500, start with the
5766 +	 * maximum allowed.
5767 +	 */
5768 + int init_mtu = min(dpa_get_max_mtu(), ETH_DATA_LEN);
5769 +
5770 + pr_debug("Setting initial MTU on net device: %d\n", init_mtu);
5771 + net_dev->mtu = init_mtu;
5772 +
5773 + return 0;
5774 +}
5775 +EXPORT_SYMBOL(dpa_ndo_init);
5776 +
5777 +int dpa_set_features(struct net_device *dev, netdev_features_t features)
5778 +{
5779 + /* Not much to do here for now */
5780 + dev->features = features;
5781 + return 0;
5782 +}
5783 +EXPORT_SYMBOL(dpa_set_features);
5784 +
5785 +netdev_features_t dpa_fix_features(struct net_device *dev,
5786 + netdev_features_t features)
5787 +{
5788 + netdev_features_t unsupported_features = 0;
5789 +
5790 + /* In theory we should never be requested to enable features that
5791 + * we didn't set in netdev->features and netdev->hw_features at probe
5792 + * time, but double check just to be on the safe side.
5793 + * We don't support enabling Rx csum through ethtool yet
5794 + */
5795 + unsupported_features |= NETIF_F_RXCSUM;
5796 +
5797 + features &= ~unsupported_features;
5798 +
5799 + return features;
5800 +}
5801 +EXPORT_SYMBOL(dpa_fix_features);
5802 +
5803 +#ifdef CONFIG_FSL_DPAA_TS
5804 +u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv, enum port_type rx_tx,
5805 + const void *data)
5806 +{
5807 + u64 *ts, ns;
5808 +
5809 + ts = fm_port_get_buffer_time_stamp(priv->mac_dev->port_dev[rx_tx],
5810 + data);
5811 +
5812 + if (!ts || *ts == 0)
5813 + return 0;
5814 +
5815 + be64_to_cpus(ts);
5816 +
5817 +	/* non-power-of-2 periods must multiply by DPA_PTP_NOMINAL_FREQ_PERIOD_NS instead of shifting */
5818 + ns = *ts << DPA_PTP_NOMINAL_FREQ_PERIOD_SHIFT;
5819 +
5820 + return ns;
5821 +}
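For a concrete feel of the conversion, take a hypothetical shift of 1, i.e. a 2 ns nominal tick (the real DPA_PTP_NOMINAL_FREQ_PERIOD_SHIFT is defined elsewhere in this patch):

/* hypothetical: with DPA_PTP_NOMINAL_FREQ_PERIOD_SHIFT == 1 (2 ns tick),
 * a raw counter of 4096 ticks converts to 4096 << 1 == 8192 ns; for tick
 * periods that are not a power of 2, the comment above applies and a
 * multiply by DPA_PTP_NOMINAL_FREQ_PERIOD_NS is needed instead.
 */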
5822 +
5823 +int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
5824 + struct skb_shared_hwtstamps *shhwtstamps, const void *data)
5825 +{
5826 + u64 ns;
5827 +
5828 + ns = dpa_get_timestamp_ns(priv, rx_tx, data);
5829 +
5830 + if (ns == 0)
5831 + return -EINVAL;
5832 +
5833 + memset(shhwtstamps, 0, sizeof(*shhwtstamps));
5834 + shhwtstamps->hwtstamp = ns_to_ktime(ns);
5835 +
5836 + return 0;
5837 +}
5838 +
5839 +static void dpa_ts_tx_enable(struct net_device *dev)
5840 +{
5841 + struct dpa_priv_s *priv = netdev_priv(dev);
5842 + struct mac_device *mac_dev = priv->mac_dev;
5843 +
5844 + if (mac_dev->fm_rtc_enable)
5845 + mac_dev->fm_rtc_enable(get_fm_handle(dev));
5846 + if (mac_dev->ptp_enable)
5847 + mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
5848 +
5849 + priv->ts_tx_en = true;
5850 +}
5851 +
5852 +static void dpa_ts_tx_disable(struct net_device *dev)
5853 +{
5854 + struct dpa_priv_s *priv = netdev_priv(dev);
5855 +
5856 +#if 0
5857 +/* We cannot disable the RTC here: it may still be needed by Rx
5858 + * timestamping, and there is no separate ptp_disable API for Rx vs. Tx.
5859 + */
5860 + struct mac_device *mac_dev = priv->mac_dev;
5861 +
5862 + if (mac_dev->fm_rtc_disable)
5863 + mac_dev->fm_rtc_disable(get_fm_handle(dev));
5864 +
5865 + if (mac_dev->ptp_disable)
5866 + mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
5867 +#endif
5868 +
5869 + priv->ts_tx_en = false;
5870 +}
5871 +
5872 +static void dpa_ts_rx_enable(struct net_device *dev)
5873 +{
5874 + struct dpa_priv_s *priv = netdev_priv(dev);
5875 + struct mac_device *mac_dev = priv->mac_dev;
5876 +
5877 + if (mac_dev->fm_rtc_enable)
5878 + mac_dev->fm_rtc_enable(get_fm_handle(dev));
5879 + if (mac_dev->ptp_enable)
5880 + mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
5881 +
5882 + priv->ts_rx_en = true;
5883 +}
5884 +
5885 +static void dpa_ts_rx_disable(struct net_device *dev)
5886 +{
5887 + struct dpa_priv_s *priv = netdev_priv(dev);
5888 +
5889 +#if 0
5890 +/* We cannot disable the RTC here: it may still be needed by Tx
5891 + * timestamping, and there is no separate ptp_disable API for Rx vs. Tx.
5892 + */
5893 + struct mac_device *mac_dev = priv->mac_dev;
5894 +
5895 + if (mac_dev->fm_rtc_disable)
5896 + mac_dev->fm_rtc_disable(get_fm_handle(dev));
5897 +
5898 + if (mac_dev->ptp_disable)
5899 + mac_dev->ptp_disable(mac_dev->get_mac_handle(mac_dev));
5900 +#endif
5901 +
5902 + priv->ts_rx_en = false;
5903 +}
5904 +
5905 +static int dpa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5906 +{
5907 + struct hwtstamp_config config;
5908 +
5909 + if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
5910 + return -EFAULT;
5911 +
5912 + switch (config.tx_type) {
5913 + case HWTSTAMP_TX_OFF:
5914 + dpa_ts_tx_disable(dev);
5915 + break;
5916 + case HWTSTAMP_TX_ON:
5917 + dpa_ts_tx_enable(dev);
5918 + break;
5919 + default:
5920 + return -ERANGE;
5921 + }
5922 +
5923 + if (config.rx_filter == HWTSTAMP_FILTER_NONE)
5924 + dpa_ts_rx_disable(dev);
5925 + else {
5926 + dpa_ts_rx_enable(dev);
5927 + /* TS is set for all frame types, not only those requested */
5928 + config.rx_filter = HWTSTAMP_FILTER_ALL;
5929 + }
5930 +
5931 + return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
5932 + -EFAULT : 0;
5933 +}
5934 +#endif /* CONFIG_FSL_DPAA_TS */
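Since this is the stock SIOCSHWTSTAMP interface, nothing driver-specific is needed on the user side; a minimal caller built purely on the standard Linux UAPI (every identifier below is a stock kernel/libc one) would be:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

static int enable_hw_tstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;		/* reaches dpa_ts_tx_enable() */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* the driver forces this anyway */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}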
5935 +
5936 +int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5937 +{
5938 +#ifdef CONFIG_FSL_DPAA_1588
5939 + struct dpa_priv_s *priv = netdev_priv(dev);
5940 +#endif
5941 + int ret = 0;
5942 +
5943 +	/* reject unless a timestamping feature is compiled in and the interface is running */
5944 +#ifdef CONFIG_FSL_DPAA_TS
5945 + if (!netif_running(dev))
5946 +#endif
5947 + return -EINVAL;
5948 +
5949 +#ifdef CONFIG_FSL_DPAA_TS
5950 + if (cmd == SIOCSHWTSTAMP)
5951 + return dpa_ts_ioctl(dev, rq, cmd);
5952 +#endif /* CONFIG_FSL_DPAA_TS */
5953 +
5954 +#ifdef CONFIG_FSL_DPAA_1588
5955 + if ((cmd >= PTP_ENBL_TXTS_IOCTL) && (cmd <= PTP_CLEANUP_TS)) {
5956 + if (priv->tsu && priv->tsu->valid)
5957 + ret = dpa_ioctl_1588(dev, rq, cmd);
5958 + else
5959 + ret = -ENODEV;
5960 + }
5961 +#endif
5962 +
5963 + return ret;
5964 +}
5965 +EXPORT_SYMBOL(dpa_ioctl);
5966 +
5967 +int __cold dpa_remove(struct platform_device *of_dev)
5968 +{
5969 + int err;
5970 + struct device *dev;
5971 + struct net_device *net_dev;
5972 + struct dpa_priv_s *priv;
5973 +
5974 + dev = &of_dev->dev;
5975 + net_dev = dev_get_drvdata(dev);
5976 +
5977 + priv = netdev_priv(net_dev);
5978 +
5979 + dpaa_eth_sysfs_remove(dev);
5980 +
5981 + dev_set_drvdata(dev, NULL);
5982 + unregister_netdev(net_dev);
5983 +
5984 + err = dpa_fq_free(dev, &priv->dpa_fq_list);
5985 +
5986 + qman_delete_cgr_safe(&priv->ingress_cgr);
5987 + qman_release_cgrid(priv->ingress_cgr.cgrid);
5988 + qman_delete_cgr_safe(&priv->cgr_data.cgr);
5989 + qman_release_cgrid(priv->cgr_data.cgr.cgrid);
5990 +
5991 + dpa_private_napi_del(net_dev);
5992 +
5993 + dpa_bp_free(priv);
5994 +
5995 + if (priv->buf_layout)
5996 + devm_kfree(dev, priv->buf_layout);
5997 +
5998 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
5999 + /* remove debugfs entry for this net_device */
6000 + dpa_netdev_debugfs_remove(net_dev);
6001 +#endif /* CONFIG_FSL_DPAA_DBG_LOOP */
6002 +
6003 +#ifdef CONFIG_FSL_DPAA_1588
6004 + if (priv->tsu && priv->tsu->valid)
6005 + dpa_ptp_cleanup(priv);
6006 +#endif
6007 +
6008 + free_netdev(net_dev);
6009 +
6010 + return err;
6011 +}
6012 +EXPORT_SYMBOL(dpa_remove);
6013 +
6014 +struct mac_device * __cold __must_check
6015 +__attribute__((nonnull))
6016 +dpa_mac_probe(struct platform_device *_of_dev)
6017 +{
6018 + struct device *dpa_dev, *dev;
6019 + struct device_node *mac_node;
6020 + struct platform_device *of_dev;
6021 + struct mac_device *mac_dev;
6022 +#ifdef CONFIG_FSL_DPAA_1588
6023 + int lenp;
6024 + const phandle *phandle_prop;
6025 + struct net_device *net_dev = NULL;
6026 + struct dpa_priv_s *priv = NULL;
6027 + struct device_node *timer_node;
6028 +#endif
6029 + dpa_dev = &_of_dev->dev;
6030 +
6031 + mac_node = of_parse_phandle(_of_dev->dev.of_node, "fsl,fman-mac", 0);
6032 +		dev_err(dpa_dev, "Cannot find the MAC device's device tree node\n");
6033 + dev_err(dpa_dev, "Cannot find MAC device device tree node\n");
6034 + return ERR_PTR(-EFAULT);
6035 + }
6036 +
6037 + of_dev = of_find_device_by_node(mac_node);
6038 + if (unlikely(of_dev == NULL)) {
6039 + dev_err(dpa_dev, "of_find_device_by_node(%s) failed\n",
6040 + mac_node->full_name);
6041 + of_node_put(mac_node);
6042 + return ERR_PTR(-EINVAL);
6043 + }
6044 + of_node_put(mac_node);
6045 +
6046 + dev = &of_dev->dev;
6047 +
6048 + mac_dev = dev_get_drvdata(dev);
6049 + if (unlikely(mac_dev == NULL)) {
6050 + dev_err(dpa_dev, "dev_get_drvdata(%s) failed\n",
6051 + dev_name(dev));
6052 + return ERR_PTR(-EINVAL);
6053 + }
6054 +
6055 +#ifdef CONFIG_FSL_DPAA_1588
6056 + phandle_prop = of_get_property(mac_node, "ptimer-handle", &lenp);
6057 + if (phandle_prop && ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) ||
6058 + ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) &&
6059 + (mac_dev->speed == SPEED_1000)))) {
6060 + timer_node = of_find_node_by_phandle(*phandle_prop);
6061 + if (timer_node)
6062 + net_dev = dev_get_drvdata(dpa_dev);
6063 + if (timer_node && net_dev) {
6064 + priv = netdev_priv(net_dev);
6065 + if (!dpa_ptp_init(priv))
6066 + dev_info(dev, "%s: ptp 1588 is initialized.\n",
6067 + mac_node->full_name);
6068 + }
6069 + }
6070 +#endif
6071 +
6072 +#ifdef CONFIG_PTP_1588_CLOCK_DPAA
6073 + if ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) ||
6074 + ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) &&
6075 + (mac_dev->speed == SPEED_1000))) {
6076 + ptp_priv.node = of_parse_phandle(mac_node, "ptimer-handle", 0);
6077 + if (ptp_priv.node) {
6078 + ptp_priv.of_dev = of_find_device_by_node(ptp_priv.node);
6079 + if (unlikely(ptp_priv.of_dev == NULL)) {
6080 + dev_err(dpa_dev,
6081 + "Cannot find device represented by timer_node\n");
6082 + of_node_put(ptp_priv.node);
6083 + return ERR_PTR(-EINVAL);
6084 + }
6085 + ptp_priv.mac_dev = mac_dev;
6086 + }
6087 + }
6088 +#endif
6089 + return mac_dev;
6090 +}
6091 +EXPORT_SYMBOL(dpa_mac_probe);
6092 +
6093 +int dpa_set_mac_address(struct net_device *net_dev, void *addr)
6094 +{
6095 + const struct dpa_priv_s *priv;
6096 + int _errno;
6097 + struct mac_device *mac_dev;
6098 +
6099 + priv = netdev_priv(net_dev);
6100 +
6101 + _errno = eth_mac_addr(net_dev, addr);
6102 + if (_errno < 0) {
6103 + if (netif_msg_drv(priv))
6104 + netdev_err(net_dev,
6105 + "eth_mac_addr() = %d\n",
6106 + _errno);
6107 + return _errno;
6108 + }
6109 +
6110 + mac_dev = priv->mac_dev;
6111 +
6112 + _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev),
6113 + net_dev->dev_addr);
6114 + if (_errno < 0) {
6115 + if (netif_msg_drv(priv))
6116 + netdev_err(net_dev,
6117 + "mac_dev->change_addr() = %d\n",
6118 + _errno);
6119 + return _errno;
6120 + }
6121 +
6122 + return 0;
6123 +}
6124 +EXPORT_SYMBOL(dpa_set_mac_address);
6125 +
6126 +void dpa_set_rx_mode(struct net_device *net_dev)
6127 +{
6128 + int _errno;
6129 + const struct dpa_priv_s *priv;
6130 +
6131 + priv = netdev_priv(net_dev);
6132 +
6133 + if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
6134 + priv->mac_dev->promisc = !priv->mac_dev->promisc;
6135 + _errno = priv->mac_dev->set_promisc(
6136 + priv->mac_dev->get_mac_handle(priv->mac_dev),
6137 + priv->mac_dev->promisc);
6138 + if (unlikely(_errno < 0) && netif_msg_drv(priv))
6139 + netdev_err(net_dev,
6140 + "mac_dev->set_promisc() = %d\n",
6141 + _errno);
6142 + }
6143 +
6144 + _errno = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
6145 + if (unlikely(_errno < 0) && netif_msg_drv(priv))
6146 + netdev_err(net_dev, "mac_dev->set_multi() = %d\n", _errno);
6147 +}
6148 +EXPORT_SYMBOL(dpa_set_rx_mode);
6149 +
6150 +void dpa_set_buffers_layout(struct mac_device *mac_dev,
6151 + struct dpa_buffer_layout_s *layout)
6152 +{
6153 + struct fm_port_params params;
6154 +
6155 + /* Rx */
6156 + layout[RX].priv_data_size = (uint16_t)DPA_RX_PRIV_DATA_SIZE;
6157 + layout[RX].parse_results = true;
6158 + layout[RX].hash_results = true;
6159 +#ifdef CONFIG_FSL_DPAA_TS
6160 + layout[RX].time_stamp = true;
6161 +#endif
6162 + fm_port_get_buff_layout_ext_params(mac_dev->port_dev[RX], &params);
6163 + layout[RX].manip_extra_space = params.manip_extra_space;
6164 + /* a value of zero for data alignment means "don't care", so align to
6165 + * a non-zero value to prevent FMD from using its own default
6166 + */
6167 + layout[RX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
6168 +
6169 + /* Tx */
6170 + layout[TX].priv_data_size = DPA_TX_PRIV_DATA_SIZE;
6171 + layout[TX].parse_results = true;
6172 + layout[TX].hash_results = true;
6173 +#ifdef CONFIG_FSL_DPAA_TS
6174 + layout[TX].time_stamp = true;
6175 +#endif
6176 + fm_port_get_buff_layout_ext_params(mac_dev->port_dev[TX], &params);
6177 + layout[TX].manip_extra_space = params.manip_extra_space;
6178 + layout[TX].data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
6179 +}
6180 +EXPORT_SYMBOL(dpa_set_buffers_layout);
6181 +
6182 +int __attribute__((nonnull))
6183 +dpa_bp_alloc(struct dpa_bp *dpa_bp)
6184 +{
6185 + int err;
6186 + struct bman_pool_params bp_params;
6187 + struct platform_device *pdev;
6188 +
6189 + if (dpa_bp->size == 0 || dpa_bp->config_count == 0) {
6190 +		pr_err("Buffer pool is not properly initialized! Missing size or initial number of buffers\n");
6191 + return -EINVAL;
6192 + }
6193 +
6194 + memset(&bp_params, 0, sizeof(struct bman_pool_params));
6195 +#ifdef CONFIG_FMAN_PFC
6196 + bp_params.flags = BMAN_POOL_FLAG_THRESH;
6197 + bp_params.thresholds[0] = bp_params.thresholds[2] =
6198 + CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD;
6199 + bp_params.thresholds[1] = bp_params.thresholds[3] =
6200 + CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
6201 +#endif
6202 +
6203 +	/* Only one pool is created per bpid; if it already exists, take a reference */
6204 + if (dpa_bpid2pool_use(dpa_bp->bpid))
6205 + return 0;
6206 +
6207 + if (dpa_bp->bpid == 0)
6208 + bp_params.flags |= BMAN_POOL_FLAG_DYNAMIC_BPID;
6209 + else
6210 + bp_params.bpid = dpa_bp->bpid;
6211 +
6212 + dpa_bp->pool = bman_new_pool(&bp_params);
6213 + if (unlikely(dpa_bp->pool == NULL)) {
6214 + pr_err("bman_new_pool() failed\n");
6215 + return -ENODEV;
6216 + }
6217 +
6218 + dpa_bp->bpid = (uint8_t)bman_get_params(dpa_bp->pool)->bpid;
6219 +
6220 + pdev = platform_device_register_simple("dpaa_eth_bpool",
6221 + dpa_bp->bpid, NULL, 0);
6222 + if (IS_ERR(pdev)) {
6223 + err = PTR_ERR(pdev);
6224 + goto pdev_register_failed;
6225 + }
6226 +
6227 + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(40));
6228 + if (err)
6229 + goto pdev_mask_failed;
6230 + if (!pdev->dev.dma_mask)
6231 + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
6232 + else {
6233 + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
6234 + if (err)
6235 + goto pdev_mask_failed;
6236 + }
6237 +
6238 +#ifdef CONFIG_FMAN_ARM
6239 + /* force coherency */
6240 + pdev->dev.archdata.dma_coherent = true;
6241 + arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, true);
6242 +#endif
6243 +
6244 + dpa_bp->dev = &pdev->dev;
6245 +
6246 + if (dpa_bp->seed_cb) {
6247 + err = dpa_bp->seed_cb(dpa_bp);
6248 + if (err)
6249 + goto pool_seed_failed;
6250 + }
6251 +
6252 + dpa_bpid2pool_map(dpa_bp->bpid, dpa_bp);
6253 +
6254 + return 0;
6255 +
6256 +pool_seed_failed:
6257 +pdev_mask_failed:
6258 + platform_device_unregister(pdev);
6259 +pdev_register_failed:
6260 + bman_free_pool(dpa_bp->pool);
6261 +
6262 + return err;
6263 +}
6264 +EXPORT_SYMBOL(dpa_bp_alloc);
6265 +
6266 +void dpa_bp_drain(struct dpa_bp *bp)
6267 +{
6268 + int ret, num = 8;
6269 +
6270 + do {
6271 + struct bm_buffer bmb[8];
6272 + int i;
6273 +
6274 + ret = bman_acquire(bp->pool, bmb, num, 0);
6275 + if (ret < 0) {
6276 + if (num == 8) {
6277 + /* we have less than 8 buffers left;
6278 + * drain them one by one
6279 + */
6280 + num = 1;
6281 + ret = 1;
6282 + continue;
6283 + } else {
6284 + /* Pool is fully drained */
6285 + break;
6286 + }
6287 + }
6288 +
6289 + for (i = 0; i < num; i++) {
6290 + dma_addr_t addr = bm_buf_addr(&bmb[i]);
6291 +
6292 + dma_unmap_single(bp->dev, addr, bp->size,
6293 + DMA_BIDIRECTIONAL);
6294 +
6295 + bp->free_buf_cb(phys_to_virt(addr));
6296 + }
6297 + } while (ret > 0);
6298 +}
6299 +EXPORT_SYMBOL(dpa_bp_drain);
6300 +
6301 +static void __cold __attribute__((nonnull))
6302 +_dpa_bp_free(struct dpa_bp *dpa_bp)
6303 +{
6304 + struct dpa_bp *bp = dpa_bpid2pool(dpa_bp->bpid);
6305 +
6306 +	/* the mapping between bpid and dpa_bp is done very late in the
6307 +	 * allocation procedure; if something failed before the mapping, the
6308 +	 * pool was never configured and there is nothing to tear down here
6309 +	 */
6310 + if (!bp)
6311 + return;
6312 +
6313 + if (!atomic_dec_and_test(&bp->refs))
6314 + return;
6315 +
6316 + if (bp->free_buf_cb)
6317 + dpa_bp_drain(bp);
6318 +
6319 + dpa_bp_array[bp->bpid] = NULL;
6320 + bman_free_pool(bp->pool);
6321 +
6322 + if (bp->dev)
6323 + platform_device_unregister(to_platform_device(bp->dev));
6324 +}
6325 +
6326 +void __cold __attribute__((nonnull))
6327 +dpa_bp_free(struct dpa_priv_s *priv)
6328 +{
6329 + int i;
6330 +
6331 + for (i = 0; i < priv->bp_count; i++)
6332 + _dpa_bp_free(&priv->dpa_bp[i]);
6333 +}
6334 +EXPORT_SYMBOL(dpa_bp_free);
6335 +
6336 +struct dpa_bp *dpa_bpid2pool(int bpid)
6337 +{
6338 + return dpa_bp_array[bpid];
6339 +}
6340 +EXPORT_SYMBOL(dpa_bpid2pool);
6341 +
6342 +void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp)
6343 +{
6344 + dpa_bp_array[bpid] = dpa_bp;
6345 + atomic_set(&dpa_bp->refs, 1);
6346 +}
6347 +
6348 +bool dpa_bpid2pool_use(int bpid)
6349 +{
6350 + if (dpa_bpid2pool(bpid)) {
6351 + atomic_inc(&dpa_bp_array[bpid]->refs);
6352 + return true;
6353 + }
6354 +
6355 + return false;
6356 +}
6357 +
6358 +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
6359 +u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
6360 + void *accel_priv, select_queue_fallback_t fallback)
6361 +{
6362 + return dpa_get_queue_mapping(skb);
6363 +}
6364 +EXPORT_SYMBOL(dpa_select_queue);
6365 +#endif
6366 +
6367 +struct dpa_fq *dpa_fq_alloc(struct device *dev,
6368 + u32 fq_start,
6369 + u32 fq_count,
6370 + struct list_head *list,
6371 + enum dpa_fq_type fq_type)
6372 +{
6373 + int i;
6374 + struct dpa_fq *dpa_fq;
6375 +
6376 + dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fq_count, GFP_KERNEL);
6377 + if (dpa_fq == NULL)
6378 + return NULL;
6379 +
6380 + for (i = 0; i < fq_count; i++) {
6381 + dpa_fq[i].fq_type = fq_type;
6382 + if (fq_type == FQ_TYPE_RX_PCD_HI_PRIO)
6383 + dpa_fq[i].fqid = fq_start ?
6384 + DPAA_ETH_FQ_DELTA + fq_start + i : 0;
6385 + else
6386 + dpa_fq[i].fqid = fq_start ? fq_start + i : 0;
6387 +
6388 + list_add_tail(&dpa_fq[i].list, list);
6389 + }
6390 +
6391 +#ifdef CONFIG_FMAN_PFC
6392 + if (fq_type == FQ_TYPE_TX)
6393 + for (i = 0; i < fq_count; i++)
6394 + dpa_fq[i].wq = i / dpa_num_cpus;
6395 + else
6396 +#endif
6397 + for (i = 0; i < fq_count; i++)
6398 + _dpa_assign_wq(dpa_fq + i);
6399 +
6400 + return dpa_fq;
6401 +}
6402 +EXPORT_SYMBOL(dpa_fq_alloc);
6403 +
6404 +/* Probing of FQs for MACful ports */
6405 +int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
6406 + struct fm_port_fqs *port_fqs,
6407 + bool alloc_tx_conf_fqs,
6408 + enum port_type ptype)
6409 +{
6410 + struct fqid_cell *fqids = NULL;
6411 + const void *fqids_off = NULL;
6412 + struct dpa_fq *dpa_fq = NULL;
6413 + struct device_node *np = dev->of_node;
6414 + int num_ranges;
6415 + int i, lenp;
6416 +
6417 + if (ptype == TX && alloc_tx_conf_fqs) {
6418 + if (!dpa_fq_alloc(dev, tx_confirm_fqids->start,
6419 + tx_confirm_fqids->count, list,
6420 + FQ_TYPE_TX_CONF_MQ))
6421 + goto fq_alloc_failed;
6422 + }
6423 +
6424 + fqids_off = of_get_property(np, fsl_qman_frame_queues[ptype], &lenp);
6425 + if (fqids_off == NULL) {
6426 + /* No dts definition, so use the defaults. */
6427 + fqids = default_fqids[ptype];
6428 + num_ranges = 3;
6429 + } else {
6430 + num_ranges = lenp / sizeof(*fqids);
6431 +
6432 + fqids = devm_kzalloc(dev, sizeof(*fqids) * num_ranges,
6433 + GFP_KERNEL);
6434 + if (fqids == NULL)
6435 + goto fqids_alloc_failed;
6436 +
6437 +		/* convert to CPU endianness */
6438 + for (i = 0; i < num_ranges; i++) {
6439 + fqids[i].start = be32_to_cpup(fqids_off +
6440 + i * sizeof(*fqids));
6441 + fqids[i].count = be32_to_cpup(fqids_off +
6442 + i * sizeof(*fqids) + sizeof(__be32));
6443 + }
6444 + }
6445 +
6446 + for (i = 0; i < num_ranges; i++) {
6447 + switch (i) {
6448 + case 0:
6449 + /* The first queue is the error queue */
6450 + if (fqids[i].count != 1)
6451 + goto invalid_error_queue;
6452 +
6453 + dpa_fq = dpa_fq_alloc(dev, fqids[i].start,
6454 + fqids[i].count, list,
6455 + ptype == RX ?
6456 + FQ_TYPE_RX_ERROR :
6457 + FQ_TYPE_TX_ERROR);
6458 + if (dpa_fq == NULL)
6459 + goto fq_alloc_failed;
6460 +
6461 + if (ptype == RX)
6462 + port_fqs->rx_errq = &dpa_fq[0];
6463 + else
6464 + port_fqs->tx_errq = &dpa_fq[0];
6465 + break;
6466 + case 1:
6467 + /* the second queue is the default queue */
6468 + if (fqids[i].count != 1)
6469 + goto invalid_default_queue;
6470 +
6471 + dpa_fq = dpa_fq_alloc(dev, fqids[i].start,
6472 + fqids[i].count, list,
6473 + ptype == RX ?
6474 + FQ_TYPE_RX_DEFAULT :
6475 + FQ_TYPE_TX_CONFIRM);
6476 + if (dpa_fq == NULL)
6477 + goto fq_alloc_failed;
6478 +
6479 + if (ptype == RX)
6480 + port_fqs->rx_defq = &dpa_fq[0];
6481 + else
6482 + port_fqs->tx_defq = &dpa_fq[0];
6483 + break;
6484 + default:
6485 +			/* all subsequent queues are Rx PCD (normal or high priority) or Tx */
6486 + if (ptype == RX) {
6487 + if (!dpa_fq_alloc(dev, fqids[i].start,
6488 + fqids[i].count, list,
6489 + FQ_TYPE_RX_PCD) ||
6490 + !dpa_fq_alloc(dev, fqids[i].start,
6491 + fqids[i].count, list,
6492 + FQ_TYPE_RX_PCD_HI_PRIO))
6493 + goto fq_alloc_failed;
6494 + } else {
6495 + if (!dpa_fq_alloc(dev, fqids[i].start,
6496 + fqids[i].count, list,
6497 + FQ_TYPE_TX))
6498 + goto fq_alloc_failed;
6499 + }
6500 + break;
6501 + }
6502 + }
6503 +
6504 + return 0;
6505 +
6506 +fq_alloc_failed:
6507 +fqids_alloc_failed:
6508 + dev_err(dev, "Cannot allocate memory for frame queues\n");
6509 + return -ENOMEM;
6510 +
6511 +invalid_default_queue:
6512 +invalid_error_queue:
6513 + dev_err(dev, "Too many default or error queues\n");
6514 + return -EINVAL;
6515 +}
6516 +EXPORT_SYMBOL(dpa_fq_probe_mac);
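The fsl,qman-frame-queues-* properties parsed here are flat lists of (start, count) pairs: range 0 must be the single error queue, range 1 the single default (Rx) or confirmation (Tx) queue, and any further ranges become PCD or Tx queues. A device-tree fragment in the shape this code expects, with made-up FQ ids:

/* illustrative DTS fragment (hypothetical FQ ids):
 *	fsl,qman-frame-queues-rx = <0x50 1  0x51 1  0x52 3>;
 *	fsl,qman-frame-queues-tx = <0x70 1  0x71 1  0x72 3>;
 * Rx: error FQ 0x50, default FQ 0x51, PCD FQs 0x52..0x54;
 * Tx: error FQ 0x70, confirmation FQ 0x71, Tx FQs 0x72..0x74.
 */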
6517 +
6518 +static u32 rx_pool_channel;
6519 +static DEFINE_SPINLOCK(rx_pool_channel_init);
6520 +
6521 +int dpa_get_channel(void)
6522 +{
6523 + spin_lock(&rx_pool_channel_init);
6524 + if (!rx_pool_channel) {
6525 + u32 pool;
6526 + int ret = qman_alloc_pool(&pool);
6527 + if (!ret)
6528 + rx_pool_channel = pool;
6529 + }
6530 + spin_unlock(&rx_pool_channel_init);
6531 + if (!rx_pool_channel)
6532 + return -ENOMEM;
6533 + return rx_pool_channel;
6534 +}
6535 +EXPORT_SYMBOL(dpa_get_channel);
6536 +
6537 +void dpa_release_channel(void)
6538 +{
6539 + qman_release_pool(rx_pool_channel);
6540 +}
6541 +EXPORT_SYMBOL(dpa_release_channel);
6542 +
6543 +int dpaa_eth_add_channel(void *__arg)
6544 +{
6545 + const cpumask_t *cpus = qman_affine_cpus();
6546 + u32 pool = QM_SDQCR_CHANNELS_POOL_CONV((u16)(unsigned long)__arg);
6547 + int cpu;
6548 + struct qman_portal *portal;
6549 +
6550 + for_each_cpu(cpu, cpus) {
6551 + portal = (struct qman_portal *)qman_get_affine_portal(cpu);
6552 + qman_p_static_dequeue_add(portal, pool);
6553 + }
6554 + return 0;
6555 +}
6556 +EXPORT_SYMBOL(dpaa_eth_add_channel);
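Taken together, the expected probe-time flow is to claim the shared pool channel once and subscribe every affine portal to it; a minimal sketch, with error handling elided:

/* sketch of the probe-time pairing of the two helpers above */
int channel = dpa_get_channel();

if (channel < 0)
	return channel;
/* have every affine QMan portal dequeue from the pool channel */
dpaa_eth_add_channel((void *)(unsigned long)channel);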
6557 +
6558 +/**
6559 + * dpaa_eth_cgscn - congestion group state change notification callback
6560 + *
6561 + * Stops the device's egress queues while they are congested and
6562 + * wakes them upon exiting congested state.
6563 + * Also updates some CGR-related stats.
6564 + */
6565 +static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
6566 +			   int congested)
6567 +{
6568 + struct dpa_priv_s *priv = (struct dpa_priv_s *)container_of(cgr,
6569 + struct dpa_priv_s, cgr_data.cgr);
6570 +
6571 + if (congested) {
6572 + priv->cgr_data.congestion_start_jiffies = jiffies;
6573 + netif_tx_stop_all_queues(priv->net_dev);
6574 + priv->cgr_data.cgr_congested_count++;
6575 + } else {
6576 + priv->cgr_data.congested_jiffies +=
6577 + (jiffies - priv->cgr_data.congestion_start_jiffies);
6578 + netif_tx_wake_all_queues(priv->net_dev);
6579 + }
6580 +}
6581 +
6582 +int dpaa_eth_cgr_init(struct dpa_priv_s *priv)
6583 +{
6584 + struct qm_mcc_initcgr initcgr;
6585 + u32 cs_th;
6586 + int err;
6587 +
6588 + err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
6589 + if (err < 0) {
6590 + pr_err("Error %d allocating CGR ID\n", err);
6591 + goto out_error;
6592 + }
6593 + priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
6594 +
6595 + /* Enable Congestion State Change Notifications and CS taildrop */
6596 + initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
6597 + initcgr.cgr.cscn_en = QM_CGR_EN;
6598 +
6599 + /* Set different thresholds based on the MAC speed.
6600 + * TODO: this may turn suboptimal if the MAC is reconfigured at a speed
6601 + * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
6602 + * In such cases, we ought to reconfigure the threshold, too.
6603 + */
6604 + if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
6605 + cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G;
6606 + else
6607 + cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G;
6608 + qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
6609 +
6610 + initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
6611 + initcgr.cgr.cstd_en = QM_CGR_EN;
6612 +
6613 + err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
6614 + &initcgr);
6615 + if (err < 0) {
6616 + pr_err("Error %d creating CGR with ID %d\n", err,
6617 + priv->cgr_data.cgr.cgrid);
6618 + qman_release_cgrid(priv->cgr_data.cgr.cgrid);
6619 + goto out_error;
6620 + }
6621 + pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
6622 + priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
6623 + priv->cgr_data.cgr.chan);
6624 +
6625 +out_error:
6626 + return err;
6627 +}
6628 +EXPORT_SYMBOL(dpaa_eth_cgr_init);
6629 +
6630 +static inline void dpa_setup_ingress(const struct dpa_priv_s *priv,
6631 + struct dpa_fq *fq,
6632 + const struct qman_fq *template)
6633 +{
6634 + fq->fq_base = *template;
6635 + fq->net_dev = priv->net_dev;
6636 +
6637 + fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
6638 + fq->channel = priv->channel;
6639 +}
6640 +
6641 +static inline void dpa_setup_egress(const struct dpa_priv_s *priv,
6642 + struct dpa_fq *fq,
6643 + struct fm_port *port,
6644 + const struct qman_fq *template)
6645 +{
6646 + fq->fq_base = *template;
6647 + fq->net_dev = priv->net_dev;
6648 +
6649 + if (port) {
6650 + fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
6651 + fq->channel = (uint16_t)fm_get_tx_port_channel(port);
6652 + } else {
6653 + fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
6654 + }
6655 +}
6656 +
6657 +void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
6658 + struct fm_port *tx_port)
6659 +{
6660 + struct dpa_fq *fq;
6661 + uint16_t portals[NR_CPUS];
6662 + int cpu, portal_cnt = 0, num_portals = 0;
6663 + uint32_t pcd_fqid, pcd_fqid_hi_prio;
6664 + const cpumask_t *affine_cpus = qman_affine_cpus();
6665 + int egress_cnt = 0, conf_cnt = 0;
6666 +
6667 + /* Prepare for PCD FQs init */
6668 + for_each_cpu(cpu, affine_cpus)
6669 + portals[num_portals++] = qman_affine_channel(cpu);
6670 + if (num_portals == 0)
6671 + dev_err(priv->net_dev->dev.parent,
6672 +			"No Qman software (affine) channels found\n");
6673 +
6674 + pcd_fqid = (priv->mac_dev) ?
6675 + DPAA_ETH_PCD_FQ_BASE(priv->mac_dev->res->start) : 0;
6676 + pcd_fqid_hi_prio = (priv->mac_dev) ?
6677 + DPAA_ETH_PCD_FQ_HI_PRIO_BASE(priv->mac_dev->res->start) : 0;
6678 +
6679 + /* Initialize each FQ in the list */
6680 + list_for_each_entry(fq, &priv->dpa_fq_list, list) {
6681 + switch (fq->fq_type) {
6682 + case FQ_TYPE_RX_DEFAULT:
6683 + BUG_ON(!priv->mac_dev);
6684 + dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
6685 + break;
6686 + case FQ_TYPE_RX_ERROR:
6687 + BUG_ON(!priv->mac_dev);
6688 + dpa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
6689 + break;
6690 + case FQ_TYPE_RX_PCD:
6691 + /* For MACless we can't have dynamic Rx queues */
6692 + BUG_ON(!priv->mac_dev && !fq->fqid);
6693 + dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
6694 + if (!fq->fqid)
6695 + fq->fqid = pcd_fqid++;
6696 + fq->channel = portals[portal_cnt];
6697 + portal_cnt = (portal_cnt + 1) % num_portals;
6698 + break;
6699 + case FQ_TYPE_RX_PCD_HI_PRIO:
6700 + /* For MACless we can't have dynamic Hi Pri Rx queues */
6701 + BUG_ON(!priv->mac_dev && !fq->fqid);
6702 + dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
6703 + if (!fq->fqid)
6704 + fq->fqid = pcd_fqid_hi_prio++;
6705 + fq->channel = portals[portal_cnt];
6706 + portal_cnt = (portal_cnt + 1) % num_portals;
6707 + break;
6708 + case FQ_TYPE_TX:
6709 + dpa_setup_egress(priv, fq, tx_port,
6710 + &fq_cbs->egress_ern);
6711 + /* If we have more Tx queues than the number of cores,
6712 + * just ignore the extra ones.
6713 + */
6714 + if (egress_cnt < DPAA_ETH_TX_QUEUES)
6715 + priv->egress_fqs[egress_cnt++] = &fq->fq_base;
6716 + break;
6717 + case FQ_TYPE_TX_CONFIRM:
6718 + BUG_ON(!priv->mac_dev);
6719 + dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
6720 + break;
6721 + case FQ_TYPE_TX_CONF_MQ:
6722 + BUG_ON(!priv->mac_dev);
6723 + dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
6724 + priv->conf_fqs[conf_cnt++] = &fq->fq_base;
6725 + break;
6726 + case FQ_TYPE_TX_ERROR:
6727 + BUG_ON(!priv->mac_dev);
6728 + dpa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
6729 + break;
6730 + default:
6731 + dev_warn(priv->net_dev->dev.parent,
6732 + "Unknown FQ type detected!\n");
6733 + break;
6734 + }
6735 + }
6736 +
6737 + /* The number of Tx queues may be smaller than the number of cores, if
6738 + * the Tx queue range is specified in the device tree instead of being
6739 + * dynamically allocated.
6740 + * Make sure all CPUs receive a corresponding Tx queue.
6741 + */
6742 + while (egress_cnt < DPAA_ETH_TX_QUEUES) {
6743 + list_for_each_entry(fq, &priv->dpa_fq_list, list) {
6744 + if (fq->fq_type != FQ_TYPE_TX)
6745 + continue;
6746 + priv->egress_fqs[egress_cnt++] = &fq->fq_base;
6747 + if (egress_cnt == DPAA_ETH_TX_QUEUES)
6748 + break;
6749 + }
6750 + }
6751 +}
6752 +EXPORT_SYMBOL(dpa_fq_setup);
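The trailing while loop is easiest to see with numbers: suppose the device tree pins down only three Tx FQs while DPAA_ETH_TX_QUEUES is 8.

/* first pass:  egress_fqs[0..2] = A, B, C   (the three FQ_TYPE_TX entries)
 * while loop:  egress_fqs[3..7] = A, B, C, A, B
 * every CPU gets a Tx queue; some FQs simply serve more than one CPU.
 */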
6753 +
6754 +int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable)
6755 +{
6756 + int _errno;
6757 + const struct dpa_priv_s *priv;
6758 + struct device *dev;
6759 + struct qman_fq *fq;
6760 + struct qm_mcc_initfq initfq;
6761 + struct qman_fq *confq;
6762 + int queue_id;
6763 +
6764 + priv = netdev_priv(dpa_fq->net_dev);
6765 + dev = dpa_fq->net_dev->dev.parent;
6766 +
6767 + if (dpa_fq->fqid == 0)
6768 + dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
6769 +
6770 + dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
6771 +
6772 + _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
6773 + if (_errno) {
6774 + dev_err(dev, "qman_create_fq() failed\n");
6775 + return _errno;
6776 + }
6777 + fq = &dpa_fq->fq_base;
6778 +
6779 + if (dpa_fq->init) {
6780 + memset(&initfq, 0, sizeof(initfq));
6781 +
6782 + initfq.we_mask = QM_INITFQ_WE_FQCTRL;
6783 + /* FIXME: why would we want to keep an empty FQ in cache? */
6784 + initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
6785 +
6786 + /* Try to reduce the number of portal interrupts for
6787 + * Tx Confirmation FQs.
6788 + */
6789 + if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
6790 + initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
6791 +
6792 + /* FQ placement */
6793 + initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
6794 +
6795 + initfq.fqd.dest.channel = dpa_fq->channel;
6796 + initfq.fqd.dest.wq = dpa_fq->wq;
6797 +
6798 + /* Put all egress queues in a congestion group of their own.
6799 +		 * Strictly speaking, the Tx confirmation queues are Rx FQs,
6800 + * rather than Tx - but they nonetheless account for the
6801 + * memory footprint on behalf of egress traffic. We therefore
6802 + * place them in the netdev's CGR, along with the Tx FQs.
6803 + */
6804 + if (dpa_fq->fq_type == FQ_TYPE_TX ||
6805 + dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
6806 + dpa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
6807 + initfq.we_mask |= QM_INITFQ_WE_CGID;
6808 + initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
6809 + initfq.fqd.cgid = (uint8_t)priv->cgr_data.cgr.cgrid;
6810 + /* Set a fixed overhead accounting, in an attempt to
6811 + * reduce the impact of fixed-size skb shells and the
6812 + * driver's needed headroom on system memory. This is
6813 + * especially the case when the egress traffic is
6814 + * composed of small datagrams.
6815 + * Unfortunately, QMan's OAL value is capped to an
6816 + * insufficient value, but even that is better than
6817 + * no overhead accounting at all.
6818 + */
6819 + initfq.we_mask |= QM_INITFQ_WE_OAC;
6820 + initfq.fqd.oac_init.oac = QM_OAC_CG;
6821 + initfq.fqd.oac_init.oal =
6822 + (signed char)(min(sizeof(struct sk_buff) +
6823 + priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
6824 + }
6825 +
6826 + if (td_enable) {
6827 + initfq.we_mask |= QM_INITFQ_WE_TDTHRESH;
6828 + qm_fqd_taildrop_set(&initfq.fqd.td,
6829 + DPA_FQ_TD, 1);
6830 + initfq.fqd.fq_ctrl = QM_FQCTRL_TDE;
6831 + }
6832 +
6833 + /* Configure the Tx confirmation queue, now that we know
6834 + * which Tx queue it pairs with.
6835 + */
6836 + if (dpa_fq->fq_type == FQ_TYPE_TX) {
6837 + queue_id = _dpa_tx_fq_to_id(priv, &dpa_fq->fq_base);
6838 + if (queue_id >= 0) {
6839 + confq = priv->conf_fqs[queue_id];
6840 + if (confq) {
6841 + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
6842 + /* ContextA: OVOM=1 (use contextA2 bits instead of ICAD)
6843 + * A2V=1 (contextA A2 field is valid)
6844 + * A0V=1 (contextA A0 field is valid)
6845 + * B0V=1 (contextB field is valid)
6846 + * ContextA A2: EBD=1 (deallocate buffers inside FMan)
6847 + * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
6848 + */
6849 + initfq.fqd.context_a.hi = 0x1e000000;
6850 + initfq.fqd.context_a.lo = 0x80000000;
6851 + }
6852 + }
6853 + }
6854 +
6855 + /* Put all *private* ingress queues in our "ingress CGR". */
6856 + if (priv->use_ingress_cgr &&
6857 + (dpa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
6858 + dpa_fq->fq_type == FQ_TYPE_RX_ERROR ||
6859 + dpa_fq->fq_type == FQ_TYPE_RX_PCD ||
6860 + dpa_fq->fq_type == FQ_TYPE_RX_PCD_HI_PRIO)) {
6861 + initfq.we_mask |= QM_INITFQ_WE_CGID;
6862 + initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
6863 + initfq.fqd.cgid = (uint8_t)priv->ingress_cgr.cgrid;
6864 + /* Set a fixed overhead accounting, just like for the
6865 + * egress CGR.
6866 + */
6867 + initfq.we_mask |= QM_INITFQ_WE_OAC;
6868 + initfq.fqd.oac_init.oac = QM_OAC_CG;
6869 + initfq.fqd.oac_init.oal =
6870 + (signed char)(min(sizeof(struct sk_buff) +
6871 + priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
6872 + }
6873 +
6874 + /* Initialization common to all ingress queues */
6875 + if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
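+			/* Stashing lets QMan prefetch frame data, annotation
+			 * and FQ context into the dequeueing CPU's cache:
+			 * here 2 cache lines of data, 1 of annotation, and
+			 * enough 64-byte lines to cover struct qman_fq.
+			 */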
6876 + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
6877 + initfq.fqd.fq_ctrl |=
6878 + QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
6879 + initfq.fqd.context_a.stashing.exclusive =
6880 + QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
6881 + QM_STASHING_EXCL_ANNOTATION;
6882 + initfq.fqd.context_a.stashing.data_cl = 2;
6883 + initfq.fqd.context_a.stashing.annotation_cl = 1;
6884 + initfq.fqd.context_a.stashing.context_cl =
6885 + DIV_ROUND_UP(sizeof(struct qman_fq), 64);
6886 + }
6887 +
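+		/* QMAN_INITFQ_FLAG_SCHED places the FQ directly in the
+		 * scheduled state, so no separate qman_schedule_fq() call
+		 * is needed.
+		 */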
6888 + _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
6889 + if (_errno < 0) {
6890 + dev_err(dev, "qman_init_fq(%u) = %d\n",
6891 + qman_fq_fqid(fq), _errno);
6892 + qman_destroy_fq(fq, 0);
6893 + return _errno;
6894 + }
6895 + }
6896 +
6897 + dpa_fq->fqid = qman_fq_fqid(fq);
6898 +
6899 + return 0;
6900 +}
6901 +EXPORT_SYMBOL(dpa_fq_init);
6902 +
6903 +int __cold __attribute__((nonnull))
6904 +_dpa_fq_free(struct device *dev, struct qman_fq *fq)
6905 +{
6906 + int _errno, __errno;
6907 + struct dpa_fq *dpa_fq;
6908 + const struct dpa_priv_s *priv;
6909 +
6910 + _errno = 0;
6911 +
6912 + dpa_fq = container_of(fq, struct dpa_fq, fq_base);
6913 + priv = netdev_priv(dpa_fq->net_dev);
6914 +
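+	/* Only FQs this driver initialized need retiring: a QMan FQ must be
+	 * retired and put out-of-service before it can be destroyed.
+	 */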
6915 + if (dpa_fq->init) {
6916 + _errno = qman_retire_fq(fq, NULL);
6917 + if (unlikely(_errno < 0) && netif_msg_drv(priv))
6918 + dev_err(dev, "qman_retire_fq(%u) = %d\n",
6919 + qman_fq_fqid(fq), _errno);
6920 +
6921 + __errno = qman_oos_fq(fq);
6922 + if (unlikely(__errno < 0) && netif_msg_drv(priv)) {
6923 + dev_err(dev, "qman_oos_fq(%u) = %d\n",
6924 + qman_fq_fqid(fq), __errno);
6925 + if (_errno >= 0)
6926 + _errno = __errno;
6927 + }
6928 + }
6929 +
6930 + qman_destroy_fq(fq, 0);
6931 + list_del(&dpa_fq->list);
6932 +
6933 + return _errno;
6934 +}
6935 +EXPORT_SYMBOL(_dpa_fq_free);
6936 +
6937 +int __cold __attribute__((nonnull))
6938 +dpa_fq_free(struct device *dev, struct list_head *list)
6939 +{
6940 + int _errno, __errno;
6941 + struct dpa_fq *dpa_fq, *tmp;
6942 +
6943 + _errno = 0;
6944 + list_for_each_entry_safe(dpa_fq, tmp, list, list) {
6945 + __errno = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
6946 + if (unlikely(__errno < 0) && _errno >= 0)
6947 + _errno = __errno;
6948 + }
6949 +
6950 + return _errno;
6951 +}
6952 +EXPORT_SYMBOL(dpa_fq_free);
6953 +
6954 +static void
6955 +dpaa_eth_init_tx_port(struct fm_port *port, struct dpa_fq *errq,
6956 + struct dpa_fq *defq, struct dpa_buffer_layout_s *buf_layout)
6957 +{
6958 + struct fm_port_params tx_port_param;
6959 + bool frag_enabled = false;
6960 +
6961 + memset(&tx_port_param, 0, sizeof(tx_port_param));
6962 + dpaa_eth_init_port(tx, port, tx_port_param, errq->fqid, defq->fqid,
6963 + buf_layout, frag_enabled);
6964 +}
6965 +
6966 +static void
6967 +dpaa_eth_init_rx_port(struct fm_port *port, struct dpa_bp *bp, size_t count,
6968 + struct dpa_fq *errq, struct dpa_fq *defq,
6969 + struct dpa_buffer_layout_s *buf_layout)
6970 +{
6971 + struct fm_port_params rx_port_param;
6972 + int i;
6973 + bool frag_enabled = false;
6974 +
6975 + memset(&rx_port_param, 0, sizeof(rx_port_param));
6976 + count = min(ARRAY_SIZE(rx_port_param.pool_param), count);
6977 + rx_port_param.num_pools = (uint8_t)count;
6978 + for (i = 0; i < count; i++) {
6979 + if (i >= rx_port_param.num_pools)
6980 + break;
6981 + rx_port_param.pool_param[i].id = bp[i].bpid;
6982 + rx_port_param.pool_param[i].size = (uint16_t)bp[i].size;
6983 + }
6984 +
6985 + dpaa_eth_init_port(rx, port, rx_port_param, errq->fqid, defq->fqid,
6986 + buf_layout, frag_enabled);
6987 +}
6988 +
6989 +#if defined(CONFIG_FSL_SDK_FMAN_TEST)
6990 +/* Defined as weak, to be implemented by fman pcd tester. */
6991 +int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *)
6992 +__attribute__((weak));
6993 +
6994 +int dpa_free_pcd_fqids(struct device *, uint32_t) __attribute__((weak));
6995 +#else
6996 +int dpa_alloc_pcd_fqids(struct device *, uint32_t, uint8_t, uint32_t *);
6997 +
6998 +int dpa_free_pcd_fqids(struct device *, uint32_t);
6999 +
7000 +#endif /* CONFIG_FSL_SDK_FMAN_TEST */
7001 +
7002 +
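+/* Default stubs; with CONFIG_FSL_SDK_FMAN_TEST the declarations above are
+ * weak, letting the FMan PCD tester override these definitions.
+ */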
7003 +int dpa_alloc_pcd_fqids(struct device *dev, uint32_t num,
7004 + uint8_t alignment, uint32_t *base_fqid)
7005 +{
7006 + dev_crit(dev, "callback not implemented!\n");
7007 +
7008 + return 0;
7009 +}
7010 +
7011 +int dpa_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
7012 +{
7014 + dev_crit(dev, "callback not implemented!\n");
7015 +
7016 + return 0;
7017 +}
7018 +
7019 +void dpaa_eth_init_ports(struct mac_device *mac_dev,
7020 + struct dpa_bp *bp, size_t count,
7021 + struct fm_port_fqs *port_fqs,
7022 + struct dpa_buffer_layout_s *buf_layout,
7023 + struct device *dev)
7024 +{
7025 + struct fm_port_pcd_param rx_port_pcd_param;
7026 + struct fm_port *rxport = mac_dev->port_dev[RX];
7027 + struct fm_port *txport = mac_dev->port_dev[TX];
7028 +
7029 + dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
7030 + port_fqs->tx_defq, &buf_layout[TX]);
7031 + dpaa_eth_init_rx_port(rxport, bp, count, port_fqs->rx_errq,
7032 + port_fqs->rx_defq, &buf_layout[RX]);
7033 +
7034 + rx_port_pcd_param.cba = dpa_alloc_pcd_fqids;
7035 + rx_port_pcd_param.cbf = dpa_free_pcd_fqids;
7036 + rx_port_pcd_param.dev = dev;
7037 + fm_port_pcd_bind(rxport, &rx_port_pcd_param);
7038 +}
7039 +EXPORT_SYMBOL(dpaa_eth_init_ports);
7040 +
7041 +void dpa_release_sgt(struct qm_sg_entry *sgt)
7042 +{
7043 + struct dpa_bp *dpa_bp;
7044 + struct bm_buffer bmb[DPA_BUFF_RELEASE_MAX];
7045 + uint8_t i = 0, j;
7046 +
7047 + memset(bmb, 0, DPA_BUFF_RELEASE_MAX * sizeof(struct bm_buffer));
7048 +
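+	/* Walk the SG table, releasing consecutive entries that share a bpid
+	 * back to their pool in batches of up to DPA_BUFF_RELEASE_MAX per
+	 * bman_release() call.
+	 */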
7049 + do {
7050 + dpa_bp = dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i]));
7051 + DPA_BUG_ON(!dpa_bp);
7052 +
7053 + j = 0;
7054 + do {
7055 + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
7056 + bm_buffer_set64(&bmb[j], qm_sg_addr(&sgt[i]));
7057 +
7058 + j++; i++;
7059 + } while (j < ARRAY_SIZE(bmb) &&
7060 + !qm_sg_entry_get_final(&sgt[i-1]) &&
7061 + qm_sg_entry_get_bpid(&sgt[i-1]) ==
7062 + qm_sg_entry_get_bpid(&sgt[i]));
7063 +
7064 + while (bman_release(dpa_bp->pool, bmb, j, 0))
7065 + cpu_relax();
7066 + } while (!qm_sg_entry_get_final(&sgt[i-1]));
7067 +}
7068 +EXPORT_SYMBOL(dpa_release_sgt);
7069 +
7070 +void __attribute__((nonnull))
7071 +dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd)
7072 +{
7073 + struct qm_sg_entry *sgt;
7074 + struct dpa_bp *dpa_bp;
7075 + struct bm_buffer bmb;
7076 + dma_addr_t addr;
7077 + void *vaddr;
7078 +
7079 + memset(&bmb, 0, sizeof(struct bm_buffer));
7080 + bm_buffer_set64(&bmb, fd->addr);
7081 +
7082 + dpa_bp = dpa_bpid2pool(fd->bpid);
7083 + DPA_BUG_ON(!dpa_bp);
7084 +
7085 + if (fd->format == qm_fd_sg) {
7086 + vaddr = phys_to_virt(fd->addr);
7087 + sgt = vaddr + dpa_fd_offset(fd);
7088 +
7089 + dma_unmap_single(dpa_bp->dev, qm_fd_addr(fd), dpa_bp->size,
7090 + DMA_BIDIRECTIONAL);
7091 +
7092 + dpa_release_sgt(sgt);
7093 + addr = dma_map_single(dpa_bp->dev, vaddr, dpa_bp->size,
7094 + DMA_BIDIRECTIONAL);
7095 + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
7096 + dev_err(dpa_bp->dev, "DMA mapping failed");
7097 + return;
7098 + }
7099 + bm_buffer_set64(&bmb, addr);
7100 + }
7101 +
7102 + while (bman_release(dpa_bp->pool, &bmb, 1, 0))
7103 + cpu_relax();
7104 +}
7105 +EXPORT_SYMBOL(dpa_fd_release);
7106 +
7107 +void count_ern(struct dpa_percpu_priv_s *percpu_priv,
7108 + const struct qm_mr_entry *msg)
7109 +{
7110 + switch (msg->ern.rc & QM_MR_RC_MASK) {
7111 + case QM_MR_RC_CGR_TAILDROP:
7112 + percpu_priv->ern_cnt.cg_tdrop++;
7113 + break;
7114 + case QM_MR_RC_WRED:
7115 + percpu_priv->ern_cnt.wred++;
7116 + break;
7117 + case QM_MR_RC_ERROR:
7118 + percpu_priv->ern_cnt.err_cond++;
7119 + break;
7120 + case QM_MR_RC_ORPWINDOW_EARLY:
7121 + percpu_priv->ern_cnt.early_window++;
7122 + break;
7123 + case QM_MR_RC_ORPWINDOW_LATE:
7124 + percpu_priv->ern_cnt.late_window++;
7125 + break;
7126 + case QM_MR_RC_FQ_TAILDROP:
7127 + percpu_priv->ern_cnt.fq_tdrop++;
7128 + break;
7129 + case QM_MR_RC_ORPWINDOW_RETIRED:
7130 + percpu_priv->ern_cnt.fq_retired++;
7131 + break;
7132 + case QM_MR_RC_ORP_ZERO:
7133 + percpu_priv->ern_cnt.orp_zero++;
7134 + break;
7135 + }
7136 +}
7137 +EXPORT_SYMBOL(count_ern);
7138 +
7139 +/**
7140 + * Turn on HW checksum computation for this outgoing frame.
7141 + * If the current protocol is not something we support in this regard
7142 + * (or if the stack has already computed the SW checksum), we do nothing.
7143 + *
7144 + * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
7145 + * otherwise.
7146 + *
7147 + * Note that this function may modify the fd->cmd field and the skb data buffer
7148 + * (the Parse Results area).
7149 + */
7150 +int dpa_enable_tx_csum(struct dpa_priv_s *priv,
7151 + struct sk_buff *skb, struct qm_fd *fd, char *parse_results)
7152 +{
7153 + fm_prs_result_t *parse_result;
7154 + struct iphdr *iph;
7155 + struct ipv6hdr *ipv6h = NULL;
7156 + u8 l4_proto;
7157 + u16 ethertype = ntohs(skb->protocol);
7158 + int retval = 0;
7159 +
7160 + if (skb->ip_summed != CHECKSUM_PARTIAL)
7161 + return 0;
7162 +
7163 + /* Note: L3 csum seems to be already computed in sw, but we can't choose
7164 + * L4 alone from the FM configuration anyway.
7165 + */
7166 +
7167 + /* Fill in some fields of the Parse Results array, so the FMan
7168 + * can find them as if they came from the FMan Parser.
7169 + */
7170 + parse_result = (fm_prs_result_t *)parse_results;
7171 +
7172 + /* If we're dealing with VLAN, get the real Ethernet type */
7173 + if (ethertype == ETH_P_8021Q) {
7174 + /* We can't always assume the MAC header is set correctly
7175 + * by the stack, so reset to beginning of skb->data
7176 + */
7177 + skb_reset_mac_header(skb);
7178 + ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
7179 + }
7180 +
7181 + /* Fill in the relevant L3 parse result fields
7182 + * and read the L4 protocol type
7183 + */
7184 + switch (ethertype) {
7185 + case ETH_P_IP:
7186 + parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
7187 + iph = ip_hdr(skb);
7188 + DPA_BUG_ON(iph == NULL);
7189 + l4_proto = iph->protocol;
7190 + break;
7191 + case ETH_P_IPV6:
7192 + parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
7193 + ipv6h = ipv6_hdr(skb);
7194 + DPA_BUG_ON(ipv6h == NULL);
7195 + l4_proto = ipv6h->nexthdr;
7196 + break;
7197 + default:
7198 + /* We shouldn't even be here */
7199 + if (netif_msg_tx_err(priv) && net_ratelimit())
7200 + netdev_alert(priv->net_dev,
7201 + "Can't compute HW csum for L3 proto 0x%x\n",
7202 + ntohs(skb->protocol));
7203 + retval = -EIO;
7204 + goto return_error;
7205 + }
7206 +
7207 + /* Fill in the relevant L4 parse result fields */
7208 + switch (l4_proto) {
7209 + case IPPROTO_UDP:
7210 + parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
7211 + break;
7212 + case IPPROTO_TCP:
7213 + parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
7214 + break;
7215 + default:
7216 + /* This can as well be a BUG() */
7217 + if (netif_msg_tx_err(priv) && net_ratelimit())
7218 + netdev_alert(priv->net_dev,
7219 + "Can't compute HW csum for L4 proto 0x%x\n",
7220 + l4_proto);
7221 + retval = -EIO;
7222 + goto return_error;
7223 + }
7224 +
7225 + /* At index 0 is IPOffset_1 as defined in the Parse Results */
7226 + parse_result->ip_off[0] = (uint8_t)skb_network_offset(skb);
7227 + parse_result->l4_off = (uint8_t)skb_transport_offset(skb);
7228 +
7229 + /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
7230 + fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
7231 +
7232 +	/* On P1023 and similar platforms, fd->cmd interpretation can be
7233 +	 * disabled by setting the CONTEXT_A bit ICMD. That bit is currently
7234 +	 * not set, so there is no need to check it here; if/when context_a
7235 +	 * comes into use, this bit will have to be checked.
7236 +	 */
7237 +
7238 +return_error:
7239 + return retval;
7240 +}
7241 +EXPORT_SYMBOL(dpa_enable_tx_csum);
7242 +
7243 +#ifdef CONFIG_FSL_DPAA_CEETM
7244 +void dpa_enable_ceetm(struct net_device *dev)
7245 +{
7246 + struct dpa_priv_s *priv = netdev_priv(dev);
7247 + priv->ceetm_en = true;
7248 +}
7249 +EXPORT_SYMBOL(dpa_enable_ceetm);
7250 +
7251 +void dpa_disable_ceetm(struct net_device *dev)
7252 +{
7253 + struct dpa_priv_s *priv = netdev_priv(dev);
7254 + priv->ceetm_en = false;
7255 +}
7256 +EXPORT_SYMBOL(dpa_disable_ceetm);
7257 +#endif
7258 --- /dev/null
7259 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h
7260 @@ -0,0 +1,227 @@
7261 +/* Copyright 2008-2013 Freescale Semiconductor, Inc.
7262 + *
7263 + * Redistribution and use in source and binary forms, with or without
7264 + * modification, are permitted provided that the following conditions are met:
7265 + * * Redistributions of source code must retain the above copyright
7266 + * notice, this list of conditions and the following disclaimer.
7267 + * * Redistributions in binary form must reproduce the above copyright
7268 + * notice, this list of conditions and the following disclaimer in the
7269 + * documentation and/or other materials provided with the distribution.
7270 + * * Neither the name of Freescale Semiconductor nor the
7271 + * names of its contributors may be used to endorse or promote products
7272 + * derived from this software without specific prior written permission.
7273 + *
7274 + *
7275 + * ALTERNATIVELY, this software may be distributed under the terms of the
7276 + * GNU General Public License ("GPL") as published by the Free Software
7277 + * Foundation, either version 2 of that License or (at your option) any
7278 + * later version.
7279 + *
7280 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
7281 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
7282 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
7283 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
7284 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
7285 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
7286 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
7287 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
7288 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
7289 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7290 + */
7291 +
7292 +#ifndef __DPAA_ETH_COMMON_H
7293 +#define __DPAA_ETH_COMMON_H
7294 +
7295 +#include <linux/etherdevice.h> /* struct net_device */
7296 +#include <linux/fsl_bman.h> /* struct bm_buffer */
7297 +#include <linux/of_platform.h> /* struct platform_device */
7298 +#include <linux/net_tstamp.h> /* struct hwtstamp_config */
7299 +
7300 +#include "dpaa_eth.h"
7301 +#include "lnxwrp_fsl_fman.h"
7302 +
7303 +#define dpaa_eth_init_port(type, port, param, errq_id, defq_id, buf_layout,\
7304 + frag_enabled) \
7305 +{ \
7306 + param.errq = errq_id; \
7307 + param.defq = defq_id; \
7308 + param.priv_data_size = buf_layout->priv_data_size; \
7309 + param.parse_results = buf_layout->parse_results; \
7310 + param.hash_results = buf_layout->hash_results; \
7311 + param.frag_enable = frag_enabled; \
7312 + param.time_stamp = buf_layout->time_stamp; \
7313 + param.manip_extra_space = buf_layout->manip_extra_space; \
7314 + param.data_align = buf_layout->data_align; \
7315 + fm_set_##type##_port_params(port, &param); \
7316 +}
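+/* The type token (rx/tx) is pasted into the callee name, so the macro
+ * expands to a call to fm_set_rx_port_params() or fm_set_tx_port_params().
+ */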
7317 +
7318 +#define DPA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
7319 +
7320 +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
7321 +/* each S/G entry can be divided into two S/G entries */
7322 +#define DPA_SGT_ENTRIES_THRESHOLD 7
7323 +#else
7324 +#define DPA_SGT_ENTRIES_THRESHOLD DPA_SGT_MAX_ENTRIES
7325 +#endif /* DPAA_LS1043A_DMA_4K_ISSUE */
7326 +
7328 +#define DPA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
7329 +
7330 +/* return codes for the dpaa-eth hooks */
7331 +enum dpaa_eth_hook_result {
7332 + /* fd/skb was retained by the hook.
7333 + *
7334 + * On the Rx path, this means the Ethernet driver will _not_
7335 + * deliver the skb to the stack. Instead, the hook implementation
7336 + * is expected to properly dispose of the skb.
7337 + *
7338 + * On the Tx path, the Ethernet driver's dpa_tx() function will
7339 + * immediately return NETDEV_TX_OK. The hook implementation is expected
7340 + * to free the skb. *DO*NOT* release it to BMan, or enqueue it to FMan,
7341 + * unless you know exactly what you're doing!
7342 + *
7343 + * On the confirmation/error paths, the Ethernet driver will _not_
7344 + * perform any fd cleanup, nor update the interface statistics.
7345 + */
7346 + DPAA_ETH_STOLEN,
7347 + /* fd/skb was returned to the Ethernet driver for regular processing.
7348 + * The hook is not allowed to, for instance, reallocate the skb (as if
7349 + * by linearizing, copying, cloning or reallocating the headroom).
7350 + */
7351 + DPAA_ETH_CONTINUE
7352 +};
7353 +
7354 +typedef enum dpaa_eth_hook_result (*dpaa_eth_ingress_hook_t)(
7355 + struct sk_buff *skb, struct net_device *net_dev, u32 fqid);
7356 +typedef enum dpaa_eth_hook_result (*dpaa_eth_egress_hook_t)(
7357 + struct sk_buff *skb, struct net_device *net_dev);
7358 +typedef enum dpaa_eth_hook_result (*dpaa_eth_confirm_hook_t)(
7359 + struct net_device *net_dev, const struct qm_fd *fd, u32 fqid);
7360 +
7361 +/* used in napi related functions */
7362 +extern u16 qman_portal_max;
7363 +
7364 +/* from dpa_ethtool.c */
7365 +extern const struct ethtool_ops dpa_ethtool_ops;
7366 +
7367 +#ifdef CONFIG_FSL_DPAA_HOOKS
7368 +/* Various hooks used for unit-testing and/or fastpath optimizations.
7369 + * Currently only one set of such hooks is supported.
7370 + */
7371 +struct dpaa_eth_hooks_s {
7372 + /* Invoked on the Tx private path, immediately after receiving the skb
7373 + * from the stack.
7374 + */
7375 + dpaa_eth_egress_hook_t tx;
7376 +
7377 + /* Invoked on the Rx private path, right before passing the skb
7378 + * up the stack. At that point, the packet's protocol id has already
7379 + * been set. The skb's data pointer is now at the L3 header, and
7380 + * skb->mac_header points to the L2 header. skb->len has been adjusted
7381 + * to be the length of L3+payload (i.e., the length of the
7382 + * original frame minus the L2 header len).
7383 + * For more details on what the skb looks like, see eth_type_trans().
7384 + */
7385 + dpaa_eth_ingress_hook_t rx_default;
7386 +
7387 + /* Driver hook for the Rx error private path. */
7388 + dpaa_eth_confirm_hook_t rx_error;
7389 + /* Driver hook for the Tx confirmation private path. */
7390 + dpaa_eth_confirm_hook_t tx_confirm;
7391 + /* Driver hook for the Tx error private path. */
7392 + dpaa_eth_confirm_hook_t tx_error;
7393 +};
7394 +
7395 +void fsl_dpaa_eth_set_hooks(struct dpaa_eth_hooks_s *hooks);
7396 +
7397 +extern struct dpaa_eth_hooks_s dpaa_eth_hooks;
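+
+/* Illustrative only (the names below are hypothetical): a unit test might
+ * steal all default Rx traffic by registering a hook, e.g.:
+ *
+ *	static enum dpaa_eth_hook_result my_rx_hook(struct sk_buff *skb,
+ *			struct net_device *net_dev, u32 fqid)
+ *	{
+ *		dev_kfree_skb(skb);
+ *		return DPAA_ETH_STOLEN;
+ *	}
+ *
+ *	static struct dpaa_eth_hooks_s my_hooks = { .rx_default = my_rx_hook };
+ *	fsl_dpaa_eth_set_hooks(&my_hooks);
+ */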
7398 +#endif
7399 +
7400 +int dpa_netdev_init(struct net_device *net_dev,
7401 + const uint8_t *mac_addr,
7402 + uint16_t tx_timeout);
7403 +int __cold dpa_start(struct net_device *net_dev);
7404 +int __cold dpa_stop(struct net_device *net_dev);
7405 +void __cold dpa_timeout(struct net_device *net_dev);
7406 +struct rtnl_link_stats64 * __cold
7407 +dpa_get_stats64(struct net_device *net_dev,
7408 + struct rtnl_link_stats64 *stats);
7409 +int dpa_change_mtu(struct net_device *net_dev, int new_mtu);
7410 +int dpa_ndo_init(struct net_device *net_dev);
7411 +int dpa_set_features(struct net_device *dev, netdev_features_t features);
7412 +netdev_features_t dpa_fix_features(struct net_device *dev,
7413 + netdev_features_t features);
7414 +#ifdef CONFIG_FSL_DPAA_TS
7415 +u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv,
7416 + enum port_type rx_tx, const void *data);
7417 +/* Updates the skb shared hw timestamp from the hardware timestamp */
7418 +int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
7419 + struct skb_shared_hwtstamps *shhwtstamps, const void *data);
7420 +#endif /* CONFIG_FSL_DPAA_TS */
7421 +int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
7422 +int __cold dpa_remove(struct platform_device *of_dev);
7423 +struct mac_device * __cold __must_check
7424 +__attribute__((nonnull)) dpa_mac_probe(struct platform_device *_of_dev);
7425 +int dpa_set_mac_address(struct net_device *net_dev, void *addr);
7426 +void dpa_set_rx_mode(struct net_device *net_dev);
7427 +void dpa_set_buffers_layout(struct mac_device *mac_dev,
7428 + struct dpa_buffer_layout_s *layout);
7429 +int __attribute__((nonnull))
7430 +dpa_bp_alloc(struct dpa_bp *dpa_bp);
7431 +void __cold __attribute__((nonnull))
7432 +dpa_bp_free(struct dpa_priv_s *priv);
7433 +struct dpa_bp *dpa_bpid2pool(int bpid);
7434 +void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp);
7435 +bool dpa_bpid2pool_use(int bpid);
7436 +void dpa_bp_drain(struct dpa_bp *bp);
7437 +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
7438 +u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
7439 + void *accel_priv, select_queue_fallback_t fallback);
7440 +#endif
7441 +struct dpa_fq *dpa_fq_alloc(struct device *dev,
7442 + u32 fq_start,
7443 + u32 fq_count,
7444 + struct list_head *list,
7445 + enum dpa_fq_type fq_type);
7446 +int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
7447 + struct fm_port_fqs *port_fqs,
7448 + bool tx_conf_fqs_per_core,
7449 + enum port_type ptype);
7450 +int dpa_get_channel(void);
7451 +void dpa_release_channel(void);
7452 +int dpaa_eth_add_channel(void *__arg);
7453 +int dpaa_eth_cgr_init(struct dpa_priv_s *priv);
7454 +void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
7455 + struct fm_port *tx_port);
7456 +int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable);
7457 +int __cold __attribute__((nonnull))
7458 +dpa_fq_free(struct device *dev, struct list_head *list);
7459 +void dpaa_eth_init_ports(struct mac_device *mac_dev,
7460 + struct dpa_bp *bp, size_t count,
7461 + struct fm_port_fqs *port_fqs,
7462 + struct dpa_buffer_layout_s *buf_layout,
7463 + struct device *dev);
7464 +void dpa_release_sgt(struct qm_sg_entry *sgt);
7465 +void __attribute__((nonnull))
7466 +dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd);
7467 +void count_ern(struct dpa_percpu_priv_s *percpu_priv,
7468 + const struct qm_mr_entry *msg);
7469 +int dpa_enable_tx_csum(struct dpa_priv_s *priv,
7470 + struct sk_buff *skb, struct qm_fd *fd, char *parse_results);
7471 +#ifdef CONFIG_FSL_DPAA_CEETM
7472 +void dpa_enable_ceetm(struct net_device *dev);
7473 +void dpa_disable_ceetm(struct net_device *dev);
7474 +#endif
7475 +struct proxy_device {
7476 + struct mac_device *mac_dev;
7477 +};
7478 +
7479 +/* MAC device control functions exposed by the proxy interface */
7480 +int dpa_proxy_start(struct net_device *net_dev);
7481 +int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev);
7482 +int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev,
7483 + struct net_device *net_dev);
7484 +int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev,
7485 + struct net_device *net_dev);
7486 +
7487 +#endif /* __DPAA_ETH_COMMON_H */
7488 --- /dev/null
7489 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic.c
7490 @@ -0,0 +1,1735 @@
7491 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
7492 + *
7493 + * Redistribution and use in source and binary forms, with or without
7494 + * modification, are permitted provided that the following conditions are met:
7495 + * * Redistributions of source code must retain the above copyright
7496 + * notice, this list of conditions and the following disclaimer.
7497 + * * Redistributions in binary form must reproduce the above copyright
7498 + * notice, this list of conditions and the following disclaimer in the
7499 + * documentation and/or other materials provided with the distribution.
7500 + * * Neither the name of Freescale Semiconductor nor the
7501 + * names of its contributors may be used to endorse or promote products
7502 + * derived from this software without specific prior written permission.
7503 + *
7504 + *
7505 + * ALTERNATIVELY, this software may be distributed under the terms of the
7506 + * GNU General Public License ("GPL") as published by the Free Software
7507 + * Foundation, either version 2 of that License or (at your option) any
7508 + * later version.
7509 + *
7510 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
7511 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
7512 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
7513 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
7514 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
7515 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
7516 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
7517 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
7518 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
7519 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7520 + */
7521 +
7522 +#include <linux/init.h>
7523 +#include <linux/module.h>
7524 +#include <linux/kthread.h>
7525 +#include <linux/of_net.h>
7526 +#include <linux/if_vlan.h>
7527 +#include <linux/ip.h>
7528 +#include <linux/ipv6.h>
7529 +#include <linux/percpu.h>
7530 +
7531 +#include "dpaa_eth.h"
7532 +#include "dpaa_eth_common.h"
7533 +#include "dpaa_eth_base.h"
7534 +#include "dpaa_eth_generic.h"
7535 +
7536 +#define DPA_DEFAULT_TX_HEADROOM 64
7537 +#define DPA_GENERIC_SKB_COPY_MAX_SIZE 256
7538 +#define DPA_GENERIC_NAPI_WEIGHT 64
7539 +#define DPA_GENERIC_DESCRIPTION "FSL DPAA Generic Ethernet driver"
7540 +#define DPA_GENERIC_BUFFER_QUOTA 4
7541 +
7542 +MODULE_LICENSE("Dual BSD/GPL");
7543 +MODULE_DESCRIPTION(DPA_GENERIC_DESCRIPTION);
7544 +
7545 +static uint8_t generic_debug = -1;
7546 +module_param(generic_debug, byte, S_IRUGO);
7547 +MODULE_PARM_DESC(generic_debug, "Module/Driver verbosity level");
7548 +
7549 +/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
7550 +static uint16_t tx_timeout = 1000;
7551 +module_param(tx_timeout, ushort, S_IRUGO);
7552 +MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
7553 +
7554 +struct rtnl_link_stats64 *__cold
7555 +dpa_generic_get_stats64(struct net_device *netdev,
7556 + struct rtnl_link_stats64 *stats);
7557 +static int dpa_generic_set_mac_address(struct net_device *net_dev,
7558 + void *addr);
7559 +static int __cold dpa_generic_start(struct net_device *netdev);
7560 +static int __cold dpa_generic_stop(struct net_device *netdev);
7561 +static int dpa_generic_eth_probe(struct platform_device *_of_dev);
7562 +static int dpa_generic_remove(struct platform_device *of_dev);
7563 +static void dpa_generic_ern(struct qman_portal *portal,
7564 + struct qman_fq *fq,
7565 + const struct qm_mr_entry *msg);
7566 +static int __hot dpa_generic_tx(struct sk_buff *skb,
7567 + struct net_device *netdev);
7568 +static void dpa_generic_drain_bp(struct dpa_bp *bp, u8 nbuf);
7569 +static void dpa_generic_drain_sg_bp(struct dpa_bp *sg_bp, u8 nbuf);
7570 +
7571 +static const struct net_device_ops dpa_generic_ops = {
7572 + .ndo_open = dpa_generic_start,
7573 + .ndo_start_xmit = dpa_generic_tx,
7574 + .ndo_stop = dpa_generic_stop,
7575 + .ndo_set_mac_address = dpa_generic_set_mac_address,
7576 + .ndo_tx_timeout = dpa_timeout,
7577 + .ndo_get_stats64 = dpa_generic_get_stats64,
7578 + .ndo_init = dpa_ndo_init,
7579 + .ndo_set_features = dpa_set_features,
7580 + .ndo_fix_features = dpa_fix_features,
7581 + .ndo_change_mtu = dpa_change_mtu,
7582 +};
7583 +
7584 +static void dpa_generic_draining_timer(unsigned long arg)
7585 +{
7586 + struct dpa_generic_priv_s *priv = (struct dpa_generic_priv_s *)arg;
7587 +
7588 + dpa_generic_drain_bp(priv->draining_tx_bp, DPA_GENERIC_BUFFER_QUOTA);
7589 + dpa_generic_drain_sg_bp(priv->draining_tx_sg_bp,
7590 + DPA_GENERIC_BUFFER_QUOTA);
7591 +
7592 + if (priv->net_dev->flags & IFF_UP)
7593 + mod_timer(&(priv->timer), jiffies + 1);
7594 +}
7595 +
7596 +struct rtnl_link_stats64 *__cold
7597 +dpa_generic_get_stats64(struct net_device *netdev,
7598 + struct rtnl_link_stats64 *stats)
7599 +{
7600 + struct dpa_generic_priv_s *priv = netdev_priv(netdev);
7601 + u64 *cpustats;
7602 + u64 *netstats = (u64 *)stats;
7603 + int i, j;
7604 + struct dpa_percpu_priv_s *percpu_priv;
7605 + int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
7606 +
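+	/* struct rtnl_link_stats64 holds nothing but u64 counters, which is
+	 * what makes the flat per-field summation below valid.
+	 */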
7607 + for_each_online_cpu(i) {
7608 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
7609 +
7610 + cpustats = (u64 *)&percpu_priv->stats;
7611 +
7612 + for (j = 0; j < numstats; j++)
7613 + netstats[j] += cpustats[j];
7614 + }
7615 +
7616 + return stats;
7617 +}
7618 +
7619 +static int dpa_generic_set_mac_address(struct net_device *net_dev,
7620 + void *addr)
7621 +{
7622 + const struct dpa_generic_priv_s *priv = netdev_priv(net_dev);
7623 + int _errno;
7624 +
7625 + _errno = eth_mac_addr(net_dev, addr);
7626 + if (_errno < 0) {
7627 + if (netif_msg_drv(priv))
7628 + netdev_err(net_dev, "eth_mac_addr() = %d\n", _errno);
7629 + return _errno;
7630 + }
7631 +
7632 + return 0;
7633 +}
7634 +
7635 +static const struct of_device_id dpa_generic_match[] = {
7636 + {
7637 + .compatible = "fsl,dpa-ethernet-generic"
7638 + },
7639 + {}
7640 +};
7641 +
7642 +MODULE_DEVICE_TABLE(of, dpa_generic_match);
7643 +
7644 +static struct platform_driver dpa_generic_driver = {
7645 + .driver = {
7646 + .name = KBUILD_MODNAME,
7647 + .of_match_table = dpa_generic_match,
7648 + .owner = THIS_MODULE,
7649 + },
7650 + .probe = dpa_generic_eth_probe,
7651 + .remove = dpa_generic_remove
7652 +};
7653 +
7654 +static int get_port_ref(struct device_node *dev_node,
7655 + struct fm_port **port)
7656 +{
7657 + struct platform_device *port_of_dev = NULL;
7658 + struct device *op_dev = NULL;
7659 + struct device_node *port_node = NULL;
7660 +
7661 + port_node = of_parse_phandle(dev_node, "fsl,fman-oh-port", 0);
7662 + if (port_node == NULL)
7663 + return -EINVAL;
7664 +
7665 + port_of_dev = of_find_device_by_node(port_node);
7666 + of_node_put(port_node);
7667 +
7668 + if (port_of_dev == NULL)
7669 + return -EINVAL;
7670 +
7671 + /* get the reference to oh port from FMD */
7672 + op_dev = &port_of_dev->dev;
7673 + *port = fm_port_bind(op_dev);
7674 +
7675 + if (*port == NULL)
7676 + return -EINVAL;
7677 +
7678 + return 0;
7679 +}
7680 +
7681 +static void dpaa_generic_napi_enable(struct dpa_generic_priv_s *priv)
7682 +{
7683 + struct dpa_percpu_priv_s *percpu_priv;
7684 + int i, j;
7685 +
7686 + for_each_possible_cpu(i) {
7687 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
7688 +
7689 + for (j = 0; j < qman_portal_max; j++)
7690 + napi_enable(&percpu_priv->np[j].napi);
7691 + }
7692 +}
7693 +
7694 +static void dpaa_generic_napi_disable(struct dpa_generic_priv_s *priv)
7695 +{
7696 + struct dpa_percpu_priv_s *percpu_priv;
7697 + int i, j;
7698 +
7699 + for_each_possible_cpu(i) {
7700 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
7701 +
7702 + for (j = 0; j < qman_portal_max; j++)
7703 + napi_disable(&percpu_priv->np[j].napi);
7704 + }
7705 +}
7706 +
7707 +static struct device_node *get_rx_op_port_node(struct platform_device *_of_dev)
7708 +{
7709 + struct device *dev = &_of_dev->dev;
7710 + struct device_node *port_node = NULL;
7711 + struct device_node *onic_node = NULL;
7712 + int num_ports = 0;
7713 +
7714 + onic_node = dev->of_node;
7715 +
7716 + num_ports = of_count_phandle_with_args(onic_node, "fsl,oh-ports", NULL);
7717 + if (num_ports != 2) {
7718 + dev_err(dev, "There should be two O/H port handles in the device tree\n");
7719 + return ERR_PTR(-EINVAL);
7720 + }
7721 +
7722 + port_node = of_parse_phandle(onic_node, "fsl,oh-ports", 0);
7723 + if (port_node == NULL) {
7724 + dev_err(dev, "Cannot find O/H port node in the device tree\n");
7725 + return ERR_PTR(-EFAULT);
7726 + }
7727 +
7728 + return port_node;
7729 +}
7730 +
7731 +static int __cold dpa_generic_start(struct net_device *netdev)
7732 +{
7733 + struct dpa_generic_priv_s *priv = netdev_priv(netdev);
7734 +
7735 + /* seed default buffer pool */
7736 + dpa_bp_priv_seed(priv->rx_bp);
7737 +
7738 + dpaa_generic_napi_enable(priv);
7739 + netif_tx_start_all_queues(netdev);
7740 +
7741 + mod_timer(&priv->timer, jiffies + 100);
7742 +
7743 + return 0;
7744 +}
7745 +
7746 +static int __cold dpa_generic_stop(struct net_device *netdev)
7747 +{
7748 + struct dpa_generic_priv_s *priv = netdev_priv(netdev);
7749 +
7750 + netif_tx_stop_all_queues(netdev);
7751 + dpaa_generic_napi_disable(priv);
7752 +
7753 + return 0;
7754 +}
7755 +
7756 +static enum qman_cb_dqrr_result __hot
7757 +dpa_generic_rx_err_dqrr(struct qman_portal *portal,
7758 + struct qman_fq *fq,
7759 + const struct qm_dqrr_entry *dq)
7760 +{
7761 + struct net_device *netdev;
7762 + struct dpa_generic_priv_s *priv;
7763 + struct dpa_percpu_priv_s *percpu_priv;
7764 + const struct qm_fd *fd;
7765 + int *countptr;
7766 +
7767 + netdev = ((struct dpa_fq *)fq)->net_dev;
7768 + priv = netdev_priv(netdev);
7769 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
7770 + countptr = raw_cpu_ptr(priv->rx_bp->percpu_count);
7771 + fd = &dq->fd;
7772 +
7773 + /* TODO: extract bpid from the fd; when multiple bps are supported
7774 + * there won't be a default bp
7775 + */
7776 +
7777 + if (dpaa_eth_napi_schedule(percpu_priv, portal))
7778 + return qman_cb_dqrr_stop;
7779 +
7780 + if (unlikely(dpaa_eth_refill_bpools(priv->rx_bp, countptr))) {
7781 + /* Unable to refill the buffer pool due to insufficient
7782 + * system memory. Just release the frame back into the pool,
7783 + * otherwise we'll soon end up with an empty buffer pool.
7784 + */
7785 + dpa_fd_release(netdev, fd);
7786 + goto qman_consume;
7787 + }
7788 +
7789 + /* limit common, possibly innocuous Rx FIFO Overflow errors'
7790 + * interference with zero-loss convergence benchmark results.
7791 + */
7792 + if (likely(fd->status & FM_FD_STAT_ERR_PHYSICAL))
7793 + pr_warn_once("fsl-dpa: non-zero error counters in fman statistics (sysfs)\n");
7794 +	else if (netif_msg_hw(priv) && net_ratelimit())
7795 +		netdev_err(netdev, "Err FD status 2 = 0x%08x\n",
7796 +			   fd->status & FM_FD_STAT_RX_ERRORS);
7797 +
7800 + percpu_priv->stats.rx_errors++;
7801 +
7802 + if (fd->status & FM_PORT_FRM_ERR_DMA)
7803 + percpu_priv->rx_errors.dme++;
7804 + if (fd->status & FM_PORT_FRM_ERR_PHYSICAL)
7805 + percpu_priv->rx_errors.fpe++;
7806 + if (fd->status & FM_PORT_FRM_ERR_SIZE)
7807 + percpu_priv->rx_errors.fse++;
7808 + if (fd->status & FM_PORT_FRM_ERR_PRS_HDR_ERR)
7809 + percpu_priv->rx_errors.phe++;
7810 +
7811 + /* TODO dpa_csum_validation */
7812 +
7813 + dpa_fd_release(netdev, fd);
7814 +
7815 +qman_consume:
7816 + return qman_cb_dqrr_consume;
7817 +}
7818 +
7819 +
7821 +dpa_generic_rx_dqrr(struct qman_portal *portal,
7822 + struct qman_fq *fq,
7823 + const struct qm_dqrr_entry *dq)
7824 +{
7825 + struct net_device *netdev;
7826 + struct dpa_generic_priv_s *priv;
7827 + struct dpa_bp *bp;
7828 + struct dpa_percpu_priv_s *percpu_priv;
7829 + struct sk_buff **skbh;
7830 + struct sk_buff *skb;
7831 + const struct qm_fd *fd = &dq->fd;
7832 + unsigned int skb_len;
7833 + u32 fd_status = fd->status;
7834 + u64 pad;
7835 + dma_addr_t addr = qm_fd_addr(fd);
7836 + unsigned int data_start;
7837 + unsigned long skb_addr;
7838 + int *countptr;
7839 +
7840 + netdev = ((struct dpa_fq *)fq)->net_dev;
7841 + priv = netdev_priv(netdev);
7842 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
7843 + countptr = raw_cpu_ptr(priv->rx_bp->percpu_count);
7844 +
7845 + /* This is needed for TCP traffic as draining only on TX is not
7846 + * enough
7847 + */
7848 + dpa_generic_drain_bp(priv->draining_tx_bp, 1);
7849 + dpa_generic_drain_sg_bp(priv->draining_tx_sg_bp, 1);
7850 +
7851 + if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
7852 + return qman_cb_dqrr_stop;
7853 +
7854 + if (unlikely(dpaa_eth_refill_bpools(priv->rx_bp, countptr))) {
7855 + /* Unable to refill the buffer pool due to insufficient
7856 + * system memory. Just release the frame back into the pool,
7857 + * otherwise we'll soon end up with an empty buffer pool.
7858 + */
7859 + dpa_fd_release(netdev, fd);
7860 + goto qman_consume;
7861 + }
7862 +
7863 + DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), -1);
7864 +
7865 +	if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS)) {
7866 + if (netif_msg_hw(priv) && net_ratelimit())
7867 + netdev_warn(netdev, "FD status = 0x%08x\n",
7868 + fd->status & FM_FD_STAT_RX_ERRORS);
7869 +
7870 + percpu_priv->stats.rx_errors++;
7871 + dpa_fd_release(netdev, fd);
7872 + goto qman_consume;
7873 + }
7874 + if (unlikely(fd->format != qm_fd_contig)) {
7875 + percpu_priv->stats.rx_dropped++;
7876 + if (netif_msg_rx_status(priv) && net_ratelimit())
7877 + netdev_warn(netdev, "Dropping a SG frame\n");
7878 + dpa_fd_release(netdev, fd);
7879 + goto qman_consume;
7880 + }
7881 +
7882 + bp = dpa_bpid2pool(fd->bpid);
7883 +
7884 + /* find out the pad */
7885 + skb_addr = virt_to_phys(skb->head);
7886 + pad = addr - skb_addr;
7887 +
7888 + dma_unmap_single(bp->dev, addr, bp->size, DMA_BIDIRECTIONAL);
7889 +
7890 + countptr = raw_cpu_ptr(bp->percpu_count);
7891 + (*countptr)--;
7892 +
7893 + /* The skb is currently pointed at head + headroom. The packet
7894 + * starts at skb->head + pad + fd offset.
7895 + */
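+	/* skb_put() first extends skb->len to cover data_start plus the
+	 * frame length, then skb_pull() drops the leading data_start bytes,
+	 * leaving skb->data at the start of the received frame.
+	 */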
7896 + data_start = (unsigned int)(pad + dpa_fd_offset(fd) -
7897 + skb_headroom(skb));
7898 + skb_put(skb, dpa_fd_length(fd) + data_start);
7899 + skb_pull(skb, data_start);
7900 + skb->protocol = eth_type_trans(skb, netdev);
7901 + if (unlikely(dpa_check_rx_mtu(skb, netdev->mtu))) {
7902 + percpu_priv->stats.rx_dropped++;
7903 + dev_kfree_skb(skb);
7904 + goto qman_consume;
7905 + }
7906 +
7907 + skb_len = skb->len;
7908 +
7909 + if (fd->status & FM_FD_STAT_L4CV)
7910 + skb->ip_summed = CHECKSUM_UNNECESSARY;
7911 + else
7912 + skb->ip_summed = CHECKSUM_NONE;
7913 +
7914 + if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
7915 + goto qman_consume;
7916 +
7917 + percpu_priv->stats.rx_packets++;
7918 + percpu_priv->stats.rx_bytes += skb_len;
7919 +
7920 +qman_consume:
7921 + return qman_cb_dqrr_consume;
7922 +}
7923 +
7924 +static void dpa_generic_drain_sg_bp(struct dpa_bp *sgbp, u8 nbuf)
7925 +{
7926 + int ret;
7927 + struct bm_buffer bmb[8];
7928 +
7929 + do {
7930 + ret = bman_acquire(sgbp->pool, bmb, nbuf, 0);
7931 + } while (ret >= 0);
7932 +}
7933 +
7934 +inline void dpa_release_sg(struct sk_buff *skb, dma_addr_t addr,
7935 + struct dpa_bp *bp)
7936 +{
7937 + struct qm_sg_entry *sgt = phys_to_virt(addr + DPA_DEFAULT_TX_HEADROOM);
7938 + int nr_frags = skb_shinfo(skb)->nr_frags;
7939 + dma_addr_t sg_addr;
7940 + int j;
7941 +
7942 + dma_unmap_single(bp->dev, addr, DPA_DEFAULT_TX_HEADROOM +
7943 + sizeof(struct qm_sg_entry) * (1 + nr_frags),
7944 + DMA_BIDIRECTIONAL);
7945 +
7946 + for (j = 0; j <= nr_frags; j++) {
7947 + DPA_BUG_ON(sgt[j].extension);
7948 + sg_addr = qm_sg_addr(&sgt[j]);
7949 + dma_unmap_page(bp->dev, sg_addr,
7950 + sgt[j].length, DMA_BIDIRECTIONAL);
7951 + }
7952 +
7953 + dev_kfree_skb_any(skb);
7954 +}
7955 +
7956 +inline void dpa_release_contig(struct sk_buff *skb, dma_addr_t addr,
7957 + struct dpa_bp *bp)
7958 +{
7959 + dma_unmap_single(bp->dev, addr, bp->size, DMA_BIDIRECTIONAL);
7960 + dev_kfree_skb_any(skb);
7961 +}
7962 +
7963 +static void dpa_generic_drain_bp(struct dpa_bp *bp, u8 nbuf)
7964 +{
7965 + int ret, i;
7966 + struct bm_buffer bmb[8];
7967 + dma_addr_t addr;
7968 + int *countptr = raw_cpu_ptr(bp->percpu_count);
7969 + int count = *countptr;
7970 + struct sk_buff **skbh;
7971 +
7972 + do {
7973 + /* bman_acquire will fail if nbuf > 8 */
7974 + ret = bman_acquire(bp->pool, bmb, nbuf, 0);
7975 + if (ret > 0) {
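+			/* bman_acquire() may hand back fewer than nbuf
+			 * buffers; only the first ret entries of bmb are
+			 * valid.
+			 */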
7976 +			for (i = 0; i < ret; i++) {
7977 + addr = bm_buf_addr(&bmb[i]);
7978 + skbh = (struct sk_buff **)phys_to_virt(addr);
7979 + dma_unmap_single(bp->dev, addr, bp->size,
7980 + DMA_TO_DEVICE);
7981 +
7982 + if (skb_is_nonlinear(*skbh))
7983 + dpa_release_sg(*skbh, addr, bp);
7984 + else
7985 + dpa_release_contig(*skbh, addr, bp);
7986 + }
7987 + count -= i;
7988 + }
7989 + } while (ret > 0);
7990 +
7991 + *countptr = count;
7992 +}
7993 +
7994 +/**
7995 + * Turn on HW checksum computation for this outgoing frame.
7996 + * If the current protocol is not something we support in this regard
7997 + * (or if the stack has already computed the SW checksum), we do nothing.
7998 + *
7999 + * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
8000 + * otherwise.
8001 + *
8002 + * Note that this function may modify the fd->cmd field and the skb data buffer
8003 + * (the Parse Results area).
8004 + */
8005 +static int dpa_generic_tx_csum(struct dpa_generic_priv_s *priv,
8006 + struct sk_buff *skb,
8007 + struct qm_fd *fd,
8008 + char *parse_results)
8009 +{
8010 + fm_prs_result_t *parse_result;
8011 + struct iphdr *iph;
8012 + struct ipv6hdr *ipv6h = NULL;
8013 + int l4_proto;
8014 + int ethertype = ntohs(skb->protocol);
8015 + int retval = 0;
8016 +
8017 + if (skb->ip_summed != CHECKSUM_PARTIAL)
8018 + return 0;
8019 +
8020 + /* Note: L3 csum seems to be already computed in sw, but we can't choose
8021 + * L4 alone from the FM configuration anyway.
8022 + */
8023 +
8024 + /* Fill in some fields of the Parse Results array, so the FMan
8025 + * can find them as if they came from the FMan Parser.
8026 + */
8027 + parse_result = (fm_prs_result_t *)parse_results;
8028 +
8029 + /* If we're dealing with VLAN, get the real Ethernet type */
8030 + if (ethertype == ETH_P_8021Q) {
8031 + /* We can't always assume the MAC header is set correctly
8032 + * by the stack, so reset to beginning of skb->data
8033 + */
8034 + skb_reset_mac_header(skb);
8035 + ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
8036 + }
8037 +
8038 + /* Fill in the relevant L3 parse result fields
8039 + * and read the L4 protocol type
8040 + */
8041 + switch (ethertype) {
8042 + case ETH_P_IP:
8043 + parse_result->l3r = FM_L3_PARSE_RESULT_IPV4;
8044 + iph = ip_hdr(skb);
8045 + BUG_ON(iph == NULL);
8046 + l4_proto = iph->protocol;
8047 + break;
8048 + case ETH_P_IPV6:
8049 + parse_result->l3r = FM_L3_PARSE_RESULT_IPV6;
8050 + ipv6h = ipv6_hdr(skb);
8051 + BUG_ON(ipv6h == NULL);
8052 + l4_proto = ipv6h->nexthdr;
8053 + break;
8054 + default:
8055 + /* We shouldn't even be here */
8056 + if (netif_msg_tx_err(priv) && net_ratelimit())
8057 + netdev_alert(priv->net_dev,
8058 + "Can't compute HW csum for L3 proto 0x%x\n",
8059 + ntohs(skb->protocol));
8060 + retval = -EIO;
8061 + goto return_error;
8062 + }
8063 +
8064 + /* Fill in the relevant L4 parse result fields */
8065 + switch (l4_proto) {
8066 + case IPPROTO_UDP:
8067 + parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
8068 + break;
8069 + case IPPROTO_TCP:
8070 + parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
8071 + break;
8072 + default:
8073 + /* This can as well be a BUG() */
8074 + if (netif_msg_tx_err(priv) && net_ratelimit())
8075 + netdev_alert(priv->net_dev,
8076 + "Can't compute HW csum for L4 proto 0x%x\n",
8077 + l4_proto);
8078 + retval = -EIO;
8079 + goto return_error;
8080 + }
8081 +
8082 + /* At index 0 is IPOffset_1 as defined in the Parse Results */
8083 + parse_result->ip_off[0] = (uint8_t)skb_network_offset(skb);
8084 + parse_result->l4_off = (uint8_t)skb_transport_offset(skb);
8085 +
8086 + /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
8087 + fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
8088 +
8089 +	/* On P1023 and similar platforms, fd->cmd interpretation can be
8090 +	 * disabled by setting the CONTEXT_A bit ICMD. That bit is currently
8091 +	 * not set, so there is no need to check it here; if/when context_a
8092 +	 * comes into use, this bit will have to be checked.
8093 +	 */
8094 +
8095 +return_error:
8096 + return retval;
8097 +}
8098 +
8099 +static inline int generic_skb_to_sg_fd(struct dpa_generic_priv_s *priv,
8100 + struct sk_buff *skb, struct qm_fd *fd)
8101 +{
8102 + struct dpa_bp *dpa_bp = priv->draining_tx_bp;
8103 + struct dpa_bp *dpa_sg_bp = priv->draining_tx_sg_bp;
8104 + dma_addr_t addr;
8105 + struct sk_buff **skbh;
8106 + struct net_device *net_dev = priv->net_dev;
8107 + int err;
8108 +
8109 + struct qm_sg_entry *sgt;
8110 + void *sgt_buf;
8111 + void *buffer_start;
8112 + skb_frag_t *frag;
8113 + int i, j;
8114 + const enum dma_data_direction dma_dir = DMA_BIDIRECTIONAL;
8115 + const int nr_frags = skb_shinfo(skb)->nr_frags;
8116 +
8117 + memset(fd, 0, sizeof(*fd));
8118 + fd->format = qm_fd_sg;
8119 +
8120 + /* get a page frag to store the SGTable */
8121 + sgt_buf = netdev_alloc_frag(priv->tx_headroom +
8122 + sizeof(struct qm_sg_entry) * (1 + nr_frags));
8123 + if (unlikely(!sgt_buf)) {
8124 + dev_err(dpa_bp->dev, "netdev_alloc_frag() failed\n");
8125 + return -ENOMEM;
8126 + }
8127 +
8128 + memset(sgt_buf, 0, priv->tx_headroom +
8129 + sizeof(struct qm_sg_entry) * (1 + nr_frags));
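+	/* sgt_buf layout: tx_headroom bytes (including the Parse Results
+	 * area written by dpa_generic_tx_csum() below), followed by one SG
+	 * entry for the linear part plus one per page fragment.
+	 */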
8130 +
8131 + /* do this before dma_map_single(DMA_TO_DEVICE), because we may need to
8132 + * write into the skb.
8133 + */
8134 + err = dpa_generic_tx_csum(priv, skb, fd,
8135 + sgt_buf + DPA_TX_PRIV_DATA_SIZE);
8136 + if (unlikely(err < 0)) {
8137 + if (netif_msg_tx_err(priv) && net_ratelimit())
8138 + netdev_err(net_dev, "HW csum error: %d\n", err);
8139 + goto csum_failed;
8140 + }
8141 +
8142 + sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
8143 + sgt[0].bpid = dpa_sg_bp->bpid;
8144 + sgt[0].offset = 0;
8145 + sgt[0].length = skb_headlen(skb);
8146 + sgt[0].extension = 0;
8147 + sgt[0].final = 0;
8148 +
8149 + addr = dma_map_single(dpa_sg_bp->dev, skb->data, sgt[0].length,
8150 + dma_dir);
8151 + if (unlikely(dma_mapping_error(dpa_sg_bp->dev, addr))) {
8152 + dev_err(dpa_sg_bp->dev, "DMA mapping failed");
8153 + err = -EINVAL;
8154 + goto sg0_map_failed;
8155 + }
8156 +
8157 + sgt[0].addr_hi = (uint8_t)upper_32_bits(addr);
8158 + sgt[0].addr_lo = cpu_to_be32(lower_32_bits(addr));
8159 +
8160 + /* populate the rest of SGT entries */
8161 + for (i = 1; i <= nr_frags; i++) {
8162 + frag = &skb_shinfo(skb)->frags[i - 1];
8163 + sgt[i].bpid = dpa_sg_bp->bpid;
8164 + sgt[i].offset = 0;
8165 + sgt[i].length = frag->size;
8166 + sgt[i].extension = 0;
8167 + sgt[i].final = 0;
8168 +
8169 + DPA_BUG_ON(!skb_frag_page(frag));
8170 + addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, sgt[i].length,
8171 + dma_dir);
8172 + if (unlikely(dma_mapping_error(dpa_sg_bp->dev, addr))) {
8173 + dev_err(dpa_sg_bp->dev, "DMA mapping failed");
8174 + err = -EINVAL;
8175 + goto sg_map_failed;
8176 + }
8177 +
8178 + /* keep the offset in the address */
8179 + sgt[i].addr_hi = (uint8_t)upper_32_bits(addr);
8180 + sgt[i].addr_lo = cpu_to_be32(lower_32_bits(addr));
8181 + }
8182 + sgt[i - 1].final = 1;
8183 +
8184 + fd->length20 = skb->len;
8185 + fd->offset = priv->tx_headroom;
8186 +
8187 + /* DMA map the SGT page */
8188 + buffer_start = (void *)sgt - dpa_fd_offset(fd);
8189 + /* Can't write at "negative" offset in buffer_start, because this skb
8190 + * may not have been allocated by us.
8191 + */
8192 + DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
8193 +
8194 + addr = dma_map_single(dpa_bp->dev, buffer_start,
8195 + priv->tx_headroom + sizeof(struct qm_sg_entry) * (1 + nr_frags),
8196 + dma_dir);
8197 + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
8198 + dev_err(dpa_bp->dev, "DMA mapping failed");
8199 + err = -EINVAL;
8200 + goto sgt_map_failed;
8201 + }
8202 +
8203 + fd->bpid = dpa_bp->bpid;
8204 + fd->addr_hi = (uint8_t)upper_32_bits(addr);
8205 + fd->addr_lo = lower_32_bits(addr);
8206 +
8207 + return 0;
8208 +
8209 +sgt_map_failed:
8210 +sg_map_failed:
8211 + for (j = 0; j < i; j++)
8212 + dma_unmap_page(dpa_sg_bp->dev, qm_sg_addr(&sgt[j]),
8213 + be32_to_cpu(sgt[j].length), dma_dir);
8214 +sg0_map_failed:
8215 +csum_failed:
8216 + put_page(virt_to_head_page(sgt_buf));
8217 +
8218 + return err;
8219 +}
8220 +
8221 +static int __hot dpa_generic_tx(struct sk_buff *skb, struct net_device *netdev)
8222 +{
8223 + struct dpa_generic_priv_s *priv = netdev_priv(netdev);
8224 + struct dpa_percpu_priv_s *percpu_priv =
8225 + raw_cpu_ptr(priv->percpu_priv);
8226 + struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats;
8227 + struct dpa_bp *bp = priv->draining_tx_bp;
8228 + struct dpa_bp *sg_bp = priv->draining_tx_sg_bp;
8229 + struct sk_buff **skbh = NULL;
8230 + dma_addr_t addr;
8231 + struct qm_fd fd;
8232 + int queue_mapping;
8233 + struct qman_fq *egress_fq;
8234 + const bool nonlinear = skb_is_nonlinear(skb);
8235 + int i = 0, err = 0;
8236 + int *countptr;
8237 +
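+	/* Nonlinear skbs with nr_frags >= DPA_SGT_MAX_ENTRIES take the
+	 * contiguous path below; such frames are assumed not to occur with
+	 * the feature set this driver advertises.
+	 */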
8238 + if (nonlinear && skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES) {
8239 + err = generic_skb_to_sg_fd(priv, skb, &fd);
8240 + if (unlikely(err < 0))
8241 + goto sg_failed;
8242 + percpu_priv->tx_frag_skbuffs++;
8243 + addr = qm_fd_addr(&fd);
8244 + } else {
8245 + if (unlikely(skb_headroom(skb) < priv->tx_headroom)) {
8246 + struct sk_buff *skb_new;
8247 +
8248 + skb_new = skb_realloc_headroom(skb, priv->tx_headroom);
8249 + if (unlikely(!skb_new)) {
8250 + percpu_stats->tx_errors++;
8251 + kfree_skb(skb);
8252 + goto done;
8253 + }
8254 +
8255 + kfree_skb(skb);
8256 + skb = skb_new;
8257 + }
8258 +
8259 + clear_fd(&fd);
8260 +
8261 + /* store skb backpointer to release the skb later */
8262 + skbh = (struct sk_buff **)(skb->data - priv->tx_headroom);
8263 + *skbh = skb;
8264 +
8265 + /* do this before dma_map_single(), because we may need to write
8266 + * into the skb.
8267 + */
8268 + err = dpa_generic_tx_csum(priv, skb, &fd,
8269 + ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
8270 + if (unlikely(err < 0)) {
8271 + if (netif_msg_tx_err(priv) && net_ratelimit())
8272 + netdev_err(netdev, "HW csum error: %d\n", err);
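+			/* ndo_start_xmit() must not return a negative errno;
+			 * drop the frame and account the error instead.
+			 */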
8273 +			goto dma_mapping_failed;
8274 + }
8275 +
8276 + addr = dma_map_single(bp->dev, skbh,
8277 + skb->len + priv->tx_headroom, DMA_TO_DEVICE);
8278 + if (unlikely(dma_mapping_error(bp->dev, addr))) {
8279 + if (netif_msg_tx_err(priv) && net_ratelimit())
8280 + netdev_err(netdev, "dma_map_single() failed\n");
8281 + goto dma_mapping_failed;
8282 + }
8283 +
8284 + fd.format = qm_fd_contig;
8285 + fd.length20 = skb->len;
8286 + fd.offset = priv->tx_headroom;
8287 + fd.addr_hi = (uint8_t)upper_32_bits(addr);
8288 + fd.addr_lo = lower_32_bits(addr);
8289 + /* fd.cmd |= FM_FD_CMD_FCO; */
8290 + fd.bpid = bp->bpid;
8291 + }
8292 +
8293 + dpa_generic_drain_bp(bp, 1);
8294 + dpa_generic_drain_sg_bp(sg_bp, 1);
8295 +
8296 + queue_mapping = dpa_get_queue_mapping(skb);
8297 + egress_fq = priv->egress_fqs[queue_mapping];
8298 +
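+	/* qman_enqueue() can transiently return -EBUSY, e.g. while the
+	 * enqueue command ring is full; retry a bounded number of times
+	 * instead of dropping the frame outright.
+	 */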
8299 + for (i = 0; i < 100000; i++) {
8300 + err = qman_enqueue(egress_fq, &fd, 0);
8301 + if (err != -EBUSY)
8302 + break;
8303 + }
8304 +
8305 + if (unlikely(err < 0)) {
8306 + percpu_stats->tx_fifo_errors++;
8307 + goto xmit_failed;
8308 + }
8309 +
8310 + countptr = raw_cpu_ptr(bp->percpu_count);
8311 + (*countptr)++;
8312 +
8313 + percpu_stats->tx_packets++;
8314 + percpu_stats->tx_bytes += fd.length20;
8315 + netdev->trans_start = jiffies;
8316 +
8317 + goto done;
8318 +
8319 +xmit_failed:
8320 + dma_unmap_single(bp->dev, addr, fd.offset + fd.length20, DMA_TO_DEVICE);
8321 +sg_failed:
8322 +dma_mapping_failed:
8323 + percpu_stats->tx_errors++;
8324 + dev_kfree_skb(skb);
8325 +done:
8326 + return NETDEV_TX_OK;
8327 +}
8328 +
8329 +static int dpa_generic_napi_add(struct net_device *net_dev)
8330 +{
8331 + struct dpa_generic_priv_s *priv = netdev_priv(net_dev);
8332 + struct dpa_percpu_priv_s *percpu_priv;
8333 + int i, cpu;
8334 +
8335 + for_each_possible_cpu(cpu) {
8336 + percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
8337 +
8338 + percpu_priv->np = devm_kzalloc(net_dev->dev.parent,
8339 + qman_portal_max * sizeof(struct dpa_napi_portal),
8340 + GFP_KERNEL);
8341 +
8342 + if (unlikely(percpu_priv->np == NULL)) {
8343 + dev_err(net_dev->dev.parent, "devm_kzalloc() failed\n");
8344 + return -ENOMEM;
8345 + }
8346 +
8347 + for (i = 0; i < qman_portal_max; i++)
8348 + netif_napi_add(net_dev, &percpu_priv->np[i].napi,
8349 + dpaa_eth_poll, DPA_GENERIC_NAPI_WEIGHT);
8350 + }
8351 +
8352 + return 0;
8353 +}
8354 +
8355 +static void dpa_generic_napi_del(struct net_device *net_dev)
8356 +{
8357 + struct dpa_generic_priv_s *priv = netdev_priv(net_dev);
8358 + struct dpa_percpu_priv_s *percpu_priv;
8359 + int i, cpu;
8360 +
8361 + for_each_possible_cpu(cpu) {
8362 + percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
8363 +
8364 + if (percpu_priv->np) {
8365 + for (i = 0; i < qman_portal_max; i++)
8366 + netif_napi_del(&percpu_priv->np[i].napi);
8367 +
8368 + devm_kfree(net_dev->dev.parent, percpu_priv->np);
8369 + }
8370 + }
8371 +}
8372 +
8374 +static int dpa_generic_netdev_init(struct device_node *dpa_node,
8375 + struct net_device *netdev)
8376 +{
8377 + struct dpa_generic_priv_s *priv = netdev_priv(netdev);
8378 + struct device *dev = netdev->dev.parent;
8379 + const uint8_t *mac_addr;
8380 + int err;
8381 +
8382 + netdev->netdev_ops = &dpa_generic_ops;
8383 +
8384 + mac_addr = of_get_mac_address(dpa_node);
8385 + if (mac_addr == NULL) {
8386 + if (netif_msg_probe(priv))
8387 + dev_err(dev, "No virtual MAC address found!\n");
8388 + return -EINVAL;
8389 + }
8390 +
8391 + netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG;
8392 + netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
8393 + netdev->features |= netdev->hw_features;
8394 + netdev->vlan_features = netdev->features;
8395 +
8396 + memcpy(netdev->perm_addr, mac_addr, netdev->addr_len);
8397 + memcpy(netdev->dev_addr, mac_addr, netdev->addr_len);
8398 +
8399 + netdev->ethtool_ops = &dpa_generic_ethtool_ops;
8400 +
8401 + netdev->needed_headroom = priv->tx_headroom;
8402 + netdev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
8403 +
8404 + err = register_netdev(netdev);
8405 + if (err < 0) {
8406 + dev_err(dev, "register_netdev() = %d\n", err);
8407 + return err;
8408 + }
8409 +
8410 + return 0;
8411 +}
8412 +
8413 +static struct dpa_fq_cbs_t generic_fq_cbs = {
8414 + .rx_defq = { .cb = { .dqrr = dpa_generic_rx_dqrr } },
8415 + .rx_errq = { .cb = { .dqrr = dpa_generic_rx_err_dqrr } },
8416 + .egress_ern = { .cb = { .ern = dpa_generic_ern } }
8417 +};
8418 +
8419 +static struct fqid_cell *__fq_alloc(struct device *dev,
8420 + int num_ranges,
8421 + const void *fqids_off)
8422 +{
8423 + struct fqid_cell *fqids;
8424 + int i;
8425 +
8426 + fqids = kzalloc(sizeof(*fqids) * num_ranges, GFP_KERNEL);
8427 + if (fqids == NULL)
8428 + return NULL;
8429 +
8430 +	/* convert to CPU endianness */
8431 + for (i = 0; i < num_ranges; i++) {
8432 + fqids[i].start = be32_to_cpup(fqids_off +
8433 + i * sizeof(*fqids));
8434 + fqids[i].count = be32_to_cpup(fqids_off +
8435 + i * sizeof(*fqids) + sizeof(__be32));
8436 + }
8437 +
8438 + return fqids;
8439 +}
8440 +
8441 +static struct list_head *dpa_generic_fq_probe(struct platform_device *_of_dev,
8442 + struct fm_port *tx_port)
8443 +{
8444 + struct device *dev = &_of_dev->dev;
8445 + struct device_node *oh_node = NULL;
8446 + struct device_node *onic_node = NULL;
8447 + struct fqid_cell *fqids;
8448 + const void *fqids_off;
8449 + struct dpa_fq *fq, *tmp;
8450 + struct list_head *list;
8451 + int num_ranges;
8452 + int i, lenp;
8453 +
8454 + onic_node = dev->of_node;
8455 +
8456 + list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
8457 + if (!list) {
8458 + dev_err(dev, "Cannot allocate space for frame queues list\n");
8459 + return ERR_PTR(-ENOMEM);
8460 + }
8461 +
8462 + INIT_LIST_HEAD(list);
8463 +
8464 + /* RX queues (RX error, RX default) are specified in Rx O/H port node */
8465 + oh_node = get_rx_op_port_node(_of_dev);
8466 + fqids_off = of_get_property(oh_node, "fsl,qman-frame-queues-oh", &lenp);
8467 + if (fqids_off == NULL) {
8468 + dev_err(dev, "Need Rx FQ definition in dts for generic devices\n");
 + of_node_put(oh_node);
8469 + return ERR_PTR(-EINVAL);
8470 + }
8471 + of_node_put(oh_node);
8472 +
8473 + num_ranges = lenp / sizeof(*fqids);
8474 + if (num_ranges != 2) {
8475 + dev_err(dev, "Need 2 Rx FQ definitions in dts for generic devices\n");
8476 + return ERR_PTR(-EINVAL);
8477 + }
8478 +
8479 + fqids = __fq_alloc(dev, num_ranges, fqids_off);
 + if (fqids == NULL)
 + return ERR_PTR(-ENOMEM);
8480 + if (!dpa_fq_alloc(dev, fqids[0].start, fqids[0].count, list,
8481 + FQ_TYPE_RX_ERROR) ||
8482 + !dpa_fq_alloc(dev, fqids[1].start, fqids[1].count,
8483 + list, FQ_TYPE_RX_DEFAULT)) {
8484 + dev_err(dev, "Cannot allocate space for default frame queues\n");
8485 + return ERR_PTR(-ENOMEM);
8486 + }
8487 + kfree(fqids);
8488 +
8489 + /* TX queues */
8490 + fqids_off = of_get_property(onic_node, "fsl,qman-frame-queues-tx",
8491 + &lenp);
8492 + if (fqids_off == NULL) {
8493 + dev_err(dev, "Need Tx FQ definition in dts for generic devices\n");
8494 + return ERR_PTR(-EINVAL);
8495 + }
8496 +
8497 + num_ranges = lenp / sizeof(*fqids);
8498 + fqids = __fq_alloc(dev, num_ranges, fqids_off);
 + if (fqids == NULL)
 + return ERR_PTR(-ENOMEM);
8499 + for (i = 0; i < num_ranges; i++) {
8500 + if (!dpa_fq_alloc(dev, fqids[i].start, fqids[i].count, list,
8501 + FQ_TYPE_TX)) {
8502 + dev_err(dev, "_dpa_fq_alloc() failed\n");
8503 + return ERR_PTR(-ENOMEM);
8504 + }
8505 + }
8506 + kfree(fqids);
8507 +
8508 + /* optional RX PCD queues */
8509 + lenp = 0;
8510 + fqids_off = of_get_property(onic_node,
8511 + "fsl,qman-frame-queues-rx", &lenp);
8512 + num_ranges = lenp / sizeof(*fqids);
8513 + fqids = __fq_alloc(dev, num_ranges, fqids_off);
 + if (fqids == NULL)
 + return ERR_PTR(-ENOMEM);
8514 + for (i = 0; i < num_ranges; i++) {
8515 + if (!dpa_fq_alloc(dev, fqids[i].start, fqids[i].count, list,
8516 + FQ_TYPE_RX_PCD)) {
8517 + dev_err(dev, "_dpa_fq_alloc() failed\n");
8518 + return ERR_PTR(-ENOMEM);
8519 + }
8520 + }
8521 + kfree(fqids);
8522 +
8523 + list_for_each_entry_safe(fq, tmp, list, list) {
8524 + if (fq->fq_type == FQ_TYPE_TX)
8525 + fq->channel = fm_get_tx_port_channel(tx_port);
8526 + }
8527 +
8528 + return list;
8529 +}
8530 +
8531 +static void dpa_generic_ern(struct qman_portal *portal,
8532 + struct qman_fq *fq,
8533 + const struct qm_mr_entry *msg)
8534 +{
8535 + struct net_device *netdev;
8536 + const struct dpa_generic_priv_s *priv;
8537 + struct dpa_percpu_priv_s *percpu_priv;
8538 + struct qm_fd fd = msg->ern.fd;
8539 +
8540 + netdev = ((struct dpa_fq *)fq)->net_dev;
8541 + priv = netdev_priv(netdev);
8542 + /* Non-migratable context, safe to use raw_cpu_ptr */
8543 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
8544 + percpu_priv->stats.tx_dropped++;
8545 + percpu_priv->stats.tx_fifo_errors++;
8546 + count_ern(percpu_priv, msg);
8547 +
8548 + /* release this buffer into the draining buffer pool */
8549 + dpa_fd_release(netdev, &fd);
8550 +}
8551 +
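 +/* Rx buffer pools are described by the "fsl,bman-buffer-pools" phandles of
 + * the Rx O/H port node; each pool node carries an "fsl,bpid" and an
 + * "fsl,bpool-ethernet-cfg" <count size base-address> tuple (the base
 + * address is not used here).
 + */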
8552 +static int dpa_generic_rx_bp_probe(struct platform_device *_of_dev,
8553 + struct fm_port *rx_port,
8554 + int *rx_bp_count,
8555 + struct dpa_bp **rx_bp,
8556 + struct dpa_buffer_layout_s **rx_buf_layout)
8557 +{
8558 + struct device *dev = &_of_dev->dev;
8559 + struct fm_port_params params;
8560 + struct dpa_bp *bp = NULL;
8561 + int bp_count = 0;
8562 + int bpid;
8563 + const __be32 *bpool_cfg = NULL;
8564 + struct device_node *dev_node = NULL;
8565 + struct device_node *oh_node = NULL;
8566 + struct dpa_buffer_layout_s *buf_layout = NULL;
8567 + int lenp = 0;
8568 + int na = 0, ns = 0;
8569 + int err = 0, i = 0;
8570 +
8571 + oh_node = get_rx_op_port_node(_of_dev);
8572 +
8573 + bp_count = of_count_phandle_with_args(oh_node,
8574 + "fsl,bman-buffer-pools", NULL);
8575 + if (bp_count <= 0) {
8576 + dev_err(dev, "Missing buffer pool handles from onic node from device tree\n");
8577 + return -EINVAL;
8578 + }
8579 +
8580 + bp = devm_kzalloc(dev, bp_count * sizeof(*bp), GFP_KERNEL);
8581 + if (unlikely(bp == NULL)) {
8582 + dev_err(dev, "devm_kzalloc() failed\n");
8583 + err = -ENOMEM;
8584 + goto _return_of_node_put;
8585 + }
8586 +
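 + /* The root node's #address-cells/#size-cells determine how the
 +  * fsl,bpool-ethernet-cfg cells are parsed below.
 +  */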
8587 + dev_node = of_find_node_by_path("/");
8588 + if (unlikely(dev_node == NULL)) {
8589 + dev_err(dev, "of_find_node_by_path(/) failed\n");
8590 + err = -EINVAL;
8591 + goto _return_of_node_put;
8592 + }
8593 +
8594 + na = of_n_addr_cells(dev_node);
8595 + ns = of_n_size_cells(dev_node);
8596 +
8597 + of_node_put(dev_node);
8598 +
8599 + for (i = 0; i < bp_count; i++) {
8600 + dev_node = of_parse_phandle(oh_node,
8601 + "fsl,bman-buffer-pools", i);
8602 + if (dev_node == NULL) {
8603 + dev_err(dev, "Cannot find buffer pool node in the device tree\n");
8604 + err = -EINVAL;
8605 + goto _return_of_node_put;
8606 + }
8607 +
8608 + err = of_property_read_u32(dev_node, "fsl,bpid", &bpid);
8609 + if (err) {
8610 + dev_err(dev, "Cannot find buffer pool ID in the buffer pool node in the device tree\n");
8611 + goto _return_of_node_put;
8612 + }
8613 +
8614 + bp[i].bpid = (uint8_t)bpid;
8615 +
8616 + bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg",
8617 + &lenp);
8618 + if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) {
8619 + bp[i].config_count = (int)of_read_number(bpool_cfg, ns);
8620 + bp[i].size = of_read_number(bpool_cfg + ns, ns);
8621 + bp[i].paddr = 0;
8622 + bp[i].seed_pool = false;
8623 + } else {
8624 + dev_err(dev, "Missing/invalid fsl,bpool-ethernet-cfg device tree entry for node %s\n",
8625 + dev_node->full_name);
8626 + err = -EINVAL;
8627 + goto _return_of_node_put;
8628 + }
8629 +
8630 + bp[i].percpu_count = devm_alloc_percpu(dev,
8631 + *bp[i].percpu_count);
8632 + }
8633 +
8634 + of_node_put(oh_node);
8635 +
8636 + buf_layout = devm_kzalloc(dev, sizeof(*buf_layout), GFP_KERNEL);
8637 + if (!buf_layout) {
8638 + dev_err(dev, "devm_kzalloc() failed\n");
8639 + err = -ENOMEM;
8640 + goto _return_of_node_put;
8641 + }
8642 +
8643 + buf_layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE;
8644 + buf_layout->parse_results = false;
8645 + buf_layout->hash_results = false;
8646 + buf_layout->time_stamp = false;
8647 + fm_port_get_buff_layout_ext_params(rx_port, &params);
8648 + buf_layout->manip_extra_space = params.manip_extra_space;
8649 + /* a value of zero for data alignment means "don't care", so align to
8650 + * a non-zero value to prevent FMD from using its own default
8651 + */
8652 + buf_layout->data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
8653 +
8654 + *rx_buf_layout = buf_layout;
8655 + *rx_bp = bp;
8656 + *rx_bp_count = bp_count;
8657 +
8658 + return 0;
8659 +
8660 +_return_of_node_put:
8661 + if (dev_node)
8662 + of_node_put(dev_node);
8663 +
8664 + return err;
8665 +}
8666 +
8667 +static int dpa_generic_tx_bp_probe(struct platform_device *_of_dev,
8668 + struct fm_port *tx_port,
8669 + struct dpa_bp **draining_tx_bp,
8670 + struct dpa_bp **draining_tx_sg_bp,
8671 + struct dpa_buffer_layout_s **tx_buf_layout)
8672 +{
8673 + struct device *dev = &_of_dev->dev;
8674 + struct fm_port_params params;
8675 + struct dpa_bp *bp = NULL;
8676 + struct dpa_bp *bp_sg = NULL;
8677 + struct dpa_buffer_layout_s *buf_layout = NULL;
8678 +
8679 + buf_layout = devm_kzalloc(dev, sizeof(*buf_layout), GFP_KERNEL);
8680 + if (!buf_layout) {
8681 + dev_err(dev, "devm_kzalloc() failed\n");
8682 + return -ENOMEM;
8683 + }
8684 +
8685 + buf_layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE;
8686 + buf_layout->parse_results = true;
8687 + buf_layout->hash_results = true;
8688 + buf_layout->time_stamp = false;
8689 +
8690 + fm_port_get_buff_layout_ext_params(tx_port, &params);
8691 + buf_layout->manip_extra_space = params.manip_extra_space;
8692 + buf_layout->data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT;
8693 +
8694 + bp = devm_kzalloc(dev, sizeof(*bp), GFP_KERNEL);
8695 + if (unlikely(bp == NULL)) {
8696 + dev_err(dev, "devm_kzalloc() failed\n");
8697 + return -ENOMEM;
8698 + }
8699 +
8700 + bp->size = dpa_bp_size(buf_layout);
8701 + bp->percpu_count = devm_alloc_percpu(dev, *bp->percpu_count);
8702 + bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
8703 +
8704 + *draining_tx_bp = bp;
8705 +
8706 + bp_sg = devm_kzalloc(dev, sizeof(*bp_sg), GFP_KERNEL);
8707 + if (unlikely(bp_sg == NULL)) {
8708 + dev_err(dev, "devm_kzalloc() failed\n");
8709 + return -ENOMEM;
8710 + }
8711 +
8712 + bp_sg->size = dpa_bp_size(buf_layout);
8713 + bp_sg->percpu_count = alloc_percpu(*bp_sg->percpu_count);
8714 + bp_sg->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT;
8715 +
8716 + *draining_tx_sg_bp = bp_sg;
8717 +
8718 + *tx_buf_layout = buf_layout;
8719 +
8720 + return 0;
8721 +}
8722 +
8723 +static int dpa_generic_buff_dealloc_probe(struct platform_device *_of_dev,
8724 + int *disable_buff_dealloc)
8725 +{
8726 + struct device *dev = &_of_dev->dev;
8727 + const phandle *disable_handle = NULL;
8728 + int lenp = 0;
8729 + int err = 0;
8730 +
8731 + disable_handle = of_get_property(dev->of_node,
8732 + "fsl,disable_buff_dealloc", &lenp);
8733 + if (disable_handle != NULL)
8734 + *disable_buff_dealloc = 1;
8735 +
8736 + return err;
8737 +}
8738 +
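 +/* A generic (oNIC) interface is backed by a pair of offline (O/H) ports
 + * listed in "fsl,oh-ports": entry RX is used for reception, entry TX for
 + * transmission.
 + */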
8739 +static int dpa_generic_port_probe(struct platform_device *_of_dev,
8740 + struct fm_port **rx_port,
8741 + struct fm_port **tx_port)
8742 +{
8743 + struct device *dev = &_of_dev->dev;
8744 + struct device_node *dev_node = NULL;
8745 + struct device_node *onic_node = NULL;
8746 + int num_ports = 0;
8747 + int err = 0;
8748 +
8749 + onic_node = dev->of_node;
8750 +
8751 + num_ports = of_count_phandle_with_args(onic_node, "fsl,oh-ports", NULL);
8752 + if (num_ports != 2) {
8753 + dev_err(dev, "There should be two OH ports in device tree (one for RX, one for TX\n");
8754 + return -EINVAL;
8755 + }
8756 +
8757 + dev_node = of_parse_phandle(onic_node, "fsl,oh-ports", RX);
8758 + if (dev_node == NULL) {
8759 + dev_err(dev, "Cannot find Rx OH port node in device tree\n");
8760 + return -EFAULT;
8761 + }
8762 +
8763 + err = get_port_ref(dev_node, rx_port);
8764 + if (err) {
8765 + dev_err(dev, "Cannot read Rx OH port node in device tree\n");
8766 + return err;
8767 + }
8768 +
8769 + dev_node = of_parse_phandle(onic_node, "fsl,oh-ports", TX);
8770 + if (dev_node == NULL) {
8771 + dev_err(dev, "Cannot find Tx OH port node in device tree\n");
8772 + return -EFAULT;
8773 + }
8774 +
8775 + err = get_port_ref(dev_node, tx_port);
8776 + if (err) {
8777 + dev_err(dev, "Cannot read Tx OH port node in device tree\n");
8778 + return err;
8779 + }
8780 +
8781 + return 0;
8782 +}
8783 +
8784 +static inline void dpa_generic_setup_ingress(
8785 + const struct dpa_generic_priv_s *priv,
8786 + struct dpa_fq *fq,
8787 + const struct qman_fq *template)
8788 +{
8789 + fq->fq_base = *template;
8790 + fq->net_dev = priv->net_dev;
8791 +
8792 + fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
8793 + fq->channel = priv->channel;
8794 +}
8795 +
8796 +static inline void dpa_generic_setup_egress(
8797 + const struct dpa_generic_priv_s *priv,
8798 + struct dpa_fq *fq,
8799 + struct fm_port *port,
8800 + const struct qman_fq *template)
8801 +{
8802 + fq->fq_base = *template;
8803 + fq->net_dev = priv->net_dev;
8804 +
8805 + fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
8806 + fq->channel = fm_get_tx_port_channel(port);
8807 +}
8808 +
8809 +static void dpa_generic_fq_setup(struct dpa_generic_priv_s *priv,
8810 + const struct dpa_fq_cbs_t *fq_cbs,
8811 + struct fm_port *tx_port)
8812 +{
8813 + struct dpa_fq *fq;
8814 + int egress_cnt = 0;
8815 +
8816 + /* Initialize each FQ in the list */
8817 + list_for_each_entry(fq, &priv->dpa_fq_list, list) {
8818 + switch (fq->fq_type) {
8819 + case FQ_TYPE_RX_DEFAULT:
8820 + dpa_generic_setup_ingress(priv, fq, &fq_cbs->rx_defq);
8821 + break;
8822 + case FQ_TYPE_RX_ERROR:
8823 + dpa_generic_setup_ingress(priv, fq, &fq_cbs->rx_errq);
8824 + break;
8825 + case FQ_TYPE_RX_PCD:
8826 + dpa_generic_setup_ingress(priv, fq, &fq_cbs->rx_defq);
8827 + break;
8828 + case FQ_TYPE_TX:
8829 + dpa_generic_setup_egress(priv, fq,
8830 + tx_port, &fq_cbs->egress_ern);
8831 + /* If we have more Tx queues than the number of cores,
8832 + * just ignore the extra ones.
8833 + */
8834 + if (egress_cnt < DPAA_ETH_TX_QUEUES)
8835 + priv->egress_fqs[egress_cnt++] = &fq->fq_base;
8836 + break;
8837 + default:
8838 + dev_warn(priv->net_dev->dev.parent,
8839 + "Unknown FQ type detected!\n");
8840 + break;
8841 + }
8842 + }
8843 +
8844 + /* The number of Tx queues may be smaller than the number of cores, if
8845 + * the Tx queue range is specified in the device tree instead of being
8846 + * dynamically allocated.
8847 + * Make sure all CPUs receive a corresponding Tx queue.
8848 + */
8849 + while (egress_cnt < DPAA_ETH_TX_QUEUES) {
8850 + list_for_each_entry(fq, &priv->dpa_fq_list, list) {
8851 + if (fq->fq_type != FQ_TYPE_TX)
8852 + continue;
8853 + priv->egress_fqs[egress_cnt++] = &fq->fq_base;
8854 + if (egress_cnt == DPAA_ETH_TX_QUEUES)
8855 + break;
8856 + }
8857 + }
8858 +}
8859 +
8860 +static int dpa_generic_fq_init(struct dpa_fq *dpa_fq, int disable_buff_dealloc)
8861 +{
8862 + int _errno;
8863 + struct device *dev;
8864 + struct qman_fq *fq;
8865 + struct qm_mcc_initfq initfq;
8866 +
8867 + dev = dpa_fq->net_dev->dev.parent;
8868 +
8869 + if (dpa_fq->fqid == 0)
8870 + dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
8871 +
8872 + _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
8873 + if (_errno) {
8874 + dev_err(dev, "qman_create_fq() failed\n");
8875 + return _errno;
8876 + }
8877 + fq = &dpa_fq->fq_base;
8878 +
8879 + initfq.we_mask = QM_INITFQ_WE_FQCTRL;
8880 + /* FIXME: why would we want to keep an empty FQ in cache? */
8881 + initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
8882 +
8883 + /* FQ placement */
8884 + initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
8885 +
8886 + initfq.fqd.dest.channel = dpa_fq->channel;
8887 + initfq.fqd.dest.wq = dpa_fq->wq;
8888 +
8889 + if (dpa_fq->fq_type == FQ_TYPE_TX && !disable_buff_dealloc) {
8890 + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
8891 + /* ContextA: A2V=1 (contextA A2 field is valid)
8892 + * ContextA A2: EBD=1 (deallocate buffers inside FMan)
8893 + */
8894 + initfq.fqd.context_a.hi = 0x10000000;
8895 + initfq.fqd.context_a.lo = 0x80000000;
8896 + }
8897 +
8898 + /* Initialization common to all ingress queues */
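 + /* Stash dequeued frame data (2 cache lines), annotation (1 line) and
 +  * enough lines to cover the FQ context into the CPU cache, to speed
 +  * up the Rx path.
 +  */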
8899 + if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
8900 + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
8901 + initfq.fqd.fq_ctrl |=
8902 + QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
8903 + initfq.fqd.context_a.stashing.exclusive =
8904 + QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
8905 + QM_STASHING_EXCL_ANNOTATION;
8906 + initfq.fqd.context_a.stashing.data_cl = 2;
8907 + initfq.fqd.context_a.stashing.annotation_cl = 1;
8908 + initfq.fqd.context_a.stashing.context_cl =
8909 + DIV_ROUND_UP(sizeof(struct qman_fq), 64);
8910 + }
8911 +
8912 + _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
8913 + if (_errno < 0) {
8914 + dev_err(dev, "qman_init_fq(%u) = %d\n",
8915 + qman_fq_fqid(fq), _errno);
8916 + qman_destroy_fq(fq, 0);
8917 + return _errno;
8918 + }
8919 +
8920 + dpa_fq->fqid = qman_fq_fqid(fq);
8921 +
8922 + return 0;
8923 +}
8924 +
8925 +static int dpa_generic_fq_create(struct net_device *netdev,
8926 + struct list_head *dpa_fq_list,
8927 + struct fm_port *tx_port)
8928 +{
8929 + struct dpa_generic_priv_s *priv = netdev_priv(netdev);
8930 + struct dpa_fq *fqs = NULL, *tmp = NULL;
8931 + struct task_struct *kth;
8932 + int err = 0;
8933 + int channel;
8934 +
8935 + INIT_LIST_HEAD(&priv->dpa_fq_list);
8936 +
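 + /* Take over the FQ list built at probe time; from here on the
 +  * private structure owns these frame queues.
 +  */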
8937 + list_replace_init(dpa_fq_list, &priv->dpa_fq_list);
8938 +
8939 + channel = dpa_get_channel();
8940 + if (channel < 0)
8941 + return channel;
8942 + priv->channel = (uint16_t)channel;
8943 +
8944 + /* Start a thread that will walk the cpus with affine portals
8945 + * and add this pool channel to each portal's dequeue mask.
8946 + */
8947 + kth = kthread_run(dpaa_eth_add_channel,
8948 + (void *)(unsigned long)priv->channel,
8949 + "dpaa_%p:%d", netdev, priv->channel);
8950 + if (IS_ERR(kth))
8951 + return PTR_ERR(kth);
8952 +
8953 + dpa_generic_fq_setup(priv, &generic_fq_cbs, tx_port);
8954 +
8955 + /* Add the FQs to the interface, and make them active */
8956 + list_for_each_entry_safe(fqs, tmp, &priv->dpa_fq_list, list) {
8957 + err = dpa_generic_fq_init(fqs, priv->disable_buff_dealloc);
8958 + if (err)
8959 + return err;
8960 + }
8961 +
8962 + return 0;
8963 +}
8964 +
8965 +static int dpa_generic_bp_create(struct net_device *net_dev,
8966 + int rx_bp_count,
8967 + struct dpa_bp *rx_bp,
8968 + struct dpa_buffer_layout_s *rx_buf_layout,
8969 + struct dpa_bp *draining_tx_bp,
8970 + struct dpa_bp *draining_tx_sg_bp,
8971 + struct dpa_buffer_layout_s *tx_buf_layout)
8972 +{
8973 + struct dpa_generic_priv_s *priv = netdev_priv(net_dev);
8974 + int err = 0;
8975 +
8976 + /* TODO: multiple Rx bps */
8977 + priv->rx_bp_count = rx_bp_count;
8978 + priv->rx_bp = rx_bp;
8979 + priv->rx_buf_layout = rx_buf_layout;
8980 + priv->draining_tx_bp = draining_tx_bp;
8981 + priv->draining_tx_sg_bp = draining_tx_sg_bp;
8982 + priv->tx_buf_layout = tx_buf_layout;
8983 +
8984 + err = dpa_bp_alloc(priv->rx_bp);
8985 + if (err < 0) {
8986 + priv->rx_bp = NULL;
8987 + return err;
8988 + }
8989 +
8990 + err = dpa_bp_alloc(priv->draining_tx_bp);
8991 + if (err < 0) {
8992 + priv->draining_tx_bp = NULL;
8993 + return err;
8994 + }
8995 +
8996 + err = dpa_bp_alloc(priv->draining_tx_sg_bp);
8997 + if (err < 0) {
8998 + priv->draining_tx_sg_bp = NULL;
8999 + return err;
9000 + }
9001 +
9002 + return 0;
9003 +}
9004 +
9005 +static void dpa_generic_release_bp(struct dpa_bp *bp)
9006 +{
9007 + if (!bp)
9008 + return;
9009 +
9010 + if (!atomic_dec_and_test(&bp->refs))
9011 + return;
9012 +
9013 + if (bp->free_buf_cb)
9014 + dpa_bp_drain(bp);
9015 +
9016 + bman_free_pool(bp->pool);
9017 +
9018 + if (bp->dev)
9019 + platform_device_unregister(to_platform_device(bp->dev));
9020 +}
9021 +
9022 +static void dpa_generic_bp_free(struct dpa_generic_priv_s *priv)
9023 +{
9024 + int i = 0;
9025 +
9026 + /* release the rx bpools */
9027 + for (i = 0; i < priv->rx_bp_count; i++)
9028 + dpa_generic_release_bp(&priv->rx_bp[i]);
9029 +
9030 + /* release the tx draining bpools */
9031 + dpa_generic_release_bp(priv->draining_tx_bp);
9032 + dpa_generic_release_bp(priv->draining_tx_sg_bp);
9033 +}
9034 +
9035 +static int dpa_generic_remove(struct platform_device *of_dev)
9036 +{
9037 + int err;
9038 + struct device *dev;
9039 + struct net_device *net_dev;
9040 + struct dpa_generic_priv_s *priv;
9041 +
9042 + dev = &of_dev->dev;
9043 + net_dev = dev_get_drvdata(dev);
9044 + priv = netdev_priv(net_dev);
9045 +
9046 + dpaa_eth_generic_sysfs_remove(dev);
9047 +
9048 + dev_set_drvdata(dev, NULL);
9049 + unregister_netdev(net_dev);
9050 +
9051 + err = dpa_fq_free(dev, &priv->dpa_fq_list);
9052 +
9053 + dpa_generic_napi_del(net_dev);
9054 +
9055 + dpa_generic_bp_free(priv);
9056 +
9057 + free_netdev(net_dev);
9058 +
9059 + return err;
9060 +}
9061 +
9062 +static int dpa_generic_eth_probe(struct platform_device *_of_dev)
9063 +{
9064 + struct device *dev = &_of_dev->dev;
9065 + struct device_node *dpa_node = dev->of_node;
9066 + struct net_device *netdev = NULL;
9067 + struct dpa_generic_priv_s *priv;
9068 + struct fm_port *rx_port = NULL;
9069 + struct fm_port *tx_port = NULL;
9070 + struct dpa_percpu_priv_s *percpu_priv;
9071 + int rx_bp_count = 0;
9072 + int disable_buff_dealloc = 0;
9073 + struct dpa_bp *rx_bp = NULL, *draining_tx_bp = NULL;
9074 + struct dpa_bp *draining_tx_sg_bp = NULL;
9075 + struct dpa_buffer_layout_s *rx_buf_layout = NULL, *tx_buf_layout = NULL;
9076 + struct list_head *dpa_fq_list;
9077 + static u8 generic_idx;
9078 + int err = 0;
9079 + int i = 0;
9080 +
9081 + if (!of_device_is_available(dpa_node))
9082 + return -ENODEV;
9083 +
9084 + err = dpa_generic_port_probe(_of_dev, &tx_port, &rx_port);
9085 + if (err < 0)
9086 + return err;
9087 +
9088 + err = dpa_generic_rx_bp_probe(_of_dev, rx_port, &rx_bp_count,
9089 + &rx_bp, &rx_buf_layout);
9090 + if (err < 0)
9091 + return err;
9092 +
9093 + err = dpa_generic_tx_bp_probe(_of_dev, tx_port, &draining_tx_bp,
9094 + &draining_tx_sg_bp, &tx_buf_layout);
9095 + if (err < 0)
9096 + return err;
9097 +
9098 + dpa_fq_list = dpa_generic_fq_probe(_of_dev, tx_port);
9099 + if (IS_ERR(dpa_fq_list))
9100 + return PTR_ERR(dpa_fq_list);
9101 +
9102 + err = dpa_generic_buff_dealloc_probe(_of_dev, &disable_buff_dealloc);
9103 + if (err < 0)
9104 + return err;
9105 +
9106 + /* just one queue for now */
9107 + netdev = alloc_etherdev_mq(sizeof(*priv), 1);
9108 + if (!netdev) {
9109 + dev_err(dev, "alloc_etherdev_mq() failed\n");
9110 + return -ENOMEM;
9111 + }
9112 +
9113 + SET_NETDEV_DEV(netdev, dev);
9114 + dev_set_drvdata(dev, netdev);
9115 + priv = netdev_priv(netdev);
9116 + priv->net_dev = netdev;
9117 + sprintf(priv->if_type, "generic%d", generic_idx++);
9118 + priv->msg_enable = netif_msg_init(generic_debug, -1);
9119 + priv->tx_headroom = DPA_DEFAULT_TX_HEADROOM;
9120 +
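 + /* Set up (but do not yet arm) the timer that periodically drains
 +  * the Tx draining buffer pools.
 +  */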
9121 + init_timer(&priv->timer);
9122 + priv->timer.data = (unsigned long)priv;
9123 + priv->timer.function = dpa_generic_draining_timer;
9124 +
9125 + err = dpa_generic_bp_create(netdev, rx_bp_count, rx_bp, rx_buf_layout,
9126 + draining_tx_bp, draining_tx_sg_bp, tx_buf_layout);
9127 + if (err < 0)
9128 + goto bp_create_failed;
9129 +
9130 + priv->disable_buff_dealloc = disable_buff_dealloc;
9131 +
9132 + err = dpa_generic_fq_create(netdev, dpa_fq_list, rx_port);
9133 + if (err < 0)
9134 + goto fq_create_failed;
9135 +
9136 + priv->tx_headroom = dpa_get_headroom(tx_buf_layout);
9137 + priv->rx_headroom = dpa_get_headroom(rx_buf_layout);
9138 + priv->rx_port = rx_port;
9139 + priv->tx_port = tx_port;
9140 + priv->mac_dev = NULL;
9141 +
9142 +
9143 + priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
9144 + if (priv->percpu_priv == NULL) {
9145 + dev_err(dev, "devm_alloc_percpu() failed\n");
9146 + err = -ENOMEM;
9147 + goto alloc_percpu_failed;
9148 + }
9149 + for_each_possible_cpu(i) {
9150 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
9151 + memset(percpu_priv, 0, sizeof(*percpu_priv));
9152 + }
9153 +
9154 + /* Initialize NAPI */
9155 + err = dpa_generic_napi_add(netdev);
9156 + if (err < 0)
9157 + goto napi_add_failed;
9158 +
9159 + err = dpa_generic_netdev_init(dpa_node, netdev);
9160 + if (err < 0)
9161 + goto netdev_init_failed;
9162 +
9163 + dpaa_eth_generic_sysfs_init(&netdev->dev);
9164 +
9165 + pr_info("fsl_dpa_generic: Probed %s interface as %s\n",
9166 + priv->if_type, netdev->name);
9167 +
9168 + return 0;
9169 +
9170 +netdev_init_failed:
9171 +napi_add_failed:
9172 + dpa_generic_napi_del(netdev);
9173 +alloc_percpu_failed:
9174 + if (netdev)
9175 + dpa_fq_free(dev, &priv->dpa_fq_list);
9176 +fq_create_failed:
9177 +bp_create_failed:
9178 + if (netdev)
9179 + dpa_generic_bp_free(priv);
9180 + dev_set_drvdata(dev, NULL);
9181 + if (netdev)
9182 + free_netdev(netdev);
9183 +
9184 + return err;
9185 +}
9186 +
9187 +static int __init __cold dpa_generic_load(void)
9188 +{
9189 + int _errno;
9190 +
9191 + pr_info(KBUILD_MODNAME ": " DPA_GENERIC_DESCRIPTION "\n");
9192 +
9193 + /* Initialize dpaa_eth mirror values */
9194 + dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
9195 + dpa_max_frm = fm_get_max_frm();
9196 +
9197 + _errno = platform_driver_register(&dpa_generic_driver);
9198 + if (unlikely(_errno < 0)) {
9199 + pr_err(KBUILD_MODNAME
9200 + ": %s:%hu:%s(): platform_driver_register() = %d\n",
9201 + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
9202 + }
9203 +
9204 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
9205 + KBUILD_BASENAME".c", __func__);
9206 +
9207 + return _errno;
9208 +}
9209 +
9210 +/* Wait for all referenced ports to be initialized
9211 + * by other kernel modules (proxy ethernet, offline_port).
9212 + */
9213 +late_initcall(dpa_generic_load);
9214 +
9215 +static void __exit __cold dpa_generic_unload(void)
9216 +{
9217 + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
9218 + KBUILD_BASENAME".c", __func__);
9219 +
9220 + platform_driver_unregister(&dpa_generic_driver);
9221 +
9222 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
9223 + KBUILD_BASENAME".c", __func__);
9224 +}
9225 +module_exit(dpa_generic_unload);
9226 --- /dev/null
9227 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic.h
9228 @@ -0,0 +1,90 @@
9229 +/* Copyright 2013 Freescale Semiconductor Inc.
9230 + *
9231 + * Redistribution and use in source and binary forms, with or without
9232 + * modification, are permitted provided that the following conditions are met:
9233 + * * Redistributions of source code must retain the above copyright
9234 + * notice, this list of conditions and the following disclaimer.
9235 + * * Redistributions in binary form must reproduce the above copyright
9236 + * notice, this list of conditions and the following disclaimer in the
9237 + * documentation and/or other materials provided with the distribution.
9238 + * * Neither the name of Freescale Semiconductor nor the
9239 + * names of its contributors may be used to endorse or promote products
9240 + * derived from this software without specific prior written permission.
9241 + *
9242 + *
9243 + * ALTERNATIVELY, this software may be distributed under the terms of the
9244 + * GNU General Public License ("GPL") as published by the Free Software
9245 + * Foundation, either version 2 of that License or (at your option) any
9246 + * later version.
9247 + *
9248 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
9249 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
9250 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
9251 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
9252 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
9253 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
9254 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
9255 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9256 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9257 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9258 + */
9259 +
9260 +#ifndef __DPA_ETH_GENERIC_H
9261 +#define __DPA_ETH_GENERIC_H
9262 +
9263 +#include "lnxwrp_fsl_fman.h"
9264 +#include "dpaa_eth.h"
9265 +
9266 +struct dpa_generic_priv_s {
9267 + struct net_device *net_dev;
9268 + /* use the same percpu_priv as other DPAA Ethernet drivers */
9269 + struct dpa_percpu_priv_s __percpu *percpu_priv;
9270 +
9271 + /* up to 4 buffer pools supported on Rx */
9272 + int rx_bp_count;
9273 + struct dpa_bp *rx_bp;
9274 + struct dpa_buffer_layout_s *rx_buf_layout;
9275 +
9276 + struct dpa_bp *draining_tx_bp;
9277 + struct dpa_bp *draining_tx_sg_bp;
9278 + struct dpa_buffer_layout_s *tx_buf_layout;
9279 +
9280 + /* Store here the needed Tx headroom for convenience and speed
9281 + * (even though it can be computed based on the fields of buf_layout)
9282 + */
9283 + uint16_t tx_headroom;
9284 + uint16_t rx_headroom;
9285 +
9286 + /* In some scenarios, when VSPs are not enabled on the Tx O/H port,
9287 + * the buffers will be released by other hardware modules instead
9288 + */
9289 + int disable_buff_dealloc;
9290 +
9291 + struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];
9292 +
9293 + struct fm_port *rx_port;
9294 + struct fm_port *tx_port;
9295 +
9296 + /* oNIC can have limited control capabilities over a MAC device */
9297 + struct mac_device *mac_dev;
9298 +
9299 + uint16_t channel; /* "fsl,qman-channel-id" */
9300 + struct list_head dpa_fq_list;
9301 +
9302 + uint32_t msg_enable; /* net_device message level */
9303 +
9304 + struct dpa_buffer_layout_s *buf_layout;
9305 + char if_type[30];
9306 +
9307 + /* periodic drain */
9308 + struct timer_list timer;
9309 +};
9310 +
9311 +extern const struct ethtool_ops dpa_generic_ethtool_ops;
9312 +
9313 +void dpaa_eth_generic_sysfs_init(struct device *dev);
9314 +void dpaa_eth_generic_sysfs_remove(struct device *dev);
9315 +int __init dpa_generic_debugfs_module_init(void);
9316 +void __exit dpa_generic_debugfs_module_exit(void);
9317 +
9318 +#endif /* __DPA_ETH_GENERIC_H */
9319 --- /dev/null
9320 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_generic_sysfs.c
9321 @@ -0,0 +1,201 @@
9322 +/* Copyright 2014 Freescale Semiconductor Inc.
9323 + *
9324 + * Redistribution and use in source and binary forms, with or without
9325 + * modification, are permitted provided that the following conditions are met:
9326 + * * Redistributions of source code must retain the above copyright
9327 + * notice, this list of conditions and the following disclaimer.
9328 + * * Redistributions in binary form must reproduce the above copyright
9329 + * notice, this list of conditions and the following disclaimer in the
9330 + * documentation and/or other materials provided with the distribution.
9331 + * * Neither the name of Freescale Semiconductor nor the
9332 + * names of its contributors may be used to endorse or promote products
9333 + * derived from this software without specific prior written permission.
9334 + *
9335 + *
9336 + * ALTERNATIVELY, this software may be distributed under the terms of the
9337 + * GNU General Public License ("GPL") as published by the Free Software
9338 + * Foundation, either version 2 of that License or (at your option) any
9339 + * later version.
9340 + *
9341 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
9342 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
9343 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
9344 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
9345 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
9346 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
9347 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
9348 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9349 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9350 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9351 + */
9352 +
9353 +#include <linux/init.h>
9354 +#include <linux/module.h>
9355 +#include <linux/kthread.h>
9356 +#include <linux/io.h>
9357 +#include <linux/of_net.h>
9358 +
9359 +#include "dpaa_eth_generic.h"
9360 +#include "mac.h" /* struct mac_device */
9361 +
9362 +static ssize_t dpaa_eth_generic_show_addr(struct device *dev,
9363 + struct device_attribute *attr, char *buf)
9364 +{
9365 + struct dpa_generic_priv_s *priv = netdev_priv(to_net_dev(dev));
9366 + struct mac_device *mac_dev = priv->mac_dev;
9367 +
9368 + if (mac_dev)
9369 + return sprintf(buf, "%llx\n",
9370 + (unsigned long long)mac_dev->res->start);
9371 + else
9372 + return sprintf(buf, "none\n");
9373 +}
9374 +
9375 +static ssize_t dpaa_eth_generic_show_type(struct device *dev,
9376 + struct device_attribute *attr, char *buf)
9377 +{
9378 + ssize_t res = 0;
9379 + res = sprintf(buf, "generic\n");
9380 +
9381 + return res;
9382 +}
9383 +
9384 +static ssize_t dpaa_eth_generic_show_fqids(struct device *dev,
9385 + struct device_attribute *attr, char *buf)
9386 +{
9387 + struct dpa_generic_priv_s *priv = netdev_priv(to_net_dev(dev));
9388 + ssize_t bytes = 0;
9389 + int i = 0;
9390 + char *str;
9391 + struct dpa_fq *fq;
9392 + struct dpa_fq *tmp;
9393 + struct dpa_fq *prev = NULL;
9394 + u32 first_fqid = 0;
9395 + u32 last_fqid = 0;
9396 + char *prevstr = NULL;
9397 +
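 + /* Coalesce consecutive FQIDs of the same type into "first - last"
 +  * ranges so the sysfs output stays compact.
 +  */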
9398 + list_for_each_entry_safe(fq, tmp, &priv->dpa_fq_list, list) {
9399 + switch (fq->fq_type) {
9400 + case FQ_TYPE_RX_DEFAULT:
9401 + str = "Rx default";
9402 + break;
9403 + case FQ_TYPE_RX_ERROR:
9404 + str = "Rx error";
9405 + break;
9406 + case FQ_TYPE_RX_PCD:
9407 + str = "Rx PCD";
9408 + break;
9409 + case FQ_TYPE_TX_CONFIRM:
9410 + str = "Tx default confirmation";
9411 + break;
9412 + case FQ_TYPE_TX_CONF_MQ:
9413 + str = "Tx confirmation (mq)";
9414 + break;
9415 + case FQ_TYPE_TX_ERROR:
9416 + str = "Tx error";
9417 + break;
9418 + case FQ_TYPE_TX:
9419 + str = "Tx";
9420 + break;
9421 + default:
9422 + str = "Unknown";
9423 + }
9424 +
9425 + if (prev && (abs(fq->fqid - prev->fqid) != 1 ||
9426 + str != prevstr)) {
9427 + if (last_fqid == first_fqid)
9428 + bytes += sprintf(buf + bytes,
9429 + "%s: %d\n", prevstr, prev->fqid);
9430 + else
9431 + bytes += sprintf(buf + bytes,
9432 + "%s: %d - %d\n", prevstr,
9433 + first_fqid, last_fqid);
9434 + }
9435 +
9436 + if (prev && abs(fq->fqid - prev->fqid) == 1 && str == prevstr)
9437 + last_fqid = fq->fqid;
9438 + else
9439 + first_fqid = last_fqid = fq->fqid;
9440 +
9441 + prev = fq;
9442 + prevstr = str;
9443 + i++;
9444 + }
9445 +
9446 + if (prev) {
9447 + if (last_fqid == first_fqid)
9448 + bytes += sprintf(buf + bytes, "%s: %d\n", prevstr,
9449 + prev->fqid);
9450 + else
9451 + bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr,
9452 + first_fqid, last_fqid);
9453 + }
9454 +
9455 + return bytes;
9456 +}
9457 +
9458 +static ssize_t dpaa_eth_generic_show_bpids(struct device *dev,
9459 + struct device_attribute *attr, char *buf)
9460 +{
9461 + ssize_t bytes = 0;
9462 + struct dpa_generic_priv_s *priv = netdev_priv(to_net_dev(dev));
9463 + struct dpa_bp *rx_bp = priv->rx_bp;
9464 + struct dpa_bp *draining_tx_bp = priv->draining_tx_bp;
9465 + int i = 0;
9466 +
9467 + bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "Rx buffer pools:\n");
9468 + for (i = 0; i < priv->rx_bp_count; i++)
9469 + bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u ",
9470 + rx_bp[i].bpid);
9471 +
9472 + bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "\n");
9473 + bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "Draining buffer pool:\n");
9474 + bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
 + draining_tx_bp->bpid);
9475 +
9476 + return bytes;
9477 +}
9478 +
9479 +static ssize_t dpaa_eth_generic_show_mac_regs(struct device *dev,
9480 + struct device_attribute *attr, char *buf)
9481 +{
9482 + struct dpa_generic_priv_s *priv = netdev_priv(to_net_dev(dev));
9483 + struct mac_device *mac_dev = priv->mac_dev;
9484 + int n = 0;
9485 +
9486 + if (mac_dev)
9487 + n = fm_mac_dump_regs(mac_dev, buf, n);
9488 + else
9489 + return sprintf(buf, "no mac control\n");
9490 +
9491 + return n;
9492 +}
9493 +
9494 +static struct device_attribute dpaa_eth_generic_attrs[] = {
9495 + __ATTR(device_addr, S_IRUGO, dpaa_eth_generic_show_addr, NULL),
9496 + __ATTR(device_type, S_IRUGO, dpaa_eth_generic_show_type, NULL),
9497 + __ATTR(fqids, S_IRUGO, dpaa_eth_generic_show_fqids, NULL),
9498 + __ATTR(bpids, S_IRUGO, dpaa_eth_generic_show_bpids, NULL),
9499 + __ATTR(mac_regs, S_IRUGO, dpaa_eth_generic_show_mac_regs, NULL),
9500 +};
9501 +
9502 +void dpaa_eth_generic_sysfs_init(struct device *dev)
9503 +{
9504 + int i;
9505 +
9506 + for (i = 0; i < ARRAY_SIZE(dpaa_eth_generic_attrs); i++)
9507 + if (device_create_file(dev, &dpaa_eth_generic_attrs[i])) {
9508 + dev_err(dev, "Error creating sysfs file\n");
9509 + while (i > 0)
9510 + device_remove_file(dev,
9511 + &dpaa_eth_generic_attrs[--i]);
9512 + return;
9513 + }
9514 +}
9515 +
9516 +void dpaa_eth_generic_sysfs_remove(struct device *dev)
9517 +{
9518 + int i;
9519 +
9520 + for (i = 0; i < ARRAY_SIZE(dpaa_eth_generic_attrs); i++)
9521 + device_remove_file(dev, &dpaa_eth_generic_attrs[i]);
9522 +}
9523 --- /dev/null
9524 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macless.c
9525 @@ -0,0 +1,499 @@
9526 +/* Copyright 2008-2013 Freescale Semiconductor Inc.
9527 + *
9528 + * Redistribution and use in source and binary forms, with or without
9529 + * modification, are permitted provided that the following conditions are met:
9530 + * * Redistributions of source code must retain the above copyright
9531 + * notice, this list of conditions and the following disclaimer.
9532 + * * Redistributions in binary form must reproduce the above copyright
9533 + * notice, this list of conditions and the following disclaimer in the
9534 + * documentation and/or other materials provided with the distribution.
9535 + * * Neither the name of Freescale Semiconductor nor the
9536 + * names of its contributors may be used to endorse or promote products
9537 + * derived from this software without specific prior written permission.
9538 + *
9539 + *
9540 + * ALTERNATIVELY, this software may be distributed under the terms of the
9541 + * GNU General Public License ("GPL") as published by the Free Software
9542 + * Foundation, either version 2 of that License or (at your option) any
9543 + * later version.
9544 + *
9545 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
9546 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
9547 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
9548 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
9549 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
9550 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
9551 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
9552 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9553 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9554 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9555 + */
9556 +
9557 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
9558 +#define pr_fmt(fmt) \
9559 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
9560 + KBUILD_BASENAME".c", __LINE__, __func__
9561 +#else
9562 +#define pr_fmt(fmt) \
9563 + KBUILD_MODNAME ": " fmt
9564 +#endif
9565 +
9566 +#include <linux/init.h>
9567 +#include <linux/module.h>
9568 +#include <linux/of_platform.h>
9569 +#include <linux/of_net.h>
9570 +#include <linux/etherdevice.h>
9571 +#include <linux/kthread.h>
9572 +#include <linux/percpu.h>
9573 +#include <linux/highmem.h>
9574 +#include <linux/fsl_qman.h>
9575 +#include "dpaa_eth.h"
9576 +#include "dpaa_eth_common.h"
9577 +#include "dpaa_eth_base.h"
9578 +#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
9579 +#include "mac.h"
9580 +
9581 +/* For MAC-based interfaces, we compute the tx needed headroom from the
9582 + * associated Tx port's buffer layout settings.
9583 + * For MACless interfaces just use a default value.
9584 + */
9585 +#define DPA_DEFAULT_TX_HEADROOM 64
9586 +
9587 +#define DPA_DESCRIPTION "FSL DPAA MACless Ethernet driver"
9588 +
9589 +MODULE_LICENSE("Dual BSD/GPL");
9590 +
9591 +MODULE_DESCRIPTION(DPA_DESCRIPTION);
9592 +
9593 +/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
9594 +static uint16_t macless_tx_timeout = 1000;
9595 +module_param(macless_tx_timeout, ushort, S_IRUGO);
9596 +MODULE_PARM_DESC(macless_tx_timeout, "The MACless Tx timeout in ms");
9597 +
9598 +/* forward declarations */
9599 +static int __cold dpa_macless_start(struct net_device *net_dev);
9600 +static int __cold dpa_macless_stop(struct net_device *net_dev);
9601 +static int __cold dpa_macless_set_address(struct net_device *net_dev,
9602 + void *addr);
9603 +static void __cold dpa_macless_set_rx_mode(struct net_device *net_dev);
9604 +
9605 +static int dpaa_eth_macless_probe(struct platform_device *_of_dev);
9606 +static netdev_features_t
9607 +dpa_macless_fix_features(struct net_device *dev, netdev_features_t features);
9608 +
9609 +static const struct net_device_ops dpa_macless_ops = {
9610 + .ndo_open = dpa_macless_start,
9611 + .ndo_start_xmit = dpa_shared_tx,
9612 + .ndo_stop = dpa_macless_stop,
9613 + .ndo_tx_timeout = dpa_timeout,
9614 + .ndo_get_stats64 = dpa_get_stats64,
9615 + .ndo_set_mac_address = dpa_macless_set_address,
9616 + .ndo_set_rx_mode = dpa_macless_set_rx_mode,
9617 + .ndo_validate_addr = eth_validate_addr,
9618 +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
9619 + .ndo_select_queue = dpa_select_queue,
9620 +#endif
9621 + .ndo_change_mtu = dpa_change_mtu,
9622 + .ndo_init = dpa_ndo_init,
9623 + .ndo_set_features = dpa_set_features,
9624 + .ndo_fix_features = dpa_macless_fix_features,
9625 +};
9626 +
9627 +static const struct of_device_id dpa_macless_match[] = {
9628 + {
9629 + .compatible = "fsl,dpa-ethernet-macless"
9630 + },
9631 + {}
9632 +};
9633 +MODULE_DEVICE_TABLE(of, dpa_macless_match);
9634 +
9635 +static struct platform_driver dpa_macless_driver = {
9636 + .driver = {
9637 + .name = KBUILD_MODNAME "-macless",
9638 + .of_match_table = dpa_macless_match,
9639 + .owner = THIS_MODULE,
9640 + },
9641 + .probe = dpaa_eth_macless_probe,
9642 + .remove = dpa_remove
9643 +};
9644 +
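 +/* Device-tree properties holding the Rx and Tx frame-queue ranges for a
 + * MACless interface, indexed by port type.
 + */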
9645 +static const char macless_frame_queues[][25] = {
9646 + [RX] = "fsl,qman-frame-queues-rx",
9647 + [TX] = "fsl,qman-frame-queues-tx"
9648 +};
9649 +
9650 +static int __cold dpa_macless_start(struct net_device *net_dev)
9651 +{
9652 + const struct dpa_priv_s *priv = netdev_priv(net_dev);
9653 + struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer;
9654 +
9655 + netif_tx_start_all_queues(net_dev);
9656 +
9657 + if (proxy_dev)
9658 + dpa_proxy_start(net_dev);
9659 +
9660 +
9661 + return 0;
9662 +}
9663 +
9664 +static int __cold dpa_macless_stop(struct net_device *net_dev)
9665 +{
9666 + const struct dpa_priv_s *priv = netdev_priv(net_dev);
9667 + struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer;
9668 +
9669 + netif_tx_stop_all_queues(net_dev);
9670 +
9671 + if (proxy_dev)
9672 + dpa_proxy_stop(proxy_dev, net_dev);
9673 +
9674 + return 0;
9675 +}
9676 +
9677 +static int dpa_macless_set_address(struct net_device *net_dev, void *addr)
9678 +{
9679 + const struct dpa_priv_s *priv = netdev_priv(net_dev);
9680 + struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer;
9681 + int _errno;
9682 +
9683 + _errno = eth_mac_addr(net_dev, addr);
9684 + if (_errno < 0) {
9685 + if (netif_msg_drv(priv))
9686 + netdev_err(net_dev, "eth_mac_addr() = %d\n", _errno);
9687 + return _errno;
9688 + }
9689 +
9690 + if (proxy_dev) {
9691 + _errno = dpa_proxy_set_mac_address(proxy_dev, net_dev);
9692 + if (_errno < 0) {
9693 + if (netif_msg_drv(priv))
9694 + netdev_err(net_dev, "proxy_set_mac_address() = %d\n",
9695 + _errno);
9696 + return _errno;
9697 + }
9698 + }
9699 +
9700 + return 0;
9701 +}
9702 +
9703 +static void __cold dpa_macless_set_rx_mode(struct net_device *net_dev)
9704 +{
9705 + const struct dpa_priv_s *priv = netdev_priv(net_dev);
9706 + struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer;
9707 +
9708 + if (proxy_dev)
9709 + dpa_proxy_set_rx_mode(proxy_dev, net_dev);
9710 +}
9711 +
9712 +static netdev_features_t
9713 +dpa_macless_fix_features(struct net_device *dev, netdev_features_t features)
9714 +{
9715 + netdev_features_t unsupported_features = 0;
9716 +
9717 + /* In theory we should never be requested to enable features that
9718 + * we didn't set in netdev->features and netdev->hw_features at probe
9719 + * time, but double check just to be on the safe side.
9720 + */
9721 + unsupported_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
9722 + /* We don't support enabling Rx csum through ethtool yet */
9723 + unsupported_features |= NETIF_F_RXCSUM;
9724 +
9725 + features &= ~unsupported_features;
9726 +
9727 + return features;
9728 +}
9729 +
9730 +static int dpa_macless_netdev_init(struct device_node *dpa_node,
9731 + struct net_device *net_dev)
9732 +{
9733 + struct dpa_priv_s *priv = netdev_priv(net_dev);
9734 + struct proxy_device *proxy_dev = (struct proxy_device *)priv->peer;
9735 + struct device *dev = net_dev->dev.parent;
9736 + const uint8_t *mac_addr;
9737 +
9738 + net_dev->netdev_ops = &dpa_macless_ops;
9739 +
9740 + if (proxy_dev) {
9741 + struct mac_device *mac_dev = proxy_dev->mac_dev;
9742 + net_dev->mem_start = mac_dev->res->start;
9743 + net_dev->mem_end = mac_dev->res->end;
9744 +
9745 + return dpa_netdev_init(net_dev, mac_dev->addr,
9746 + macless_tx_timeout);
9747 + } else {
9748 + /* Get the MAC address from device tree */
9749 + mac_addr = of_get_mac_address(dpa_node);
9750 +
9751 + if (mac_addr == NULL) {
9752 + if (netif_msg_probe(priv))
9753 + dev_err(dev, "No MAC address found!\n");
9754 + return -EINVAL;
9755 + }
9756 +
9757 + return dpa_netdev_init(net_dev, mac_addr,
9758 + macless_tx_timeout);
9759 + }
9760 +}
9761 +
9762 +/* Probing of FQs for MACless ports */
9763 +static int dpa_fq_probe_macless(struct device *dev, struct list_head *list,
9764 + enum port_type ptype)
9765 +{
9766 + struct device_node *np = dev->of_node;
9767 + const struct fqid_cell *fqids;
9768 + int num_ranges;
9769 + int i, lenp;
9770 +
9771 + fqids = of_get_property(np, macless_frame_queues[ptype], &lenp);
9772 + if (fqids == NULL) {
9773 + dev_err(dev, "Need FQ definition in dts for MACless devices\n");
9774 + return -EINVAL;
9775 + }
9776 +
9777 + num_ranges = lenp / sizeof(*fqids);
9778 +
9779 + /* All ranges defined in the device tree are used as Rx/Tx queues */
9780 + for (i = 0; i < num_ranges; i++) {
9781 + if (!dpa_fq_alloc(dev, be32_to_cpu(fqids[i].start),
9782 + be32_to_cpu(fqids[i].count), list,
9783 + ptype == RX ? FQ_TYPE_RX_PCD : FQ_TYPE_TX)) {
9784 + dev_err(dev, "_dpa_fq_alloc() failed\n");
9785 + return -ENOMEM;
9786 + }
9787 + }
9788 +
9789 + return 0;
9790 +}
9791 +
9792 +static struct proxy_device *
9793 +dpa_macless_proxy_probe(struct platform_device *_of_dev)
9794 +{
9795 + struct device *dev;
9796 + const phandle *proxy_prop;
9797 + struct proxy_device *proxy_dev;
9798 + struct device_node *proxy_node;
9799 + struct platform_device *proxy_pdev;
9800 + int lenp;
9801 +
9802 + dev = &_of_dev->dev;
9803 +
9804 + proxy_prop = of_get_property(dev->of_node, "proxy", &lenp);
9805 + if (!proxy_prop)
9806 + return NULL;
9807 +
9808 + proxy_node = of_find_node_by_phandle(*proxy_prop);
9809 + if (!proxy_node) {
9810 + dev_err(dev, "Cannot find proxy node\n");
9811 + return NULL;
9812 + }
9813 +
9814 + proxy_pdev = of_find_device_by_node(proxy_node);
9815 + if (!proxy_pdev) {
9816 + of_node_put(proxy_node);
9817 + dev_err(dev, "Cannot find device represented by proxy node\n");
9818 + return NULL;
9819 + }
9820 +
9821 + proxy_dev = dev_get_drvdata(&proxy_pdev->dev);
9822 +
9823 + of_node_put(proxy_node);
9824 +
9825 + return proxy_dev;
9826 +}
9827 +
9828 +static int dpaa_eth_macless_probe(struct platform_device *_of_dev)
9829 +{
9830 + int err = 0, i, channel;
9831 + struct device *dev;
9832 + struct device_node *dpa_node;
9833 + struct dpa_bp *dpa_bp;
9834 + struct dpa_fq *dpa_fq, *tmp;
9835 + size_t count;
9836 + struct net_device *net_dev = NULL;
9837 + struct dpa_priv_s *priv = NULL;
9838 + struct dpa_percpu_priv_s *percpu_priv;
9839 + static struct proxy_device *proxy_dev;
9840 + struct task_struct *kth;
9841 + static u8 macless_idx;
9842 +
9843 + dev = &_of_dev->dev;
9844 +
9845 + dpa_node = dev->of_node;
9846 +
9847 + if (!of_device_is_available(dpa_node))
9848 + return -ENODEV;
9849 +
9850 + /* Get the buffer pools assigned to this interface */
9851 + dpa_bp = dpa_bp_probe(_of_dev, &count);
9852 + if (IS_ERR(dpa_bp))
9853 + return PTR_ERR(dpa_bp);
9854 +
9855 + for (i = 0; i < count; i++)
9856 + dpa_bp[i].seed_cb = dpa_bp_shared_port_seed;
9857 +
9858 + proxy_dev = dpa_macless_proxy_probe(_of_dev);
9859 +
9860 +
9861 + /* Allocate this early, so we can store relevant information in
9862 + * the private area (needed by 1588 code in dpa_mac_probe)
9863 + */
9864 + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
9865 + if (!net_dev) {
9866 + dev_err(dev, "alloc_etherdev_mq() failed\n");
9867 + return -ENOMEM;
9868 + }
9869 +
9870 + /* Do this here, so we can be verbose early */
9871 + SET_NETDEV_DEV(net_dev, dev);
9872 + dev_set_drvdata(dev, net_dev);
9873 +
9874 + priv = netdev_priv(net_dev);
9875 + priv->net_dev = net_dev;
9876 + sprintf(priv->if_type, "macless%d", macless_idx++);
9877 +
9878 + priv->msg_enable = netif_msg_init(advanced_debug, -1);
9879 +
9880 + priv->peer = NULL;
9881 + priv->mac_dev = NULL;
9882 + if (proxy_dev) {
9883 + /* This is a temporary solution, needed to keep the
9884 + * main driver upstreamable: adjust_link is a generic
9885 + * function that should work for both the private
9886 + * driver and a macless driver with MAC device
9887 + * control capabilities, even though the latter will
9888 + * not be upstreamed.
9889 + * TODO: find a convenient solution (wrapper over
9890 + * main priv structure, etc.)
9891 + */
9892 + priv->mac_dev = proxy_dev->mac_dev;
9893 +
9894 + /* control over proxy's mac device */
9895 + priv->peer = (void *)proxy_dev;
9896 + }
9897 +
9898 + INIT_LIST_HEAD(&priv->dpa_fq_list);
9899 +
9900 + err = dpa_fq_probe_macless(dev, &priv->dpa_fq_list, RX);
9901 + if (!err)
9902 + err = dpa_fq_probe_macless(dev, &priv->dpa_fq_list,
9903 + TX);
9904 + if (err < 0)
9905 + goto fq_probe_failed;
9906 +
9907 + /* bp init */
9908 + priv->bp_count = count;
9909 + err = dpa_bp_create(net_dev, dpa_bp, count);
9910 + if (err < 0)
9911 + goto bp_create_failed;
9912 +
9913 + channel = dpa_get_channel();
9914 +
9915 + if (channel < 0) {
9916 + err = channel;
9917 + goto get_channel_failed;
9918 + }
9919 +
9920 + priv->channel = (uint16_t)channel;
9921 +
9922 + /* Start a thread that will walk the cpus with affine portals
9923 + * and add this pool channel to each portal's dequeue mask.
9924 + */
9925 + kth = kthread_run(dpaa_eth_add_channel,
9926 + (void *)(unsigned long)priv->channel,
9927 + "dpaa_%p:%d", net_dev, priv->channel);
9928 + if (IS_ERR(kth)) {
9929 + err = PTR_ERR(kth);
9930 + goto add_channel_failed;
9931 + }
9932 +
9933 + dpa_fq_setup(priv, &shared_fq_cbs, NULL);
9934 +
9935 + /* Add the FQs to the interface, and make them active */
9936 + list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
9937 + /* For MAC-less devices we only get here for RX frame queues
9938 + * initialization, which are the TX queues of the other
9939 + * partition.
9940 + * It is safe to rely on one partition to set the FQ taildrop
9941 + * threshold for the TX queues of the other partition
9942 + * because the ERN notifications will be received by the
9943 + * partition doing qman_enqueue.
9944 + */
9945 + err = dpa_fq_init(dpa_fq, true);
9946 + if (err < 0)
9947 + goto fq_alloc_failed;
9948 + }
9949 +
9950 + priv->tx_headroom = DPA_DEFAULT_TX_HEADROOM;
9951 +
9952 + priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
9953 +
9954 + if (priv->percpu_priv == NULL) {
9955 + dev_err(dev, "devm_alloc_percpu() failed\n");
9956 + err = -ENOMEM;
9957 + goto alloc_percpu_failed;
9958 + }
9959 + for_each_possible_cpu(i) {
9960 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
9961 + memset(percpu_priv, 0, sizeof(*percpu_priv));
9962 + }
9963 +
9964 + err = dpa_macless_netdev_init(dpa_node, net_dev);
9965 + if (err < 0)
9966 + goto netdev_init_failed;
9967 +
9968 + dpaa_eth_sysfs_init(&net_dev->dev);
9969 +
9970 + pr_info("fsl_dpa_macless: Probed %s interface as %s\n",
9971 + priv->if_type, net_dev->name);
9972 +
9973 + return 0;
9974 +
9975 +netdev_init_failed:
9976 +alloc_percpu_failed:
9977 +fq_alloc_failed:
9978 + if (net_dev)
9979 + dpa_fq_free(dev, &priv->dpa_fq_list);
9980 +add_channel_failed:
9981 +get_channel_failed:
9982 + if (net_dev)
9983 + dpa_bp_free(priv);
9984 +bp_create_failed:
9985 +fq_probe_failed:
9986 + dev_set_drvdata(dev, NULL);
9987 + if (net_dev)
9988 + free_netdev(net_dev);
9989 +
9990 + return err;
9991 +}
9992 +
9993 +static int __init __cold dpa_macless_load(void)
9994 +{
9995 + int _errno;
9996 +
9997 + pr_info(DPA_DESCRIPTION "\n");
9998 +
9999 + /* Initialize dpaa_eth mirror values */
10000 + dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
10001 + dpa_max_frm = fm_get_max_frm();
10002 +
10003 + _errno = platform_driver_register(&dpa_macless_driver);
10004 + if (unlikely(_errno < 0)) {
10005 + pr_err(KBUILD_MODNAME
10006 + ": %s:%hu:%s(): platform_driver_register() = %d\n",
10007 + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
10008 + }
10009 +
10010 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
10011 + KBUILD_BASENAME".c", __func__);
10012 +
10013 + return _errno;
10014 +}
10015 +module_init(dpa_macless_load);
10016 +
10017 +static void __exit __cold dpa_macless_unload(void)
10018 +{
10019 + platform_driver_unregister(&dpa_macless_driver);
10020 +
10021 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
10022 + KBUILD_BASENAME".c", __func__);
10023 +}
10024 +module_exit(dpa_macless_unload);
10025 --- /dev/null
10026 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.c
10027 @@ -0,0 +1,2156 @@
10028 +/* Copyright 2015 Freescale Semiconductor Inc.
10029 + *
10030 + * Redistribution and use in source and binary forms, with or without
10031 + * modification, are permitted provided that the following conditions are met:
10032 + * * Redistributions of source code must retain the above copyright
10033 + * notice, this list of conditions and the following disclaimer.
10034 + * * Redistributions in binary form must reproduce the above copyright
10035 + * notice, this list of conditions and the following disclaimer in the
10036 + * documentation and/or other materials provided with the distribution.
10037 + * * Neither the name of Freescale Semiconductor nor the
10038 + * names of its contributors may be used to endorse or promote products
10039 + * derived from this software without specific prior written permission.
10040 + *
10041 + *
10042 + * ALTERNATIVELY, this software may be distributed under the terms of the
10043 + * GNU General Public License ("GPL") as published by the Free Software
10044 + * Foundation, either version 2 of that License or (at your option) any
10045 + * later version.
10046 + *
10047 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
10048 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
10049 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
10050 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
10051 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
10052 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
10053 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
10054 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
10055 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10056 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10057 + */
10058 +
10059 +#include <linux/init.h>
10060 +#include <linux/module.h>
10061 +#include <linux/kernel.h>
10062 +#include <linux/moduleparam.h>
10063 +
10064 +#include <net/sock.h>
10065 +#include <linux/netlink.h>
10066 +#include <linux/skbuff.h>
10067 +
10068 +#include "dpaa_eth_macsec.h"
10069 +#include "dpaa_eth_common.h"
10070 +
10071 +#ifdef CONFIG_FSL_DPAA_1588
10072 +#include "dpaa_1588.h"
10073 +#endif
10074 +
10075 +static struct sock *nl_sk;
10076 +static struct macsec_priv_s *macsec_priv[FM_MAX_NUM_OF_MACS];
10077 +static char *macsec_ifs[FM_MAX_NUM_OF_MACS];
10078 +static int macsec_ifs_cnt;
10079 +
10080 +static char ifs[MAX_LEN];
10081 +const struct ethtool_ops *dpa_ethtool_ops_prev;
10082 +static struct ethtool_ops dpa_macsec_ethtool_ops;
10083 +
10084 +module_param_string(ifs, ifs, MAX_LEN, 0000);
10085 +MODULE_PARM_DESC(ifs, "Comma separated interface list");
10086 +
10087 +struct macsec_priv_s *dpa_macsec_get_priv(struct net_device *net_dev)
10088 +{
10089 + return macsec_priv[net_dev->ifindex - 1];
10090 +}
10091 +
10092 +static void macsec_setup_ethtool_ops(struct net_device *net_dev)
10093 +{
10094 + /* remember private driver's ethtool ops just once */
10095 + if (!dpa_ethtool_ops_prev) {
10096 + dpa_ethtool_ops_prev = net_dev->ethtool_ops;
10097 +
10098 + memcpy(&dpa_macsec_ethtool_ops, net_dev->ethtool_ops,
10099 + sizeof(struct ethtool_ops));
10100 + dpa_macsec_ethtool_ops.get_sset_count =
10101 + dpa_macsec_get_sset_count;
10102 + dpa_macsec_ethtool_ops.get_ethtool_stats =
10103 + dpa_macsec_get_ethtool_stats;
10104 + dpa_macsec_ethtool_ops.get_strings =
10105 + dpa_macsec_get_strings;
10106 + }
10107 +
10108 + net_dev->ethtool_ops = &dpa_macsec_ethtool_ops;
10109 +}
10110 +
10111 +static void macsec_restore_ethtool_ops(struct net_device *net_dev)
10112 +{
10113 + net_dev->ethtool_ops = dpa_ethtool_ops_prev;
10114 +}
10115 +
10116 +
10117 +static int ifname_to_id(char *ifname)
10118 +{
10119 + int i;
10120 +
10121 + for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
10122 + if (macsec_priv[i]->net_dev &&
10123 + (strcmp(ifname, macsec_priv[i]->net_dev->name) == 0)) {
10124 + return i;
10125 + }
10126 + }
10127 +
10128 + return -1;
10129 +}
10130 +
10131 +static void deinit_macsec(int macsec_id)
10132 +{
10133 + struct macsec_priv_s *selected_macsec_priv;
10134 + int i;
10135 +
10136 + selected_macsec_priv = macsec_priv[macsec_id];
10137 +
10138 + if (selected_macsec_priv->en_state == SECY_ENABLED) {
10139 + for (i = 0; i < NUM_OF_RX_SC; i++) {
10140 + if (!selected_macsec_priv->rx_sc_dev[i])
10141 + continue;
10142 + fm_macsec_secy_rxsa_disable_receive(
10143 + selected_macsec_priv->fm_ms_secy,
10144 + selected_macsec_priv->rx_sc_dev[i],
10145 + selected_macsec_priv->an);
10146 + pr_debug("disable rx_sa done\n");
10147 +
10148 + fm_macsec_secy_delete_rx_sa(
10149 + selected_macsec_priv->fm_ms_secy,
10150 + selected_macsec_priv->rx_sc_dev[i],
10151 + selected_macsec_priv->an);
10152 + pr_debug("delete rx_sa done\n");
10153 +
10154 + fm_macsec_secy_delete_rxsc(
10155 + selected_macsec_priv->fm_ms_secy,
10156 + selected_macsec_priv->rx_sc_dev[i]);
10157 + pr_debug("delete rx_sc done\n");
10158 + }
10159 +
10160 + fm_macsec_secy_delete_tx_sa(selected_macsec_priv->fm_ms_secy,
10161 + selected_macsec_priv->an);
10162 + pr_debug("delete tx_sa done\n");
10163 +
10164 + fm_macsec_secy_free(selected_macsec_priv->fm_ms_secy);
10165 + selected_macsec_priv->fm_ms_secy = NULL;
10166 + pr_debug("secy free done\n");
10167 + }
10168 +
10169 + if (selected_macsec_priv->en_state != MACSEC_DISABLED) {
10170 + fm_macsec_disable(selected_macsec_priv->fm_macsec);
10171 + fm_macsec_free(selected_macsec_priv->fm_macsec);
10172 + selected_macsec_priv->fm_macsec = NULL;
10173 + pr_debug("macsec disable and free done\n");
10174 + }
10175 +}
10176 +
10177 +static void parse_ifs(void)
10178 +{
10179 + char *token, *strpos = ifs;
10180 +
10181 +	while ((token = strsep(&strpos, ",")) &&
+	       macsec_ifs_cnt < FM_MAX_NUM_OF_MACS) {
10182 +		if (strlen(token) == 0)
10183 +			return;
10184 +		macsec_ifs[macsec_ifs_cnt] = token;
10185 +		macsec_ifs_cnt++;
10187 + }
10188 +}
10189 +
10190 +static void macsec_exception(handle_t _macsec_priv_s,
10191 + fm_macsec_exception exception)
10192 +{
10193 + struct macsec_priv_s *priv;
10194 + priv = (struct macsec_priv_s *)_macsec_priv_s;
10195 +
10196 + switch (exception) {
10197 + case (SINGLE_BIT_ECC):
10198 + dev_warn(priv->mac_dev->dev, "%s:%s SINGLE_BIT_ECC exception\n",
10199 + KBUILD_BASENAME".c", __func__);
10200 + break;
10201 + case (MULTI_BIT_ECC):
10202 + dev_warn(priv->mac_dev->dev, "%s:%s MULTI_BIT_ECC exception\n",
10203 + KBUILD_BASENAME".c", __func__);
10204 + break;
10205 + default:
10206 + dev_warn(priv->mac_dev->dev, "%s:%s exception %d\n",
10207 + KBUILD_BASENAME".c", __func__, exception);
10208 + break;
10209 + }
10210 +}
10211 +
10212 +
10213 +static void macsec_secy_exception(handle_t _macsec_priv_s,
10214 + fm_macsec_secy_exception exception)
10215 +{
10216 + struct macsec_priv_s *priv;
10217 + priv = (struct macsec_priv_s *)_macsec_priv_s;
10218 +
10219 + switch (exception) {
10220 + case (SECY_EX_FRAME_DISCARDED):
10221 + dev_warn(priv->mac_dev->dev,
10222 + "%s:%s SECY_EX_FRAME_DISCARDED exception\n",
10223 + KBUILD_BASENAME".c", __func__);
10224 + break;
10225 + default:
10226 + dev_warn(priv->mac_dev->dev, "%s:%s exception %d\n",
10227 + KBUILD_BASENAME".c", __func__, exception);
10228 + break;
10229 + }
10230 +}
10231 +
10232 +static void macsec_secy_events(handle_t _macsec_priv_s,
10233 + fm_macsec_secy_event event)
10234 +{
10235 + struct macsec_priv_s *priv;
10236 + priv = (struct macsec_priv_s *)_macsec_priv_s;
10237 +
10238 + switch (event) {
10239 + case (SECY_EV_NEXT_PN):
10240 + dev_dbg(priv->mac_dev->dev, "%s:%s SECY_EV_NEXT_PN event\n",
10241 + KBUILD_BASENAME".c", __func__);
10242 + break;
10243 + default:
10244 + dev_dbg(priv->mac_dev->dev, "%s:%s event %d\n",
10245 + KBUILD_BASENAME".c", __func__, event);
10246 + break;
10247 + }
10248 +}
10249 +
10250 +static struct qman_fq *macsec_get_tx_conf_queue(
10251 + const struct macsec_priv_s *macsec_priv,
10252 + struct qman_fq *tx_fq)
10253 +{
10254 + int i;
10255 +
10256 + for (i = 0; i < MACSEC_ETH_TX_QUEUES; i++)
10257 + if (macsec_priv->egress_fqs[i] == tx_fq)
10258 + return macsec_priv->conf_fqs[i];
10259 + return NULL;
10260 +}
10261 +
10262 +/* Initialize qman fqs. Still need to set context_a, specifically the bits
10263 + * that identify the secure channel.
10264 + */
10265 +static int macsec_fq_init(struct dpa_fq *dpa_fq)
10266 +{
10267 + struct qman_fq *fq;
10268 + struct device *dev;
10269 + struct qm_mcc_initfq initfq;
10270 + uint32_t sc_phys_id;
10271 + int _errno, macsec_id;
10272 +
10273 + dev = dpa_fq->net_dev->dev.parent;
10274 + macsec_id = dpa_fq->net_dev->ifindex - 1;
10275 +
10276 + if (dpa_fq->fqid == 0)
10277 + dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
10278 +
10279 + dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
10280 + _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
10281 +
10282 + if (_errno) {
10283 + dev_err(dev, "qman_create_fq() failed\n");
10284 + return _errno;
10285 + }
10286 +
10287 + fq = &dpa_fq->fq_base;
10288 +
10289 + if (dpa_fq->init) {
10290 + initfq.we_mask = QM_INITFQ_WE_FQCTRL;
10291 + initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
10292 +
10293 + if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
10294 + initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
10295 +
10296 + initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
10297 +
10298 + initfq.fqd.dest.channel = dpa_fq->channel;
10299 + initfq.fqd.dest.wq = dpa_fq->wq;
10300 +
10301 + if (dpa_fq->fq_type == FQ_TYPE_TX) {
10302 + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
10303 +
10304 + /* Obtain the TX scId from fman */
10305 + _errno = fm_macsec_secy_get_txsc_phys_id(
10306 + macsec_priv[macsec_id]->fm_ms_secy,
10307 + &sc_phys_id);
10308 + if (unlikely(_errno < 0)) {
10309 + dev_err(dev, "fm_macsec_secy_get_txsc_phys_id = %d\n",
10310 + _errno);
10311 + return _errno;
10312 + }
10313 +
10314 + /* Write the TX SC-ID in the context of the FQ.
10315 + * A2V=1 (use the A2 field)
10316 + * A0V=1 (use the A0 field)
10317 + * OVOM=1
10318 + * MCV=1 (MACsec controlled frames)
10319 + * MACCMD=the TX scId
10320 + */
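+			/* Decoding 0x1a100000 (our reading of the flag list
+			 * above): bit 28 = A2V, bit 27 = A0V, bit 25 = OVOM,
+			 * bit 20 = MCV; "sc_phys_id << 16" then places the TX
+			 * scId in the MACCMD field.
+			 */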
10321 + initfq.fqd.context_a.hi = 0x1a100000 |
10322 + sc_phys_id << 16;
10323 + initfq.fqd.context_a.lo = 0x80000000;
10324 + }
10325 +
10326 + _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
10327 + if (_errno < 0) {
10328 + dev_err(dev, "qman_init_fq(%u) = %d\n",
10329 + qman_fq_fqid(fq), _errno);
10330 + qman_destroy_fq(fq, 0);
10331 + return _errno;
10332 + }
10333 + }
10334 +
10335 + dpa_fq->fqid = qman_fq_fqid(fq);
10336 +
10337 + return 0;
10338 +}
10339 +
10340 +/* Configure and enable secy. */
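+/* The call order below matters: fm_macsec_secy_config() creates the SecY,
+ * the optional fm_macsec_secy_config_*() tweaks may only be applied before
+ * fm_macsec_secy_init(), and the MACsec Tx FQs can only be initialized after
+ * that, because macsec_fq_init() queries the TX scId from the live SecY.
+ */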
10341 +static int enable_secy(struct generic_msg *gen, int *macsec_id)
10342 +{
10343 + struct enable_secy *sec;
10344 + int _errno;
10345 + struct fm_macsec_secy_params secy_params;
10346 + struct dpa_fq *dpa_fq, *tmp;
10347 + struct macsec_priv_s *selected_macsec_priv;
10348 +
10349 + sec = &gen->payload.secy;
10350 +
10351 + if (sec->macsec_id < 0 || sec->macsec_id >= FM_MAX_NUM_OF_MACS) {
10352 + _errno = -EINVAL;
10353 + goto _return;
10354 + }
10355 + *macsec_id = sec->macsec_id;
10356 + selected_macsec_priv = macsec_priv[sec->macsec_id];
10357 +
10358 + if (selected_macsec_priv->fm_ms_secy) {
10359 + pr_err("Secy has already been enabled\n");
10360 + return -EINVAL;
10361 + }
10362 +
10363 + memset(&secy_params, 0, sizeof(secy_params));
10364 + secy_params.fm_macsec_h = selected_macsec_priv->fm_macsec;
10365 + secy_params.num_receive_channels = NUM_OF_RX_SC;
10366 + secy_params.tx_sc_params.sci = sec->sci;
10367 +
10368 + /* Set encryption method */
10369 + secy_params.tx_sc_params.cipher_suite = SECY_GCM_AES_128;
10370 +#if (DPAA_VERSION >= 11)
10371 + secy_params.tx_sc_params.cipher_suite = SECY_GCM_AES_256;
10372 +#endif /* (DPAA_VERSION >= 11) */
10373 + secy_params.exception_f = macsec_secy_exception;
10374 + secy_params.event_f = macsec_secy_events;
10375 + secy_params.app_h = selected_macsec_priv;
10376 +
10377 + selected_macsec_priv->fm_ms_secy =
10378 + fm_macsec_secy_config(&secy_params);
10379 +
10380 + if (unlikely(selected_macsec_priv->fm_ms_secy == NULL)) {
10381 + _errno = -EINVAL;
10382 + goto _return;
10383 + }
10384 +
10385 + /* Configure the insertion mode */
10386 + if (sec->config_insertion_mode) {
10387 + _errno = fm_macsec_secy_config_sci_insertion_mode(
10388 + selected_macsec_priv->fm_ms_secy,
10389 + sec->sci_insertion_mode);
10390 + if (unlikely(_errno < 0))
10391 + goto _return;
10392 + }
10393 +
10394 + /* Configure the frame protection */
10395 + if (sec->config_protect_frames) {
10396 + _errno = fm_macsec_secy_config_protect_frames(
10397 + selected_macsec_priv->fm_ms_secy,
10398 + sec->protect_frames);
10399 + if (unlikely(_errno < 0))
10400 + goto _return;
10401 + }
10402 +
10403 + /* Configure the replay window */
10404 + if (sec->config_replay_window) {
10405 + _errno = fm_macsec_secy_config_replay_window(
10406 + selected_macsec_priv->fm_ms_secy,
10407 + sec->replay_protect,
10408 + sec->replay_window);
10409 + if (unlikely(_errno < 0))
10410 + goto _return;
10411 + }
10412 +
10413 + /* Configure the validation mode */
10414 + if (sec->config_validation_mode) {
10415 + _errno = fm_macsec_secy_config_validation_mode(
10416 + selected_macsec_priv->fm_ms_secy,
10417 + sec->validate_frames);
10418 + if (unlikely(_errno < 0))
10419 + goto _return;
10420 + }
10421 +
10422 + /* Select the exceptions that will be signaled */
10423 + if (sec->config_exception) {
10424 + _errno = fm_macsec_secy_config_exception(
10425 + selected_macsec_priv->fm_ms_secy,
10426 + sec->exception,
10427 + sec->enable_exception);
10428 + if (unlikely(_errno < 0))
10429 + goto _return;
10430 + }
10431 +
10432 + /* Select the events that will be signaled */
10433 + if (sec->config_event) {
10434 + _errno = fm_macsec_secy_config_event(
10435 + selected_macsec_priv->fm_ms_secy,
10436 + sec->event,
10437 + sec->enable_event);
10438 + if (unlikely(_errno < 0))
10439 + goto _return;
10440 + }
10441 +
10442 + /* Configure a point-to-point connection */
10443 + if (sec->config_point_to_point) {
10444 + _errno = fm_macsec_secy_config_point_to_point(
10445 + selected_macsec_priv->fm_ms_secy);
10446 + if (unlikely(_errno < 0))
10447 + goto _return;
10448 + }
10449 +
10450 + /* Configure the connection's confidentiality state */
10451 + if (sec->config_confidentiality) {
10452 + _errno = fm_macsec_secy_config_confidentiality(
10453 + selected_macsec_priv->fm_ms_secy,
10454 + sec->confidentiality_enable,
10455 + sec->confidentiality_offset);
10456 + if (unlikely(_errno < 0))
10457 + goto _return;
10458 + }
10459 +
10460 + _errno = fm_macsec_secy_init(selected_macsec_priv->fm_ms_secy);
10461 + if (unlikely(_errno < 0))
10462 + goto _return_fm_macsec_secy_free;
10463 +
10464 + list_for_each_entry_safe(dpa_fq,
10465 + tmp,
10466 + &selected_macsec_priv->dpa_fq_list,
10467 + list) {
10468 + _errno = macsec_fq_init(dpa_fq);
10469 + if (_errno < 0)
10470 + goto _return;
10471 + }
10472 +
10473 + return 0;
10474 +
10475 +_return_fm_macsec_secy_free:
10476 + fm_macsec_secy_free(selected_macsec_priv->fm_ms_secy);
10477 + selected_macsec_priv->fm_ms_secy = NULL;
10478 +_return:
10479 + return _errno;
10480 +}
10481 +
10482 +static int set_macsec_exception(struct generic_msg *gen)
10483 +{
10484 + struct set_exception *set_ex;
10485 + struct macsec_priv_s *selected_macsec_priv;
10486 + int rv;
10487 +
10488 + set_ex = &(gen->payload.set_ex);
10489 +
10490 + selected_macsec_priv = macsec_priv[set_ex->macsec_id];
10491 +
10492 + rv = fm_macsec_set_exception(selected_macsec_priv->fm_macsec,
10493 + set_ex->exception,
10494 + set_ex->enable_exception);
10495 + if (unlikely(rv < 0))
10496 + pr_err("error when setting the macsec exception mask\n");
10497 +
10498 + return rv;
10499 +}
10500 +
10501 +static int create_tx_sa(struct generic_msg *gen)
10502 +{
10503 + struct create_tx_sa *c_tx_sa;
10504 + macsec_sa_key_t sa_key;
10505 + int rv;
10506 + struct macsec_priv_s *selected_macsec_priv;
10507 +
10508 + c_tx_sa = &(gen->payload.c_tx_sa);
10509 +
10510 +	if (c_tx_sa->macsec_id < 0 ||
10511 +	    c_tx_sa->macsec_id >= FM_MAX_NUM_OF_MACS)
10512 +		return -EINVAL;
10515 + selected_macsec_priv = macsec_priv[c_tx_sa->macsec_id];
10516 +
10517 + /* set macsec_priv field */
10518 + selected_macsec_priv->an = c_tx_sa->an;
10519 +
10520 +	/* the supported cipher suites use keys of at most 32 bytes */
10521 +	if (unlikely(c_tx_sa->sak_len > 32)) {
10522 +		pr_warn("size of secure key is greater than 32 bytes!\n");
10523 +		return -EINVAL;
10524 +	}
10526 +
10527 + rv = copy_from_user(&sa_key,
10528 + c_tx_sa->sak,
10529 + c_tx_sa->sak_len);
10530 + if (unlikely(rv != 0)) {
10531 + pr_err("copy_from_user could not copy %i bytes\n", rv);
10532 + return -EFAULT;
10533 + }
10534 +
10535 + rv = fm_macsec_secy_create_tx_sa(selected_macsec_priv->fm_ms_secy,
10536 + c_tx_sa->an,
10537 + sa_key);
10538 + if (unlikely(rv < 0))
10539 + pr_err("error when creating tx sa\n");
10540 +
10541 + return rv;
10542 +}
10543 +
10544 +static int modify_tx_sa_key(struct generic_msg *gen)
10545 +{
10546 + struct modify_tx_sa_key *tx_sa_key;
10547 + struct macsec_priv_s *selected_macsec_priv;
10548 + macsec_sa_key_t sa_key;
10549 + int rv;
10550 +
10551 + tx_sa_key = &(gen->payload.modify_tx_sa_key);
10552 +
10553 + if (tx_sa_key->macsec_id < 0 ||
10554 + tx_sa_key->macsec_id >= FM_MAX_NUM_OF_MACS)
10555 + return -EINVAL;
10556 + selected_macsec_priv = macsec_priv[tx_sa_key->macsec_id];
10557 +
10558 + /* set macsec_priv field */
10559 + selected_macsec_priv->an = tx_sa_key->an;
10560 +
10561 + if (unlikely(tx_sa_key->sak_len > 32)) {
10562 + pr_warn("size of secure key is greater than 32 bytes!\n");
10564 + return -EINVAL;
10565 + }
10566 +
10567 + rv = copy_from_user(&sa_key,
10568 + tx_sa_key->sak,
10569 + tx_sa_key->sak_len);
10570 + if (unlikely(rv != 0)) {
10571 + pr_err("copy_from_user could not copy %i bytes\n", rv);
10572 + return -EFAULT;
10573 + }
10574 +
10575 + rv = fm_macsec_secy_txsa_modify_key(selected_macsec_priv->fm_ms_secy,
10576 + tx_sa_key->an,
10577 + sa_key);
10578 + if (unlikely(rv < 0))
10579 + pr_err("error while modifying the tx sa key\n");
10580 +
10581 + return rv;
10582 +}
10583 +
10584 +static int activate_tx_sa(struct generic_msg *gen)
10585 +{
10586 + struct activate_tx_sa *a_tx_sa;
10587 + struct macsec_priv_s *selected_macsec_priv;
10588 + int rv;
10589 +
10590 + a_tx_sa = &(gen->payload.a_tx_sa);
10591 +
10592 +	if (a_tx_sa->macsec_id < 0 ||
10593 +	    a_tx_sa->macsec_id >= FM_MAX_NUM_OF_MACS)
10594 +		return -EINVAL;
10597 + selected_macsec_priv = macsec_priv[a_tx_sa->macsec_id];
10598 +
10599 + rv = fm_macsec_secy_txsa_set_active(selected_macsec_priv->fm_ms_secy,
10600 + a_tx_sa->an);
10601 + if (unlikely(rv < 0))
10602 +		pr_err("error when activating tx sa\n");
10603 +
10604 + return rv;
10605 +}
10606 +
10607 +static int get_tx_sa_an(struct generic_msg *gen, macsec_an_t *an)
10608 +{
10609 + struct macsec_priv_s *selected_macsec_priv;
10610 +
10611 + if (gen->payload.macsec_id < 0 ||
10612 + gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS)
10613 + return -EINVAL;
10614 +
10615 + selected_macsec_priv = macsec_priv[gen->payload.macsec_id];
10616 +
10617 + fm_macsec_secy_txsa_get_active(selected_macsec_priv->fm_ms_secy, an);
10618 +
10619 + return 0;
10620 +}
10621 +
10622 +static int create_rx_sc(struct generic_msg *gen)
10623 +{
10624 + struct fm_macsec_secy_sc_params params;
10625 + struct macsec_priv_s *selected_macsec_priv;
10626 + struct rx_sc_dev *rx_sc_dev;
10627 + uint32_t sc_phys_id;
10628 + int i;
10629 +
10630 + if (gen->payload.c_rx_sc.macsec_id < 0 ||
10631 + gen->payload.c_rx_sc.macsec_id >= FM_MAX_NUM_OF_MACS)
10632 + return -EINVAL;
10633 + selected_macsec_priv = macsec_priv[gen->payload.c_rx_sc.macsec_id];
10634 +
10635 + for (i = 0; i < NUM_OF_RX_SC; i++)
10636 + if (!selected_macsec_priv->rx_sc_dev[i])
10637 + break;
10638 + if (i == NUM_OF_RX_SC) {
10639 +		pr_err("maximum number of RX SCs has been reached\n");
10640 + return -EINVAL;
10641 + }
10642 +
10643 + params.sci = gen->payload.c_rx_sc.sci;
10644 + params.cipher_suite = SECY_GCM_AES_128;
10645 +#if (DPAA_VERSION >= 11)
10646 + params.cipher_suite = SECY_GCM_AES_256;
10647 +#endif /* (DPAA_VERSION >= 11) */
10648 +
10649 +	rx_sc_dev = fm_macsec_secy_create_rxsc(selected_macsec_priv->fm_ms_secy,
10650 +					       &params);
+	if (unlikely(!rx_sc_dev)) {
+		pr_err("fm_macsec_secy_create_rxsc failed\n");
+		return -EBUSY;
+	}
10651 +
10652 + fm_macsec_secy_get_rxsc_phys_id(selected_macsec_priv->fm_ms_secy,
10653 + rx_sc_dev,
10654 + &sc_phys_id);
10655 +
10656 + selected_macsec_priv->rx_sc_dev[sc_phys_id] = rx_sc_dev;
10657 +
10658 + return sc_phys_id;
10659 +}
10660 +
10661 +static int create_rx_sa(struct generic_msg *gen)
10662 +{
10663 + struct create_rx_sa *c_rx_sa;
10664 + struct macsec_priv_s *selected_macsec_priv;
10665 + struct rx_sc_dev *selected_rx_sc_dev;
10666 + macsec_sa_key_t sak;
10667 + int rv;
10668 +
10669 + c_rx_sa = &(gen->payload.c_rx_sa);
10670 +
10671 + if (unlikely(c_rx_sa->sak_len > 32)) {
10672 + pr_warn("size of secure key is greater than 32 bytes!\n");
10673 + return -EINVAL;
10674 + }
10675 + rv = copy_from_user(&sak,
10676 + c_rx_sa->sak,
10677 + c_rx_sa->sak_len);
10678 + if (unlikely(rv != 0)) {
10679 + pr_err("copy_from_user could not copy %i bytes\n", rv);
10680 + return -EFAULT;
10681 + }
10682 +
10683 + if (c_rx_sa->macsec_id < 0 ||
10684 + c_rx_sa->macsec_id >= FM_MAX_NUM_OF_MACS)
10685 + return -EINVAL;
10686 +
10687 + selected_macsec_priv = macsec_priv[c_rx_sa->macsec_id];
10688 +
10689 + if (c_rx_sa->rx_sc_id < 0 || c_rx_sa->rx_sc_id >= NUM_OF_RX_SC)
10690 + return -EINVAL;
10691 +
10692 + selected_rx_sc_dev = selected_macsec_priv->rx_sc_dev[c_rx_sa->rx_sc_id];
10693 +
10694 + rv = fm_macsec_secy_create_rx_sa(selected_macsec_priv->fm_ms_secy,
10695 + selected_rx_sc_dev,
10696 + c_rx_sa->an,
10697 + c_rx_sa->lpn,
10698 + sak);
10699 + if (unlikely(rv < 0)) {
10700 + pr_err("fm_macsec_secy_create_rx_sa failed\n");
10701 + return -EBUSY;
10702 + }
10703 +
10704 + return 0;
10705 +}
10706 +
10707 +static int modify_rx_sa_key(struct generic_msg *gen)
10708 +{
10709 + struct modify_rx_sa_key *rx_sa_key;
10710 + struct macsec_priv_s *selected_macsec_priv;
10711 + struct rx_sc_dev *selected_rx_sc;
10712 + macsec_sa_key_t sa_key;
10713 + int rv;
10714 +
10715 + rx_sa_key = &(gen->payload.modify_rx_sa_key);
10716 +
10717 + if (rx_sa_key->macsec_id < 0 ||
10718 + rx_sa_key->macsec_id >= FM_MAX_NUM_OF_MACS)
10719 + return -EINVAL;
10720 + selected_macsec_priv = macsec_priv[rx_sa_key->macsec_id];
10721 +
10722 + if (rx_sa_key->rx_sc_id < 0 || rx_sa_key->rx_sc_id >= NUM_OF_RX_SC)
10723 + return -EINVAL;
10724 + selected_rx_sc = selected_macsec_priv->rx_sc_dev[rx_sa_key->rx_sc_id];
10725 +
10726 + /* set macsec_priv field */
10727 + selected_macsec_priv->an = rx_sa_key->an;
10728 +
10729 + if (unlikely(rx_sa_key->sak_len > 32)) {
10730 + pr_warn("size of secure key is greater than 32 bytes!\n");
10732 + return -EINVAL;
10733 + }
10734 +
10735 + rv = copy_from_user(&sa_key,
10736 + rx_sa_key->sak,
10737 + rx_sa_key->sak_len);
10738 + if (unlikely(rv != 0)) {
10739 + pr_err("copy_from_user could not copy %i bytes\n", rv);
10740 + return -EFAULT;
10741 + }
10742 +
10743 + rv = fm_macsec_secy_rxsa_modify_key(selected_macsec_priv->fm_ms_secy,
10744 + selected_rx_sc,
10745 + rx_sa_key->an,
10746 + sa_key);
10747 + if (unlikely(rv < 0))
10748 + pr_err("error while modifying the rx sa key\n");
10749 +
10750 + return rv;
10751 +}
10752 +
10753 +static int update_npn(struct generic_msg *gen)
10754 +{
10755 + struct update_npn *update_npn;
10756 + struct macsec_priv_s *selected_macsec_priv;
10757 + struct rx_sc_dev *selected_rx_sc_dev;
10758 + int err;
10759 +
10760 + update_npn = &(gen->payload.update_npn);
10761 +
10762 + if (update_npn->macsec_id < 0 ||
10763 + update_npn->macsec_id >= FM_MAX_NUM_OF_MACS)
10764 + return -EINVAL;
10765 + selected_macsec_priv = macsec_priv[update_npn->macsec_id];
10766 +
10767 + if (update_npn->rx_sc_id < 0 || update_npn->rx_sc_id >= NUM_OF_RX_SC)
10768 + return -EINVAL;
10769 +
10770 + selected_rx_sc_dev =
10771 + selected_macsec_priv->rx_sc_dev[update_npn->rx_sc_id];
10772 +
10773 + err = fm_macsec_secy_rxsa_update_next_pn(
10774 + selected_macsec_priv->fm_ms_secy,
10775 + selected_rx_sc_dev,
10776 + update_npn->an,
10777 + update_npn->pn);
10778 + if (unlikely(err < 0)) {
10779 + pr_err("fm_macsec_secy_rxsa_update_next_pn failed\n");
10780 + return -EBUSY;
10781 + }
10782 +
10783 + return 0;
10784 +}
10785 +
10786 +static int update_lpn(struct generic_msg *gen)
10787 +{
10788 + struct update_lpn *update_lpn;
10789 + struct macsec_priv_s *selected_macsec_priv;
10790 + struct rx_sc_dev *selected_rx_sc_dev;
10791 + int err;
10792 +
10793 + update_lpn = &(gen->payload.update_lpn);
10794 +
10795 + if (update_lpn->macsec_id < 0 ||
10796 + update_lpn->macsec_id >= FM_MAX_NUM_OF_MACS)
10797 + return -EINVAL;
10798 + selected_macsec_priv = macsec_priv[update_lpn->macsec_id];
10799 +
10800 + if (update_lpn->rx_sc_id < 0 || update_lpn->rx_sc_id >= NUM_OF_RX_SC)
10801 + return -EINVAL;
10802 + selected_rx_sc_dev =
10803 + selected_macsec_priv->rx_sc_dev[update_lpn->rx_sc_id];
10804 +
10805 + err = fm_macsec_secy_rxsa_update_lowest_pn(
10806 + selected_macsec_priv->fm_ms_secy,
10807 + selected_rx_sc_dev,
10808 + update_lpn->an,
10809 + update_lpn->pn);
10810 + if (unlikely(err < 0)) {
10811 + pr_err("fm_macsec_secy_rxsa_update_lowest_pn failed\n");
10812 + return -EBUSY;
10813 + }
10814 +
10815 + return 0;
10816 +}
10817 +
10818 +static int activate_rx_sa(struct generic_msg *gen)
10819 +{
10820 + struct activate_rx_sa *a_rx_sa;
10821 + struct macsec_priv_s *selected_macsec_priv;
10822 + struct rx_sc_dev *selected_rx_sc_dev;
10823 + int err;
10824 +
10825 + a_rx_sa = &(gen->payload.a_rx_sa);
10826 +
10827 + if (a_rx_sa->macsec_id < 0 ||
10828 + a_rx_sa->macsec_id >= FM_MAX_NUM_OF_MACS) {
10829 + return -EINVAL;
10830 + }
10831 + selected_macsec_priv = macsec_priv[a_rx_sa->macsec_id];
10832 +
10833 + if (a_rx_sa->rx_sc_id < 0 || a_rx_sa->rx_sc_id >= NUM_OF_RX_SC)
10834 + return -EINVAL;
10835 + selected_rx_sc_dev = selected_macsec_priv->rx_sc_dev[a_rx_sa->rx_sc_id];
10836 +
10837 + err = fm_macsec_secy_rxsa_enable_receive(
10838 + selected_macsec_priv->fm_ms_secy,
10839 + selected_rx_sc_dev,
10840 + a_rx_sa->an);
10841 + if (unlikely(err < 0)) {
10842 + pr_err("fm_macsec_secy_rxsa_enable_receive failed\n");
10843 + return -EBUSY;
10844 + }
10845 +
10846 + return 0;
10847 +}
10848 +
10849 +static int get_tx_sc_phys_id(struct generic_msg *gen, uint32_t *sc_id)
10850 +{
10851 + struct macsec_priv_s *selected_macsec_priv;
10852 + int err;
10853 +
10854 + if (gen->payload.macsec_id < 0 ||
10855 + gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) {
10856 + return -EINVAL;
10857 + }
10858 + selected_macsec_priv = macsec_priv[gen->payload.macsec_id];
10859 +
10860 + err = fm_macsec_secy_get_txsc_phys_id(selected_macsec_priv->fm_ms_secy,
10861 + sc_id);
10862 +
10863 + if (unlikely(err < 0)) {
10864 + pr_err("fm_macsec_secy_get_txsc_phys_id failed\n");
10865 + return err;
10866 + }
10867 +
10868 + return 0;
10869 +}
10870 +
10871 +static int get_rx_sc_phys_id(struct generic_msg *gen, uint32_t *sc_id)
10872 +{
10873 + struct get_rx_sc_id *get_rx_sc_id;
10874 + struct macsec_priv_s *selected_macsec_priv;
10875 + struct rx_sc_dev *selected_rx_sc_dev;
10876 + int err;
10877 +
10878 + get_rx_sc_id = &(gen->payload.get_rx_sc_id);
10879 +
10880 + if (get_rx_sc_id->macsec_id < 0 ||
10881 + get_rx_sc_id->macsec_id >= FM_MAX_NUM_OF_MACS) {
10882 + return -EINVAL;
10883 + }
10884 + selected_macsec_priv = macsec_priv[get_rx_sc_id->macsec_id];
10885 +
10886 + if (get_rx_sc_id->rx_sc_id < 0 ||
10887 + get_rx_sc_id->rx_sc_id >= NUM_OF_RX_SC)
10888 + return -EINVAL;
10889 + selected_rx_sc_dev =
10890 + selected_macsec_priv->rx_sc_dev[get_rx_sc_id->rx_sc_id];
10891 +
10892 + err = fm_macsec_secy_get_rxsc_phys_id(selected_macsec_priv->fm_ms_secy,
10893 + selected_rx_sc_dev,
10894 + sc_id);
10895 + if (unlikely(err < 0)) {
10896 + pr_err("fm_macsec_secy_get_rxsc_phys_id failed\n");
10897 + return err;
10898 + }
10899 +
10900 + return 0;
10901 +}
10902 +
10903 +static int get_macsec_revision(struct generic_msg *gen, int *macsec_revision)
10904 +{
10905 + struct macsec_priv_s *selected_macsec_priv;
10906 + int err;
10907 +
10908 + if (gen->payload.macsec_id < 0 ||
10909 + gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) {
10910 + return -EINVAL;
10911 + }
10912 + selected_macsec_priv = macsec_priv[gen->payload.macsec_id];
10913 +
10914 + err = fm_macsec_get_revision(selected_macsec_priv->fm_macsec,
10915 + macsec_revision);
10916 + if (unlikely(err < 0)) {
10917 + pr_err("fm_macsec_get_revision failed\n");
10918 + return err;
10919 + }
10920 +
10921 + return 0;
10922 +}
10923 +
10924 +static int rx_sa_disable(struct generic_msg *gen)
10925 +{
10926 + struct disable_rx_sa *disable_rx_sa;
10927 + struct macsec_priv_s *selected_macsec_priv;
10928 + struct rx_sc_dev *selected_rx_sc_dev;
10929 + int err;
10930 +
10931 + disable_rx_sa = &(gen->payload.d_rx_sa);
10932 +
10933 + if (disable_rx_sa->macsec_id < 0 ||
10934 + disable_rx_sa->macsec_id >= FM_MAX_NUM_OF_MACS) {
10935 + return -EINVAL;
10936 + }
10937 + selected_macsec_priv = macsec_priv[disable_rx_sa->macsec_id];
10938 +
10939 + if (disable_rx_sa->rx_sc_id < 0 ||
10940 + disable_rx_sa->rx_sc_id >= NUM_OF_RX_SC)
10941 + return -EINVAL;
10942 + selected_rx_sc_dev =
10943 + selected_macsec_priv->rx_sc_dev[disable_rx_sa->rx_sc_id];
10944 +
10945 + err = fm_macsec_secy_rxsa_disable_receive(
10946 + selected_macsec_priv->fm_ms_secy,
10947 + selected_rx_sc_dev,
10948 + selected_macsec_priv->an);
10949 +
10950 + if (unlikely(err < 0)) {
10951 + pr_err("fm_macsec_secy_rxsa_disable_receive failed\n");
10952 + return err;
10953 + }
10954 +
10955 + return 0;
10956 +}
10957 +
10958 +static int rx_sa_delete(struct generic_msg *gen)
10959 +{
10960 + struct delete_rx_sa *delete_rx_sa;
10961 + struct macsec_priv_s *selected_macsec_priv;
10962 + struct rx_sc_dev *selected_rx_sc_dev;
10963 + int err;
10964 +
10965 + delete_rx_sa = &(gen->payload.del_rx_sa);
10966 +
10967 + if (delete_rx_sa->macsec_id < 0 ||
10968 + delete_rx_sa->macsec_id >= FM_MAX_NUM_OF_MACS) {
10969 + return -EINVAL;
10970 + }
10971 + selected_macsec_priv = macsec_priv[delete_rx_sa->macsec_id];
10972 +
10973 + if (delete_rx_sa->rx_sc_id < 0 ||
10974 + delete_rx_sa->rx_sc_id >= NUM_OF_RX_SC)
10975 + return -EINVAL;
10976 + selected_rx_sc_dev =
10977 + selected_macsec_priv->rx_sc_dev[delete_rx_sa->rx_sc_id];
10978 +
10979 + err = fm_macsec_secy_delete_rx_sa(selected_macsec_priv->fm_ms_secy,
10980 + selected_rx_sc_dev,
10981 + selected_macsec_priv->an);
10982 +
10983 + if (unlikely(err < 0)) {
10984 + pr_err("fm_macsec_secy_delete_rx_sa failed\n");
10985 + return err;
10986 + }
10987 +
10988 + return 0;
10989 +}
10990 +
10991 +static int rx_sc_delete(struct generic_msg *gen)
10992 +{
10993 + struct delete_rx_sc *delete_rx_sc;
10994 + struct macsec_priv_s *selected_macsec_priv;
10995 + struct rx_sc_dev *selected_rx_sc_dev;
10996 + int err;
10997 +
10998 + delete_rx_sc = &(gen->payload.del_rx_sc);
10999 +
11000 + if (delete_rx_sc->macsec_id < 0 ||
11001 + delete_rx_sc->macsec_id >= FM_MAX_NUM_OF_MACS) {
11002 + return -EINVAL;
11003 + }
11004 + selected_macsec_priv = macsec_priv[delete_rx_sc->macsec_id];
11005 +
11006 + if (delete_rx_sc->rx_sc_id < 0 ||
11007 + delete_rx_sc->rx_sc_id >= NUM_OF_RX_SC)
11008 + return -EINVAL;
11009 + selected_rx_sc_dev =
11010 + selected_macsec_priv->rx_sc_dev[delete_rx_sc->rx_sc_id];
11011 +
11012 + err = fm_macsec_secy_delete_rxsc(selected_macsec_priv->fm_ms_secy,
11013 + selected_rx_sc_dev);
11014 +
11015 + if (unlikely(err < 0)) {
11016 + pr_err("fm_macsec_secy_delete_rxsc failed\n");
11017 + return err;
11018 + }
11019 +
11020 + return 0;
11021 +}
11022 +
11023 +static int tx_sa_delete(struct generic_msg *gen)
11024 +{
11025 + struct macsec_priv_s *selected_macsec_priv;
11026 + int err;
11027 +
11028 + if (gen->payload.del_tx_sa.macsec_id < 0 ||
11029 + gen->payload.del_tx_sa.macsec_id >= FM_MAX_NUM_OF_MACS) {
11030 + return -EINVAL;
11031 + }
11032 + selected_macsec_priv = macsec_priv[gen->payload.del_tx_sa.macsec_id];
11033 +
11034 + err = fm_macsec_secy_delete_tx_sa(selected_macsec_priv->fm_ms_secy,
11035 + selected_macsec_priv->an);
11036 +
11037 + if (unlikely(err < 0)) {
11038 + pr_err("fm_macsec_secy_delete_tx_sa failed\n");
11039 + return err;
11040 + }
11041 +
11042 + return 0;
11043 +}
11044 +
11045 +static int disable_secy(struct generic_msg *gen, int *macsec_id)
11046 +{
11047 + struct macsec_priv_s *selected_macsec_priv;
11048 + int err;
11049 +
11050 + if (gen->payload.macsec_id < 0 ||
11051 + gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) {
11052 + return -EINVAL;
11053 + }
11054 + selected_macsec_priv = macsec_priv[gen->payload.macsec_id];
11055 + *macsec_id = gen->payload.macsec_id;
11056 +
11057 + err = fm_macsec_secy_free(selected_macsec_priv->fm_ms_secy);
11058 + selected_macsec_priv->fm_ms_secy = NULL;
11059 +
11060 + if (unlikely(err < 0)) {
11061 + pr_err("fm_macsec_secy_free failed\n");
11062 + return err;
11063 + }
11064 +
11065 + return 0;
11066 +}
11067 +
11068 +static int disable_macsec(struct generic_msg *gen, int *macsec_id)
11069 +{
11070 + struct macsec_priv_s *selected_macsec_priv;
11071 + int err;
11072 +
11073 + if (gen->payload.macsec_id < 0 ||
11074 + gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) {
11075 + return -EINVAL;
11076 + }
11077 +
11078 + selected_macsec_priv =
11079 + macsec_priv[gen->payload.macsec_id];
11080 + *macsec_id = gen->payload.macsec_id;
11081 +
11082 + err = fm_macsec_disable(selected_macsec_priv->fm_macsec);
11083 + err += fm_macsec_free(selected_macsec_priv->fm_macsec);
11084 + selected_macsec_priv->fm_macsec = NULL;
11085 +
11086 + if (unlikely(err < 0)) {
11087 + pr_err("macsec disable failed\n");
11088 + return err;
11089 + }
11090 +
11091 + return 0;
11092 +
11093 +}
11094 +
11095 +static int disable_all(struct generic_msg *gen, int *macsec_id)
11096 +{
11097 + struct macsec_priv_s *selected_macsec_priv;
11098 + struct rx_sc_dev *selected_rx_sc_dev;
11099 + int err = 0, i;
11100 +
11101 + if (gen->payload.macsec_id < 0 ||
11102 + gen->payload.macsec_id >= FM_MAX_NUM_OF_MACS) {
11103 + return -EINVAL;
11104 + }
11105 +
11106 + selected_macsec_priv = macsec_priv[gen->payload.macsec_id];
11107 + *macsec_id = gen->payload.macsec_id;
11108 +
11109 + for (i = 0; i < NUM_OF_RX_SC; i++) {
11110 + selected_rx_sc_dev = selected_macsec_priv->rx_sc_dev[i];
11111 +
11112 + if (!selected_rx_sc_dev)
11113 + continue;
11114 +
11115 + err += fm_macsec_secy_rxsa_disable_receive(
11116 + selected_macsec_priv->fm_ms_secy,
11117 + selected_rx_sc_dev,
11118 + selected_macsec_priv->an);
11119 +
11120 + err += fm_macsec_secy_delete_rx_sa(
11121 + selected_macsec_priv->fm_ms_secy,
11122 + selected_rx_sc_dev,
11123 + selected_macsec_priv->an);
11124 +
11125 + err += fm_macsec_secy_delete_rxsc(
11126 + selected_macsec_priv->fm_ms_secy,
11127 + selected_rx_sc_dev);
11128 + }
11129 +
11130 + err += fm_macsec_secy_delete_tx_sa(
11131 + selected_macsec_priv->fm_ms_secy,
11132 + selected_macsec_priv->an);
11133 +
11134 + err += fm_macsec_secy_free(selected_macsec_priv->fm_ms_secy);
11135 + selected_macsec_priv->fm_ms_secy = NULL;
11136 +
11137 + err += fm_macsec_disable(selected_macsec_priv->fm_macsec);
11138 +
11139 + err += fm_macsec_free(selected_macsec_priv->fm_macsec);
11140 + selected_macsec_priv->fm_macsec = NULL;
11141 +
11142 + if (unlikely(err < 0)) {
11143 + pr_err("macsec disable failed\n");
11144 + return err;
11145 + }
11146 +
11147 + return 0;
11148 +}
11149 +
11150 +static inline void macsec_setup_ingress(struct macsec_priv_s *macsec_priv,
11151 + struct dpa_fq *fq,
11152 + const struct qman_fq *template)
11153 +{
11154 + fq->fq_base = *template;
11155 + fq->net_dev = macsec_priv->net_dev;
11156 +
11157 + fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
11158 + fq->channel = macsec_priv->channel;
11159 +}
11160 +
11161 +static inline void macsec_setup_egress(struct macsec_priv_s *macsec_priv,
11162 + struct dpa_fq *fq,
11163 + struct fm_port *port,
11164 + const struct qman_fq *template)
11165 +{
11166 + fq->fq_base = *template;
11167 + fq->net_dev = macsec_priv->net_dev;
11168 +
11169 + if (port) {
11170 + fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
11171 + fq->channel = (uint16_t)fm_get_tx_port_channel(port);
11172 + } else {
11173 + fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
11174 + }
11175 +}
11176 +
11177 +/* At the moment, we don't create recycle queues. */
11178 +static void macsec_fq_setup(struct macsec_priv_s *macsec_priv,
11179 + const struct dpa_fq_cbs_t *fq_cbs,
11180 + struct fm_port *tx_port)
11181 +{
11182 + struct dpa_fq *fq;
11183 + int egress_cnt = 0, conf_cnt = 0;
11184 +
11185 + /* Initialize each FQ in the list */
11186 + list_for_each_entry(fq, &macsec_priv->dpa_fq_list, list) {
11187 + switch (fq->fq_type) {
11188 + /* Normal TX queues */
11189 + case FQ_TYPE_TX:
11190 + macsec_setup_egress(macsec_priv, fq, tx_port,
11191 + &fq_cbs->egress_ern);
11192 + /* If we have more Tx queues than the number of cores,
11193 + * just ignore the extra ones.
11194 + */
11195 + if (egress_cnt < MACSEC_ETH_TX_QUEUES)
11196 + macsec_priv->egress_fqs[egress_cnt++] =
11197 + &fq->fq_base;
11198 + break;
11199 + case FQ_TYPE_TX_CONFIRM:
11200 + BUG_ON(!macsec_priv->mac_dev);
11201 + macsec_setup_ingress(macsec_priv, fq, &fq_cbs->tx_defq);
11202 + break;
11203 + /* TX confirm multiple queues */
11204 + case FQ_TYPE_TX_CONF_MQ:
11205 + BUG_ON(!macsec_priv->mac_dev);
11206 + macsec_setup_ingress(macsec_priv, fq, &fq_cbs->tx_defq);
11207 + macsec_priv->conf_fqs[conf_cnt++] = &fq->fq_base;
11208 + break;
11209 + case FQ_TYPE_TX_ERROR:
11210 + BUG_ON(!macsec_priv->mac_dev);
11211 + macsec_setup_ingress(macsec_priv, fq, &fq_cbs->tx_errq);
11212 + break;
11213 + default:
11214 + dev_warn(macsec_priv->net_dev->dev.parent,
11215 + "Unknown FQ type detected!\n");
11216 + break;
11217 + }
11218 + }
11219 +
11220 + /* The number of Tx queues may be smaller than the number of cores, if
11221 + * the Tx queue range is specified in the device tree instead of being
11222 + * dynamically allocated.
11223 + * Make sure all CPUs receive a corresponding Tx queue.
11224 + */
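+	/* For example, with 2 Tx FQs and MACSEC_ETH_TX_QUEUES == 4, the loop
+	 * below leaves egress_fqs[] = { fq0, fq1, fq0, fq1 }.
+	 */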
11225 + while (egress_cnt < MACSEC_ETH_TX_QUEUES) {
11226 + list_for_each_entry(fq, &macsec_priv->dpa_fq_list, list) {
11227 + if (fq->fq_type != FQ_TYPE_TX)
11228 + continue;
11229 + macsec_priv->egress_fqs[egress_cnt++] = &fq->fq_base;
11230 + if (egress_cnt == MACSEC_ETH_TX_QUEUES)
11231 + break;
11232 + }
11233 + }
11234 +
11235 +}
11236 +
11237 +static const struct fqid_cell tx_fqids[] = {
11238 + {0, MACSEC_ETH_TX_QUEUES}
11239 +};
11240 +
11241 +static const struct fqid_cell tx_confirm_fqids[] = {
11242 + {0, MACSEC_ETH_TX_QUEUES}
11243 +};
11244 +
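+/* Keeping these counters per cpu means the hot transmit/receive paths need
+ * no locks or atomics; a reader (such as the macsec ethtool statistics) is
+ * then expected to sum the per_cpu_ptr() values over all possible cpus.
+ */
+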
11245 +/* Allocate percpu priv. This is used to keep track of rx and tx packets on
11246 + * each cpu (note that the number of queues is equal to the number of cpus,
11247 + * so there is one queue per cpu).
11248 + */
11249 +static void alloc_priv(struct macsec_percpu_priv_s *percpu_priv,
11250 + struct macsec_priv_s *macsec_priv, struct device *dev)
11251 +{
11252 +	int i;
11253 +
11254 +	macsec_priv->percpu_priv = alloc_percpu(*macsec_priv->percpu_priv);
11255 +
11256 +	if (unlikely(macsec_priv->percpu_priv == NULL)) {
11257 +		dev_err(dev, "alloc_percpu() failed\n");
11258 +		dpa_fq_free(dev, &macsec_priv->dpa_fq_list);
11259 +		return;
11260 +	}
11261 +
11262 + for_each_possible_cpu(i) {
11263 + percpu_priv = per_cpu_ptr(macsec_priv->percpu_priv, i);
11264 + memset(percpu_priv, 0, sizeof(*percpu_priv));
11265 + }
11266 +
11267 +}
11268 +
11269 +/* On RX, we only need to record whether each frame was MACsec-protected
11270 + * or not. Statistics regarding this will be printed in a log file.
11271 + */
11272 +static int macsec_rx_hook(void *ptr, struct net_device *net_dev, u32 fqid)
11273 +{
11274 +
11275 + struct qm_fd *rx_fd = (struct qm_fd *)ptr;
11276 + struct macsec_percpu_priv_s *percpu_priv_m;
11277 + struct macsec_priv_s *selected_macsec_priv;
11278 +
11279 + selected_macsec_priv = macsec_priv[net_dev->ifindex - 1];
11280 +
11281 + percpu_priv_m = raw_cpu_ptr(selected_macsec_priv->percpu_priv);
11282 +
11283 + if ((rx_fd->status & FM_FD_STAT_RX_MACSEC) != 0) {
11284 + if (netif_msg_hw(selected_macsec_priv) && net_ratelimit())
11285 +			netdev_warn(net_dev, "FD status = 0x%08x\n",
11286 + rx_fd->status & FM_FD_STAT_RX_MACSEC);
11287 + percpu_priv_m->rx_macsec++;
11288 + }
11289 +
11290 + return DPAA_ETH_CONTINUE;
11291 +}
11292 +
11293 +/* Split TX traffic. If encryption is enabled, send packets on the dedicated
11294 + * QMan frame queues; otherwise, let them be handled by dpa eth. Also, keep
11295 + * track of the number of packets that leave through the "macsec" queues.
11296 + */
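+/* Hook contract, as relied on below: returning DPAA_ETH_CONTINUE hands the
+ * skb back to the regular dpaa-eth transmit path, while DPAA_ETH_STOLEN
+ * means the hook consumed the skb (enqueued or freed it) and dpaa-eth must
+ * not touch it again.
+ */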
11297 +static enum dpaa_eth_hook_result macsec_tx_hook(struct sk_buff *skb,
11298 + struct net_device *net_dev)
11299 +{
11300 + struct dpa_priv_s *dpa_priv;
11301 + struct qm_fd fd;
11302 + struct macsec_percpu_priv_s *macsec_percpu_priv;
11303 + struct dpa_percpu_priv_s *dpa_percpu_priv;
11304 + int i, err = 0;
11305 + int *countptr, offset = 0;
11306 + const bool nonlinear = skb_is_nonlinear(skb);
11307 + struct qman_fq *egress_fq;
11308 + struct macsec_priv_s *selected_macsec_priv;
11309 +
11310 + selected_macsec_priv = macsec_priv[net_dev->ifindex - 1];
11311 +
11312 + if (!selected_macsec_priv->net_dev ||
11313 + (selected_macsec_priv->en_state != SECY_ENABLED) ||
11314 + (ntohs(skb->protocol) == ETH_P_PAE))
11315 + return DPAA_ETH_CONTINUE;
11316 +
11317 + dpa_priv = netdev_priv(net_dev);
11318 + /* Non-migratable context, safe to use raw_cpu_ptr */
11319 + macsec_percpu_priv = raw_cpu_ptr(selected_macsec_priv->percpu_priv);
11320 + dpa_percpu_priv = raw_cpu_ptr(dpa_priv->percpu_priv);
11321 +
11322 + countptr = raw_cpu_ptr(dpa_priv->dpa_bp->percpu_count);
11323 +
11324 + clear_fd(&fd);
11325 +
11326 +#ifdef CONFIG_FSL_DPAA_1588
11327 + if (dpa_priv->tsu && dpa_priv->tsu->valid &&
11328 + dpa_priv->tsu->hwts_tx_en_ioctl)
11329 + fd.cmd |= FM_FD_CMD_UPD;
11330 +#endif
11331 +#ifdef CONFIG_FSL_DPAA_TS
11332 + if (unlikely(dpa_priv->ts_tx_en &&
11333 + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
11334 + fd.cmd |= FM_FD_CMD_UPD;
11335 + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
11336 +#endif /* CONFIG_FSL_DPAA_TS */
11337 +
11338 + /* MAX_SKB_FRAGS is larger than our DPA_SGT_MAX_ENTRIES; make sure
11339 + * we don't feed FMan with more fragments than it supports.
11340 + * Btw, we're using the first sgt entry to store the linear part of
11341 + * the skb, so we're one extra frag short.
11342 + */
11343 + if (nonlinear &&
11344 + likely(skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES)) {
11345 + /* Just create a S/G fd based on the skb */
11346 + err = skb_to_sg_fd(dpa_priv, skb, &fd);
11347 + dpa_percpu_priv->tx_frag_skbuffs++;
11348 + } else {
11349 + /* Make sure we have enough headroom to accommodate private
11350 + * data, parse results, etc. Normally this shouldn't happen if
11351 + * we're here via the standard kernel stack.
11352 + */
11353 + if (unlikely(skb_headroom(skb) < dpa_priv->tx_headroom)) {
11354 + struct sk_buff *skb_new;
11355 +
11356 + skb_new = skb_realloc_headroom(skb,
11357 + dpa_priv->tx_headroom);
11358 + if (unlikely(!skb_new)) {
11359 + dev_kfree_skb(skb);
11360 + dpa_percpu_priv->stats.tx_errors++;
11361 + return DPAA_ETH_STOLEN;
11362 + }
11363 + dev_kfree_skb(skb);
11364 + skb = skb_new;
11365 + }
11366 +
11367 + /* We're going to store the skb backpointer at the beginning
11368 + * of the data buffer, so we need a privately owned skb
11369 + */
11370 +
11371 + /* Code borrowed from skb_unshare(). */
11372 + if (skb_cloned(skb)) {
11373 + struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
11374 + kfree_skb(skb);
11375 + skb = nskb;
11376 + /* skb_copy() has now linearized the skbuff. */
11377 + } else if (unlikely(nonlinear)) {
11378 + /* We are here because the egress skb contains
11379 + * more fragments than we support. In this case,
11380 + * we have no choice but to linearize it ourselves.
11381 + */
11382 + err = __skb_linearize(skb);
11383 + }
11384 + if (unlikely(!skb || err < 0)) {
11385 + /* Common out-of-memory error path */
11386 + goto enomem;
11387 + }
11388 +
11389 + /* Finally, create a contig FD from this skb */
11390 + err = skb_to_contig_fd(dpa_priv, skb, &fd, countptr, &offset);
11391 + }
11392 + if (unlikely(err < 0))
11393 + goto skb_to_fd_failed;
11394 +
11395 + if (fd.bpid != 0xff) {
11396 + skb_recycle(skb);
11397 + /* skb_recycle() reserves NET_SKB_PAD as skb headroom,
11398 + * but we need the skb to look as if returned by build_skb().
11399 + * We need to manually adjust the tailptr as well.
11400 + */
11401 + skb->data = skb->head + offset;
11402 + skb_reset_tail_pointer(skb);
11403 +
11404 + (*countptr)++;
11405 + dpa_percpu_priv->tx_returned++;
11406 + }
11407 +
11408 + egress_fq = selected_macsec_priv->egress_fqs[smp_processor_id()];
11409 + if (fd.bpid == 0xff)
11410 + fd.cmd |= qman_fq_fqid(macsec_get_tx_conf_queue(
11411 + selected_macsec_priv,
11412 + egress_fq));
11413 +
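+	/* Retry only while qman_enqueue() keeps returning -EBUSY (no room in
+	 * the enqueue ring for now); any other result, success included,
+	 * breaks out of the bounded loop.
+	 */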
11414 + for (i = 0; i < 100000; i++) {
11415 + err = qman_enqueue(egress_fq, &fd, 0);
11416 + if (err != -EBUSY)
11417 + break;
11418 + }
11419 +
11420 + if (unlikely(err < 0)) {
11421 + dpa_percpu_priv->stats.tx_errors++;
11422 + dpa_percpu_priv->stats.tx_fifo_errors++;
11423 + goto xmit_failed;
11424 + }
11425 +
11426 + macsec_percpu_priv->tx_macsec++;
11427 + dpa_percpu_priv->stats.tx_packets++;
11428 + dpa_percpu_priv->stats.tx_bytes += dpa_fd_length(&fd);
11429 +
11430 + net_dev->trans_start = jiffies;
11431 + return DPAA_ETH_STOLEN;
11432 +
11433 +xmit_failed:
11434 + if (fd.bpid != 0xff) {
11435 + (*countptr)--;
11436 + dpa_percpu_priv->tx_returned--;
11437 + dpa_fd_release(net_dev, &fd);
11438 + dpa_percpu_priv->stats.tx_errors++;
11439 + return DPAA_ETH_STOLEN;
11440 + }
11441 + _dpa_cleanup_tx_fd(dpa_priv, &fd);
11442 +skb_to_fd_failed:
11443 +enomem:
11444 + dpa_percpu_priv->stats.tx_errors++;
11445 + dev_kfree_skb(skb);
11446 + return DPAA_ETH_STOLEN;
11447 +}
11448 +
11449 +/* Allocate and initialize macsec priv and fqs. Also, create a debugfs entry
11450 + * for a specific interface. Iterate through the existing devices in order to
11451 + * find the ones we want to have macsec for.
11452 + */
11453 +static int macsec_setup(void)
11454 +{
11455 + struct net_device *net_dev;
11456 + struct macsec_percpu_priv_s *percpu_priv = NULL;
11457 + struct dpa_priv_s *dpa_priv = NULL;
11458 + struct dpa_fq *dpa_fq;
11459 + struct device *dev = NULL;
11460 + int err, i, j, macsec_id;
11461 +
11462 + pr_debug("Entering: %s\n", __func__);
11463 +
11464 + for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
11465 + macsec_priv[i] = kzalloc(sizeof(*(macsec_priv[i])), GFP_KERNEL);
11466 +
11467 + if (unlikely(macsec_priv[i] == NULL)) {
11468 + int j;
11469 + for (j = 0; j < i; j++)
11470 + kfree(macsec_priv[j]);
11471 + pr_err("could not allocate\n");
11472 + return -ENOMEM;
11473 + }
11474 + }
11475 +
11476 + for (i = 0; i < macsec_ifs_cnt; i++) {
11477 + net_dev = first_net_device(&init_net);
11478 + macsec_id = net_dev->ifindex - 1;
11479 + while (net_dev) {
11480 + macsec_id = net_dev->ifindex - 1;
11481 +
11482 +			/* skip non-matching devices early, to keep the code
11483 +			 * readable and under 80 characters per line
11484 + */
11485 + if (strcmp(net_dev->name, macsec_ifs[i]) != 0) {
11486 + net_dev = next_net_device(net_dev);
11487 + continue;
11488 + }
11489 +
11490 + /* strcmp(net_dev->name, macsec_ifs[i]) == 0 */
11491 + macsec_priv[macsec_id]->en_state = MACSEC_DISABLED;
11492 + macsec_priv[macsec_id]->net_dev = net_dev;
11493 + dpa_priv = netdev_priv(net_dev);
11494 + macsec_priv[macsec_id]->mac_dev = dpa_priv->mac_dev;
11495 + macsec_priv[macsec_id]->channel = dpa_priv->channel;
11496 + dev = net_dev->dev.parent;
11497 +
11498 + INIT_LIST_HEAD(&macsec_priv[macsec_id]->dpa_fq_list);
11499 +
11500 + dpa_fq = dpa_fq_alloc(dev,
11501 + tx_fqids->start, tx_fqids->count,
11502 + &macsec_priv[macsec_id]->dpa_fq_list,
11503 + FQ_TYPE_TX);
11504 + if (unlikely(dpa_fq == NULL)) {
11505 + dev_err(dev, "dpa_fq_alloc() failed\n");
11506 +				err = -ENOMEM;
+				goto _error;
11507 + }
11508 +
11509 + dpa_fq = dpa_fq_alloc(dev,
11510 + tx_confirm_fqids->start,
11511 + tx_confirm_fqids->count,
11512 + &macsec_priv[macsec_id]->dpa_fq_list,
11513 + FQ_TYPE_TX_CONF_MQ);
11514 + if (unlikely(dpa_fq == NULL)) {
11515 + dev_err(dev, "dpa_fq_alloc() failed\n");
11516 +				err = -ENOMEM;
+				goto _error;
11517 + }
11518 +
11519 + macsec_fq_setup(macsec_priv[macsec_id], &private_fq_cbs,
11520 + macsec_priv[macsec_id]->mac_dev->port_dev[TX]);
11521 +
11522 + alloc_priv(percpu_priv, macsec_priv[macsec_id], dev);
11523 +
11524 + break;
11525 + }
11526 + if (macsec_priv[macsec_id]->net_dev == NULL) {
11527 + pr_err("Interface unknown\n");
11528 + err = -EINVAL;
11529 + goto _error;
11530 + }
11531 +
11532 + /* setup specific ethtool ops for macsec */
11533 + macsec_setup_ethtool_ops(net_dev);
11534 + }
11535 + return 0;
11536 +
11537 +_error:
11538 +	for (j = 0; j < i; j++) {
11539 + net_dev = first_net_device(&init_net);
11540 + while (net_dev) {
11541 + macsec_id = net_dev->ifindex - 1;
11542 + if (strcmp(net_dev->name, macsec_ifs[j]) != 0) {
11543 + net_dev = next_net_device(net_dev);
11544 + continue;
11545 + }
11546 + dpa_fq_free(net_dev->dev.parent,
11547 + &macsec_priv[macsec_id]->dpa_fq_list);
11548 + break;
11549 + }
11550 + macsec_restore_ethtool_ops(macsec_priv[j]->net_dev);
11551 + kfree(macsec_priv[j]);
11552 + }
11553 + for (j = i; j < FM_MAX_NUM_OF_MACS; j++)
11554 + kfree(macsec_priv[j]);
11555 + return err;
11556 +}
11557 +
11558 +static int enable_macsec(struct generic_msg *gen)
11559 +{
11560 + struct fm_macsec_params macsec_params;
11561 + int rv, macsec_id;
11562 + void __iomem *mac_dev_base_addr;
11563 + uintptr_t macsec_reg_addr;
11564 + struct macsec_data *mdata;
11565 + char if_name[IFNAMSIZ];
11566 + struct macsec_priv_s *selected_macsec_priv;
11567 +
11568 + mdata = &gen->payload.en_macsec;
11569 +
11570 + if (unlikely(mdata->if_name_length > IFNAMSIZ)) {
11571 + pr_err("interface name too long\n");
11572 + return -EINVAL;
11573 + }
11574 +
11575 + rv = copy_from_user(if_name, mdata->if_name, mdata->if_name_length);
11576 + if (unlikely(rv != 0)) {
11577 + pr_err("copy_from_user could not copy %i bytes\n", rv);
11578 + return -EFAULT;
11579 + }
11580 +
11581 + macsec_id = ifname_to_id(if_name);
11582 + if (macsec_id < 0 || macsec_id >= FM_MAX_NUM_OF_MACS) {
11583 + pr_err("error on converting to macsec_id\n");
11584 + return -ENXIO;
11585 + }
11586 +
11587 + selected_macsec_priv = macsec_priv[macsec_id];
11588 +
11589 + if (selected_macsec_priv->fm_macsec) {
11590 + pr_err("macsec has already been configured\n");
11591 + return -EINVAL;
11592 + }
11593 +
11594 + mac_dev_base_addr = selected_macsec_priv->mac_dev->vaddr;
11595 +
11596 + macsec_reg_addr = (uintptr_t)(mac_dev_base_addr + MACSEC_REG_OFFSET);
11597 +
11598 + memset(&macsec_params, 0, sizeof(macsec_params));
11599 + macsec_params.fm_h = (handle_t)selected_macsec_priv->mac_dev->fm;
11600 + macsec_params.guest_mode = FALSE;
11601 + /* The MACsec offset relative to the memory mapped MAC device */
11602 + macsec_params.non_guest_params.base_addr = macsec_reg_addr;
11603 + macsec_params.non_guest_params.fm_mac_h =
11604 + (handle_t)selected_macsec_priv->mac_dev->get_mac_handle(
11605 + selected_macsec_priv->mac_dev);
11606 + macsec_params.non_guest_params.exception_f = macsec_exception;
11607 + macsec_params.non_guest_params.app_h = selected_macsec_priv->mac_dev;
11608 +
11609 + selected_macsec_priv->fm_macsec = fm_macsec_config(&macsec_params);
11610 + if (unlikely(selected_macsec_priv->fm_macsec == NULL))
11611 + return -EINVAL;
11612 +
11613 + if (mdata->config_unknown_sci_treatment) {
11614 + rv = fm_macsec_config_unknown_sci_frame_treatment(
11615 + selected_macsec_priv->fm_macsec,
11616 + mdata->unknown_sci_treatment);
11617 + if (unlikely(rv < 0))
11618 + goto _return_fm_macsec_free;
11619 + }
11620 +
11621 + if (mdata->config_invalid_tag_treatment) {
11622 + rv = fm_macsec_config_invalid_tags_frame_treatment(
11623 + selected_macsec_priv->fm_macsec,
11624 + mdata->deliver_uncontrolled);
11625 + if (unlikely(rv < 0))
11626 + goto _return_fm_macsec_free;
11627 + }
11628 +
11629 + if (mdata->config_kay_frame_treatment) {
11630 + rv = fm_macsec_config_kay_frame_treatment(
11631 + selected_macsec_priv->fm_macsec,
11632 + mdata->discard_uncontrolled);
11633 + if (unlikely(rv < 0))
11634 + goto _return_fm_macsec_free;
11635 + }
11636 +
11637 + if (mdata->config_untag_treatment) {
11638 + rv = fm_macsec_config_untag_frame_treatment(
11639 + selected_macsec_priv->fm_macsec,
11640 + mdata->untag_treatment);
11641 + if (unlikely(rv < 0))
11642 + goto _return_fm_macsec_free;
11643 + }
11644 +
11645 + if (mdata->config_pn_exhaustion_threshold) {
11646 + rv = fm_macsec_config_pn_exhaustion_threshold(
11647 + selected_macsec_priv->fm_macsec,
11648 + mdata->pn_threshold);
11649 + if (unlikely(rv < 0))
11650 + goto _return_fm_macsec_free;
11651 + }
11652 +
11653 + if (mdata->config_keys_unreadable) {
11654 + rv = fm_macsec_config_keys_unreadable(
11655 + selected_macsec_priv->fm_macsec);
11656 + if (unlikely(rv < 0))
11657 + goto _return_fm_macsec_free;
11658 + }
11659 +
11660 + if (mdata->config_sectag_without_sci) {
11661 + rv = fm_macsec_config_sectag_without_sci(
11662 + selected_macsec_priv->fm_macsec);
11663 + if (unlikely(rv < 0))
11664 + goto _return_fm_macsec_free;
11665 + }
11666 +
11667 + if (mdata->config_exception) {
11668 + rv = fm_macsec_config_exception(selected_macsec_priv->fm_macsec,
11669 + mdata->exception,
11670 + mdata->enable_exception);
11671 + if (unlikely(rv < 0))
11672 + goto _return_fm_macsec_free;
11673 + }
11674 +
11675 + rv = fm_macsec_init(selected_macsec_priv->fm_macsec);
11676 + if (unlikely(rv < 0))
11677 + goto _return_fm_macsec_free;
11678 +
11679 + rv = fm_macsec_enable(selected_macsec_priv->fm_macsec);
11680 + if (unlikely(rv < 0))
11681 + goto _return_fm_macsec_free;
11682 +
11683 + return macsec_id;
11684 +
11685 +_return_fm_macsec_free:
11686 + fm_macsec_free(selected_macsec_priv->fm_macsec);
11687 + selected_macsec_priv->fm_macsec = NULL;
11688 + return rv;
11689 +}
11690 +
11691 +static int send_result(struct nlmsghdr *nlh, int pid, int result)
11692 +{
11693 + int res;
11694 + struct sk_buff *skb_out;
11695 + size_t msg_size = sizeof(result);
11696 +
11697 + skb_out = nlmsg_new(msg_size, 0);
11698 + if (unlikely(!skb_out)) {
11699 + pr_err("Failed to allocate new skb\n");
11700 + goto _ret_err;
11701 + }
11702 +
11703 + nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, msg_size, 0);
11704 +	if (unlikely(!nlh)) {
11705 +		pr_err("nlmsg_put() failed\n");
+		nlmsg_free(skb_out);
11706 +		goto _ret_err;
11707 +	}
11708 +
11709 + NETLINK_CB(skb_out).dst_group = 0; /* not in mcast group */
11710 + memcpy(nlmsg_data(nlh), &result, msg_size);
11711 +
11712 + res = nlmsg_unicast(nl_sk, skb_out, pid);
11713 + if (unlikely(res < 0)) {
11714 + pr_err("Error while sending back to user\n");
11715 + goto _ret_err;
11716 + }
11717 +
11718 + return 0;
11719 +
11720 +_ret_err:
11721 + return -1;
11722 +}
11723 +
11724 +/* The kernel communicates with user space through netlink sockets. This
11725 + * function implements the kernel side of the protocol. The generic struct is
11726 + * used for easier handling of the code, which would otherwise be duplicated.
11727 + */
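+/* A minimal user-space counterpart might look as follows (an illustrative
+ * sketch only: NL_EXAMPLE stands in for whatever protocol number this module
+ * registers via netlink_kernel_create(), and struct generic_msg must match
+ * the kernel-side layout):
+ *
+ *	int fd = socket(AF_NETLINK, SOCK_RAW, NL_EXAMPLE);
+ *	struct sockaddr_nl sa = { .nl_family = AF_NETLINK,
+ *				  .nl_pid = getpid() };
+ *	struct sockaddr_nl kern = { .nl_family = AF_NETLINK };
+ *	struct { struct nlmsghdr nlh; struct generic_msg gen; } req;
+ *
+ *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
+ *	memset(&req, 0, sizeof(req));
+ *	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.gen));
+ *	req.nlh.nlmsg_pid = getpid();
+ *	req.gen.chf = GET_REVISION;
+ *	req.gen.payload.macsec_id = 0;
+ *	sendto(fd, &req, req.nlh.nlmsg_len, 0,
+ *	       (struct sockaddr *)&kern, sizeof(kern));
+ *
+ * Each request is answered through send_result() with a single int payload:
+ * NACK on failure, otherwise ACK, an id, or a revision number, depending on
+ * the command.
+ */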
11728 +static void switch_messages(struct sk_buff *skb)
11729 +{
11730 + struct nlmsghdr *nlh;
11731 + int pid, rv;
11732 + enum msg_type cmd;
11733 +
11734 + struct dpa_fq *dpa_fq, *tmp;
11735 + struct device *dev;
11736 +
11737 + struct dpaa_eth_hooks_s macsec_dpaa_eth_hooks;
11738 +
11739 + struct generic_msg *check;
11740 + int macsec_id = 0;
11741 + uint32_t sc_id, macsec_revision;
11742 + macsec_an_t ret_an;
11743 + int i;
11744 +
11745 + pr_debug("Entering: %s\n", __func__);
11746 +
11747 + if (unlikely(!skb)) {
11748 + pr_err("skb null\n");
11749 + return;
11750 + }
11751 +
11752 + nlh = (struct nlmsghdr *)skb->data;
11753 +	check = kmalloc(sizeof(*check), GFP_KERNEL);
+	if (unlikely(!check))
+		return;
11754 +	memcpy(check, nlmsg_data(nlh), sizeof(*check));
11755 + pid = nlh->nlmsg_pid; /*pid of sending process */
11756 + cmd = check->chf;
11757 +
11758 + switch (cmd) {
11759 + case ENABLE_MACSEC:
11760 + pr_debug("ENABLE_MACSEC\n");
11761 +
11762 + macsec_id = enable_macsec(check);
11763 +
11764 + if (macsec_id >= 0)
11765 + macsec_priv[macsec_id]->en_state = MACSEC_ENABLED;
11766 +
11767 + rv = send_result(nlh, pid, (macsec_id < 0) ? NACK : macsec_id);
11768 + if (unlikely(rv < 0))
11769 + goto _release;
11770 +
11771 + break;
11772 +
11773 + case SET_EXCEPTION:
11774 + pr_debug("SET_EXCEPTION\n");
11775 +
11776 + rv = set_macsec_exception(check);
11777 +
11778 + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
11779 + if (unlikely(rv < 0))
11780 + goto _release;
11781 +
11782 + break;
11783 +
11784 + case ENABLE_SECY:
11785 + pr_debug("ENABLE_SECY\n");
11786 +
11787 + rv = enable_secy(check, &macsec_id);
11788 +
11789 + if (rv == 0)
11790 + macsec_priv[macsec_id]->en_state = SECY_ENABLED;
11791 +
11792 + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
11793 + if (unlikely(rv < 0))
11794 + goto _release;
11795 +
11796 + break;
11797 +
11798 + case GET_REVISION:
11799 + pr_debug("GET_REVISION\n");
11800 +
11801 + rv = get_macsec_revision(check, &macsec_revision);
11802 +
11803 + rv = send_result(nlh, pid,
11804 + (rv < 0) ? NACK : (int)macsec_revision);
11805 + if (unlikely(rv < 0))
11806 + goto _release;
11807 +
11808 + break;
11809 +
11810 + case GET_TXSC_PHYS_ID:
11811 + pr_debug("GET_TXSC_PHYS_ID\n");
11812 +
11813 + rv = get_tx_sc_phys_id(check, &sc_id);
11814 +
11815 + rv = send_result(nlh, pid, (rv < 0) ? NACK : (int)sc_id);
11816 + if (unlikely(rv < 0))
11817 + goto _release;
11818 +
11819 + break;
11820 +
11821 + case TX_SA_CREATE:
11822 + pr_debug("TX_SA_CREATE\n");
11823 +
11824 + rv = create_tx_sa(check);
11825 +
11826 + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
11827 + if (unlikely(rv < 0))
11828 + goto _release;
11829 +
11830 + break;
11831 +
11832 + case MODIFY_TXSA_KEY:
11833 + pr_debug("MODIFY_TXSA_KEY\n");
11834 +
11835 + rv = modify_tx_sa_key(check);
11836 +
11837 + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
11838 + if (unlikely(rv < 0))
11839 + goto _release;
11840 +
11841 + break;
11842 +
11843 + case TX_SA_ACTIVATE:
11844 + pr_debug("TX_SA_ACTIVATE\n");
11845 +
11846 + rv = activate_tx_sa(check);
11847 +
11848 + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
11849 + if (unlikely(rv < 0))
11850 + goto _release;
11851 +
11852 + break;
11853 +
11854 + case GET_TXSA_AN:
11855 + pr_debug("GET_TXSA_AN\n");
11856 +
11857 + rv = get_tx_sa_an(check, &ret_an);
11858 +
11859 + rv = send_result(nlh, pid, (rv < 0) ? NACK : (int)ret_an);
11860 + if (unlikely(rv < 0))
11861 + goto _release;
11862 +
11863 + break;
11864 +
11865 + case RX_SC_CREATE:
11866 + pr_debug("RX_SC_CREATE\n");
11867 +
11868 +		rv = create_rx_sc(check);
11869 +
11870 +		rv = send_result(nlh, pid, (rv < 0) ? NACK : rv);
11871 + if (unlikely(rv < 0))
11872 + goto _release;
11873 +
11874 + break;
11875 +
11876 + case GET_RXSC_PHYS_ID:
11877 + pr_debug("GET_RXSC_PHYS_ID\n");
11878 +
11879 + rv = get_rx_sc_phys_id(check, &sc_id);
11880 +
11881 + rv = send_result(nlh, pid, (rv < 0) ? NACK : (int)sc_id);
11882 + if (unlikely(rv < 0))
11883 + goto _release;
11884 +
11885 + break;
11886 +
11887 + case RX_SA_CREATE:
11888 + pr_debug("RX_SA_CREATE\n");
11889 +
11890 + rv = create_rx_sa(check);
11891 +
11892 + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
11893 + if (unlikely(rv < 0))
11894 + goto _release;
11895 +
11896 + break;
11897 +
11898 + case MODIFY_RXSA_KEY:
11899 + pr_debug("MODIFY_RXSA_KEY\n");
11900 +
11901 + rv = modify_rx_sa_key(check);
11902 +
11903 + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
11904 + if (unlikely(rv < 0))
11905 + goto _release;
11906 +
11907 + break;
11908 +
11909 + case UPDATE_NPN:
11910 + pr_debug("UPDATE_NPN\n");
11911 +
11912 + rv = update_npn(check);
11913 +
11914 + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
11915 + if (unlikely(rv < 0))
11916 + goto _release;
11917 +
11918 + break;
11919 +
11920 + case UPDATE_LPN:
11921 + pr_debug("UPDATE_LPN\n");
11922 +
11923 + rv = update_lpn(check);
11924 +
11925 + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
11926 + if (unlikely(rv < 0))
11927 + goto _release;
11928 +
11929 + break;
11930 +
11931 + case RX_SA_ACTIVATE:
11932 + pr_debug("RX_SA_ACTIVATE\n");
11933 +
11934 + rv = activate_rx_sa(check);
11935 +
11936 + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
11937 + if (unlikely(rv < 0))
11938 + goto _release;
11939 +
11940 + break;
11941 +
11942 + case RX_SA_DISABLE:
11943 + pr_debug("RX_SA_DISABLE\n");
11944 +
11945 + rv = rx_sa_disable(check);
11946 +
11947 + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
11948 + if (unlikely(rv < 0))
11949 + goto _release;
11950 +
11951 + break;
11952 +
11953 + case RX_SA_DELETE:
11954 + pr_debug("RX_SA_DELETE\n");
11955 +
11956 + rv = rx_sa_delete(check);
11957 +
11958 + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
11959 + if (unlikely(rv < 0))
11960 + goto _release;
11961 +
11962 + break;
11963 +
11964 + case RX_SC_DELETE:
11965 + pr_debug("RX_SC_DELETE\n");
11966 +
11967 + rv = rx_sc_delete(check);
11968 +
11969 + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
11970 + if (unlikely(rv < 0))
11971 + goto _release;
11972 +
11973 + break;
11974 +
11975 + case TX_SA_DELETE:
11976 + pr_debug("TX_SA_DELETE\n");
11977 +
11978 + rv = tx_sa_delete(check);
11979 +
11980 + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
11981 + if (unlikely(rv < 0))
11982 + goto _release;
11983 +
11984 + break;
11985 +
11986 + case DISABLE_SECY:
11987 + pr_debug("DISABLE_SECY\n");
11988 +
11989 + rv = disable_secy(check, &macsec_id);
11990 +
11991 + if (unlikely(rv < 0))
11992 + macsec_priv[macsec_id]->en_state = SECY_ENABLED;
11993 + else
11994 + macsec_priv[macsec_id]->en_state = MACSEC_ENABLED;
11995 +
11996 + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
11997 + if (unlikely(rv < 0))
11998 + goto _release;
11999 +
12000 + break;
12001 +
12002 + case DISABLE_MACSEC:
12003 + pr_debug("DISABLE_MACSEC\n");
12004 +
12005 + rv = disable_macsec(check, &macsec_id);
12006 +
12007 + macsec_priv[macsec_id]->en_state = MACSEC_DISABLED;
12008 +
12009 + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
12010 + if (unlikely(rv < 0))
12011 + goto _release;
12012 +
12013 + break;
12014 +
12015 + case DISABLE_ALL:
12016 + pr_debug("DISABLE_ALL\n");
12017 +
12018 + rv = disable_all(check, &macsec_id);
12019 +
12020 + macsec_priv[macsec_id]->en_state = MACSEC_DISABLED;
12021 +
12022 + rv = send_result(nlh, pid, (rv < 0) ? NACK : ACK);
12023 + if (unlikely(rv < 0))
12024 + goto _release;
12025 + break;
12026 +
12027 + default:
12028 + /* should never get here */
12029 +		pr_err("unknown message type\n");
12030 + break;
12031 + }
12032 +
12033 + return;
12034 +
12035 +_release:
12036 + for (i = 0; i < FM_MAX_NUM_OF_MACS; i++)
12037 + deinit_macsec(i);
12038 +
12039 + /* Reset the TX hooks */
12040 + memset(&macsec_dpaa_eth_hooks, 0, sizeof(macsec_dpaa_eth_hooks));
12041 + fsl_dpaa_eth_set_hooks(&macsec_dpaa_eth_hooks);
12042 +
12043 + for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
12044 +
12045 + if (!macsec_priv[i]->net_dev)
12046 + continue;
12047 +
12048 + free_percpu(macsec_priv[i]->percpu_priv);
12049 +
12050 + /* Delete the fman queues */
12051 + list_for_each_entry_safe(dpa_fq,
12052 + tmp,
12053 + &macsec_priv[i]->dpa_fq_list,
12054 + list) {
12055 + dev = dpa_fq->net_dev->dev.parent;
12056 + rv = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
12057 + if (unlikely(rv < 0))
12058 +				pr_err("_dpa_fq_free() = %d\n", rv);
12059 + }
12060 +
12061 + macsec_restore_ethtool_ops(macsec_priv[i]->net_dev);
12062 + kfree(macsec_priv[i]);
12063 + macsec_priv[i] = NULL;
12064 + }
12065 +
12066 + kfree(check);
12067 +
12068 + netlink_kernel_release(nl_sk);
12069 +}
12070 +
12071 +struct netlink_kernel_cfg ms_cfg = {
12072 + .groups = 1,
12073 + .input = switch_messages,
12074 +};
12075 +
12076 +static int __init macsec_init(void)
12077 +{
12078 + struct dpaa_eth_hooks_s macsec_dpaa_eth_hooks;
12079 + int ret, i;
12080 +
12081 + pr_debug("Entering: %s\n", __func__);
12082 +
12083 +	/* Bail out if any interface we want macsec on is unknown. */
12084 + parse_ifs();
12085 + for (i = 0; i < macsec_ifs_cnt; i++) {
12086 + if (!macsec_ifs[i]) {
12087 + pr_err("Interface unknown\n");
12088 + return -EINVAL;
12089 + }
12090 + }
12091 +
12092 +	/* Create the netlink socket used to exchange messages with userspace. */
12093 + nl_sk = netlink_kernel_create(&init_net, NETLINK_USER, &ms_cfg);
12094 + if (unlikely(!nl_sk)) {
12095 + pr_err("Error creating socket.\n");
12096 + ret = -ENOMEM;
12097 + goto _release;
12098 + }
12099 +
12100 + ret = macsec_setup();
12101 + if (unlikely(ret != 0)) {
12102 + pr_err("Setup of macsec failed\n");
12103 + goto _release;
12104 + }
12105 +
12106 + /* set dpaa hooks for default queues */
12107 + memset(&macsec_dpaa_eth_hooks, 0, sizeof(macsec_dpaa_eth_hooks));
12108 + macsec_dpaa_eth_hooks.tx = (dpaa_eth_egress_hook_t)(macsec_tx_hook);
12109 + macsec_dpaa_eth_hooks.rx_default =
12110 + (dpaa_eth_ingress_hook_t)(macsec_rx_hook);
12111 +
12112 + fsl_dpaa_eth_set_hooks(&macsec_dpaa_eth_hooks);
12113 +
12114 + return 0;
12115 +
12116 +_release:
12117 + memset(&macsec_dpaa_eth_hooks, 0, sizeof(macsec_dpaa_eth_hooks));
12118 + fsl_dpaa_eth_set_hooks(&macsec_dpaa_eth_hooks);
12119 + netlink_kernel_release(nl_sk);
12120 + return ret;
12121 +}
12122 +
12123 +static void __exit macsec_exit(void)
12124 +{
12125 + int _errno;
12126 + struct dpa_fq *dpa_fq, *tmp;
12127 + struct device *dev;
12128 + struct dpaa_eth_hooks_s macsec_dpaa_eth_hooks;
12129 + int i;
12130 +
12131 + pr_debug("exiting macsec module\n");
12132 +
12133 + for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
12134 +		/* If the release has already been done in switch_messages(),
12135 +		 * due to errors, there is nothing left to free here
12136 +		 */
12137 +		if (!macsec_priv[i] || !macsec_priv[i]->net_dev) {
12138 + pr_debug("no release needed\n");
12139 + continue;
12140 + }
12141 + deinit_macsec(i);
12142 + }
12143 +
12144 + /* Reset the TX hooks before exiting */
12145 + memset(&macsec_dpaa_eth_hooks, 0, sizeof(macsec_dpaa_eth_hooks));
12146 + fsl_dpaa_eth_set_hooks(&macsec_dpaa_eth_hooks);
12147 +
12148 + for (i = 0; i < FM_MAX_NUM_OF_MACS; i++) {
12149 +
12150 +		if (!macsec_priv[i] || !macsec_priv[i]->net_dev) {
12151 + pr_debug("no release needed\n");
12152 + continue;
12153 + }
12154 +
12155 + free_percpu(macsec_priv[i]->percpu_priv);
12156 +
12157 + /* Delete the fman queues */
12158 + list_for_each_entry_safe(dpa_fq, tmp,
12159 + &macsec_priv[i]->dpa_fq_list, list) {
12160 + if (dpa_fq) {
12161 + dev = dpa_fq->net_dev->dev.parent;
12162 + _errno = _dpa_fq_free(dev,
12163 + (struct qman_fq *)dpa_fq);
12164 + if (unlikely(_errno < 0))
12165 +					pr_err("_dpa_fq_free() = %d\n", _errno);
12166 + }
12167 + }
12168 +
12169 + /* restore ethtool ops to the previous private ones */
12170 + macsec_restore_ethtool_ops(macsec_priv[i]->net_dev);
12171 +
12172 + kfree(macsec_priv[i]);
12173 + }
12174 +
12175 + netlink_kernel_release(nl_sk);
12176 +
12177 + pr_debug("exited macsec module\n");
12178 +}
12179 +
12180 +module_init(macsec_init);
12181 +module_exit(macsec_exit);
12182 +
12183 +MODULE_LICENSE("Dual BSD/GPL");
12184 --- /dev/null
12185 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_macsec.h
12186 @@ -0,0 +1,294 @@
12187 +/* Copyright 2015 Freescale Semiconductor Inc.
12188 + *
12189 + * Redistribution and use in source and binary forms, with or without
12190 + * modification, are permitted provided that the following conditions are met:
12191 + * * Redistributions of source code must retain the above copyright
12192 + * notice, this list of conditions and the following disclaimer.
12193 + * * Redistributions in binary form must reproduce the above copyright
12194 + * notice, this list of conditions and the following disclaimer in the
12195 + * documentation and/or other materials provided with the distribution.
12196 + * * Neither the name of Freescale Semiconductor nor the
12197 + * names of its contributors may be used to endorse or promote products
12198 + * derived from this software without specific prior written permission.
12199 + *
12200 + *
12201 + * ALTERNATIVELY, this software may be distributed under the terms of the
12202 + * GNU General Public License ("GPL") as published by the Free Software
12203 + * Foundation, either version 2 of that License or (at your option) any
12204 + * later version.
12205 + *
12206 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
12207 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
12208 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
12209 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
12210 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
12211 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
12212 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
12213 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12214 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12215 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12216 + */
12217 +
12218 +#ifndef __DPAA_ETH_MACSEC_H
12219 +#define __DPAA_ETH_MACSEC_H
12220 +
12221 +#include "mac.h"
12222 +
12223 +#define NETLINK_USER 31
12224 +#define MAX_NUM_OF_SECY 1
12225 +#define MAX_LEN 100
12226 +#define FM_FD_STAT_RX_MACSEC 0x00800000
12227 +#define MACSEC_ETH_TX_QUEUES NR_CPUS
12228 +#define MACSEC_REG_OFFSET 0x800
12229 +#define ACK 0
12230 +#define NACK -1
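+
+/* Replies are sent back with send_result(): NACK (-1) on failure, ACK (0)
+ * on success, or, for getters such as GET_REVISION, GET_TXSC_PHYS_ID and
+ * GET_TXSA_AN, the requested value itself.
+ */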
12231 +
12232 +extern const struct dpa_fq_cbs_t private_fq_cbs;
12233 +
12234 +extern int dpa_macsec_get_sset_count(struct net_device *net_dev, int type);
12235 +extern void
12236 +dpa_macsec_get_ethtool_stats(struct net_device *net_dev,
12237 + struct ethtool_stats *stats, u64 *data);
12238 +extern void
12239 +dpa_macsec_get_strings(struct net_device *net_dev,
12240 + u32 stringset, u8 *data);
12241 +
12242 +enum msg_type {ENABLE_MACSEC,
12243 + SET_EXCEPTION,
12244 + ENABLE_SECY,
12245 + TX_SA_CREATE,
12246 + TX_SA_ACTIVATE,
12247 + RX_SC_CREATE,
12248 + RX_SA_CREATE,
12249 + RX_SA_ACTIVATE,
12250 + RX_SA_DISABLE,
12251 + RX_SA_DELETE,
12252 + RX_SC_DELETE,
12253 + TX_SA_DELETE,
12254 + DISABLE_MACSEC,
12255 + DISABLE_SECY,
12256 + DISABLE_ALL,
12257 + GET_REVISION,
12258 + UPDATE_NPN,
12259 + UPDATE_LPN,
12260 + GET_TXSC_PHYS_ID,
12261 + GET_RXSC_PHYS_ID,
12262 + GET_TXSA_AN,
12263 + MODIFY_TXSA_KEY,
12264 + MODIFY_RXSA_KEY,
12265 +};
12266 +
12267 +enum macsec_enablement {MACSEC_DISABLED, MACSEC_ENABLED, SECY_ENABLED};
12268 +
12269 +struct enable_secy {
12270 + int macsec_id;
12271 +
12272 + u64 sci; /* MAC address(48b) + port_id(16b) */
12273 +
12274 + bool config_insertion_mode;
12275 + fm_macsec_sci_insertion_mode sci_insertion_mode;
12276 +
12277 + bool config_protect_frames;
12278 + bool protect_frames;
12279 +
12280 + bool config_replay_window;
12281 + bool replay_protect;
12282 + uint32_t replay_window;
12283 +
12284 + bool config_validation_mode;
12285 + fm_macsec_valid_frame_behavior validate_frames;
12286 +
12287 + bool config_confidentiality;
12288 + bool confidentiality_enable;
12289 + uint32_t confidentiality_offset;
12290 +
12291 + bool config_point_to_point;
12292 +
12293 + bool config_exception;
12294 + bool enable_exception;
12295 + fm_macsec_secy_exception exception;
12296 +
12297 + bool config_event;
12298 + bool enable_event;
12299 + fm_macsec_secy_event event;
12300 +};
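+
+/* A minimal sketch (not part of the original patch) of composing the SCI
+ * above, assuming the usual 802.1AE layout with the MAC address in the
+ * upper 48 bits and the port id in the lower 16; make_sci() is a
+ * hypothetical helper:
+ *
+ *	static inline u64 make_sci(const u8 mac[ETH_ALEN], u16 port_id)
+ *	{
+ *		u64 sci = 0;
+ *		int i;
+ *
+ *		for (i = 0; i < ETH_ALEN; i++)
+ *			sci = (sci << 8) | mac[i];
+ *		return (sci << 16) | port_id;
+ *	}
+ */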
12301 +
12302 +struct macsec_data {
12303 + char *if_name;
12304 + size_t if_name_length; /* including string terminator */
12305 +
12306 + bool config_unknown_sci_treatment;
12307 + fm_macsec_unknown_sci_frame_treatment unknown_sci_treatment;
12308 +
12309 + bool config_invalid_tag_treatment;
12310 + bool deliver_uncontrolled;
12311 +
12312 + bool config_kay_frame_treatment;
12313 + bool discard_uncontrolled;
12314 +
12315 + bool config_untag_treatment;
12316 + fm_macsec_untag_frame_treatment untag_treatment;
12317 +
12318 + bool config_pn_exhaustion_threshold;
12319 + uint32_t pn_threshold;
12320 +
12321 + bool config_keys_unreadable;
12322 +
12323 + bool config_sectag_without_sci;
12324 +
12325 + bool config_exception;
12326 + bool enable_exception;
12327 + fm_macsec_exception exception;
12328 +};
12329 +
12330 +struct set_exception {
12331 + int macsec_id;
12332 + bool enable_exception;
12333 + fm_macsec_exception exception;
12334 +};
12335 +
12336 +struct create_tx_sa {
12337 + int macsec_id;
12338 + u8 an; /* association number */
12339 + u8 *sak; /* secure assoc key */
12340 + u32 sak_len; /* assoc key length */
12341 +};
12342 +
12343 +struct modify_tx_sa_key {
12344 + int macsec_id;
12345 + u8 an; /* association number */
12346 + u8 *sak; /* secure assoc key */
12347 + u32 sak_len; /* assoc key length */
12348 +};
12349 +
12350 +struct activate_tx_sa {
12351 + int macsec_id;
12352 + u8 an; /* association number */
12353 +};
12354 +
12355 +struct create_rx_sc {
12356 + int macsec_id;
12357 + u64 sci;
12358 +};
12359 +
12360 +struct delete_rx_sc {
12361 + int macsec_id;
12362 + u32 rx_sc_id;
12363 +};
12364 +
12365 +struct get_rx_sc_id {
12366 + int macsec_id;
12367 + u32 rx_sc_id;
12368 +};
12369 +
12370 +struct create_rx_sa {
12371 + int macsec_id;
12372 + u32 rx_sc_id;
12373 + u8 an;
12374 + u32 lpn;
12375 + u8 *sak;
12376 + u32 sak_len;
12377 +};
12378 +
12379 +struct activate_rx_sa {
12380 + int macsec_id;
12381 + u32 rx_sc_id;
12382 + u8 an;
12383 +};
12384 +
12385 +struct disable_rx_sa {
12386 + int macsec_id;
12387 + u32 rx_sc_id;
12388 + u8 an;
12389 +};
12390 +
12391 +struct delete_rx_sa {
12392 + int macsec_id;
12393 + u32 rx_sc_id;
12394 + u8 an;
12395 +};
12396 +
12397 +struct delete_tx_sa {
12398 + int macsec_id;
12399 + u32 rx_sc_id;
12400 + u8 an;
12401 +};
12402 +
12403 +struct update_npn {
12404 + int macsec_id;
12405 + u32 rx_sc_id;
12406 + u8 an;
12407 + u32 pn;
12408 +};
12409 +
12410 +struct update_lpn {
12411 + int macsec_id;
12412 + u32 rx_sc_id;
12413 + u8 an;
12414 + u32 pn;
12415 +};
12416 +
12417 +struct modify_rx_sa_key {
12418 + int macsec_id;
12419 + u32 rx_sc_id;
12420 + u8 an;
12421 + u8 *sak;
12422 + u32 sak_len;
12423 +};
12424 +
12425 +struct generic_msg {
12426 + enum msg_type chf;
12427 + union {
12428 + int macsec_id;
12429 + struct macsec_data en_macsec;
12430 + struct enable_secy secy;
12431 + struct create_tx_sa c_tx_sa;
12432 + struct activate_tx_sa a_tx_sa;
12433 + struct create_rx_sc c_rx_sc;
12434 + struct get_rx_sc_id get_rx_sc_id;
12435 + struct create_rx_sa c_rx_sa;
12436 + struct activate_rx_sa a_rx_sa;
12437 + struct disable_rx_sa d_rx_sa;
12438 + struct delete_rx_sa del_rx_sa;
12439 + struct delete_rx_sc del_rx_sc;
12440 + struct delete_tx_sa del_tx_sa;
12441 + struct update_npn update_npn;
12442 + struct update_lpn update_lpn;
12443 + struct modify_tx_sa_key modify_tx_sa_key;
12444 + struct modify_rx_sa_key modify_rx_sa_key;
12445 + struct set_exception set_ex;
12446 + } payload;
12447 +};
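+
+/* A hypothetical userspace counterpart (not part of the original patch):
+ * commands are plain struct generic_msg payloads sent over a NETLINK_USER
+ * socket; an nl_pid of 0 in the destination address designates the kernel:
+ *
+ *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_USER);
+ *	struct sockaddr_nl src = { .nl_family = AF_NETLINK,
+ *				   .nl_pid = getpid() };
+ *	struct sockaddr_nl dst = { .nl_family = AF_NETLINK };
+ *	char buf[NLMSG_SPACE(sizeof(struct generic_msg))] = { 0 };
+ *	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
+ *	struct generic_msg *msg = NLMSG_DATA(nlh);
+ *
+ *	bind(fd, (struct sockaddr *)&src, sizeof(src));
+ *	nlh->nlmsg_len = NLMSG_SPACE(sizeof(*msg));
+ *	nlh->nlmsg_pid = getpid();
+ *	msg->chf = GET_REVISION;
+ *	msg->payload.macsec_id = 0;
+ *	sendto(fd, nlh, nlh->nlmsg_len, 0,
+ *	       (struct sockaddr *)&dst, sizeof(dst));
+ *
+ * The reply carries the MACsec revision on success, or NACK on error.
+ */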
12448 +
12449 +struct macsec_percpu_priv_s {
12450 + u64 rx_macsec;
12451 + u64 tx_macsec;
12452 +};
12453 +
12454 +struct macsec_priv_s {
12455 + struct macsec_percpu_priv_s __percpu *percpu_priv;
12456 +
12457 + struct net_device *net_dev;
12458 + struct mac_device *mac_dev;
12459 +
12460 + struct qman_fq *egress_fqs[MACSEC_ETH_TX_QUEUES];
12461 + struct qman_fq *conf_fqs[MACSEC_ETH_TX_QUEUES];
12462 + struct list_head dpa_fq_list;
12463 + uint32_t msg_enable; /* net_device message level */
12464 + uint16_t channel;
12465 + struct fm_macsec_dev *fm_macsec;
12466 +
12467 + struct fm_macsec_secy_dev *fm_ms_secy;
12468 + uint8_t an;
12469 +
12470 + struct rx_sc_dev *rx_sc_dev[NUM_OF_RX_SC];
12471 + uint8_t *sa_key;
12472 + enum macsec_enablement en_state;
12473 +
12474 + uintptr_t vaddr;
12475 + struct resource *fman_resource;
12476 +};
12477 +
12478 +struct macsec_priv_s *dpa_macsec_get_priv(struct net_device *net_dev);
12479 +
12480 +#endif /* __DPAA_ETH_MACSEC_H */
12481 --- /dev/null
12482 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c
12483 @@ -0,0 +1,381 @@
12484 +/* Copyright 2008-2013 Freescale Semiconductor Inc.
12485 + *
12486 + * Redistribution and use in source and binary forms, with or without
12487 + * modification, are permitted provided that the following conditions are met:
12488 + * * Redistributions of source code must retain the above copyright
12489 + * notice, this list of conditions and the following disclaimer.
12490 + * * Redistributions in binary form must reproduce the above copyright
12491 + * notice, this list of conditions and the following disclaimer in the
12492 + * documentation and/or other materials provided with the distribution.
12493 + * * Neither the name of Freescale Semiconductor nor the
12494 + * names of its contributors may be used to endorse or promote products
12495 + * derived from this software without specific prior written permission.
12496 + *
12497 + *
12498 + * ALTERNATIVELY, this software may be distributed under the terms of the
12499 + * GNU General Public License ("GPL") as published by the Free Software
12500 + * Foundation, either version 2 of that License or (at your option) any
12501 + * later version.
12502 + *
12503 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
12504 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
12505 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
12506 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
12507 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
12508 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
12509 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
12510 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12511 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12512 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12513 + */
12514 +
12515 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
12516 +#define pr_fmt(fmt) \
12517 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
12518 + KBUILD_BASENAME".c", __LINE__, __func__
12519 +#else
12520 +#define pr_fmt(fmt) \
12521 + KBUILD_MODNAME ": " fmt
12522 +#endif
12523 +
12524 +#include <linux/init.h>
12525 +#include <linux/module.h>
12526 +#include <linux/of_platform.h>
12527 +#include "dpaa_eth.h"
12528 +#include "dpaa_eth_common.h"
12529 +#include "dpaa_eth_base.h"
12530 +#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
12531 +#include "mac.h"
12532 +
12533 +#define DPA_DESCRIPTION "FSL DPAA Proxy initialization driver"
12534 +
12535 +MODULE_LICENSE("Dual BSD/GPL");
12536 +
12537 +MODULE_DESCRIPTION(DPA_DESCRIPTION);
12538 +
12539 +static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev);
12540 +#ifdef CONFIG_PM
12541 +
12542 +static int proxy_suspend(struct device *dev)
12543 +{
12544 + struct proxy_device *proxy_dev = dev_get_drvdata(dev);
12545 + struct mac_device *mac_dev = proxy_dev->mac_dev;
12546 + int err = 0;
12547 +
12548 + err = fm_port_suspend(mac_dev->port_dev[RX]);
12549 + if (err)
12550 + goto port_suspend_failed;
12551 +
12552 + err = fm_port_suspend(mac_dev->port_dev[TX]);
12553 + if (err)
12554 + err = fm_port_resume(mac_dev->port_dev[RX]);
12555 +
12556 +port_suspend_failed:
12557 + return err;
12558 +}
12559 +
12560 +static int proxy_resume(struct device *dev)
12561 +{
12562 + struct proxy_device *proxy_dev = dev_get_drvdata(dev);
12563 + struct mac_device *mac_dev = proxy_dev->mac_dev;
12564 + int err = 0;
12565 +
12566 + err = fm_port_resume(mac_dev->port_dev[TX]);
12567 + if (err)
12568 + goto port_resume_failed;
12569 +
12570 + err = fm_port_resume(mac_dev->port_dev[RX]);
12571 + if (err)
12572 + err = fm_port_suspend(mac_dev->port_dev[TX]);
12573 +
12574 +port_resume_failed:
12575 + return err;
12576 +}
12577 +
12578 +static const struct dev_pm_ops proxy_pm_ops = {
12579 + .suspend = proxy_suspend,
12580 + .resume = proxy_resume,
12581 +};
12582 +
12583 +#define PROXY_PM_OPS (&proxy_pm_ops)
12584 +
12585 +#else /* CONFIG_PM */
12586 +
12587 +#define PROXY_PM_OPS NULL
12588 +
12589 +#endif /* CONFIG_PM */
12590 +
12591 +static int dpaa_eth_proxy_probe(struct platform_device *_of_dev)
12592 +{
12593 + int err = 0, i;
12594 + struct device *dev;
12595 + struct device_node *dpa_node;
12596 + struct dpa_bp *dpa_bp;
12597 + struct list_head proxy_fq_list;
12598 + size_t count;
12599 + struct fm_port_fqs port_fqs;
12600 + struct dpa_buffer_layout_s *buf_layout = NULL;
12601 + struct mac_device *mac_dev;
12602 + struct proxy_device *proxy_dev;
12603 +
12604 + dev = &_of_dev->dev;
12605 +
12606 + dpa_node = dev->of_node;
12607 +
12608 + if (!of_device_is_available(dpa_node))
12609 + return -ENODEV;
12610 +
12611 + /* Get the buffer pools assigned to this interface */
12612 + dpa_bp = dpa_bp_probe(_of_dev, &count);
12613 + if (IS_ERR(dpa_bp))
12614 + return PTR_ERR(dpa_bp);
12615 +
12616 + mac_dev = dpa_mac_probe(_of_dev);
12617 + if (IS_ERR(mac_dev))
12618 + return PTR_ERR(mac_dev);
12619 +
12620 + proxy_dev = devm_kzalloc(dev, sizeof(*proxy_dev), GFP_KERNEL);
12621 + if (!proxy_dev) {
12622 + dev_err(dev, "devm_kzalloc() failed\n");
12623 + return -ENOMEM;
12624 + }
12625 +
12626 + proxy_dev->mac_dev = mac_dev;
12627 + dev_set_drvdata(dev, proxy_dev);
12628 +
12629 + /* We have physical ports, so we need to establish
12630 + * the buffer layout.
12631 + */
12632 + buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
12633 + GFP_KERNEL);
12634 + if (!buf_layout) {
12635 + dev_err(dev, "devm_kzalloc() failed\n");
12636 + return -ENOMEM;
12637 + }
12638 + dpa_set_buffers_layout(mac_dev, buf_layout);
12639 +
12640 + INIT_LIST_HEAD(&proxy_fq_list);
12641 +
12642 + memset(&port_fqs, 0, sizeof(port_fqs));
12643 +
12644 + err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true, RX);
12645 + if (!err)
12646 + err = dpa_fq_probe_mac(dev, &proxy_fq_list, &port_fqs, true,
12647 + TX);
12648 + if (err < 0) {
12649 + devm_kfree(dev, buf_layout);
12650 + return err;
12651 + }
12652 +
12653 + /* Proxy initializer - Just configures the MAC on behalf of
12654 + * another partition.
12655 + */
12656 + dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
12657 + buf_layout, dev);
12658 +
12659 + /* Proxy interfaces need to be started, and the allocated
12660 + * memory freed
12661 + */
12662 + devm_kfree(dev, buf_layout);
12663 + devm_kfree(dev, dpa_bp);
12664 +
12665 + /* Free FQ structures */
12666 + devm_kfree(dev, port_fqs.rx_defq);
12667 + devm_kfree(dev, port_fqs.rx_errq);
12668 + devm_kfree(dev, port_fqs.tx_defq);
12669 + devm_kfree(dev, port_fqs.tx_errq);
12670 +
12671 + for_each_port_device(i, mac_dev->port_dev) {
12672 + err = fm_port_enable(mac_dev->port_dev[i]);
12673 + if (err)
12674 + goto port_enable_fail;
12675 + }
12676 +
12677 + dev_info(dev, "probed MAC device with MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
12678 + mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
12679 + mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
12680 +
12681 + return 0; /* Proxy interface initialization ended */
12682 +
12683 +port_enable_fail:
12684 + for_each_port_device(i, mac_dev->port_dev)
12685 + fm_port_disable(mac_dev->port_dev[i]);
12686 + dpa_eth_proxy_remove(_of_dev);
12687 +
12688 + return err;
12689 +}
12690 +
12691 +int dpa_proxy_set_mac_address(struct proxy_device *proxy_dev,
12692 + struct net_device *net_dev)
12693 +{
12694 + struct mac_device *mac_dev;
12695 + int _errno;
12696 +
12697 + mac_dev = proxy_dev->mac_dev;
12698 +
12699 + _errno = mac_dev->change_addr(mac_dev->get_mac_handle(mac_dev),
12700 + net_dev->dev_addr);
12701 + if (_errno < 0)
12702 + return _errno;
12703 +
12704 + return 0;
12705 +}
12706 +EXPORT_SYMBOL(dpa_proxy_set_mac_address);
12707 +
12708 +int dpa_proxy_set_rx_mode(struct proxy_device *proxy_dev,
12709 + struct net_device *net_dev)
12710 +{
12711 + struct mac_device *mac_dev = proxy_dev->mac_dev;
12712 + int _errno;
12713 +
12714 + if (!!(net_dev->flags & IFF_PROMISC) != mac_dev->promisc) {
12715 + mac_dev->promisc = !mac_dev->promisc;
12716 + _errno = mac_dev->set_promisc(mac_dev->get_mac_handle(mac_dev),
12717 + mac_dev->promisc);
12718 + if (unlikely(_errno < 0))
12719 + netdev_err(net_dev, "mac_dev->set_promisc() = %d\n",
12720 + _errno);
12721 + }
12722 +
12723 + _errno = mac_dev->set_multi(net_dev, mac_dev);
12724 + if (unlikely(_errno < 0))
12725 + return _errno;
12726 +
12727 + return 0;
12728 +}
12729 +EXPORT_SYMBOL(dpa_proxy_set_rx_mode);
12730 +
12731 +int dpa_proxy_start(struct net_device *net_dev)
12732 +{
12733 + struct mac_device *mac_dev;
12734 + const struct dpa_priv_s *priv;
12735 + struct proxy_device *proxy_dev;
12736 + int _errno;
12737 + int i;
12738 +
12739 + priv = netdev_priv(net_dev);
12740 + proxy_dev = (struct proxy_device *)priv->peer;
12741 + mac_dev = proxy_dev->mac_dev;
12742 +
12743 + _errno = mac_dev->init_phy(net_dev, mac_dev);
12744 + if (_errno < 0) {
12745 + if (netif_msg_drv(priv))
12746 + netdev_err(net_dev, "init_phy() = %d\n",
12747 + _errno);
12748 + return _errno;
12749 + }
12750 +
12751 + for_each_port_device(i, mac_dev->port_dev) {
12752 + _errno = fm_port_enable(mac_dev->port_dev[i]);
12753 + if (_errno)
12754 + goto port_enable_fail;
12755 + }
12756 +
12757 + _errno = mac_dev->start(mac_dev);
12758 + if (_errno < 0) {
12759 + if (netif_msg_drv(priv))
12760 + netdev_err(net_dev, "mac_dev->start() = %d\n",
12761 + _errno);
12762 + goto port_enable_fail;
12763 + }
12764 +
12765 + return _errno;
12766 +
12767 +port_enable_fail:
12768 + for_each_port_device(i, mac_dev->port_dev)
12769 + fm_port_disable(mac_dev->port_dev[i]);
12770 +
12771 + return _errno;
12772 +}
12773 +EXPORT_SYMBOL(dpa_proxy_start);
12774 +
12775 +int dpa_proxy_stop(struct proxy_device *proxy_dev, struct net_device *net_dev)
12776 +{
12777 + struct mac_device *mac_dev = proxy_dev->mac_dev;
12778 + const struct dpa_priv_s *priv = netdev_priv(net_dev);
12779 + int _errno, i, err;
12780 +
12781 + _errno = mac_dev->stop(mac_dev);
12782 + if (_errno < 0) {
12783 + if (netif_msg_drv(priv))
12784 + netdev_err(net_dev, "mac_dev->stop() = %d\n",
12785 + _errno);
12786 + return _errno;
12787 + }
12788 +
12789 + for_each_port_device(i, mac_dev->port_dev) {
12790 + err = fm_port_disable(mac_dev->port_dev[i]);
12791 + _errno = err ? err : _errno;
12792 + }
12793 +
12794 + if (mac_dev->phy_dev)
12795 + phy_disconnect(mac_dev->phy_dev);
12796 + mac_dev->phy_dev = NULL;
12797 +
12798 + return _errno;
12799 +}
12800 +EXPORT_SYMBOL(dpa_proxy_stop);
12801 +
12802 +static int __cold dpa_eth_proxy_remove(struct platform_device *of_dev)
12803 +{
12804 + struct device *dev = &of_dev->dev;
12805 + struct proxy_device *proxy_dev = dev_get_drvdata(dev);
12806 +
12807 + kfree(proxy_dev);
12808 +
12809 + dev_set_drvdata(dev, NULL);
12810 +
12811 + return 0;
12812 +}
12813 +
12814 +static const struct of_device_id dpa_proxy_match[] = {
12815 + {
12816 + .compatible = "fsl,dpa-ethernet-init"
12817 + },
12818 + {}
12819 +};
12820 +MODULE_DEVICE_TABLE(of, dpa_proxy_match);
12821 +
12822 +static struct platform_driver dpa_proxy_driver = {
12823 + .driver = {
12824 + .name = KBUILD_MODNAME "-proxy",
12825 + .of_match_table = dpa_proxy_match,
12826 + .owner = THIS_MODULE,
12827 + .pm = PROXY_PM_OPS,
12828 + },
12829 + .probe = dpaa_eth_proxy_probe,
12830 + .remove = dpa_eth_proxy_remove
12831 +};
12832 +
12833 +static int __init __cold dpa_proxy_load(void)
12834 +{
12835 + int _errno;
12836 +
12837 + pr_info(DPA_DESCRIPTION "\n");
12838 +
12839 + /* Initialize dpaa_eth mirror values */
12840 + dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
12841 + dpa_max_frm = fm_get_max_frm();
12842 +
12843 + _errno = platform_driver_register(&dpa_proxy_driver);
12844 + if (unlikely(_errno < 0)) {
12845 + pr_err(KBUILD_MODNAME
12846 + ": %s:%hu:%s(): platform_driver_register() = %d\n",
12847 + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
12848 + }
12849 +
12850 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
12851 + KBUILD_BASENAME".c", __func__);
12852 +
12853 + return _errno;
12854 +}
12855 +module_init(dpa_proxy_load);
12856 +
12857 +static void __exit __cold dpa_proxy_unload(void)
12858 +{
12859 + platform_driver_unregister(&dpa_proxy_driver);
12860 +
12861 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
12862 + KBUILD_BASENAME".c", __func__);
12863 +}
12864 +module_exit(dpa_proxy_unload);
12865 --- /dev/null
12866 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
12867 @@ -0,0 +1,1128 @@
12868 +/* Copyright 2012 Freescale Semiconductor Inc.
12869 + *
12870 + * Redistribution and use in source and binary forms, with or without
12871 + * modification, are permitted provided that the following conditions are met:
12872 + * * Redistributions of source code must retain the above copyright
12873 + * notice, this list of conditions and the following disclaimer.
12874 + * * Redistributions in binary form must reproduce the above copyright
12875 + * notice, this list of conditions and the following disclaimer in the
12876 + * documentation and/or other materials provided with the distribution.
12877 + * * Neither the name of Freescale Semiconductor nor the
12878 + * names of its contributors may be used to endorse or promote products
12879 + * derived from this software without specific prior written permission.
12880 + *
12881 + *
12882 + * ALTERNATIVELY, this software may be distributed under the terms of the
12883 + * GNU General Public License ("GPL") as published by the Free Software
12884 + * Foundation, either version 2 of that License or (at your option) any
12885 + * later version.
12886 + *
12887 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
12888 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
12889 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
12890 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
12891 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
12892 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
12893 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
12894 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12895 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12896 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12897 + */
12898 +
12899 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
12900 +#define pr_fmt(fmt) \
12901 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
12902 + KBUILD_BASENAME".c", __LINE__, __func__
12903 +#else
12904 +#define pr_fmt(fmt) \
12905 + KBUILD_MODNAME ": " fmt
12906 +#endif
12907 +
12908 +#include <linux/init.h>
12909 +#include <linux/skbuff.h>
12910 +#include <linux/highmem.h>
12911 +#include <linux/fsl_bman.h>
12912 +
12913 +#include "dpaa_eth.h"
12914 +#include "dpaa_eth_common.h"
12915 +#ifdef CONFIG_FSL_DPAA_1588
12916 +#include "dpaa_1588.h"
12917 +#endif
12918 +#ifdef CONFIG_FSL_DPAA_CEETM
12919 +#include "dpaa_eth_ceetm.h"
12920 +#endif
12921 +
12922 +/* DMA map and add a page frag back into the bpool.
12923 + * @vaddr fragment must have been allocated with netdev_alloc_frag(),
12924 + * specifically for fitting into @dpa_bp.
12925 + */
12926 +static void dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, unsigned long vaddr,
12927 + int *count_ptr)
12928 +{
12929 + struct bm_buffer bmb;
12930 + dma_addr_t addr;
12931 +
12932 + memset(&bmb, 0, sizeof(struct bm_buffer));
12933 +
12934 + addr = dma_map_single(dpa_bp->dev, (void *)vaddr, dpa_bp->size,
12935 + DMA_BIDIRECTIONAL);
12936 + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
12937 + dev_err(dpa_bp->dev, "DMA mapping failed");
12938 + return;
12939 + }
12940 +
12941 + bm_buffer_set64(&bmb, addr);
12942 +
12943 + while (bman_release(dpa_bp->pool, &bmb, 1, 0))
12944 + cpu_relax();
12945 +
12946 + (*count_ptr)++;
12947 +}
12948 +
12949 +static int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp)
12950 +{
12951 + struct bm_buffer bmb[8];
12952 + void *new_buf;
12953 + dma_addr_t addr;
12954 + uint8_t i;
12955 + struct device *dev = dpa_bp->dev;
12956 + struct sk_buff *skb, **skbh;
12957 +
12958 + memset(bmb, 0, sizeof(struct bm_buffer) * 8);
12959 +
12960 + for (i = 0; i < 8; i++) {
12961 + /* We'll prepend the skb back-pointer; can't use the DPA
12962 + * priv space, because FMan will overwrite it (from offset 0)
12963 + * if it ends up being the second, third, etc. fragment
12964 + * in a S/G frame.
12965 +		 * in an S/G frame.
12966 + * We only need enough space to store a pointer, but allocate
12967 + * an entire cacheline for performance reasons.
12968 + */
12969 +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
12970 + new_buf = page_address(alloc_page(GFP_ATOMIC));
12971 +#else
12972 + new_buf = netdev_alloc_frag(SMP_CACHE_BYTES + DPA_BP_RAW_SIZE);
12973 +#endif
12974 + if (unlikely(!new_buf))
12975 + goto netdev_alloc_failed;
12976 + new_buf = PTR_ALIGN(new_buf + SMP_CACHE_BYTES, SMP_CACHE_BYTES);
12977 +
12978 + skb = build_skb(new_buf, DPA_SKB_SIZE(dpa_bp->size) +
12979 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
12980 + if (unlikely(!skb)) {
12981 + put_page(virt_to_head_page(new_buf));
12982 + goto build_skb_failed;
12983 + }
12984 + DPA_WRITE_SKB_PTR(skb, skbh, new_buf, -1);
12985 +
12986 + addr = dma_map_single(dev, new_buf,
12987 + dpa_bp->size, DMA_BIDIRECTIONAL);
12988 + if (unlikely(dma_mapping_error(dev, addr)))
12989 + goto dma_map_failed;
12990 +
12991 + bm_buffer_set64(&bmb[i], addr);
12992 + }
12993 +
12994 +release_bufs:
12995 + /* Release the buffers. In case bman is busy, keep trying
12996 + * until successful. bman_release() is guaranteed to succeed
12997 + * in a reasonable amount of time
12998 + */
12999 + while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0)))
13000 + cpu_relax();
13001 + return i;
13002 +
13003 +dma_map_failed:
13004 + kfree_skb(skb);
13005 +
13006 +build_skb_failed:
13007 +netdev_alloc_failed:
13008 + net_err_ratelimited("dpa_bp_add_8_bufs() failed\n");
13009 + WARN_ONCE(1, "Memory allocation failure on Rx\n");
13010 +
13011 + bm_buffer_set64(&bmb[i], 0);
13012 + /* Avoid releasing a completely null buffer; bman_release() requires
13013 + * at least one buffer.
13014 + */
13015 + if (likely(i))
13016 + goto release_bufs;
13017 +
13018 + return 0;
13019 +}
13020 +
13021 +/* Cold path wrapper over _dpa_bp_add_8_bufs(). */
13022 +static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu)
13023 +{
13024 + int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
13025 + *count_ptr += _dpa_bp_add_8_bufs(dpa_bp);
13026 +}
13027 +
13028 +int dpa_bp_priv_seed(struct dpa_bp *dpa_bp)
13029 +{
13030 + int i;
13031 +
13032 + /* Give each CPU an allotment of "config_count" buffers */
13033 + for_each_possible_cpu(i) {
13034 + int j;
13035 +
13036 + /* Although we access another CPU's counters here
13037 + * we do it at boot time so it is safe
13038 + */
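+		/* Note: buffers are added in batches of 8, so a config_count
+		 * that is not a multiple of 8 is effectively rounded up.
+		 */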
13039 + for (j = 0; j < dpa_bp->config_count; j += 8)
13040 + dpa_bp_add_8_bufs(dpa_bp, i);
13041 + }
13042 + return 0;
13043 +}
13044 +EXPORT_SYMBOL(dpa_bp_priv_seed);
13045 +
13046 +/* Add buffers (pages) for Rx processing whenever the bpool count falls
13047 + * below REFILL_THRESHOLD, refilling up to CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT.
13048 + */
13049 +int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *countptr)
13050 +{
13051 + int count = *countptr;
13052 + int new_bufs;
13053 +
13054 + if (unlikely(count < CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD)) {
13055 + do {
13056 + new_bufs = _dpa_bp_add_8_bufs(dpa_bp);
13057 + if (unlikely(!new_bufs)) {
13058 + /* Avoid looping forever if we've temporarily
13059 + * run out of memory. We'll try again at the
13060 + * next NAPI cycle.
13061 + */
13062 + break;
13063 + }
13064 + count += new_bufs;
13065 + } while (count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT);
13066 +
13067 + *countptr = count;
13068 + if (unlikely(count < CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT))
13069 + return -ENOMEM;
13070 + }
13071 +
13072 + return 0;
13073 +}
13074 +EXPORT_SYMBOL(dpaa_eth_refill_bpools);
13075 +
13076 +/* Cleanup function for outgoing frame descriptors that were built on Tx path,
13077 + * either contiguous frames or scatter/gather ones.
13078 + * Skb freeing is not handled here.
13079 + *
13080 + * This function may be called on error paths in the Tx function, so guard
13081 + * against cases when not all fd relevant fields were filled in.
13082 + *
13083 + * Return the skb backpointer, since for S/G frames the buffer containing it
13084 + * gets freed here.
13085 + */
13086 +struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
13087 + const struct qm_fd *fd)
13088 +{
13089 + const struct qm_sg_entry *sgt;
13090 + int i;
13091 + struct dpa_bp *dpa_bp = priv->dpa_bp;
13092 + dma_addr_t addr = qm_fd_addr(fd);
13093 + dma_addr_t sg_addr;
13094 + struct sk_buff **skbh;
13095 + struct sk_buff *skb = NULL;
13096 + const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
13097 + int nr_frags;
13098 + int sg_len;
13099 +
13100 + /* retrieve skb back pointer */
13101 + DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), 0);
13102 +
13103 + if (unlikely(fd->format == qm_fd_sg)) {
13104 + nr_frags = skb_shinfo(skb)->nr_frags;
13105 +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
13106 +		/* Working around the 4K DMA issue may have produced more S/G
13107 +		 * fragments than the skb has, so unmap the maximum SGT size
13108 +		 */
13109 + dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) +
13110 + sizeof(struct qm_sg_entry) * DPA_SGT_MAX_ENTRIES,
13111 + dma_dir);
13112 +#else
13113 + dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) +
13114 + sizeof(struct qm_sg_entry) * (1 + nr_frags),
13115 + dma_dir);
13116 +#endif
13117 + /* The sgt buffer has been allocated with netdev_alloc_frag(),
13118 + * it's from lowmem.
13119 + */
13120 + sgt = phys_to_virt(addr + dpa_fd_offset(fd));
13121 +#ifdef CONFIG_FSL_DPAA_1588
13122 + if (priv->tsu && priv->tsu->valid &&
13123 + priv->tsu->hwts_tx_en_ioctl)
13124 + dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
13125 +#endif
13126 +#ifdef CONFIG_FSL_DPAA_TS
13127 + if (unlikely(priv->ts_tx_en &&
13128 + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
13129 + struct skb_shared_hwtstamps shhwtstamps;
13130 +
13131 + dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh);
13132 + skb_tstamp_tx(skb, &shhwtstamps);
13133 + }
13134 +#endif /* CONFIG_FSL_DPAA_TS */
13135 +
13136 + /* sgt[0] is from lowmem, was dma_map_single()-ed */
13137 + sg_addr = qm_sg_addr(&sgt[0]);
13138 + sg_len = qm_sg_entry_get_len(&sgt[0]);
13139 + dma_unmap_single(dpa_bp->dev, sg_addr, sg_len, dma_dir);
13140 +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
13141 + i = 1;
13142 + do {
13143 + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
13144 + sg_addr = qm_sg_addr(&sgt[i]);
13145 + sg_len = qm_sg_entry_get_len(&sgt[i]);
13146 + dma_unmap_page(dpa_bp->dev, sg_addr, sg_len, dma_dir);
13147 + } while (!qm_sg_entry_get_final(&sgt[i++]));
13148 +#else
13149 + /* remaining pages were mapped with dma_map_page() */
13150 + for (i = 1; i <= nr_frags; i++) {
13151 + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
13152 + sg_addr = qm_sg_addr(&sgt[i]);
13153 + sg_len = qm_sg_entry_get_len(&sgt[i]);
13154 + dma_unmap_page(dpa_bp->dev, sg_addr, sg_len, dma_dir);
13155 + }
13156 +#endif
13157 +
13158 + /* Free the page frag that we allocated on Tx */
13159 + put_page(virt_to_head_page(sgt));
13160 + } else {
13161 + dma_unmap_single(dpa_bp->dev, addr,
13162 + skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
13163 +#ifdef CONFIG_FSL_DPAA_TS
13164 + /* get the timestamp for non-SG frames */
13165 +#ifdef CONFIG_FSL_DPAA_1588
13166 + if (priv->tsu && priv->tsu->valid &&
13167 + priv->tsu->hwts_tx_en_ioctl)
13168 + dpa_ptp_store_txstamp(priv, skb, (void *)skbh);
13169 +#endif
13170 + if (unlikely(priv->ts_tx_en &&
13171 + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
13172 + struct skb_shared_hwtstamps shhwtstamps;
13173 +
13174 + dpa_get_ts(priv, TX, &shhwtstamps, (void *)skbh);
13175 + skb_tstamp_tx(skb, &shhwtstamps);
13176 + }
13177 +#endif
13178 + }
13179 +
13180 + return skb;
13181 +}
13182 +EXPORT_SYMBOL(_dpa_cleanup_tx_fd);
13183 +
13184 +#ifndef CONFIG_FSL_DPAA_TS
13185 +bool dpa_skb_is_recyclable(struct sk_buff *skb)
13186 +{
13187 + /* No recycling possible if skb buffer is kmalloc'ed */
13188 + if (skb->head_frag == 0)
13189 + return false;
13190 +
13191 +	/* or if it's a userspace buffer */
13192 + if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
13193 + return false;
13194 +
13195 + /* or if it's cloned or shared */
13196 + if (skb_shared(skb) || skb_cloned(skb) ||
13197 + skb->fclone != SKB_FCLONE_UNAVAILABLE)
13198 + return false;
13199 +
13200 + return true;
13201 +}
13202 +EXPORT_SYMBOL(dpa_skb_is_recyclable);
13203 +
13204 +bool dpa_buf_is_recyclable(struct sk_buff *skb,
13205 + uint32_t min_size,
13206 + uint16_t min_offset,
13207 + unsigned char **new_buf_start)
13208 +{
13209 + unsigned char *new;
13210 +
13211 + /* In order to recycle a buffer, the following conditions must be met:
13212 + * - buffer size no less than the buffer pool size
13213 + * - buffer size no higher than an upper limit (to avoid moving too much
13214 + * system memory to the buffer pools)
13215 + * - buffer address aligned to cacheline bytes
13216 + * - offset of data from start of buffer no lower than a minimum value
13217 + * - offset of data from start of buffer no higher than a maximum value
13218 + */
13219 + new = min(skb_end_pointer(skb) - min_size, skb->data - min_offset);
13220 +
13221 + /* left align to the nearest cacheline */
13222 + new = (unsigned char *)((unsigned long)new & ~(SMP_CACHE_BYTES - 1));
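+	/* e.g. with SMP_CACHE_BYTES == 64, an address ending in 0x25c is
+	 * rounded down to one ending in 0x240
+	 */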
13223 +
13224 + if (likely(new >= skb->head &&
13225 + new >= (skb->data - DPA_MAX_FD_OFFSET) &&
13226 + skb_end_pointer(skb) - new <= DPA_RECYCLE_MAX_SIZE)) {
13227 + *new_buf_start = new;
13228 + return true;
13229 + }
13230 +
13231 + return false;
13232 +}
13233 +EXPORT_SYMBOL(dpa_buf_is_recyclable);
13234 +#endif
13235 +
13236 +/* Build a linear skb around the received buffer.
13237 + * We are guaranteed there is enough room at the end of the data buffer to
13238 + * accommodate the shared info area of the skb.
13239 + */
13240 +static struct sk_buff *__hot contig_fd_to_skb(const struct dpa_priv_s *priv,
13241 + const struct qm_fd *fd, int *use_gro)
13242 +{
13243 + dma_addr_t addr = qm_fd_addr(fd);
13244 + ssize_t fd_off = dpa_fd_offset(fd);
13245 + void *vaddr;
13246 + const fm_prs_result_t *parse_results;
13247 + struct sk_buff *skb = NULL, **skbh;
13248 +
13249 + vaddr = phys_to_virt(addr);
13250 + DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
13251 +
13252 + /* Retrieve the skb and adjust data and tail pointers, to make sure
13253 + * forwarded skbs will have enough space on Tx if extra headers
13254 + * are added.
13255 + */
13256 + DPA_READ_SKB_PTR(skb, skbh, vaddr, -1);
13257 +
13258 +#ifdef CONFIG_FSL_DPAA_ETH_JUMBO_FRAME
13259 + /* When using jumbo Rx buffers, we risk having frames dropped due to
13260 + * the socket backlog reaching its maximum allowed size.
13261 + * Use the frame length for the skb truesize instead of the buffer
13262 + * size, as this is the size of the data that actually gets copied to
13263 + * userspace.
13264 + */
13265 + skb->truesize = SKB_TRUESIZE(dpa_fd_length(fd));
13266 +#endif
13267 +
13268 + DPA_BUG_ON(fd_off != priv->rx_headroom);
13269 + skb_reserve(skb, fd_off);
13270 + skb_put(skb, dpa_fd_length(fd));
13271 +
13272 + /* Peek at the parse results for csum validation */
13273 + parse_results = (const fm_prs_result_t *)(vaddr +
13274 + DPA_RX_PRIV_DATA_SIZE);
13275 + _dpa_process_parse_results(parse_results, fd, skb, use_gro);
13276 +
13277 +#ifdef CONFIG_FSL_DPAA_1588
13278 + if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_rx_en_ioctl)
13279 + dpa_ptp_store_rxstamp(priv, skb, vaddr);
13280 +#endif
13281 +#ifdef CONFIG_FSL_DPAA_TS
13282 + if (priv->ts_rx_en)
13283 + dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr);
13284 +#endif /* CONFIG_FSL_DPAA_TS */
13285 +
13286 + return skb;
13287 +}
13288 +
13289 +
13290 +/* Build an skb with the data of the first S/G entry in the linear portion and
13291 + * the rest of the frame as skb fragments.
13292 + *
13293 + * The page fragment holding the S/G Table is recycled here.
13294 + */
13295 +static struct sk_buff *__hot sg_fd_to_skb(const struct dpa_priv_s *priv,
13296 + const struct qm_fd *fd, int *use_gro,
13297 + int *count_ptr)
13298 +{
13299 + const struct qm_sg_entry *sgt;
13300 + dma_addr_t addr = qm_fd_addr(fd);
13301 + ssize_t fd_off = dpa_fd_offset(fd);
13302 + dma_addr_t sg_addr;
13303 + void *vaddr, *sg_vaddr;
13304 + struct dpa_bp *dpa_bp;
13305 + struct page *page, *head_page;
13306 + int frag_offset, frag_len;
13307 + int page_offset;
13308 + int i;
13309 + const fm_prs_result_t *parse_results;
13310 + struct sk_buff *skb = NULL, *skb_tmp, **skbh;
13311 +
13312 + vaddr = phys_to_virt(addr);
13313 + DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
13314 +
13315 + dpa_bp = priv->dpa_bp;
13316 + /* Iterate through the SGT entries and add data buffers to the skb */
13317 + sgt = vaddr + fd_off;
13318 + for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
13319 + /* Extension bit is not supported */
13320 + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
13321 +
13322 + /* We use a single global Rx pool */
13323 + DPA_BUG_ON(dpa_bp !=
13324 + dpa_bpid2pool(qm_sg_entry_get_bpid(&sgt[i])));
13325 +
13326 + sg_addr = qm_sg_addr(&sgt[i]);
13327 + sg_vaddr = phys_to_virt(sg_addr);
13328 + DPA_BUG_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
13329 + SMP_CACHE_BYTES));
13330 +
13331 + dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size,
13332 + DMA_BIDIRECTIONAL);
13333 + if (i == 0) {
13334 + DPA_READ_SKB_PTR(skb, skbh, sg_vaddr, -1);
13335 + DPA_BUG_ON(skb->head != sg_vaddr);
13336 +#ifdef CONFIG_FSL_DPAA_1588
13337 + if (priv->tsu && priv->tsu->valid &&
13338 + priv->tsu->hwts_rx_en_ioctl)
13339 + dpa_ptp_store_rxstamp(priv, skb, vaddr);
13340 +#endif
13341 +#ifdef CONFIG_FSL_DPAA_TS
13342 + if (priv->ts_rx_en)
13343 + dpa_get_ts(priv, RX, skb_hwtstamps(skb), vaddr);
13344 +#endif /* CONFIG_FSL_DPAA_TS */
13345 +
13346 + /* In the case of a SG frame, FMan stores the Internal
13347 + * Context in the buffer containing the sgt.
13348 + * Inspect the parse results before anything else.
13349 + */
13350 + parse_results = (const fm_prs_result_t *)(vaddr +
13351 + DPA_RX_PRIV_DATA_SIZE);
13352 + _dpa_process_parse_results(parse_results, fd, skb,
13353 + use_gro);
13354 +
13355 + /* Make sure forwarded skbs will have enough space
13356 + * on Tx, if extra headers are added.
13357 + */
13358 + DPA_BUG_ON(fd_off != priv->rx_headroom);
13359 + skb_reserve(skb, fd_off);
13360 + skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
13361 + } else {
13362 + /* Not the first S/G entry; all data from buffer will
13363 + * be added in an skb fragment; fragment index is offset
13364 + * by one since first S/G entry was incorporated in the
13365 + * linear part of the skb.
13366 + *
13367 + * Caution: 'page' may be a tail page.
13368 + */
13369 + DPA_READ_SKB_PTR(skb_tmp, skbh, sg_vaddr, -1);
13370 + page = virt_to_page(sg_vaddr);
13371 + head_page = virt_to_head_page(sg_vaddr);
13372 +
13373 + /* Free (only) the skbuff shell because its data buffer
13374 + * is already a frag in the main skb.
13375 + */
13376 + get_page(head_page);
13377 + dev_kfree_skb(skb_tmp);
13378 +
13379 + /* Compute offset in (possibly tail) page */
13380 + page_offset = ((unsigned long)sg_vaddr &
13381 + (PAGE_SIZE - 1)) +
13382 + (page_address(page) - page_address(head_page));
13383 + /* page_offset only refers to the beginning of sgt[i];
13384 + * but the buffer itself may have an internal offset.
13385 + */
13386 + frag_offset = qm_sg_entry_get_offset(&sgt[i]) +
13387 + page_offset;
13388 + frag_len = qm_sg_entry_get_len(&sgt[i]);
13389 + /* skb_add_rx_frag() does no checking on the page; if
13390 + * we pass it a tail page, we'll end up with
13391 +			 * bad page accounting and eventually with segfaults.
13392 + */
13393 + skb_add_rx_frag(skb, i - 1, head_page, frag_offset,
13394 + frag_len, dpa_bp->size);
13395 + }
13396 + /* Update the pool count for the current {cpu x bpool} */
13397 + (*count_ptr)--;
13398 +
13399 + if (qm_sg_entry_get_final(&sgt[i]))
13400 + break;
13401 + }
13402 + WARN_ONCE(i == DPA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
13403 +
13404 + /* recycle the SGT fragment */
13405 + DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
13406 + dpa_bp_recycle_frag(dpa_bp, (unsigned long)vaddr, count_ptr);
13407 + return skb;
13408 +}
13409 +
13410 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
13411 +static inline int dpa_skb_loop(const struct dpa_priv_s *priv,
13412 + struct sk_buff *skb)
13413 +{
13414 + if (unlikely(priv->loop_to < 0))
13415 + return 0; /* loop disabled by default */
13416 +
13417 + skb_push(skb, ETH_HLEN); /* compensate for eth_type_trans */
13418 + dpa_tx(skb, dpa_loop_netdevs[priv->loop_to]);
13419 +
13420 + return 1; /* Frame Tx on the selected interface */
13421 +}
13422 +#endif
13423 +
13424 +void __hot _dpa_rx(struct net_device *net_dev,
13425 + struct qman_portal *portal,
13426 + const struct dpa_priv_s *priv,
13427 + struct dpa_percpu_priv_s *percpu_priv,
13428 + const struct qm_fd *fd,
13429 + u32 fqid,
13430 + int *count_ptr)
13431 +{
13432 + struct dpa_bp *dpa_bp;
13433 + struct sk_buff *skb;
13434 + dma_addr_t addr = qm_fd_addr(fd);
13435 + u32 fd_status = fd->status;
13436 + unsigned int skb_len;
13437 + struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats;
13438 + int use_gro = net_dev->features & NETIF_F_GRO;
13439 +
13440 +	if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS)) {
13441 + if (netif_msg_hw(priv) && net_ratelimit())
13442 + netdev_warn(net_dev, "FD status = 0x%08x\n",
13443 + fd_status & FM_FD_STAT_RX_ERRORS);
13444 +
13445 + percpu_stats->rx_errors++;
13446 + goto _release_frame;
13447 + }
13448 +
13449 + dpa_bp = priv->dpa_bp;
13450 + DPA_BUG_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
13451 +
13452 + /* prefetch the first 64 bytes of the frame or the SGT start */
13453 + dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
13454 + prefetch(phys_to_virt(addr) + dpa_fd_offset(fd));
13455 +
13456 + /* The only FD types that we may receive are contig and S/G */
13457 + DPA_BUG_ON((fd->format != qm_fd_contig) && (fd->format != qm_fd_sg));
13458 +
13459 + if (likely(fd->format == qm_fd_contig)) {
13460 +#ifdef CONFIG_FSL_DPAA_HOOKS
13461 + /* Execute the Rx processing hook, if it exists. */
13462 + if (dpaa_eth_hooks.rx_default &&
13463 + dpaa_eth_hooks.rx_default((void *)fd, net_dev,
13464 + fqid) == DPAA_ETH_STOLEN) {
13465 + /* won't count the rx bytes in */
13466 + return;
13467 + }
13468 +#endif
13469 + skb = contig_fd_to_skb(priv, fd, &use_gro);
13470 + } else {
13471 + skb = sg_fd_to_skb(priv, fd, &use_gro, count_ptr);
13472 + percpu_priv->rx_sg++;
13473 + }
13474 +
13475 + /* Account for either the contig buffer or the SGT buffer (depending on
13476 + * which case we were in) having been removed from the pool.
13477 + */
13478 + (*count_ptr)--;
13479 + skb->protocol = eth_type_trans(skb, net_dev);
13480 +
13481 + /* IP Reassembled frames are allowed to be larger than MTU */
13482 + if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) &&
13483 + !(fd_status & FM_FD_IPR))) {
13484 + percpu_stats->rx_dropped++;
13485 + goto drop_bad_frame;
13486 + }
13487 +
13488 + skb_len = skb->len;
13489 +
13490 +#ifdef CONFIG_FSL_DPAA_DBG_LOOP
13491 + if (dpa_skb_loop(priv, skb)) {
13492 + percpu_stats->rx_packets++;
13493 + percpu_stats->rx_bytes += skb_len;
13494 + return;
13495 + }
13496 +#endif
13497 +
13498 + if (use_gro) {
13499 + gro_result_t gro_result;
13500 + const struct qman_portal_config *pc =
13501 + qman_p_get_portal_config(portal);
13502 + struct dpa_napi_portal *np = &percpu_priv->np[pc->index];
13503 +
13504 + np->p = portal;
13505 + gro_result = napi_gro_receive(&np->napi, skb);
13506 + /* If frame is dropped by the stack, rx_dropped counter is
13507 + * incremented automatically, so no need for us to update it
13508 + */
13509 + if (unlikely(gro_result == GRO_DROP))
13510 + goto packet_dropped;
13511 + } else if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
13512 + goto packet_dropped;
13513 +
13514 + percpu_stats->rx_packets++;
13515 + percpu_stats->rx_bytes += skb_len;
13516 +
13517 +packet_dropped:
13518 + return;
13519 +
13520 +drop_bad_frame:
13521 + dev_kfree_skb(skb);
13522 + return;
13523 +
13524 +_release_frame:
13525 + dpa_fd_release(net_dev, fd);
13526 +}
13527 +
13528 +int __hot skb_to_contig_fd(struct dpa_priv_s *priv,
13529 + struct sk_buff *skb, struct qm_fd *fd,
13530 + int *count_ptr, int *offset)
13531 +{
13532 + struct sk_buff **skbh;
13533 + dma_addr_t addr;
13534 + struct dpa_bp *dpa_bp = priv->dpa_bp;
13535 + struct net_device *net_dev = priv->net_dev;
13536 + int err;
13537 + enum dma_data_direction dma_dir;
13538 + unsigned char *buffer_start;
13539 +
13540 +#ifndef CONFIG_FSL_DPAA_TS
13541 + /* Check recycling conditions; only if timestamp support is not
13542 + * enabled, otherwise we need the fd back on tx confirmation
13543 + */
13544 +
13545 + /* We can recycle the buffer if:
13546 + * - the pool is not full
13547 + * - the buffer meets the skb recycling conditions
13548 + * - the buffer meets our own (size, offset, align) conditions
13549 + */
13550 + if (likely((*count_ptr < dpa_bp->target_count) &&
13551 + dpa_skb_is_recyclable(skb) &&
13552 + dpa_buf_is_recyclable(skb, dpa_bp->size,
13553 + priv->tx_headroom, &buffer_start))) {
13554 + /* Buffer is recyclable; use the new start address
13555 + * and set fd parameters and DMA mapping direction
13556 + */
13557 + fd->bpid = dpa_bp->bpid;
13558 + DPA_BUG_ON(skb->data - buffer_start > DPA_MAX_FD_OFFSET);
13559 + fd->offset = (uint16_t)(skb->data - buffer_start);
13560 + dma_dir = DMA_BIDIRECTIONAL;
13561 +
13562 + DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, -1);
13563 + *offset = skb_headroom(skb) - fd->offset;
13564 + } else
13565 +#endif
13566 + {
13567 + /* Not recyclable.
13568 + * We are guaranteed to have at least tx_headroom bytes
13569 + * available, so just use that for offset.
13570 + */
13571 + fd->bpid = 0xff;
13572 + buffer_start = skb->data - priv->tx_headroom;
13573 + fd->offset = priv->tx_headroom;
13574 + dma_dir = DMA_TO_DEVICE;
13575 +
13576 + /* The buffer will be Tx-confirmed, but the TxConf cb must
13577 + * necessarily look at our Tx private data to retrieve the
13578 + * skbuff. (In short: can't use DPA_WRITE_SKB_PTR() here.)
13579 + */
13580 + DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
13581 + }
13582 +
13583 + /* Enable L3/L4 hardware checksum computation.
13584 + *
13585 + * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
13586 + * need to write into the skb.
13587 + */
13588 + err = dpa_enable_tx_csum(priv, skb, fd,
13589 + ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
13590 + if (unlikely(err < 0)) {
13591 + if (netif_msg_tx_err(priv) && net_ratelimit())
13592 + netdev_err(net_dev, "HW csum error: %d\n", err);
13593 + return err;
13594 + }
13595 +
13596 + /* Fill in the rest of the FD fields */
13597 + fd->format = qm_fd_contig;
13598 + fd->length20 = skb->len;
13599 + fd->cmd |= FM_FD_CMD_FCO;
13600 +
13601 + /* Map the entire buffer size that may be seen by FMan, but no more */
13602 + addr = dma_map_single(dpa_bp->dev, skbh,
13603 + skb_tail_pointer(skb) - buffer_start, dma_dir);
13604 + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
13605 + if (netif_msg_tx_err(priv) && net_ratelimit())
13606 + netdev_err(net_dev, "dma_map_single() failed\n");
13607 + return -EINVAL;
13608 + }
13609 + fd->addr = addr;
13610 +
13612 + return 0;
13613 +}
13614 +EXPORT_SYMBOL(skb_to_contig_fd);
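+
+/* Sketch of the Tx buffer layout built above for the non-recyclable case
+ * (fd->bpid == 0xff):
+ *
+ *	buffer_start (fd->addr): skb back-pointer, read back by
+ *		_dpa_cleanup_tx_fd() on Tx confirmation
+ *	buffer_start + DPA_TX_PRIV_DATA_SIZE: private area handed to
+ *		dpa_enable_tx_csum()
+ *	buffer_start + fd->offset (== priv->tx_headroom): skb->data, i.e.
+ *		the start of the frame, fd->length20 bytes long
+ */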
13615 +
13616 +int __hot skb_to_sg_fd(struct dpa_priv_s *priv,
13617 + struct sk_buff *skb, struct qm_fd *fd)
13618 +{
13619 + struct dpa_bp *dpa_bp = priv->dpa_bp;
13620 + dma_addr_t addr;
13621 + dma_addr_t sg_addr;
13622 + struct sk_buff **skbh;
13623 + struct net_device *net_dev = priv->net_dev;
13624 + int sg_len;
13625 + int err;
13626 +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
13627 + unsigned long boundary;
13628 + int k;
13629 +#endif
13630 +
13631 + struct qm_sg_entry *sgt;
13632 + void *sgt_buf;
13633 + void *buffer_start;
13634 + skb_frag_t *frag;
13635 + int i, j;
13636 + const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
13637 + const int nr_frags = skb_shinfo(skb)->nr_frags;
13638 +
13639 + fd->format = qm_fd_sg;
13640 +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
13641 + /* get a page frag to store the SGTable */
13642 + sgt_buf = netdev_alloc_frag(priv->tx_headroom +
13643 + sizeof(struct qm_sg_entry) * DPA_SGT_MAX_ENTRIES);
13644 + if (unlikely(!sgt_buf)) {
13645 + dev_err(dpa_bp->dev, "netdev_alloc_frag() failed\n");
13646 + return -ENOMEM;
13647 + }
13648 +
13649 +	/* netdev_alloc_frag() does not zero the allocated memory */
13650 + memset(sgt_buf, 0, priv->tx_headroom +
13651 + sizeof(struct qm_sg_entry) * DPA_SGT_MAX_ENTRIES);
13652 +#else
13653 + /* get a page frag to store the SGTable */
13654 + sgt_buf = netdev_alloc_frag(priv->tx_headroom +
13655 + sizeof(struct qm_sg_entry) * (1 + nr_frags));
13656 + if (unlikely(!sgt_buf)) {
13657 + dev_err(dpa_bp->dev, "netdev_alloc_frag() failed\n");
13658 + return -ENOMEM;
13659 + }
13660 +
13661 + memset(sgt_buf, 0, priv->tx_headroom +
13662 + sizeof(struct qm_sg_entry) * (1 + nr_frags));
13663 +#endif
13664 +
13665 + /* Enable L3/L4 hardware checksum computation.
13666 + *
13667 + * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
13668 + * need to write into the skb.
13669 + */
13670 + err = dpa_enable_tx_csum(priv, skb, fd,
13671 + sgt_buf + DPA_TX_PRIV_DATA_SIZE);
13672 + if (unlikely(err < 0)) {
13673 + if (netif_msg_tx_err(priv) && net_ratelimit())
13674 + netdev_err(net_dev, "HW csum error: %d\n", err);
13675 + goto csum_failed;
13676 + }
13677 +
13678 + sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
13679 + sg_len = skb_headlen(skb);
13680 + qm_sg_entry_set_bpid(&sgt[0], 0xff);
13681 + qm_sg_entry_set_offset(&sgt[0], 0);
13682 + qm_sg_entry_set_len(&sgt[0], sg_len);
13683 + qm_sg_entry_set_ext(&sgt[0], 0);
13684 + qm_sg_entry_set_final(&sgt[0], 0);
13685 +
13686 + addr = dma_map_single(dpa_bp->dev, skb->data, sg_len, dma_dir);
13687 + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
13688 + dev_err(dpa_bp->dev, "DMA mapping failed");
13689 + err = -EINVAL;
13690 +		goto sg0_map_failed;
13691 +	}
13693 +
13694 + qm_sg_entry_set64(&sgt[0], addr);
13695 +
13696 +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
13697 + j = 0;
13698 + if (unlikely(HAS_DMA_ISSUE(skb->data, sg_len))) {
13699 + boundary = BOUNDARY_4K(skb->data, sg_len);
13700 + qm_sg_entry_set_len(&sgt[j], boundary -
13701 + (unsigned long)skb->data);
13702 +
13703 + j++;
13704 + qm_sg_entry_set_bpid(&sgt[j], 0xff);
13705 + qm_sg_entry_set_offset(&sgt[j], 0);
13706 + qm_sg_entry_set_len(&sgt[j],
13707 + ((unsigned long)skb->data + (unsigned long)sg_len) -
13708 + boundary);
13709 + qm_sg_entry_set_ext(&sgt[j], 0);
13710 +		qm_sg_entry_set_final(&sgt[j], nr_frags == 0);
13711 +
13712 + /* keep the offset in the address */
13713 + qm_sg_entry_set64(&sgt[j], addr +
13714 + (boundary -
13715 + (unsigned long)skb->data));
13716 + }
13717 + j++;
13718 +
13719 + /* populate the rest of SGT entries */
13720 + for (i = 1; i <= nr_frags; i++, j++) {
13721 + frag = &skb_shinfo(skb)->frags[i - 1];
13722 + qm_sg_entry_set_bpid(&sgt[j], 0xff);
13723 + qm_sg_entry_set_offset(&sgt[j], 0);
13724 + qm_sg_entry_set_len(&sgt[j], frag->size);
13725 + qm_sg_entry_set_ext(&sgt[j], 0);
13726 +
13727 + DPA_BUG_ON(!skb_frag_page(frag));
13728 + addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, frag->size,
13729 + dma_dir);
13730 + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
13731 + dev_err(dpa_bp->dev, "DMA mapping failed");
13732 + err = -EINVAL;
13733 + goto sg_map_failed;
13734 + }
13735 +
13736 + /* keep the offset in the address */
13737 + qm_sg_entry_set64(&sgt[j], addr);
13738 +
13739 +		if (unlikely(HAS_DMA_ISSUE(addr, frag->size))) {
13740 +			boundary = BOUNDARY_4K(addr, frag->size);
13741 +			qm_sg_entry_set_len(&sgt[j], boundary -
13742 +				(unsigned long)addr);
13743 +
13744 +			j++;
13745 +			qm_sg_entry_set_bpid(&sgt[j], 0xff);
13746 +			qm_sg_entry_set_offset(&sgt[j], 0);
13747 +			qm_sg_entry_set_len(&sgt[j],
13748 +				((unsigned long)frag->size -
13749 +				(boundary - (unsigned long)addr)));
13750 +			qm_sg_entry_set_ext(&sgt[j], 0);
13751 +
13752 +			/* keep the offset in the address */
13753 +			qm_sg_entry_set64(&sgt[j], addr +
13754 +				(boundary - (unsigned long)addr));
13755 + }
13756 +
13757 + if (i == nr_frags)
13758 + qm_sg_entry_set_final(&sgt[j], 1);
13759 + else
13760 + qm_sg_entry_set_final(&sgt[j], 0);
13761 +#else
13762 +
13763 + /* populate the rest of SGT entries */
13764 + for (i = 1; i <= nr_frags; i++) {
13765 + frag = &skb_shinfo(skb)->frags[i - 1];
13766 + qm_sg_entry_set_bpid(&sgt[i], 0xff);
13767 + qm_sg_entry_set_offset(&sgt[i], 0);
13768 + qm_sg_entry_set_len(&sgt[i], frag->size);
13769 + qm_sg_entry_set_ext(&sgt[i], 0);
13770 +
13771 + if (i == nr_frags)
13772 + qm_sg_entry_set_final(&sgt[i], 1);
13773 + else
13774 + qm_sg_entry_set_final(&sgt[i], 0);
13775 +
13776 + DPA_BUG_ON(!skb_frag_page(frag));
13777 + addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, frag->size,
13778 + dma_dir);
13779 + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
13780 + dev_err(dpa_bp->dev, "DMA mapping failed");
13781 + err = -EINVAL;
13782 + goto sg_map_failed;
13783 + }
13784 +
13785 + /* keep the offset in the address */
13786 + qm_sg_entry_set64(&sgt[i], addr);
13787 +#endif
13788 + }
13789 +
13790 + fd->length20 = skb->len;
13791 + fd->offset = priv->tx_headroom;
13792 +
13793 + /* DMA map the SGT page */
13794 + buffer_start = (void *)sgt - priv->tx_headroom;
13795 + DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
13796 +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
13797 + addr = dma_map_single(dpa_bp->dev, buffer_start, priv->tx_headroom +
13798 + sizeof(struct qm_sg_entry) * DPA_SGT_MAX_ENTRIES,
13799 + dma_dir);
13800 +#else
13801 + addr = dma_map_single(dpa_bp->dev, buffer_start, priv->tx_headroom +
13802 + sizeof(struct qm_sg_entry) * (1 + nr_frags),
13803 + dma_dir);
13804 +#endif
13805 + if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
13806 + dev_err(dpa_bp->dev, "DMA mapping failed");
13807 + err = -EINVAL;
13808 + goto sgt_map_failed;
13809 + }
13810 +
13811 + fd->bpid = 0xff;
13812 + fd->cmd |= FM_FD_CMD_FCO;
13813 + fd->addr = addr;
13814 +
13815 + return 0;
13816 +
13817 +sgt_map_failed:
13818 +sg_map_failed:
13819 +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
13820 + for (k = 0; k < j; k++) {
13821 + sg_addr = qm_sg_addr(&sgt[k]);
13822 + dma_unmap_page(dpa_bp->dev, sg_addr,
13823 + qm_sg_entry_get_len(&sgt[k]), dma_dir);
13824 + }
13825 +#else
13826 + for (j = 0; j < i; j++) {
13827 + sg_addr = qm_sg_addr(&sgt[j]);
13828 + dma_unmap_page(dpa_bp->dev, sg_addr,
13829 + qm_sg_entry_get_len(&sgt[j]), dma_dir);
13830 + }
13831 +#endif
13832 +sg0_map_failed:
13833 +csum_failed:
13834 + put_page(virt_to_head_page(sgt_buf));
13835 +
13836 + return err;
13837 +}
13838 +EXPORT_SYMBOL(skb_to_sg_fd);
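The 4 KiB workaround above relies on HAS_DMA_ISSUE() and BOUNDARY_4K(), whose definitions sit outside this hunk. A minimal sketch of the arithmetic their uses imply, under hypothetical names:

    /* Hypothetical stand-ins for the boundary checks used above */
    #define CROSSES_4K(start, len) \
    	((((unsigned long)(start)) >> 12) != \
    	 (((unsigned long)(start) + (len) - 1) >> 12))
    #define NEXT_4K(start) \
    	((((unsigned long)(start)) & ~0xfffUL) + 0x1000)

    /* Worked example: start = 0x2f80, len = 0x100. CROSSES_4K() is true
     * and NEXT_4K(start) = 0x3000, so the buffer is described by two SG
     * entries: 0x80 bytes at addr, then the remaining 0x80 bytes at
     * addr + 0x80, matching the split computed in skb_to_sg_fd() above.
     */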
13839 +
13840 +int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
13841 +{
13842 + struct dpa_priv_s *priv;
13843 + const int queue_mapping = dpa_get_queue_mapping(skb);
13844 + struct qman_fq *egress_fq, *conf_fq;
13845 +
13846 +#ifdef CONFIG_FSL_DPAA_HOOKS
13847 + /* If there is a Tx hook, run it. */
13848 + if (dpaa_eth_hooks.tx &&
13849 + dpaa_eth_hooks.tx(skb, net_dev) == DPAA_ETH_STOLEN)
13850 + /* won't update any Tx stats */
13851 + return NETDEV_TX_OK;
13852 +#endif
13853 +
13854 + priv = netdev_priv(net_dev);
13855 +
13856 +#ifdef CONFIG_FSL_DPAA_CEETM
13857 + if (priv->ceetm_en)
13858 + return ceetm_tx(skb, net_dev);
13859 +#endif
13860 +
13861 + egress_fq = priv->egress_fqs[queue_mapping];
13862 + conf_fq = priv->conf_fqs[queue_mapping];
13863 +
13864 + return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq);
13865 +}
13866 +
13867 +int __hot dpa_tx_extended(struct sk_buff *skb, struct net_device *net_dev,
13868 + struct qman_fq *egress_fq, struct qman_fq *conf_fq)
13869 +{
13870 + struct dpa_priv_s *priv;
13871 + struct qm_fd fd;
13872 + struct dpa_percpu_priv_s *percpu_priv;
13873 + struct rtnl_link_stats64 *percpu_stats;
13874 + int err = 0;
13875 + const bool nonlinear = skb_is_nonlinear(skb);
13876 + int *countptr, offset = 0;
13877 +
13878 + priv = netdev_priv(net_dev);
13879 + /* Non-migratable context, safe to use raw_cpu_ptr */
13880 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
13881 + percpu_stats = &percpu_priv->stats;
13882 + countptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
13883 +
13884 + clear_fd(&fd);
13885 +
13886 +#ifdef CONFIG_FSL_DPAA_1588
13887 + if (priv->tsu && priv->tsu->valid && priv->tsu->hwts_tx_en_ioctl)
13888 + fd.cmd |= FM_FD_CMD_UPD;
13889 +#endif
13890 +#ifdef CONFIG_FSL_DPAA_TS
13891 + if (unlikely(priv->ts_tx_en &&
13892 + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
13893 + fd.cmd |= FM_FD_CMD_UPD;
13894 + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
13895 +#endif /* CONFIG_FSL_DPAA_TS */
13896 +
13897 +	/* MAX_SKB_FRAGS is larger than our DPA_SGT_MAX_ENTRIES; make sure
13898 +	 * we don't feed FMan more fragments than it supports. Note that
13899 +	 * the first SGT entry is used for the linear part of the skb, so
13900 +	 * in practice we can fit one fragment less than the SGT size.
13901 +	 */
13902 + if (nonlinear &&
13903 + likely(skb_shinfo(skb)->nr_frags < DPA_SGT_ENTRIES_THRESHOLD)) {
13904 + /* Just create a S/G fd based on the skb */
13905 + err = skb_to_sg_fd(priv, skb, &fd);
13906 + percpu_priv->tx_frag_skbuffs++;
13907 + } else {
13908 + /* Make sure we have enough headroom to accommodate private
13909 + * data, parse results, etc. Normally this shouldn't happen if
13910 + * we're here via the standard kernel stack.
13911 + */
13912 + if (unlikely(skb_headroom(skb) < priv->tx_headroom)) {
13913 + struct sk_buff *skb_new;
13914 +
13915 + skb_new = skb_realloc_headroom(skb, priv->tx_headroom);
13916 + if (unlikely(!skb_new)) {
13917 + dev_kfree_skb(skb);
13918 + percpu_stats->tx_errors++;
13919 + return NETDEV_TX_OK;
13920 + }
13921 + dev_kfree_skb(skb);
13922 + skb = skb_new;
13923 + }
13924 +
13925 + /* We're going to store the skb backpointer at the beginning
13926 + * of the data buffer, so we need a privately owned skb
13927 + */
13928 +
13929 + /* Code borrowed from skb_unshare(). */
13930 + if (skb_cloned(skb)) {
13931 + struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
13932 + kfree_skb(skb);
13933 + skb = nskb;
13934 + /* skb_copy() has now linearized the skbuff. */
13935 + } else if (unlikely(nonlinear)) {
13936 + /* We are here because the egress skb contains
13937 + * more fragments than we support. In this case,
13938 + * we have no choice but to linearize it ourselves.
13939 + */
13940 + err = __skb_linearize(skb);
13941 + }
13942 + if (unlikely(!skb || err < 0))
13943 + /* Common out-of-memory error path */
13944 + goto enomem;
13945 +
13946 +#ifdef DPAA_LS1043A_DMA_4K_ISSUE
13947 + if (unlikely(HAS_DMA_ISSUE(skb->data, skb->len))) {
13948 + err = skb_to_sg_fd(priv, skb, &fd);
13949 + percpu_priv->tx_frag_skbuffs++;
13950 + } else {
13951 + err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset);
13952 + }
13953 +#else
13954 + /* Finally, create a contig FD from this skb */
13955 + err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset);
13956 +#endif
13957 + }
13958 + if (unlikely(err < 0))
13959 + goto skb_to_fd_failed;
13960 +
13961 + if (fd.bpid != 0xff) {
13962 + skb_recycle(skb);
13963 + /* skb_recycle() reserves NET_SKB_PAD as skb headroom,
13964 + * but we need the skb to look as if returned by build_skb().
13965 + * We need to manually adjust the tailptr as well.
13966 + */
13967 + skb->data = skb->head + offset;
13968 + skb_reset_tail_pointer(skb);
13969 +
13970 + (*countptr)++;
13971 + percpu_priv->tx_returned++;
13972 + }
13973 +
13974 + if (unlikely(dpa_xmit(priv, percpu_stats, &fd, egress_fq, conf_fq) < 0))
13975 + goto xmit_failed;
13976 +
13977 + net_dev->trans_start = jiffies;
13978 + return NETDEV_TX_OK;
13979 +
13980 +xmit_failed:
13981 + if (fd.bpid != 0xff) {
13982 + (*countptr)--;
13983 + percpu_priv->tx_returned--;
13984 + dpa_fd_release(net_dev, &fd);
13985 + percpu_stats->tx_errors++;
13986 + return NETDEV_TX_OK;
13987 + }
13988 + _dpa_cleanup_tx_fd(priv, &fd);
13989 +skb_to_fd_failed:
13990 +enomem:
13991 + percpu_stats->tx_errors++;
13992 + dev_kfree_skb(skb);
13993 + return NETDEV_TX_OK;
13994 +}
13995 +EXPORT_SYMBOL(dpa_tx_extended);
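Because dpa_tx_extended() is exported, a variant driver can transmit on an explicitly chosen FQ pair instead of the per-queue mapping that dpa_tx() uses above. A minimal sketch (the fixed index 0 is purely illustrative):

    static int my_variant_xmit(struct sk_buff *skb, struct net_device *net_dev)
    {
    	struct dpa_priv_s *priv = netdev_priv(net_dev);

    	/* choose an egress FQ and its Tx confirmation FQ explicitly */
    	return dpa_tx_extended(skb, net_dev, priv->egress_fqs[0],
    			       priv->conf_fqs[0]);
    }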
13996 --- /dev/null
13997 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_shared.c
13998 @@ -0,0 +1,914 @@
13999 +/* Copyright 2008-2013 Freescale Semiconductor Inc.
14000 + *
14001 + * Redistribution and use in source and binary forms, with or without
14002 + * modification, are permitted provided that the following conditions are met:
14003 + * * Redistributions of source code must retain the above copyright
14004 + * notice, this list of conditions and the following disclaimer.
14005 + * * Redistributions in binary form must reproduce the above copyright
14006 + * notice, this list of conditions and the following disclaimer in the
14007 + * documentation and/or other materials provided with the distribution.
14008 + * * Neither the name of Freescale Semiconductor nor the
14009 + * names of its contributors may be used to endorse or promote products
14010 + * derived from this software without specific prior written permission.
14011 + *
14012 + *
14013 + * ALTERNATIVELY, this software may be distributed under the terms of the
14014 + * GNU General Public License ("GPL") as published by the Free Software
14015 + * Foundation, either version 2 of that License or (at your option) any
14016 + * later version.
14017 + *
14018 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
14019 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
14020 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
14021 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
14022 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
14023 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
14024 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
14025 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
14026 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
14027 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
14028 + */
14029 +
14030 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
14031 +#define pr_fmt(fmt) \
14032 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
14033 + KBUILD_BASENAME".c", __LINE__, __func__
14034 +#else
14035 +#define pr_fmt(fmt) \
14036 + KBUILD_MODNAME ": " fmt
14037 +#endif
14038 +
14039 +#include <linux/init.h>
14040 +#include <linux/module.h>
14041 +#include <linux/of_platform.h>
14042 +#include <linux/etherdevice.h>
14043 +#include <linux/kthread.h>
14044 +#include <linux/percpu.h>
14045 +#include <linux/highmem.h>
14046 +#include <linux/fsl_qman.h>
14047 +#include "dpaa_eth.h"
14048 +#include "dpaa_eth_common.h"
14049 +#include "dpaa_eth_base.h"
14050 +#include "lnxwrp_fsl_fman.h" /* fm_get_rx_extra_headroom(), fm_get_max_frm() */
14051 +#include "mac.h"
14052 +
14053 +/* forward declarations */
14054 +static enum qman_cb_dqrr_result __hot
14055 +shared_rx_dqrr(struct qman_portal *portal, struct qman_fq *fq,
14056 + const struct qm_dqrr_entry *dq);
14057 +static enum qman_cb_dqrr_result __hot
14058 +shared_tx_default_dqrr(struct qman_portal *portal,
14059 + struct qman_fq *fq,
14060 + const struct qm_dqrr_entry *dq);
14061 +static enum qman_cb_dqrr_result
14062 +shared_tx_error_dqrr(struct qman_portal *portal,
14063 + struct qman_fq *fq,
14064 + const struct qm_dqrr_entry *dq);
14065 +static void shared_ern(struct qman_portal *portal,
14066 + struct qman_fq *fq,
14067 + const struct qm_mr_entry *msg);
14068 +
14069 +#define DPA_DESCRIPTION "FSL DPAA Shared Ethernet driver"
14070 +
14071 +MODULE_LICENSE("Dual BSD/GPL");
14072 +
14073 +MODULE_DESCRIPTION(DPA_DESCRIPTION);
14074 +
14075 +/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
14076 +static uint16_t shared_tx_timeout = 1000;
14077 +module_param(shared_tx_timeout, ushort, S_IRUGO);
14078 +MODULE_PARM_DESC(shared_tx_timeout, "The Tx timeout in ms");
14079 +
14080 +static const struct of_device_id dpa_shared_match[];
14081 +
14082 +static const struct net_device_ops dpa_shared_ops = {
14083 + .ndo_open = dpa_start,
14084 + .ndo_start_xmit = dpa_shared_tx,
14085 + .ndo_stop = dpa_stop,
14086 + .ndo_tx_timeout = dpa_timeout,
14087 + .ndo_get_stats64 = dpa_get_stats64,
14088 + .ndo_set_mac_address = dpa_set_mac_address,
14089 + .ndo_validate_addr = eth_validate_addr,
14090 +#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
14091 + .ndo_select_queue = dpa_select_queue,
14092 +#endif
14093 + .ndo_change_mtu = dpa_change_mtu,
14094 + .ndo_set_rx_mode = dpa_set_rx_mode,
14095 + .ndo_init = dpa_ndo_init,
14096 + .ndo_set_features = dpa_set_features,
14097 + .ndo_fix_features = dpa_fix_features,
14098 + .ndo_do_ioctl = dpa_ioctl,
14099 +};
14100 +
14101 +const struct dpa_fq_cbs_t shared_fq_cbs = {
14102 + .rx_defq = { .cb = { .dqrr = shared_rx_dqrr } },
14103 + .tx_defq = { .cb = { .dqrr = shared_tx_default_dqrr } },
14104 + .rx_errq = { .cb = { .dqrr = shared_rx_dqrr } },
14105 + .tx_errq = { .cb = { .dqrr = shared_tx_error_dqrr } },
14106 + .egress_ern = { .cb = { .ern = shared_ern } }
14107 +};
14108 +EXPORT_SYMBOL(shared_fq_cbs);
14109 +
14110 +static inline void * __must_check __attribute__((nonnull))
14111 +dpa_phys2virt(const struct dpa_bp *dpa_bp, dma_addr_t addr)
14112 +{
14113 + return dpa_bp->vaddr + (addr - dpa_bp->paddr);
14114 +}
14115 +
14116 +static struct dpa_bp *dpa_size2pool(struct dpa_priv_s *priv, size_t size)
14117 +{
14118 + int i;
14119 +
14120 + for (i = 0; i < priv->bp_count; i++)
14121 + if ((size + priv->tx_headroom) <= priv->dpa_bp[i].size)
14122 + return dpa_bpid2pool(priv->dpa_bp[i].bpid);
14123 + return ERR_PTR(-ENODEV);
14124 +}
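A worked example of the selection above (pool sizes and the headroom value are illustrative):

    /* With pools of 1024, 2048 and 4096 bytes and tx_headroom = 136,
     * a 1500-byte frame needs 1500 + 136 = 1636 bytes, so the loop
     * returns the 2048-byte pool; a 4000-byte frame (4136 bytes with
     * headroom) exceeds every pool and yields ERR_PTR(-ENODEV).
     */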
14125 +
14126 +/* Copy to a memory region that requires kmapping from a linear buffer,
14127 + * taking into account page boundaries in the destination
14128 + */
14129 +static void
14130 +copy_to_unmapped_area(dma_addr_t phys_start, void *src, size_t buf_size)
14131 +{
14132 + struct page *page;
14133 + size_t size, offset;
14134 + void *page_vaddr;
14135 +
14136 + while (buf_size > 0) {
14137 + offset = offset_in_page(phys_start);
14138 + size = (offset + buf_size > PAGE_SIZE) ?
14139 + PAGE_SIZE - offset : buf_size;
14140 +
14141 + page = pfn_to_page(phys_start >> PAGE_SHIFT);
14142 + page_vaddr = kmap_atomic(page);
14143 +
14144 + memcpy(page_vaddr + offset, src, size);
14145 +
14146 + kunmap_atomic(page_vaddr);
14147 +
14148 + phys_start += size;
14149 + src += size;
14150 + buf_size -= size;
14151 + }
14152 +}
14153 +
14154 +/* Copy from a memory region that requires kmapping to a linear buffer,
14155 + * taking into account page boundaries in the source
14156 + */
14157 +static void
14158 +copy_from_unmapped_area(void *dest, dma_addr_t phys_start, size_t buf_size)
14159 +{
14160 + struct page *page;
14161 + size_t size, offset;
14162 + void *page_vaddr;
14163 +
14164 + while (buf_size > 0) {
14165 + offset = offset_in_page(phys_start);
14166 + size = (offset + buf_size > PAGE_SIZE) ?
14167 + PAGE_SIZE - offset : buf_size;
14168 +
14169 + page = pfn_to_page(phys_start >> PAGE_SHIFT);
14170 + page_vaddr = kmap_atomic(page);
14171 +
14172 + memcpy(dest, page_vaddr + offset, size);
14173 +
14174 + kunmap_atomic(page_vaddr);
14175 +
14176 + phys_start += size;
14177 + dest += size;
14178 + buf_size -= size;
14179 + }
14180 +}
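A worked example of the split performed by both copy helpers above (PAGE_SIZE = 4096; addresses illustrative):

    /* phys_start = 0x2f80, buf_size = 0x100:
     *   iteration 1: offset = 0xf80, size = 0x80 (up to the page end)
     *   iteration 2: offset = 0,     size = 0x80 (rest, next page)
     * Each page is kmap_atomic()-mapped only for the duration of its
     * own memcpy().
     */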
14181 +
14182 +static void
14183 +dpa_fd_release_sg(const struct net_device *net_dev,
14184 + const struct qm_fd *fd)
14185 +{
14186 + const struct dpa_priv_s *priv;
14187 + struct qm_sg_entry *sgt;
14188 + struct dpa_bp *_dpa_bp;
14189 + struct bm_buffer _bmb;
14190 +
14191 + priv = netdev_priv(net_dev);
14192 +
14193 + _bmb.hi = fd->addr_hi;
14194 + _bmb.lo = fd->addr_lo;
14195 +
14196 + _dpa_bp = dpa_bpid2pool(fd->bpid);
14197 + BUG_ON(!_dpa_bp);
14198 +
14199 + if (_dpa_bp->vaddr) {
14200 + sgt = dpa_phys2virt(_dpa_bp, bm_buf_addr(&_bmb)) +
14201 + dpa_fd_offset(fd);
14202 + dpa_release_sgt(sgt);
14203 + } else {
14204 + sgt = kmalloc(DPA_SGT_MAX_ENTRIES * sizeof(*sgt), GFP_ATOMIC);
14205 + if (sgt == NULL) {
14206 + if (netif_msg_tx_err(priv) && net_ratelimit())
14207 + netdev_err(net_dev,
14208 + "Memory allocation failed\n");
14209 + return;
14210 + }
14211 +
14212 + copy_from_unmapped_area(sgt, bm_buf_addr(&_bmb) +
14213 + dpa_fd_offset(fd),
14214 + min(DPA_SGT_MAX_ENTRIES * sizeof(*sgt),
14215 + _dpa_bp->size));
14216 + dpa_release_sgt(sgt);
14217 + kfree(sgt);
14218 + }
14219 +
14220 + while (bman_release(_dpa_bp->pool, &_bmb, 1, 0))
14221 + cpu_relax();
14222 +}
14223 +
14224 +static enum qman_cb_dqrr_result __hot
14225 +shared_rx_dqrr(struct qman_portal *portal, struct qman_fq *fq,
14226 + const struct qm_dqrr_entry *dq)
14227 +{
14228 + struct net_device *net_dev;
14229 + struct dpa_priv_s *priv;
14230 + struct dpa_percpu_priv_s *percpu_priv;
14231 + const struct qm_fd *fd = &dq->fd;
14232 + struct dpa_bp *dpa_bp;
14233 + struct sk_buff *skb;
14234 + struct qm_sg_entry *sgt;
14235 + int i;
14236 + void *frag_addr;
14237 + u32 frag_length;
14238 + u32 offset;
14239 +
14240 + net_dev = ((struct dpa_fq *)fq)->net_dev;
14241 + priv = netdev_priv(net_dev);
14242 +
14243 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
14244 +
14245 + dpa_bp = dpa_bpid2pool(fd->bpid);
14246 + BUG_ON(!dpa_bp);
14247 +
14248 +	if (unlikely(fd->status & FM_FD_STAT_RX_ERRORS)) {
14249 + if (netif_msg_hw(priv) && net_ratelimit())
14250 + netdev_warn(net_dev, "FD status = 0x%08x\n",
14251 + fd->status & FM_FD_STAT_RX_ERRORS);
14252 +
14253 + percpu_priv->stats.rx_errors++;
14254 +
14255 + goto out;
14256 + }
14257 +
14258 + skb = __netdev_alloc_skb(net_dev,
14259 + priv->tx_headroom + dpa_fd_length(fd),
14260 + GFP_ATOMIC);
14261 + if (unlikely(skb == NULL)) {
14262 + if (netif_msg_rx_err(priv) && net_ratelimit())
14263 + netdev_err(net_dev, "Could not alloc skb\n");
14264 +
14265 + percpu_priv->stats.rx_dropped++;
14266 +
14267 + goto out;
14268 + }
14269 +
14270 + skb_reserve(skb, priv->tx_headroom);
14271 +
14272 + if (fd->format == qm_fd_sg) {
14273 + if (dpa_bp->vaddr) {
14274 + sgt = dpa_phys2virt(dpa_bp,
14275 + qm_fd_addr(fd)) + dpa_fd_offset(fd);
14276 +
14277 + for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
14278 + offset = qm_sg_entry_get_offset(&sgt[i]);
14279 + frag_addr = dpa_phys2virt(dpa_bp,
14280 + qm_sg_addr(&sgt[i]) +
14281 + offset);
14282 + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
14283 + frag_length = qm_sg_entry_get_len(&sgt[i]);
14284 +
14285 + /* copy from sgt[i] */
14286 + memcpy(skb_put(skb, frag_length), frag_addr,
14287 + frag_length);
14288 + if (qm_sg_entry_get_final(&sgt[i]))
14289 + break;
14290 + }
14291 + } else {
14292 + sgt = kmalloc(DPA_SGT_MAX_ENTRIES * sizeof(*sgt),
14293 + GFP_ATOMIC);
14294 + if (unlikely(sgt == NULL)) {
14295 + if (netif_msg_tx_err(priv) && net_ratelimit())
14296 + netdev_err(net_dev,
14297 + "Memory allocation failed\n");
14298 +				dev_kfree_skb_any(skb);
14298 +				goto out;
14299 + }
14300 +
14301 + copy_from_unmapped_area(sgt,
14302 + qm_fd_addr(fd) + dpa_fd_offset(fd),
14303 + min(DPA_SGT_MAX_ENTRIES * sizeof(*sgt),
14304 + dpa_bp->size));
14305 +
14306 + for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
14307 + DPA_BUG_ON(qm_sg_entry_get_ext(&sgt[i]));
14308 + frag_length = qm_sg_entry_get_len(&sgt[i]);
14309 + copy_from_unmapped_area(
14310 + skb_put(skb, frag_length),
14311 + qm_sg_addr(&sgt[i]) +
14312 + qm_sg_entry_get_offset(&sgt[i]),
14313 + frag_length);
14314 +
14315 + if (qm_sg_entry_get_final(&sgt[i]))
14316 + break;
14317 + }
14318 +
14319 + kfree(sgt);
14320 + }
14321 + goto skb_copied;
14322 + }
14323 +
14324 + /* otherwise fd->format == qm_fd_contig */
14325 + if (dpa_bp->vaddr) {
14326 + /* Fill the SKB */
14327 + memcpy(skb_put(skb, dpa_fd_length(fd)),
14328 + dpa_phys2virt(dpa_bp, qm_fd_addr(fd)) +
14329 + dpa_fd_offset(fd), dpa_fd_length(fd));
14330 + } else {
14331 + copy_from_unmapped_area(skb_put(skb, dpa_fd_length(fd)),
14332 + qm_fd_addr(fd) + dpa_fd_offset(fd),
14333 + dpa_fd_length(fd));
14334 + }
14335 +
14336 +skb_copied:
14337 + skb->protocol = eth_type_trans(skb, net_dev);
14338 +
14339 + /* IP Reassembled frames are allowed to be larger than MTU */
14340 + if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) &&
14341 + !(fd->status & FM_FD_IPR))) {
14342 + percpu_priv->stats.rx_dropped++;
14343 + dev_kfree_skb_any(skb);
14344 + goto out;
14345 + }
14346 +
14347 +	if (unlikely(netif_rx(skb) != NET_RX_SUCCESS))
14348 +		goto out;
14349 +
14350 +	percpu_priv->stats.rx_packets++;
14351 +	percpu_priv->stats.rx_bytes += dpa_fd_length(fd);
14353 +
14354 +out:
14355 + if (fd->format == qm_fd_sg)
14356 + dpa_fd_release_sg(net_dev, fd);
14357 + else
14358 + dpa_fd_release(net_dev, fd);
14359 +
14360 + return qman_cb_dqrr_consume;
14361 +}
14362 +
14363 +static enum qman_cb_dqrr_result
14364 +shared_tx_error_dqrr(struct qman_portal *portal,
14365 + struct qman_fq *fq,
14366 + const struct qm_dqrr_entry *dq)
14367 +{
14368 + struct net_device *net_dev;
14369 + struct dpa_priv_s *priv;
14370 + struct dpa_percpu_priv_s *percpu_priv;
14371 + struct dpa_bp *dpa_bp;
14372 + const struct qm_fd *fd = &dq->fd;
14373 +
14374 + net_dev = ((struct dpa_fq *)fq)->net_dev;
14375 + priv = netdev_priv(net_dev);
14376 +
14377 + dpa_bp = dpa_bpid2pool(fd->bpid);
14378 + BUG_ON(!dpa_bp);
14379 +
14380 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
14381 +
14382 + if (netif_msg_hw(priv) && net_ratelimit())
14383 + netdev_warn(net_dev, "FD status = 0x%08x\n",
14384 + fd->status & FM_FD_STAT_TX_ERRORS);
14385 +
14386 + if ((fd->format == qm_fd_sg) && (!dpa_bp->vaddr))
14387 + dpa_fd_release_sg(net_dev, fd);
14388 + else
14389 + dpa_fd_release(net_dev, fd);
14390 +
14391 + percpu_priv->stats.tx_errors++;
14392 +
14393 + return qman_cb_dqrr_consume;
14394 +}
14395 +
14396 +static enum qman_cb_dqrr_result __hot
14397 +shared_tx_default_dqrr(struct qman_portal *portal,
14398 + struct qman_fq *fq,
14399 + const struct qm_dqrr_entry *dq)
14400 +{
14401 + struct net_device *net_dev;
14402 + struct dpa_priv_s *priv;
14403 + struct dpa_percpu_priv_s *percpu_priv;
14404 + struct dpa_bp *dpa_bp;
14405 + const struct qm_fd *fd = &dq->fd;
14406 +
14407 + net_dev = ((struct dpa_fq *)fq)->net_dev;
14408 + priv = netdev_priv(net_dev);
14409 +
14410 + dpa_bp = dpa_bpid2pool(fd->bpid);
14411 + BUG_ON(!dpa_bp);
14412 +
14413 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
14414 +
14415 +	if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS)) {
14416 + if (netif_msg_hw(priv) && net_ratelimit())
14417 + netdev_warn(net_dev, "FD status = 0x%08x\n",
14418 + fd->status & FM_FD_STAT_TX_ERRORS);
14419 +
14420 + percpu_priv->stats.tx_errors++;
14421 + }
14422 +
14423 + if ((fd->format == qm_fd_sg) && (!dpa_bp->vaddr))
14424 + dpa_fd_release_sg(net_dev, fd);
14425 + else
14426 + dpa_fd_release(net_dev, fd);
14427 +
14428 + percpu_priv->tx_confirm++;
14429 +
14430 + return qman_cb_dqrr_consume;
14431 +}
14432 +
14433 +static void shared_ern(struct qman_portal *portal,
14434 + struct qman_fq *fq,
14435 + const struct qm_mr_entry *msg)
14436 +{
14437 + struct net_device *net_dev;
14438 + const struct dpa_priv_s *priv;
14439 + struct dpa_percpu_priv_s *percpu_priv;
14440 + struct dpa_fq *dpa_fq = (struct dpa_fq *)fq;
14441 +
14442 + net_dev = dpa_fq->net_dev;
14443 + priv = netdev_priv(net_dev);
14444 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
14445 +
14446 + dpa_fd_release(net_dev, &msg->ern.fd);
14447 +
14448 + percpu_priv->stats.tx_dropped++;
14449 + percpu_priv->stats.tx_fifo_errors++;
14450 + count_ern(percpu_priv, msg);
14451 +}
14452 +
14453 +int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev)
14454 +{
14455 + struct dpa_bp *dpa_bp;
14456 + struct bm_buffer bmb;
14457 + struct dpa_percpu_priv_s *percpu_priv;
14458 + struct dpa_priv_s *priv;
14459 + struct qm_fd fd;
14460 + int queue_mapping;
14461 + int err;
14462 + void *dpa_bp_vaddr;
14463 + fm_prs_result_t parse_results;
14464 + fm_prs_result_t *parse_results_ref;
14465 + struct qman_fq *egress_fq, *conf_fq;
14466 +
14467 + priv = netdev_priv(net_dev);
14468 + percpu_priv = raw_cpu_ptr(priv->percpu_priv);
14469 +
14470 + memset(&fd, 0, sizeof(fd));
14471 + fd.format = qm_fd_contig;
14472 +
14473 + queue_mapping = smp_processor_id();
14474 +
14475 + dpa_bp = dpa_size2pool(priv, skb_headlen(skb));
14476 +	if (unlikely(IS_ERR(dpa_bp))) {
14477 + percpu_priv->stats.tx_errors++;
14478 + err = PTR_ERR(dpa_bp);
14479 + goto bpools_too_small_error;
14480 + }
14481 +
14482 + err = bman_acquire(dpa_bp->pool, &bmb, 1, 0);
14483 + if (unlikely(err <= 0)) {
14484 + percpu_priv->stats.tx_errors++;
14485 + if (err == 0)
14486 + err = -ENOMEM;
14487 + goto buf_acquire_failed;
14488 + }
14489 + fd.bpid = dpa_bp->bpid;
14490 +
14491 + fd.length20 = skb_headlen(skb);
14492 + fd.addr_hi = (uint8_t)bmb.hi;
14493 + fd.addr_lo = bmb.lo;
14494 + fd.offset = priv->tx_headroom;
14495 +
14496 + /* The virtual address of the buffer pool is expected to be NULL
14497 + * in scenarios like MAC-less or Shared-MAC between Linux and
14498 + * USDPAA. In this case the buffers are dynamically mapped/unmapped.
14499 + */
14500 + if (dpa_bp->vaddr) {
14501 + dpa_bp_vaddr = dpa_phys2virt(dpa_bp, bm_buf_addr(&bmb));
14502 +
14503 + /* Copy the packet payload */
14504 + skb_copy_from_linear_data(skb,
14505 + dpa_bp_vaddr + dpa_fd_offset(&fd),
14506 + dpa_fd_length(&fd));
14507 +
14508 +		/* MAC-less mode: there is no mac device, or a peer is set */
14509 + if (!priv->mac_dev || priv->peer) {
14510 + parse_results_ref = (fm_prs_result_t *) (dpa_bp_vaddr +
14511 + DPA_TX_PRIV_DATA_SIZE);
14512 +			/* Default values; FMan will neither generate
14513 +			 * nor validate the checksum.
14514 + */
14515 + parse_results_ref->l3r = 0;
14516 + parse_results_ref->l4r = 0;
14517 + parse_results_ref->ip_off[0] = 0xff;
14518 + parse_results_ref->ip_off[1] = 0xff;
14519 + parse_results_ref->l4_off = 0xff;
14520 +
14521 + fd.cmd |= FM_FD_CMD_DTC | FM_FD_CMD_RPD;
14522 + } else {
14523 + /* Enable L3/L4 hardware checksum computation,
14524 + * if applicable
14525 + */
14526 + err = dpa_enable_tx_csum(priv, skb, &fd,
14527 + dpa_bp_vaddr + DPA_TX_PRIV_DATA_SIZE);
14528 +
14529 + if (unlikely(err < 0)) {
14530 + if (netif_msg_tx_err(priv) && net_ratelimit())
14531 + netdev_err(net_dev,
14532 + "Tx HW csum error: %d\n", err);
14533 + percpu_priv->stats.tx_errors++;
14534 + goto l3_l4_csum_failed;
14535 + }
14536 + }
14537 +
14538 + } else {
14539 + if (!priv->mac_dev || priv->peer) {
14540 +			/* Default values; FMan will neither generate
14541 +			 * nor validate the checksum.
14542 + */
14543 + parse_results.l3r = 0;
14544 + parse_results.l4r = 0;
14545 + parse_results.ip_off[0] = 0xff;
14546 + parse_results.ip_off[1] = 0xff;
14547 + parse_results.l4_off = 0xff;
14548 +
14549 + fd.cmd |= FM_FD_CMD_DTC | FM_FD_CMD_RPD;
14550 + } else {
14551 + /* Enable L3/L4 hardware checksum computation,
14552 + * if applicable
14553 + */
14554 + err = dpa_enable_tx_csum(priv, skb, &fd,
14555 + (char *)&parse_results);
14556 +
14557 + if (unlikely(err < 0)) {
14558 + if (netif_msg_tx_err(priv) && net_ratelimit())
14559 + netdev_err(net_dev,
14560 + "Tx HW csum error: %d\n", err);
14561 + percpu_priv->stats.tx_errors++;
14562 + goto l3_l4_csum_failed;
14563 + }
14564 +
14565 + }
14566 +
14567 + copy_to_unmapped_area(bm_buf_addr(&bmb) + DPA_TX_PRIV_DATA_SIZE,
14568 + &parse_results,
14569 + DPA_PARSE_RESULTS_SIZE);
14570 +
14571 + copy_to_unmapped_area(bm_buf_addr(&bmb) + dpa_fd_offset(&fd),
14572 + skb->data,
14573 + dpa_fd_length(&fd));
14574 + }
14575 +
14576 + egress_fq = priv->egress_fqs[queue_mapping];
14577 + conf_fq = priv->conf_fqs[queue_mapping];
14578 +
14579 + err = dpa_xmit(priv, &percpu_priv->stats, &fd, egress_fq, conf_fq);
14580 +
14581 +l3_l4_csum_failed:
14582 +bpools_too_small_error:
14583 +buf_acquire_failed:
14584 + /* We're done with the skb */
14585 + dev_kfree_skb(skb);
14586 +
14587 + /* err remains unused, NETDEV_TX_OK must be returned here */
14588 + return NETDEV_TX_OK;
14589 +}
14590 +EXPORT_SYMBOL(dpa_shared_tx);
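The no-checksum parse-results defaults are set up twice in dpa_shared_tx() (mapped and unmapped cases). A hypothetical helper that captures the shared logic, shown only as a possible refactoring:

    static void dpa_prs_set_no_csum(fm_prs_result_t *prs, struct qm_fd *fd)
    {
    	/* FMan will neither generate nor validate checksums */
    	prs->l3r = 0;
    	prs->l4r = 0;
    	prs->ip_off[0] = 0xff;
    	prs->ip_off[1] = 0xff;
    	prs->l4_off = 0xff;

    	fd->cmd |= FM_FD_CMD_DTC | FM_FD_CMD_RPD;
    }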
14591 +
14592 +static int dpa_shared_netdev_init(struct device_node *dpa_node,
14593 + struct net_device *net_dev)
14594 +{
14595 + struct dpa_priv_s *priv = netdev_priv(net_dev);
14596 + const uint8_t *mac_addr;
14597 +
14598 + net_dev->netdev_ops = &dpa_shared_ops;
14599 +
14600 + net_dev->mem_start = priv->mac_dev->res->start;
14601 + net_dev->mem_end = priv->mac_dev->res->end;
14602 +
14603 + mac_addr = priv->mac_dev->addr;
14604 +
14605 + net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
14606 + NETIF_F_LLTX);
14607 +
14608 + return dpa_netdev_init(net_dev, mac_addr, shared_tx_timeout);
14609 +}
14610 +
14611 +#ifdef CONFIG_PM
14612 +
14613 +static int dpa_shared_suspend(struct device *dev)
14614 +{
14615 + struct net_device *net_dev;
14616 + struct dpa_priv_s *priv;
14617 + struct mac_device *mac_dev;
14618 + int err = 0;
14619 +
14620 + net_dev = dev_get_drvdata(dev);
14621 + if (net_dev->flags & IFF_UP) {
14622 + priv = netdev_priv(net_dev);
14623 + mac_dev = priv->mac_dev;
14624 +
14625 + err = fm_port_suspend(mac_dev->port_dev[RX]);
14626 + if (err)
14627 + goto port_suspend_failed;
14628 +
14629 + err = fm_port_suspend(mac_dev->port_dev[TX]);
14630 + if (err)
14631 + err = fm_port_resume(mac_dev->port_dev[RX]);
14632 + }
14633 +
14634 +port_suspend_failed:
14635 + return err;
14636 +}
14637 +
14638 +static int dpa_shared_resume(struct device *dev)
14639 +{
14640 + struct net_device *net_dev;
14641 + struct dpa_priv_s *priv;
14642 + struct mac_device *mac_dev;
14643 + int err = 0;
14644 +
14645 + net_dev = dev_get_drvdata(dev);
14646 + if (net_dev->flags & IFF_UP) {
14647 + priv = netdev_priv(net_dev);
14648 + mac_dev = priv->mac_dev;
14649 +
14650 + err = fm_port_resume(mac_dev->port_dev[TX]);
14651 + if (err)
14652 + goto port_resume_failed;
14653 +
14654 + err = fm_port_resume(mac_dev->port_dev[RX]);
14655 + if (err)
14656 + err = fm_port_suspend(mac_dev->port_dev[TX]);
14657 + }
14658 +
14659 +port_resume_failed:
14660 + return err;
14661 +}
14662 +
14663 +static const struct dev_pm_ops shared_pm_ops = {
14664 + .suspend = dpa_shared_suspend,
14665 + .resume = dpa_shared_resume,
14666 +};
14667 +
14668 +#define SHARED_PM_OPS (&shared_pm_ops)
14669 +
14670 +#else /* CONFIG_PM */
14671 +
14672 +#define SHARED_PM_OPS NULL
14673 +
14674 +#endif /* CONFIG_PM */
14675 +
14676 +static int
14677 +dpaa_eth_shared_probe(struct platform_device *_of_dev)
14678 +{
14679 + int err = 0, i, channel;
14680 + struct device *dev;
14681 + struct device_node *dpa_node;
14682 + struct dpa_bp *dpa_bp;
14683 + struct dpa_fq *dpa_fq, *tmp;
14684 + size_t count;
14685 + struct net_device *net_dev = NULL;
14686 + struct dpa_priv_s *priv = NULL;
14687 + struct dpa_percpu_priv_s *percpu_priv;
14688 + struct fm_port_fqs port_fqs;
14689 + struct dpa_buffer_layout_s *buf_layout = NULL;
14690 + struct mac_device *mac_dev;
14691 + struct task_struct *kth;
14692 +
14693 + dev = &_of_dev->dev;
14694 +
14695 + dpa_node = dev->of_node;
14696 +
14697 + if (!of_device_is_available(dpa_node))
14698 + return -ENODEV;
14699 +
14700 + /* Get the buffer pools assigned to this interface */
14701 + dpa_bp = dpa_bp_probe(_of_dev, &count);
14702 + if (IS_ERR(dpa_bp))
14703 + return PTR_ERR(dpa_bp);
14704 +
14705 + for (i = 0; i < count; i++)
14706 + dpa_bp[i].seed_cb = dpa_bp_shared_port_seed;
14707 +
14708 + /* Allocate this early, so we can store relevant information in
14709 + * the private area (needed by 1588 code in dpa_mac_probe)
14710 + */
14711 + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
14712 + if (!net_dev) {
14713 + dev_err(dev, "alloc_etherdev_mq() failed\n");
14714 + return -ENOMEM;
14715 + }
14716 +
14717 + /* Do this here, so we can be verbose early */
14718 + SET_NETDEV_DEV(net_dev, dev);
14719 + dev_set_drvdata(dev, net_dev);
14720 +
14721 + priv = netdev_priv(net_dev);
14722 + priv->net_dev = net_dev;
14723 + strcpy(priv->if_type, "shared");
14724 +
14725 + priv->msg_enable = netif_msg_init(advanced_debug, -1);
14726 +
14727 + mac_dev = dpa_mac_probe(_of_dev);
14728 +	if (IS_ERR_OR_NULL(mac_dev)) {
14729 +		err = mac_dev ? PTR_ERR(mac_dev) : -ENODEV;
14730 + goto mac_probe_failed;
14731 + }
14732 +
14733 + /* We have physical ports, so we need to establish
14734 + * the buffer layout.
14735 + */
14736 + buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
14737 + GFP_KERNEL);
14738 + if (!buf_layout) {
14739 + dev_err(dev, "devm_kzalloc() failed\n");
14740 +		err = -ENOMEM;
14740 +		goto alloc_failed;
14741 + }
14742 + dpa_set_buffers_layout(mac_dev, buf_layout);
14743 +
14744 + INIT_LIST_HEAD(&priv->dpa_fq_list);
14745 +
14746 + memset(&port_fqs, 0, sizeof(port_fqs));
14747 +
14748 + err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs,
14749 + false, RX);
14750 + if (!err)
14751 + err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
14752 + &port_fqs, false, TX);
14753 + if (err < 0)
14754 + goto fq_probe_failed;
14755 +
14756 + /* bp init */
14757 + priv->bp_count = count;
14758 + err = dpa_bp_create(net_dev, dpa_bp, count);
14759 + if (err < 0)
14760 + goto bp_create_failed;
14761 +
14762 + priv->mac_dev = mac_dev;
14763 +
14764 + channel = dpa_get_channel();
14765 +
14766 + if (channel < 0) {
14767 + err = channel;
14768 + goto get_channel_failed;
14769 + }
14770 +
14771 + priv->channel = (uint16_t)channel;
14772 +
14773 + /* Start a thread that will walk the cpus with affine portals
14774 + * and add this pool channel to each's dequeue mask.
14775 + */
14776 + kth = kthread_run(dpaa_eth_add_channel,
14777 + (void *)(unsigned long)priv->channel,
14778 + "dpaa_%p:%d", net_dev, priv->channel);
14779 +	if (IS_ERR(kth)) {
14780 + err = -ENOMEM;
14781 + goto add_channel_failed;
14782 + }
14783 +
14784 + dpa_fq_setup(priv, &shared_fq_cbs, priv->mac_dev->port_dev[TX]);
14785 +
14786 + /* Create a congestion group for this netdev, with
14787 + * dynamically-allocated CGR ID.
14788 + * Must be executed after probing the MAC, but before
14789 + * assigning the egress FQs to the CGRs.
14790 + */
14791 + err = dpaa_eth_cgr_init(priv);
14792 + if (err < 0) {
14793 + dev_err(dev, "Error initializing CGR\n");
14794 + goto cgr_init_failed;
14795 + }
14796 +
14797 + /* Add the FQs to the interface, and make them active */
14798 + list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
14799 + err = dpa_fq_init(dpa_fq, false);
14800 + if (err < 0)
14801 + goto fq_alloc_failed;
14802 + }
14803 +
14804 + priv->buf_layout = buf_layout;
14805 + priv->tx_headroom =
14806 + dpa_get_headroom(&priv->buf_layout[TX]);
14807 +
14808 + /* All real interfaces need their ports initialized */
14809 + dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
14810 + buf_layout, dev);
14811 +
14812 + /* Now we need to initialize either a private or shared interface */
14813 + priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
14814 +
14815 + if (priv->percpu_priv == NULL) {
14816 + dev_err(dev, "devm_alloc_percpu() failed\n");
14817 + err = -ENOMEM;
14818 + goto alloc_percpu_failed;
14819 + }
14820 + for_each_possible_cpu(i) {
14821 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
14822 + memset(percpu_priv, 0, sizeof(*percpu_priv));
14823 + }
14824 +
14825 + err = dpa_shared_netdev_init(dpa_node, net_dev);
14826 +
14827 + if (err < 0)
14828 + goto netdev_init_failed;
14829 +
14830 + dpaa_eth_sysfs_init(&net_dev->dev);
14831 +
14832 + pr_info("fsl_dpa_shared: Probed shared interface %s\n",
14833 + net_dev->name);
14834 +
14835 + return 0;
14836 +
14837 +netdev_init_failed:
14838 +alloc_percpu_failed:
14839 +fq_alloc_failed:
14840 + if (net_dev) {
14841 + dpa_fq_free(dev, &priv->dpa_fq_list);
14842 + qman_release_cgrid(priv->cgr_data.cgr.cgrid);
14843 + qman_delete_cgr(&priv->cgr_data.cgr);
14844 + }
14845 +cgr_init_failed:
14846 +add_channel_failed:
14847 +get_channel_failed:
14848 + if (net_dev)
14849 + dpa_bp_free(priv);
14850 +bp_create_failed:
14851 +fq_probe_failed:
14852 + devm_kfree(dev, buf_layout);
14853 +alloc_failed:
14854 +mac_probe_failed:
14855 + dev_set_drvdata(dev, NULL);
14856 + if (net_dev)
14857 + free_netdev(net_dev);
14858 +
14859 + return err;
14860 +}
14861 +
14862 +static const struct of_device_id dpa_shared_match[] = {
14863 + {
14864 + .compatible = "fsl,dpa-ethernet-shared"
14865 + },
14866 + {}
14867 +};
14868 +MODULE_DEVICE_TABLE(of, dpa_shared_match);
14869 +
14870 +static struct platform_driver dpa_shared_driver = {
14871 + .driver = {
14872 + .name = KBUILD_MODNAME "-shared",
14873 + .of_match_table = dpa_shared_match,
14874 + .owner = THIS_MODULE,
14875 + .pm = SHARED_PM_OPS,
14876 + },
14877 + .probe = dpaa_eth_shared_probe,
14878 + .remove = dpa_remove
14879 +};
14880 +
14881 +static int __init __cold dpa_shared_load(void)
14882 +{
14883 + int _errno;
14884 +
14885 + pr_info(DPA_DESCRIPTION "\n");
14886 +
14887 + /* Initialize dpaa_eth mirror values */
14888 + dpa_rx_extra_headroom = fm_get_rx_extra_headroom();
14889 + dpa_max_frm = fm_get_max_frm();
14890 +
14891 + _errno = platform_driver_register(&dpa_shared_driver);
14892 + if (unlikely(_errno < 0)) {
14893 + pr_err(KBUILD_MODNAME
14894 + ": %s:%hu:%s(): platform_driver_register() = %d\n",
14895 + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
14896 + }
14897 +
14898 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
14899 + KBUILD_BASENAME".c", __func__);
14900 +
14901 + return _errno;
14902 +}
14903 +module_init(dpa_shared_load);
14904 +
14905 +static void __exit __cold dpa_shared_unload(void)
14906 +{
14907 + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
14908 + KBUILD_BASENAME".c", __func__);
14909 +
14910 + platform_driver_unregister(&dpa_shared_driver);
14911 +}
14912 +module_exit(dpa_shared_unload);
14913 --- /dev/null
14914 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c
14915 @@ -0,0 +1,278 @@
14916 +/* Copyright 2008-2012 Freescale Semiconductor Inc.
14917 + *
14918 + * Redistribution and use in source and binary forms, with or without
14919 + * modification, are permitted provided that the following conditions are met:
14920 + * * Redistributions of source code must retain the above copyright
14921 + * notice, this list of conditions and the following disclaimer.
14922 + * * Redistributions in binary form must reproduce the above copyright
14923 + * notice, this list of conditions and the following disclaimer in the
14924 + * documentation and/or other materials provided with the distribution.
14925 + * * Neither the name of Freescale Semiconductor nor the
14926 + * names of its contributors may be used to endorse or promote products
14927 + * derived from this software without specific prior written permission.
14928 + *
14929 + *
14930 + * ALTERNATIVELY, this software may be distributed under the terms of the
14931 + * GNU General Public License ("GPL") as published by the Free Software
14932 + * Foundation, either version 2 of that License or (at your option) any
14933 + * later version.
14934 + *
14935 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
14936 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
14937 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
14938 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
14939 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
14940 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
14941 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
14942 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
14943 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
14944 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
14945 + */
14946 +
14947 +#include <linux/init.h>
14948 +#include <linux/module.h>
14949 +#include <linux/kthread.h>
14950 +#include <linux/io.h>
14951 +#include <linux/of_net.h>
14952 +#include "dpaa_eth.h"
14953 +#include "mac.h" /* struct mac_device */
14954 +#ifdef CONFIG_FSL_DPAA_1588
14955 +#include "dpaa_1588.h"
14956 +#endif
14957 +
14958 +static ssize_t dpaa_eth_show_addr(struct device *dev,
14959 + struct device_attribute *attr, char *buf)
14960 +{
14961 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
14962 + struct mac_device *mac_dev = priv->mac_dev;
14963 +
14964 + if (mac_dev)
14965 + return sprintf(buf, "%llx",
14966 + (unsigned long long)mac_dev->res->start);
14967 + else
14968 + return sprintf(buf, "none");
14969 +}
14970 +
14971 +static ssize_t dpaa_eth_show_type(struct device *dev,
14972 + struct device_attribute *attr, char *buf)
14973 +{
14974 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
14975 + ssize_t res = 0;
14976 +
14977 + if (priv)
14978 + res = sprintf(buf, "%s", priv->if_type);
14979 +
14980 + return res;
14981 +}
14982 +
14983 +static ssize_t dpaa_eth_show_fqids(struct device *dev,
14984 + struct device_attribute *attr, char *buf)
14985 +{
14986 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
14987 + ssize_t bytes = 0;
14988 + int i = 0;
14989 + char *str;
14990 + struct dpa_fq *fq;
14991 + struct dpa_fq *tmp;
14992 + struct dpa_fq *prev = NULL;
14993 + u32 first_fqid = 0;
14994 + u32 last_fqid = 0;
14995 + char *prevstr = NULL;
14996 +
14997 + list_for_each_entry_safe(fq, tmp, &priv->dpa_fq_list, list) {
14998 + switch (fq->fq_type) {
14999 + case FQ_TYPE_RX_DEFAULT:
15000 + str = "Rx default";
15001 + break;
15002 + case FQ_TYPE_RX_ERROR:
15003 + str = "Rx error";
15004 + break;
15005 + case FQ_TYPE_RX_PCD:
15006 + str = "Rx PCD";
15007 + break;
15008 + case FQ_TYPE_TX_CONFIRM:
15009 + str = "Tx default confirmation";
15010 + break;
15011 + case FQ_TYPE_TX_CONF_MQ:
15012 + str = "Tx confirmation (mq)";
15013 + break;
15014 + case FQ_TYPE_TX_ERROR:
15015 + str = "Tx error";
15016 + break;
15017 + case FQ_TYPE_TX:
15018 + str = "Tx";
15019 + break;
15020 + case FQ_TYPE_RX_PCD_HI_PRIO:
15021 +			str = "Rx PCD High Priority";
15022 + break;
15023 + default:
15024 + str = "Unknown";
15025 + }
15026 +
15027 + if (prev && (abs(fq->fqid - prev->fqid) != 1 ||
15028 + str != prevstr)) {
15029 + if (last_fqid == first_fqid)
15030 + bytes += sprintf(buf + bytes,
15031 + "%s: %d\n", prevstr, prev->fqid);
15032 + else
15033 + bytes += sprintf(buf + bytes,
15034 + "%s: %d - %d\n", prevstr,
15035 + first_fqid, last_fqid);
15036 + }
15037 +
15038 + if (prev && abs(fq->fqid - prev->fqid) == 1 && str == prevstr)
15039 + last_fqid = fq->fqid;
15040 + else
15041 + first_fqid = last_fqid = fq->fqid;
15042 +
15043 + prev = fq;
15044 + prevstr = str;
15045 + i++;
15046 + }
15047 +
15048 + if (prev) {
15049 + if (last_fqid == first_fqid)
15050 + bytes += sprintf(buf + bytes, "%s: %d\n", prevstr,
15051 + prev->fqid);
15052 + else
15053 + bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr,
15054 + first_fqid, last_fqid);
15055 + }
15056 +
15057 + return bytes;
15058 +}
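The coalescing above prints one line per run of consecutive FQIDs of the same type; sample output (FQID values purely illustrative):

    /* Rx error: 263
     * Rx default: 264
     * Tx error: 285
     * Tx default confirmation: 286
     * Tx confirmation (mq): 864 - 871
     * Tx: 872 - 879
     */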
15059 +
15060 +static ssize_t dpaa_eth_show_bpids(struct device *dev,
15061 + struct device_attribute *attr, char *buf)
15062 +{
15063 + ssize_t bytes = 0;
15064 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
15065 + struct dpa_bp *dpa_bp = priv->dpa_bp;
15066 + int i = 0;
15067 +
15068 + for (i = 0; i < priv->bp_count; i++)
15069 +		bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
15070 + dpa_bp[i].bpid);
15071 +
15072 + return bytes;
15073 +}
15074 +
15075 +static ssize_t dpaa_eth_show_mac_regs(struct device *dev,
15076 + struct device_attribute *attr, char *buf)
15077 +{
15078 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
15079 + struct mac_device *mac_dev = priv->mac_dev;
15080 + int n = 0;
15081 +
15082 + if (mac_dev)
15083 + n = fm_mac_dump_regs(mac_dev, buf, n);
15084 + else
15085 + return sprintf(buf, "no mac registers\n");
15086 +
15087 + return n;
15088 +}
15089 +
15090 +static ssize_t dpaa_eth_show_mac_rx_stats(struct device *dev,
15091 + struct device_attribute *attr, char *buf)
15092 +{
15093 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
15094 + struct mac_device *mac_dev = priv->mac_dev;
15095 + int n = 0;
15096 +
15097 + if (mac_dev)
15098 + n = fm_mac_dump_rx_stats(mac_dev, buf, n);
15099 + else
15100 + return sprintf(buf, "no mac rx stats\n");
15101 +
15102 + return n;
15103 +}
15104 +
15105 +static ssize_t dpaa_eth_show_mac_tx_stats(struct device *dev,
15106 + struct device_attribute *attr, char *buf)
15107 +{
15108 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
15109 + struct mac_device *mac_dev = priv->mac_dev;
15110 + int n = 0;
15111 +
15112 + if (mac_dev)
15113 + n = fm_mac_dump_tx_stats(mac_dev, buf, n);
15114 + else
15115 + return sprintf(buf, "no mac tx stats\n");
15116 +
15117 + return n;
15118 +}
15119 +
15120 +#ifdef CONFIG_FSL_DPAA_1588
15121 +static ssize_t dpaa_eth_show_ptp_1588(struct device *dev,
15122 + struct device_attribute *attr, char *buf)
15123 +{
15124 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
15125 +
15126 + if (priv->tsu && priv->tsu->valid)
15127 + return sprintf(buf, "1\n");
15128 + else
15129 + return sprintf(buf, "0\n");
15130 +}
15131 +
15132 +static ssize_t dpaa_eth_set_ptp_1588(struct device *dev,
15133 + struct device_attribute *attr,
15134 + const char *buf, size_t count)
15135 +{
15136 + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
15137 + unsigned int num;
15138 + unsigned long flags;
15139 +
15140 + if (kstrtouint(buf, 0, &num) < 0)
15141 + return -EINVAL;
15142 +
15143 + local_irq_save(flags);
15144 +
15145 + if (num) {
15146 + if (priv->tsu)
15147 + priv->tsu->valid = TRUE;
15148 + } else {
15149 + if (priv->tsu)
15150 + priv->tsu->valid = FALSE;
15151 + }
15152 +
15153 + local_irq_restore(flags);
15154 +
15155 + return count;
15156 +}
15157 +#endif
15158 +
15159 +static struct device_attribute dpaa_eth_attrs[] = {
15160 + __ATTR(device_addr, S_IRUGO, dpaa_eth_show_addr, NULL),
15161 + __ATTR(device_type, S_IRUGO, dpaa_eth_show_type, NULL),
15162 + __ATTR(fqids, S_IRUGO, dpaa_eth_show_fqids, NULL),
15163 + __ATTR(bpids, S_IRUGO, dpaa_eth_show_bpids, NULL),
15164 + __ATTR(mac_regs, S_IRUGO, dpaa_eth_show_mac_regs, NULL),
15165 + __ATTR(mac_rx_stats, S_IRUGO, dpaa_eth_show_mac_rx_stats, NULL),
15166 + __ATTR(mac_tx_stats, S_IRUGO, dpaa_eth_show_mac_tx_stats, NULL),
15167 +#ifdef CONFIG_FSL_DPAA_1588
15168 + __ATTR(ptp_1588, S_IRUGO | S_IWUSR, dpaa_eth_show_ptp_1588,
15169 + dpaa_eth_set_ptp_1588),
15170 +#endif
15171 +};
15172 +
15173 +void dpaa_eth_sysfs_init(struct device *dev)
15174 +{
15175 + int i;
15176 +
15177 + for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
15178 + if (device_create_file(dev, &dpaa_eth_attrs[i])) {
15179 + dev_err(dev, "Error creating sysfs file\n");
15180 + while (i > 0)
15181 + device_remove_file(dev, &dpaa_eth_attrs[--i]);
15182 + return;
15183 + }
15184 +}
15185 +EXPORT_SYMBOL(dpaa_eth_sysfs_init);
15186 +
15187 +void dpaa_eth_sysfs_remove(struct device *dev)
15188 +{
15189 + int i;
15190 +
15191 + for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
15192 + device_remove_file(dev, &dpaa_eth_attrs[i]);
15193 +}
15194 --- /dev/null
15195 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h
15196 @@ -0,0 +1,144 @@
15197 +/* Copyright 2013 Freescale Semiconductor Inc.
15198 + *
15199 + * Redistribution and use in source and binary forms, with or without
15200 + * modification, are permitted provided that the following conditions are met:
15201 + * * Redistributions of source code must retain the above copyright
15202 + * notice, this list of conditions and the following disclaimer.
15203 + * * Redistributions in binary form must reproduce the above copyright
15204 + * notice, this list of conditions and the following disclaimer in the
15205 + * documentation and/or other materials provided with the distribution.
15206 + * * Neither the name of Freescale Semiconductor nor the
15207 + * names of its contributors may be used to endorse or promote products
15208 + * derived from this software without specific prior written permission.
15209 + *
15210 + *
15211 + * ALTERNATIVELY, this software may be distributed under the terms of the
15212 + * GNU General Public License ("GPL") as published by the Free Software
15213 + * Foundation, either version 2 of that License or (at your option) any
15214 + * later version.
15215 + *
15216 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
15217 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
15218 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
15219 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
15220 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
15221 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
15222 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
15223 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
15224 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
15225 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
15226 + */
15227 +
15228 +#undef TRACE_SYSTEM
15229 +#define TRACE_SYSTEM dpaa_eth
15230 +
15231 +#if !defined(_DPAA_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
15232 +#define _DPAA_ETH_TRACE_H
15233 +
15234 +#include <linux/skbuff.h>
15235 +#include <linux/netdevice.h>
15236 +#include "dpaa_eth.h"
15237 +#include <linux/tracepoint.h>
15238 +
15239 +#define fd_format_name(format) { qm_fd_##format, #format }
15240 +#define fd_format_list \
15241 + fd_format_name(contig), \
15242 + fd_format_name(sg)
15243 +#define TR_FMT "[%s] fqid=%d, fd: addr=0x%llx, format=%s, off=%u, len=%u," \
15244 + " status=0x%08x"
15245 +
15246 +/* This is used to declare a class of events.
15247 + * Individual events of this type will be defined below.
15248 + */
15249 +
15250 +/* Store details about a frame descriptor and the FQ on which it was
15251 + * transmitted/received.
15252 + */
15253 +DECLARE_EVENT_CLASS(dpaa_eth_fd,
15254 + /* Trace function prototype */
15255 + TP_PROTO(struct net_device *netdev,
15256 + struct qman_fq *fq,
15257 + const struct qm_fd *fd),
15258 +
15259 + /* Repeat argument list here */
15260 + TP_ARGS(netdev, fq, fd),
15261 +
15262 + /* A structure containing the relevant information we want to record.
15263 + * Declare name and type for each normal element, name, type and size
15264 + * for arrays. Use __string for variable length strings.
15265 + */
15266 + TP_STRUCT__entry(
15267 + __field(u32, fqid)
15268 + __field(u64, fd_addr)
15269 + __field(u8, fd_format)
15270 + __field(u16, fd_offset)
15271 + __field(u32, fd_length)
15272 + __field(u32, fd_status)
15273 + __string(name, netdev->name)
15274 + ),
15275 +
15276 + /* The function that assigns values to the above declared fields */
15277 + TP_fast_assign(
15278 + __entry->fqid = fq->fqid;
15279 + __entry->fd_addr = qm_fd_addr_get64(fd);
15280 + __entry->fd_format = fd->format;
15281 + __entry->fd_offset = dpa_fd_offset(fd);
15282 + __entry->fd_length = dpa_fd_length(fd);
15283 + __entry->fd_status = fd->status;
15284 + __assign_str(name, netdev->name);
15285 + ),
15286 +
15287 + /* This is what gets printed when the trace event is triggered */
15288 + /* TODO: print the status using __print_flags() */
15289 + TP_printk(TR_FMT,
15290 + __get_str(name), __entry->fqid, __entry->fd_addr,
15291 + __print_symbolic(__entry->fd_format, fd_format_list),
15292 + __entry->fd_offset, __entry->fd_length, __entry->fd_status)
15293 +);
15294 +
15295 +/* Now declare events of the above type. Format is:
15296 + * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
15297 + */
15298 +
15299 +/* Tx (egress) fd */
15300 +DEFINE_EVENT(dpaa_eth_fd, dpa_tx_fd,
15301 +
15302 + TP_PROTO(struct net_device *netdev,
15303 + struct qman_fq *fq,
15304 + const struct qm_fd *fd),
15305 +
15306 + TP_ARGS(netdev, fq, fd)
15307 +);
15308 +
15309 +/* Rx fd */
15310 +DEFINE_EVENT(dpaa_eth_fd, dpa_rx_fd,
15311 +
15312 + TP_PROTO(struct net_device *netdev,
15313 + struct qman_fq *fq,
15314 + const struct qm_fd *fd),
15315 +
15316 + TP_ARGS(netdev, fq, fd)
15317 +);
15318 +
15319 +/* Tx confirmation fd */
15320 +DEFINE_EVENT(dpaa_eth_fd, dpa_tx_conf_fd,
15321 +
15322 + TP_PROTO(struct net_device *netdev,
15323 + struct qman_fq *fq,
15324 + const struct qm_fd *fd),
15325 +
15326 + TP_ARGS(netdev, fq, fd)
15327 +);
15328 +
15329 +/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
15330 + * The syntax is the same as for DECLARE_EVENT_CLASS().
15331 + */
15332 +
15333 +#endif /* _DPAA_ETH_TRACE_H */
15334 +
15335 +/* This must be outside ifdef _DPAA_ETH_TRACE_H */
15336 +#undef TRACE_INCLUDE_PATH
15337 +#define TRACE_INCLUDE_PATH .
15338 +#undef TRACE_INCLUDE_FILE
15339 +#define TRACE_INCLUDE_FILE dpaa_eth_trace
15340 +#include <trace/define_trace.h>
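For completeness: exactly one compilation unit must instantiate these tracepoints, and the driver's hot paths then call the trace_* symbols that DEFINE_EVENT() generates. A minimal sketch (the call site shown is illustrative):

    /* in one .c file only */
    #define CREATE_TRACE_POINTS
    #include "dpaa_eth_trace.h"

    /* e.g. just before enqueueing a Tx frame descriptor */
    trace_dpa_tx_fd(priv->net_dev, egress_fq, &fd);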
15341 --- /dev/null
15342 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
15343 @@ -0,0 +1,544 @@
15344 +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
15345 + *
15346 + * Redistribution and use in source and binary forms, with or without
15347 + * modification, are permitted provided that the following conditions are met:
15348 + * * Redistributions of source code must retain the above copyright
15349 + * notice, this list of conditions and the following disclaimer.
15350 + * * Redistributions in binary form must reproduce the above copyright
15351 + * notice, this list of conditions and the following disclaimer in the
15352 + * documentation and/or other materials provided with the distribution.
15353 + * * Neither the name of Freescale Semiconductor nor the
15354 + * names of its contributors may be used to endorse or promote products
15355 + * derived from this software without specific prior written permission.
15356 + *
15357 + *
15358 + * ALTERNATIVELY, this software may be distributed under the terms of the
15359 + * GNU General Public License ("GPL") as published by the Free Software
15360 + * Foundation, either version 2 of that License or (at your option) any
15361 + * later version.
15362 + *
15363 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
15364 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
15365 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
15366 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
15367 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
15368 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
15369 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
15370 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
15371 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
15372 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
15373 + */
15374 +
15375 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
15376 +#define pr_fmt(fmt) \
15377 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
15378 + KBUILD_BASENAME".c", __LINE__, __func__
15379 +#else
15380 +#define pr_fmt(fmt) \
15381 + KBUILD_MODNAME ": " fmt
15382 +#endif
15383 +
15384 +#include <linux/string.h>
15385 +
15386 +#include "dpaa_eth.h"
15387 +#include "mac.h" /* struct mac_device */
15388 +#include "dpaa_eth_common.h"
15389 +
15390 +static const char dpa_stats_percpu[][ETH_GSTRING_LEN] = {
15391 + "interrupts",
15392 + "rx packets",
15393 + "tx packets",
15394 + "tx recycled",
15395 + "tx confirm",
15396 + "tx S/G",
15397 + "rx S/G",
15398 + "tx error",
15399 + "rx error",
15400 + "bp count"
15401 +};
15402 +
15403 +static char dpa_stats_global[][ETH_GSTRING_LEN] = {
15404 + /* dpa rx errors */
15405 + "rx dma error",
15406 + "rx frame physical error",
15407 + "rx frame size error",
15408 + "rx header error",
15409 + "rx csum error",
15410 +
15411 + /* demultiplexing errors */
15412 + "qman cg_tdrop",
15413 + "qman wred",
15414 + "qman error cond",
15415 + "qman early window",
15416 + "qman late window",
15417 + "qman fq tdrop",
15418 + "qman fq retired",
15419 + "qman orp disabled",
15420 +
15421 + /* congestion related stats */
15422 + "congestion time (ms)",
15423 + "entered congestion",
15424 + "congested (0/1)"
15425 +};
15426 +
15427 +#define DPA_STATS_PERCPU_LEN ARRAY_SIZE(dpa_stats_percpu)
15428 +#define DPA_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_stats_global)
15429 +
15430 +static int __cold dpa_get_settings(struct net_device *net_dev,
15431 + struct ethtool_cmd *et_cmd)
15432 +{
15433 + int _errno;
15434 + struct dpa_priv_s *priv;
15435 +
15436 + priv = netdev_priv(net_dev);
15437 +
15438 + if (priv->mac_dev == NULL) {
15439 + netdev_info(net_dev, "This is a MAC-less interface\n");
15440 + return -ENODEV;
15441 + }
15442 + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
15443 + netdev_dbg(net_dev, "phy device not initialized\n");
15444 + return 0;
15445 + }
15446 +
15447 + _errno = phy_ethtool_gset(priv->mac_dev->phy_dev, et_cmd);
15448 + if (unlikely(_errno < 0))
15449 + netdev_err(net_dev, "phy_ethtool_gset() = %d\n", _errno);
15450 +
15451 + return _errno;
15452 +}
15453 +
15454 +static int __cold dpa_set_settings(struct net_device *net_dev,
15455 + struct ethtool_cmd *et_cmd)
15456 +{
15457 + int _errno;
15458 + struct dpa_priv_s *priv;
15459 +
15460 + priv = netdev_priv(net_dev);
15461 +
15462 + if (priv->mac_dev == NULL) {
15463 + netdev_info(net_dev, "This is a MAC-less interface\n");
15464 + return -ENODEV;
15465 + }
15466 + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
15467 + netdev_err(net_dev, "phy device not initialized\n");
15468 + return -ENODEV;
15469 + }
15470 +
15471 + _errno = phy_ethtool_sset(priv->mac_dev->phy_dev, et_cmd);
15472 + if (unlikely(_errno < 0))
15473 + netdev_err(net_dev, "phy_ethtool_sset() = %d\n", _errno);
15474 +
15475 + return _errno;
15476 +}
15477 +
15478 +static void __cold dpa_get_drvinfo(struct net_device *net_dev,
15479 + struct ethtool_drvinfo *drvinfo)
15480 +{
15481 + int _errno;
15482 +
15483 + strncpy(drvinfo->driver, KBUILD_MODNAME,
15484 + sizeof(drvinfo->driver) - 1)[sizeof(drvinfo->driver)-1] = 0;
15485 + _errno = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
15486 + "%X", 0);
15487 +
15488 + if (unlikely(_errno >= sizeof(drvinfo->fw_version))) {
15489 + /* Truncated output */
15490 + netdev_notice(net_dev, "snprintf() = %d\n", _errno);
15491 + } else if (unlikely(_errno < 0)) {
15492 + netdev_warn(net_dev, "snprintf() = %d\n", _errno);
15493 + memset(drvinfo->fw_version, 0, sizeof(drvinfo->fw_version));
15494 + }
15495 + strncpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
15496 + sizeof(drvinfo->bus_info)-1)[sizeof(drvinfo->bus_info)-1] = 0;
15497 +}
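Note: the chained strncpy() above works because strncpy() returns its destination pointer, which is then indexed to force NUL termination (strncpy() itself does not terminate when the source fills the buffer). A spelled-out equivalent of the first call:

	strncpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver) - 1);
	drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';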
15498 +
15499 +static uint32_t __cold dpa_get_msglevel(struct net_device *net_dev)
15500 +{
15501 + return ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable;
15502 +}
15503 +
15504 +static void __cold dpa_set_msglevel(struct net_device *net_dev,
15505 + uint32_t msg_enable)
15506 +{
15507 + ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable = msg_enable;
15508 +}
15509 +
15510 +static int __cold dpa_nway_reset(struct net_device *net_dev)
15511 +{
15512 + int _errno;
15513 + struct dpa_priv_s *priv;
15514 +
15515 + priv = netdev_priv(net_dev);
15516 +
15517 + if (priv->mac_dev == NULL) {
15518 + netdev_info(net_dev, "This is a MAC-less interface\n");
15519 + return -ENODEV;
15520 + }
15521 + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
15522 + netdev_err(net_dev, "phy device not initialized\n");
15523 + return -ENODEV;
15524 + }
15525 +
15526 + _errno = 0;
15527 + if (priv->mac_dev->phy_dev->autoneg) {
15528 + _errno = phy_start_aneg(priv->mac_dev->phy_dev);
15529 + if (unlikely(_errno < 0))
15530 + netdev_err(net_dev, "phy_start_aneg() = %d\n",
15531 + _errno);
15532 + }
15533 +
15534 + return _errno;
15535 +}
15536 +
15537 +static void __cold dpa_get_pauseparam(struct net_device *net_dev,
15538 + struct ethtool_pauseparam *epause)
15539 +{
15540 + struct dpa_priv_s *priv;
15541 + struct mac_device *mac_dev;
15542 + struct phy_device *phy_dev;
15543 +
15544 + priv = netdev_priv(net_dev);
15545 + mac_dev = priv->mac_dev;
15546 +
15547 + if (mac_dev == NULL) {
15548 + netdev_info(net_dev, "This is a MAC-less interface\n");
15549 + return;
15550 + }
15551 +
15552 + phy_dev = mac_dev->phy_dev;
15553 + if (unlikely(phy_dev == NULL)) {
15554 + netdev_err(net_dev, "phy device not initialized\n");
15555 + return;
15556 + }
15557 +
15558 + epause->autoneg = mac_dev->autoneg_pause;
15559 + epause->rx_pause = mac_dev->rx_pause_active;
15560 + epause->tx_pause = mac_dev->tx_pause_active;
15561 +}
15562 +
15563 +static int __cold dpa_set_pauseparam(struct net_device *net_dev,
15564 + struct ethtool_pauseparam *epause)
15565 +{
15566 + struct dpa_priv_s *priv;
15567 + struct mac_device *mac_dev;
15568 + struct phy_device *phy_dev;
15569 + int _errno;
15570 + u32 newadv, oldadv;
15571 + bool rx_pause, tx_pause;
15572 +
15573 + priv = netdev_priv(net_dev);
15574 + mac_dev = priv->mac_dev;
15575 +
15576 + if (mac_dev == NULL) {
15577 + netdev_info(net_dev, "This is a MAC-less interface\n");
15578 + return -ENODEV;
15579 + }
15580 +
15581 + phy_dev = mac_dev->phy_dev;
15582 + if (unlikely(phy_dev == NULL)) {
15583 + netdev_err(net_dev, "phy device not initialized\n");
15584 + return -ENODEV;
15585 + }
15586 +
15587 + if (!(phy_dev->supported & SUPPORTED_Pause) ||
15588 + (!(phy_dev->supported & SUPPORTED_Asym_Pause) &&
15589 + (epause->rx_pause != epause->tx_pause)))
15590 + return -EINVAL;
15591 +
15592 + /* The MAC should know how to handle PAUSE frame autonegotiation before
15593 + * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
15594 + * settings.
15595 + */
15596 + mac_dev->autoneg_pause = !!epause->autoneg;
15597 + mac_dev->rx_pause_req = !!epause->rx_pause;
15598 + mac_dev->tx_pause_req = !!epause->tx_pause;
15599 +
15600 + /* Determine the sym/asym advertised PAUSE capabilities from the desired
15601 + * rx/tx pause settings.
15602 + */
15603 + newadv = 0;
15604 + if (epause->rx_pause)
15605 + newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
15606 + if (epause->tx_pause)
15607 + newadv |= ADVERTISED_Asym_Pause;
15608 +
15609 + oldadv = phy_dev->advertising &
15610 + (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
15611 +
15612 + /* If there are differences between the old and the new advertised
15613 + * values, restart PHY autonegotiation and advertise the new values.
15614 + */
15615 + if (oldadv != newadv) {
15616 + phy_dev->advertising &= ~(ADVERTISED_Pause
15617 + | ADVERTISED_Asym_Pause);
15618 + phy_dev->advertising |= newadv;
15619 + if (phy_dev->autoneg) {
15620 + _errno = phy_start_aneg(phy_dev);
15621 + if (unlikely(_errno < 0))
15622 + netdev_err(net_dev, "phy_start_aneg() = %d\n",
15623 + _errno);
15624 + }
15625 + }
15626 +
15627 + get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
15628 + _errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause);
15629 + if (unlikely(_errno < 0))
15630 + netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno);
15631 +
15632 + return _errno;
15633 +}
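Note: the advertisement mask computed above collapses to three cases; restating the logic of the two if statements:

	/* rx_pause = 1 (either tx)   -> ADVERTISED_Pause | ADVERTISED_Asym_Pause
	 * rx_pause = 0, tx_pause = 1 -> ADVERTISED_Asym_Pause
	 * rx_pause = 0, tx_pause = 0 -> no PAUSE bits advertised
	 */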
15634 +
15635 +#ifdef CONFIG_PM
15636 +static void dpa_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
15637 +{
15638 + struct dpa_priv_s *priv = netdev_priv(net_dev);
15639 +
15640 + wol->supported = 0;
15641 + wol->wolopts = 0;
15642 +
15643 + if (!priv->wol || !device_can_wakeup(net_dev->dev.parent))
15644 + return;
15645 +
15646 + if (priv->wol & DPAA_WOL_MAGIC) {
15647 + wol->supported = WAKE_MAGIC;
15648 + wol->wolopts = WAKE_MAGIC;
15649 + }
15650 +}
15651 +
15652 +static int dpa_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
15653 +{
15654 + struct dpa_priv_s *priv = netdev_priv(net_dev);
15655 +
15656 + if (priv->mac_dev == NULL) {
15657 + netdev_info(net_dev, "This is a MAC-less interface\n");
15658 + return -ENODEV;
15659 + }
15660 +
15661 + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
15662 + netdev_dbg(net_dev, "phy device not initialized\n");
15663 + return -ENODEV;
15664 + }
15665 +
15666 + if (!device_can_wakeup(net_dev->dev.parent) ||
15667 + (wol->wolopts & ~WAKE_MAGIC))
15668 + return -EOPNOTSUPP;
15669 +
15670 + priv->wol = 0;
15671 +
15672 + if (wol->wolopts & WAKE_MAGIC) {
15673 + priv->wol = DPAA_WOL_MAGIC;
15674 + device_set_wakeup_enable(net_dev->dev.parent, 1);
15675 + } else {
15676 + device_set_wakeup_enable(net_dev->dev.parent, 0);
15677 + }
15678 +
15679 + return 0;
15680 +}
15681 +#endif
15682 +
15683 +static int dpa_get_eee(struct net_device *net_dev, struct ethtool_eee *et_eee)
15684 +{
15685 + struct dpa_priv_s *priv;
15686 +
15687 + priv = netdev_priv(net_dev);
15688 + if (priv->mac_dev == NULL) {
15689 + netdev_info(net_dev, "This is a MAC-less interface\n");
15690 + return -ENODEV;
15691 + }
15692 +
15693 + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
15694 + netdev_err(net_dev, "phy device not initialized\n");
15695 + return -ENODEV;
15696 + }
15697 +
15698 + return phy_ethtool_get_eee(priv->mac_dev->phy_dev, et_eee);
15699 +}
15700 +
15701 +static int dpa_set_eee(struct net_device *net_dev, struct ethtool_eee *et_eee)
15702 +{
15703 + struct dpa_priv_s *priv;
15704 +
15705 + priv = netdev_priv(net_dev);
15706 + if (priv->mac_dev == NULL) {
15707 + netdev_info(net_dev, "This is a MAC-less interface\n");
15708 + return -ENODEV;
15709 + }
15710 +
15711 + if (unlikely(priv->mac_dev->phy_dev == NULL)) {
15712 + netdev_err(net_dev, "phy device not initialized\n");
15713 + return -ENODEV;
15714 + }
15715 +
15716 + return phy_ethtool_set_eee(priv->mac_dev->phy_dev, et_eee);
15717 +}
15718 +
15719 +static int dpa_get_sset_count(struct net_device *net_dev, int type)
15720 +{
15721 + unsigned int total_stats, num_stats;
15722 +
15723 + num_stats = num_online_cpus() + 1;
15724 + total_stats = num_stats * DPA_STATS_PERCPU_LEN + DPA_STATS_GLOBAL_LEN;
15725 +
15726 + switch (type) {
15727 + case ETH_SS_STATS:
15728 + return total_stats;
15729 + default:
15730 + return -EOPNOTSUPP;
15731 + }
15732 +}
15733 +
15734 +static void copy_stats(struct dpa_percpu_priv_s *percpu_priv, int num_cpus,
15735 + int crr_cpu, u64 bp_count, u64 *data)
15736 +{
15737 + int num_stat_values = num_cpus + 1;
15738 + int crr_stat = 0;
15739 +
15740 + /* update current CPU's stats and also add them to the total values */
15741 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->in_interrupt;
15742 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->in_interrupt;
15743 +
15744 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_packets;
15745 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_packets;
15746 +
15747 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_packets;
15748 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_packets;
15749 +
15750 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_returned;
15751 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_returned;
15752 +
15753 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_confirm;
15754 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_confirm;
15755 +
15756 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
15757 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
15758 +
15759 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->rx_sg;
15760 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->rx_sg;
15761 +
15762 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.tx_errors;
15763 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.tx_errors;
15764 +
15765 + data[crr_stat * num_stat_values + crr_cpu] = percpu_priv->stats.rx_errors;
15766 + data[crr_stat++ * num_stat_values + num_cpus] += percpu_priv->stats.rx_errors;
15767 +
15768 + data[crr_stat * num_stat_values + crr_cpu] = bp_count;
15769 + data[crr_stat++ * num_stat_values + num_cpus] += bp_count;
15770 +}
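Note: copy_stats() lays the ethtool data array out row by row: with N online CPUs each statistic owns N + 1 consecutive slots, the last one accumulating the total across CPUs. A worked layout for N = 2 (illustrative):

	/* num_stat_values = 3:
	 *   data[0] interrupts, CPU 0   data[1] interrupts, CPU 1
	 *   data[2] interrupts, TOTAL
	 *   data[3] rx packets, CPU 0   data[4] rx packets, CPU 1
	 *   data[5] rx packets, TOTAL
	 *   ... and so on for all DPA_STATS_PERCPU_LEN statistics.
	 */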
15771 +
15772 +static void dpa_get_ethtool_stats(struct net_device *net_dev,
15773 + struct ethtool_stats *stats, u64 *data)
15774 +{
15775 + u64 bp_count, cg_time, cg_num, cg_status;
15776 + struct dpa_percpu_priv_s *percpu_priv;
15777 + struct qm_mcr_querycgr query_cgr;
15778 + struct dpa_rx_errors rx_errors;
15779 + struct dpa_ern_cnt ern_cnt;
15780 + struct dpa_priv_s *priv;
15781 + unsigned int num_cpus, offset;
15782 + struct dpa_bp *dpa_bp;
15783 + int total_stats, i;
15784 +
15785 + total_stats = dpa_get_sset_count(net_dev, ETH_SS_STATS);
15786 + priv = netdev_priv(net_dev);
15787 + dpa_bp = priv->dpa_bp;
15788 + num_cpus = num_online_cpus();
15789 + bp_count = 0;
15790 +
15791 + memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
15792 + memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
15793 + memset(data, 0, total_stats * sizeof(u64));
15794 +
15795 + for_each_online_cpu(i) {
15796 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
15797 +
15798 + if (dpa_bp->percpu_count)
15799 + bp_count = *(per_cpu_ptr(dpa_bp->percpu_count, i));
15800 +
15801 + rx_errors.dme += percpu_priv->rx_errors.dme;
15802 + rx_errors.fpe += percpu_priv->rx_errors.fpe;
15803 + rx_errors.fse += percpu_priv->rx_errors.fse;
15804 + rx_errors.phe += percpu_priv->rx_errors.phe;
15805 + rx_errors.cse += percpu_priv->rx_errors.cse;
15806 +
15807 + ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
15808 + ern_cnt.wred += percpu_priv->ern_cnt.wred;
15809 + ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
15810 + ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
15811 + ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
15812 + ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
15813 + ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
15814 + ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
15815 +
15816 + copy_stats(percpu_priv, num_cpus, i, bp_count, data);
15817 + }
15818 +
15819 + offset = (num_cpus + 1) * DPA_STATS_PERCPU_LEN;
15820 + memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));
15821 +
15822 + offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
15823 + memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));
15824 +
15825 + /* gather congestion related counters */
15826 + cg_num = 0;
15827 + cg_status = 0;
15828 + cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
15829 + if (qman_query_cgr(&priv->cgr_data.cgr, &query_cgr) == 0) {
15830 + cg_num = priv->cgr_data.cgr_congested_count;
15831 + cg_status = query_cgr.cgr.cs;
15832 +
15833 +		/* reset congestion stats (like the QMan API does) */
15834 + priv->cgr_data.congested_jiffies = 0;
15835 + priv->cgr_data.cgr_congested_count = 0;
15836 + }
15837 +
15838 + offset += sizeof(struct dpa_ern_cnt) / sizeof(u64);
15839 + data[offset++] = cg_time;
15840 + data[offset++] = cg_num;
15841 + data[offset++] = cg_status;
15842 +}
15843 +
15844 +static void dpa_get_strings(struct net_device *net_dev, u32 stringset, u8 *data)
15845 +{
15846 + unsigned int i, j, num_cpus, size;
15847 + char stat_string_cpu[ETH_GSTRING_LEN];
15848 + u8 *strings;
15849 +
15850 + strings = data;
15851 + num_cpus = num_online_cpus();
15852 + size = DPA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
15853 +
15854 + for (i = 0; i < DPA_STATS_PERCPU_LEN; i++) {
15855 + for (j = 0; j < num_cpus; j++) {
15856 + snprintf(stat_string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]", dpa_stats_percpu[i], j);
15857 + memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN);
15858 + strings += ETH_GSTRING_LEN;
15859 + }
15860 + snprintf(stat_string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]", dpa_stats_percpu[i]);
15861 + memcpy(strings, stat_string_cpu, ETH_GSTRING_LEN);
15862 + strings += ETH_GSTRING_LEN;
15863 + }
15864 + memcpy(strings, dpa_stats_global, size);
15865 +}
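Note: dpa_get_strings() emits labels in the same order copy_stats() fills values, so on a two-CPU system the start of `ethtool -S <iface>` output would plausibly read (illustrative):

	interrupts [CPU 0]: ...
	interrupts [CPU 1]: ...
	interrupts [TOTAL]: ...
	rx packets [CPU 0]: ...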
15866 +
15867 +const struct ethtool_ops dpa_ethtool_ops = {
15868 + .get_settings = dpa_get_settings,
15869 + .set_settings = dpa_set_settings,
15870 + .get_drvinfo = dpa_get_drvinfo,
15871 + .get_msglevel = dpa_get_msglevel,
15872 + .set_msglevel = dpa_set_msglevel,
15873 + .nway_reset = dpa_nway_reset,
15874 + .get_pauseparam = dpa_get_pauseparam,
15875 + .set_pauseparam = dpa_set_pauseparam,
15876 + .self_test = NULL, /* TODO invoke the cold-boot unit-test? */
15877 + .get_link = ethtool_op_get_link,
15878 + .get_eee = dpa_get_eee,
15879 + .set_eee = dpa_set_eee,
15880 + .get_sset_count = dpa_get_sset_count,
15881 + .get_ethtool_stats = dpa_get_ethtool_stats,
15882 + .get_strings = dpa_get_strings,
15883 +#ifdef CONFIG_PM
15884 + .get_wol = dpa_get_wol,
15885 + .set_wol = dpa_set_wol,
15886 +#endif
15887 +};
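Note: dpa_ethtool_ops is non-static so the common netdev setup code can attach it; a hedged sketch of that hookup (the surrounding function is assumed, not shown in this hunk):

	/* e.g. during net_device initialization */
	net_dev->ethtool_ops = &dpa_ethtool_ops;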
15888 --- /dev/null
15889 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_generic_ethtool.c
15890 @@ -0,0 +1,286 @@
15891 +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
15892 + *
15893 + * Redistribution and use in source and binary forms, with or without
15894 + * modification, are permitted provided that the following conditions are met:
15895 + * * Redistributions of source code must retain the above copyright
15896 + * notice, this list of conditions and the following disclaimer.
15897 + * * Redistributions in binary form must reproduce the above copyright
15898 + * notice, this list of conditions and the following disclaimer in the
15899 + * documentation and/or other materials provided with the distribution.
15900 + * * Neither the name of Freescale Semiconductor nor the
15901 + * names of its contributors may be used to endorse or promote products
15902 + * derived from this software without specific prior written permission.
15903 + *
15904 + *
15905 + * ALTERNATIVELY, this software may be distributed under the terms of the
15906 + * GNU General Public License ("GPL") as published by the Free Software
15907 + * Foundation, either version 2 of that License or (at your option) any
15908 + * later version.
15909 + *
15910 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
15911 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
15912 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
15913 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
15914 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
15915 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
15916 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
15917 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
15918 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
15919 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
15920 + */
15921 +
15922 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
15923 +#define pr_fmt(fmt) \
15924 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
15925 + KBUILD_BASENAME".c", __LINE__, __func__
15926 +#else
15927 +#define pr_fmt(fmt) \
15928 + KBUILD_MODNAME ": " fmt
15929 +#endif
15930 +
15931 +#include <linux/string.h>
15932 +
15933 +#include "dpaa_eth.h"
15934 +#include "dpaa_eth_common.h"
15935 +#include "dpaa_eth_generic.h"
15936 +
15937 +static const char dpa_stats_percpu[][ETH_GSTRING_LEN] = {
15938 + "interrupts",
15939 + "rx packets",
15940 + "tx packets",
15941 + "tx recycled",
15942 + "tx confirm",
15943 + "tx S/G",
15944 + "rx S/G (N/A)",
15945 + "tx error",
15946 + "rx error",
15947 + "bp count",
15948 + "bp draining count"
15949 +};
15950 +
15951 +static char dpa_stats_global[][ETH_GSTRING_LEN] = {
15952 + /* dpa rx errors */
15953 + "rx dma error",
15954 + "rx frame physical error",
15955 + "rx frame size error",
15956 + "rx header error",
15957 + "rx csum error",
15958 +
15959 + /* demultiplexing errors */
15960 + "qman cg_tdrop",
15961 + "qman wred",
15962 + "qman error cond",
15963 + "qman early window",
15964 + "qman late window",
15965 + "qman fq tdrop",
15966 + "qman fq retired",
15967 + "qman orp disabled",
15968 +};
15969 +
15970 +#define DPA_STATS_PERCPU_LEN ARRAY_SIZE(dpa_stats_percpu)
15971 +#define DPA_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_stats_global)
15972 +
15973 +static int __cold dpa_generic_get_settings(struct net_device *net_dev,
15974 + struct ethtool_cmd *et_cmd)
15975 +{
15976 + netdev_info(net_dev, "This interface does not have a MAC device in its control\n");
15977 + return -ENODEV;
15978 +}
15979 +
15980 +static int __cold dpa_generic_set_settings(struct net_device *net_dev,
15981 + struct ethtool_cmd *et_cmd)
15982 +{
15983 + netdev_info(net_dev, "This interface does not have a MAC device in its control\n");
15984 + return -ENODEV;
15985 +}
15986 +
15987 +static void __cold dpa_generic_get_drvinfo(struct net_device *net_dev,
15988 + struct ethtool_drvinfo *drvinfo)
15989 +{
15990 + int _errno;
15991 +
15992 + strncpy(drvinfo->driver, KBUILD_MODNAME,
15993 + sizeof(drvinfo->driver) - 1)[sizeof(drvinfo->driver)-1] = 0;
15994 + _errno = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
15995 + "%X", 0);
15996 +
15997 + if (unlikely(_errno >= sizeof(drvinfo->fw_version))) {
15998 + /* Truncated output */
15999 + netdev_notice(net_dev, "snprintf() = %d\n", _errno);
16000 + } else if (unlikely(_errno < 0)) {
16001 + netdev_warn(net_dev, "snprintf() = %d\n", _errno);
16002 + memset(drvinfo->fw_version, 0, sizeof(drvinfo->fw_version));
16003 + }
16004 + strncpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
16005 + sizeof(drvinfo->bus_info)-1)[sizeof(drvinfo->bus_info)-1] = 0;
16006 +}
16007 +
16008 +static uint32_t __cold dpa_generic_get_msglevel(struct net_device *net_dev)
16009 +{
16010 + return ((struct dpa_generic_priv_s *)netdev_priv(net_dev))->msg_enable;
16011 +}
16012 +
16013 +static void __cold dpa_generic_set_msglevel(struct net_device *net_dev,
16014 + uint32_t msg_enable)
16015 +{
16016 + ((struct dpa_generic_priv_s *)netdev_priv(net_dev))->msg_enable =
16017 + msg_enable;
16018 +}
16019 +
16020 +static int __cold dpa_generic_nway_reset(struct net_device *net_dev)
16021 +{
16022 + netdev_info(net_dev, "This interface does not have a MAC device in its control\n");
16023 + return -ENODEV;
16024 +}
16025 +
16026 +static int dpa_generic_get_sset_count(struct net_device *net_dev, int type)
16027 +{
16028 + unsigned int total_stats, num_stats;
16029 +
16030 + num_stats = num_online_cpus() + 1;
16031 + total_stats = num_stats * DPA_STATS_PERCPU_LEN + DPA_STATS_GLOBAL_LEN;
16032 +
16033 + switch (type) {
16034 + case ETH_SS_STATS:
16035 + return total_stats;
16036 + default:
16037 + return -EOPNOTSUPP;
16038 + }
16039 +}
16040 +
16041 +static void copy_stats(struct dpa_percpu_priv_s *percpu_priv,
16042 + int num_cpus, int crr_cpu, u64 bp_count,
16043 + u64 bp_drain_count, u64 *data)
16044 +{
16045 + int num_values = num_cpus + 1;
16046 + int crr = 0;
16047 +
16048 + /* update current CPU's stats and also add them to the total values */
16049 + data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
16050 + data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;
16051 +
16052 + data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
16053 + data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;
16054 +
16055 + data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
16056 + data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;
16057 +
16058 + data[crr * num_values + crr_cpu] = percpu_priv->tx_returned;
16059 + data[crr++ * num_values + num_cpus] += percpu_priv->tx_returned;
16060 +
16061 + data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
16062 + data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;
16063 +
16064 + data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
16065 + data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
16066 +
16067 + data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
16068 + data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;
16069 +
16070 + data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
16071 + data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
16072 +
16073 + data[crr * num_values + crr_cpu] = bp_count;
16074 + data[crr++ * num_values + num_cpus] += bp_count;
16075 +
16076 + data[crr * num_values + crr_cpu] = bp_drain_count;
16077 + data[crr++ * num_values + num_cpus] += bp_drain_count;
16078 +}
16079 +
16080 +static void dpa_generic_get_ethtool_stats(struct net_device *net_dev,
16081 + struct ethtool_stats *stats,
16082 + u64 *data)
16083 +{
16084 + struct dpa_percpu_priv_s *percpu_priv;
16085 + struct dpa_bp *dpa_bp, *drain_bp;
16086 + struct dpa_generic_priv_s *priv;
16087 + struct dpa_rx_errors rx_errors;
16088 + struct dpa_ern_cnt ern_cnt;
16089 + unsigned int num_cpus, offset;
16090 + u64 bp_cnt, drain_cnt;
16091 + int total_stats, i;
16092 +
16093 + total_stats = dpa_generic_get_sset_count(net_dev, ETH_SS_STATS);
16094 + priv = netdev_priv(net_dev);
16095 + drain_bp = priv->draining_tx_bp;
16096 + dpa_bp = priv->rx_bp;
16097 + num_cpus = num_online_cpus();
16098 + drain_cnt = 0;
16099 + bp_cnt = 0;
16100 +
16101 + memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
16102 + memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
16103 + memset(data, 0, total_stats * sizeof(u64));
16104 +
16105 + for_each_online_cpu(i) {
16106 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
16107 +
16108 + if (dpa_bp->percpu_count)
16109 + bp_cnt = *(per_cpu_ptr(dpa_bp->percpu_count, i));
16110 +
16111 + if (drain_bp->percpu_count)
16112 + drain_cnt = *(per_cpu_ptr(drain_bp->percpu_count, i));
16113 +
16114 + rx_errors.dme += percpu_priv->rx_errors.dme;
16115 + rx_errors.fpe += percpu_priv->rx_errors.fpe;
16116 + rx_errors.fse += percpu_priv->rx_errors.fse;
16117 + rx_errors.phe += percpu_priv->rx_errors.phe;
16118 + rx_errors.cse += percpu_priv->rx_errors.cse;
16119 +
16120 + ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
16121 + ern_cnt.wred += percpu_priv->ern_cnt.wred;
16122 + ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
16123 + ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
16124 + ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
16125 + ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
16126 + ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
16127 + ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
16128 +
16129 + copy_stats(percpu_priv, num_cpus, i, bp_cnt, drain_cnt, data);
16130 + }
16131 +
16132 + offset = (num_cpus + 1) * DPA_STATS_PERCPU_LEN;
16133 + memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));
16134 +
16135 + offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
16136 + memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));
16137 +}
16138 +
16139 +static void dpa_generic_get_strings(struct net_device *net_dev,
16140 + u32 stringset, u8 *data)
16141 +{
16142 + unsigned int i, j, num_cpus, size;
16143 + char string_cpu[ETH_GSTRING_LEN];
16144 + u8 *strings;
16145 +
16146 + strings = data;
16147 + num_cpus = num_online_cpus();
16148 + size = DPA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
16149 +
16150 + for (i = 0; i < DPA_STATS_PERCPU_LEN; i++) {
16151 + for (j = 0; j < num_cpus; j++) {
16152 + snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
16153 + dpa_stats_percpu[i], j);
16154 + memcpy(strings, string_cpu, ETH_GSTRING_LEN);
16155 + strings += ETH_GSTRING_LEN;
16156 + }
16157 + snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
16158 + dpa_stats_percpu[i]);
16159 + memcpy(strings, string_cpu, ETH_GSTRING_LEN);
16160 + strings += ETH_GSTRING_LEN;
16161 + }
16162 + memcpy(strings, dpa_stats_global, size);
16163 +}
16164 +
16165 +const struct ethtool_ops dpa_generic_ethtool_ops = {
16166 + .get_settings = dpa_generic_get_settings,
16167 + .set_settings = dpa_generic_set_settings,
16168 + .get_drvinfo = dpa_generic_get_drvinfo,
16169 + .get_msglevel = dpa_generic_get_msglevel,
16170 + .set_msglevel = dpa_generic_set_msglevel,
16171 + .nway_reset = dpa_generic_nway_reset,
16172 + .get_link = ethtool_op_get_link,
16173 + .get_sset_count = dpa_generic_get_sset_count,
16174 + .get_ethtool_stats = dpa_generic_get_ethtool_stats,
16175 + .get_strings = dpa_generic_get_strings,
16176 +};
16177 --- /dev/null
16178 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_macsec_ethtool.c
16179 @@ -0,0 +1,250 @@
16180 +/* Copyright 2015 Freescale Semiconductor, Inc.
16181 + *
16182 + * Redistribution and use in source and binary forms, with or without
16183 + * modification, are permitted provided that the following conditions are met:
16184 + * * Redistributions of source code must retain the above copyright
16185 + * notice, this list of conditions and the following disclaimer.
16186 + * * Redistributions in binary form must reproduce the above copyright
16187 + * notice, this list of conditions and the following disclaimer in the
16188 + * documentation and/or other materials provided with the distribution.
16189 + * * Neither the name of Freescale Semiconductor nor the
16190 + * names of its contributors may be used to endorse or promote products
16191 + * derived from this software without specific prior written permission.
16192 + *
16193 + *
16194 + * ALTERNATIVELY, this software may be distributed under the terms of the
16195 + * GNU General Public License ("GPL") as published by the Free Software
16196 + * Foundation, either version 2 of that License or (at your option) any
16197 + * later version.
16198 + *
16199 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
16200 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
16201 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
16202 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
16203 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
16204 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
16205 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
16206 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
16207 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
16208 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
16209 + */
16210 +
16211 +#include <linux/string.h>
16212 +
16213 +#include "dpaa_eth.h"
16214 +#include "dpaa_eth_macsec.h"
16215 +
16216 +static const char dpa_macsec_stats_percpu[][ETH_GSTRING_LEN] = {
16217 + "interrupts",
16218 + "rx packets",
16219 + "tx packets",
16220 + "tx recycled",
16221 + "tx confirm",
16222 + "tx S/G",
16223 + "rx S/G",
16224 + "tx error",
16225 + "rx error",
16226 + "bp count",
16227 + "tx macsec",
16228 + "rx macsec"
16229 +};
16230 +
16231 +static char dpa_macsec_stats_global[][ETH_GSTRING_LEN] = {
16232 + /* dpa rx errors */
16233 + "rx dma error",
16234 + "rx frame physical error",
16235 + "rx frame size error",
16236 + "rx header error",
16237 + "rx csum error",
16238 +
16239 + /* demultiplexing errors */
16240 + "qman cg_tdrop",
16241 + "qman wred",
16242 + "qman error cond",
16243 + "qman early window",
16244 + "qman late window",
16245 + "qman fq tdrop",
16246 + "qman fq retired",
16247 + "qman orp disabled",
16248 +
16249 + /* congestion related stats */
16250 + "congestion time (ms)",
16251 + "entered congestion",
16252 + "congested (0/1)"
16253 +};
16254 +
16255 +#define DPA_MACSEC_STATS_PERCPU_LEN ARRAY_SIZE(dpa_macsec_stats_percpu)
16256 +#define DPA_MACSEC_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_macsec_stats_global)
16257 +
16258 +static void copy_stats(struct dpa_percpu_priv_s *percpu_priv, int num_cpus,
16259 + int crr_cpu, u64 bp_count, u64 tx_macsec,
16260 + u64 rx_macsec, u64 *data)
16261 +{
16262 + int num_values = num_cpus + 1;
16263 + int crr = 0;
16264 +
16265 + /* update current CPU's stats and also add them to the total values */
16266 + data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
16267 + data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;
16268 +
16269 + data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
16270 + data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;
16271 +
16272 + data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
16273 + data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;
16274 +
16275 + data[crr * num_values + crr_cpu] = percpu_priv->tx_returned;
16276 + data[crr++ * num_values + num_cpus] += percpu_priv->tx_returned;
16277 +
16278 + data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
16279 + data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;
16280 +
16281 + data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
16282 + data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
16283 +
16284 + data[crr * num_values + crr_cpu] = percpu_priv->rx_sg;
16285 + data[crr++ * num_values + num_cpus] += percpu_priv->rx_sg;
16286 +
16287 + data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
16288 + data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;
16289 +
16290 + data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
16291 + data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
16292 +
16293 + data[crr * num_values + crr_cpu] = bp_count;
16294 + data[crr++ * num_values + num_cpus] += bp_count;
16295 +
16296 + data[crr * num_values + crr_cpu] = tx_macsec;
16297 + data[crr++ * num_values + num_cpus] += tx_macsec;
16298 +
16299 + data[crr * num_values + crr_cpu] = rx_macsec;
16300 + data[crr++ * num_values + num_cpus] += rx_macsec;
16301 +}
16302 +
16303 +int dpa_macsec_get_sset_count(struct net_device *net_dev, int type)
16304 +{
16305 + unsigned int total_stats, num_stats;
16306 +
16307 + num_stats = num_online_cpus() + 1;
16308 + total_stats = num_stats * DPA_MACSEC_STATS_PERCPU_LEN +
16309 + DPA_MACSEC_STATS_GLOBAL_LEN;
16310 +
16311 + switch (type) {
16312 + case ETH_SS_STATS:
16313 + return total_stats;
16314 + default:
16315 + return -EOPNOTSUPP;
16316 + }
16317 +}
16318 +
16319 +void dpa_macsec_get_ethtool_stats(struct net_device *net_dev,
16320 + struct ethtool_stats *stats, u64 *data)
16321 +{
16322 + u64 bp_count, bp_total, cg_time, cg_num, cg_status;
16323 + struct macsec_percpu_priv_s *percpu_priv_macsec;
16324 + struct dpa_percpu_priv_s *percpu_priv;
16325 + struct macsec_priv_s *macsec_priv;
16326 + struct qm_mcr_querycgr query_cgr;
16327 + struct dpa_rx_errors rx_errors;
16328 + struct dpa_ern_cnt ern_cnt;
16329 + struct dpa_priv_s *priv;
16330 + unsigned int num_cpus, offset;
16331 + struct dpa_bp *dpa_bp;
16332 + int total_stats, i;
16333 +
16334 + macsec_priv = dpa_macsec_get_priv(net_dev);
16335 + if (unlikely(!macsec_priv)) {
16336 + pr_err("selected macsec_priv is NULL\n");
16337 + return;
16338 + }
16339 +
16340 + total_stats = dpa_macsec_get_sset_count(net_dev, ETH_SS_STATS);
16341 + priv = netdev_priv(net_dev);
16342 + dpa_bp = priv->dpa_bp;
16343 + num_cpus = num_online_cpus();
16344 + bp_count = 0;
16345 + bp_total = 0;
16346 +
16347 + memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
16348 + memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
16349 + memset(data, 0, total_stats * sizeof(u64));
16350 +
16351 + for_each_online_cpu(i) {
16352 + percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
16353 + percpu_priv_macsec = per_cpu_ptr(macsec_priv->percpu_priv, i);
16354 +
16355 + if (dpa_bp->percpu_count)
16356 + bp_count = *(per_cpu_ptr(dpa_bp->percpu_count, i));
16357 +
16358 + rx_errors.dme += percpu_priv->rx_errors.dme;
16359 + rx_errors.fpe += percpu_priv->rx_errors.fpe;
16360 + rx_errors.fse += percpu_priv->rx_errors.fse;
16361 + rx_errors.phe += percpu_priv->rx_errors.phe;
16362 + rx_errors.cse += percpu_priv->rx_errors.cse;
16363 +
16364 + ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
16365 + ern_cnt.wred += percpu_priv->ern_cnt.wred;
16366 + ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
16367 + ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
16368 + ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
16369 + ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
16370 + ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
16371 + ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;
16372 +
16373 + copy_stats(percpu_priv, num_cpus, i, bp_count,
16374 + percpu_priv_macsec->tx_macsec,
16375 + percpu_priv_macsec->rx_macsec,
16376 + data);
16377 + }
16378 +
16379 + offset = (num_cpus + 1) * DPA_MACSEC_STATS_PERCPU_LEN;
16380 + memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));
16381 +
16382 + offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
16383 + memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));
16384 +
16385 + /* gather congestion related counters */
16386 + cg_num = 0;
16387 + cg_status = 0;
16388 + cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
16389 + if (qman_query_cgr(&priv->cgr_data.cgr, &query_cgr) == 0) {
16390 + cg_num = priv->cgr_data.cgr_congested_count;
16391 + cg_status = query_cgr.cgr.cs;
16392 +
16393 +		/* reset congestion stats (like the QMan API does) */
16394 + priv->cgr_data.congested_jiffies = 0;
16395 + priv->cgr_data.cgr_congested_count = 0;
16396 + }
16397 +
16398 + offset += sizeof(struct dpa_ern_cnt) / sizeof(u64);
16399 + data[offset++] = cg_time;
16400 + data[offset++] = cg_num;
16401 + data[offset++] = cg_status;
16402 +}
16403 +
16404 +void dpa_macsec_get_strings(struct net_device *net_dev,
16405 + u32 stringset, u8 *data)
16406 +{
16407 + unsigned int i, j, num_cpus, size;
16408 + char string_cpu[ETH_GSTRING_LEN];
16409 + u8 *strings;
16410 +
16411 + strings = data;
16412 + num_cpus = num_online_cpus();
16413 + size = DPA_MACSEC_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
16414 +
16415 + for (i = 0; i < DPA_MACSEC_STATS_PERCPU_LEN; i++) {
16416 + for (j = 0; j < num_cpus; j++) {
16417 + snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
16418 + dpa_macsec_stats_percpu[i], j);
16419 + memcpy(strings, string_cpu, ETH_GSTRING_LEN);
16420 + strings += ETH_GSTRING_LEN;
16421 + }
16422 + snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
16423 + dpa_macsec_stats_percpu[i]);
16424 + memcpy(strings, string_cpu, ETH_GSTRING_LEN);
16425 + strings += ETH_GSTRING_LEN;
16426 + }
16427 + memcpy(strings, dpa_macsec_stats_global, size);
16428 +}
16429 +
16430 --- /dev/null
16431 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c
16432 @@ -0,0 +1,287 @@
16433 +/*
16434 + * DPAA Ethernet Driver -- PTP 1588 clock using the dTSEC
16435 + *
16436 + * Author: Yangbo Lu <yangbo.lu@freescale.com>
16437 + *
16438 + * Copyright 2014 Freescale Semiconductor, Inc.
16439 + *
16440 + * This program is free software; you can redistribute it and/or modify it
16441 + * under the terms of the GNU General Public License as published by the
16442 + * Free Software Foundation; either version 2 of the License, or (at your
16443 + * option) any later version.
16444 +*/
16445 +
16446 +#include <linux/device.h>
16447 +#include <linux/hrtimer.h>
16448 +#include <linux/init.h>
16449 +#include <linux/interrupt.h>
16450 +#include <linux/kernel.h>
16451 +#include <linux/module.h>
16452 +#include <linux/of.h>
16453 +#include <linux/of_platform.h>
16454 +#include <linux/timex.h>
16455 +#include <linux/io.h>
16456 +
16457 +#include <linux/ptp_clock_kernel.h>
16458 +
16459 +#include "dpaa_eth.h"
16460 +#include "mac.h"
16461 +
16462 +struct ptp_clock *clock;
16463 +
16464 +static struct mac_device *mac_dev;
16465 +static u32 freqCompensation;
16466 +
16467 +/* Bit definitions for the TMR_CTRL register */
16468 +#define ALM1P (1<<31) /* Alarm1 output polarity */
16469 +#define ALM2P (1<<30) /* Alarm2 output polarity */
16470 +#define FS (1<<28) /* FIPER start indication */
16471 +#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
16472 +#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
16473 +#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
16474 +#define TCLK_PERIOD_MASK (0x3ff)
16475 +#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
16476 +#define FRD (1<<14) /* FIPER Realignment Disable */
16477 +#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
16478 +#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
16479 +#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
16480 +#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
16481 +#define COPH (1<<7) /* Generated clock output phase. */
16482 +#define CIPH (1<<6) /* External oscillator input clock phase */
16483 +#define TMSR (1<<5) /* Timer soft reset. */
16484 +#define BYP (1<<3) /* Bypass drift compensated clock */
16485 +#define TE (1<<2) /* 1588 timer enable. */
16486 +#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
16487 +#define CKSEL_MASK (0x3)
16488 +
16489 +/* Bit definitions for the TMR_TEVENT register */
16490 +#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
16491 +#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
16492 +#define ALM2 (1<<17) /* Current time = alarm time register 2 */
16493 +#define ALM1 (1<<16) /* Current time = alarm time register 1 */
16494 +#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
16495 +#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
16496 +#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
16497 +
16498 +/* Bit definitions for the TMR_TEMASK register */
16499 +#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
16500 +#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
16501 +#define ALM2EN (1<<17) /* Timer ALM2 event enable */
16502 +#define ALM1EN (1<<16) /* Timer ALM1 event enable */
16503 +#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
16504 +#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
16505 +
16506 +/* Bit definitions for the TMR_PEVENT register */
16507 +#define TXP2 (1<<9) /* PTP transmitted timestamp in TXTS2 */
16508 +#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
16509 +#define RXP (1<<0) /* PTP frame has been received */
16510 +
16511 +/* Bit definitions for the TMR_PEMASK register */
16512 +#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
16513 +#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
16514 +#define RXPEN (1<<0) /* Receive PTP packet event enable */
16515 +
16516 +/* Bit definitions for the TMR_STAT register */
16517 +#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
16518 +#define STAT_VEC_MASK (0x3f)
16519 +
16520 +/* Bit definitions for the TMR_PRSC register */
16521 +#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
16522 +#define PRSC_OCK_MASK (0xffff)
16523 +
16524 +
16525 +#define N_EXT_TS 2
16526 +
16527 +static void set_alarm(void)
16528 +{
16529 + u64 ns;
16530 +
16531 + if (mac_dev->fm_rtc_get_cnt)
16532 + mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns);
16533 + ns += 1500000000ULL;
16534 + ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
16535 + ns -= DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
16536 + if (mac_dev->fm_rtc_set_alarm)
16537 + mac_dev->fm_rtc_set_alarm(mac_dev->fm_dev, 0, ns);
16538 +}
16539 +
16540 +static void set_fipers(void)
16541 +{
16542 + u64 fiper;
16543 +
16544 + if (mac_dev->fm_rtc_disable)
16545 + mac_dev->fm_rtc_disable(mac_dev->fm_dev);
16546 +
16547 + set_alarm();
16548 + fiper = 1000000000ULL - DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
16549 + if (mac_dev->fm_rtc_set_fiper)
16550 + mac_dev->fm_rtc_set_fiper(mac_dev->fm_dev, 0, fiper);
16551 +
16552 + if (mac_dev->fm_rtc_enable)
16553 + mac_dev->fm_rtc_enable(mac_dev->fm_dev);
16554 +}
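Note: set_alarm() pushes the counter at least half a second into the future (+1.5 s, then rounded down to a whole second) and backs off one nominal clock period, so the FIPER pulse programmed by set_fipers() should land on 1 s boundaries. A worked trace, assuming a 4 ns nominal period (the period value is an assumption):

	/* now                 = 4.300000000 s
	 * + 1.5 s             = 5.800000000 s
	 * rounded down to 1 s = 5.000000000 s
	 * - one period (4 ns) = 4.999999996 s   <- alarm
	 * fiper = 1 s - 4 ns  = 999999996 ns    <- pulse period
	 */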
16555 +
16556 +/* PTP clock operations */
16557 +
16558 +static int ptp_dpa_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
16559 +{
16560 + u64 adj;
16561 + u32 diff, tmr_add;
16562 + int neg_adj = 0;
16563 +
16564 + if (ppb < 0) {
16565 + neg_adj = 1;
16566 + ppb = -ppb;
16567 + }
16568 +
16569 + tmr_add = freqCompensation;
16570 + adj = tmr_add;
16571 + adj *= ppb;
16572 + diff = div_u64(adj, 1000000000ULL);
16573 +
16574 + tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
16575 +
16576 + if (mac_dev->fm_rtc_set_drift)
16577 + mac_dev->fm_rtc_set_drift(mac_dev->fm_dev, tmr_add);
16578 +
16579 + return 0;
16580 +}
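Note: the adjustment above scales the nominal timer addend by ppb parts per billion, diff = tmr_add * |ppb| / 10^9, added or subtracted depending on sign. A worked instance (the addend value is an assumption):

	/* tmr_add = 1431655765, ppb = +100
	 * diff    = 1431655765 * 100 / 1000000000 = 143
	 * new addend = 1431655765 + 143 = 1431655908
	 */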
16581 +
16582 +static int ptp_dpa_adjtime(struct ptp_clock_info *ptp, s64 delta)
16583 +{
16584 +	u64 now;
16585 +
16586 + if (mac_dev->fm_rtc_get_cnt)
16587 + mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &now);
16588 +
16589 + now += delta;
16590 +
16591 + if (mac_dev->fm_rtc_set_cnt)
16592 + mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, now);
16593 + set_fipers();
16594 +
16595 + return 0;
16596 +}
16597 +
16598 +static int ptp_dpa_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
16599 +{
16600 + u64 ns;
16601 + u32 remainder;
16602 +
16603 + if (mac_dev->fm_rtc_get_cnt)
16604 + mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns);
16605 +
16606 + ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
16607 + ts->tv_nsec = remainder;
16608 + return 0;
16609 +}
16610 +
16611 +static int ptp_dpa_settime(struct ptp_clock_info *ptp,
16612 + const struct timespec *ts)
16613 +{
16614 + u64 ns;
16615 +
16616 + ns = ts->tv_sec * 1000000000ULL;
16617 + ns += ts->tv_nsec;
16618 +
16619 + if (mac_dev->fm_rtc_set_cnt)
16620 + mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, ns);
16621 + set_fipers();
16622 + return 0;
16623 +}
16624 +
16625 +static int ptp_dpa_enable(struct ptp_clock_info *ptp,
16626 + struct ptp_clock_request *rq, int on)
16627 +{
16628 + u32 bit;
16629 +
16630 + switch (rq->type) {
16631 + case PTP_CLK_REQ_EXTTS:
16632 + switch (rq->extts.index) {
16633 + case 0:
16634 + bit = ETS1EN;
16635 + break;
16636 + case 1:
16637 + bit = ETS2EN;
16638 + break;
16639 + default:
16640 + return -EINVAL;
16641 + }
16642 + if (on) {
16643 + if (mac_dev->fm_rtc_enable_interrupt)
16644 + mac_dev->fm_rtc_enable_interrupt(
16645 + mac_dev->fm_dev, bit);
16646 + } else {
16647 + if (mac_dev->fm_rtc_disable_interrupt)
16648 + mac_dev->fm_rtc_disable_interrupt(
16649 + mac_dev->fm_dev, bit);
16650 + }
16651 + return 0;
16652 +
16653 + case PTP_CLK_REQ_PPS:
16654 + if (on) {
16655 + if (mac_dev->fm_rtc_enable_interrupt)
16656 + mac_dev->fm_rtc_enable_interrupt(
16657 + mac_dev->fm_dev, PP1EN);
16658 + } else {
16659 + if (mac_dev->fm_rtc_disable_interrupt)
16660 + mac_dev->fm_rtc_disable_interrupt(
16661 + mac_dev->fm_dev, PP1EN);
16662 + }
16663 + return 0;
16664 +
16665 + default:
16666 + break;
16667 + }
16668 +
16669 + return -EOPNOTSUPP;
16670 +}
16671 +
16672 +static struct ptp_clock_info ptp_dpa_caps = {
16673 + .owner = THIS_MODULE,
16674 + .name = "dpaa clock",
16675 + .max_adj = 512000,
16676 + .n_alarm = 0,
16677 + .n_ext_ts = N_EXT_TS,
16678 + .n_per_out = 0,
16679 + .pps = 1,
16680 + .adjfreq = ptp_dpa_adjfreq,
16681 + .adjtime = ptp_dpa_adjtime,
16682 + .gettime = ptp_dpa_gettime,
16683 + .settime = ptp_dpa_settime,
16684 + .enable = ptp_dpa_enable,
16685 +};
16686 +
16687 +static int __init __cold dpa_ptp_load(void)
16688 +{
16689 + struct device *ptp_dev;
16690 + struct timespec now;
16691 + int dpa_phc_index;
16692 + int err;
16693 +
16694 + ptp_dev = &ptp_priv.of_dev->dev;
16695 + mac_dev = ptp_priv.mac_dev;
16696 +
16697 + if (mac_dev->fm_rtc_get_drift)
16698 + mac_dev->fm_rtc_get_drift(mac_dev->fm_dev, &freqCompensation);
16699 +
16700 + getnstimeofday(&now);
16701 + ptp_dpa_settime(&ptp_dpa_caps, &now);
16702 +
16703 + clock = ptp_clock_register(&ptp_dpa_caps, ptp_dev);
16704 + if (IS_ERR(clock)) {
16705 + err = PTR_ERR(clock);
16706 + return err;
16707 + }
16708 + dpa_phc_index = ptp_clock_index(clock);
16709 + return 0;
16710 +}
16711 +module_init(dpa_ptp_load);
16712 +
16713 +static void __exit __cold dpa_ptp_unload(void)
16714 +{
16715 + if (mac_dev->fm_rtc_disable_interrupt)
16716 + mac_dev->fm_rtc_disable_interrupt(mac_dev->fm_dev, 0xffffffff);
16717 + ptp_clock_unregister(clock);
16718 +}
16719 +module_exit(dpa_ptp_unload);
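Note: once ptp_clock_register() succeeds, the clock shows up as a /dev/ptpN character device that user space can read through the dynamic posix-clock interface. A hedged user-space sketch (device index 0 is an assumption; the FD_TO_CLOCKID encoding follows the kernel's testptp example):

	#include <fcntl.h>
	#include <time.h>

	#define CLOCKFD 3
	#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

	int main(void)
	{
		struct timespec ts;
		int fd = open("/dev/ptp0", O_RDWR);	/* index assumed */

		if (fd < 0 || clock_gettime(FD_TO_CLOCKID(fd), &ts))
			return 1;
		/* ts now holds the DPAA 1588 timer's time */
		return 0;
	}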
16720 --- /dev/null
16721 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
16722 @@ -0,0 +1,915 @@
16723 +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
16724 + *
16725 + * Redistribution and use in source and binary forms, with or without
16726 + * modification, are permitted provided that the following conditions are met:
16727 + * * Redistributions of source code must retain the above copyright
16728 + * notice, this list of conditions and the following disclaimer.
16729 + * * Redistributions in binary form must reproduce the above copyright
16730 + * notice, this list of conditions and the following disclaimer in the
16731 + * documentation and/or other materials provided with the distribution.
16732 + * * Neither the name of Freescale Semiconductor nor the
16733 + * names of its contributors may be used to endorse or promote products
16734 + * derived from this software without specific prior written permission.
16735 + *
16736 + *
16737 + * ALTERNATIVELY, this software may be distributed under the terms of the
16738 + * GNU General Public License ("GPL") as published by the Free Software
16739 + * Foundation, either version 2 of that License or (at your option) any
16740 + * later version.
16741 + *
16742 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
16743 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
16744 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
16745 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
16746 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
16747 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
16748 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
16749 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
16750 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
16751 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
16752 + */
16753 +
16754 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
16755 +#define pr_fmt(fmt) \
16756 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
16757 + KBUILD_BASENAME".c", __LINE__, __func__
16758 +#else
16759 +#define pr_fmt(fmt) \
16760 + KBUILD_MODNAME ": " fmt
16761 +#endif
16762 +
16763 +#include <linux/init.h>
16764 +#include <linux/module.h>
16765 +#include <linux/io.h>
16766 +#include <linux/of_platform.h>
16767 +#include <linux/of_mdio.h>
16768 +#include <linux/phy.h>
16769 +#include <linux/netdevice.h>
16770 +
16771 +#include "dpaa_eth.h"
16772 +#include "mac.h"
16773 +#include "lnxwrp_fsl_fman.h"
16774 +
16775 +#include "error_ext.h" /* GET_ERROR_TYPE, E_OK */
16776 +
16777 +#include "fsl_fman_dtsec.h"
16778 +#include "fsl_fman_tgec.h"
16779 +#include "fsl_fman_memac.h"
16780 +#include "../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h"
16781 +
16782 +#define MAC_DESCRIPTION "FSL FMan MAC API based driver"
16783 +
16784 +MODULE_LICENSE("Dual BSD/GPL");
16785 +
16786 +MODULE_AUTHOR("Emil Medve <Emilian.Medve@Freescale.com>");
16787 +
16788 +MODULE_DESCRIPTION(MAC_DESCRIPTION);
16789 +
16790 +struct mac_priv_s {
16791 + struct fm_mac_dev *fm_mac;
16792 +};
16793 +
16794 +const char *mac_driver_description __initconst = MAC_DESCRIPTION;
16795 +const size_t mac_sizeof_priv[] = {
16796 + [DTSEC] = sizeof(struct mac_priv_s),
16797 + [XGMAC] = sizeof(struct mac_priv_s),
16798 + [MEMAC] = sizeof(struct mac_priv_s)
16799 +};
16800 +
16801 +static const enet_mode_t _100[] = {
16802 + [PHY_INTERFACE_MODE_MII] = e_ENET_MODE_MII_100,
16803 + [PHY_INTERFACE_MODE_RMII] = e_ENET_MODE_RMII_100
16804 +};
16805 +
16806 +static const enet_mode_t _1000[] = {
16807 + [PHY_INTERFACE_MODE_GMII] = e_ENET_MODE_GMII_1000,
16808 + [PHY_INTERFACE_MODE_SGMII] = e_ENET_MODE_SGMII_1000,
16809 + [PHY_INTERFACE_MODE_QSGMII] = e_ENET_MODE_QSGMII_1000,
16810 + [PHY_INTERFACE_MODE_TBI] = e_ENET_MODE_TBI_1000,
16811 + [PHY_INTERFACE_MODE_RGMII] = e_ENET_MODE_RGMII_1000,
16812 + [PHY_INTERFACE_MODE_RGMII_ID] = e_ENET_MODE_RGMII_1000,
16813 + [PHY_INTERFACE_MODE_RGMII_RXID] = e_ENET_MODE_RGMII_1000,
16814 + [PHY_INTERFACE_MODE_RGMII_TXID] = e_ENET_MODE_RGMII_1000,
16815 + [PHY_INTERFACE_MODE_RTBI] = e_ENET_MODE_RTBI_1000
16816 +};
16817 +
16818 +static enet_mode_t __cold __attribute__((nonnull))
16819 +macdev2enetinterface(const struct mac_device *mac_dev)
16820 +{
16821 + switch (mac_dev->max_speed) {
16822 + case SPEED_100:
16823 + return _100[mac_dev->phy_if];
16824 + case SPEED_1000:
16825 + return _1000[mac_dev->phy_if];
16826 + case SPEED_2500:
16827 + return e_ENET_MODE_SGMII_2500;
16828 + case SPEED_10000:
16829 + return e_ENET_MODE_XGMII_10000;
16830 + default:
16831 + return e_ENET_MODE_MII_100;
16832 + }
16833 +}
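Note: macdev2enetinterface() is a two-level lookup: max_speed selects a table and phy_if indexes into it; phy_if values missing from a table read as 0, and unknown speeds fall back to e_ENET_MODE_MII_100. Restating one row of the tables above:

	/* max_speed = SPEED_1000, phy_if = PHY_INTERFACE_MODE_SGMII
	 *   -> _1000[PHY_INTERFACE_MODE_SGMII] = e_ENET_MODE_SGMII_1000
	 */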
16834 +
16835 +static void mac_exception(handle_t _mac_dev, e_FmMacExceptions exception)
16836 +{
16837 + struct mac_device *mac_dev;
16838 +
16839 + mac_dev = (struct mac_device *)_mac_dev;
16840 +
16841 + if (e_FM_MAC_EX_10G_RX_FIFO_OVFL == exception) {
16842 +		/* don't flag further RX FIFO overflows after the first one */
16843 + fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
16844 + e_FM_MAC_EX_10G_RX_FIFO_OVFL, false);
16845 + dev_err(mac_dev->dev, "10G MAC got RX FIFO Error = %x\n",
16846 + exception);
16847 + }
16848 +
16849 + dev_dbg(mac_dev->dev, "%s:%s() -> %d\n", KBUILD_BASENAME".c", __func__,
16850 + exception);
16851 +}
16852 +
16853 +static int __cold init(struct mac_device *mac_dev)
16854 +{
16855 + int _errno;
16856 + struct mac_priv_s *priv;
16857 + t_FmMacParams param;
16858 + uint32_t version;
16859 +
16860 + priv = macdev_priv(mac_dev);
16861 +
16862 + param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap(
16863 + mac_dev->dev, mac_dev->res->start, 0x2000);
16864 + param.enetMode = macdev2enetinterface(mac_dev);
16865 + memcpy(&param.addr, mac_dev->addr, min(sizeof(param.addr),
16866 + sizeof(mac_dev->addr)));
16867 + param.macId = mac_dev->cell_index;
16868 + param.h_Fm = (handle_t)mac_dev->fm;
16869 + param.mdioIrq = NO_IRQ;
16870 + param.f_Exception = mac_exception;
16871 + param.f_Event = mac_exception;
16872 + param.h_App = mac_dev;
16873 +
16874 + priv->fm_mac = fm_mac_config(&param);
16875 + if (unlikely(priv->fm_mac == NULL)) {
16876 + _errno = -EINVAL;
16877 + goto _return;
16878 + }
16879 +
16880 + fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac,
16881 + (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
16882 + param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS);
16883 +
16884 + _errno = fm_mac_config_max_frame_length(priv->fm_mac,
16885 + fm_get_max_frm());
16886 + if (unlikely(_errno < 0))
16887 + goto _return_fm_mac_free;
16888 +
16889 + if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
16890 +		/* 10G always works with pad and CRC; only 1G needs them enabled here */
16891 + _errno = fm_mac_config_pad_and_crc(priv->fm_mac, true);
16892 + if (unlikely(_errno < 0))
16893 + goto _return_fm_mac_free;
16894 +
16895 + _errno = fm_mac_config_half_duplex(priv->fm_mac,
16896 + mac_dev->half_duplex);
16897 + if (unlikely(_errno < 0))
16898 + goto _return_fm_mac_free;
16899 + } else {
16900 + _errno = fm_mac_config_reset_on_init(priv->fm_mac, true);
16901 + if (unlikely(_errno < 0))
16902 + goto _return_fm_mac_free;
16903 + }
16904 +
16905 + _errno = fm_mac_init(priv->fm_mac);
16906 + if (unlikely(_errno < 0))
16907 + goto _return_fm_mac_free;
16908 +
16909 +#ifndef CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN
16910 +	/* For 1G MACs, disable the MIB counters overflow interrupt by default */
16911 + if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) {
16912 + _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
16913 + e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL, FALSE);
16914 + if (unlikely(_errno < 0))
16915 + goto _return_fm_mac_free;
16916 + }
16917 +#endif /* !CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN */
16918 +
16919 + /* For 10G MAC, disable Tx ECC exception */
16920 + if (macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) {
16921 + _errno = fm_mac_set_exception(mac_dev->get_mac_handle(mac_dev),
16922 + e_FM_MAC_EX_10G_1TX_ECC_ER, FALSE);
16923 + if (unlikely(_errno < 0))
16924 + goto _return_fm_mac_free;
16925 + }
16926 +
16927 + _errno = fm_mac_get_version(priv->fm_mac, &version);
16928 + if (unlikely(_errno < 0))
16929 + goto _return_fm_mac_free;
16930 +
16931 + dev_info(mac_dev->dev, "FMan %s version: 0x%08x\n",
16932 + ((macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
16933 + "dTSEC" : "XGEC"), version);
16934 +
16935 + goto _return;
16936 +
16937 +
16938 +_return_fm_mac_free:
16939 + fm_mac_free(mac_dev->get_mac_handle(mac_dev));
16940 +
16941 +_return:
16942 + return _errno;
16943 +}
16944 +
16945 +static int __cold memac_init(struct mac_device *mac_dev)
16946 +{
16947 + int _errno;
16948 + struct mac_priv_s *priv;
16949 + t_FmMacParams param;
16950 +
16951 + priv = macdev_priv(mac_dev);
16952 +
16953 + param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap(
16954 + mac_dev->dev, mac_dev->res->start, 0x2000);
16955 + param.enetMode = macdev2enetinterface(mac_dev);
16956 + memcpy(&param.addr, mac_dev->addr, sizeof(mac_dev->addr));
16957 + param.macId = mac_dev->cell_index;
16958 + param.h_Fm = (handle_t)mac_dev->fm;
16959 + param.mdioIrq = NO_IRQ;
16960 + param.f_Exception = mac_exception;
16961 + param.f_Event = mac_exception;
16962 + param.h_App = mac_dev;
16963 +
16964 + priv->fm_mac = fm_mac_config(&param);
16965 + if (unlikely(priv->fm_mac == NULL)) {
16966 + _errno = -EINVAL;
16967 + goto _return;
16968 + }
16969 +
16970 + fm_mac_set_handle(mac_dev->fm_dev, priv->fm_mac,
16971 + (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ?
16972 + param.macId : param.macId + FM_MAX_NUM_OF_1G_MACS);
16973 +
16974 + _errno = fm_mac_config_max_frame_length(priv->fm_mac, fm_get_max_frm());
16975 + if (unlikely(_errno < 0))
16976 + goto _return_fm_mac_free;
16977 +
16978 + _errno = fm_mac_config_reset_on_init(priv->fm_mac, true);
16979 + if (unlikely(_errno < 0))
16980 + goto _return_fm_mac_free;
16981 +
16982 + _errno = fm_mac_init(priv->fm_mac);
16983 + if (unlikely(_errno < 0))
16984 + goto _return_fm_mac_free;
16985 +
16986 + dev_info(mac_dev->dev, "FMan MEMAC\n");
16987 +
16988 + goto _return;
16989 +
16990 +_return_fm_mac_free:
16991 + fm_mac_free(priv->fm_mac);
16992 +
16993 +_return:
16994 + return _errno;
16995 +}
16996 +
16997 +static int __cold start(struct mac_device *mac_dev)
16998 +{
16999 + int _errno;
17000 + struct phy_device *phy_dev = mac_dev->phy_dev;
17001 +
17002 + _errno = fm_mac_enable(mac_dev->get_mac_handle(mac_dev));
17003 +
17004 + if (!_errno && phy_dev) {
17005 + if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000)
17006 + phy_start(phy_dev);
17007 + else if (phy_dev->drv->read_status)
17008 + phy_dev->drv->read_status(phy_dev);
17009 + }
17010 +
17011 + return _errno;
17012 +}
17013 +
17014 +static int __cold stop(struct mac_device *mac_dev)
17015 +{
17016 + if (mac_dev->phy_dev &&
17017 + (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000))
17018 + phy_stop(mac_dev->phy_dev);
17019 +
17020 + return fm_mac_disable(mac_dev->get_mac_handle(mac_dev));
17021 +}
17022 +
17023 +static int __cold set_multi(struct net_device *net_dev,
17024 + struct mac_device *mac_dev)
17025 +{
17026 + struct mac_priv_s *mac_priv;
17027 + struct mac_address *old_addr, *tmp;
17028 + struct netdev_hw_addr *ha;
17029 + int _errno;
17030 +
17031 + mac_priv = macdev_priv(mac_dev);
17032 +
17033 + /* Clear previous address list */
17034 + list_for_each_entry_safe(old_addr, tmp, &mac_dev->mc_addr_list, list) {
17035 + _errno = fm_mac_remove_hash_mac_addr(mac_priv->fm_mac,
17036 + (t_EnetAddr *)old_addr->addr);
17037 + if (_errno < 0)
17038 + return _errno;
17039 +
17040 + list_del(&old_addr->list);
17041 + kfree(old_addr);
17042 + }
17043 +
17044 + /* Add all the addresses from the new list */
17045 + netdev_for_each_mc_addr(ha, net_dev) {
17046 + _errno = fm_mac_add_hash_mac_addr(mac_priv->fm_mac,
17047 + (t_EnetAddr *)ha->addr);
17048 + if (_errno < 0)
17049 + return _errno;
17050 +
17051 + tmp = kmalloc(sizeof(struct mac_address), GFP_ATOMIC);
17052 + if (!tmp) {
17053 + dev_err(mac_dev->dev, "Out of memory\n");
17054 + return -ENOMEM;
17055 + }
17056 + memcpy(tmp->addr, ha->addr, ETH_ALEN);
17057 + list_add(&tmp->list, &mac_dev->mc_addr_list);
17058 + }
17059 + return 0;
17060 +}
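A hypothetical caller, mirroring how the DPAA Ethernet driver's
ndo_set_rx_mode path would reach the set_multi hook installed by the
setup_*() routines further below (the function name is illustrative only):

    static void example_set_rx_mode(struct net_device *net_dev)
    {
            struct dpa_priv_s *priv = netdev_priv(net_dev);

            if (priv->mac_dev)
                    priv->mac_dev->set_multi(net_dev, priv->mac_dev);
    }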
17061 +
17062 +/* Avoid redundant calls to FMD if the MAC driver already holds the desired
17063 + * active PAUSE settings; otherwise, propagate the new active settings to
17064 + * FMan.
17065 + */
17066 +int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
17067 +{
17068 + struct fm_mac_dev *fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
17069 + int _errno = 0;
17070 +
17071 + if (unlikely(rx != mac_dev->rx_pause_active)) {
17072 + _errno = fm_mac_set_rx_pause_frames(fm_mac_dev, rx);
17073 + if (likely(_errno == 0))
17074 + mac_dev->rx_pause_active = rx;
17075 + }
17076 +
17077 + if (unlikely(tx != mac_dev->tx_pause_active)) {
17078 + _errno = fm_mac_set_tx_pause_frames(fm_mac_dev, tx);
17079 + if (likely(_errno == 0))
17080 + mac_dev->tx_pause_active = tx;
17081 + }
17082 +
17083 + return _errno;
17084 +}
17085 +EXPORT_SYMBOL(set_mac_active_pause);
17086 +
17087 +/* Determine the MAC RX/TX PAUSE frame settings based on the result of PHY
17088 + * autonegotiation or on the values set via ethtool.
17089 + */
17090 +void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause)
17091 +{
17092 + struct phy_device *phy_dev = mac_dev->phy_dev;
17093 + u16 lcl_adv, rmt_adv;
17094 + u8 flowctrl;
17095 +
17096 + *rx_pause = *tx_pause = false;
17097 +
17098 + if (!phy_dev->duplex)
17099 + return;
17100 +
17101 + /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
17102 + * are those set by ethtool.
17103 + */
17104 + if (!mac_dev->autoneg_pause) {
17105 + *rx_pause = mac_dev->rx_pause_req;
17106 + *tx_pause = mac_dev->tx_pause_req;
17107 + return;
17108 + }
17109 +
17110 + /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
17111 + * settings depend on the result of the link negotiation.
17112 + */
17113 +
17114 + /* get local capabilities */
17115 + lcl_adv = 0;
17116 + if (phy_dev->advertising & ADVERTISED_Pause)
17117 + lcl_adv |= ADVERTISE_PAUSE_CAP;
17118 + if (phy_dev->advertising & ADVERTISED_Asym_Pause)
17119 + lcl_adv |= ADVERTISE_PAUSE_ASYM;
17120 +
17121 + /* get link partner capabilities */
17122 + rmt_adv = 0;
17123 + if (phy_dev->pause)
17124 + rmt_adv |= LPA_PAUSE_CAP;
17125 + if (phy_dev->asym_pause)
17126 + rmt_adv |= LPA_PAUSE_ASYM;
17127 +
17128 + /* Calculate TX/RX settings based on local and peer advertised
17129 + * symmetric/asymmetric PAUSE capabilities.
17130 + */
17131 + flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
17132 + if (flowctrl & FLOW_CTRL_RX)
17133 + *rx_pause = true;
17134 + if (flowctrl & FLOW_CTRL_TX)
17135 + *tx_pause = true;
17136 +}
17137 +EXPORT_SYMBOL(get_pause_cfg);
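As a worked illustration of the resolution above (the advertisement values
are for illustration only; mii_resolve_flowctrl_fdx() is the linux/mii.h
helper used above):

    /* Local advertises symmetric + asymmetric PAUSE, partner only
     * asymmetric: this resolves to FLOW_CTRL_RX, i.e. *rx_pause = true
     * and *tx_pause = false. Had both sides advertised the symmetric
     * capability, the result would be FLOW_CTRL_TX | FLOW_CTRL_RX. */
    u16 lcl_adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
    u16 rmt_adv = LPA_PAUSE_ASYM;
    u8 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);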
17138 +
17139 +static void adjust_link(struct net_device *net_dev)
17140 +{
17141 + struct dpa_priv_s *priv = netdev_priv(net_dev);
17142 + struct mac_device *mac_dev = priv->mac_dev;
17143 + struct phy_device *phy_dev = mac_dev->phy_dev;
17144 + struct fm_mac_dev *fm_mac_dev;
17145 + bool rx_pause, tx_pause;
17146 + int _errno;
17147 +
17148 + fm_mac_dev = mac_dev->get_mac_handle(mac_dev);
17149 + fm_mac_adjust_link(fm_mac_dev, phy_dev->link, phy_dev->speed,
17150 + phy_dev->duplex);
17151 +
17152 + get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
17153 + _errno = set_mac_active_pause(mac_dev, rx_pause, tx_pause);
17154 + if (unlikely(_errno < 0))
17155 + netdev_err(net_dev, "set_mac_active_pause() = %d\n", _errno);
17156 +}
17157 +
17158 +/* Initializes the driver's PHY state and attaches to the PHY.
17159 + * Returns 0 on success.
17160 + */
17161 +static int dtsec_init_phy(struct net_device *net_dev,
17162 + struct mac_device *mac_dev)
17163 +{
17164 + struct phy_device *phy_dev;
17165 +
17166 + if (!mac_dev->phy_node)
17167 + phy_dev = phy_connect(net_dev, mac_dev->fixed_bus_id,
17168 + &adjust_link, mac_dev->phy_if);
17169 + else
17170 + phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
17171 + &adjust_link, 0, mac_dev->phy_if);
17172 + if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
17173 + netdev_err(net_dev, "Could not connect to PHY %s\n",
17174 + mac_dev->phy_node ?
17175 + mac_dev->phy_node->full_name :
17176 + mac_dev->fixed_bus_id);
17177 + return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
17178 + }
17179 +
17180 + /* Remove any features not supported by the controller */
17181 + phy_dev->supported &= mac_dev->if_support;
17182 + /* Enable the symmetric and asymmetric PAUSE frame advertisements,
17183 + * as most of the PHY drivers do not enable them by default.
17184 + */
17185 + phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
17186 + phy_dev->advertising = phy_dev->supported;
17187 +
17188 + mac_dev->phy_dev = phy_dev;
17189 +
17190 + return 0;
17191 +}
17192 +
17193 +static int xgmac_init_phy(struct net_device *net_dev,
17194 + struct mac_device *mac_dev)
17195 +{
17196 + struct phy_device *phy_dev;
17197 +
17198 + if (!mac_dev->phy_node)
17199 + phy_dev = phy_attach(net_dev, mac_dev->fixed_bus_id,
17200 + mac_dev->phy_if);
17201 + else
17202 + phy_dev = of_phy_attach(net_dev, mac_dev->phy_node, 0,
17203 + mac_dev->phy_if);
17204 + if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
17205 + netdev_err(net_dev, "Could not attach to PHY %s\n",
17206 + mac_dev->phy_node ?
17207 + mac_dev->phy_node->full_name :
17208 + mac_dev->fixed_bus_id);
17209 + return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
17210 + }
17211 +
17212 + phy_dev->supported &= mac_dev->if_support;
17213 + /* Enable the symmetric and asymmetric PAUSE frame advertisements,
17214 + * as most of the PHY drivers do not enable them by default.
17215 + */
17216 + phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
17217 + phy_dev->advertising = phy_dev->supported;
17218 +
17219 + mac_dev->phy_dev = phy_dev;
17220 +
17221 + return 0;
17222 +}
17223 +
17224 +static int memac_init_phy(struct net_device *net_dev,
17225 + struct mac_device *mac_dev)
17226 +{
17227 + struct phy_device *phy_dev;
17228 +
17229 + if ((macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) ||
17230 +	    (macdev2enetinterface(mac_dev) == e_ENET_MODE_SGMII_2500)) {
17231 + if (!mac_dev->phy_node) {
17232 + mac_dev->phy_dev = NULL;
17233 + return 0;
17234 + } else
17235 + phy_dev = of_phy_attach(net_dev, mac_dev->phy_node, 0,
17236 + mac_dev->phy_if);
17237 + } else {
17238 + if (!mac_dev->phy_node)
17239 + phy_dev = phy_connect(net_dev, mac_dev->fixed_bus_id,
17240 + &adjust_link, mac_dev->phy_if);
17241 + else
17242 + phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
17243 + &adjust_link, 0,
17244 + mac_dev->phy_if);
17245 + }
17246 +
17247 + if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
17248 + netdev_err(net_dev, "Could not connect to PHY %s\n",
17249 + mac_dev->phy_node ?
17250 + mac_dev->phy_node->full_name :
17251 + mac_dev->fixed_bus_id);
17252 + return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev);
17253 + }
17254 +
17255 + /* Remove any features not supported by the controller */
17256 + phy_dev->supported &= mac_dev->if_support;
17257 + /* Enable the symmetric and asymmetric PAUSE frame advertisements,
17258 + * as most of the PHY drivers do not enable them by default.
17259 + */
17260 + phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
17261 + phy_dev->advertising = phy_dev->supported;
17262 +
17263 + mac_dev->phy_dev = phy_dev;
17264 +
17265 + return 0;
17266 +}
17267 +
17268 +static int __cold uninit(struct fm_mac_dev *fm_mac_dev)
17269 +{
17270 + int _errno, __errno;
17271 +
17272 + _errno = fm_mac_disable(fm_mac_dev);
17273 + __errno = fm_mac_free(fm_mac_dev);
17274 +
17275 + if (unlikely(__errno < 0))
17276 + _errno = __errno;
17277 +
17278 + return _errno;
17279 +}
17280 +
17281 +static struct fm_mac_dev *get_mac_handle(struct mac_device *mac_dev)
17282 +{
17283 + const struct mac_priv_s *priv;
17284 + priv = macdev_priv(mac_dev);
17285 + return priv->fm_mac;
17286 +}
17287 +
17288 +static int dtsec_dump_regs(struct mac_device *h_mac, char *buf, int nn)
17289 +{
17290 + struct dtsec_regs *p_mm = (struct dtsec_regs *) h_mac->vaddr;
17291 + int i = 0, n = nn;
17292 +
17293 + FM_DMP_SUBTITLE(buf, n, "\n");
17294 +
17295 + FM_DMP_TITLE(buf, n, p_mm, "FM MAC - DTSEC-%d", h_mac->cell_index);
17296 +
17297 + FM_DMP_V32(buf, n, p_mm, tsec_id);
17298 + FM_DMP_V32(buf, n, p_mm, tsec_id2);
17299 + FM_DMP_V32(buf, n, p_mm, ievent);
17300 + FM_DMP_V32(buf, n, p_mm, imask);
17301 + FM_DMP_V32(buf, n, p_mm, ecntrl);
17302 + FM_DMP_V32(buf, n, p_mm, ptv);
17303 + FM_DMP_V32(buf, n, p_mm, tmr_ctrl);
17304 + FM_DMP_V32(buf, n, p_mm, tmr_pevent);
17305 + FM_DMP_V32(buf, n, p_mm, tmr_pemask);
17306 + FM_DMP_V32(buf, n, p_mm, tctrl);
17307 + FM_DMP_V32(buf, n, p_mm, rctrl);
17308 + FM_DMP_V32(buf, n, p_mm, maccfg1);
17309 + FM_DMP_V32(buf, n, p_mm, maccfg2);
17310 + FM_DMP_V32(buf, n, p_mm, ipgifg);
17311 + FM_DMP_V32(buf, n, p_mm, hafdup);
17312 + FM_DMP_V32(buf, n, p_mm, maxfrm);
17313 +
17314 + FM_DMP_V32(buf, n, p_mm, macstnaddr1);
17315 + FM_DMP_V32(buf, n, p_mm, macstnaddr2);
17316 +
17317 + for (i = 0; i < 7; ++i) {
17318 + FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match1);
17319 + FM_DMP_V32(buf, n, p_mm, macaddr[i].exact_match2);
17320 + }
17321 +
17322 + FM_DMP_V32(buf, n, p_mm, car1);
17323 + FM_DMP_V32(buf, n, p_mm, car2);
17324 +
17325 + return n;
17326 +}
17327 +
17328 +static int xgmac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
17329 +{
17330 + struct tgec_regs *p_mm = (struct tgec_regs *) h_mac->vaddr;
17331 + int n = nn;
17332 +
17333 + FM_DMP_SUBTITLE(buf, n, "\n");
17334 + FM_DMP_TITLE(buf, n, p_mm, "FM MAC - TGEC -%d", h_mac->cell_index);
17335 +
17336 + FM_DMP_V32(buf, n, p_mm, tgec_id);
17337 + FM_DMP_V32(buf, n, p_mm, command_config);
17338 + FM_DMP_V32(buf, n, p_mm, mac_addr_0);
17339 + FM_DMP_V32(buf, n, p_mm, mac_addr_1);
17340 + FM_DMP_V32(buf, n, p_mm, maxfrm);
17341 + FM_DMP_V32(buf, n, p_mm, pause_quant);
17342 + FM_DMP_V32(buf, n, p_mm, rx_fifo_sections);
17343 + FM_DMP_V32(buf, n, p_mm, tx_fifo_sections);
17344 + FM_DMP_V32(buf, n, p_mm, rx_fifo_almost_f_e);
17345 + FM_DMP_V32(buf, n, p_mm, tx_fifo_almost_f_e);
17346 + FM_DMP_V32(buf, n, p_mm, hashtable_ctrl);
17347 + FM_DMP_V32(buf, n, p_mm, mdio_cfg_status);
17348 + FM_DMP_V32(buf, n, p_mm, mdio_command);
17349 + FM_DMP_V32(buf, n, p_mm, mdio_data);
17350 + FM_DMP_V32(buf, n, p_mm, mdio_regaddr);
17351 + FM_DMP_V32(buf, n, p_mm, status);
17352 + FM_DMP_V32(buf, n, p_mm, tx_ipg_len);
17353 + FM_DMP_V32(buf, n, p_mm, mac_addr_2);
17354 + FM_DMP_V32(buf, n, p_mm, mac_addr_3);
17355 + FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_rd);
17356 + FM_DMP_V32(buf, n, p_mm, rx_fifo_ptr_wr);
17357 + FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_rd);
17358 + FM_DMP_V32(buf, n, p_mm, tx_fifo_ptr_wr);
17359 + FM_DMP_V32(buf, n, p_mm, imask);
17360 + FM_DMP_V32(buf, n, p_mm, ievent);
17361 +
17362 + return n;
17363 +}
17364 +
17365 +static int memac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
17366 +{
17367 + struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
17368 + int i = 0, n = nn;
17369 +
17370 + FM_DMP_SUBTITLE(buf, n, "\n");
17371 + FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d", h_mac->cell_index);
17372 +
17373 + FM_DMP_V32(buf, n, p_mm, command_config);
17374 + FM_DMP_V32(buf, n, p_mm, mac_addr0.mac_addr_l);
17375 + FM_DMP_V32(buf, n, p_mm, mac_addr0.mac_addr_u);
17376 + FM_DMP_V32(buf, n, p_mm, maxfrm);
17377 + FM_DMP_V32(buf, n, p_mm, hashtable_ctrl);
17378 + FM_DMP_V32(buf, n, p_mm, ievent);
17379 + FM_DMP_V32(buf, n, p_mm, tx_ipg_length);
17380 + FM_DMP_V32(buf, n, p_mm, imask);
17381 +
17382 + for (i = 0; i < 4; ++i)
17383 + FM_DMP_V32(buf, n, p_mm, pause_quanta[i]);
17384 +
17385 + for (i = 0; i < 4; ++i)
17386 + FM_DMP_V32(buf, n, p_mm, pause_thresh[i]);
17387 +
17388 + FM_DMP_V32(buf, n, p_mm, rx_pause_status);
17389 +
17390 + for (i = 0; i < MEMAC_NUM_OF_PADDRS; ++i) {
17391 + FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_l);
17392 + FM_DMP_V32(buf, n, p_mm, mac_addr[i].mac_addr_u);
17393 + }
17394 +
17395 + FM_DMP_V32(buf, n, p_mm, lpwake_timer);
17396 + FM_DMP_V32(buf, n, p_mm, sleep_timer);
17397 + FM_DMP_V32(buf, n, p_mm, statn_config);
17398 + FM_DMP_V32(buf, n, p_mm, if_mode);
17399 + FM_DMP_V32(buf, n, p_mm, if_status);
17400 + FM_DMP_V32(buf, n, p_mm, hg_config);
17401 + FM_DMP_V32(buf, n, p_mm, hg_pause_quanta);
17402 + FM_DMP_V32(buf, n, p_mm, hg_pause_thresh);
17403 + FM_DMP_V32(buf, n, p_mm, hgrx_pause_status);
17404 + FM_DMP_V32(buf, n, p_mm, hg_fifos_status);
17405 + FM_DMP_V32(buf, n, p_mm, rhm);
17406 + FM_DMP_V32(buf, n, p_mm, thm);
17407 +
17408 + return n;
17409 +}
17410 +
17411 +static int memac_dump_regs_rx(struct mac_device *h_mac, char *buf, int nn)
17412 +{
17413 + struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
17414 + int n = nn;
17415 +
17416 + FM_DMP_SUBTITLE(buf, n, "\n");
17417 + FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d Rx stats", h_mac->cell_index);
17418 +
17419 + /* Rx Statistics Counter */
17420 + FM_DMP_V32(buf, n, p_mm, reoct_l);
17421 + FM_DMP_V32(buf, n, p_mm, reoct_u);
17422 + FM_DMP_V32(buf, n, p_mm, roct_l);
17423 + FM_DMP_V32(buf, n, p_mm, roct_u);
17424 + FM_DMP_V32(buf, n, p_mm, raln_l);
17425 + FM_DMP_V32(buf, n, p_mm, raln_u);
17426 + FM_DMP_V32(buf, n, p_mm, rxpf_l);
17427 + FM_DMP_V32(buf, n, p_mm, rxpf_u);
17428 + FM_DMP_V32(buf, n, p_mm, rfrm_l);
17429 + FM_DMP_V32(buf, n, p_mm, rfrm_u);
17430 + FM_DMP_V32(buf, n, p_mm, rfcs_l);
17431 + FM_DMP_V32(buf, n, p_mm, rfcs_u);
17432 + FM_DMP_V32(buf, n, p_mm, rvlan_l);
17433 + FM_DMP_V32(buf, n, p_mm, rvlan_u);
17434 + FM_DMP_V32(buf, n, p_mm, rerr_l);
17435 + FM_DMP_V32(buf, n, p_mm, rerr_u);
17436 + FM_DMP_V32(buf, n, p_mm, ruca_l);
17437 + FM_DMP_V32(buf, n, p_mm, ruca_u);
17438 + FM_DMP_V32(buf, n, p_mm, rmca_l);
17439 + FM_DMP_V32(buf, n, p_mm, rmca_u);
17440 + FM_DMP_V32(buf, n, p_mm, rbca_l);
17441 + FM_DMP_V32(buf, n, p_mm, rbca_u);
17442 + FM_DMP_V32(buf, n, p_mm, rdrp_l);
17443 + FM_DMP_V32(buf, n, p_mm, rdrp_u);
17444 + FM_DMP_V32(buf, n, p_mm, rpkt_l);
17445 + FM_DMP_V32(buf, n, p_mm, rpkt_u);
17446 + FM_DMP_V32(buf, n, p_mm, rund_l);
17447 + FM_DMP_V32(buf, n, p_mm, rund_u);
17448 + FM_DMP_V32(buf, n, p_mm, r64_l);
17449 + FM_DMP_V32(buf, n, p_mm, r64_u);
17450 + FM_DMP_V32(buf, n, p_mm, r127_l);
17451 + FM_DMP_V32(buf, n, p_mm, r127_u);
17452 + FM_DMP_V32(buf, n, p_mm, r255_l);
17453 + FM_DMP_V32(buf, n, p_mm, r255_u);
17454 + FM_DMP_V32(buf, n, p_mm, r511_l);
17455 + FM_DMP_V32(buf, n, p_mm, r511_u);
17456 + FM_DMP_V32(buf, n, p_mm, r1023_l);
17457 + FM_DMP_V32(buf, n, p_mm, r1023_u);
17458 + FM_DMP_V32(buf, n, p_mm, r1518_l);
17459 + FM_DMP_V32(buf, n, p_mm, r1518_u);
17460 + FM_DMP_V32(buf, n, p_mm, r1519x_l);
17461 + FM_DMP_V32(buf, n, p_mm, r1519x_u);
17462 + FM_DMP_V32(buf, n, p_mm, rovr_l);
17463 + FM_DMP_V32(buf, n, p_mm, rovr_u);
17464 + FM_DMP_V32(buf, n, p_mm, rjbr_l);
17465 + FM_DMP_V32(buf, n, p_mm, rjbr_u);
17466 + FM_DMP_V32(buf, n, p_mm, rfrg_l);
17467 + FM_DMP_V32(buf, n, p_mm, rfrg_u);
17468 + FM_DMP_V32(buf, n, p_mm, rcnp_l);
17469 + FM_DMP_V32(buf, n, p_mm, rcnp_u);
17470 + FM_DMP_V32(buf, n, p_mm, rdrntp_l);
17471 + FM_DMP_V32(buf, n, p_mm, rdrntp_u);
17472 +
17473 + return n;
17474 +}
17475 +
17476 +static int memac_dump_regs_tx(struct mac_device *h_mac, char *buf, int nn)
17477 +{
17478 + struct memac_regs *p_mm = (struct memac_regs *) h_mac->vaddr;
17479 + int n = nn;
17480 +
17481 + FM_DMP_SUBTITLE(buf, n, "\n");
17482 + FM_DMP_TITLE(buf, n, p_mm, "FM MAC - MEMAC -%d Tx stats", h_mac->cell_index);
17483 +
17484 +
17485 + /* Tx Statistics Counter */
17486 + FM_DMP_V32(buf, n, p_mm, teoct_l);
17487 + FM_DMP_V32(buf, n, p_mm, teoct_u);
17488 + FM_DMP_V32(buf, n, p_mm, toct_l);
17489 + FM_DMP_V32(buf, n, p_mm, toct_u);
17490 + FM_DMP_V32(buf, n, p_mm, txpf_l);
17491 + FM_DMP_V32(buf, n, p_mm, txpf_u);
17492 + FM_DMP_V32(buf, n, p_mm, tfrm_l);
17493 + FM_DMP_V32(buf, n, p_mm, tfrm_u);
17494 + FM_DMP_V32(buf, n, p_mm, tfcs_l);
17495 + FM_DMP_V32(buf, n, p_mm, tfcs_u);
17496 + FM_DMP_V32(buf, n, p_mm, tvlan_l);
17497 + FM_DMP_V32(buf, n, p_mm, tvlan_u);
17498 + FM_DMP_V32(buf, n, p_mm, terr_l);
17499 + FM_DMP_V32(buf, n, p_mm, terr_u);
17500 + FM_DMP_V32(buf, n, p_mm, tuca_l);
17501 + FM_DMP_V32(buf, n, p_mm, tuca_u);
17502 + FM_DMP_V32(buf, n, p_mm, tmca_l);
17503 + FM_DMP_V32(buf, n, p_mm, tmca_u);
17504 + FM_DMP_V32(buf, n, p_mm, tbca_l);
17505 + FM_DMP_V32(buf, n, p_mm, tbca_u);
17506 + FM_DMP_V32(buf, n, p_mm, tpkt_l);
17507 + FM_DMP_V32(buf, n, p_mm, tpkt_u);
17508 + FM_DMP_V32(buf, n, p_mm, tund_l);
17509 + FM_DMP_V32(buf, n, p_mm, tund_u);
17510 + FM_DMP_V32(buf, n, p_mm, t64_l);
17511 + FM_DMP_V32(buf, n, p_mm, t64_u);
17512 + FM_DMP_V32(buf, n, p_mm, t127_l);
17513 + FM_DMP_V32(buf, n, p_mm, t127_u);
17514 + FM_DMP_V32(buf, n, p_mm, t255_l);
17515 + FM_DMP_V32(buf, n, p_mm, t255_u);
17516 + FM_DMP_V32(buf, n, p_mm, t511_l);
17517 + FM_DMP_V32(buf, n, p_mm, t511_u);
17518 + FM_DMP_V32(buf, n, p_mm, t1023_l);
17519 + FM_DMP_V32(buf, n, p_mm, t1023_u);
17520 + FM_DMP_V32(buf, n, p_mm, t1518_l);
17521 + FM_DMP_V32(buf, n, p_mm, t1518_u);
17522 + FM_DMP_V32(buf, n, p_mm, t1519x_l);
17523 + FM_DMP_V32(buf, n, p_mm, t1519x_u);
17524 + FM_DMP_V32(buf, n, p_mm, tcnp_l);
17525 + FM_DMP_V32(buf, n, p_mm, tcnp_u);
17526 +
17527 + return n;
17528 +}
17529 +
17530 +int fm_mac_dump_regs(struct mac_device *h_mac, char *buf, int nn)
17531 +{
17532 + int n = nn;
17533 +
17534 + n = h_mac->dump_mac_regs(h_mac, buf, n);
17535 +
17536 + return n;
17537 +}
17538 +EXPORT_SYMBOL(fm_mac_dump_regs);
17539 +
17540 +int fm_mac_dump_rx_stats(struct mac_device *h_mac, char *buf, int nn)
17541 +{
17542 + int n = nn;
17543 +
17544 +	if (h_mac->dump_mac_rx_stats)
17545 + n = h_mac->dump_mac_rx_stats(h_mac, buf, n);
17546 +
17547 + return n;
17548 +}
17549 +EXPORT_SYMBOL(fm_mac_dump_rx_stats);
17550 +
17551 +int fm_mac_dump_tx_stats(struct mac_device *h_mac, char *buf, int nn)
17552 +{
17553 + int n = nn;
17554 +
17555 +	if (h_mac->dump_mac_tx_stats)
17556 + n = h_mac->dump_mac_tx_stats(h_mac, buf, n);
17557 +
17558 + return n;
17559 +}
17560 +EXPORT_SYMBOL(fm_mac_dump_tx_stats);
17561 +
17562 +static void __cold setup_dtsec(struct mac_device *mac_dev)
17563 +{
17564 + mac_dev->init_phy = dtsec_init_phy;
17565 + mac_dev->init = init;
17566 + mac_dev->start = start;
17567 + mac_dev->stop = stop;
17568 + mac_dev->set_promisc = fm_mac_set_promiscuous;
17569 + mac_dev->change_addr = fm_mac_modify_mac_addr;
17570 + mac_dev->set_multi = set_multi;
17571 + mac_dev->uninit = uninit;
17572 + mac_dev->ptp_enable = fm_mac_enable_1588_time_stamp;
17573 + mac_dev->ptp_disable = fm_mac_disable_1588_time_stamp;
17574 + mac_dev->get_mac_handle = get_mac_handle;
17575 + mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
17576 + mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
17577 + mac_dev->fm_rtc_enable = fm_rtc_enable;
17578 + mac_dev->fm_rtc_disable = fm_rtc_disable;
17579 + mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt;
17580 + mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt;
17581 + mac_dev->fm_rtc_get_drift = fm_rtc_get_drift;
17582 + mac_dev->fm_rtc_set_drift = fm_rtc_set_drift;
17583 + mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm;
17584 + mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper;
17585 + mac_dev->set_wol = fm_mac_set_wol;
17586 + mac_dev->dump_mac_regs = dtsec_dump_regs;
17587 +}
17588 +
17589 +static void __cold setup_xgmac(struct mac_device *mac_dev)
17590 +{
17591 + mac_dev->init_phy = xgmac_init_phy;
17592 + mac_dev->init = init;
17593 + mac_dev->start = start;
17594 + mac_dev->stop = stop;
17595 + mac_dev->set_promisc = fm_mac_set_promiscuous;
17596 + mac_dev->change_addr = fm_mac_modify_mac_addr;
17597 + mac_dev->set_multi = set_multi;
17598 + mac_dev->uninit = uninit;
17599 + mac_dev->get_mac_handle = get_mac_handle;
17600 + mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
17601 + mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
17602 + mac_dev->set_wol = fm_mac_set_wol;
17603 + mac_dev->dump_mac_regs = xgmac_dump_regs;
17604 +}
17605 +
17606 +static void __cold setup_memac(struct mac_device *mac_dev)
17607 +{
17608 + mac_dev->init_phy = memac_init_phy;
17609 + mac_dev->init = memac_init;
17610 + mac_dev->start = start;
17611 + mac_dev->stop = stop;
17612 + mac_dev->set_promisc = fm_mac_set_promiscuous;
17613 + mac_dev->change_addr = fm_mac_modify_mac_addr;
17614 + mac_dev->set_multi = set_multi;
17615 + mac_dev->uninit = uninit;
17616 + mac_dev->get_mac_handle = get_mac_handle;
17617 + mac_dev->set_tx_pause = fm_mac_set_tx_pause_frames;
17618 + mac_dev->set_rx_pause = fm_mac_set_rx_pause_frames;
17619 + mac_dev->fm_rtc_enable = fm_rtc_enable;
17620 + mac_dev->fm_rtc_disable = fm_rtc_disable;
17621 + mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt;
17622 + mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt;
17623 + mac_dev->fm_rtc_get_drift = fm_rtc_get_drift;
17624 + mac_dev->fm_rtc_set_drift = fm_rtc_set_drift;
17625 + mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm;
17626 + mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper;
17627 + mac_dev->set_wol = fm_mac_set_wol;
17628 + mac_dev->dump_mac_regs = memac_dump_regs;
17629 + mac_dev->dump_mac_rx_stats = memac_dump_regs_rx;
17630 + mac_dev->dump_mac_tx_stats = memac_dump_regs_tx;
17631 +}
17632 +
17633 +void (*const mac_setup[])(struct mac_device *mac_dev) = {
17634 + [DTSEC] = setup_dtsec,
17635 + [XGMAC] = setup_xgmac,
17636 + [MEMAC] = setup_memac
17637 +};
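The mac_setup[] table is the per-MAC-type dispatch point; mac_probe() in
mac.c below selects the index from the matched compatible string and does,
in essence:

    /* i is one of DTSEC, XGMAC or MEMAC */
    mac_dev = alloc_macdev(dev, mac_sizeof_priv[i], mac_setup[i]);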
17638 --- /dev/null
17639 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.c
17640 @@ -0,0 +1,470 @@
17641 +/* Copyright 2008-2012 Freescale Semiconductor, Inc.
17642 + *
17643 + * Redistribution and use in source and binary forms, with or without
17644 + * modification, are permitted provided that the following conditions are met:
17645 + * * Redistributions of source code must retain the above copyright
17646 + * notice, this list of conditions and the following disclaimer.
17647 + * * Redistributions in binary form must reproduce the above copyright
17648 + * notice, this list of conditions and the following disclaimer in the
17649 + * documentation and/or other materials provided with the distribution.
17650 + * * Neither the name of Freescale Semiconductor nor the
17651 + * names of its contributors may be used to endorse or promote products
17652 + * derived from this software without specific prior written permission.
17653 + *
17654 + *
17655 + * ALTERNATIVELY, this software may be distributed under the terms of the
17656 + * GNU General Public License ("GPL") as published by the Free Software
17657 + * Foundation, either version 2 of that License or (at your option) any
17658 + * later version.
17659 + *
17660 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
17661 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17662 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17663 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
17664 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
17665 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
17666 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
17667 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
17668 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
17669 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
17670 + */
17671 +
17672 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
17673 +#define pr_fmt(fmt) \
17674 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
17675 + KBUILD_BASENAME".c", __LINE__, __func__
17676 +#else
17677 +#define pr_fmt(fmt) \
17678 + KBUILD_MODNAME ": " fmt
17679 +#endif
17680 +
17681 +#include <linux/init.h>
17682 +#include <linux/module.h>
17683 +#include <linux/of_address.h>
17684 +#include <linux/of_platform.h>
17685 +#include <linux/of_net.h>
17686 +#include <linux/device.h>
17687 +#include <linux/phy.h>
17688 +#include <linux/io.h>
17689 +
17690 +#include "lnxwrp_fm_ext.h"
17691 +
17692 +#include "mac.h"
17693 +
17694 +#define DTSEC_SUPPORTED \
17695 + (SUPPORTED_10baseT_Half \
17696 + | SUPPORTED_10baseT_Full \
17697 + | SUPPORTED_100baseT_Half \
17698 + | SUPPORTED_100baseT_Full \
17699 + | SUPPORTED_Autoneg \
17700 + | SUPPORTED_Pause \
17701 + | SUPPORTED_Asym_Pause \
17702 + | SUPPORTED_MII)
17703 +
17704 +static const char phy_str[][11] = {
17705 + [PHY_INTERFACE_MODE_MII] = "mii",
17706 + [PHY_INTERFACE_MODE_GMII] = "gmii",
17707 + [PHY_INTERFACE_MODE_SGMII] = "sgmii",
17708 + [PHY_INTERFACE_MODE_QSGMII] = "qsgmii",
17709 + [PHY_INTERFACE_MODE_TBI] = "tbi",
17710 + [PHY_INTERFACE_MODE_RMII] = "rmii",
17711 + [PHY_INTERFACE_MODE_RGMII] = "rgmii",
17712 + [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
17713 + [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
17714 + [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
17715 + [PHY_INTERFACE_MODE_RTBI] = "rtbi",
17716 + [PHY_INTERFACE_MODE_XGMII] = "xgmii",
17717 +	[PHY_INTERFACE_MODE_QSGMII] = "sgmii-2500" /* XXX duplicate index; overrides the "qsgmii" entry above */
17718 +};
17719 +
17720 +static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str)
17721 +{
17722 + int i;
17723 +
17724 + for (i = 0; i < ARRAY_SIZE(phy_str); i++)
17725 + if (strcmp(str, phy_str[i]) == 0)
17726 + return (phy_interface_t)i;
17727 +
17728 + return PHY_INTERFACE_MODE_MII;
17729 +}
17730 +
17731 +static const uint16_t phy2speed[] = {
17732 + [PHY_INTERFACE_MODE_MII] = SPEED_100,
17733 + [PHY_INTERFACE_MODE_GMII] = SPEED_1000,
17734 + [PHY_INTERFACE_MODE_SGMII] = SPEED_1000,
17735 + [PHY_INTERFACE_MODE_QSGMII] = SPEED_1000,
17736 + [PHY_INTERFACE_MODE_TBI] = SPEED_1000,
17737 + [PHY_INTERFACE_MODE_RMII] = SPEED_100,
17738 + [PHY_INTERFACE_MODE_RGMII] = SPEED_1000,
17739 + [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000,
17740 + [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
17741 + [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
17742 + [PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
17743 + [PHY_INTERFACE_MODE_XGMII] = SPEED_10000,
17744 +	[PHY_INTERFACE_MODE_QSGMII] = SPEED_2500 /* XXX duplicate index; overrides the SPEED_1000 entry above */
17745 +};
17746 +
17747 +static struct mac_device * __cold
17748 +alloc_macdev(struct device *dev, size_t sizeof_priv,
17749 + void (*setup)(struct mac_device *mac_dev))
17750 +{
17751 + struct mac_device *mac_dev;
17752 +
17753 + mac_dev = devm_kzalloc(dev, sizeof(*mac_dev) + sizeof_priv, GFP_KERNEL);
17754 + if (unlikely(mac_dev == NULL))
17755 + mac_dev = ERR_PTR(-ENOMEM);
17756 + else {
17757 + mac_dev->dev = dev;
17758 + dev_set_drvdata(dev, mac_dev);
17759 + setup(mac_dev);
17760 + }
17761 +
17762 + return mac_dev;
17763 +}
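alloc_macdev() places the MAC-type-specific private area directly behind
struct mac_device in a single devm allocation; macdev_priv() in mac.h below
recovers it. A sketch of the resulting layout:

    /*
     *   +---------------------+ <- mac_dev
     *   | struct mac_device   |
     *   +---------------------+ <- macdev_priv(mac_dev)
     *   | sizeof_priv bytes   |   (struct mac_priv_s for all three types)
     *   +---------------------+
     */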
17764 +
17765 +static int __cold free_macdev(struct mac_device *mac_dev)
17766 +{
17767 + dev_set_drvdata(mac_dev->dev, NULL);
17768 +
17769 + return mac_dev->uninit(mac_dev->get_mac_handle(mac_dev));
17770 +}
17771 +
17772 +static const struct of_device_id mac_match[] = {
17773 + [DTSEC] = {
17774 + .compatible = "fsl,fman-1g-mac"
17775 + },
17776 + [XGMAC] = {
17777 + .compatible = "fsl,fman-10g-mac"
17778 + },
17779 + [MEMAC] = {
17780 + .compatible = "fsl,fman-memac"
17781 + },
17782 + {}
17783 +};
17784 +MODULE_DEVICE_TABLE(of, mac_match);
17785 +
17786 +static int __cold mac_probe(struct platform_device *_of_dev)
17787 +{
17788 + int _errno, i;
17789 + struct device *dev;
17790 + struct device_node *mac_node, *dev_node;
17791 + struct mac_device *mac_dev;
17792 + struct platform_device *of_dev;
17793 + struct resource res;
17794 + const uint8_t *mac_addr;
17795 +	const char *char_prop = NULL;
17796 + int nph;
17797 + u32 cell_index;
17798 + const struct of_device_id *match;
17799 +
17800 + dev = &_of_dev->dev;
17801 + mac_node = dev->of_node;
17802 +
17803 + match = of_match_device(mac_match, dev);
17804 + if (!match)
17805 + return -EINVAL;
17806 +
17807 + for (i = 0; i < ARRAY_SIZE(mac_match) - 1 && match != mac_match + i;
17808 + i++)
17809 + ;
17810 + BUG_ON(i >= ARRAY_SIZE(mac_match) - 1);
17811 +
17812 + mac_dev = alloc_macdev(dev, mac_sizeof_priv[i], mac_setup[i]);
17813 + if (IS_ERR(mac_dev)) {
17814 + _errno = PTR_ERR(mac_dev);
17815 + dev_err(dev, "alloc_macdev() = %d\n", _errno);
17816 + goto _return;
17817 + }
17818 +
17819 + INIT_LIST_HEAD(&mac_dev->mc_addr_list);
17820 +
17821 + /* Get the FM node */
17822 + dev_node = of_get_parent(mac_node);
17823 + if (unlikely(dev_node == NULL)) {
17824 + dev_err(dev, "of_get_parent(%s) failed\n",
17825 + mac_node->full_name);
17826 + _errno = -EINVAL;
17827 + goto _return_dev_set_drvdata;
17828 + }
17829 +
17830 + of_dev = of_find_device_by_node(dev_node);
17831 + if (unlikely(of_dev == NULL)) {
17832 + dev_err(dev, "of_find_device_by_node(%s) failed\n",
17833 + dev_node->full_name);
17834 + _errno = -EINVAL;
17835 + goto _return_of_node_put;
17836 + }
17837 +
17838 + mac_dev->fm_dev = fm_bind(&of_dev->dev);
17839 + if (unlikely(mac_dev->fm_dev == NULL)) {
17840 + dev_err(dev, "fm_bind(%s) failed\n", dev_node->full_name);
17841 + _errno = -ENODEV;
17842 + goto _return_of_node_put;
17843 + }
17844 +
17845 + mac_dev->fm = (void *)fm_get_handle(mac_dev->fm_dev);
17846 + of_node_put(dev_node);
17847 +
17848 + /* Get the address of the memory mapped registers */
17849 + _errno = of_address_to_resource(mac_node, 0, &res);
17850 + if (unlikely(_errno < 0)) {
17851 + dev_err(dev, "of_address_to_resource(%s) = %d\n",
17852 + mac_node->full_name, _errno);
17853 + goto _return_dev_set_drvdata;
17854 + }
17855 +
17856 + mac_dev->res = __devm_request_region(
17857 + dev,
17858 + fm_get_mem_region(mac_dev->fm_dev),
17859 + res.start, res.end + 1 - res.start, "mac");
17860 + if (unlikely(mac_dev->res == NULL)) {
17861 +		dev_err(dev, "__devm_request_region(mac) failed\n");
17862 + _errno = -EBUSY;
17863 + goto _return_dev_set_drvdata;
17864 + }
17865 +
17866 + mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start,
17867 + mac_dev->res->end + 1
17868 + - mac_dev->res->start);
17869 + if (unlikely(mac_dev->vaddr == NULL)) {
17870 + dev_err(dev, "devm_ioremap() failed\n");
17871 + _errno = -EIO;
17872 + goto _return_dev_set_drvdata;
17873 + }
17874 +
17875 +#define TBIPA_OFFSET 0x1c
17876 +#define TBIPA_DEFAULT_ADDR 5 /* override if used as external PHY addr. */
17877 + mac_dev->tbi_node = of_parse_phandle(mac_node, "tbi-handle", 0);
17878 + if (mac_dev->tbi_node) {
17879 + u32 tbiaddr = TBIPA_DEFAULT_ADDR;
17880 + const __be32 *tbi_reg;
17881 + void __iomem *addr;
17882 +
17883 + tbi_reg = of_get_property(mac_dev->tbi_node, "reg", NULL);
17884 + if (tbi_reg)
17885 + tbiaddr = be32_to_cpup(tbi_reg);
17886 + addr = mac_dev->vaddr + TBIPA_OFFSET;
17887 + /* TODO: out_be32 does not exist on ARM */
17888 + out_be32(addr, tbiaddr);
17889 + }
17890 +
17891 + if (!of_device_is_available(mac_node)) {
17892 + devm_iounmap(dev, mac_dev->vaddr);
17893 + __devm_release_region(dev, fm_get_mem_region(mac_dev->fm_dev),
17894 + res.start, res.end + 1 - res.start);
17895 + fm_unbind(mac_dev->fm_dev);
17896 + devm_kfree(dev, mac_dev);
17897 + dev_set_drvdata(dev, NULL);
17898 + return -ENODEV;
17899 + }
17900 +
17901 + /* Get the cell-index */
17902 + _errno = of_property_read_u32(mac_node, "cell-index", &cell_index);
17903 + if (unlikely(_errno)) {
17904 + dev_err(dev, "Cannot read cell-index of mac node %s from device tree\n",
17905 + mac_node->full_name);
17906 + goto _return_dev_set_drvdata;
17907 + }
17908 + mac_dev->cell_index = (uint8_t)cell_index;
17909 +
17910 + /* Get the MAC address */
17911 + mac_addr = of_get_mac_address(mac_node);
17912 + if (unlikely(mac_addr == NULL)) {
17913 + dev_err(dev, "of_get_mac_address(%s) failed\n",
17914 + mac_node->full_name);
17915 + _errno = -EINVAL;
17916 + goto _return_dev_set_drvdata;
17917 + }
17918 + memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
17919 +
17920 + /* Verify the number of port handles */
17921 + nph = of_count_phandle_with_args(mac_node, "fsl,port-handles", NULL);
17922 + if (unlikely(nph < 0)) {
17923 + dev_err(dev, "Cannot read port handles of mac node %s from device tree\n",
17924 + mac_node->full_name);
17925 + _errno = nph;
17926 + goto _return_dev_set_drvdata;
17927 + }
17928 +
17929 + if (nph != ARRAY_SIZE(mac_dev->port_dev)) {
17930 +		dev_err(dev, "Unsupported number of port handles of mac node %s from device tree\n",
17931 + mac_node->full_name);
17932 + _errno = -EINVAL;
17933 + goto _return_dev_set_drvdata;
17934 + }
17935 +
17936 + for_each_port_device(i, mac_dev->port_dev) {
17937 + dev_node = of_parse_phandle(mac_node, "fsl,port-handles", i);
17938 + if (unlikely(dev_node == NULL)) {
17939 + dev_err(dev, "Cannot find port node referenced by mac node %s from device tree\n",
17940 + mac_node->full_name);
17941 + _errno = -EINVAL;
17942 + goto _return_of_node_put;
17943 + }
17944 +
17945 + of_dev = of_find_device_by_node(dev_node);
17946 + if (unlikely(of_dev == NULL)) {
17947 + dev_err(dev, "of_find_device_by_node(%s) failed\n",
17948 + dev_node->full_name);
17949 + _errno = -EINVAL;
17950 + goto _return_of_node_put;
17951 + }
17952 +
17953 + mac_dev->port_dev[i] = fm_port_bind(&of_dev->dev);
17954 + if (unlikely(mac_dev->port_dev[i] == NULL)) {
17955 +			dev_err(dev, "fm_port_bind(%s) failed\n",
17956 + dev_node->full_name);
17957 + _errno = -EINVAL;
17958 + goto _return_of_node_put;
17959 + }
17960 + of_node_put(dev_node);
17961 + }
17962 +
17963 + /* Get the PHY connection type */
17964 + _errno = of_property_read_string(mac_node, "phy-connection-type",
17965 + &char_prop);
17966 + if (unlikely(_errno)) {
17967 + dev_warn(dev,
17968 + "Cannot read PHY connection type of mac node %s from device tree. Defaulting to MII\n",
17969 + mac_node->full_name);
17970 + mac_dev->phy_if = PHY_INTERFACE_MODE_MII;
17971 + } else
17972 + mac_dev->phy_if = str2phy(char_prop);
17973 +
17974 + mac_dev->link = false;
17975 + mac_dev->half_duplex = false;
17976 + mac_dev->speed = phy2speed[mac_dev->phy_if];
17977 + mac_dev->max_speed = mac_dev->speed;
17978 +	mac_dev->if_support = DTSEC_SUPPORTED;
17979 +	/* We don't support half-duplex in the SGMII-based modes; strstr()
17980 +	 * below also matches "qsgmii" and "sgmii-2500", and char_prop may be
17981 +	 * NULL when phy-connection-type was absent. */
17982 +	if (char_prop && strstr(char_prop, "sgmii"))
17983 +		mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
17984 +					 SUPPORTED_100baseT_Half);
17985 +
17988 + /* Gigabit support (no half-duplex) */
17989 + if (mac_dev->max_speed == 1000)
17990 + mac_dev->if_support |= SUPPORTED_1000baseT_Full;
17991 +
17992 + /* The 10G interface only supports one mode */
17993 +	if (char_prop && strstr(char_prop, "xgmii"))
17994 + mac_dev->if_support = SUPPORTED_10000baseT_Full;
17995 +
17996 + /* Get the rest of the PHY information */
17997 + mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
17998 + if (mac_dev->phy_node == NULL) {
17999 + u32 phy_id;
18000 +
18001 + _errno = of_property_read_u32(mac_node, "fixed-link", &phy_id);
18002 + if (_errno) {
18003 + dev_err(dev, "No PHY (or fixed link) found\n");
18004 + _errno = -EINVAL;
18005 + goto _return_dev_set_drvdata;
18006 + }
18007 +
18008 + sprintf(mac_dev->fixed_bus_id, PHY_ID_FMT, "fixed-0",
18009 + phy_id);
18010 + }
18011 +
18012 + _errno = mac_dev->init(mac_dev);
18013 + if (unlikely(_errno < 0)) {
18014 + dev_err(dev, "mac_dev->init() = %d\n", _errno);
18015 + goto _return_dev_set_drvdata;
18016 + }
18017 +
18018 +	/* PAUSE frame autonegotiation is enabled by default */
18019 + mac_dev->autoneg_pause = true;
18020 +
18021 +	/* By initializing the active values to false, we force FMD to enable
18022 +	 * PAUSE frames on both RX and TX
18023 + */
18024 + mac_dev->rx_pause_req = mac_dev->tx_pause_req = true;
18025 + mac_dev->rx_pause_active = mac_dev->tx_pause_active = false;
18026 + _errno = set_mac_active_pause(mac_dev, true, true);
18027 + if (unlikely(_errno < 0))
18028 + dev_err(dev, "set_mac_active_pause() = %d\n", _errno);
18029 +
18030 + dev_info(dev,
18031 + "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
18032 + mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
18033 + mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
18034 +
18035 + goto _return;
18036 +
18037 +_return_of_node_put:
18038 + of_node_put(dev_node);
18039 +_return_dev_set_drvdata:
18040 + dev_set_drvdata(dev, NULL);
18041 +_return:
18042 + return _errno;
18043 +}
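For orientation, a hypothetical device-tree node carrying the properties
mac_probe() consumes could look like the sketch below (node name, unit
address, register range and phandles are illustrative only):

    /* ethernet@e0000 {
     *         compatible = "fsl,fman-memac";
     *         cell-index = <0>;
     *         reg = <0xe0000 0x1000>;
     *         fsl,port-handles = <&fman0_rx0 &fman0_tx0>;
     *         phy-connection-type = "rgmii";
     *         phy-handle = <&phy0>;
     * };
     */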
18044 +
18045 +static int __cold mac_remove(struct platform_device *of_dev)
18046 +{
18047 + int i, _errno;
18048 + struct device *dev;
18049 + struct mac_device *mac_dev;
18050 +
18051 + dev = &of_dev->dev;
18052 + mac_dev = (struct mac_device *)dev_get_drvdata(dev);
18053 +
18054 + for_each_port_device(i, mac_dev->port_dev)
18055 + fm_port_unbind(mac_dev->port_dev[i]);
18056 +
18057 + fm_unbind(mac_dev->fm_dev);
18058 +
18059 + _errno = free_macdev(mac_dev);
18060 +
18061 + return _errno;
18062 +}
18063 +
18064 +static struct platform_driver mac_driver = {
18065 + .driver = {
18066 + .name = KBUILD_MODNAME,
18067 + .of_match_table = mac_match,
18068 + .owner = THIS_MODULE,
18069 + },
18070 + .probe = mac_probe,
18071 + .remove = mac_remove
18072 +};
18073 +
18074 +static int __init __cold mac_load(void)
18075 +{
18076 + int _errno;
18077 +
18078 + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
18079 + KBUILD_BASENAME".c", __func__);
18080 +
18081 + pr_info(KBUILD_MODNAME ": %s\n", mac_driver_description);
18082 +
18083 + _errno = platform_driver_register(&mac_driver);
18084 + if (unlikely(_errno < 0)) {
18085 + pr_err(KBUILD_MODNAME ": %s:%hu:%s(): platform_driver_register() = %d\n",
18086 + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
18087 + goto _return;
18088 + }
18089 +
18090 + goto _return;
18091 +
18092 +_return:
18093 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
18094 + KBUILD_BASENAME".c", __func__);
18095 +
18096 + return _errno;
18097 +}
18098 +module_init(mac_load);
18099 +
18100 +static void __exit __cold mac_unload(void)
18101 +{
18102 + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
18103 + KBUILD_BASENAME".c", __func__);
18104 +
18105 + platform_driver_unregister(&mac_driver);
18106 +
18107 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
18108 + KBUILD_BASENAME".c", __func__);
18109 +}
18110 +module_exit(mac_unload);
18111 --- /dev/null
18112 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.h
18113 @@ -0,0 +1,134 @@
18114 +/* Copyright 2008-2011 Freescale Semiconductor, Inc.
18115 + *
18116 + * Redistribution and use in source and binary forms, with or without
18117 + * modification, are permitted provided that the following conditions are met:
18118 + * * Redistributions of source code must retain the above copyright
18119 + * notice, this list of conditions and the following disclaimer.
18120 + * * Redistributions in binary form must reproduce the above copyright
18121 + * notice, this list of conditions and the following disclaimer in the
18122 + * documentation and/or other materials provided with the distribution.
18123 + * * Neither the name of Freescale Semiconductor nor the
18124 + * names of its contributors may be used to endorse or promote products
18125 + * derived from this software without specific prior written permission.
18126 + *
18127 + *
18128 + * ALTERNATIVELY, this software may be distributed under the terms of the
18129 + * GNU General Public License ("GPL") as published by the Free Software
18130 + * Foundation, either version 2 of that License or (at your option) any
18131 + * later version.
18132 + *
18133 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
18134 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18135 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18136 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
18137 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
18138 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
18139 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
18140 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
18141 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
18142 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
18143 + */
18144 +
18145 +#ifndef __MAC_H
18146 +#define __MAC_H
18147 +
18148 +#include <linux/device.h> /* struct device, BUS_ID_SIZE */
18149 +#include <linux/if_ether.h> /* ETH_ALEN */
18150 +#include <linux/phy.h> /* phy_interface_t, struct phy_device */
18151 +#include <linux/list.h>
18152 +
18153 +#include "lnxwrp_fsl_fman.h" /* struct port_device */
18154 +
18155 +enum {DTSEC, XGMAC, MEMAC};
18156 +
18157 +struct mac_device {
18158 + struct device *dev;
18159 + void *priv;
18160 + uint8_t cell_index;
18161 + struct resource *res;
18162 + void __iomem *vaddr;
18163 + uint8_t addr[ETH_ALEN];
18164 + bool promisc;
18165 +
18166 + struct fm *fm_dev;
18167 + struct fm_port *port_dev[2];
18168 +
18169 + phy_interface_t phy_if;
18170 + u32 if_support;
18171 + bool link;
18172 + bool half_duplex;
18173 + uint16_t speed;
18174 + uint16_t max_speed;
18175 + struct device_node *phy_node;
18176 + char fixed_bus_id[MII_BUS_ID_SIZE + 3];
18177 + struct device_node *tbi_node;
18178 + struct phy_device *phy_dev;
18179 + void *fm;
18180 + /* List of multicast addresses */
18181 + struct list_head mc_addr_list;
18182 +
18183 + bool autoneg_pause;
18184 + bool rx_pause_req;
18185 + bool tx_pause_req;
18186 + bool rx_pause_active;
18187 + bool tx_pause_active;
18188 +
18189 + struct fm_mac_dev *(*get_mac_handle)(struct mac_device *mac_dev);
18190 + int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev);
18191 + int (*init)(struct mac_device *mac_dev);
18192 + int (*start)(struct mac_device *mac_dev);
18193 + int (*stop)(struct mac_device *mac_dev);
18194 + int (*set_promisc)(struct fm_mac_dev *fm_mac_dev, bool enable);
18195 + int (*change_addr)(struct fm_mac_dev *fm_mac_dev, uint8_t *addr);
18196 + int (*set_multi)(struct net_device *net_dev,
18197 + struct mac_device *mac_dev);
18198 + int (*uninit)(struct fm_mac_dev *fm_mac_dev);
18199 + int (*ptp_enable)(struct fm_mac_dev *fm_mac_dev);
18200 + int (*ptp_disable)(struct fm_mac_dev *fm_mac_dev);
18201 + int (*set_rx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
18202 + int (*set_tx_pause)(struct fm_mac_dev *fm_mac_dev, bool en);
18203 + int (*fm_rtc_enable)(struct fm *fm_dev);
18204 + int (*fm_rtc_disable)(struct fm *fm_dev);
18205 + int (*fm_rtc_get_cnt)(struct fm *fm_dev, uint64_t *ts);
18206 + int (*fm_rtc_set_cnt)(struct fm *fm_dev, uint64_t ts);
18207 + int (*fm_rtc_get_drift)(struct fm *fm_dev, uint32_t *drift);
18208 + int (*fm_rtc_set_drift)(struct fm *fm_dev, uint32_t drift);
18209 + int (*fm_rtc_set_alarm)(struct fm *fm_dev, uint32_t id, uint64_t time);
18210 + int (*fm_rtc_set_fiper)(struct fm *fm_dev, uint32_t id,
18211 + uint64_t fiper);
18212 +#ifdef CONFIG_PTP_1588_CLOCK_DPAA
18213 + int (*fm_rtc_enable_interrupt)(struct fm *fm_dev, uint32_t events);
18214 + int (*fm_rtc_disable_interrupt)(struct fm *fm_dev, uint32_t events);
18215 +#endif
18216 + int (*set_wol)(struct fm_port *port, struct fm_mac_dev *fm_mac_dev,
18217 + bool en);
18218 + int (*dump_mac_regs)(struct mac_device *h_mac, char *buf, int nn);
18219 + int (*dump_mac_rx_stats)(struct mac_device *h_mac, char *buf, int nn);
18220 + int (*dump_mac_tx_stats)(struct mac_device *h_mac, char *buf, int nn);
18221 +};
18222 +
18223 +struct mac_address {
18224 + uint8_t addr[ETH_ALEN];
18225 + struct list_head list;
18226 +};
18227 +
18228 +#define get_fm_handle(net_dev) \
18229 + (((struct dpa_priv_s *)netdev_priv(net_dev))->mac_dev->fm_dev)
18230 +
18231 +#define for_each_port_device(i, port_dev) \
18232 + for (i = 0; i < ARRAY_SIZE(port_dev); i++)
18233 +
18234 +static inline __attribute__((nonnull)) void *macdev_priv(
18235 + const struct mac_device *mac_dev)
18236 +{
18237 + return (void *)mac_dev + sizeof(*mac_dev);
18238 +}
18239 +
18240 +extern const char *mac_driver_description;
18241 +extern const size_t mac_sizeof_priv[];
18242 +extern void (*const mac_setup[])(struct mac_device *mac_dev);
18243 +
18244 +int set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
18245 +void get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, bool *tx_pause);
18246 +
18247 +#endif /* __MAC_H */
18248 --- /dev/null
18249 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.c
18250 @@ -0,0 +1,848 @@
18251 +/* Copyright 2011-2012 Freescale Semiconductor Inc.
18252 + *
18253 + * Redistribution and use in source and binary forms, with or without
18254 + * modification, are permitted provided that the following conditions are met:
18255 + * * Redistributions of source code must retain the above copyright
18256 + * notice, this list of conditions and the following disclaimer.
18257 + * * Redistributions in binary form must reproduce the above copyright
18258 + * notice, this list of conditions and the following disclaimer in the
18259 + * documentation and/or other materials provided with the distribution.
18260 + * * Neither the name of Freescale Semiconductor nor the
18261 + * names of its contributors may be used to endorse or promote products
18262 + * derived from this software without specific prior written permission.
18263 + *
18264 + *
18265 + * ALTERNATIVELY, this software may be distributed under the terms of the
18266 + * GNU General Public License ("GPL") as published by the Free Software
18267 + * Foundation, either version 2 of that License or (at your option) any
18268 + * later version.
18269 + *
18270 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
18271 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18272 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18273 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
18274 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
18275 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
18276 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
18277 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
18278 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
18279 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
18280 + */
18281 +
18282 +/* Offline Parsing / Host Command port driver for FSL QorIQ FMan.
18283 + * Validates device-tree configuration and sets up the offline ports.
18284 + */
18285 +
18286 +#ifdef CONFIG_FSL_DPAA_ETH_DEBUG
18287 +#define pr_fmt(fmt) \
18288 + KBUILD_MODNAME ": %s:%hu:%s() " fmt, \
18289 + KBUILD_BASENAME".c", __LINE__, __func__
18290 +#else
18291 +#define pr_fmt(fmt) \
18292 + KBUILD_MODNAME ": " fmt
18293 +#endif
18294 +
18295 +
18296 +#include <linux/init.h>
18297 +#include <linux/module.h>
18298 +#include <linux/of_platform.h>
18299 +#include <linux/fsl_qman.h>
18300 +
18301 +#include "offline_port.h"
18302 +#include "dpaa_eth.h"
18303 +#include "dpaa_eth_common.h"
18304 +
18305 +#define OH_MOD_DESCRIPTION "FSL FMan Offline Parsing port driver"
18306 +/* Manip extra space and data alignment for fragmentation */
18307 +#define FRAG_MANIP_SPACE 128
18308 +#define FRAG_DATA_ALIGN 64
18309 +
18310 +
18311 +MODULE_LICENSE("Dual BSD/GPL");
18312 +MODULE_AUTHOR("Bogdan Hamciuc <bogdan.hamciuc@freescale.com>");
18313 +MODULE_DESCRIPTION(OH_MOD_DESCRIPTION);
18314 +
18315 +
18316 +static const struct of_device_id oh_port_match_table[] = {
18317 + {
18318 + .compatible = "fsl,dpa-oh"
18319 + },
18320 + {
18321 + .compatible = "fsl,dpa-oh-shared"
18322 + },
18323 + {}
18324 +};
18325 +MODULE_DEVICE_TABLE(of, oh_port_match_table);
18326 +
18327 +#ifdef CONFIG_PM
18328 +
18329 +static int oh_suspend(struct device *dev)
18330 +{
18331 + struct dpa_oh_config_s *oh_config;
18332 +
18333 + oh_config = dev_get_drvdata(dev);
18334 + return fm_port_suspend(oh_config->oh_port);
18335 +}
18336 +
18337 +static int oh_resume(struct device *dev)
18338 +{
18339 + struct dpa_oh_config_s *oh_config;
18340 +
18341 + oh_config = dev_get_drvdata(dev);
18342 + return fm_port_resume(oh_config->oh_port);
18343 +}
18344 +
18345 +static const struct dev_pm_ops oh_pm_ops = {
18346 + .suspend = oh_suspend,
18347 + .resume = oh_resume,
18348 +};
18349 +
18350 +#define OH_PM_OPS (&oh_pm_ops)
18351 +
18352 +#else /* CONFIG_PM */
18353 +
18354 +#define OH_PM_OPS NULL
18355 +
18356 +#endif /* CONFIG_PM */
18357 +
18358 +/* Creates Frame Queues */
18359 +static uint32_t oh_fq_create(struct qman_fq *fq,
18360 + uint32_t fq_id, uint16_t channel,
18361 + uint16_t wq_id)
18362 +{
18363 + struct qm_mcc_initfq fq_opts;
18364 + uint32_t create_flags, init_flags;
18365 + uint32_t ret = 0;
18366 +
18367 + if (fq == NULL)
18368 + return 1;
18369 +
18370 + /* Set flags for FQ create */
18371 + create_flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_TO_DCPORTAL;
18372 +
18373 + /* Create frame queue */
18374 + ret = qman_create_fq(fq_id, create_flags, fq);
18375 + if (ret != 0)
18376 + return 1;
18377 +
18378 + /* Set flags for FQ init */
18379 + init_flags = QMAN_INITFQ_FLAG_SCHED;
18380 +
18381 + /* Set FQ init options. Specify destination WQ ID and channel */
18382 + fq_opts.we_mask = QM_INITFQ_WE_DESTWQ;
18383 + fq_opts.fqd.dest.wq = wq_id;
18384 + fq_opts.fqd.dest.channel = channel;
18385 +
18386 + /* Initialize frame queue */
18387 + ret = qman_init_fq(fq, init_flags, &fq_opts);
18388 + if (ret != 0) {
18389 + qman_destroy_fq(fq, 0);
18390 + return 1;
18391 + }
18392 +
18393 + return 0;
18394 +}
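A hedged usage sketch for the helper above (the FQID, channel and work-queue
values are made up; the qman_fq must stay allocated for the lifetime of the
queue, and oh_fq_destroy() further below is the matching teardown):

    struct qman_fq *fq = kzalloc(sizeof(*fq), GFP_KERNEL);

    if (!fq || oh_fq_create(fq, 0x540 /* fqid */, 0x42 /* channel */,
                            3 /* wq */))
            pr_err("failed to set up egress FQ 0x540\n");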
18395 +
18396 +static void dump_fq(struct device *dev, int fqid, uint16_t channel)
18397 +{
18398 + if (channel) {
18399 + /* display fqs with a valid (!= 0) destination channel */
18400 + dev_info(dev, "FQ ID:%d Channel ID:%d\n", fqid, channel);
18401 + }
18402 +}
18403 +
18404 +static void dump_fq_duple(struct device *dev, struct qman_fq *fqs,
18405 + int fqs_count, uint16_t channel_id)
18406 +{
18407 + int i;
18408 + for (i = 0; i < fqs_count; i++)
18409 + dump_fq(dev, (fqs + i)->fqid, channel_id);
18410 +}
18411 +
18412 +static void dump_oh_config(struct device *dev, struct dpa_oh_config_s *conf)
18413 +{
18414 + struct list_head *fq_list;
18415 + struct fq_duple *fqd;
18417 +
18418 + dev_info(dev, "Default egress frame queue: %d\n", conf->default_fqid);
18419 + dev_info(dev, "Default error frame queue: %d\n", conf->error_fqid);
18420 +
18421 + /* TX queues (old initialization) */
18422 +	dev_info(dev, "Initialized queues:\n");
18423 +	dump_fq_duple(dev, conf->egress_fqs, conf->egress_cnt,
18424 +		      conf->channel);
18426 +
18427 + /* initialized ingress queues */
18428 + list_for_each(fq_list, &conf->fqs_ingress_list) {
18429 + fqd = list_entry(fq_list, struct fq_duple, fq_list);
18430 + dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id);
18431 + }
18432 +
18433 + /* initialized egress queues */
18434 + list_for_each(fq_list, &conf->fqs_egress_list) {
18435 + fqd = list_entry(fq_list, struct fq_duple, fq_list);
18436 + dump_fq_duple(dev, fqd->fqs, fqd->fqs_count, fqd->channel_id);
18437 + }
18438 +}
18439 +
18440 +/* Destroys Frame Queues */
18441 +static void oh_fq_destroy(struct qman_fq *fq)
18442 +{
18443 + int _errno = 0;
18444 +
18445 + _errno = qman_retire_fq(fq, NULL);
18446 + if (unlikely(_errno < 0))
18447 + pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_retire_fq(%u)=%d\n",
18448 + KBUILD_BASENAME".c", __LINE__, __func__,
18449 + qman_fq_fqid(fq), _errno);
18450 +
18451 + _errno = qman_oos_fq(fq);
18452 + if (unlikely(_errno < 0)) {
18453 + pr_err(KBUILD_MODNAME": %s:%hu:%s(): qman_oos_fq(%u)=%d\n",
18454 + KBUILD_BASENAME".c", __LINE__, __func__,
18455 + qman_fq_fqid(fq), _errno);
18456 + }
18457 +
18458 + qman_destroy_fq(fq, 0);
18459 +}
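+
+/* The sequence above follows the QMan teardown order for a scheduled FQ:
+ * retire it, take it out of service, then destroy it - undoing what
+ * oh_fq_create() set up.
+ */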
18460 +
18461 +/* Allocation code for the OH port's PCD frame queues */
18462 +static int __cold oh_alloc_pcd_fqids(struct device *dev,
18463 + uint32_t num,
18464 + uint8_t alignment,
18465 + uint32_t *base_fqid)
18466 +{
18467 + dev_crit(dev, "callback not implemented!\n");
18468 + BUG();
18469 +
18470 + return 0;
18471 +}
18472 +
18473 +static int __cold oh_free_pcd_fqids(struct device *dev, uint32_t base_fqid)
18474 +{
18475 + dev_crit(dev, "callback not implemented!\n");
18476 + BUG();
18477 +
18478 + return 0;
18479 +}
18480 +
18481 +static void oh_set_buffer_layout(struct fm_port *port,
18482 + struct dpa_buffer_layout_s *layout)
18483 +{
18484 + struct fm_port_params params;
18485 +
18486 + layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE;
18487 + layout->parse_results = true;
18488 + layout->hash_results = true;
18489 + layout->time_stamp = false;
18490 +
18491 + fm_port_get_buff_layout_ext_params(port, &params);
18492 + layout->manip_extra_space = params.manip_extra_space;
18493 + layout->data_align = params.data_align;
18494 +}
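+
+/* Note: the extended params read here decide fragmentation support later
+ * in oh_port_probe() - the port must report data_align == FRAG_DATA_ALIGN
+ * (64) and manip_extra_space == FRAG_MANIP_SPACE (128) for IP
+ * fragmentation to be enabled.
+ */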
18495 +
18496 +static int
18497 +oh_port_probe(struct platform_device *_of_dev)
18498 +{
18499 + struct device *dpa_oh_dev;
18500 + struct device_node *dpa_oh_node;
18501 + int lenp, _errno = 0, fq_idx, duple_idx;
18502 + int n_size, i, j, ret, duples_count;
18503 + struct platform_device *oh_of_dev;
18504 + struct device_node *oh_node, *bpool_node = NULL, *root_node;
18505 + struct device *oh_dev;
18506 + struct dpa_oh_config_s *oh_config = NULL;
18507 + const __be32 *oh_all_queues;
18508 + const __be32 *channel_ids;
18509 + const __be32 *oh_tx_queues;
18510 + uint32_t queues_count;
18511 + uint32_t crt_fqid_base;
18512 + uint32_t crt_fq_count;
18513 + bool frag_enabled = false;
18514 + struct fm_port_params oh_port_tx_params;
18515 + struct fm_port_pcd_param oh_port_pcd_params;
18516 + struct dpa_buffer_layout_s buf_layout;
18517 +
18518 + /* True if the current partition owns the OH port. */
18519 + bool init_oh_port;
18520 +
18521 + const struct of_device_id *match;
18522 + int crt_ext_pools_count;
18523 + u32 ext_pool_size;
18524 + u32 port_id;
18525 + u32 channel_id;
18526 +
18527 + int channel_ids_count;
18528 + int channel_idx;
18529 + struct fq_duple *fqd;
18530 + struct list_head *fq_list, *fq_list_tmp;
18531 +
18532 + const __be32 *bpool_cfg;
18533 + uint32_t bpid;
18534 +
18535 + memset(&oh_port_tx_params, 0, sizeof(oh_port_tx_params));
18536 + dpa_oh_dev = &_of_dev->dev;
18537 + dpa_oh_node = dpa_oh_dev->of_node;
18538 + BUG_ON(dpa_oh_node == NULL);
18539 +
18540 + match = of_match_device(oh_port_match_table, dpa_oh_dev);
18541 + if (!match)
18542 + return -EINVAL;
18543 +
18544 + dev_dbg(dpa_oh_dev, "Probing OH port...\n");
18545 +
18546 + /* Find the referenced OH node */
18547 + oh_node = of_parse_phandle(dpa_oh_node, "fsl,fman-oh-port", 0);
18548 + if (oh_node == NULL) {
18549 + dev_err(dpa_oh_dev,
18550 + "Can't find OH node referenced from node %s\n",
18551 + dpa_oh_node->full_name);
18552 + return -EINVAL;
18553 + }
18554 + dev_info(dpa_oh_dev, "Found OH node handle compatible with %s\n",
18555 + match->compatible);
18556 +
18557 + _errno = of_property_read_u32(oh_node, "cell-index", &port_id);
18558 + if (_errno) {
18559 + dev_err(dpa_oh_dev, "No port id found in node %s\n",
18560 + dpa_oh_node->full_name);
18561 + goto return_kfree;
18562 + }
18563 +
18564 + _errno = of_property_read_u32(oh_node, "fsl,qman-channel-id",
18565 + &channel_id);
18566 + if (_errno) {
18567 + dev_err(dpa_oh_dev, "No channel id found in node %s\n",
18568 + dpa_oh_node->full_name);
18569 + goto return_kfree;
18570 + }
18571 +
18572 + oh_of_dev = of_find_device_by_node(oh_node);
18573 + BUG_ON(oh_of_dev == NULL);
18574 + oh_dev = &oh_of_dev->dev;
18575 +
18576 +	/* The OH port must be initialized exactly once.
18577 +	 * The following scenarios are of interest:
18578 +	 * - the node is Linux-private ("fsl,dpa-oh"): this partition
18579 +	 *   always initializes it;
18580 +	 * - the node is shared with another partition (Linux or LWE) and
18581 +	 *   marked "fsl,dpa-oh-shared": the owning partition initializes
18582 +	 *   it, and this driver skips it.
18583 +	 */
18584 +
18585 +	/* Check if the current partition owns the OH port and ought to
18586 +	 * initialize it; a non-zero strcmp() result means the plain
18587 +	 * "fsl,dpa-oh" compatible matched, i.e. we own the port.
18588 +	 */
18589 + init_oh_port = strcmp(match->compatible, "fsl,dpa-oh-shared");
18590 +
18591 + /* If we aren't the "owner" of the OH node, we're done here. */
18592 + if (!init_oh_port) {
18593 + dev_dbg(dpa_oh_dev,
18594 + "Not owning the shared OH port %s, will not initialize it.\n",
18595 + oh_node->full_name);
18596 + of_node_put(oh_node);
18597 + return 0;
18598 + }
18599 +
18600 + /* Allocate OH dev private data */
18601 + oh_config = devm_kzalloc(dpa_oh_dev, sizeof(*oh_config), GFP_KERNEL);
18602 + if (oh_config == NULL) {
18603 + dev_err(dpa_oh_dev,
18604 + "Can't allocate private data for OH node %s referenced from node %s!\n",
18605 + oh_node->full_name, dpa_oh_node->full_name);
18606 + _errno = -ENOMEM;
18607 + goto return_kfree;
18608 + }
18609 +
18610 + INIT_LIST_HEAD(&oh_config->fqs_ingress_list);
18611 + INIT_LIST_HEAD(&oh_config->fqs_egress_list);
18612 +
18613 + /* FQs that enter OH port */
18614 + lenp = 0;
18615 + oh_all_queues = of_get_property(dpa_oh_node,
18616 + "fsl,qman-frame-queues-ingress", &lenp);
18617 + if (lenp % (2 * sizeof(*oh_all_queues))) {
18618 + dev_warn(dpa_oh_dev,
18619 + "Wrong ingress queues format for OH node %s referenced from node %s!\n",
18620 + oh_node->full_name, dpa_oh_node->full_name);
18621 + /* just ignore the last unpaired value */
18622 + }
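+
+	/* Worked example (illustrative values): a property of
+	 * <0x70 2 0x80 4> has lenp = 16 bytes, i.e. two (base, count)
+	 * duples describing FQs 0x70-0x71 and 0x80-0x83.
+	 */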
18623 +
18624 + duples_count = lenp / (2 * sizeof(*oh_all_queues));
18625 +	dev_dbg(dpa_oh_dev, "Allocating %d ingress frame queues duples\n",
18626 + duples_count);
18627 + for (duple_idx = 0; duple_idx < duples_count; duple_idx++) {
18628 + crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]);
18629 + crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]);
18630 +
18631 + fqd = devm_kzalloc(dpa_oh_dev,
18632 + sizeof(struct fq_duple), GFP_KERNEL);
18633 + if (!fqd) {
18634 + dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n",
18635 + oh_node->full_name,
18636 + dpa_oh_node->full_name);
18637 + _errno = -ENOMEM;
18638 + goto return_kfree;
18639 + }
18640 +
18641 + fqd->fqs = devm_kzalloc(dpa_oh_dev,
18642 + crt_fq_count * sizeof(struct qman_fq),
18643 + GFP_KERNEL);
18644 + if (!fqd->fqs) {
18645 + dev_err(dpa_oh_dev, "Can't allocate structures for ingress frame queues for OH node %s referenced from node %s!\n",
18646 + oh_node->full_name,
18647 + dpa_oh_node->full_name);
18648 + _errno = -ENOMEM;
18649 + goto return_kfree;
18650 + }
18651 +
18652 + for (j = 0; j < crt_fq_count; j++)
18653 + (fqd->fqs + j)->fqid = crt_fqid_base + j;
18654 + fqd->fqs_count = crt_fq_count;
18655 + fqd->channel_id = (uint16_t)channel_id;
18656 + list_add(&fqd->fq_list, &oh_config->fqs_ingress_list);
18657 + }
18658 +
18659 + /* create the ingress queues */
18660 + list_for_each(fq_list, &oh_config->fqs_ingress_list) {
18661 + fqd = list_entry(fq_list, struct fq_duple, fq_list);
18662 +
18663 + for (j = 0; j < fqd->fqs_count; j++) {
18664 + ret = oh_fq_create(fqd->fqs + j,
18665 + (fqd->fqs + j)->fqid,
18666 + fqd->channel_id, 3);
18667 + if (ret != 0) {
18668 + dev_err(dpa_oh_dev, "Unable to create ingress frame queue %d for OH node %s referenced from node %s!\n",
18669 + (fqd->fqs + j)->fqid,
18670 + oh_node->full_name,
18671 + dpa_oh_node->full_name);
18672 + _errno = -EINVAL;
18673 + goto return_kfree;
18674 + }
18675 + }
18676 + }
18677 +
18678 + /* FQs that exit OH port */
18679 + lenp = 0;
18680 + oh_all_queues = of_get_property(dpa_oh_node,
18681 + "fsl,qman-frame-queues-egress", &lenp);
18682 + if (lenp % (2 * sizeof(*oh_all_queues))) {
18683 + dev_warn(dpa_oh_dev,
18684 + "Wrong egress queues format for OH node %s referenced from node %s!\n",
18685 + oh_node->full_name, dpa_oh_node->full_name);
18686 + /* just ignore the last unpaired value */
18687 + }
18688 +
18689 + duples_count = lenp / (2 * sizeof(*oh_all_queues));
18690 + dev_dbg(dpa_oh_dev, "Allocating %d egress frame queues duples\n",
18691 + duples_count);
18692 + for (duple_idx = 0; duple_idx < duples_count; duple_idx++) {
18693 + crt_fqid_base = be32_to_cpu(oh_all_queues[2 * duple_idx]);
18694 + crt_fq_count = be32_to_cpu(oh_all_queues[2 * duple_idx + 1]);
18695 +
18696 + fqd = devm_kzalloc(dpa_oh_dev,
18697 + sizeof(struct fq_duple), GFP_KERNEL);
18698 + if (!fqd) {
18699 + dev_err(dpa_oh_dev, "Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n",
18700 + oh_node->full_name,
18701 + dpa_oh_node->full_name);
18702 + _errno = -ENOMEM;
18703 + goto return_kfree;
18704 + }
18705 +
18706 + fqd->fqs = devm_kzalloc(dpa_oh_dev,
18707 + crt_fq_count * sizeof(struct qman_fq),
18708 + GFP_KERNEL);
18709 + if (!fqd->fqs) {
18710 + dev_err(dpa_oh_dev,
18711 + "Can't allocate structures for egress frame queues for OH node %s referenced from node %s!\n",
18712 + oh_node->full_name,
18713 + dpa_oh_node->full_name);
18714 + _errno = -ENOMEM;
18715 + goto return_kfree;
18716 + }
18717 +
18718 + for (j = 0; j < crt_fq_count; j++)
18719 + (fqd->fqs + j)->fqid = crt_fqid_base + j;
18720 + fqd->fqs_count = crt_fq_count;
18721 + /* channel ID is specified in another attribute */
18722 + fqd->channel_id = 0;
18723 + list_add_tail(&fqd->fq_list, &oh_config->fqs_egress_list);
18724 +
18725 +		/* the FQs themselves are created further below, once the
+		 * egress channel ids have been read */
18727 + }
18728 +
18729 + /* channel_ids for FQs that exit OH port */
18730 + lenp = 0;
18731 + channel_ids = of_get_property(dpa_oh_node,
18732 + "fsl,qman-channel-ids-egress", &lenp);
18733 +
18734 + channel_ids_count = lenp / (sizeof(*channel_ids));
18735 + if (channel_ids_count != duples_count) {
18736 + dev_warn(dpa_oh_dev,
18737 + "Not all egress queues have a channel id for OH node %s referenced from node %s!\n",
18738 + oh_node->full_name, dpa_oh_node->full_name);
18739 + /* just ignore the queues that do not have a Channel ID */
18740 + }
18741 +
18742 + channel_idx = 0;
18743 + list_for_each(fq_list, &oh_config->fqs_egress_list) {
18744 +		if (channel_idx >= channel_ids_count)
18745 + break;
18746 + fqd = list_entry(fq_list, struct fq_duple, fq_list);
18747 + fqd->channel_id =
18748 + (uint16_t)be32_to_cpu(channel_ids[channel_idx++]);
18749 + }
18750 +
18751 + /* create egress queues */
18752 + list_for_each(fq_list, &oh_config->fqs_egress_list) {
18753 + fqd = list_entry(fq_list, struct fq_duple, fq_list);
18754 +
18755 + if (fqd->channel_id == 0) {
18756 + /* missing channel id in dts */
18757 + continue;
18758 + }
18759 +
18760 + for (j = 0; j < fqd->fqs_count; j++) {
18761 + ret = oh_fq_create(fqd->fqs + j,
18762 + (fqd->fqs + j)->fqid,
18763 + fqd->channel_id, 3);
18764 + if (ret != 0) {
18765 + dev_err(dpa_oh_dev, "Unable to create egress frame queue %d for OH node %s referenced from node %s!\n",
18766 + (fqd->fqs + j)->fqid,
18767 + oh_node->full_name,
18768 + dpa_oh_node->full_name);
18769 + _errno = -EINVAL;
18770 + goto return_kfree;
18771 + }
18772 + }
18773 + }
18774 +
18775 + /* Read FQ ids/nums for the DPA OH node */
18776 + oh_all_queues = of_get_property(dpa_oh_node,
18777 + "fsl,qman-frame-queues-oh", &lenp);
18778 + if (oh_all_queues == NULL) {
18779 + dev_err(dpa_oh_dev,
18780 + "No frame queues have been defined for OH node %s referenced from node %s\n",
18781 + oh_node->full_name, dpa_oh_node->full_name);
18782 + _errno = -EINVAL;
18783 + goto return_kfree;
18784 + }
18785 +
18786 + /* Check that the OH error and default FQs are there */
18787 + BUG_ON(lenp % (2 * sizeof(*oh_all_queues)));
18788 + queues_count = lenp / (2 * sizeof(*oh_all_queues));
18789 + if (queues_count != 2) {
18790 + dev_err(dpa_oh_dev,
18791 + "Error and Default queues must be defined for OH node %s referenced from node %s\n",
18792 + oh_node->full_name, dpa_oh_node->full_name);
18793 + _errno = -EINVAL;
18794 + goto return_kfree;
18795 + }
18796 +
18797 + /* Read the FQIDs defined for this OH port */
18798 + dev_dbg(dpa_oh_dev, "Reading %d queues...\n", queues_count);
18799 + fq_idx = 0;
18800 +
18801 + /* Error FQID - must be present */
18802 + crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]);
18803 + crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]);
18804 + if (crt_fq_count != 1) {
18805 + dev_err(dpa_oh_dev,
18806 + "Only 1 Error FQ allowed in OH node %s referenced from node %s (read: %d FQIDs).\n",
18807 + oh_node->full_name, dpa_oh_node->full_name,
18808 + crt_fq_count);
18809 + _errno = -EINVAL;
18810 + goto return_kfree;
18811 + }
18812 + oh_config->error_fqid = crt_fqid_base;
18813 + dev_dbg(dpa_oh_dev, "Read Error FQID 0x%x for OH port %s.\n",
18814 + oh_config->error_fqid, oh_node->full_name);
18815 +
18816 + /* Default FQID - must be present */
18817 + crt_fqid_base = be32_to_cpu(oh_all_queues[fq_idx++]);
18818 + crt_fq_count = be32_to_cpu(oh_all_queues[fq_idx++]);
18819 + if (crt_fq_count != 1) {
18820 + dev_err(dpa_oh_dev,
18821 + "Only 1 Default FQ allowed in OH node %s referenced from %s (read: %d FQIDs).\n",
18822 + oh_node->full_name, dpa_oh_node->full_name,
18823 + crt_fq_count);
18824 + _errno = -EINVAL;
18825 + goto return_kfree;
18826 + }
18827 + oh_config->default_fqid = crt_fqid_base;
18828 + dev_dbg(dpa_oh_dev, "Read Default FQID 0x%x for OH port %s.\n",
18829 + oh_config->default_fqid, oh_node->full_name);
18830 +
18831 + /* TX FQID - presence is optional */
18832 + oh_tx_queues = of_get_property(dpa_oh_node, "fsl,qman-frame-queues-tx",
18833 + &lenp);
18834 + if (oh_tx_queues == NULL) {
18835 + dev_dbg(dpa_oh_dev,
18836 + "No tx queues have been defined for OH node %s referenced from node %s\n",
18837 + oh_node->full_name, dpa_oh_node->full_name);
18838 + goto config_port;
18839 + }
18840 +
18841 + /* Check that queues-tx has only a base and a count defined */
18842 + BUG_ON(lenp % (2 * sizeof(*oh_tx_queues)));
18843 + queues_count = lenp / (2 * sizeof(*oh_tx_queues));
18844 + if (queues_count != 1) {
18845 + dev_err(dpa_oh_dev,
18846 + "TX queues must be defined in only one <base count> tuple for OH node %s referenced from node %s\n",
18847 + oh_node->full_name, dpa_oh_node->full_name);
18848 + _errno = -EINVAL;
18849 + goto return_kfree;
18850 + }
18851 +
18852 + fq_idx = 0;
18853 + crt_fqid_base = be32_to_cpu(oh_tx_queues[fq_idx++]);
18854 + crt_fq_count = be32_to_cpu(oh_tx_queues[fq_idx++]);
18855 +	oh_config->egress_cnt = crt_fq_count;
+	/* remember the egress channel so dump_oh_config() can report it */
+	oh_config->channel = (uint16_t)channel_id;
18856 +
18857 + /* Allocate TX queues */
18858 + dev_dbg(dpa_oh_dev, "Allocating %d queues for TX...\n", crt_fq_count);
18859 + oh_config->egress_fqs = devm_kzalloc(dpa_oh_dev,
18860 + crt_fq_count * sizeof(struct qman_fq), GFP_KERNEL);
18861 + if (oh_config->egress_fqs == NULL) {
18862 + dev_err(dpa_oh_dev,
18863 + "Can't allocate private data for TX queues for OH node %s referenced from node %s!\n",
18864 + oh_node->full_name, dpa_oh_node->full_name);
18865 + _errno = -ENOMEM;
18866 + goto return_kfree;
18867 + }
18868 +
18869 + /* Create TX queues */
18870 + for (i = 0; i < crt_fq_count; i++) {
18871 + ret = oh_fq_create(oh_config->egress_fqs + i,
18872 + crt_fqid_base + i, (uint16_t)channel_id, 3);
18873 + if (ret != 0) {
18874 + dev_err(dpa_oh_dev,
18875 + "Unable to create TX frame queue %d for OH node %s referenced from node %s!\n",
18876 + crt_fqid_base + i, oh_node->full_name,
18877 + dpa_oh_node->full_name);
18878 + _errno = -EINVAL;
18879 + goto return_kfree;
18880 + }
18881 + }
18882 +
18883 +config_port:
18884 + /* Get a handle to the fm_port so we can set
18885 + * its configuration params
18886 + */
18887 + oh_config->oh_port = fm_port_bind(oh_dev);
18888 + if (oh_config->oh_port == NULL) {
18889 + dev_err(dpa_oh_dev, "NULL drvdata from fm port dev %s!\n",
18890 + oh_node->full_name);
18891 + _errno = -EINVAL;
18892 + goto return_kfree;
18893 + }
18894 +
18895 + oh_set_buffer_layout(oh_config->oh_port, &buf_layout);
18896 +
18897 + /* read the pool handlers */
18898 + crt_ext_pools_count = of_count_phandle_with_args(dpa_oh_node,
18899 + "fsl,bman-buffer-pools", NULL);
18900 + if (crt_ext_pools_count <= 0) {
18901 + dev_info(dpa_oh_dev,
18902 + "OH port %s has no buffer pool. Fragmentation will not be enabled\n",
18903 + oh_node->full_name);
18904 + goto init_port;
18905 + }
18906 +
18907 + /* used for reading ext_pool_size*/
18908 + root_node = of_find_node_by_path("/");
18909 + if (root_node == NULL) {
18910 + dev_err(dpa_oh_dev, "of_find_node_by_path(/) failed\n");
18911 + _errno = -EINVAL;
18912 + goto return_kfree;
18913 + }
18914 +
18915 + n_size = of_n_size_cells(root_node);
18916 + of_node_put(root_node);
18917 +
18918 + dev_dbg(dpa_oh_dev, "OH port number of pools = %d\n",
18919 + crt_ext_pools_count);
18920 +
18921 + oh_port_tx_params.num_pools = (uint8_t)crt_ext_pools_count;
18922 +
18923 + for (i = 0; i < crt_ext_pools_count; i++) {
18924 + bpool_node = of_parse_phandle(dpa_oh_node,
18925 + "fsl,bman-buffer-pools", i);
18926 + if (bpool_node == NULL) {
18927 + dev_err(dpa_oh_dev, "Invalid Buffer pool node\n");
18928 + _errno = -EINVAL;
18929 + goto return_kfree;
18930 + }
18931 +
18932 + _errno = of_property_read_u32(bpool_node, "fsl,bpid", &bpid);
18933 + if (_errno) {
18934 + dev_err(dpa_oh_dev, "Invalid Buffer Pool ID\n");
18935 + _errno = -EINVAL;
18936 + goto return_kfree;
18937 + }
18938 +
18939 + oh_port_tx_params.pool_param[i].id = (uint8_t)bpid;
18940 + dev_dbg(dpa_oh_dev, "OH port bpool id = %u\n", bpid);
18941 +
18942 + bpool_cfg = of_get_property(bpool_node,
18943 + "fsl,bpool-ethernet-cfg", &lenp);
18944 + if (bpool_cfg == NULL) {
18945 + dev_err(dpa_oh_dev, "Invalid Buffer pool config params\n");
18946 + _errno = -EINVAL;
18947 + goto return_kfree;
18948 + }
18949 +
18950 + ext_pool_size = of_read_number(bpool_cfg + n_size, n_size);
18951 + oh_port_tx_params.pool_param[i].size = (uint16_t)ext_pool_size;
18952 + dev_dbg(dpa_oh_dev, "OH port bpool size = %u\n",
18953 + ext_pool_size);
18954 +		of_node_put(bpool_node);
+		bpool_node = NULL;	/* avoid a double put on the error path */
18956 +	}
18957 +
18958 + if (buf_layout.data_align != FRAG_DATA_ALIGN ||
18959 + buf_layout.manip_extra_space != FRAG_MANIP_SPACE)
18960 + goto init_port;
18961 +
18962 + frag_enabled = true;
18963 +	dev_info(dpa_oh_dev, "IP Fragmentation enabled for OH port %d\n",
18964 + port_id);
18965 +
18966 +init_port:
18967 +	of_node_put(oh_node);
+	oh_node = NULL;	/* avoid a double put on the error path below */
18968 + /* Set Tx params */
18969 + dpaa_eth_init_port(tx, oh_config->oh_port, oh_port_tx_params,
18970 + oh_config->error_fqid, oh_config->default_fqid, (&buf_layout),
18971 + frag_enabled);
18972 + /* Set PCD params */
18973 + oh_port_pcd_params.cba = oh_alloc_pcd_fqids;
18974 + oh_port_pcd_params.cbf = oh_free_pcd_fqids;
18975 + oh_port_pcd_params.dev = dpa_oh_dev;
18976 + fm_port_pcd_bind(oh_config->oh_port, &oh_port_pcd_params);
18977 +
18978 + dev_set_drvdata(dpa_oh_dev, oh_config);
18979 +
18980 + /* Enable the OH port */
18981 + _errno = fm_port_enable(oh_config->oh_port);
18982 + if (_errno)
18983 + goto return_kfree;
18984 +
18985 + dev_info(dpa_oh_dev, "OH port %s enabled.\n", oh_node->full_name);
18986 +
18987 + /* print of all referenced & created queues */
18988 + dump_oh_config(dpa_oh_dev, oh_config);
18989 +
18990 + return 0;
18991 +
18992 +return_kfree:
18993 + if (bpool_node)
18994 + of_node_put(bpool_node);
18995 + if (oh_node)
18996 + of_node_put(oh_node);
18997 +	/* the early error paths jump here before oh_config is allocated */
+	if (oh_config == NULL)
+		return _errno;
+	if (oh_config->egress_fqs)
18998 +		devm_kfree(dpa_oh_dev, oh_config->egress_fqs);
18999 +
19000 + list_for_each_safe(fq_list, fq_list_tmp, &oh_config->fqs_ingress_list) {
19001 + fqd = list_entry(fq_list, struct fq_duple, fq_list);
19002 + list_del(fq_list);
19003 + devm_kfree(dpa_oh_dev, fqd->fqs);
19004 + devm_kfree(dpa_oh_dev, fqd);
19005 + }
19006 +
19007 + list_for_each_safe(fq_list, fq_list_tmp, &oh_config->fqs_egress_list) {
19008 + fqd = list_entry(fq_list, struct fq_duple, fq_list);
19009 + list_del(fq_list);
19010 + devm_kfree(dpa_oh_dev, fqd->fqs);
19011 + devm_kfree(dpa_oh_dev, fqd);
19012 + }
19013 +
19014 + devm_kfree(dpa_oh_dev, oh_config);
19015 + return _errno;
19016 +}
19017 +
19018 +static int __cold oh_port_remove(struct platform_device *_of_dev)
19019 +{
19020 + int _errno = 0, i;
19021 + struct dpa_oh_config_s *oh_config;
19022 +
19023 + pr_info("Removing OH port...\n");
19024 +
19025 + oh_config = dev_get_drvdata(&_of_dev->dev);
19026 + if (oh_config == NULL) {
19027 + pr_err(KBUILD_MODNAME
19028 + ": %s:%hu:%s(): No OH config in device private data!\n",
19029 + KBUILD_BASENAME".c", __LINE__, __func__);
19030 + _errno = -ENODEV;
19031 + goto return_error;
19032 + }
19033 +
19034 + if (oh_config->egress_fqs)
19035 + for (i = 0; i < oh_config->egress_cnt; i++)
19036 + oh_fq_destroy(oh_config->egress_fqs + i);
19037 +
19038 + if (oh_config->oh_port == NULL) {
19039 + pr_err(KBUILD_MODNAME
19040 + ": %s:%hu:%s(): No fm port in device private data!\n",
19041 + KBUILD_BASENAME".c", __LINE__, __func__);
19042 + _errno = -EINVAL;
19043 + goto free_egress_fqs;
19044 + }
19045 +
19046 + _errno = fm_port_disable(oh_config->oh_port);
19047 +
19048 +free_egress_fqs:
19049 + if (oh_config->egress_fqs)
19050 + devm_kfree(&_of_dev->dev, oh_config->egress_fqs);
19051 + devm_kfree(&_of_dev->dev, oh_config);
19052 + dev_set_drvdata(&_of_dev->dev, NULL);
19053 +
19054 +return_error:
19055 + return _errno;
19056 +}
19057 +
19058 +static struct platform_driver oh_port_driver = {
19059 + .driver = {
19060 + .name = KBUILD_MODNAME,
19061 + .of_match_table = oh_port_match_table,
19062 + .owner = THIS_MODULE,
19063 + .pm = OH_PM_OPS,
19064 + },
19065 + .probe = oh_port_probe,
19066 + .remove = oh_port_remove
19067 +};
19068 +
19069 +static int __init __cold oh_port_load(void)
19070 +{
19071 + int _errno;
19072 +
19073 + pr_info(OH_MOD_DESCRIPTION "\n");
19074 +
19075 + _errno = platform_driver_register(&oh_port_driver);
19076 + if (_errno < 0) {
19077 + pr_err(KBUILD_MODNAME
19078 + ": %s:%hu:%s(): platform_driver_register() = %d\n",
19079 + KBUILD_BASENAME".c", __LINE__, __func__, _errno);
19080 + }
19081 +
19082 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
19083 + KBUILD_BASENAME".c", __func__);
19084 + return _errno;
19085 +}
19086 +module_init(oh_port_load);
19087 +
19088 +static void __exit __cold oh_port_unload(void)
19089 +{
19090 + pr_debug(KBUILD_MODNAME ": -> %s:%s()\n",
19091 + KBUILD_BASENAME".c", __func__);
19092 +
19093 + platform_driver_unregister(&oh_port_driver);
19094 +
19095 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
19096 + KBUILD_BASENAME".c", __func__);
19097 +}
19098 +module_exit(oh_port_unload);
19099 --- /dev/null
19100 +++ b/drivers/net/ethernet/freescale/sdk_dpaa/offline_port.h
19101 @@ -0,0 +1,59 @@
19102 +/* Copyright 2011 Freescale Semiconductor Inc.
19103 + *
19104 + * Redistribution and use in source and binary forms, with or without
19105 + * modification, are permitted provided that the following conditions are met:
19106 + * * Redistributions of source code must retain the above copyright
19107 + * notice, this list of conditions and the following disclaimer.
19108 + * * Redistributions in binary form must reproduce the above copyright
19109 + * notice, this list of conditions and the following disclaimer in the
19110 + * documentation and/or other materials provided with the distribution.
19111 + * * Neither the name of Freescale Semiconductor nor the
19112 + * names of its contributors may be used to endorse or promote products
19113 + * derived from this software without specific prior written permission.
19114 + *
19115 + *
19116 + * ALTERNATIVELY, this software may be distributed under the terms of the
19117 + * GNU General Public License ("GPL") as published by the Free Software
19118 + * Foundation, either version 2 of that License or (at your option) any
19119 + * later version.
19120 + *
19121 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
19122 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19123 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19124 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
19125 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
19126 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19127 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
19128 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
19129 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
19130 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19131 + */
19132 +
19133 +#ifndef __OFFLINE_PORT_H
19134 +#define __OFFLINE_PORT_H
19135 +
19136 +struct fm_port;
19137 +struct qman_fq;
19138 +
19139 +/* fqs are defined in duples (base_fq, fq_count) */
19140 +struct fq_duple {
19141 + struct qman_fq *fqs;
19142 + int fqs_count;
19143 + uint16_t channel_id;
19144 + struct list_head fq_list;
19145 +};
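+
+/* Example (illustrative): a duple parsed from a <0x70 2> property cell
+ * pair ends up as fqs_count = 2 with fqs[0].fqid = 0x70 and
+ * fqs[1].fqid = 0x71, both scheduled to channel_id.
+ */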
19146 +
19147 +/* OH port configuration */
19148 +struct dpa_oh_config_s {
19149 + uint32_t error_fqid;
19150 + uint32_t default_fqid;
19151 + struct fm_port *oh_port;
19152 + uint32_t egress_cnt;
19153 + struct qman_fq *egress_fqs;
19154 + uint16_t channel;
19155 +
19156 + struct list_head fqs_ingress_list;
19157 + struct list_head fqs_egress_list;
19158 +};
19159 +
19160 +#endif /* __OFFLINE_PORT_H */